diff --git a/.env b/.env
index e2fbb60d7..ea946cdb4 100644
--- a/.env
+++ b/.env
@@ -1 +1,7 @@
-CHVER=25.5.10
\ No newline at end of file
+CHVER=25.5.10
+
+# Port overrides to avoid conflicts
+GRAFANA_PORT=3001
+XATU_SERVER_PORT=8082
+POSTGRES_PORT=5433
+POSTGRES_ADDRESS=0.0.0.0
\ No newline at end of file
diff --git a/cmd/horizon.go b/cmd/horizon.go
new file mode 100644
index 000000000..5b7a32fc0
--- /dev/null
+++ b/cmd/horizon.go
@@ -0,0 +1,175 @@
+//nolint:dupl // disable duplicate code warning for cmds
+package cmd
+
+import (
+	"os"
+
+	"github.com/creasty/defaults"
+	"github.com/ethpandaops/xatu/pkg/horizon"
+	"github.com/spf13/cobra"
+	yaml "gopkg.in/yaml.v3"
+)
+
+var (
+	horizonCfgFile string
+)
+
+type HorizonOverride struct {
+	FlagHelper func(cmd *cobra.Command)
+	Setter     func(cmd *cobra.Command, overrides *horizon.Override) error
+}
+
+type HorizonOverrideConfig struct {
+	FlagName     string
+	EnvName      string
+	Description  string
+	OverrideFunc func(val string, overrides *horizon.Override)
+}
+
+func createHorizonOverride(config HorizonOverrideConfig) HorizonOverride {
+	return HorizonOverride{
+		FlagHelper: func(cmd *cobra.Command) {
+			cmd.Flags().String(config.FlagName, "", config.Description+` (env: `+config.EnvName+`)`)
+		},
+		Setter: func(cmd *cobra.Command, overrides *horizon.Override) error {
+			val := ""
+
+			if cmd.Flags().Changed(config.FlagName) {
+				val = cmd.Flags().Lookup(config.FlagName).Value.String()
+			}
+
+			if os.Getenv(config.EnvName) != "" {
+				val = os.Getenv(config.EnvName)
+			}
+
+			if val == "" {
+				return nil
+			}
+
+			config.OverrideFunc(val, overrides)
+
+			return nil
+		},
+	}
+}
+
+var HorizonOverrides = []HorizonOverride{
+	createHorizonOverride(HorizonOverrideConfig{
+		FlagName:    "horizon-xatu-output-authorization",
+		EnvName:     "HORIZON_XATU_OUTPUT_AUTHORIZATION",
+		Description: "sets the authorization secret for all xatu outputs",
+		OverrideFunc: func(val string, overrides *horizon.Override) {
+			overrides.XatuOutputAuth.Enabled = true
+			overrides.XatuOutputAuth.Value = val
+		},
+	}),
+	createHorizonOverride(HorizonOverrideConfig{
+		FlagName:    "horizon-xatu-coordinator-authorization",
+		EnvName:     "HORIZON_XATU_COORDINATOR_AUTHORIZATION",
+		Description: "sets the authorization secret for coordinator requests",
+		OverrideFunc: func(val string, overrides *horizon.Override) {
+			overrides.CoordinatorAuth.Enabled = true
+			overrides.CoordinatorAuth.Value = val
+		},
+	}),
+	createHorizonOverride(HorizonOverrideConfig{
+		FlagName:    "metrics-addr",
+		EnvName:     "METRICS_ADDR",
+		Description: "sets the metrics address",
+		OverrideFunc: func(val string, overrides *horizon.Override) {
+			overrides.MetricsAddr.Enabled = true
+			overrides.MetricsAddr.Value = val
+		},
+	}),
+	createHorizonOverride(HorizonOverrideConfig{
+		FlagName:    "horizon-beacon-node-url",
+		EnvName:     "HORIZON_BEACON_NODE_URL",
+		Description: "sets a single beacon node URL (overrides configured list)",
+		OverrideFunc: func(val string, overrides *horizon.Override) {
+			overrides.BeaconNodeURLs.Enabled = true
+			overrides.BeaconNodeURLs.Value = val
+		},
+	}),
+	createHorizonOverride(HorizonOverrideConfig{
+		FlagName:    "horizon-network-name",
+		EnvName:     "HORIZON_NETWORK_NAME",
+		Description: "overrides the network name detected from the beacon node",
+		OverrideFunc: func(val string, overrides *horizon.Override) {
+			overrides.NetworkName.Enabled = true
+			overrides.NetworkName.Value = val
+		},
+	}),
+}
+
+// horizonCmd represents the horizon command
+var horizonCmd = &cobra.Command{
+	Use:   "horizon",
+	Short: "Runs Xatu in horizon mode.",
+	Long: `Runs Xatu in horizon mode, which provides real-time head tracking
+	with multi-beacon node support and dual-iterator coordination.`,
+	Run: func(cmd *cobra.Command, args []string) {
+		initCommon()
+
+		config, err := loadHorizonConfigFromFile(horizonCfgFile)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		log = getLogger(config.LoggingLevel, "")
+
+		log.WithField("location", horizonCfgFile).Info("Loaded config")
+
+		overrides := &horizon.Override{}
+		for _, override := range HorizonOverrides {
+			if errr := override.Setter(cmd, overrides); errr != nil {
+				log.Fatal(errr)
+			}
+		}
+
+		h, err := horizon.New(cmd.Context(), log, config, overrides)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		if err := h.Start(cmd.Context()); err != nil {
+			log.Fatal(err)
+		}
+
+		log.Info("Xatu horizon exited - cya!")
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(horizonCmd)
+
+	horizonCmd.Flags().StringVar(&horizonCfgFile, "config", "horizon.yaml", "config file (default is horizon.yaml)")
+
+	for _, override := range HorizonOverrides {
+		override.FlagHelper(horizonCmd)
+	}
+}
+
+func loadHorizonConfigFromFile(file string) (*horizon.Config, error) {
+	if file == "" {
+		file = "horizon.yaml"
+	}
+
+	config := &horizon.Config{}
+
+	if err := defaults.Set(config); err != nil {
+		return nil, err
+	}
+
+	yamlFile, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	type plain horizon.Config
+
+	if err := yaml.Unmarshal(yamlFile, (*plain)(config)); err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
diff --git a/deploy/kurtosis/horizon-test.yaml b/deploy/kurtosis/horizon-test.yaml
new file mode 100644
index 000000000..90eb5e3a4
--- /dev/null
+++ b/deploy/kurtosis/horizon-test.yaml
@@ -0,0 +1,74 @@
+# Kurtosis ethereum-package configuration for Horizon E2E testing
+#
+# This configuration creates a local Ethereum testnet with all consensus clients
+# for testing Horizon's multi-beacon node support.
+# +# Usage: +# kurtosis run github.com/ethpandaops/ethereum-package --args-file horizon-test.yaml --enclave horizon +# +# After starting, beacon nodes will be available at: +# - lighthouse: http://cl-lighthouse-geth:4000 +# - prysm: http://cl-prysm-nethermind:3500 +# - teku: http://cl-teku-erigon:4000 +# - lodestar: http://cl-lodestar-reth:4000 +# - nimbus: http://cl-nimbus-besu:4000 +# - grandine: http://cl-grandine-geth:4000 +# +# Note: Actual hostnames will vary based on Kurtosis enclave. Use: +# kurtosis enclave inspect horizon +# to get the actual service names and ports. + +participants: + # Lighthouse CL + Geth EL + - el_type: geth + cl_type: lighthouse + count: 1 + + # Prysm CL + Nethermind EL + - el_type: nethermind + cl_type: prysm + count: 1 + + # Teku CL + Erigon EL + - el_type: erigon + cl_type: teku + count: 1 + + # Lodestar CL + Reth EL + - el_type: reth + cl_type: lodestar + count: 1 + + # Nimbus CL + Besu EL + - el_type: besu + cl_type: nimbus + count: 1 + # Nimbus needs subscribe-all-subnets for full attestation coverage + cl_extra_params: + - --subscribe-all-subnets + + # Grandine CL + Geth EL (different Geth instance) + - el_type: geth + cl_type: grandine + count: 1 + +# Network configuration for faster testing +network_params: + # Shorter genesis delay for faster startup + genesis_delay: 120 + # Standard slot time + seconds_per_slot: 12 + # Deneb fork for blob testing + deneb_fork_epoch: 0 + # Electra fork for testing Electra attestations + electra_fork_epoch: 1 + +# Disable additional services - we'll run xatu separately +additional_services: [] + +# Global settings +global_log_level: info + +# Port publishing disabled - we'll use docker network for connectivity +port_publisher: + nat_exit_ip: KURTOSIS_IP_ADDR_PLACEHOLDER diff --git a/deploy/kurtosis/xatu-horizon.yaml b/deploy/kurtosis/xatu-horizon.yaml new file mode 100644 index 000000000..75a07cc31 --- /dev/null +++ b/deploy/kurtosis/xatu-horizon.yaml @@ -0,0 +1,136 @@ +# Horizon 
 configuration for Kurtosis E2E testing
+#
+# This configuration connects Horizon to all consensus clients in the Kurtosis network.
+# Beacon node URLs must be updated based on the Kurtosis enclave inspection output.
+#
+# Usage:
+# 1. Start Kurtosis network: kurtosis run github.com/ethpandaops/ethereum-package --args-file horizon-test.yaml --enclave horizon
+# 2. Get beacon node URLs: kurtosis enclave inspect horizon | grep -E "cl-.+-http"
+# 3. Update beaconNodes section below with actual URLs
+# 4. Run Horizon: xatu horizon --config xatu-horizon.yaml
+#
+# Or use an environment variable (single-node override; see HORIZON_BEACON_NODE_URL in cmd/horizon.go):
+# export HORIZON_BEACON_NODE_URL=http://cl-lighthouse-geth:4000
+# xatu horizon --config xatu-horizon.yaml
+
+logging: "info" # panic,fatal,warn,info,debug,trace
+metricsAddr: ":9098"
+pprofAddr: ":6062"
+
+name: xatu-horizon-e2e
+
+# Labels for E2E test identification
+labels:
+  environment: e2e-test
+  network: kurtosis
+
+# NTP server
+ntpServer: time.google.com
+
+# Coordinator for tracking processing locations
+coordinator:
+  address: xatu-server:8080
+  tls: false
+
+# Multi-beacon node pool - all 6 consensus clients
+# Update these URLs after starting the Kurtosis network
+ethereum:
+  # Override network name for Kurtosis devnet
+  overrideNetworkName: kurtosis
+
+  # Allow extra time for clients to become healthy after genesis
+  startupTimeout: 5m
+
+  beaconNodes:
+    # Lighthouse
+    - name: lighthouse
+      address: http://cl-lighthouse-geth:4000
+    # Prysm (uses port 3500 by default)
+    - name: prysm
+      address: http://cl-prysm-nethermind:3500
+    # Teku
+    - name: teku
+      address: http://cl-teku-erigon:4000
+    # Lodestar
+    - name: lodestar
+      address: http://cl-lodestar-reth:4000
+    # Nimbus
+    - name: nimbus
+      address: http://cl-nimbus-besu:4000
+    # Grandine
+    - name: grandine
+      address: http://cl-grandine-geth:4000
+
+  # Health check interval
+  healthCheckInterval: 3s
+
+  # Block cache settings
+  blockCacheSize: 1000
+  blockCacheTtl: 1h
+  blockPreloadWorkers: 5
+  blockPreloadQueueSize: 5000
+
+# Deduplication cache - 13 minutes covers ~1 epoch plus delays
+dedupCache:
+  ttl: 13m
+
+# SSE subscription settings
+subscription:
+  bufferSize: 1000
+
+# Reorg handling
+reorg:
+  enabled: true
+  maxDepth: 64
+  bufferSize: 100
+
+# Epoch iterator - trigger at 50% through epoch
+epochIterator:
+  enabled: true
+  triggerPercent: 0.5
+
+# Enable all derivers for comprehensive E2E testing
+derivers:
+  # Block-based derivers
+  beaconBlock:
+    enabled: true
+  attesterSlashing:
+    enabled: true
+  proposerSlashing:
+    enabled: true
+  deposit:
+    enabled: true
+  withdrawal:
+    enabled: true
+  voluntaryExit:
+    enabled: true
+  blsToExecutionChange:
+    enabled: true
+  executionTransaction:
+    enabled: true
+  elaboratedAttestation:
+    enabled: true
+
+  # Epoch-based derivers
+  proposerDuty:
+    enabled: true
+  beaconBlob:
+    enabled: true
+  beaconValidators:
+    enabled: true
+    chunkSize: 100
+  beaconCommittee:
+    enabled: true
+
+# Output to local xatu server
+outputs:
+  - name: xatu
+    type: xatu
+    config:
+      address: xatu-server:8080
+      tls: false
+      maxQueueSize: 51200
+      batchTimeout: 0.5s
+      exportTimeout: 30s
+      maxExportBatchSize: 32
+      workers: 50
diff --git a/deploy/kurtosis/xatu-server.yaml b/deploy/kurtosis/xatu-server.yaml
new file mode 100644
index 000000000..fdc9ef046
--- /dev/null
+++ b/deploy/kurtosis/xatu-server.yaml
@@ -0,0 +1,100 @@
+# Xatu Server configuration for Kurtosis E2E testing
+#
+# This configuration runs the xatu-server for receiving events from Horizon
+# and routing them to ClickHouse.
+# +# Usage: +# xatu server --config xatu-server.yaml + +logging: "info" # panic,fatal,warn,info,debug,trace +addr: ":8080" +metricsAddr: ":9090" + +labels: + environment: e2e-test + network: kurtosis + +# NTP server +ntpServer: time.google.com + +# Persistence for coordinator +persistence: + enabled: true + driverName: postgres + connectionString: postgres://user:password@xatu-postgres:5432/xatu?sslmode=disable + maxIdleConns: 2 + maxOpenConns: 5 + +# In-memory store (sufficient for E2E testing) +store: + type: memory + +# GeoIP disabled for testing +geoip: + enabled: false + +# Services configuration +services: + coordinator: + enabled: true + auth: + enabled: false + nodeRecord: + maxQueueSize: 51200 + batchTimeout: 5s + exportTimeout: 30s + maxExportBatchSize: 512 + + eventIngester: + enabled: true + clientNameSalt: "e2e_test_salt" + outputs: + # Horizon events - block-based derivers + - name: horizon + type: http + shippingMethod: sync + filter: + modules: + - HORIZON + eventNames: + - BEACON_API_ETH_V1_BEACON_COMMITTEE + - BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR + - BEACON_API_ETH_V1_PROPOSER_DUTY + - BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING + - BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING + - BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE + - BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION + - BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT + - BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT + - BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL + - BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION + - BEACON_API_ETH_V2_BEACON_BLOCK + - BEACON_API_ETH_V2_BEACON_BLOCK_V2 + config: + address: http://xatu-vector-http-kafka:9005 + maxQueueSize: 50000 + batchTimeout: 0.5s + exportTimeout: 30s + maxExportBatchSize: 64 + compression: gzip + keepAlive: false + workers: 100 + + # Horizon validators - separate handler for high-volume events + - name: horizon-validators + type: http + shippingMethod: sync + filter: + modules: + - HORIZON + eventNames: + - 
BEACON_API_ETH_V1_BEACON_VALIDATORS + config: + address: http://xatu-vector-http-kafka:9005 + maxQueueSize: 50000 + batchTimeout: 0.5s + exportTimeout: 30s + maxExportBatchSize: 64 + compression: gzip + keepAlive: false + workers: 400 diff --git a/deploy/local/docker-compose/xatu-horizon.yaml b/deploy/local/docker-compose/xatu-horizon.yaml new file mode 100644 index 000000000..6edecebd4 --- /dev/null +++ b/deploy/local/docker-compose/xatu-horizon.yaml @@ -0,0 +1,114 @@ +logging: "info" # panic,fatal,warn,info,debug,trace +metricsAddr: ":9098" +pprofAddr: ":6062" # optional. if supplied it enables pprof server + +name: xatu-horizon + +# Labels applied to all events from this instance +labels: + ethpandaops: rocks + +# Better to use a NTP server close eg. +# time.aws.com - AWS +# time.windows.com - Azure +# time.google.com - GCP +# pool.ntp.org - https://www.pool.ntp.org/zone/@ +ntpServer: time.google.com + +tracing: + enabled: true + endpoint: tempo:4318 + insecure: true + sampling: + rate: 0.1 + +# Coordinator configuration for tracking processing locations +coordinator: + address: xatu-server:8080 + tls: false + headers: + Authorization: "Bearer SET_ME" + +# Ethereum configuration - multi-beacon node pool +ethereum: + # List of beacon nodes to connect to (at least one required) + # Set HORIZON_BEACON_NODE_URL environment variable to override + beaconNodes: + - name: beacon-node-1 + address: http://SET_ME:5052 + + # Health check interval for beacon node connections + healthCheckInterval: 3s + + # Block cache settings + blockCacheSize: 1000 + blockCacheTtl: 1h + blockPreloadWorkers: 5 + blockPreloadQueueSize: 5000 + +# Deduplication cache configuration +dedupCache: + ttl: 13m + +# SSE subscription configuration +subscription: + bufferSize: 1000 + +# Chain reorg handling configuration +reorg: + enabled: true + maxDepth: 64 + bufferSize: 100 + +# Epoch iterator configuration +epochIterator: + enabled: true + triggerPercent: 0.5 + +# Derivers configuration - enable all 
derivers for local testing +derivers: + # Block-based derivers + beaconBlock: + enabled: true + attesterSlashing: + enabled: true + proposerSlashing: + enabled: true + deposit: + enabled: true + withdrawal: + enabled: true + voluntaryExit: + enabled: true + blsToExecutionChange: + enabled: true + executionTransaction: + enabled: true + elaboratedAttestation: + enabled: true + + # Epoch-based derivers + proposerDuty: + enabled: true + beaconBlob: + enabled: true + beaconValidators: + enabled: true + chunkSize: 100 + beaconCommittee: + enabled: true + +# Output to local xatu server +outputs: + - name: xatu + type: xatu + config: + address: xatu-server:8080 + tls: false + maxQueueSize: 51200 + batchTimeout: 0.5s + exportTimeout: 30s + maxExportBatchSize: 32 + workers: 50 + headers: + Authorization: "Bearer SET_ME" diff --git a/deploy/local/docker-compose/xatu-server.yaml b/deploy/local/docker-compose/xatu-server.yaml index c139feecd..518361390 100644 --- a/deploy/local/docker-compose/xatu-server.yaml +++ b/deploy/local/docker-compose/xatu-server.yaml @@ -204,6 +204,52 @@ services: compression: gzip keepAlive: false workers: 100 + - name: horizon + type: http + shippingMethod: sync + filter: + modules: + - HORIZON + eventNames: + - BEACON_API_ETH_V1_BEACON_COMMITTEE + - BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR + - BEACON_API_ETH_V1_PROPOSER_DUTY + - BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING + - BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING + - BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE + - BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION + - BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT + - BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT + - BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL + - BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION + - BEACON_API_ETH_V2_BEACON_BLOCK + - BEACON_API_ETH_V2_BEACON_BLOCK_V2 + config: + address: http://xatu-vector-http-kafka:9005 + maxQueueSize: 50000 + batchTimeout: .1s + exportTimeout: 30s + maxExportBatchSize: 64 + 
compression: gzip + keepAlive: false + workers: 100 + - name: horizon-vals + type: http + shippingMethod: sync + filter: + modules: + - HORIZON + eventNames: + - BEACON_API_ETH_V1_BEACON_VALIDATORS + config: + address: http://xatu-vector-http-kafka:9005 + maxQueueSize: 50000 + batchTimeout: .1s + exportTimeout: 30s + maxExportBatchSize: 64 + compression: gzip + keepAlive: false + workers: 400 - name: cannon-vals type: http shippingMethod: sync diff --git a/docker-compose.yml b/docker-compose.yml index 657a53b79..44a609b37 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -630,6 +630,28 @@ services: networks: - xatu-net + xatu-horizon: + profiles: + - "horizon" + command: horizon --config /etc/horizon/config.yaml + container_name: xatu-horizon + hostname: xatu-horizon + build: + context: . + dockerfile: Dockerfile + environment: + # Default + HORIZON_XATU_COORDINATOR_AUTHORIZATION: ${HORIZON_XATU_COORDINATOR_AUTHORIZATION:-Bearer super_secret} + # Default of xatu:example + HORIZON_XATU_OUTPUT_AUTHORIZATION: ${HORIZON_XATU_OUTPUT_AUTHORIZATION:-Basic eGF0dTpleGFtcGxl} + # Default of http://localhost:5052 + HORIZON_BEACON_NODE_URL: ${HORIZON_BEACON_NODE_URL:-http://localhost:5052} + HORIZON_NETWORK_NAME: ${HORIZON_NETWORK_NAME} + volumes: + - ./deploy/local/docker-compose/xatu-horizon.yaml:/etc/horizon/config.yaml + networks: + - xatu-net + networks: xatu-net: driver: bridge diff --git a/docs/horizon.md b/docs/horizon.md new file mode 100644 index 000000000..594e0f961 --- /dev/null +++ b/docs/horizon.md @@ -0,0 +1,550 @@ +# Horizon + +Horizon is a HEAD data collection module with multi-beacon node support, high-availability coordination, and shared derivers. Unlike [Cannon](./cannon.md) which focuses on backfilling historical data, Horizon is optimized for real-time HEAD tracking of the Ethereum beacon chain. 
+ +This module can output events to various sinks and it is **not** a hard requirement to run the [Xatu server](./server.md), though it is required for high-availability deployments. + +## Table of contents + +- [Architecture Overview](#architecture-overview) +- [Dual-Iterator Design](#dual-iterator-design) +- [Multi-Beacon Node Support](#multi-beacon-node-support) +- [High Availability Deployment](#high-availability-deployment) +- [Horizon vs Cannon: When to Use Which](#horizon-vs-cannon-when-to-use-which) +- [Usage](#usage) +- [Requirements](#requirements) +- [Configuration](#configuration) + - [Beacon Nodes](#beacon-nodes-configuration) + - [Coordinator](#coordinator-configuration) + - [Derivers](#derivers-configuration) + - [Output Sinks](#output-sink-configuration) +- [Metrics Reference](#metrics-reference) +- [Running Locally](#running-locally) + +## Architecture Overview + +Horizon follows a modular architecture designed for reliability and real-time data collection: + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ HORIZON MODULE │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ BEACON NODE POOL │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ │ +│ │ │Lighthouse│ │ Prysm │ │ Teku │ ... 
│ │ +│ │ │ :5052 │ │ :3500 │ │ :5051 │ │ │ +│ │ └────┬─────┘ └────┬─────┘ └────┬─────┘ │ │ +│ │ │ │ │ │ │ +│ │ └─────────────┼─────────────┘ │ │ +│ │ │ │ │ +│ │ ┌────────┴────────┐ │ │ +│ │ │ Health Checker │ │ │ +│ │ │ + Failover │ │ │ +│ │ └────────┬────────┘ │ │ +│ └─────────────────────┼───────────────────────────────────────────────┘ │ +│ │ │ +│ ┌─────────────────────┼───────────────────────────────────────────────┐ │ +│ │ ▼ │ │ +│ │ ┌──────────────────────────────┐ ┌─────────────────────────┐ │ │ +│ │ │ SSE Block Subscription │ │ SSE Reorg Subscription │ │ │ +│ │ │ /eth/v1/events?topics=block │ │ chain_reorg events │ │ │ +│ │ └──────────────┬───────────────┘ └───────────┬─────────────┘ │ │ +│ │ │ │ │ │ +│ │ ▼ │ │ │ +│ │ ┌──────────────────────────────┐ │ │ │ +│ │ │ Deduplication Cache │◄──────────────┘ │ │ +│ │ │ (TTL-based block roots) │ (clears reorged blocks) │ │ +│ │ └──────────────┬───────────────┘ │ │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌──────────────────────────────────────────────────────────────┐ │ │ +│ │ │ HEAD ITERATOR │ │ │ +│ │ │ • Receives real-time SSE block events │ │ │ +│ │ │ • Processes slots immediately as they arrive │ │ │ +│ │ │ • Updates head_slot in coordinator │ │ │ +│ │ └──────────────┬───────────────────────────────────────────────┘ │ │ +│ │ │ │ │ +│ │ ├──────────────────────────────────────┐ │ │ +│ │ │ │ │ │ +│ │ ▼ ▼ │ │ +│ │ ┌─────────────────────────┐ ┌─────────────────────────┐│ │ +│ │ │ Block-Based │ │ Epoch-Based ││ │ +│ │ │ Derivers │ │ Derivers ││ │ +│ │ │ • BeaconBlock │ │ • ProposerDuty ││ │ +│ │ │ • AttesterSlashing │ │ • BeaconBlob ││ │ +│ │ │ • ProposerSlashing │ │ • BeaconValidators ││ │ +│ │ │ • Deposit │ │ • BeaconCommittee ││ │ +│ │ │ • Withdrawal │ │ ││ │ +│ │ │ • VoluntaryExit │ │ (Triggered midway ││ │ +│ │ │ • BLSToExecutionChange │ │ through each epoch) ││ │ +│ │ │ • ExecutionTransaction │ │ ││ │ +│ │ │ • ElaboratedAttestation│ │ ││ │ +│ │ └───────────┬─────────────┘ └───────────┬─────────────┘│ │ +│ │ │ │ │ │ +│ │ 
└────────────────┬───────────────────┘ │ │ +│ │ │ │ │ +│ └───────────────────────────────┼──────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌───────────────────────────────────────────────────────────────────┐ │ +│ │ OUTPUT SINKS │ │ +│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │ +│ │ │ Xatu │ │ HTTP │ │ Kafka │ │ Stdout │ │ │ +│ │ │ Server │ │ Server │ │ Brokers │ │ │ │ │ +│ │ └─────────┘ └─────────┘ └─────────┘ └─────────┘ │ │ +│ └───────────────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────┘ + │ + ▼ + ┌───────────────────────────────────────┐ + │ COORDINATOR SERVER │ + │ (Tracks head_slot / fill_slot per │ + │ deriver for HA coordination) │ + └───────────────────────────────────────┘ +``` + +## Dual-Iterator Design + +Horizon uses a dual-iterator architecture to ensure both real-time data collection and consistency: + +### HEAD Iterator +- **Purpose**: Real-time processing of new blocks as they are produced +- **Mechanism**: Subscribes to SSE `/eth/v1/events?topics=block` on all beacon nodes +- **Priority**: Highest - never blocks, processes events immediately +- **Location Tracking**: Updates `head_slot` in coordinator after processing each slot + +### FILL Iterator +- **Purpose**: Catches up on any missed slots between restarts +- **Mechanism**: Walks slots from `fill_slot` toward `HEAD - LAG` +- **Configuration**: + - `lagSlots`: Number of slots to stay behind HEAD (default: 32) + - `maxBoundedSlots`: Maximum slots to process in one cycle (default: 7200) + - `rateLimit`: Maximum slots per second (default: 10.0) +- **Location Tracking**: Updates `fill_slot` in coordinator after processing each slot + +### Coordination +Both iterators coordinate through the Coordinator service to avoid duplicate processing: +- HEAD checks both `head_slot` and `fill_slot` before processing +- FILL checks both markers to skip slots already processed by HEAD +- On restart, HEAD 
immediately begins tracking new blocks while FILL catches up from its last position + +``` +Timeline: +─────────────────────────────────────────────────────────────► + HEAD + fill_slot HEAD - LAG (real-time) + │ │ │ + ▼ ▼ ▼ +────────────[FILL ITERATOR RANGE]──────────[LAG BUFFER]────► + +FILL processes historical slots ─┐ + │ + Never overlaps with HEAD ─► LAG ensures separation +``` + +### Epoch Iterator +For epoch-based derivers (ProposerDuty, BeaconBlob, BeaconValidators, BeaconCommittee): +- **Trigger**: Fires at a configurable percentage through each epoch (default: 50%) +- **Purpose**: Pre-fetches data for the NEXT epoch before it starts +- **Configuration**: `triggerPercent` (0.0 to 1.0, default: 0.5) + +## Multi-Beacon Node Support + +Horizon connects to multiple beacon nodes simultaneously for redundancy and reliability: + +### Features +- **Health Checking**: Periodic health checks per node (configurable interval) +- **Automatic Failover**: Falls back to healthy nodes when primary is unavailable +- **Exponential Backoff Retry**: Failed connections retry with backoff (1s initial, 30s max) +- **SSE Aggregation**: Receives block events from all nodes, deduplicates locally +- **Shared Block Cache**: Single cache across all nodes with singleflight deduplication +- **Shared Services**: Metadata and Duties services initialized from first healthy node + +### Configuration Example +```yaml +ethereum: + beaconNodes: + - name: lighthouse-1 + address: http://lighthouse:5052 + headers: + authorization: Bearer token1 + - name: prysm-1 + address: http://prysm:3500 + - name: teku-1 + address: http://teku:5051 + healthCheckInterval: 3s + blockCacheSize: 1000 + blockCacheTtl: 1h +``` + +### Node Selection +- `GetHealthyNode()`: Returns any healthy node (round-robin) +- `PreferNode(address)`: Prefers specific node, falls back to healthy if unavailable +- All nodes receive SSE subscriptions for redundancy + +## High Availability Deployment + +For production deployments 
requiring high availability: + +### Single Instance Mode +Run one Horizon instance per network. Suitable for: +- Development/testing +- Non-critical data collection +- Networks with low stakes + +### Multi-Instance HA Mode +Run multiple Horizon instances with Coordinator: + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Network │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ Horizon-1 │ │ Horizon-2 │ │ Horizon-3 │ │ +│ │ (Primary) │ │ (Standby) │ │ (Standby) │ │ +│ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ │ +│ └─────────────────┼─────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────┐ │ +│ │ Coordinator Server │ │ +│ │ (PostgreSQL backend) │ │ +│ └────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**How it works:** +1. All instances track the same `HorizonLocation` records in the Coordinator +2. When processing a slot, each instance checks if `slot <= head_slot OR slot <= fill_slot` +3. First instance to process a slot updates the location, others skip +4. 
On failover, another instance picks up where the failed one left off + +**Requirements:** +- [Server](./server.md) running with Coordinator service enabled +- PostgreSQL database for location persistence +- All instances configured with same `networkId` and deriver types + +## Horizon vs Cannon: When to Use Which + +| Feature | Horizon | Cannon | +|---------|---------|--------| +| **Primary Focus** | Real-time HEAD tracking | Historical backfilling | +| **Beacon Nodes** | Multiple (pool with failover) | Single | +| **Data Direction** | Forward (new blocks) | Backward (historical) | +| **Use Case** | Live monitoring, real-time analytics | Data backfilling, gap filling | +| **Latency** | Sub-second (SSE events) | Variable (backfill pace) | +| **HA Support** | Built-in (multi-node pool + coordinator) | Via coordinator | +| **Reorg Handling** | Native (SSE reorg events) | Relies on canonical chain | + +### When to Use Horizon +- You need real-time data as blocks are produced +- You want redundancy across multiple beacon nodes +- You're building live dashboards or monitoring systems +- You need automatic failover and high availability + +### When to Use Cannon +- You need to backfill historical data from genesis +- You're processing data at your own pace (rate-limited) +- You have a single reliable beacon node +- You're doing one-time historical analysis + +### Using Both Together +For complete data coverage, run both: +1. **Horizon**: Tracks HEAD in real-time, ensures no new data is missed +2. **Cannon**: Backfills historical data at a controlled pace + +Both modules share the same deriver implementations (`pkg/cldata/deriver/`) ensuring data consistency. + +## Usage + +Horizon requires a [config file](#configuration). 
+ +```bash +Usage: + xatu horizon [flags] + +Flags: + --config string config file (default is horizon.yaml) (default "horizon.yaml") + -h, --help help for horizon +``` + +## Requirements + +- Multiple [Ethereum consensus clients](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) with exposed HTTP servers (recommended: 2+ for redundancy) +- [Server](./server.md) running with the [Coordinator](./server.md#coordinator) service enabled (required for HA) +- PostgreSQL database (for coordinator persistence) + +## Configuration + +Horizon requires a single `yaml` config file. An example file can be found [here](../example_horizon.yaml). + +### General Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| logging | string | `info` | Log level (`panic`, `fatal`, `warn`, `info`, `debug`, `trace`) | +| metricsAddr | string | `:9090` | The address the metrics server will listen on | +| pprofAddr | string | | The address the pprof server will listen on (disabled if omitted) | +| name | string | | **Required.** Unique name of the Horizon instance | +| labels | object | | Key-value map of labels to append to every event | +| ntpServer | string | `time.google.com` | NTP server for clock drift correction | + +### Beacon Nodes Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| ethereum.beaconNodes | array | | **Required.** List of beacon node configurations | +| ethereum.beaconNodes[].name | string | | **Required.** Unique name for this beacon node | +| ethereum.beaconNodes[].address | string | | **Required.** HTTP endpoint of the beacon node | +| ethereum.beaconNodes[].headers | object | | Key-value map of headers to append to requests | +| ethereum.overrideNetworkName | string | | Override auto-detected network name | +| ethereum.startupTimeout | duration | `60s` | Max time to wait for a healthy beacon node on startup | +| ethereum.healthCheckInterval | 
duration | `3s` | Interval between health checks | +| ethereum.blockCacheSize | int | `1000` | Maximum number of blocks to cache | +| ethereum.blockCacheTtl | duration | `1h` | TTL for cached blocks | +| ethereum.blockPreloadWorkers | int | `5` | Number of workers for block preloading | +| ethereum.blockPreloadQueueSize | int | `5000` | Size of block preload queue | + +### Coordinator Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| coordinator.address | string | | **Required.** Address of the Xatu Coordinator server | +| coordinator.tls | bool | `false` | Server requires TLS | +| coordinator.headers | object | | Key-value map of headers (e.g., authorization) | + +### Deduplication Cache Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| dedupCache.ttl | duration | `13m` | TTL for cached block roots (should exceed 1 epoch) | + +### Subscription Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| subscription.bufferSize | int | `1000` | Size of the block events channel buffer | + +### Reorg Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| reorg.enabled | bool | `true` | Enable chain reorg handling | +| reorg.maxDepth | int | `64` | Maximum reorg depth to handle (deeper reorgs ignored) | +| reorg.bufferSize | int | `100` | Size of the reorg events channel buffer | + +### Epoch Iterator Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| epochIterator.enabled | bool | `true` | Enable epoch-based derivers | +| epochIterator.triggerPercent | float | `0.5` | Trigger point within epoch (0.0-1.0, 0.5 = midway) | + +### Derivers Configuration + +#### Block-Based Derivers + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| derivers.beaconBlock.enabled | bool | `true` | Enable beacon block 
deriver | +| derivers.attesterSlashing.enabled | bool | `true` | Enable attester slashing deriver | +| derivers.proposerSlashing.enabled | bool | `true` | Enable proposer slashing deriver | +| derivers.deposit.enabled | bool | `true` | Enable deposit deriver | +| derivers.withdrawal.enabled | bool | `true` | Enable withdrawal deriver (Capella+) | +| derivers.voluntaryExit.enabled | bool | `true` | Enable voluntary exit deriver | +| derivers.blsToExecutionChange.enabled | bool | `true` | Enable BLS to execution change deriver (Capella+) | +| derivers.executionTransaction.enabled | bool | `true` | Enable execution transaction deriver (Bellatrix+) | +| derivers.elaboratedAttestation.enabled | bool | `true` | Enable elaborated attestation deriver | + +#### Epoch-Based Derivers + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| derivers.proposerDuty.enabled | bool | `true` | Enable proposer duty deriver | +| derivers.beaconBlob.enabled | bool | `true` | Enable beacon blob deriver (Deneb+) | +| derivers.beaconValidators.enabled | bool | `true` | Enable beacon validators deriver | +| derivers.beaconValidators.chunkSize | int | `100` | Validators per event chunk | +| derivers.beaconCommittee.enabled | bool | `true` | Enable beacon committee deriver | + +### Output Sink Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| outputs | array | | **Required.** List of output sinks | +| outputs[].name | string | | Name of the output | +| outputs[].type | string | | Type: `xatu`, `http`, `kafka`, `stdout` | +| outputs[].config | object | | Output-specific configuration | +| outputs[].filter | object | | Event filtering configuration | + +See [Cannon documentation](./cannon.md#output-xatu-configuration) for detailed output sink configuration options. + +## Metrics Reference + +All Horizon metrics use the `xatu_horizon` namespace. 
+ +### Core Metrics + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `decorated_event_total` | Counter | type, network | Total decorated events created | +| `head_slot` | Gauge | deriver, network | Current HEAD slot position | +| `fill_slot` | Gauge | deriver, network | Current FILL slot position | +| `lag_slots` | Gauge | deriver, network | Slots FILL is behind HEAD | +| `blocks_derived_total` | Counter | deriver, network, iterator | Total blocks derived | + +### Beacon Node Pool Metrics + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `beacon_node_status` | Gauge | node, status | Node health status (1=active) | +| `beacon_blocks_fetched_total` | Counter | node, network | Blocks fetched per node | +| `beacon_block_cache_hits_total` | Counter | network | Block cache hits | +| `beacon_block_cache_misses_total` | Counter | network | Block cache misses | +| `beacon_block_fetch_errors_total` | Counter | node, network | Block fetch errors | +| `beacon_health_check_total` | Counter | node, status | Health checks per node | +| `beacon_health_check_duration_seconds` | Histogram | node | Health check duration | + +### SSE Subscription Metrics + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `sse_events_total` | Counter | node, topic, network | SSE events received | +| `sse_connection_status` | Gauge | node | SSE connection status (1=connected) | +| `sse_reconnects_total` | Counter | node | SSE reconnection attempts | +| `sse_last_event_received_at` | Gauge | node, topic | Unix timestamp of last event | +| `sse_event_processing_delay_seconds` | Histogram | node, topic | Event processing delay | + +### Reorg Metrics + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `reorg_events_total` | Counter | node, network | Reorg events received | +| `reorg_depth` | Histogram | node, network | Reorg depth distribution | +| 
`reorg_ignored_total` | Counter | node, network | Reorgs ignored (too deep) | +| `reorg_last_event_at` | Gauge | node, network | Timestamp of last reorg | +| `reorg_last_depth` | Gauge | node, network | Depth of last reorg | +| `reorg_last_slot` | Gauge | node, network | Slot of last reorg | + +### Deduplication Cache Metrics + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `dedup_hits_total` | Counter | | Duplicate events dropped | +| `dedup_misses_total` | Counter | | New events processed | +| `dedup_cache_size` | Gauge | | Current cache entries | + +### Iterator Metrics + +#### HEAD Iterator + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `head_iterator_processed_total` | Counter | deriver, network | Slots processed | +| `head_iterator_skipped_total` | Counter | deriver, network, reason | Slots skipped | +| `head_iterator_position_slot` | Gauge | deriver, network | Current slot position | + +#### FILL Iterator + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `fill_iterator_processed_total` | Counter | deriver, network | Slots processed | +| `fill_iterator_skipped_total` | Counter | deriver, network, reason | Slots skipped | +| `fill_iterator_position_slot` | Gauge | deriver, network | Current slot position | +| `fill_iterator_target_slot` | Gauge | deriver, network | Target slot (HEAD - LAG) | +| `fill_iterator_slots_remaining` | Gauge | deriver, network | Slots until caught up | +| `fill_iterator_rate_limit_wait_total` | Counter | | Rate limit wait events | +| `fill_iterator_cycles_complete_total` | Counter | deriver, network | Fill cycles completed | + +#### Epoch Iterator + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `epoch_iterator_processed_total` | Counter | deriver, network | Epochs processed | +| `epoch_iterator_skipped_total` | Counter | deriver, network, reason | Epochs skipped | +| 
`epoch_iterator_position_epoch` | Gauge | deriver, network | Current epoch position | +| `epoch_iterator_trigger_wait_total` | Counter | deriver, network | Trigger point waits | + +## Running Locally + +```bash +# Docker +docker run -d --name xatu-horizon \ + -v $HOST_DIR_CHANGE_ME/config.yaml:/opt/xatu/config.yaml \ + -p 9090:9090 \ + -it ethpandaops/xatu:latest horizon --config /opt/xatu/config.yaml + +# Build from source +go build -o dist/xatu main.go +./dist/xatu horizon --config horizon.yaml + +# Development +go run main.go horizon --config horizon.yaml +``` + +### Minimal Configuration Example + +```yaml +name: my-horizon + +coordinator: + address: localhost:8080 + +ethereum: + beaconNodes: + - name: local-beacon + address: http://localhost:5052 + +outputs: + - name: stdout + type: stdout +``` + +### Production Configuration Example + +```yaml +logging: info +metricsAddr: ":9090" +name: horizon-mainnet-1 + +labels: + environment: production + region: us-east-1 + +coordinator: + address: coordinator.example.com:8080 + tls: true + headers: + authorization: Bearer mytoken + +ethereum: + beaconNodes: + - name: lighthouse-1 + address: http://lighthouse-1:5052 + - name: prysm-1 + address: http://prysm-1:3500 + - name: teku-1 + address: http://teku-1:5051 + healthCheckInterval: 3s + +dedupCache: + ttl: 13m + +reorg: + enabled: true + maxDepth: 64 + +outputs: + - name: xatu-server + type: xatu + config: + address: xatu.example.com:8080 + tls: true + headers: + authorization: Bearer mytoken + maxQueueSize: 51200 + batchTimeout: 5s + maxExportBatchSize: 512 +``` diff --git a/example_horizon.yaml b/example_horizon.yaml new file mode 100644 index 000000000..0712ed38e --- /dev/null +++ b/example_horizon.yaml @@ -0,0 +1,199 @@ +# Horizon Configuration Example +# +# Horizon is a HEAD data collection module with multi-beacon node support, +# high-availability coordination, and shared derivers. 
It processes real-time +# blockchain data from multiple beacon nodes and outputs decorated events. +# +# Key features: +# - Multi-beacon node support with health checking and failover +# - Real-time HEAD tracking via SSE block events +# - Local deduplication to prevent duplicate event processing +# - Chain reorg handling with configurable depth limits +# - Shared derivers with Cannon module for consistency +# - Coordinator integration for HA deployments + +logging: "info" # panic,fatal,warn,info,debug,trace +metricsAddr: ":9090" +# pprofAddr: ":6060" # Optional. If supplied, enables the pprof server +# The name of this Horizon instance (required) +# Used for identification in logs, metrics, and coordinator +name: example-horizon-instance + +# Labels applied to all events from this instance +labels: + ethpandaops: rocks + environment: production + +# NTP Server for clock drift correction +# Better to use an NTP server close to your deployment: +# time.aws.com - AWS +# time.windows.com - Azure +# time.google.com - GCP +# pool.ntp.org - https://www.pool.ntp.org/zone/@ +ntpServer: time.google.com + +# Tracing configuration (optional) +# tracing: +# enabled: false +# endpoint: localhost:4317 +# insecure: true + +# Coordinator configuration for tracking processing locations +# Required for HA deployments to coordinate slot processing across instances +coordinator: + address: localhost:8080 + tls: false + # headers: + # authorization: Someb64Value + +# Ethereum configuration - multi-beacon node pool +# Horizon connects to multiple beacon nodes for redundancy and load distribution +ethereum: + # List of beacon nodes to connect to (at least one required) + beaconNodes: + - name: lighthouse-1 + address: http://localhost:5052 + # headers: + # authorization: Someb64Value + - name: prysm-1 + address: http://localhost:3500 + - name: teku-1 + address: http://localhost:5051 + # Add more nodes for redundancy + # - name: lodestar-1 + # address: http://localhost:9596 + # - name: 
nimbus-1 + # address: http://localhost:5052 + # - name: grandine-1 + # address: http://localhost:5052 + + # Override network name (optional) + # If not set, network name is auto-detected from the first healthy beacon node + # overrideNetworkName: mainnet + + # Startup timeout while waiting for a healthy beacon node + # Increase for slow-starting networks or cold cache scenarios + # startupTimeout: 5m + + # Health check interval for beacon node connections + healthCheckInterval: 3s + + # Block cache settings for performance optimization + blockCacheSize: 1000 + blockCacheTtl: 1h + blockPreloadWorkers: 5 + blockPreloadQueueSize: 5000 + +# Deduplication cache configuration +# Prevents duplicate processing of block events received from multiple beacon nodes +dedupCache: + # TTL for cached block roots (default: 13m - slightly more than 1 epoch) + # After this duration, entries are automatically evicted + ttl: 13m + +# SSE subscription configuration for real-time block events +subscription: + # Size of the block events channel buffer + # Increase if you see "channel full" warnings in logs + bufferSize: 1000 + +# Chain reorg handling configuration +reorg: + # Enable/disable reorg handling + enabled: true + # Maximum reorg depth to handle (slots) + # Reorgs deeper than this are logged and ignored + maxDepth: 64 + # Size of the reorg events channel buffer + bufferSize: 100 + +# Epoch iterator configuration for epoch-based derivers +# (ProposerDuty, BeaconBlob, BeaconValidators, BeaconCommittee) +epochIterator: + enabled: true + # Trigger point within epoch (0.0 to 1.0) + # 0.5 = trigger at 50% through epoch (midway) + # This allows pre-fetching next epoch data before it's needed + triggerPercent: 0.5 + +# Derivers configuration +# Enable/disable individual data derivers +derivers: + # Block-based derivers (real-time processing via HEAD iterator) + beaconBlock: + enabled: true + attesterSlashing: + enabled: true + proposerSlashing: + enabled: true + deposit: + enabled: true 
+ withdrawal: + enabled: true + voluntaryExit: + enabled: true + blsToExecutionChange: + enabled: true + executionTransaction: + enabled: true + elaboratedAttestation: + enabled: true + + # Epoch-based derivers (triggered midway through epoch) + proposerDuty: + enabled: true + beaconBlob: + enabled: true + beaconValidators: + enabled: true + # Chunk size for validator data (to avoid very large events) + chunkSize: 100 + beaconCommittee: + enabled: true + +# Output sinks configuration (at least one required) +# Events are sent to all configured outputs +outputs: + # Xatu server output (recommended for production) + - name: xatu-server + type: xatu + # filter: + # eventNames: + # - BEACON_API_ETH_V2_BEACON_BLOCK + # - BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT + config: + address: localhost:8080 + tls: false + # headers: + # authorization: Someb64Value + maxQueueSize: 51200 + batchTimeout: 5s + exportTimeout: 30s + maxExportBatchSize: 512 + + # HTTP output (alternative sink) + # - name: http-sink + # type: http + # config: + # address: http://localhost:8080 + # headers: + # authorization: Someb64Value + # maxQueueSize: 51200 + # batchTimeout: 5s + # exportTimeout: 30s + # maxExportBatchSize: 512 + + # Kafka output (for streaming pipelines) + # - name: kafka-sink + # type: kafka + # config: + # brokers: localhost:19092 + # topic: xatu-events + # flushFrequency: 1s + # flushMessages: 500 + # flushBytes: 1000000 + # maxRetries: 6 + # compression: snappy + # requiredAcks: leader + # partitioning: random diff --git a/migrations/postgres/009_horizon.down.sql b/migrations/postgres/009_horizon.down.sql new file mode 100644 index 000000000..cbdc8f564 --- /dev/null +++ b/migrations/postgres/009_horizon.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS horizon_location; diff --git a/migrations/postgres/009_horizon.up.sql b/migrations/postgres/009_horizon.up.sql new file mode 100644 index 000000000..eaf27dacb --- /dev/null +++ b/migrations/postgres/009_horizon.up.sql @@ -0,0 +1,10 @@ 
+CREATE TABLE horizon_location ( + location_id SERIAL PRIMARY KEY, + create_time TIMESTAMPTZ NOT NULL DEFAULT now(), + update_time TIMESTAMPTZ NOT NULL DEFAULT now(), + network_id VARCHAR(256), + type VARCHAR(256), + head_slot BIGINT NOT NULL DEFAULT 0, + fill_slot BIGINT NOT NULL DEFAULT 0, + CONSTRAINT horizon_location_unique UNIQUE (network_id, type) +); diff --git a/pkg/cannon/cannon.go b/pkg/cannon/cannon.go index fde42fcf1..3b8ca61f1 100644 --- a/pkg/cannon/cannon.go +++ b/pkg/cannon/cannon.go @@ -14,16 +14,19 @@ import ( //nolint:gosec // only exposed if pprofAddr config is set _ "net/http/pprof" + // Import extractors package to register all derivers via init(). + _ "github.com/ethpandaops/xatu/pkg/cldata/deriver/extractors" + "github.com/attestantio/go-eth2-client/spec" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/beevik/ntp" "github.com/ethpandaops/ethwallclock" "github.com/ethpandaops/xatu/pkg/cannon/coordinator" "github.com/ethpandaops/xatu/pkg/cannon/deriver" - v1 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v1" - v2 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v2" "github.com/ethpandaops/xatu/pkg/cannon/ethereum" "github.com/ethpandaops/xatu/pkg/cannon/iterator" + cldataderiver "github.com/ethpandaops/xatu/pkg/cldata/deriver" + cldataiterator "github.com/ethpandaops/xatu/pkg/cldata/iterator" "github.com/ethpandaops/xatu/pkg/observability" "github.com/ethpandaops/xatu/pkg/output" oxatu "github.com/ethpandaops/xatu/pkg/output/xatu" @@ -384,6 +387,7 @@ func (c *Cannon) startBeaconBlockProcessor(ctx context.Context) error { networkID := fmt.Sprintf("%d", c.beacon.Metadata().Network.ID) wallclock := c.beacon.Metadata().Wallclock() + depositChainID := c.beacon.Metadata().Spec.DepositChainID clientMeta, err := c.createNewClientMeta(ctx) if err != nil { @@ -391,257 +395,59 @@ func (c *Cannon) startBeaconBlockProcessor(ctx context.Context) error { } backfillingCheckpointIteratorMetrics := 
iterator.NewBackfillingCheckpointMetrics("xatu_cannon") - finalizedCheckpoint := "finalized" - eventDerivers := []deriver.EventDeriver{ - v2.NewAttesterSlashingDeriver( - c.log, - &c.Config.Derivers.AttesterSlashingConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.AttesterSlashingConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewProposerSlashingDeriver( - c.log, - &c.Config.Derivers.ProposerSlashingConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.ProposerSlashingConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewVoluntaryExitDeriver( - c.log, - &c.Config.Derivers.VoluntaryExitConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.VoluntaryExitConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewDepositDeriver( - c.log, - &c.Config.Derivers.DepositConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.DepositConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewBLSToExecutionChangeDeriver( - c.log, - &c.Config.Derivers.BLSToExecutionConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - 
xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.BLSToExecutionConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewExecutionTransactionDeriver( - c.log, - &c.Config.Derivers.ExecutionTransactionConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.ExecutionTransactionConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewWithdrawalDeriver( - c.log, - &c.Config.Derivers.WithdrawalConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.WithdrawalConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewBeaconBlockDeriver( - c.log, - &c.Config.Derivers.BeaconBlockConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.BeaconBlockConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v1.NewBeaconBlobDeriver( - c.log, - &c.Config.Derivers.BeaconBlobSidecarConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.BeaconBlobSidecarConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v1.NewProposerDutyDeriver( - 
c.log, - &c.Config.Derivers.ProposerDutyConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.ProposerDutyConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v2.NewElaboratedAttestationDeriver( - c.log, - &c.Config.Derivers.ElaboratedAttestationConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 3, - &c.Config.Derivers.ElaboratedAttestationConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v1.NewBeaconValidatorsDeriver( - c.log, - &c.Config.Derivers.BeaconValidatorsConfig, - iterator.NewBackfillingCheckpoint( - c.log, - networkName, - networkID, - xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS, - c.coordinatorClient, - wallclock, - &backfillingCheckpointIteratorMetrics, - c.beacon, - finalizedCheckpoint, - 2, - &c.Config.Derivers.BeaconValidatorsConfig.Iterator, - ), - c.beacon, - clientMeta, - ), - v1.NewBeaconCommitteeDeriver( - c.log, - &c.Config.Derivers.BeaconCommitteeConfig, + // Create beacon client and context provider adapters. + beaconClient := deriver.NewBeaconClientAdapter(c.beacon) + ctxProvider := deriver.NewContextProviderAdapter(clientMeta, networkName, c.beacon.Metadata().Network.ID, wallclock, depositChainID) + + // Create derivers using the factory pattern. + factory := cldataderiver.NewDeriverFactory(c.log, beaconClient, ctxProvider) + + // Create iterator factory that returns appropriate iterator for each cannon type. 
+ iteratorFactory := func(cannonType xatu.CannonType) cldataiterator.Iterator { + iterConfig := GetIteratorConfig(&c.Config.Derivers, cannonType) + if iterConfig == nil { + c.log.WithField("cannon_type", cannonType.String()).Warn("Unknown cannon type, skipping") + + return nil + } + + // Use lookAhead of 2 for validators/committees, 3 for others. + lookAhead := 3 + if cannonType == xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS || + cannonType == xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE { + lookAhead = 2 + } + + return deriver.NewIteratorAdapter( iterator.NewBackfillingCheckpoint( c.log, networkName, networkID, - xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE, + cannonType, c.coordinatorClient, wallclock, &backfillingCheckpointIteratorMetrics, c.beacon, finalizedCheckpoint, - 2, - &c.Config.Derivers.BeaconCommitteeConfig.Iterator, + lookAhead, + iterConfig, ), - c.beacon, - clientMeta, - ), + ) + } + + // Create enabled function that checks config. + enabledFunc := func(cannonType xatu.CannonType) bool { + return IsDeriverEnabled(&c.Config.Derivers, cannonType) + } + + // Create all derivers using factory. 
+ genericDerivers := factory.CreateAll(iteratorFactory, enabledFunc) + + eventDerivers := make([]deriver.EventDeriver, 0, len(genericDerivers)) + for _, d := range genericDerivers { + eventDerivers = append(eventDerivers, d) } c.eventDerivers = eventDerivers diff --git a/pkg/cannon/deriver/adapters.go b/pkg/cannon/deriver/adapters.go new file mode 100644 index 000000000..27d71c972 --- /dev/null +++ b/pkg/cannon/deriver/adapters.go @@ -0,0 +1,194 @@ +package deriver + +import ( + "context" + + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/beacon/pkg/beacon" + "github.com/ethpandaops/ethwallclock" + "github.com/ethpandaops/xatu/pkg/cannon/ethereum" + "github.com/ethpandaops/xatu/pkg/cannon/iterator" + "github.com/ethpandaops/xatu/pkg/cldata" + cldataiterator "github.com/ethpandaops/xatu/pkg/cldata/iterator" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// BeaconClientAdapter wraps the Cannon's BeaconNode to implement cldata.BeaconClient. +type BeaconClientAdapter struct { + beacon *ethereum.BeaconNode +} + +// NewBeaconClientAdapter creates a new BeaconClientAdapter. +func NewBeaconClientAdapter(beaconNode *ethereum.BeaconNode) *BeaconClientAdapter { + return &BeaconClientAdapter{beacon: beaconNode} +} + +// GetBeaconBlock retrieves a beacon block by its identifier. +func (a *BeaconClientAdapter) GetBeaconBlock(ctx context.Context, identifier string) (*spec.VersionedSignedBeaconBlock, error) { + return a.beacon.GetBeaconBlock(ctx, identifier) +} + +// LazyLoadBeaconBlock queues a block for background preloading. +func (a *BeaconClientAdapter) LazyLoadBeaconBlock(identifier string) { + a.beacon.LazyLoadBeaconBlock(identifier) +} + +// Synced checks if the beacon node is synced. 
+func (a *BeaconClientAdapter) Synced(ctx context.Context) error { + return a.beacon.Synced(ctx) +} + +// Node returns the underlying beacon node. +func (a *BeaconClientAdapter) Node() beacon.Node { + return a.beacon.Node() +} + +// FetchBeaconBlockBlobs retrieves blob sidecars for a given block identifier. +func (a *BeaconClientAdapter) FetchBeaconBlockBlobs(ctx context.Context, identifier string) ([]*deneb.BlobSidecar, error) { + return a.beacon.Node().FetchBeaconBlockBlobs(ctx, identifier) +} + +// FetchBeaconCommittee retrieves the beacon committees for a given epoch. +func (a *BeaconClientAdapter) FetchBeaconCommittee(ctx context.Context, epoch phase0.Epoch) ([]*v1.BeaconCommittee, error) { + return a.beacon.Duties().FetchBeaconCommittee(ctx, epoch) +} + +// GetValidatorIndex looks up a validator index from the committee for a given position. +func (a *BeaconClientAdapter) GetValidatorIndex( + ctx context.Context, + epoch phase0.Epoch, + slot phase0.Slot, + committeeIndex phase0.CommitteeIndex, + position uint64, +) (phase0.ValidatorIndex, error) { + return a.beacon.Duties().GetValidatorIndex(ctx, epoch, slot, committeeIndex, position) +} + +// FetchProposerDuties retrieves the proposer duties for a given epoch. +func (a *BeaconClientAdapter) FetchProposerDuties(ctx context.Context, epoch phase0.Epoch) ([]*v1.ProposerDuty, error) { + return a.beacon.Node().FetchProposerDuties(ctx, epoch) +} + +// GetValidators retrieves validators for a given state identifier. +func (a *BeaconClientAdapter) GetValidators(ctx context.Context, identifier string) (map[phase0.ValidatorIndex]*v1.Validator, error) { + return a.beacon.GetValidators(ctx, identifier) +} + +// LazyLoadValidators queues validators for background preloading. +func (a *BeaconClientAdapter) LazyLoadValidators(stateID string) { + a.beacon.LazyLoadValidators(stateID) +} + +// DeleteValidatorsFromCache removes validators from the cache. 
+func (a *BeaconClientAdapter) DeleteValidatorsFromCache(stateID string) { + a.beacon.DeleteValidatorsFromCache(stateID) +} + +// Verify BeaconClientAdapter implements cldata.BeaconClient. +var _ cldata.BeaconClient = (*BeaconClientAdapter)(nil) + +// IteratorAdapter wraps the Cannon's BackfillingCheckpoint to implement cldata/iterator.Iterator. +type IteratorAdapter struct { + iter *iterator.BackfillingCheckpoint +} + +// NewIteratorAdapter creates a new IteratorAdapter. +func NewIteratorAdapter(iter *iterator.BackfillingCheckpoint) *IteratorAdapter { + return &IteratorAdapter{iter: iter} +} + +// Start initializes the iterator. +func (a *IteratorAdapter) Start(ctx context.Context, activationFork spec.DataVersion) error { + return a.iter.Start(ctx, activationFork) +} + +// Next returns the next position to process. +func (a *IteratorAdapter) Next(ctx context.Context) (*cldataiterator.Position, error) { + resp, err := a.iter.Next(ctx) + if err != nil { + return nil, err + } + + // Convert BackfillingCheckpoint response to shared Position + direction := cldataiterator.DirectionForward + if resp.Direction == iterator.BackfillingCheckpointDirectionBackfill { + direction = cldataiterator.DirectionBackward + } + + return &cldataiterator.Position{ + Epoch: resp.Next, + LookAheadEpochs: resp.LookAheads, + Direction: direction, + }, nil +} + +// UpdateLocation persists the current position. +func (a *IteratorAdapter) UpdateLocation(ctx context.Context, position *cldataiterator.Position) error { + // Convert shared Direction to BackfillingCheckpoint direction + direction := iterator.BackfillingCheckpointDirectionHead + if position.Direction == cldataiterator.DirectionBackward { + direction = iterator.BackfillingCheckpointDirectionBackfill + } + + return a.iter.UpdateLocation(ctx, position.Epoch, direction) +} + +// Verify IteratorAdapter implements cldataiterator.Iterator. 
+var _ cldataiterator.Iterator = (*IteratorAdapter)(nil) + +// ContextProviderAdapter wraps Cannon's metadata creation to implement cldata.ContextProvider. +type ContextProviderAdapter struct { + clientMeta *xatu.ClientMeta + networkName string + networkID uint64 + wallclock *ethwallclock.EthereumBeaconChain + depositChainID uint64 +} + +// NewContextProviderAdapter creates a new ContextProviderAdapter. +func NewContextProviderAdapter( + clientMeta *xatu.ClientMeta, + networkName string, + networkID uint64, + wallclock *ethwallclock.EthereumBeaconChain, + depositChainID uint64, +) *ContextProviderAdapter { + return &ContextProviderAdapter{ + clientMeta: clientMeta, + networkName: networkName, + networkID: networkID, + wallclock: wallclock, + depositChainID: depositChainID, + } +} + +// CreateClientMeta returns the client metadata. +func (a *ContextProviderAdapter) CreateClientMeta(ctx context.Context) (*xatu.ClientMeta, error) { + return a.clientMeta, nil +} + +// NetworkName returns the network name. +func (a *ContextProviderAdapter) NetworkName() string { + return a.networkName +} + +// NetworkID returns the network ID. +func (a *ContextProviderAdapter) NetworkID() uint64 { + return a.networkID +} + +// Wallclock returns the Ethereum wallclock. +func (a *ContextProviderAdapter) Wallclock() *ethwallclock.EthereumBeaconChain { + return a.wallclock +} + +// DepositChainID returns the execution layer chain ID. +func (a *ContextProviderAdapter) DepositChainID() uint64 { + return a.depositChainID +} + +// Verify ContextProviderAdapter implements cldata.ContextProvider. 
+var _ cldata.ContextProvider = (*ContextProviderAdapter)(nil) diff --git a/pkg/cannon/deriver/beacon/eth/v1/CLAUDE.md b/pkg/cannon/deriver/beacon/eth/v1/CLAUDE.md deleted file mode 100644 index ac89ca8d0..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/CLAUDE.md +++ /dev/null @@ -1,6 +0,0 @@ -# Beacon API ETH V1 - -Finalized beacon chain data extractors that collect structured data via Ethereum Beacon API v1 endpoints. - -## Architecture -Claude MUST read the `./CURSOR.mdc` file before making any changes to this component. \ No newline at end of file diff --git a/pkg/cannon/deriver/beacon/eth/v1/CURSOR.mdc b/pkg/cannon/deriver/beacon/eth/v1/CURSOR.mdc deleted file mode 100644 index 1f94dc53a..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/CURSOR.mdc +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: Beacon API ETH V1 derivers - Extracts beacon chain data via finalized ETH V1 endpoints -globs: - - "*.go" - - "**/*_test.go" -alwaysApply: false ---- - -# Beacon API ETH V1 Derivers - -Finalized beacon chain data extractors that collect structured data via Ethereum Beacon API v1 endpoints. 
- -## Core Implementation Pattern -- **Iterator-Driven Processing**: All derivers use BackfillingCheckpoint iterators for systematic epoch-by-epoch data collection -- **Fork-Aware Activation**: Each deriver specifies ActivationFork (Phase0, Deneb) to handle network upgrade compatibility -- **Exponential Backoff Retry**: Use 3-minute max interval backoff for resilience against temporary beacon node issues - -## Key Design Requirements -- Process epochs sequentially via iterator.Next() for data completeness -- Check beacon.Synced() before processing to avoid stale data -- Implement proper fork activation (Phase0 for most, Deneb for blob sidecars) -- Use callback-based event emission via OnEventsDerived for output handling \ No newline at end of file diff --git a/pkg/cannon/deriver/beacon/eth/v1/beacon_blob.go b/pkg/cannon/deriver/beacon/eth/v1/beacon_blob.go deleted file mode 100644 index 1e9986ec8..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/beacon_blob.go +++ /dev/null @@ -1,321 +0,0 @@ -package v1 - -import ( - "context" - "encoding/hex" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/api" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/deneb" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - BeaconBlobDeriverName = 
xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR -) - -type BeaconBlobDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type BeaconBlobDeriver struct { - log logrus.FieldLogger - cfg *BeaconBlobDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewBeaconBlobDeriver(log logrus.FieldLogger, config *BeaconBlobDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *BeaconBlobDeriver { - return &BeaconBlobDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v1/beacon_blob", - "type": BeaconBlobDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *BeaconBlobDeriver) CannonType() xatu.CannonType { - return BeaconBlobDeriverName -} - -func (b *BeaconBlobDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionDeneb -} - -func (b *BeaconBlobDeriver) Name() string { - return BeaconBlobDeriverName.String() -} - -func (b *BeaconBlobDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *BeaconBlobDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Beacon blob deriver disabled") - - return nil - } - - b.log.Info("Beacon blob deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *BeaconBlobDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *BeaconBlobDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - 
bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - span.AddEvent("Checking if beacon node is synced") - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - span.AddEvent("Grabbing next location") - - // Get the next position. - position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *BeaconBlobDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconBlobDeriver.processEpoch", - 
trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) - } - - return allEvents, nil -} - -func (b *BeaconBlobDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconBlobDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - blobs, err := b.beacon.Node().FetchBeaconBlockBlobs(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - var apiErr *api.Error - if errors.As(err, &apiErr) { - switch apiErr.StatusCode { - case 404: - return []*xatu.DecoratedEvent{}, nil - case 503: - return nil, errors.New("beacon node is syncing") - } - } - - return nil, errors.Wrapf(err, "failed to get beacon blob sidecars for slot %d", slot) - } - - if blobs == nil { - return []*xatu.DecoratedEvent{}, nil - } - - events := []*xatu.DecoratedEvent{} - - for _, blob := range blobs { - event, err := b.createEventFromBlob(ctx, blob) - if err != nil { - return nil, errors.Wrapf(err, "failed to create event from blob sidecars for slot %d", slot) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *BeaconBlobDeriver) createEventFromBlob(ctx context.Context, blob *deneb.BlobSidecar) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - blockRoot, err := 
blob.SignedBlockHeader.Message.HashTreeRoot() - if err != nil { - return nil, errors.Wrap(err, "failed to get block root") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV1BeaconBlockBlobSidecar{ - EthV1BeaconBlockBlobSidecar: &xatuethv1.BlobSidecar{ - Slot: &wrapperspb.UInt64Value{Value: uint64(blob.SignedBlockHeader.Message.Slot)}, - Blob: fmt.Sprintf("0x%s", hex.EncodeToString(blob.Blob[:])), - Index: &wrapperspb.UInt64Value{Value: uint64(blob.Index)}, - BlockRoot: fmt.Sprintf("0x%s", hex.EncodeToString(blockRoot[:])), - BlockParentRoot: blob.SignedBlockHeader.Message.ParentRoot.String(), - ProposerIndex: &wrapperspb.UInt64Value{Value: uint64(blob.SignedBlockHeader.Message.ProposerIndex)}, - KzgCommitment: blob.KZGCommitment.String(), - KzgProof: blob.KZGProof.String(), - }, - }, - } - - additionalData, err := b.getAdditionalData(ctx, blob) - if err != nil { - b.log.WithError(err).Error("Failed to get extra beacon blob data") - - return nil, err - } else { - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1BeaconBlobSidecar{ - EthV1BeaconBlobSidecar: additionalData, - } - } - - return decoratedEvent, nil -} - -func (b *BeaconBlobDeriver) getAdditionalData(_ context.Context, blob *deneb.BlobSidecar) (*xatu.ClientMeta_AdditionalEthV1BeaconBlobSidecarData, error) { - extra := &xatu.ClientMeta_AdditionalEthV1BeaconBlobSidecarData{ - DataSize: &wrapperspb.UInt64Value{Value: uint64(len(blob.Blob))}, - DataEmptySize: &wrapperspb.UInt64Value{Value: uint64(ethereum.CountConsecutiveEmptyBytes(blob.Blob[:], 4))}, - VersionedHash: ethereum.ConvertKzgCommitmentToVersionedHash(blob.KZGCommitment[:]).String(), - } - - slot := b.beacon.Metadata().Wallclock().Slots().FromNumber(uint64(blob.SignedBlockHeader.Message.Slot)) - epoch := 
b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(blob.SignedBlockHeader.Message.Slot)) - - extra.Slot = &xatu.SlotV2{ - StartDateTime: timestamppb.New(slot.TimeWindow().Start()), - Number: &wrapperspb.UInt64Value{Value: uint64(blob.SignedBlockHeader.Message.Slot)}, - } - - extra.Epoch = &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, - StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), - } - - return extra, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v1/beacon_committee.go b/pkg/cannon/deriver/beacon/eth/v1/beacon_committee.go deleted file mode 100644 index 9127f97fb..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/beacon_committee.go +++ /dev/null @@ -1,306 +0,0 @@ -package v1 - -import ( - "context" - "fmt" - "time" - - apiv1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - BeaconCommitteeDeriverName = xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE -) - -type BeaconCommitteeDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type BeaconCommitteeDeriver struct { - log logrus.FieldLogger - cfg *BeaconCommitteeDeriverConfig - iterator *iterator.BackfillingCheckpoint - 
onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewBeaconCommitteeDeriver(log logrus.FieldLogger, config *BeaconCommitteeDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *BeaconCommitteeDeriver { - return &BeaconCommitteeDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v1/beacon_committee", - "type": BeaconCommitteeDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *BeaconCommitteeDeriver) CannonType() xatu.CannonType { - return BeaconCommitteeDeriverName -} - -func (b *BeaconCommitteeDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *BeaconCommitteeDeriver) Name() string { - return BeaconCommitteeDeriverName.String() -} - -func (b *BeaconCommitteeDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *BeaconCommitteeDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Beacon committee deriver disabled") - - return nil - } - - b.log.Info("Beacon committee deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *BeaconCommitteeDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *BeaconCommitteeDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - 
trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Get the next position. - position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *BeaconCommitteeDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconCommitteeDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - spec, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to get beacon spec") - } - - // Get the beacon committees for this epoch - beaconCommittees, err := 
b.beacon.Node().FetchBeaconCommittees(ctx, fmt.Sprintf("%d", phase0.Slot(epoch)*spec.SlotsPerEpoch), nil) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch beacon committees") - } - - allEvents := []*xatu.DecoratedEvent{} - uniqueEpochs := make(map[phase0.Epoch]struct{}) - uniqueSlots := make(map[phase0.Slot]struct{}) - uniqueCommittees := make(map[phase0.CommitteeIndex]struct{}) - - for _, committee := range beaconCommittees { - uniqueEpochs[epoch] = struct{}{} - uniqueSlots[committee.Slot] = struct{}{} - uniqueCommittees[committee.Index] = struct{}{} - } - - if len(uniqueEpochs) > 1 { - b.log.WithField("epochs", uniqueEpochs).Warn("Multiple epochs found") - - return nil, errors.New("multiple epochs found") - } - - minSlot := phase0.Slot(epoch) * spec.SlotsPerEpoch - maxSlot := (phase0.Slot(epoch) * spec.SlotsPerEpoch) + spec.SlotsPerEpoch - 1 - - for _, committee := range beaconCommittees { - if committee.Slot < minSlot || committee.Slot > maxSlot { - return nil, fmt.Errorf("beacon committee slot outside of epoch. (epoch: %d, slot: %d, min: %d, max: %d)", epoch, committee.Slot, minSlot, maxSlot) - } - - event, err := b.createEventFromBeaconCommittee(ctx, committee) - if err != nil { - b.log. - WithError(err). - WithField("slot", committee.Slot). - WithField("epoch", epoch). - Error("Failed to create event from beacon committee") - - return nil, err - } - - allEvents = append(allEvents, event) - } - - return allEvents, nil -} - -func (b *BeaconCommitteeDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - // Not supported. 
-} - -func (b *BeaconCommitteeDeriver) createEventFromBeaconCommittee(ctx context.Context, committee *apiv1.BeaconCommittee) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - validators := []*wrapperspb.UInt64Value{} - for _, validator := range committee.Validators { - validators = append(validators, wrapperspb.UInt64(uint64(validator))) - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V1_BEACON_COMMITTEE, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV1BeaconCommittee{ - EthV1BeaconCommittee: &xatuethv1.Committee{ - Slot: wrapperspb.UInt64(uint64(committee.Slot)), - Index: wrapperspb.UInt64(uint64(committee.Index)), - Validators: validators, - }, - }, - } - - additionalData, err := b.getAdditionalData(ctx, committee) - if err != nil { - b.log.WithError(err).Error("Failed to get extra beacon committee data") - - return nil, err - } else { - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1BeaconCommittee{ - EthV1BeaconCommittee: additionalData, - } - } - - return decoratedEvent, nil -} - -func (b *BeaconCommitteeDeriver) getAdditionalData(_ context.Context, committee *apiv1.BeaconCommittee) (*xatu.ClientMeta_AdditionalEthV1BeaconCommitteeData, error) { - extra := &xatu.ClientMeta_AdditionalEthV1BeaconCommitteeData{ - StateId: xatuethv1.StateIDFinalized, - } - - slot := b.beacon.Metadata().Wallclock().Slots().FromNumber(uint64(committee.Slot)) - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(committee.Slot)) - - extra.Slot = &xatu.SlotV2{ - StartDateTime: timestamppb.New(slot.TimeWindow().Start()), - Number: &wrapperspb.UInt64Value{Value: uint64(committee.Slot)}, - } - - extra.Epoch = &xatu.EpochV2{ - Number: 
&wrapperspb.UInt64Value{Value: epoch.Number()}, - StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), - } - - return extra, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v1/beacon_validators.go b/pkg/cannon/deriver/beacon/eth/v1/beacon_validators.go deleted file mode 100644 index ed9b285d1..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/beacon_validators.go +++ /dev/null @@ -1,327 +0,0 @@ -package v1 - -import ( - "context" - "fmt" - "time" - - apiv1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - BeaconValidatorsDeriverName = xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS -) - -type BeaconValidatorsDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - ChunkSize int `yaml:"chunkSize" default:"100"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type BeaconValidatorsDeriver struct { - log logrus.FieldLogger - cfg *BeaconValidatorsDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewBeaconValidatorsDeriver(log logrus.FieldLogger, config *BeaconValidatorsDeriverConfig, iter 
*iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *BeaconValidatorsDeriver { - return &BeaconValidatorsDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v1/validators", - "type": BeaconValidatorsDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *BeaconValidatorsDeriver) CannonType() xatu.CannonType { - return BeaconValidatorsDeriverName -} - -func (b *BeaconValidatorsDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *BeaconValidatorsDeriver) Name() string { - return BeaconValidatorsDeriverName.String() -} - -func (b *BeaconValidatorsDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *BeaconValidatorsDeriver) Start(ctx context.Context) error { - b.log.WithFields(logrus.Fields{ - "chunk_size": b.cfg.ChunkSize, - "enabled": b.cfg.Enabled, - }).Info("Starting BeaconValidatorsDeriver") - - if !b.cfg.Enabled { - b.log.Info("Validator states deriver disabled") - - return nil - } - - b.log.Info("Validator states deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *BeaconValidatorsDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *BeaconValidatorsDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer 
span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Get the next position. - position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - events, slot, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).WithField("epoch", position.Next).Error("Failed to process epoch") - - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - b.lookAhead(ctx, position.LookAheads) - - // Be a good citizen and clean up the validator cache for the current epoch - b.beacon.DeleteValidatorsFromCache(xatuethv1.SlotAsString(slot)) - - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead takes the upcoming epochs and looks ahead to do any pre-processing that might be required. 
-func (b *BeaconValidatorsDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "BeaconValidatorsDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - // Add the state to the preload queue so it's available when we need it - b.beacon.LazyLoadValidators(xatuethv1.SlotAsString(phase0.Slot(uint64(epoch) * uint64(sp.SlotsPerEpoch)))) - } -} - -func (b *BeaconValidatorsDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, phase0.Slot, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconValidatorsDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - spec, err := b.beacon.Node().Spec() - if err != nil { - return nil, 0, errors.Wrap(err, "failed to fetch spec") - } - - boundarySlot := phase0.Slot(uint64(epoch) * uint64(spec.SlotsPerEpoch)) - - validatorsMap, err := b.beacon.GetValidators(ctx, xatuethv1.SlotAsString(boundarySlot)) - if err != nil { - return nil, 0, errors.Wrap(err, "failed to fetch validator states") - } - - // Chunk the validators per the configured chunk size - chunkSize := b.cfg.ChunkSize - - var validatorChunks [][]*apiv1.Validator - - currentChunk := []*apiv1.Validator{} - - for _, validator := range validatorsMap { - if len(currentChunk) == chunkSize { - validatorChunks = append(validatorChunks, currentChunk) - currentChunk = []*apiv1.Validator{} - } - - currentChunk = append(currentChunk, validator) - } - - if len(currentChunk) > 0 { - validatorChunks = append(validatorChunks, currentChunk) - } - - allEvents := []*xatu.DecoratedEvent{} - - for chunkNum, chunk := range validatorChunks { - event, err := b.createEventFromValidators(ctx, chunk, epoch) - if err != nil { - b.log. - WithError(err). - WithField("chunk_size", len(chunk)). 
- WithField("chunk_number", chunkNum). - WithField("epoch", epoch). - Error("Failed to create event from validator state") - - return nil, 0, err - } - - allEvents = append(allEvents, event) - } - - return allEvents, boundarySlot, nil -} - -func (b *BeaconValidatorsDeriver) createEventFromValidators(ctx context.Context, validators []*apiv1.Validator, epoch phase0.Epoch) (*xatu.DecoratedEvent, error) { - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - data := xatu.Validators{} - for _, validator := range validators { - data.Validators = append(data.Validators, &xatuethv1.Validator{ - Index: wrapperspb.UInt64(uint64(validator.Index)), - Balance: wrapperspb.UInt64(uint64(validator.Balance)), - Status: wrapperspb.String(validator.Status.String()), - Data: &xatuethv1.ValidatorData{ - Pubkey: wrapperspb.String(validator.Validator.PublicKey.String()), - WithdrawalCredentials: wrapperspb.String(fmt.Sprintf("%#x", validator.Validator.WithdrawalCredentials)), - EffectiveBalance: wrapperspb.UInt64(uint64(validator.Validator.EffectiveBalance)), - Slashed: wrapperspb.Bool(validator.Validator.Slashed), - ActivationEpoch: wrapperspb.UInt64(uint64(validator.Validator.ActivationEpoch)), - ActivationEligibilityEpoch: wrapperspb.UInt64(uint64(validator.Validator.ActivationEligibilityEpoch)), - ExitEpoch: wrapperspb.UInt64(uint64(validator.Validator.ExitEpoch)), - WithdrawableEpoch: wrapperspb.UInt64(uint64(validator.Validator.WithdrawableEpoch)), - }, - }) - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V1_BEACON_VALIDATORS, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV1Validators{ - EthV1Validators: &data, - }, - } - - additionalData, err := b.getAdditionalData(ctx, epoch) - if err != nil { - b.log.WithError(err).Error("Failed to get 
extra validator state data") - - return nil, err - } else { - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1Validators{ - EthV1Validators: additionalData, - } - } - - return decoratedEvent, nil -} - -func (b *BeaconValidatorsDeriver) getAdditionalData(_ context.Context, epoch phase0.Epoch) (*xatu.ClientMeta_AdditionalEthV1ValidatorsData, error) { - epochInfo := b.beacon.Metadata().Wallclock().Epochs().FromNumber(uint64(epoch)) - - return &xatu.ClientMeta_AdditionalEthV1ValidatorsData{ - Epoch: &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: uint64(epoch)}, - StartDateTime: timestamppb.New(epochInfo.TimeWindow().Start()), - }, - }, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v1/proposer_duty.go b/pkg/cannon/deriver/beacon/eth/v1/proposer_duty.go deleted file mode 100644 index 57bc6fe4d..000000000 --- a/pkg/cannon/deriver/beacon/eth/v1/proposer_duty.go +++ /dev/null @@ -1,296 +0,0 @@ -package v1 - -import ( - "context" - "encoding/hex" - "fmt" - "time" - - apiv1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - ProposerDutyDeriverName = xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY -) - -type ProposerDutyDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator 
iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type ProposerDutyDeriver struct { - log logrus.FieldLogger - cfg *ProposerDutyDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewProposerDutyDeriver(log logrus.FieldLogger, config *ProposerDutyDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *ProposerDutyDeriver { - return &ProposerDutyDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v1/proposer_duty", - "type": ProposerDutyDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *ProposerDutyDeriver) CannonType() xatu.CannonType { - return ProposerDutyDeriverName -} - -func (b *ProposerDutyDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *ProposerDutyDeriver) Name() string { - return ProposerDutyDeriverName.String() -} - -func (b *ProposerDutyDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *ProposerDutyDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Proposer duty deriver disabled") - - return nil - } - - b.log.Info("Proposer duty deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *ProposerDutyDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *ProposerDutyDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - 
return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Get the next position. - position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *ProposerDutyDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ProposerDutyDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - // Get the proposer duties for this epoch - proposerDuties, err := 
b.beacon.Node().FetchProposerDuties(ctx, epoch) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch proposer duties") - } - - allEvents := []*xatu.DecoratedEvent{} - - for _, duty := range proposerDuties { - event, err := b.createEventFromProposerDuty(ctx, duty) - if err != nil { - b.log. - WithError(err). - WithField("slot", duty.Slot). - WithField("epoch", epoch). - Error("Failed to create event from proposer duty") - - return nil, err - } - - allEvents = append(allEvents, event) - } - - return allEvents, nil -} - -// lookAhead takes the upcoming epochs and looks ahead to do any pre-processing that might be required. -func (b *ProposerDutyDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "ProposerDutyDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *ProposerDutyDeriver) createEventFromProposerDuty(ctx context.Context, duty *apiv1.ProposerDuty) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V1_PROPOSER_DUTY, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV1ProposerDuty{ - EthV1ProposerDuty: &xatuethv1.ProposerDuty{ - Slot: wrapperspb.UInt64(uint64(duty.Slot)), - Pubkey: fmt.Sprintf("0x%s", 
hex.EncodeToString(duty.PubKey[:])), - ValidatorIndex: wrapperspb.UInt64(uint64(duty.ValidatorIndex)), - }, - }, - } - - additionalData, err := b.getAdditionalData(ctx, duty) - if err != nil { - b.log.WithError(err).Error("Failed to get extra proposer duty data") - - return nil, err - } else { - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1ProposerDuty{ - EthV1ProposerDuty: additionalData, - } - } - - return decoratedEvent, nil -} - -func (b *ProposerDutyDeriver) getAdditionalData(_ context.Context, duty *apiv1.ProposerDuty) (*xatu.ClientMeta_AdditionalEthV1ProposerDutyData, error) { - extra := &xatu.ClientMeta_AdditionalEthV1ProposerDutyData{ - StateId: xatuethv1.StateIDFinalized, - } - - slot := b.beacon.Metadata().Wallclock().Slots().FromNumber(uint64(duty.Slot)) - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(duty.Slot)) - - extra.Slot = &xatu.SlotV2{ - StartDateTime: timestamppb.New(slot.TimeWindow().Start()), - Number: &wrapperspb.UInt64Value{Value: uint64(duty.Slot)}, - } - - extra.Epoch = &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, - StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), - } - - return extra, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/CLAUDE.md b/pkg/cannon/deriver/beacon/eth/v2/CLAUDE.md deleted file mode 100644 index 19f11553c..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/CLAUDE.md +++ /dev/null @@ -1,6 +0,0 @@ -# ETH v2 Beacon API Derivers - -ETH v2 Beacon API data derivers that extract specific beacon chain events from finalized blockchain data. - -## Architecture -Claude MUST read the `./CURSOR.mdc` file before making any changes to this component. 
\ No newline at end of file diff --git a/pkg/cannon/deriver/beacon/eth/v2/CURSOR.mdc b/pkg/cannon/deriver/beacon/eth/v2/CURSOR.mdc deleted file mode 100644 index f518d11df..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/CURSOR.mdc +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: ETH v2 Beacon API derivers - Extract beacon chain events from finalized blocks -globs: - - "*.go" - - "**/*_test.go" -alwaysApply: false ---- - -# ETH v2 Beacon API Derivers - -ETH v2 Beacon API data derivers that extract specific beacon chain events from finalized blockchain data. - -## Key Implementation Pattern -- **Uniform Deriver Structure**: All v2 derivers follow identical pattern with backfilling checkpoint iterator and event callbacks -- **Multi-version Ethereum Spec Support**: Handle version-specific data extraction (Phase0, Bellatrix, Capella, Deneb, Electra) -- **Exponential Backoff Retry**: Use consistent retry logic with 3-minute max interval for robustness - -## Critical Requirements -- All derivers extract events from beacon blocks during slot processing within epoch iteration -- Use snappy compression for transaction data and SSZ marshaling for block data -- Always set FinalizedWhenRequested=true for cannon-derived events -- Include comprehensive additional metadata (slot, epoch, block root, transaction counts/sizes) \ No newline at end of file diff --git a/pkg/cannon/deriver/beacon/eth/v2/attester_slashing.go b/pkg/cannon/deriver/beacon/eth/v2/attester_slashing.go deleted file mode 100644 index be8158cee..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/attester_slashing.go +++ /dev/null @@ -1,369 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 
"github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - AttesterSlashingDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING -) - -type AttesterSlashingDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type AttesterSlashingDeriver struct { - log logrus.FieldLogger - cfg *AttesterSlashingDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewAttesterSlashingDeriver(log logrus.FieldLogger, config *AttesterSlashingDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *AttesterSlashingDeriver { - return &AttesterSlashingDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/attester_slashing", - "type": AttesterSlashingDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (a *AttesterSlashingDeriver) CannonType() xatu.CannonType { - return AttesterSlashingDeriverName -} - -func (a *AttesterSlashingDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (a *AttesterSlashingDeriver) Name() string { - return AttesterSlashingDeriverName.String() -} - -func (a *AttesterSlashingDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - a.onEventsCallbacks = append(a.onEventsCallbacks, fn) -} - -func (a 
*AttesterSlashingDeriver) Start(ctx context.Context) error { - if !a.cfg.Enabled { - a.log.Info("Attester slashing deriver disabled") - - return nil - } - - a.log.Info("Attester slashing deriver enabled") - - if err := a.iterator.Start(ctx, a.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - go a.run(ctx) - - return nil -} - -func (a *AttesterSlashingDeriver) Stop(ctx context.Context) error { - return nil -} - -func (a *AttesterSlashingDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, - fmt.Sprintf("Derive %s", a.Name()), - trace.WithAttributes( - attribute.String("network", string(a.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := a.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next slot - position, err := a.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := a.processEpoch(ctx, position.Next) - if err != nil { - a.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - a.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range a.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := a.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - a.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, 
retryOpts...); err != nil { - a.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead takes the upcoming epochs and looks ahead to do any pre-processing that might be required. -func (a *AttesterSlashingDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "AttesterSlashingDeriver.lookAhead", - ) - defer span.End() - - sp, err := a.beacon.Node().Spec() - if err != nil { - a.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - a.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (a *AttesterSlashingDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "AttesterSlashingDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := a.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := a.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) 
- } - - return allEvents, nil -} - -func (a *AttesterSlashingDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "AttesterSlashingDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := a.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, a.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - slashings, err := a.getAttesterSlashings(ctx, block) - if err != nil { - return nil, errors.Wrapf(err, "failed to get attester slashings for slot %d", slot) - } - - for _, slashing := range slashings { - event, err := a.createEvent(ctx, slashing, blockIdentifier) - if err != nil { - a.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for attester slashing %s", slashing.String()) - } - - events = append(events, event) - } - - return events, nil -} - -func (a *AttesterSlashingDeriver) getAttesterSlashings(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv1.AttesterSlashingV2, error) { - slashings := []*xatuethv1.AttesterSlashingV2{} - - attesterSlashings, err := block.AttesterSlashings() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attester slashings") - } - - for _, slashing := range attesterSlashings { - att1, err := slashing.Attestation1() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation 1") - } - - indexedAttestation1, err := convertIndexedAttestation(att1) - if err != nil { - return nil, errors.Wrap(err, "failed to 
convert indexed attestation 1") - } - - att2, err := slashing.Attestation2() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation 2") - } - - indexedAttestation2, err := convertIndexedAttestation(att2) - if err != nil { - return nil, errors.Wrap(err, "failed to convert indexed attestation 2") - } - - slashings = append(slashings, &xatuethv1.AttesterSlashingV2{ - Attestation_1: indexedAttestation1, - Attestation_2: indexedAttestation2, - }) - } - - return slashings, nil -} - -func convertIndexedAttestation(attestation *spec.VersionedIndexedAttestation) (*xatuethv1.IndexedAttestationV2, error) { - indicies := []*wrapperspb.UInt64Value{} - - atIndicies, err := attestation.AttestingIndices() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attesting indices") - } - - for _, index := range atIndicies { - indicies = append(indicies, &wrapperspb.UInt64Value{Value: index}) - } - - data, err := attestation.Data() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation data") - } - - sig, err := attestation.Signature() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation signature") - } - - return &xatuethv1.IndexedAttestationV2{ - AttestingIndices: indicies, - Data: &xatuethv1.AttestationDataV2{ - Slot: &wrapperspb.UInt64Value{Value: uint64(data.Slot)}, - Index: &wrapperspb.UInt64Value{Value: uint64(data.Index)}, - BeaconBlockRoot: data.BeaconBlockRoot.String(), - Source: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(data.Source.Epoch)}, - Root: data.Source.Root.String(), - }, - Target: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(data.Target.Epoch)}, - Root: data.Target.Root.String(), - }, - }, - Signature: sig.String(), - }, nil -} - -func (a *AttesterSlashingDeriver) createEvent(ctx context.Context, slashing *xatuethv1.AttesterSlashingV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the 
metadata - metadata, ok := proto.Clone(a.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockAttesterSlashing{ - EthV2BeaconBlockAttesterSlashing: slashing, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockAttesterSlashing{ - EthV2BeaconBlockAttesterSlashing: &xatu.ClientMeta_AdditionalEthV2BeaconBlockAttesterSlashingData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/beacon_block.go b/pkg/cannon/deriver/beacon/eth/v2/beacon_block.go deleted file mode 100644 index 801c18032..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/beacon_block.go +++ /dev/null @@ -1,410 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - "github.com/ethpandaops/xatu/pkg/proto/eth" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - ssz "github.com/ferranbt/fastssz" - "github.com/golang/snappy" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - BeaconBlockDeriverName = 
xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK -) - -type BeaconBlockDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type BeaconBlockDeriver struct { - log logrus.FieldLogger - cfg *BeaconBlockDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewBeaconBlockDeriver(log logrus.FieldLogger, config *BeaconBlockDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *BeaconBlockDeriver { - return &BeaconBlockDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/beacon_block", - "type": BeaconBlockDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *BeaconBlockDeriver) CannonType() xatu.CannonType { - return BeaconBlockDeriverName -} - -func (b *BeaconBlockDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *BeaconBlockDeriver) Name() string { - return BeaconBlockDeriverName.String() -} - -func (b *BeaconBlockDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *BeaconBlockDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Beacon block deriver disabled") - - return nil - } - - b.log.Info("Beacon block deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *BeaconBlockDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *BeaconBlockDeriver) run(rctx context.Context) { - bo := 
backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Get the next slot - position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - span.AddEvent("Epoch processing complete. Sending events...") - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - span.AddEvent("Events sent. Updating location...") - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - span.AddEvent("Location updated. 
Done.") - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead takes the upcoming epochs and looks ahead to do any pre-processing that might be required. -func (b *BeaconBlockDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "BeaconBlockDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *BeaconBlockDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconBlockDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) 
- } - - return allEvents, nil -} - -func (b *BeaconBlockDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BeaconBlockDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - event, err := b.createEventFromBlock(ctx, block) - if err != nil { - return nil, errors.Wrapf(err, "failed to create event from block for slot %d", slot) - } - - return []*xatu.DecoratedEvent{event}, nil -} - -func (b *BeaconBlockDeriver) createEventFromBlock(ctx context.Context, block *spec.VersionedSignedBeaconBlock) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - data, err := eth.NewEventBlockV2FromVersionSignedBeaconBlock(block) - if err != nil { - return nil, err - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_V2, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockV2{ - EthV2BeaconBlockV2: data, - }, - } - - additionalData, err := b.getAdditionalData(ctx, block) - if err != nil { - b.log.WithError(err).Error("Failed to get extra beacon block data") - - return nil, err - } else { - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockV2{ - EthV2BeaconBlockV2: additionalData, - } - } - - return decoratedEvent, nil -} - -func (b *BeaconBlockDeriver) getAdditionalData(_ context.Context, block *spec.VersionedSignedBeaconBlock) 
(*xatu.ClientMeta_AdditionalEthV2BeaconBlockV2Data, error) { - extra := &xatu.ClientMeta_AdditionalEthV2BeaconBlockV2Data{} - - slotI, err := block.Slot() - if err != nil { - return nil, err - } - - slot := b.beacon.Metadata().Wallclock().Slots().FromNumber(uint64(slotI)) - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(slotI)) - - extra.Slot = &xatu.SlotV2{ - StartDateTime: timestamppb.New(slot.TimeWindow().Start()), - Number: &wrapperspb.UInt64Value{Value: uint64(slotI)}, - } - - extra.Epoch = &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, - StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), - } - - extra.Version = block.Version.String() - - var txCount int - - var txSize int - - var transactionsBytes []byte - - addTxData := func(txs [][]byte) { - txCount = len(txs) - - for _, tx := range txs { - txSize += len(tx) - transactionsBytes = append(transactionsBytes, tx...) - } - } - - blockMessage, err := getBlockMessage(block) - if err != nil { - return nil, err - } - - sszData, err := ssz.MarshalSSZ(blockMessage) - if err != nil { - return nil, err - } - - dataSize := len(sszData) - compressedData := snappy.Encode(nil, sszData) - compressedDataSize := len(compressedData) - - blockRoot, err := block.Root() - if err != nil { - return nil, err - } - - extra.BlockRoot = fmt.Sprintf("%#x", blockRoot) - - transactions, err := block.ExecutionTransactions() - if err != nil { - return nil, errors.Wrap(err, "failed to get execution transactions") - } - - txs := make([][]byte, len(transactions)) - for i, tx := range transactions { - txs[i] = tx - } - - addTxData(txs) - - compressedTransactions := snappy.Encode(nil, transactionsBytes) - compressedTxSize := len(compressedTransactions) - - extra.TotalBytes = wrapperspb.UInt64(uint64(dataSize)) - extra.TotalBytesCompressed = wrapperspb.UInt64(uint64(compressedDataSize)) - extra.TransactionsCount = wrapperspb.UInt64(uint64(txCount)) - extra.TransactionsTotalBytes = 
wrapperspb.UInt64(uint64(txSize)) - extra.TransactionsTotalBytesCompressed = wrapperspb.UInt64(uint64(compressedTxSize)) - - // Always set to true when derived from the cannon. - extra.FinalizedWhenRequested = true - - return extra, nil -} - -func getBlockMessage(block *spec.VersionedSignedBeaconBlock) (ssz.Marshaler, error) { - switch block.Version { - case spec.DataVersionPhase0: - return block.Phase0.Message, nil - case spec.DataVersionAltair: - return block.Altair.Message, nil - case spec.DataVersionBellatrix: - return block.Bellatrix.Message, nil - case spec.DataVersionCapella: - return block.Capella.Message, nil - case spec.DataVersionDeneb: - return block.Deneb.Message, nil - case spec.DataVersionElectra: - return block.Electra.Message, nil - case spec.DataVersionFulu: - return block.Fulu.Message, nil - default: - return nil, fmt.Errorf("unsupported block version: %s", block.Version) - } -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/bls_to_execution_change.go b/pkg/cannon/deriver/beacon/eth/v2/bls_to_execution_change.go deleted file mode 100644 index 37bc000a7..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/bls_to_execution_change.go +++ /dev/null @@ -1,313 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - xatuethv2 "github.com/ethpandaops/xatu/pkg/proto/eth/v2" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/sirupsen/logrus" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - 
"google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - BLSToExecutionChangeDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE -) - -type BLSToExecutionChangeDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type BLSToExecutionChangeDeriver struct { - log logrus.FieldLogger - cfg *BLSToExecutionChangeDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewBLSToExecutionChangeDeriver(log logrus.FieldLogger, config *BLSToExecutionChangeDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *BLSToExecutionChangeDeriver { - return &BLSToExecutionChangeDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/bls_to_execution_change", - "type": BLSToExecutionChangeDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *BLSToExecutionChangeDeriver) CannonType() xatu.CannonType { - return BLSToExecutionChangeDeriverName -} - -func (b *BLSToExecutionChangeDeriver) Name() string { - return BLSToExecutionChangeDeriverName.String() -} - -func (b *BLSToExecutionChangeDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *BLSToExecutionChangeDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionCapella -} - -func (b *BLSToExecutionChangeDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("BLS to execution change deriver disabled") - - return nil - } - - b.log.Info("BLS to execution change deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err 
!= nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *BLSToExecutionChangeDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *BLSToExecutionChangeDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead takes the upcoming locations and looks ahead to do any pre-processing that might be required. 
-func (b *BLSToExecutionChangeDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "BLSToExecutionChangeDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *BLSToExecutionChangeDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BLSToExecutionChangeDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) 
- } - - return allEvents, nil -} - -func (b *BLSToExecutionChangeDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "BLSToExecutionChangeDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - changes, err := b.getBLSToExecutionChanges(ctx, block) - if err != nil { - return nil, err - } - - for _, change := range changes { - event, err := b.createEvent(ctx, change, blockIdentifier) - if err != nil { - b.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for BLS to execution change %s", change.String()) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *BLSToExecutionChangeDeriver) getBLSToExecutionChanges(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv2.SignedBLSToExecutionChangeV2, error) { - changes := []*xatuethv2.SignedBLSToExecutionChangeV2{} - - chs, err := block.BLSToExecutionChanges() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain BLS to execution changes") - } - - for _, change := range chs { - changes = append(changes, &xatuethv2.SignedBLSToExecutionChangeV2{ - Message: &xatuethv2.BLSToExecutionChangeV2{ - ValidatorIndex: wrapperspb.UInt64(uint64(change.Message.ValidatorIndex)), - FromBlsPubkey: change.Message.FromBLSPubkey.String(), - ToExecutionAddress: 
change.Message.ToExecutionAddress.String(), - }, - Signature: change.Signature.String(), - }) - } - - return changes, nil -} - -func (b *BLSToExecutionChangeDeriver) createEvent(ctx context.Context, change *xatuethv2.SignedBLSToExecutionChangeV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockBlsToExecutionChange{ - EthV2BeaconBlockBlsToExecutionChange: change, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockBlsToExecutionChange{ - EthV2BeaconBlockBlsToExecutionChange: &xatu.ClientMeta_AdditionalEthV2BeaconBlockBLSToExecutionChangeData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/deposit.go b/pkg/cannon/deriver/beacon/eth/v2/deposit.go deleted file mode 100644 index 51f93bc83..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/deposit.go +++ /dev/null @@ -1,317 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - 
"google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - DepositDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT -) - -type DepositDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type DepositDeriver struct { - log logrus.FieldLogger - cfg *DepositDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewDepositDeriver(log logrus.FieldLogger, config *DepositDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *DepositDeriver { - return &DepositDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/deposit", - "type": DepositDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *DepositDeriver) CannonType() xatu.CannonType { - return DepositDeriverName -} - -func (b *DepositDeriver) Name() string { - return DepositDeriverName.String() -} - -func (b *DepositDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *DepositDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *DepositDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Deposit deriver disabled") - - return nil - } - - b.log.Info("Deposit deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *DepositDeriver) Stop(ctx context.Context) 
error { - return nil -} - -func (b *DepositDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. 
-func (b *DepositDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "DepositDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *DepositDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "DepositDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) 
- } - - return allEvents, nil -} - -func (b *DepositDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "DepositDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - deposits, err := b.getDeposits(ctx, block) - if err != nil { - return nil, errors.Wrapf(err, "failed to get deposits for block %s", blockIdentifier.String()) - } - - for _, deposit := range deposits { - event, err := b.createEvent(ctx, deposit, blockIdentifier) - if err != nil { - b.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for deposit %s", deposit.String()) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *DepositDeriver) getDeposits(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv1.DepositV2, error) { - deposits := []*xatuethv1.DepositV2{} - - dps, err := block.Deposits() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain deposits") - } - - for _, deposit := range dps { - proof := []string{} - for _, p := range deposit.Proof { - proof = append(proof, fmt.Sprintf("0x%x", p)) - } - - deposits = append(deposits, &xatuethv1.DepositV2{ - Proof: proof, - Data: &xatuethv1.DepositV2_Data{ - Pubkey: deposit.Data.PublicKey.String(), - WithdrawalCredentials: fmt.Sprintf("0x%x", deposit.Data.WithdrawalCredentials), - Amount: 
wrapperspb.UInt64(uint64(deposit.Data.Amount)), - Signature: deposit.Data.Signature.String(), - }, - }) - } - - return deposits, nil -} - -func (b *DepositDeriver) createEvent(ctx context.Context, deposit *xatuethv1.DepositV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockDeposit{ - EthV2BeaconBlockDeposit: deposit, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockDeposit{ - EthV2BeaconBlockDeposit: &xatu.ClientMeta_AdditionalEthV2BeaconBlockDepositData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/elaborated_attestation.go b/pkg/cannon/deriver/beacon/eth/v2/elaborated_attestation.go deleted file mode 100644 index 797ef49f1..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/elaborated_attestation.go +++ /dev/null @@ -1,511 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - 
"go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - ElaboratedAttestationDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION -) - -type ElaboratedAttestationDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type ElaboratedAttestationDeriver struct { - log logrus.FieldLogger - cfg *ElaboratedAttestationDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewElaboratedAttestationDeriver(log logrus.FieldLogger, config *ElaboratedAttestationDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *ElaboratedAttestationDeriver { - return &ElaboratedAttestationDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/elaborated_attestation", - "type": ElaboratedAttestationDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *ElaboratedAttestationDeriver) CannonType() xatu.CannonType { - return ElaboratedAttestationDeriverName -} - -func (b *ElaboratedAttestationDeriver) Name() string { - return ElaboratedAttestationDeriverName.String() -} - -func (b *ElaboratedAttestationDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *ElaboratedAttestationDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *ElaboratedAttestationDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Elaborated attestation deriver 
disabled") - - return nil - } - - b.log.Info("Elaborated attestation deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *ElaboratedAttestationDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *ElaboratedAttestationDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - tracer := observability.Tracer() - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Get the next position. 
- position, err := b.iterator.Next(ctx) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - span.SetStatus(codes.Error, err.Error()) - - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *ElaboratedAttestationDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ElaboratedAttestationDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - allEvents := []*xatu.DecoratedEvent{} - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).WithField("epoch", epoch).Warn("Failed to look ahead at epoch") - - return nil, err - } - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = 
append(allEvents, events...) - } - - return allEvents, nil -} - -func (b *ElaboratedAttestationDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ElaboratedAttestationDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - events, err := b.getElaboratedAttestations(ctx, block) - if err != nil { - return nil, errors.Wrapf(err, "failed to get elaborated attestations for slot %d", slot) - } - - return events, nil -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. -func (b *ElaboratedAttestationDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "ElaboratedAttestationDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *ElaboratedAttestationDeriver) getElaboratedAttestations(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatu.DecoratedEvent, error) { - blockAttestations, err := block.Attestations() - if err != nil { - return nil, err - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block 
identifier for block") - } - - events := []*xatu.DecoratedEvent{} - - for positionInBlock, attestation := range blockAttestations { - attestationData, err := attestation.Data() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation data") - } - - signature, err := attestation.Signature() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation signature") - } - - // Handle different attestation versions - switch attestation.Version { - case spec.DataVersionPhase0, spec.DataVersionAltair, spec.DataVersionBellatrix, spec.DataVersionCapella, spec.DataVersionDeneb: - // For pre-Electra attestations, each attestation can only have one committee - indexes, indexErr := b.getAttestatingValidatorIndexesPhase0(ctx, attestation) - if indexErr != nil { - return nil, errors.Wrap(indexErr, "failed to get attestating validator indexes") - } - - // Create a single elaborated attestation - elaboratedAttestation := &xatuethv1.ElaboratedAttestation{ - Signature: signature.String(), - Data: &xatuethv1.AttestationDataV2{ - Slot: &wrapperspb.UInt64Value{Value: uint64(attestationData.Slot)}, - Index: &wrapperspb.UInt64Value{Value: uint64(attestationData.Index)}, - BeaconBlockRoot: xatuethv1.RootAsString(attestationData.BeaconBlockRoot), - Source: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Source.Epoch)}, - Root: xatuethv1.RootAsString(attestationData.Source.Root), - }, - Target: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Target.Epoch)}, - Root: xatuethv1.RootAsString(attestationData.Target.Root), - }, - }, - ValidatorIndexes: indexes, - } - - //nolint:gosec // If we have that many attestations in a block we're cooked - event, err := b.createEventFromElaboratedAttestation(ctx, elaboratedAttestation, uint64(positionInBlock), blockIdentifier) - if err != nil { - return nil, errors.Wrapf(err, "failed to create event for attestation %s", attestation.String()) 
- } - - events = append(events, event) - - default: - // For Electra attestations, create multiple events (one per committee) - // Get the committee bits (this indicates which committees are included in this attestation) - committeeBits, err := attestation.CommitteeBits() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation committee bits") - } - - // Get aggregation bits - aggregationBits, err := attestation.AggregationBits() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation aggregation bits") - } - - // Process each committee from the committee_bits - committeeIndices := committeeBits.BitIndices() - committeeOffset := 0 - - for _, committeeIdx := range committeeIndices { - // Get the committee information - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(attestationData.Slot)) - - epochCommittees, err := b.beacon.Duties().FetchBeaconCommittee(ctx, phase0.Epoch(epoch.Number())) - if err != nil { - return nil, errors.Wrap(err, "failed to get committees for epoch") - } - - // Find the committee matching our current slot and index - var committee *v1.BeaconCommittee - - for _, c := range epochCommittees { - //nolint:gosec // This is capped at 64 committees in the spec - if c.Slot == attestationData.Slot && c.Index == phase0.CommitteeIndex(committeeIdx) { - committee = c - - break - } - } - - if committee == nil { - return nil, errors.New(fmt.Sprintf("committee %d in slot %d not found", committeeIdx, attestationData.Slot)) - } - - committeeSize := len(committee.Validators) - - // Create committee-specific validator indexes array - committeeValidatorIndexes := []*wrapperspb.UInt64Value{} - - // For each validator position in this committee - for i := 0; i < committeeSize; i++ { - // Calculate the bit position in the aggregation_bits - aggregationBitPosition := committeeOffset + i - - // Check if this position is valid and set - //nolint:gosec // This is capped at 64 committees in the spec - if 
uint64(aggregationBitPosition) < aggregationBits.Len() && aggregationBits.BitAt(uint64(aggregationBitPosition)) { - validatorIndex := committee.Validators[i] - committeeValidatorIndexes = append(committeeValidatorIndexes, wrapperspb.UInt64(uint64(validatorIndex))) - } - } - - // Create an elaborated attestation for this committee - elaboratedAttestation := &xatuethv1.ElaboratedAttestation{ - Signature: signature.String(), - Data: &xatuethv1.AttestationDataV2{ - Slot: &wrapperspb.UInt64Value{Value: uint64(attestationData.Slot)}, - //nolint:gosec // This is capped at 64 committees in the spec - Index: &wrapperspb.UInt64Value{Value: uint64(committeeIdx)}, // Use the committee index from committee_bits - BeaconBlockRoot: xatuethv1.RootAsString(attestationData.BeaconBlockRoot), - Source: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Source.Epoch)}, - Root: xatuethv1.RootAsString(attestationData.Source.Root), - }, - Target: &xatuethv1.CheckpointV2{ - Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Target.Epoch)}, - Root: xatuethv1.RootAsString(attestationData.Target.Root), - }, - }, - ValidatorIndexes: committeeValidatorIndexes, - } - - //nolint:gosec // If we have that many attestations in a block we're cooked - event, err := b.createEventFromElaboratedAttestation(ctx, elaboratedAttestation, uint64(positionInBlock), blockIdentifier) - if err != nil { - return nil, errors.Wrapf(err, "failed to create event for attestation %s committee %d", attestation.String(), committeeIdx) - } - - events = append(events, event) - - // Update offset for the next committee - committeeOffset += committeeSize - } - } - } - - return events, nil -} - -func (b *ElaboratedAttestationDeriver) getAttestatingValidatorIndexesPhase0(ctx context.Context, attestation *spec.VersionedAttestation) ([]*wrapperspb.UInt64Value, error) { - indexes := []*wrapperspb.UInt64Value{} - - attestationData, err := attestation.Data() - if err != nil { - return 
nil, errors.Wrap(err, "failed to obtain attestation data") - } - - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(uint64(attestationData.Slot)) - - bitIndices, err := attestation.AggregationBits() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain attestation aggregation bits") - } - - for _, position := range bitIndices.BitIndices() { - validatorIndex, err := b.beacon.Duties().GetValidatorIndex( - ctx, - phase0.Epoch(epoch.Number()), - attestationData.Slot, - attestationData.Index, - //nolint:gosec // This is capped at 64 committees in the spec - uint64(position), - ) - if err != nil { - return nil, errors.Wrapf(err, "failed to get validator index for position %d", position) - } - - indexes = append(indexes, wrapperspb.UInt64(uint64(validatorIndex))) - } - - return indexes, nil -} - -func (b *ElaboratedAttestationDeriver) createEventFromElaboratedAttestation(ctx context.Context, attestation *xatuethv1.ElaboratedAttestation, positionInBlock uint64, blockIdentifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockElaboratedAttestation{ - EthV2BeaconBlockElaboratedAttestation: attestation, - }, - } - - attestationSlot := b.beacon.Metadata().Wallclock().Slots().FromNumber(attestation.Data.Slot.Value) - epoch := b.beacon.Metadata().Wallclock().Epochs().FromSlot(attestationSlot.Number()) - - // Build out the target section - targetEpoch := b.beacon.Metadata().Wallclock().Epochs().FromNumber(attestation.Data.Target.Epoch.GetValue()) - target := 
&xatu.ClientMeta_AdditionalEthV1AttestationTargetV2Data{ - Epoch: &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: targetEpoch.Number()}, - StartDateTime: timestamppb.New(targetEpoch.TimeWindow().Start()), - }, - } - - // Build out the source section - sourceEpoch := b.beacon.Metadata().Wallclock().Epochs().FromNumber(attestation.Data.Source.Epoch.GetValue()) - source := &xatu.ClientMeta_AdditionalEthV1AttestationSourceV2Data{ - Epoch: &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: sourceEpoch.Number()}, - StartDateTime: timestamppb.New(sourceEpoch.TimeWindow().Start()), - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockElaboratedAttestation{ - EthV2BeaconBlockElaboratedAttestation: &xatu.ClientMeta_AdditionalEthV2BeaconBlockElaboratedAttestationData{ - Block: blockIdentifier, - PositionInBlock: wrapperspb.UInt64(positionInBlock), - Slot: &xatu.SlotV2{ - Number: &wrapperspb.UInt64Value{Value: attestationSlot.Number()}, - StartDateTime: timestamppb.New(attestationSlot.TimeWindow().Start()), - }, - Epoch: &xatu.EpochV2{ - Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, - StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), - }, - Source: source, - Target: target, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/execution_transaction.go b/pkg/cannon/deriver/beacon/eth/v2/execution_transaction.go deleted file mode 100644 index c5f404b0c..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/execution_transaction.go +++ /dev/null @@ -1,461 +0,0 @@ -package v2 - -import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "strconv" - "time" - - "github.com/attestantio/go-eth2-client/api" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/deneb" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethereum/go-ethereum/core/types" - 
"github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -type ExecutionTransactionDeriver struct { - log logrus.FieldLogger - cfg *ExecutionTransactionDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -type ExecutionTransactionDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -const ( - ExecutionTransactionDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION -) - -func NewExecutionTransactionDeriver(log logrus.FieldLogger, config *ExecutionTransactionDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *ExecutionTransactionDeriver { - return &ExecutionTransactionDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/execution_transaction", - "type": ExecutionTransactionDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *ExecutionTransactionDeriver) CannonType() xatu.CannonType { - return ExecutionTransactionDeriverName -} - -func (b *ExecutionTransactionDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionBellatrix -} - -func (b *ExecutionTransactionDeriver) Name() string { - return 
ExecutionTransactionDeriverName.String() -} - -func (b *ExecutionTransactionDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *ExecutionTransactionDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Execution transaction deriver disabled") - - return nil - } - - b.log.Info("Execution transaction deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *ExecutionTransactionDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *ExecutionTransactionDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", 
nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *ExecutionTransactionDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ExecutionTransactionDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) - } - - return allEvents, nil -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. 
-func (b *ExecutionTransactionDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "ExecutionTransactionDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *ExecutionTransactionDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ExecutionTransactionDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - blobSidecars := []*deneb.BlobSidecar{} - - if block.Version >= spec.DataVersionDeneb { - sidecars, errr := b.beacon.Node().FetchBeaconBlockBlobs(ctx, xatuethv1.SlotAsString(slot)) - if errr != nil { - var apiErr *api.Error - if errors.As(errr, &apiErr) { - switch apiErr.StatusCode { - case 404: - b.log.WithError(errr).WithField("slot", slot).Debug("no beacon block blob sidecars found for slot") - case 503: - return nil, errors.New("beacon node is syncing") - default: - return nil, errors.Wrapf(errr, "failed to get beacon block blob sidecars for slot %d", slot) - } - } else { - 
return nil, errors.Wrapf(errr, "failed to get beacon block blob sidecars for slot %d", slot) - } - } - - blobSidecars = sidecars - } - - blobSidecarsMap := map[string]*deneb.BlobSidecar{} - - for _, blobSidecar := range blobSidecars { - versionedHash := ethereum.ConvertKzgCommitmentToVersionedHash(blobSidecar.KZGCommitment[:]) - blobSidecarsMap[versionedHash.String()] = blobSidecar - } - - events := []*xatu.DecoratedEvent{} - - transactions, err := b.getExecutionTransactions(ctx, block) - if err != nil { - return nil, err - } - - chainID := new(big.Int).SetUint64(b.beacon.Metadata().Spec.DepositChainID) - if chainID.Cmp(big.NewInt(0)) == 0 { - return nil, fmt.Errorf("failed to get chain ID from beacon node metadata") - } - - signer := types.LatestSignerForChainID(chainID) - - for index, transaction := range transactions { - from, err := types.Sender(signer, transaction) - if err != nil { - return nil, fmt.Errorf("failed to get transaction sender: %v", err) - } - - gasPrice, err := GetGasPrice(block, transaction) - if err != nil { - return nil, fmt.Errorf("failed to get transaction gas price: %v", err) - } - - if gasPrice == nil { - return nil, fmt.Errorf("failed to get transaction gas price") - } - - value := transaction.Value() - if value == nil { - return nil, fmt.Errorf("failed to get transaction value") - } - - to := "" - - if transaction.To() != nil { - to = transaction.To().Hex() - } - - tx := &xatuethv1.Transaction{ - Nonce: wrapperspb.UInt64(transaction.Nonce()), - Gas: wrapperspb.UInt64(transaction.Gas()), - GasPrice: gasPrice.String(), - GasTipCap: transaction.GasTipCap().String(), - GasFeeCap: transaction.GasFeeCap().String(), - To: to, - From: from.Hex(), - Value: value.String(), - Input: hex.EncodeToString(transaction.Data()), - Hash: transaction.Hash().Hex(), - ChainId: chainID.String(), - Type: wrapperspb.UInt32(uint32(transaction.Type())), - } - - sidecarsEmptySize := 0 - sidecarsSize := 0 - - if transaction.Type() == 3 { - blobHashes := 
make([]string, len(transaction.BlobHashes())) - - if len(transaction.BlobHashes()) == 0 { - b.log.WithField("transaction", transaction.Hash().Hex()).Warn("no versioned hashes for type 3 transaction") - } - - for i := 0; i < len(transaction.BlobHashes()); i++ { - hash := transaction.BlobHashes()[i] - blobHashes[i] = hash.String() - sidecar := blobSidecarsMap[hash.String()] - - if sidecar != nil { - sidecarsSize += len(sidecar.Blob) - sidecarsEmptySize += ethereum.CountConsecutiveEmptyBytes(sidecar.Blob[:], 4) - } else { - b.log.WithField("versioned hash", hash.String()).WithField("transaction", transaction.Hash().Hex()).Warn("missing blob sidecar") - } - } - - tx.BlobGas = wrapperspb.UInt64(transaction.BlobGas()) - tx.BlobGasFeeCap = transaction.BlobGasFeeCap().String() - tx.BlobHashes = blobHashes - } - - event, err := b.createEvent(ctx, tx, uint64(index), blockIdentifier, transaction, sidecarsSize, sidecarsEmptySize) - if err != nil { - b.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for execution transaction %s", transaction.Hash()) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *ExecutionTransactionDeriver) getExecutionTransactions(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*types.Transaction, error) { - transactions := []*types.Transaction{} - - txs, err := block.ExecutionTransactions() - if err != nil { - return nil, fmt.Errorf("failed to get execution transactions: %v", err) - } - - for _, transaction := range txs { - ethTransaction := new(types.Transaction) - if err := ethTransaction.UnmarshalBinary(transaction); err != nil { - return nil, fmt.Errorf("failed to unmarshal transaction: %v", err) - } - - transactions = append(transactions, ethTransaction) - } - - return transactions, nil -} - -func (b *ExecutionTransactionDeriver) createEvent(ctx context.Context, transaction *xatuethv1.Transaction, positionInBlock uint64, blockIdentifier 
*xatu.BlockIdentifier, rlpTransaction *types.Transaction, sidecarsSize, sidecarsEmptySize int) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockExecutionTransaction{ - EthV2BeaconBlockExecutionTransaction: transaction, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockExecutionTransaction{ - EthV2BeaconBlockExecutionTransaction: &xatu.ClientMeta_AdditionalEthV2BeaconBlockExecutionTransactionData{ - Block: blockIdentifier, - PositionInBlock: wrapperspb.UInt64(positionInBlock), - Size: strconv.FormatFloat(float64(rlpTransaction.Size()), 'f', 0, 64), - CallDataSize: fmt.Sprintf("%d", len(rlpTransaction.Data())), - BlobSidecarsSize: fmt.Sprint(sidecarsSize), - BlobSidecarsEmptySize: fmt.Sprint(sidecarsEmptySize), - }, - } - - return decoratedEvent, nil -} - -func GetGasPrice(block *spec.VersionedSignedBeaconBlock, transaction *types.Transaction) (*big.Int, error) { - if transaction.Type() == 0 || transaction.Type() == 1 { - return transaction.GasPrice(), nil - } - - if transaction.Type() == 2 || transaction.Type() == 3 || transaction.Type() == 4 { // EIP-1559/blob/7702 transactions - baseFee := new(big.Int) - - switch block.Version { - case spec.DataVersionBellatrix: - baseFee = new(big.Int).SetBytes(block.Bellatrix.Message.Body.ExecutionPayload.BaseFeePerGas[:]) - case spec.DataVersionDeneb: - executionPayload := block.Deneb.Message.Body.ExecutionPayload - baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) - case spec.DataVersionElectra: - executionPayload := 
block.Electra.Message.Body.ExecutionPayload - baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) - case spec.DataVersionFulu: - executionPayload := block.Fulu.Message.Body.ExecutionPayload - baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) - default: - return nil, fmt.Errorf("unknown block version: %d", block.Version) - } - - // Calculate Effective Gas Price: min(max_fee_per_gas, base_fee + max_priority_fee_per_gas) - gasPrice := new(big.Int).Add(baseFee, transaction.GasTipCap()) - if gasPrice.Cmp(transaction.GasFeeCap()) > 0 { - gasPrice = transaction.GasFeeCap() - } - - return gasPrice, nil - } - - return nil, fmt.Errorf("unknown transaction type: %d", transaction.Type()) -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/proposer_slashing.go b/pkg/cannon/deriver/beacon/eth/v2/proposer_slashing.go deleted file mode 100644 index 38a5d9ab2..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/proposer_slashing.go +++ /dev/null @@ -1,329 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - ProposerSlashingDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING -) - -type ProposerSlashingDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig 
`yaml:"iterator"` -} - -type ProposerSlashingDeriver struct { - log logrus.FieldLogger - cfg *ProposerSlashingDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewProposerSlashingDeriver(log logrus.FieldLogger, config *ProposerSlashingDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *ProposerSlashingDeriver { - return &ProposerSlashingDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/proposer_slashing", - "type": ProposerSlashingDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *ProposerSlashingDeriver) CannonType() xatu.CannonType { - return ProposerSlashingDeriverName -} - -func (b *ProposerSlashingDeriver) Name() string { - return ProposerSlashingDeriverName.String() -} - -func (b *ProposerSlashingDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *ProposerSlashingDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *ProposerSlashingDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Proposer slashing deriver disabled") - - return nil - } - - b.log.Info("Proposer slashing deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *ProposerSlashingDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *ProposerSlashingDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - 
return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *ProposerSlashingDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ProposerSlashingDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + 
uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) - } - - return allEvents, nil -} - -func (b *ProposerSlashingDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "ProposerSlashingDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - slashings, err := b.getProposerSlashings(ctx, block) - if err != nil { - return nil, err - } - - for _, slashing := range slashings { - event, err := b.createEvent(ctx, slashing, blockIdentifier) - if err != nil { - b.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for proposer slashing %s", slashing.String()) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *ProposerSlashingDeriver) getProposerSlashings(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv1.ProposerSlashingV2, error) { - slashings := []*xatuethv1.ProposerSlashingV2{} - - blockSlashings, err := block.ProposerSlashings() - if err != nil { - return nil, err - } - - for _, slashing := range blockSlashings { - slashings = append(slashings, &xatuethv1.ProposerSlashingV2{ - SignedHeader_1: &xatuethv1.SignedBeaconBlockHeaderV2{ - Message: 
&xatuethv1.BeaconBlockHeaderV2{ - Slot: wrapperspb.UInt64(uint64(slashing.SignedHeader1.Message.Slot)), - ProposerIndex: wrapperspb.UInt64(uint64(slashing.SignedHeader1.Message.ProposerIndex)), - ParentRoot: slashing.SignedHeader1.Message.ParentRoot.String(), - StateRoot: slashing.SignedHeader1.Message.StateRoot.String(), - BodyRoot: slashing.SignedHeader1.Message.BodyRoot.String(), - }, - Signature: slashing.SignedHeader1.Signature.String(), - }, - SignedHeader_2: &xatuethv1.SignedBeaconBlockHeaderV2{ - Message: &xatuethv1.BeaconBlockHeaderV2{ - Slot: wrapperspb.UInt64(uint64(slashing.SignedHeader2.Message.Slot)), - ProposerIndex: wrapperspb.UInt64(uint64(slashing.SignedHeader2.Message.ProposerIndex)), - ParentRoot: slashing.SignedHeader2.Message.ParentRoot.String(), - StateRoot: slashing.SignedHeader2.Message.StateRoot.String(), - BodyRoot: slashing.SignedHeader2.Message.BodyRoot.String(), - }, - Signature: slashing.SignedHeader2.Signature.String(), - }, - }) - } - - return slashings, nil -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. 
-func (b *ProposerSlashingDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "ProposerSlashingDeriver.lookAhead", - ) - defer span.End() - - if epochs == nil { - return - } - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *ProposerSlashingDeriver) createEvent(ctx context.Context, slashing *xatuethv1.ProposerSlashingV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockProposerSlashing{ - EthV2BeaconBlockProposerSlashing: slashing, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockProposerSlashing{ - EthV2BeaconBlockProposerSlashing: &xatu.ClientMeta_AdditionalEthV2BeaconBlockProposerSlashingData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/voluntary_exit.go b/pkg/cannon/deriver/beacon/eth/v2/voluntary_exit.go deleted file mode 100644 index 6a04888d0..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/voluntary_exit.go +++ /dev/null @@ -1,313 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - 
"github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - VoluntaryExitDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT -) - -type VoluntaryExitDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type VoluntaryExitDeriver struct { - log logrus.FieldLogger - cfg *VoluntaryExitDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewVoluntaryExitDeriver(log logrus.FieldLogger, config *VoluntaryExitDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *VoluntaryExitDeriver { - return &VoluntaryExitDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/voluntary_exit", - "type": VoluntaryExitDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *VoluntaryExitDeriver) CannonType() xatu.CannonType { - return VoluntaryExitDeriverName -} - -func (b *VoluntaryExitDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionPhase0 -} - -func (b *VoluntaryExitDeriver) Name() string { - 
return VoluntaryExitDeriverName.String() -} - -func (b *VoluntaryExitDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *VoluntaryExitDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Voluntary exit deriver disabled") - - return nil - } - - b.log.Info("Voluntary exit deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *VoluntaryExitDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *VoluntaryExitDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - // Send the events - for _, fn := range b.onEventsCallbacks { - if err := fn(ctx, events); err != nil { - return "", errors.Wrap(err, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := 
[]backoff.RetryOption{ - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. -func (b *VoluntaryExitDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "VoluntaryExitDeriver.lookAheadAtLocations", - ) - defer span.End() - - if epochs == nil { - return - } - - for _, epoch := range epochs { - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).WithField("epoch", epoch).Warn("Failed to look ahead at epoch") - - return - } - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *VoluntaryExitDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "VoluntaryExitDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) 
- } - - return allEvents, nil -} - -func (b *VoluntaryExitDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "VoluntaryExitDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - exits, err := b.getVoluntaryExits(ctx, block) - if err != nil { - return nil, err - } - - for _, exit := range exits { - event, err := b.createEvent(ctx, exit, blockIdentifier) - if err != nil { - b.log.WithError(err).Error("Failed to create event") - - return nil, errors.Wrapf(err, "failed to create event for voluntary exit %s", exit.String()) - } - - events = append(events, event) - } - - return events, nil -} - -func (b *VoluntaryExitDeriver) getVoluntaryExits(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv1.SignedVoluntaryExitV2, error) { - exits := []*xatuethv1.SignedVoluntaryExitV2{} - - voluntaryExits, err := block.VoluntaryExits() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain voluntary exits") - } - - for _, exit := range voluntaryExits { - exits = append(exits, &xatuethv1.SignedVoluntaryExitV2{ - Message: &xatuethv1.VoluntaryExitV2{ - Epoch: wrapperspb.UInt64(uint64(exit.Message.Epoch)), - ValidatorIndex: wrapperspb.UInt64(uint64(exit.Message.ValidatorIndex)), - }, - Signature: exit.Signature.String(), - }) - } - - return exits, nil -} - -func (b *VoluntaryExitDeriver) createEvent(ctx context.Context, 
exit *xatuethv1.SignedVoluntaryExitV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockVoluntaryExit{ - EthV2BeaconBlockVoluntaryExit: exit, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockVoluntaryExit{ - EthV2BeaconBlockVoluntaryExit: &xatu.ClientMeta_AdditionalEthV2BeaconBlockVoluntaryExitData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/beacon/eth/v2/withdrawal.go b/pkg/cannon/deriver/beacon/eth/v2/withdrawal.go deleted file mode 100644 index 88e88c0ba..000000000 --- a/pkg/cannon/deriver/beacon/eth/v2/withdrawal.go +++ /dev/null @@ -1,306 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" - backoff "github.com/cenkalti/backoff/v5" - "github.com/ethpandaops/xatu/pkg/cannon/ethereum" - "github.com/ethpandaops/xatu/pkg/cannon/iterator" - "github.com/ethpandaops/xatu/pkg/observability" - xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" - "github.com/ethpandaops/xatu/pkg/proto/xatu" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - WithdrawalDeriverName = xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL -) 
- -type WithdrawalDeriverConfig struct { - Enabled bool `yaml:"enabled" default:"true"` - Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` -} - -type WithdrawalDeriver struct { - log logrus.FieldLogger - cfg *WithdrawalDeriverConfig - iterator *iterator.BackfillingCheckpoint - onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error - beacon *ethereum.BeaconNode - clientMeta *xatu.ClientMeta -} - -func NewWithdrawalDeriver(log logrus.FieldLogger, config *WithdrawalDeriverConfig, iter *iterator.BackfillingCheckpoint, beacon *ethereum.BeaconNode, clientMeta *xatu.ClientMeta) *WithdrawalDeriver { - return &WithdrawalDeriver{ - log: log.WithFields(logrus.Fields{ - "module": "cannon/event/beacon/eth/v2/withdrawal", - "type": WithdrawalDeriverName.String(), - }), - cfg: config, - iterator: iter, - beacon: beacon, - clientMeta: clientMeta, - } -} - -func (b *WithdrawalDeriver) CannonType() xatu.CannonType { - return WithdrawalDeriverName -} - -func (b *WithdrawalDeriver) Name() string { - return WithdrawalDeriverName.String() -} - -func (b *WithdrawalDeriver) ActivationFork() spec.DataVersion { - return spec.DataVersionCapella -} - -func (b *WithdrawalDeriver) OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) { - b.onEventsCallbacks = append(b.onEventsCallbacks, fn) -} - -func (b *WithdrawalDeriver) Start(ctx context.Context) error { - if !b.cfg.Enabled { - b.log.Info("Withdrawal deriver disabled") - - return nil - } - - b.log.Info("Withdrawal deriver enabled") - - if err := b.iterator.Start(ctx, b.ActivationFork()); err != nil { - return errors.Wrap(err, "failed to start iterator") - } - - // Start our main loop - b.run(ctx) - - return nil -} - -func (b *WithdrawalDeriver) Stop(ctx context.Context) error { - return nil -} - -func (b *WithdrawalDeriver) run(rctx context.Context) { - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = 3 * time.Minute - - for { - select { - 
case <-rctx.Done(): - return - default: - operation := func() (string, error) { - ctx, span := observability.Tracer().Start(rctx, fmt.Sprintf("Derive %s", b.Name()), - trace.WithAttributes( - attribute.String("network", string(b.beacon.Metadata().Network.Name))), - ) - defer span.End() - - time.Sleep(100 * time.Millisecond) - - if err := b.beacon.Synced(ctx); err != nil { - return "", err - } - - // Get the next position - position, err := b.iterator.Next(ctx) - if err != nil { - return "", err - } - - // Process the epoch - events, err := b.processEpoch(ctx, position.Next) - if err != nil { - b.log.WithError(err).Error("Failed to process epoch") - - return "", err - } - - // Look ahead - b.lookAhead(ctx, position.LookAheads) - - for _, fn := range b.onEventsCallbacks { - if errr := fn(ctx, events); errr != nil { - return "", errors.Wrapf(errr, "failed to send events") - } - } - - // Update our location - if err := b.iterator.UpdateLocation(ctx, position.Next, position.Direction); err != nil { - return "", err - } - - bo.Reset() - - return "", nil - } - - retryOpts := []backoff.RetryOption{ - backoff.WithBackOff(bo), - backoff.WithNotify(func(err error, timer time.Duration) { - b.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") - }), - } - - if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { - b.log.WithError(err).Warn("Failed to process") - } - } - } -} - -func (b *WithdrawalDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "WithdrawalDeriver.processEpoch", - trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain spec") - } - - allEvents := []*xatu.DecoratedEvent{} - - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + 
uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - events, err := b.processSlot(ctx, slot) - if err != nil { - return nil, errors.Wrapf(err, "failed to process slot %d", slot) - } - - allEvents = append(allEvents, events...) - } - - return allEvents, nil -} - -func (b *WithdrawalDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { - ctx, span := observability.Tracer().Start(ctx, - "WithdrawalDeriver.processSlot", - trace.WithAttributes(attribute.Int64("slot", int64(slot))), - ) - defer span.End() - - // Get the block - block, err := b.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) - } - - if block == nil { - return []*xatu.DecoratedEvent{}, nil - } - - blockIdentifier, err := GetBlockIdentifier(block, b.beacon.Metadata().Wallclock()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) - } - - events := []*xatu.DecoratedEvent{} - - withdrawals, err := b.getWithdrawals(ctx, block) - if err != nil { - return nil, errors.Wrap(err, "failed to get withdrawals") - } - - for _, withdrawal := range withdrawals { - event, err := b.createEvent(ctx, withdrawal, blockIdentifier) - if err != nil { - return nil, errors.Wrapf(err, "failed to create event for withdrawal %s", withdrawal.String()) - } - - events = append(events, event) - } - - return events, nil -} - -// lookAhead attempts to pre-load any blocks that might be required for the epochs that are coming up. 
-func (b *WithdrawalDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { - _, span := observability.Tracer().Start(ctx, - "WithdrawalDeriver.lookAhead", - ) - defer span.End() - - sp, err := b.beacon.Node().Spec() - if err != nil { - b.log.WithError(err).Warn("Failed to look ahead at epoch") - - return - } - - for _, epoch := range epochs { - for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { - slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) - - // Add the block to the preload queue so it's available when we need it - b.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) - } - } -} - -func (b *WithdrawalDeriver) getWithdrawals(ctx context.Context, block *spec.VersionedSignedBeaconBlock) ([]*xatuethv1.WithdrawalV2, error) { - withdrawals := []*xatuethv1.WithdrawalV2{} - - withd, err := block.Withdrawals() - if err != nil { - return nil, errors.Wrap(err, "failed to obtain withdrawals") - } - - for _, withdrawal := range withd { - withdrawals = append(withdrawals, &xatuethv1.WithdrawalV2{ - Index: &wrapperspb.UInt64Value{Value: uint64(withdrawal.Index)}, - ValidatorIndex: &wrapperspb.UInt64Value{Value: uint64(withdrawal.ValidatorIndex)}, - Address: withdrawal.Address.String(), - Amount: &wrapperspb.UInt64Value{Value: uint64(withdrawal.Amount)}, - }) - } - - return withdrawals, nil -} - -func (b *WithdrawalDeriver) createEvent(ctx context.Context, withdrawal *xatuethv1.WithdrawalV2, identifier *xatu.BlockIdentifier) (*xatu.DecoratedEvent, error) { - // Make a clone of the metadata - metadata, ok := proto.Clone(b.clientMeta).(*xatu.ClientMeta) - if !ok { - return nil, errors.New("failed to clone client metadata") - } - - decoratedEvent := &xatu.DecoratedEvent{ - Event: &xatu.Event{ - Name: xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL, - DateTime: timestamppb.New(time.Now()), - Id: uuid.New().String(), - }, - Meta: &xatu.Meta{ - Client: metadata, - }, - Data: &xatu.DecoratedEvent_EthV2BeaconBlockWithdrawal{ - 
EthV2BeaconBlockWithdrawal: withdrawal, - }, - } - - decoratedEvent.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockWithdrawal{ - EthV2BeaconBlockWithdrawal: &xatu.ClientMeta_AdditionalEthV2BeaconBlockWithdrawalData{ - Block: identifier, - }, - } - - return decoratedEvent, nil -} diff --git a/pkg/cannon/deriver/config.go b/pkg/cannon/deriver/config.go index a6a31b449..46128f930 100644 --- a/pkg/cannon/deriver/config.go +++ b/pkg/cannon/deriver/config.go @@ -1,26 +1,41 @@ package deriver import ( - v1 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v1" - v2 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v2" + "github.com/ethpandaops/xatu/pkg/cannon/iterator" ) +// DeriverConfig is the base configuration for all Cannon derivers. +// It combines the Enabled flag with iterator-specific configuration. +type DeriverConfig struct { + Enabled bool `yaml:"enabled" default:"true"` + Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` +} + +// BeaconValidatorsDeriverConfig extends DeriverConfig with validator-specific settings. +type BeaconValidatorsDeriverConfig struct { + Enabled bool `yaml:"enabled" default:"true"` + ChunkSize int `yaml:"chunkSize" default:"100"` + Iterator iterator.BackfillingCheckpointConfig `yaml:"iterator"` +} + +// Config holds configuration for all Cannon derivers. 
type Config struct { - AttesterSlashingConfig v2.AttesterSlashingDeriverConfig `yaml:"attesterSlashing"` - BLSToExecutionConfig v2.BLSToExecutionChangeDeriverConfig `yaml:"blsToExecutionChange"` - DepositConfig v2.DepositDeriverConfig `yaml:"deposit"` - ExecutionTransactionConfig v2.ExecutionTransactionDeriverConfig `yaml:"executionTransaction"` - ProposerSlashingConfig v2.ProposerSlashingDeriverConfig `yaml:"proposerSlashing"` - VoluntaryExitConfig v2.VoluntaryExitDeriverConfig `yaml:"voluntaryExit"` - WithdrawalConfig v2.WithdrawalDeriverConfig `yaml:"withdrawal"` - BeaconBlockConfig v2.BeaconBlockDeriverConfig `yaml:"beaconBlock"` - BeaconBlobSidecarConfig v1.BeaconBlobDeriverConfig `yaml:"beaconBlobSidecar"` - ProposerDutyConfig v1.ProposerDutyDeriverConfig `yaml:"proposerDuty"` - ElaboratedAttestationConfig v2.ElaboratedAttestationDeriverConfig `yaml:"elaboratedAttestation"` - BeaconValidatorsConfig v1.BeaconValidatorsDeriverConfig `yaml:"beaconValidators"` - BeaconCommitteeConfig v1.BeaconCommitteeDeriverConfig `yaml:"beaconCommittee"` + AttesterSlashingConfig DeriverConfig `yaml:"attesterSlashing"` + BLSToExecutionConfig DeriverConfig `yaml:"blsToExecutionChange"` + DepositConfig DeriverConfig `yaml:"deposit"` + ExecutionTransactionConfig DeriverConfig `yaml:"executionTransaction"` + ProposerSlashingConfig DeriverConfig `yaml:"proposerSlashing"` + VoluntaryExitConfig DeriverConfig `yaml:"voluntaryExit"` + WithdrawalConfig DeriverConfig `yaml:"withdrawal"` + BeaconBlockConfig DeriverConfig `yaml:"beaconBlock"` + BeaconBlobSidecarConfig DeriverConfig `yaml:"beaconBlobSidecar"` + ProposerDutyConfig DeriverConfig `yaml:"proposerDuty"` + ElaboratedAttestationConfig DeriverConfig `yaml:"elaboratedAttestation"` + BeaconValidatorsConfig BeaconValidatorsDeriverConfig `yaml:"beaconValidators"` + BeaconCommitteeConfig DeriverConfig `yaml:"beaconCommittee"` } +// Validate validates the deriver configuration. 
func (c *Config) Validate() error { return nil } diff --git a/pkg/cannon/deriver/event_deriver.go b/pkg/cannon/deriver/event_deriver.go index 08574582c..f064b4e57 100644 --- a/pkg/cannon/deriver/event_deriver.go +++ b/pkg/cannon/deriver/event_deriver.go @@ -4,11 +4,11 @@ import ( "context" "github.com/attestantio/go-eth2-client/spec" - v1 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v1" - v2 "github.com/ethpandaops/xatu/pkg/cannon/deriver/beacon/eth/v2" + cldataderiver "github.com/ethpandaops/xatu/pkg/cldata/deriver" "github.com/ethpandaops/xatu/pkg/proto/xatu" ) +// EventDeriver is the interface that all event derivers must implement. type EventDeriver interface { Start(ctx context.Context) error Stop(ctx context.Context) error @@ -20,17 +20,5 @@ type EventDeriver interface { ActivationFork() spec.DataVersion } -// Ensure that derivers implements the EventDeriver interface -var _ EventDeriver = &v2.AttesterSlashingDeriver{} -var _ EventDeriver = &v2.ProposerSlashingDeriver{} -var _ EventDeriver = &v2.DepositDeriver{} -var _ EventDeriver = &v2.VoluntaryExitDeriver{} -var _ EventDeriver = &v2.ExecutionTransactionDeriver{} -var _ EventDeriver = &v2.BLSToExecutionChangeDeriver{} -var _ EventDeriver = &v2.WithdrawalDeriver{} -var _ EventDeriver = &v2.BeaconBlockDeriver{} -var _ EventDeriver = &v2.ElaboratedAttestationDeriver{} -var _ EventDeriver = &v1.ProposerDutyDeriver{} -var _ EventDeriver = &v1.BeaconBlobDeriver{} -var _ EventDeriver = &v1.BeaconValidatorsDeriver{} -var _ EventDeriver = &v1.BeaconCommitteeDeriver{} +// Ensure that GenericDeriver from cldata package implements the EventDeriver interface. 
+var _ EventDeriver = (*cldataderiver.GenericDeriver)(nil) diff --git a/pkg/cannon/deriver_mapping.go b/pkg/cannon/deriver_mapping.go new file mode 100644 index 000000000..26ca59447 --- /dev/null +++ b/pkg/cannon/deriver_mapping.go @@ -0,0 +1,107 @@ +package cannon + +import ( + "github.com/ethpandaops/xatu/pkg/cannon/deriver" + "github.com/ethpandaops/xatu/pkg/cannon/iterator" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// GetDeriverConfig returns the deriver config for a given cannon type. +func GetDeriverConfig(config *deriver.Config, cannonType xatu.CannonType) *deriver.DeriverConfig { + switch cannonType { + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK: + return &config.BeaconBlockConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING: + return &config.AttesterSlashingConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING: + return &config.ProposerSlashingConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT: + return &config.DepositConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL: + return &config.WithdrawalConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT: + return &config.VoluntaryExitConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE: + return &config.BLSToExecutionConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION: + return &config.ExecutionTransactionConfig + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION: + return &config.ElaboratedAttestationConfig + case xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY: + return &config.ProposerDutyConfig + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR: + return &config.BeaconBlobSidecarConfig + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE: + return &config.BeaconCommitteeConfig + default: + return nil + } +} + +// GetIteratorConfig returns the iterator config for a given 
cannon type. +func GetIteratorConfig(config *deriver.Config, cannonType xatu.CannonType) *iterator.BackfillingCheckpointConfig { + switch cannonType { + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK: + return &config.BeaconBlockConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING: + return &config.AttesterSlashingConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING: + return &config.ProposerSlashingConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT: + return &config.DepositConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL: + return &config.WithdrawalConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT: + return &config.VoluntaryExitConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE: + return &config.BLSToExecutionConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION: + return &config.ExecutionTransactionConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION: + return &config.ElaboratedAttestationConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY: + return &config.ProposerDutyConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR: + return &config.BeaconBlobSidecarConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS: + return &config.BeaconValidatorsConfig.Iterator + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE: + return &config.BeaconCommitteeConfig.Iterator + default: + return nil + } +} + +// IsDeriverEnabled returns whether a deriver is enabled based on config. 
+func IsDeriverEnabled(config *deriver.Config, cannonType xatu.CannonType) bool { + switch cannonType { + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK: + return config.BeaconBlockConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING: + return config.AttesterSlashingConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING: + return config.ProposerSlashingConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT: + return config.DepositConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL: + return config.WithdrawalConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT: + return config.VoluntaryExitConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE: + return config.BLSToExecutionConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION: + return config.ExecutionTransactionConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION: + return config.ElaboratedAttestationConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY: + return config.ProposerDutyConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR: + return config.BeaconBlobSidecarConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS: + return config.BeaconValidatorsConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE: + return config.BeaconCommitteeConfig.Enabled + default: + return false + } +} diff --git a/pkg/cldata/beacon.go b/pkg/cldata/beacon.go new file mode 100644 index 000000000..5caaf3f2c --- /dev/null +++ b/pkg/cldata/beacon.go @@ -0,0 +1,67 @@ +// Package cldata provides shared types and interfaces for consensus layer data processing. 
+package cldata + +import ( + "context" + + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/beacon/pkg/beacon" +) + +// BeaconClient provides access to beacon node functionality needed by derivers. +// It abstracts the differences between how Cannon and Horizon interact with beacon nodes. +type BeaconClient interface { + // GetBeaconBlock retrieves a beacon block by its identifier (slot number as string). + // Returns nil without error if the block doesn't exist (missed slot). + GetBeaconBlock(ctx context.Context, identifier string) (*spec.VersionedSignedBeaconBlock, error) + + // LazyLoadBeaconBlock queues a block for background preloading. + // This is used for look-ahead optimization. + LazyLoadBeaconBlock(identifier string) + + // Synced checks if the beacon node is synced and ready. + // Returns an error if the node is not synced. + Synced(ctx context.Context) error + + // Node returns the underlying beacon node for spec access. + // This is needed for accessing fork epochs and slots per epoch. + Node() beacon.Node + + // FetchBeaconBlockBlobs retrieves blob sidecars for a given block identifier. + // Returns empty slice without error if no blobs exist for the slot. + // This is used for Deneb+ blocks that contain blob transactions. + FetchBeaconBlockBlobs(ctx context.Context, identifier string) ([]*deneb.BlobSidecar, error) + + // FetchBeaconCommittee retrieves the beacon committees for a given epoch. + // This is used by derivers that need committee information (e.g., ElaboratedAttestationDeriver). + FetchBeaconCommittee(ctx context.Context, epoch phase0.Epoch) ([]*v1.BeaconCommittee, error) + + // GetValidatorIndex looks up a validator index from the committee for a given position. + // Returns the validator index at the specified position in the committee. 
+ GetValidatorIndex( + ctx context.Context, + epoch phase0.Epoch, + slot phase0.Slot, + committeeIndex phase0.CommitteeIndex, + position uint64, + ) (phase0.ValidatorIndex, error) + + // FetchProposerDuties retrieves the proposer duties for a given epoch. + // Returns a slice of proposer duties, one for each slot in the epoch. + FetchProposerDuties(ctx context.Context, epoch phase0.Epoch) ([]*v1.ProposerDuty, error) + + // GetValidators retrieves validators for a given state identifier (e.g., slot as string). + // Returns a map of validator index to validator information. + GetValidators(ctx context.Context, identifier string) (map[phase0.ValidatorIndex]*v1.Validator, error) + + // LazyLoadValidators queues validators for background preloading at the specified state. + // This is used for look-ahead optimization. + LazyLoadValidators(stateID string) + + // DeleteValidatorsFromCache removes validators from the cache for the specified state. + // This is used to clean up memory after processing. + DeleteValidatorsFromCache(stateID string) +} diff --git a/pkg/cldata/blob.go b/pkg/cldata/blob.go new file mode 100644 index 000000000..e9010812a --- /dev/null +++ b/pkg/cldata/blob.go @@ -0,0 +1,46 @@ +// Package cldata provides shared types and interfaces for consensus layer data processing. +package cldata + +import ( + "crypto/sha256" + + "github.com/ethereum/go-ethereum/common" +) + +const blobCommitmentVersionKZG uint8 = 0x01 + +// ConvertKzgCommitmentToVersionedHash converts a KZG commitment to a versioned hash. 
+// Reference: https://github.com/prysmaticlabs/prysm/blob/bfae7f3c9fa30cf0d513b59ad95cc99a5316eacd/beacon-chain/blockchain/execution_engine.go#L413 +func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash { + versionedHash := sha256.Sum256(commitment) + + versionedHash[0] = blobCommitmentVersionKZG + + return versionedHash +} + +// CountConsecutiveEmptyBytes counts the total number of consecutive zero bytes +// in the array that exceed the given threshold length. +func CountConsecutiveEmptyBytes(byteArray []byte, threshold int) int { + count := 0 + consecutiveZeros := 0 + + for _, b := range byteArray { + if b == 0 { + consecutiveZeros++ + } else { + if consecutiveZeros > threshold { + count += consecutiveZeros + } + + consecutiveZeros = 0 + } + } + + // Check if the last sequence in the array is longer than the threshold and hasn't been counted yet + if consecutiveZeros > threshold { + count += consecutiveZeros + } + + return count +} diff --git a/pkg/cldata/context.go b/pkg/cldata/context.go new file mode 100644 index 000000000..e4219a48b --- /dev/null +++ b/pkg/cldata/context.go @@ -0,0 +1,34 @@ +// Package cldata provides shared types and interfaces for consensus layer data processing. +// It enables code reuse between the Cannon (historical backfill) and Horizon (real-time) +// modules by defining common abstractions. +package cldata + +import ( + "context" + + "github.com/ethpandaops/ethwallclock" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// ContextProvider supplies the contextual information needed by derivers +// to create properly decorated events. It abstracts the differences between +// Cannon and Horizon execution contexts. +type ContextProvider interface { + // CreateClientMeta creates the client metadata for events. + // This includes network information, client version, and other identifying data. 
+ CreateClientMeta(ctx context.Context) (*xatu.ClientMeta, error) + + // NetworkName returns the human-readable name of the network being monitored. + NetworkName() string + + // NetworkID returns the numeric identifier of the network. + NetworkID() uint64 + + // Wallclock returns the Ethereum beacon chain wallclock for time calculations. + // It provides slot and epoch timing information based on genesis time and slot duration. + Wallclock() *ethwallclock.EthereumBeaconChain + + // DepositChainID returns the execution layer chain ID. + // This is needed for transaction signing and verification. + DepositChainID() uint64 +} diff --git a/pkg/cannon/deriver/beacon/eth/v2/block_identifier.go b/pkg/cldata/deriver/block_identifier.go similarity index 80% rename from pkg/cannon/deriver/beacon/eth/v2/block_identifier.go rename to pkg/cldata/deriver/block_identifier.go index 4783224b0..fed260acb 100644 --- a/pkg/cannon/deriver/beacon/eth/v2/block_identifier.go +++ b/pkg/cldata/deriver/block_identifier.go @@ -1,4 +1,4 @@ -package v2 +package deriver import ( "fmt" @@ -11,7 +11,11 @@ import ( "google.golang.org/protobuf/types/known/wrapperspb" ) -func GetBlockIdentifier(block *spec.VersionedSignedBeaconBlock, wallclock *ethwallclock.EthereumBeaconChain) (*xatu.BlockIdentifier, error) { +// GetBlockIdentifier creates a BlockIdentifier from a versioned signed beacon block. 
+func GetBlockIdentifier( + block *spec.VersionedSignedBeaconBlock, + wallclock *ethwallclock.EthereumBeaconChain, +) (*xatu.BlockIdentifier, error) { if block == nil { return nil, fmt.Errorf("block is nil") } diff --git a/pkg/cldata/deriver/event_builder.go b/pkg/cldata/deriver/event_builder.go new file mode 100644 index 000000000..8d0381909 --- /dev/null +++ b/pkg/cldata/deriver/event_builder.go @@ -0,0 +1,81 @@ +package deriver + +import ( + "context" + "time" + + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/google/uuid" + "github.com/pkg/errors" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// EventBuilder provides helper methods for constructing decorated events. +type EventBuilder struct { + ctxProvider cldata.ContextProvider +} + +// NewEventBuilder creates a new event builder. +func NewEventBuilder(ctxProvider cldata.ContextProvider) *EventBuilder { + return &EventBuilder{ctxProvider: ctxProvider} +} + +// CreateDecoratedEvent creates a new decorated event with common fields populated. +func (b *EventBuilder) CreateDecoratedEvent( + ctx context.Context, + eventName xatu.Event_Name, +) (*xatu.DecoratedEvent, error) { + clientMeta, err := b.ctxProvider.CreateClientMeta(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to create client metadata") + } + + metadata, ok := proto.Clone(clientMeta).(*xatu.ClientMeta) + if !ok { + return nil, errors.New("failed to clone client metadata") + } + + return &xatu.DecoratedEvent{ + Event: &xatu.Event{ + Name: eventName, + DateTime: timestamppb.New(time.Now()), + Id: uuid.New().String(), + }, + Meta: &xatu.Meta{ + Client: metadata, + }, + }, nil +} + +// BuildSlotV2 creates a SlotV2 from a slot number using the wallclock. 
+func (b *EventBuilder) BuildSlotV2(slotNum uint64) *xatu.SlotV2 { + slot := b.ctxProvider.Wallclock().Slots().FromNumber(slotNum) + + return &xatu.SlotV2{ + Number: &wrapperspb.UInt64Value{Value: slotNum}, + StartDateTime: timestamppb.New(slot.TimeWindow().Start()), + } +} + +// BuildEpochV2 creates an EpochV2 from an epoch number using the wallclock. +func (b *EventBuilder) BuildEpochV2(epochNum uint64) *xatu.EpochV2 { + epoch := b.ctxProvider.Wallclock().Epochs().FromNumber(epochNum) + + return &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: epochNum}, + StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), + } +} + +// BuildEpochV2FromSlot creates an EpochV2 from a slot number using the wallclock. +func (b *EventBuilder) BuildEpochV2FromSlot(slotNum uint64) *xatu.EpochV2 { + epoch := b.ctxProvider.Wallclock().Epochs().FromSlot(slotNum) + + return &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, + StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), + } +} diff --git a/pkg/cldata/deriver/extractors/attester_slashing.go b/pkg/cldata/deriver/extractors/attester_slashing.go new file mode 100644 index 000000000..8fd7e0f45 --- /dev/null +++ b/pkg/cldata/deriver/extractors/attester_slashing.go @@ -0,0 +1,130 @@ +package extractors + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "attester_slashing", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractAttesterSlashings, + }) +} + +// ExtractAttesterSlashings extracts attester 
slashing events from a beacon block. +func ExtractAttesterSlashings( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + slashings, err := block.AttesterSlashings() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attester slashings") + } + + if len(slashings) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(slashings)) + + for _, slashing := range slashings { + att1, err := slashing.Attestation1() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation 1") + } + + indexedAttestation1, err := convertIndexedAttestation(att1) + if err != nil { + return nil, errors.Wrap(err, "failed to convert indexed attestation 1") + } + + att2, err := slashing.Attestation2() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation 2") + } + + indexedAttestation2, err := convertIndexedAttestation(att2) + if err != nil { + return nil, errors.Wrap(err, "failed to convert indexed attestation 2") + } + + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockAttesterSlashing{ + EthV2BeaconBlockAttesterSlashing: &xatuethv1.AttesterSlashingV2{ + Attestation_1: indexedAttestation1, + Attestation_2: indexedAttestation2, + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockAttesterSlashing{ + EthV2BeaconBlockAttesterSlashing: &xatu.ClientMeta_AdditionalEthV2BeaconBlockAttesterSlashingData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} + +// convertIndexedAttestation converts a VersionedIndexedAttestation to an IndexedAttestationV2. 
+func convertIndexedAttestation(attestation *spec.VersionedIndexedAttestation) (*xatuethv1.IndexedAttestationV2, error) { + indices := make([]*wrapperspb.UInt64Value, 0) + + atIndices, err := attestation.AttestingIndices() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attesting indices") + } + + for _, index := range atIndices { + indices = append(indices, &wrapperspb.UInt64Value{Value: index}) + } + + data, err := attestation.Data() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation data") + } + + sig, err := attestation.Signature() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation signature") + } + + return &xatuethv1.IndexedAttestationV2{ + AttestingIndices: indices, + Data: &xatuethv1.AttestationDataV2{ + Slot: &wrapperspb.UInt64Value{Value: uint64(data.Slot)}, + Index: &wrapperspb.UInt64Value{Value: uint64(data.Index)}, + BeaconBlockRoot: data.BeaconBlockRoot.String(), + Source: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: uint64(data.Source.Epoch)}, + Root: data.Source.Root.String(), + }, + Target: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: uint64(data.Target.Epoch)}, + Root: data.Target.Root.String(), + }, + }, + Signature: sig.String(), + }, nil +} diff --git a/pkg/cldata/deriver/extractors/beacon_blob.go b/pkg/cldata/deriver/extractors/beacon_blob.go new file mode 100644 index 000000000..2f60db8a0 --- /dev/null +++ b/pkg/cldata/deriver/extractors/beacon_blob.go @@ -0,0 +1,106 @@ +package extractors + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/attestantio/go-eth2-client/api" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + 
"google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "beacon_blob", + CannonType: xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR, + ActivationFork: spec.DataVersionDeneb, + Mode: deriver.ProcessingModeEpoch, + EpochProcessor: ProcessBeaconBlobs, + }) +} + +// ProcessBeaconBlobs fetches and creates events for all blob sidecars in an epoch. +func ProcessBeaconBlobs( + ctx context.Context, + epoch phase0.Epoch, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + sp, err := beacon.Node().Spec() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain spec") + } + + builder := deriver.NewEventBuilder(ctxProvider) + allEvents := []*xatu.DecoratedEvent{} + + for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { + slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) + + blobs, err := beacon.FetchBeaconBlockBlobs(ctx, xatuethv1.SlotAsString(slot)) + if err != nil { + var apiErr *api.Error + if errors.As(err, &apiErr) { + switch apiErr.StatusCode { + case 404: + continue // No blobs for this slot + case 503: + return nil, errors.New("beacon node is syncing") + } + } + + return nil, errors.Wrapf(err, "failed to get beacon blob sidecars for slot %d", slot) + } + + if blobs == nil { + continue + } + + for _, blob := range blobs { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR) + if err != nil { + return nil, err + } + + blockRoot, err := blob.SignedBlockHeader.Message.HashTreeRoot() + if err != nil { + return nil, errors.Wrap(err, "failed to get block root") + } + + event.Data = &xatu.DecoratedEvent_EthV1BeaconBlockBlobSidecar{ + EthV1BeaconBlockBlobSidecar: &xatuethv1.BlobSidecar{ + Slot: &wrapperspb.UInt64Value{Value: uint64(blob.SignedBlockHeader.Message.Slot)}, + Blob: fmt.Sprintf("0x%s", hex.EncodeToString(blob.Blob[:])), + Index: &wrapperspb.UInt64Value{Value: 
uint64(blob.Index)}, + BlockRoot: fmt.Sprintf("0x%s", hex.EncodeToString(blockRoot[:])), + BlockParentRoot: blob.SignedBlockHeader.Message.ParentRoot.String(), + ProposerIndex: &wrapperspb.UInt64Value{Value: uint64(blob.SignedBlockHeader.Message.ProposerIndex)}, + KzgCommitment: blob.KZGCommitment.String(), + KzgProof: blob.KZGProof.String(), + }, + } + + //nolint:gosec // blob sizes are bounded + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1BeaconBlobSidecar{ + EthV1BeaconBlobSidecar: &xatu.ClientMeta_AdditionalEthV1BeaconBlobSidecarData{ + DataSize: &wrapperspb.UInt64Value{Value: uint64(len(blob.Blob))}, + DataEmptySize: &wrapperspb.UInt64Value{Value: uint64(cldata.CountConsecutiveEmptyBytes(blob.Blob[:], 4))}, + VersionedHash: cldata.ConvertKzgCommitmentToVersionedHash(blob.KZGCommitment[:]).String(), + Slot: builder.BuildSlotV2(uint64(blob.SignedBlockHeader.Message.Slot)), + Epoch: builder.BuildEpochV2FromSlot(uint64(blob.SignedBlockHeader.Message.Slot)), + }, + } + + allEvents = append(allEvents, event) + } + } + + return allEvents, nil +} diff --git a/pkg/cldata/deriver/extractors/beacon_block.go b/pkg/cldata/deriver/extractors/beacon_block.go new file mode 100644 index 000000000..8709024f7 --- /dev/null +++ b/pkg/cldata/deriver/extractors/beacon_block.go @@ -0,0 +1,178 @@ +package extractors + +import ( + "context" + "fmt" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + "github.com/ethpandaops/xatu/pkg/proto/eth" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + ssz "github.com/ferranbt/fastssz" + "github.com/golang/snappy" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "beacon_block", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK, + ActivationFork: spec.DataVersionPhase0, + Mode: 
deriver.ProcessingModeSlot, + BlockExtractor: ExtractBeaconBlock, + }) +} + +// ExtractBeaconBlock extracts a beacon block event from a block. +func ExtractBeaconBlock( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + builder := deriver.NewEventBuilder(ctxProvider) + + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_V2) + if err != nil { + return nil, err + } + + data, err := eth.NewEventBlockV2FromVersionSignedBeaconBlock(block) + if err != nil { + return nil, errors.Wrap(err, "failed to create event block") + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockV2{ + EthV2BeaconBlockV2: data, + } + + additionalData, err := getBeaconBlockAdditionalData(block, blockID, ctxProvider) + if err != nil { + return nil, errors.Wrap(err, "failed to get additional data") + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockV2{ + EthV2BeaconBlockV2: additionalData, + } + + return []*xatu.DecoratedEvent{event}, nil +} + +func getBeaconBlockAdditionalData( + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + ctxProvider cldata.ContextProvider, +) (*xatu.ClientMeta_AdditionalEthV2BeaconBlockV2Data, error) { + extra := &xatu.ClientMeta_AdditionalEthV2BeaconBlockV2Data{} + + slotI, err := block.Slot() + if err != nil { + return nil, err + } + + wallclock := ctxProvider.Wallclock() + slot := wallclock.Slots().FromNumber(uint64(slotI)) + epoch := wallclock.Epochs().FromSlot(uint64(slotI)) + + extra.Slot = &xatu.SlotV2{ + StartDateTime: timestamppb.New(slot.TimeWindow().Start()), + Number: &wrapperspb.UInt64Value{Value: uint64(slotI)}, + } + + extra.Epoch = &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, + StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), + } + + extra.Version = block.Version.String() + + var 
txCount int + + var txSize int + + var transactionsBytes []byte + + transactions, err := block.ExecutionTransactions() + if err != nil { + return nil, errors.Wrap(err, "failed to get execution transactions") + } + + txs := make([][]byte, len(transactions)) + for i, tx := range transactions { + txs[i] = tx + } + + txCount = len(txs) + + for _, tx := range txs { + txSize += len(tx) + transactionsBytes = append(transactionsBytes, tx...) + } + + blockMessage, err := getBlockMessage(block) + if err != nil { + return nil, err + } + + sszData, err := ssz.MarshalSSZ(blockMessage) + if err != nil { + return nil, err + } + + dataSize := len(sszData) + compressedData := snappy.Encode(nil, sszData) + compressedDataSize := len(compressedData) + + blockRoot, err := block.Root() + if err != nil { + return nil, err + } + + extra.BlockRoot = fmt.Sprintf("%#x", blockRoot) + + compressedTransactions := snappy.Encode(nil, transactionsBytes) + compressedTxSize := len(compressedTransactions) + + extra.TotalBytes = wrapperspb.UInt64(uint64(dataSize)) + extra.TotalBytesCompressed = wrapperspb.UInt64(uint64(compressedDataSize)) + extra.TransactionsCount = wrapperspb.UInt64(uint64(txCount)) + //nolint:gosec // txSize is always non-negative + extra.TransactionsTotalBytes = wrapperspb.UInt64(uint64(txSize)) + extra.TransactionsTotalBytesCompressed = wrapperspb.UInt64(uint64(compressedTxSize)) + + // Always set to true when derived from the cannon. 
+ extra.FinalizedWhenRequested = true + + // Copy block identifier fields + if blockID != nil { + extra.Slot = blockID.Slot + extra.Epoch = blockID.Epoch + } + + return extra, nil +} + +func getBlockMessage(block *spec.VersionedSignedBeaconBlock) (ssz.Marshaler, error) { + switch block.Version { + case spec.DataVersionPhase0: + return block.Phase0.Message, nil + case spec.DataVersionAltair: + return block.Altair.Message, nil + case spec.DataVersionBellatrix: + return block.Bellatrix.Message, nil + case spec.DataVersionCapella: + return block.Capella.Message, nil + case spec.DataVersionDeneb: + return block.Deneb.Message, nil + case spec.DataVersionElectra: + return block.Electra.Message, nil + case spec.DataVersionFulu: + return block.Fulu.Message, nil + default: + return nil, fmt.Errorf("unsupported block version: %s", block.Version) + } +} diff --git a/pkg/cldata/deriver/extractors/beacon_committee.go b/pkg/cldata/deriver/extractors/beacon_committee.go new file mode 100644 index 000000000..af6bb263f --- /dev/null +++ b/pkg/cldata/deriver/extractors/beacon_committee.go @@ -0,0 +1,89 @@ +package extractors + +import ( + "context" + "fmt" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "beacon_committee", + CannonType: xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeEpoch, + EpochProcessor: ProcessBeaconCommittees, + }) +} + +// ProcessBeaconCommittees fetches and creates events for all beacon committees in an epoch. 
+func ProcessBeaconCommittees( + ctx context.Context, + epoch phase0.Epoch, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + sp, err := beacon.Node().Spec() + if err != nil { + return nil, errors.Wrap(err, "failed to get beacon spec") + } + + committees, err := beacon.FetchBeaconCommittee(ctx, epoch) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch beacon committees") + } + + // Validate committees belong to the correct epoch. + minSlot := phase0.Slot(epoch) * sp.SlotsPerEpoch + maxSlot := (phase0.Slot(epoch) * sp.SlotsPerEpoch) + sp.SlotsPerEpoch - 1 + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(committees)) + + for _, committee := range committees { + if committee.Slot < minSlot || committee.Slot > maxSlot { + return nil, fmt.Errorf( + "beacon committee slot outside of epoch. (epoch: %d, slot: %d, min: %d, max: %d)", + epoch, committee.Slot, minSlot, maxSlot, + ) + } + + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V1_BEACON_COMMITTEE) + if err != nil { + return nil, err + } + + validators := make([]*wrapperspb.UInt64Value, 0, len(committee.Validators)) + for _, validator := range committee.Validators { + validators = append(validators, wrapperspb.UInt64(uint64(validator))) + } + + event.Data = &xatu.DecoratedEvent_EthV1BeaconCommittee{ + EthV1BeaconCommittee: &xatuethv1.Committee{ + Slot: wrapperspb.UInt64(uint64(committee.Slot)), + Index: wrapperspb.UInt64(uint64(committee.Index)), + Validators: validators, + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1BeaconCommittee{ + EthV1BeaconCommittee: &xatu.ClientMeta_AdditionalEthV1BeaconCommitteeData{ + StateId: xatuethv1.StateIDFinalized, + Slot: builder.BuildSlotV2(uint64(committee.Slot)), + Epoch: builder.BuildEpochV2FromSlot(uint64(committee.Slot)), + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git 
a/pkg/cldata/deriver/extractors/beacon_validators.go b/pkg/cldata/deriver/extractors/beacon_validators.go new file mode 100644 index 000000000..bb2e16e14 --- /dev/null +++ b/pkg/cldata/deriver/extractors/beacon_validators.go @@ -0,0 +1,116 @@ +package extractors + +import ( + "context" + "fmt" + + apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// DefaultValidatorChunkSize is the default number of validators per event. +const DefaultValidatorChunkSize = 100 + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "beacon_validators", + CannonType: xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeEpoch, + EpochProcessor: ProcessBeaconValidators, + }) +} + +// ProcessBeaconValidators fetches and creates chunked events for all validators in an epoch. 
+func ProcessBeaconValidators( + ctx context.Context, + epoch phase0.Epoch, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + sp, err := beacon.Node().Spec() + if err != nil { + return nil, errors.Wrap(err, "failed to fetch spec") + } + + boundarySlot := phase0.Slot(uint64(epoch) * uint64(sp.SlotsPerEpoch)) + + validatorsMap, err := beacon.GetValidators(ctx, xatuethv1.SlotAsString(boundarySlot)) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch validator states") + } + + // Clean up cache after fetch + defer beacon.DeleteValidatorsFromCache(xatuethv1.SlotAsString(boundarySlot)) + + // Chunk the validators + chunkSize := DefaultValidatorChunkSize + + var validatorChunks [][]*apiv1.Validator + + currentChunk := make([]*apiv1.Validator, 0, chunkSize) + + for _, validator := range validatorsMap { + if len(currentChunk) == chunkSize { + validatorChunks = append(validatorChunks, currentChunk) + currentChunk = make([]*apiv1.Validator, 0, chunkSize) + } + + currentChunk = append(currentChunk, validator) + } + + if len(currentChunk) > 0 { + validatorChunks = append(validatorChunks, currentChunk) + } + + builder := deriver.NewEventBuilder(ctxProvider) + allEvents := make([]*xatu.DecoratedEvent, 0, len(validatorChunks)) + + for _, chunk := range validatorChunks { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V1_BEACON_VALIDATORS) + if err != nil { + return nil, err + } + + data := xatu.Validators{} + + for _, validator := range chunk { + data.Validators = append(data.Validators, &xatuethv1.Validator{ + Index: wrapperspb.UInt64(uint64(validator.Index)), + Balance: wrapperspb.UInt64(uint64(validator.Balance)), + Status: wrapperspb.String(validator.Status.String()), + Data: &xatuethv1.ValidatorData{ + Pubkey: wrapperspb.String(validator.Validator.PublicKey.String()), + WithdrawalCredentials: wrapperspb.String(fmt.Sprintf("%#x", validator.Validator.WithdrawalCredentials)), + 
EffectiveBalance: wrapperspb.UInt64(uint64(validator.Validator.EffectiveBalance)), + Slashed: wrapperspb.Bool(validator.Validator.Slashed), + ActivationEpoch: wrapperspb.UInt64(uint64(validator.Validator.ActivationEpoch)), + ActivationEligibilityEpoch: wrapperspb.UInt64(uint64(validator.Validator.ActivationEligibilityEpoch)), + ExitEpoch: wrapperspb.UInt64(uint64(validator.Validator.ExitEpoch)), + WithdrawableEpoch: wrapperspb.UInt64(uint64(validator.Validator.WithdrawableEpoch)), + }, + }) + } + + event.Data = &xatu.DecoratedEvent_EthV1Validators{ + EthV1Validators: &data, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1Validators{ + EthV1Validators: &xatu.ClientMeta_AdditionalEthV1ValidatorsData{ + Epoch: builder.BuildEpochV2(uint64(epoch)), + }, + } + + allEvents = append(allEvents, event) + } + + return allEvents, nil +} diff --git a/pkg/cldata/deriver/extractors/bls_to_execution_change.go b/pkg/cldata/deriver/extractors/bls_to_execution_change.go new file mode 100644 index 000000000..f88da3c32 --- /dev/null +++ b/pkg/cldata/deriver/extractors/bls_to_execution_change.go @@ -0,0 +1,72 @@ +package extractors + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv2 "github.com/ethpandaops/xatu/pkg/proto/eth/v2" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "bls_to_execution_change", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE, + ActivationFork: spec.DataVersionCapella, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractBLSToExecutionChanges, + }) +} + +// ExtractBLSToExecutionChanges extracts BLS to execution change events from a beacon block. 
+func ExtractBLSToExecutionChanges( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + changes, err := block.BLSToExecutionChanges() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain BLS to execution changes") + } + + if len(changes) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(changes)) + + for _, change := range changes { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockBlsToExecutionChange{ + EthV2BeaconBlockBlsToExecutionChange: &xatuethv2.SignedBLSToExecutionChangeV2{ + Message: &xatuethv2.BLSToExecutionChangeV2{ + ValidatorIndex: wrapperspb.UInt64(uint64(change.Message.ValidatorIndex)), + FromBlsPubkey: change.Message.FromBLSPubkey.String(), + ToExecutionAddress: change.Message.ToExecutionAddress.String(), + }, + Signature: change.Signature.String(), + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockBlsToExecutionChange{ + EthV2BeaconBlockBlsToExecutionChange: &xatu.ClientMeta_AdditionalEthV2BeaconBlockBLSToExecutionChangeData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git a/pkg/cldata/deriver/extractors/deposit.go b/pkg/cldata/deriver/extractors/deposit.go new file mode 100644 index 000000000..2cecdd634 --- /dev/null +++ b/pkg/cldata/deriver/extractors/deposit.go @@ -0,0 +1,79 @@ +package extractors + +import ( + "context" + "fmt" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + 
"github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "deposit", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractDeposits, + }) +} + +// ExtractDeposits extracts deposit events from a beacon block. +func ExtractDeposits( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + deposits, err := block.Deposits() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain deposits") + } + + if len(deposits) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(deposits)) + + for _, deposit := range deposits { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT) + if err != nil { + return nil, err + } + + proof := make([]string, 0, len(deposit.Proof)) + for _, p := range deposit.Proof { + proof = append(proof, fmt.Sprintf("0x%x", p)) + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockDeposit{ + EthV2BeaconBlockDeposit: &xatuethv1.DepositV2{ + Proof: proof, + Data: &xatuethv1.DepositV2_Data{ + Pubkey: deposit.Data.PublicKey.String(), + WithdrawalCredentials: fmt.Sprintf("0x%x", deposit.Data.WithdrawalCredentials), + Amount: wrapperspb.UInt64(uint64(deposit.Data.Amount)), + Signature: deposit.Data.Signature.String(), + }, + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockDeposit{ + EthV2BeaconBlockDeposit: &xatu.ClientMeta_AdditionalEthV2BeaconBlockDepositData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git 
a/pkg/cldata/deriver/extractors/elaborated_attestation.go b/pkg/cldata/deriver/extractors/elaborated_attestation.go new file mode 100644 index 000000000..9631faa79 --- /dev/null +++ b/pkg/cldata/deriver/extractors/elaborated_attestation.go @@ -0,0 +1,327 @@ +package extractors + +import ( + "context" + "fmt" + + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "elaborated_attestation", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractElaboratedAttestations, + }) +} + +// ExtractElaboratedAttestations extracts elaborated attestation events from a beacon block. 
+func ExtractElaboratedAttestations( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + blockAttestations, err := block.Attestations() + if err != nil { + return nil, err + } + + if len(blockAttestations) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(blockAttestations)) + + for positionInBlock, attestation := range blockAttestations { + attestationData, err := attestation.Data() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation data") + } + + signature, err := attestation.Signature() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation signature") + } + + // Handle different attestation versions + switch attestation.Version { + case spec.DataVersionPhase0, spec.DataVersionAltair, spec.DataVersionBellatrix, + spec.DataVersionCapella, spec.DataVersionDeneb: + // For pre-Electra attestations, each attestation can only have one committee + indexes, indexErr := getAttestingValidatorIndexesPhase0(ctx, attestation, beacon, ctxProvider) + if indexErr != nil { + return nil, errors.Wrap(indexErr, "failed to get attesting validator indexes") + } + + elaboratedAttestation := &xatuethv1.ElaboratedAttestation{ + Signature: signature.String(), + Data: &xatuethv1.AttestationDataV2{ + Slot: &wrapperspb.UInt64Value{Value: uint64(attestationData.Slot)}, + Index: &wrapperspb.UInt64Value{Value: uint64(attestationData.Index)}, + BeaconBlockRoot: xatuethv1.RootAsString(attestationData.BeaconBlockRoot), + Source: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Source.Epoch)}, + Root: xatuethv1.RootAsString(attestationData.Source.Root), + }, + Target: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: 
uint64(attestationData.Target.Epoch)}, + Root: xatuethv1.RootAsString(attestationData.Target.Root), + }, + }, + ValidatorIndexes: indexes, + } + + //nolint:gosec // positionInBlock bounded by attestations per block + event, eventErr := createElaboratedAttestationEvent( + ctx, + builder, + elaboratedAttestation, + uint64(positionInBlock), + blockID, + ctxProvider, + ) + if eventErr != nil { + return nil, errors.Wrapf(eventErr, "failed to create event for attestation %s", attestation.String()) + } + + events = append(events, event) + + default: + // For Electra attestations, create multiple events (one per committee) + electraEvents, electraErr := processElectraAttestation( + ctx, + builder, + attestation, + attestationData, + &signature, + positionInBlock, + blockID, + beacon, + ctxProvider, + ) + if electraErr != nil { + return nil, electraErr + } + + events = append(events, electraEvents...) + } + } + + return events, nil +} + +func processElectraAttestation( + ctx context.Context, + builder *deriver.EventBuilder, + attestation *spec.VersionedAttestation, + attestationData *phase0.AttestationData, + signature *phase0.BLSSignature, + positionInBlock int, + blockID *xatu.BlockIdentifier, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + committeeBits, err := attestation.CommitteeBits() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation committee bits") + } + + aggregationBits, err := attestation.AggregationBits() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation aggregation bits") + } + + committeeIndices := committeeBits.BitIndices() + committeeOffset := 0 + events := make([]*xatu.DecoratedEvent, 0, len(committeeIndices)) + + for _, committeeIdx := range committeeIndices { + epoch := ctxProvider.Wallclock().Epochs().FromSlot(uint64(attestationData.Slot)) + + epochCommittees, err := beacon.FetchBeaconCommittee(ctx, phase0.Epoch(epoch.Number())) + if err 
!= nil { + return nil, errors.Wrap(err, "failed to get committees for epoch") + } + + var committee *v1.BeaconCommittee + + for _, c := range epochCommittees { + //nolint:gosec // committeeIdx capped at 64 committees in spec + if c.Slot == attestationData.Slot && c.Index == phase0.CommitteeIndex(committeeIdx) { + committee = c + + break + } + } + + if committee == nil { + return nil, fmt.Errorf("committee %d in slot %d not found", committeeIdx, attestationData.Slot) + } + + committeeSize := len(committee.Validators) + committeeValidatorIndexes := make([]*wrapperspb.UInt64Value, 0, committeeSize) + + for i := 0; i < committeeSize; i++ { + aggregationBitPosition := committeeOffset + i + + //nolint:gosec // aggregationBitPosition bounded by committee size + if uint64(aggregationBitPosition) < aggregationBits.Len() && + aggregationBits.BitAt(uint64(aggregationBitPosition)) { + validatorIndex := committee.Validators[i] + committeeValidatorIndexes = append(committeeValidatorIndexes, wrapperspb.UInt64(uint64(validatorIndex))) + } + } + + elaboratedAttestation := &xatuethv1.ElaboratedAttestation{ + Signature: signature.String(), + Data: &xatuethv1.AttestationDataV2{ + Slot: &wrapperspb.UInt64Value{Value: uint64(attestationData.Slot)}, + //nolint:gosec // committeeIdx capped at 64 committees in spec + Index: &wrapperspb.UInt64Value{Value: uint64(committeeIdx)}, + BeaconBlockRoot: xatuethv1.RootAsString(attestationData.BeaconBlockRoot), + Source: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Source.Epoch)}, + Root: xatuethv1.RootAsString(attestationData.Source.Root), + }, + Target: &xatuethv1.CheckpointV2{ + Epoch: &wrapperspb.UInt64Value{Value: uint64(attestationData.Target.Epoch)}, + Root: xatuethv1.RootAsString(attestationData.Target.Root), + }, + }, + ValidatorIndexes: committeeValidatorIndexes, + } + + //nolint:gosec // positionInBlock bounded by attestations per block + event, err := createElaboratedAttestationEvent( + ctx, + 
builder, + elaboratedAttestation, + uint64(positionInBlock), + blockID, + ctxProvider, + ) + if err != nil { + return nil, errors.Wrapf( + err, + "failed to create event for attestation %s committee %d", + attestation.String(), + committeeIdx, + ) + } + + events = append(events, event) + committeeOffset += committeeSize + } + + return events, nil +} + +func getAttestingValidatorIndexesPhase0( + ctx context.Context, + attestation *spec.VersionedAttestation, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*wrapperspb.UInt64Value, error) { + attestationData, err := attestation.Data() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation data") + } + + epoch := ctxProvider.Wallclock().Epochs().FromSlot(uint64(attestationData.Slot)) + + bitIndices, err := attestation.AggregationBits() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain attestation aggregation bits") + } + + positions := bitIndices.BitIndices() + indexes := make([]*wrapperspb.UInt64Value, 0, len(positions)) + + for _, position := range positions { + validatorIndex, err := beacon.GetValidatorIndex( + ctx, + phase0.Epoch(epoch.Number()), + attestationData.Slot, + attestationData.Index, + //nolint:gosec // position bounded by committee size + uint64(position), + ) + if err != nil { + return nil, errors.Wrapf(err, "failed to get validator index for position %d", position) + } + + indexes = append(indexes, wrapperspb.UInt64(uint64(validatorIndex))) + } + + return indexes, nil +} + +func createElaboratedAttestationEvent( + ctx context.Context, + builder *deriver.EventBuilder, + attestation *xatuethv1.ElaboratedAttestation, + positionInBlock uint64, + blockID *xatu.BlockIdentifier, + ctxProvider cldata.ContextProvider, +) (*xatu.DecoratedEvent, error) { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION) + if err != nil { + return nil, err + } + + event.Data = 
&xatu.DecoratedEvent_EthV2BeaconBlockElaboratedAttestation{ + EthV2BeaconBlockElaboratedAttestation: attestation, + } + + attestationSlot := ctxProvider.Wallclock().Slots().FromNumber(attestation.Data.Slot.Value) + epoch := ctxProvider.Wallclock().Epochs().FromSlot(attestationSlot.Number()) + + targetEpoch := ctxProvider.Wallclock().Epochs().FromNumber(attestation.Data.Target.Epoch.GetValue()) + target := &xatu.ClientMeta_AdditionalEthV1AttestationTargetV2Data{ + Epoch: &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: targetEpoch.Number()}, + StartDateTime: timestamppb.New(targetEpoch.TimeWindow().Start()), + }, + } + + sourceEpoch := ctxProvider.Wallclock().Epochs().FromNumber(attestation.Data.Source.Epoch.GetValue()) + source := &xatu.ClientMeta_AdditionalEthV1AttestationSourceV2Data{ + Epoch: &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: sourceEpoch.Number()}, + StartDateTime: timestamppb.New(sourceEpoch.TimeWindow().Start()), + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockElaboratedAttestation{ + EthV2BeaconBlockElaboratedAttestation: &xatu.ClientMeta_AdditionalEthV2BeaconBlockElaboratedAttestationData{ + Block: blockID, + PositionInBlock: wrapperspb.UInt64(positionInBlock), + Slot: &xatu.SlotV2{ + Number: &wrapperspb.UInt64Value{Value: attestationSlot.Number()}, + StartDateTime: timestamppb.New(attestationSlot.TimeWindow().Start()), + }, + Epoch: &xatu.EpochV2{ + Number: &wrapperspb.UInt64Value{Value: epoch.Number()}, + StartDateTime: timestamppb.New(epoch.TimeWindow().Start()), + }, + Source: source, + Target: target, + }, + } + + return event, nil +} diff --git a/pkg/cldata/deriver/extractors/execution_transaction.go b/pkg/cldata/deriver/extractors/execution_transaction.go new file mode 100644 index 000000000..e156828b1 --- /dev/null +++ b/pkg/cldata/deriver/extractors/execution_transaction.go @@ -0,0 +1,237 @@ +package extractors + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "strconv" + + 
"github.com/attestantio/go-eth2-client/api" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "execution_transaction", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION, + ActivationFork: spec.DataVersionBellatrix, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractExecutionTransactions, + }) +} + +// ExtractExecutionTransactions extracts execution transaction events from a beacon block. +func ExtractExecutionTransactions( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + log := logrus.WithField("extractor", "execution_transaction") + + // Fetch blob sidecars for Deneb+ blocks + blobSidecars := []*deneb.BlobSidecar{} + + slot, err := block.Slot() + if err != nil { + return nil, errors.Wrap(err, "failed to get block slot") + } + + if block.Version >= spec.DataVersionDeneb { + sidecars, fetchErr := beacon.FetchBeaconBlockBlobs(ctx, xatuethv1.SlotAsString(slot)) + if fetchErr != nil { + var apiErr *api.Error + if errors.As(fetchErr, &apiErr) { + switch apiErr.StatusCode { + case 404: + log.WithField("slot", slot).Debug("no beacon block blob sidecars found for slot") + case 503: + return nil, errors.New("beacon node is syncing") + default: + return nil, errors.Wrapf(err, "failed to get beacon block blob sidecars for slot %d", slot) + } + } else { + return nil, errors.Wrapf(err, "failed to get beacon block 
blob sidecars for slot %d", slot) + } + } else { + blobSidecars = sidecars + } + } + + blobSidecarsMap := make(map[string]*deneb.BlobSidecar, len(blobSidecars)) + + for _, blobSidecar := range blobSidecars { + versionedHash := cldata.ConvertKzgCommitmentToVersionedHash(blobSidecar.KZGCommitment[:]) + blobSidecarsMap[versionedHash.String()] = blobSidecar + } + + // Get execution transactions + txBytes, err := block.ExecutionTransactions() + if err != nil { + return nil, fmt.Errorf("failed to get execution transactions: %w", err) + } + + transactions := make([]*types.Transaction, 0, len(txBytes)) + + for _, txData := range txBytes { + ethTransaction := new(types.Transaction) + if err := ethTransaction.UnmarshalBinary(txData); err != nil { + return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) + } + + transactions = append(transactions, ethTransaction) + } + + chainID := new(big.Int).SetUint64(ctxProvider.DepositChainID()) + if chainID.Cmp(big.NewInt(0)) == 0 { + return nil, fmt.Errorf("failed to get chain ID from context provider") + } + + signer := types.LatestSignerForChainID(chainID) + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(transactions)) + + for index, transaction := range transactions { + from, err := types.Sender(signer, transaction) + if err != nil { + return nil, fmt.Errorf("failed to get transaction sender: %w", err) + } + + gasPrice, err := getGasPrice(block, transaction) + if err != nil { + return nil, fmt.Errorf("failed to get transaction gas price: %w", err) + } + + if gasPrice == nil { + return nil, fmt.Errorf("failed to get transaction gas price") + } + + value := transaction.Value() + if value == nil { + return nil, fmt.Errorf("failed to get transaction value") + } + + to := "" + if transaction.To() != nil { + to = transaction.To().Hex() + } + + tx := &xatuethv1.Transaction{ + Nonce: wrapperspb.UInt64(transaction.Nonce()), + Gas: wrapperspb.UInt64(transaction.Gas()), + GasPrice: 
gasPrice.String(), + GasTipCap: transaction.GasTipCap().String(), + GasFeeCap: transaction.GasFeeCap().String(), + To: to, + From: from.Hex(), + Value: value.String(), + Input: hex.EncodeToString(transaction.Data()), + Hash: transaction.Hash().Hex(), + ChainId: chainID.String(), + Type: wrapperspb.UInt32(uint32(transaction.Type())), + } + + sidecarsEmptySize := 0 + sidecarsSize := 0 + + if transaction.Type() == 3 { + blobHashes := make([]string, len(transaction.BlobHashes())) + + if len(transaction.BlobHashes()) == 0 { + log.WithField("transaction", transaction.Hash().Hex()).Warn("no versioned hashes for type 3 transaction") + } + + for i := 0; i < len(transaction.BlobHashes()); i++ { + hash := transaction.BlobHashes()[i] + blobHashes[i] = hash.String() + sidecar := blobSidecarsMap[hash.String()] + + if sidecar != nil { + sidecarsSize += len(sidecar.Blob) + sidecarsEmptySize += cldata.CountConsecutiveEmptyBytes(sidecar.Blob[:], 4) + } else { + log.WithField("versioned hash", hash.String()).WithField("transaction", transaction.Hash().Hex()).Warn("missing blob sidecar") + } + } + + tx.BlobGas = wrapperspb.UInt64(transaction.BlobGas()) + tx.BlobGasFeeCap = transaction.BlobGasFeeCap().String() + tx.BlobHashes = blobHashes + } + + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockExecutionTransaction{ + EthV2BeaconBlockExecutionTransaction: tx, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockExecutionTransaction{ + EthV2BeaconBlockExecutionTransaction: &xatu.ClientMeta_AdditionalEthV2BeaconBlockExecutionTransactionData{ + Block: blockID, + //nolint:gosec // index from range is bounded by block transaction limit + PositionInBlock: wrapperspb.UInt64(uint64(index)), + Size: strconv.FormatFloat(float64(transaction.Size()), 'f', 0, 64), + CallDataSize: fmt.Sprintf("%d", 
len(transaction.Data())), + BlobSidecarsSize: fmt.Sprint(sidecarsSize), + BlobSidecarsEmptySize: fmt.Sprint(sidecarsEmptySize), + }, + } + + events = append(events, event) + } + + return events, nil +} + +// getGasPrice calculates the effective gas price for a transaction based on its type and block version. +func getGasPrice(block *spec.VersionedSignedBeaconBlock, transaction *types.Transaction) (*big.Int, error) { + if transaction.Type() == 0 || transaction.Type() == 1 { + return transaction.GasPrice(), nil + } + + if transaction.Type() == 2 || transaction.Type() == 3 || transaction.Type() == 4 { // EIP-1559/blob/7702 transactions + baseFee := new(big.Int) + + switch block.Version { + case spec.DataVersionBellatrix: + baseFee = new(big.Int).SetBytes(block.Bellatrix.Message.Body.ExecutionPayload.BaseFeePerGas[:]) + case spec.DataVersionCapella: + baseFee = new(big.Int).SetBytes(block.Capella.Message.Body.ExecutionPayload.BaseFeePerGas[:]) + case spec.DataVersionDeneb: + executionPayload := block.Deneb.Message.Body.ExecutionPayload + baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) + case spec.DataVersionElectra: + executionPayload := block.Electra.Message.Body.ExecutionPayload + baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) + case spec.DataVersionFulu: + executionPayload := block.Fulu.Message.Body.ExecutionPayload + baseFee.SetBytes(executionPayload.BaseFeePerGas.Bytes()) + default: + return nil, fmt.Errorf("unknown block version: %d", block.Version) + } + + // Calculate Effective Gas Price: min(max_fee_per_gas, base_fee + max_priority_fee_per_gas) + gasPrice := new(big.Int).Add(baseFee, transaction.GasTipCap()) + if gasPrice.Cmp(transaction.GasFeeCap()) > 0 { + gasPrice = transaction.GasFeeCap() + } + + return gasPrice, nil + } + + return nil, fmt.Errorf("unknown transaction type: %d", transaction.Type()) +} diff --git a/pkg/cldata/deriver/extractors/proposer_duty.go b/pkg/cldata/deriver/extractors/proposer_duty.go new file mode 100644 index 
000000000..31adfe254 --- /dev/null +++ b/pkg/cldata/deriver/extractors/proposer_duty.go @@ -0,0 +1,69 @@ +package extractors + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "proposer_duty", + CannonType: xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeEpoch, + EpochProcessor: ProcessProposerDuties, + }) +} + +// ProcessProposerDuties fetches and creates events for all proposer duties in an epoch. +func ProcessProposerDuties( + ctx context.Context, + epoch phase0.Epoch, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + duties, err := beacon.FetchProposerDuties(ctx, epoch) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch proposer duties") + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(duties)) + + for _, duty := range duties { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V1_PROPOSER_DUTY) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV1ProposerDuty{ + EthV1ProposerDuty: &xatuethv1.ProposerDuty{ + Slot: wrapperspb.UInt64(uint64(duty.Slot)), + Pubkey: fmt.Sprintf("0x%s", hex.EncodeToString(duty.PubKey[:])), + ValidatorIndex: wrapperspb.UInt64(uint64(duty.ValidatorIndex)), + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV1ProposerDuty{ + EthV1ProposerDuty: &xatu.ClientMeta_AdditionalEthV1ProposerDutyData{ + StateId: 
xatuethv1.StateIDFinalized, + Slot: builder.BuildSlotV2(uint64(duty.Slot)), + Epoch: builder.BuildEpochV2FromSlot(uint64(duty.Slot)), + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git a/pkg/cldata/deriver/extractors/proposer_slashing.go b/pkg/cldata/deriver/extractors/proposer_slashing.go new file mode 100644 index 000000000..8072309ad --- /dev/null +++ b/pkg/cldata/deriver/extractors/proposer_slashing.go @@ -0,0 +1,86 @@ +package extractors + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "proposer_slashing", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractProposerSlashings, + }) +} + +// ExtractProposerSlashings extracts proposer slashing events from a beacon block. 
+func ExtractProposerSlashings( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + slashings, err := block.ProposerSlashings() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain proposer slashings") + } + + if len(slashings) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(slashings)) + + for _, slashing := range slashings { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockProposerSlashing{ + EthV2BeaconBlockProposerSlashing: &xatuethv1.ProposerSlashingV2{ + SignedHeader_1: &xatuethv1.SignedBeaconBlockHeaderV2{ + Message: &xatuethv1.BeaconBlockHeaderV2{ + Slot: wrapperspb.UInt64(uint64(slashing.SignedHeader1.Message.Slot)), + ProposerIndex: wrapperspb.UInt64(uint64(slashing.SignedHeader1.Message.ProposerIndex)), + ParentRoot: xatuethv1.RootAsString(slashing.SignedHeader1.Message.ParentRoot), + StateRoot: xatuethv1.RootAsString(slashing.SignedHeader1.Message.StateRoot), + BodyRoot: xatuethv1.RootAsString(slashing.SignedHeader1.Message.BodyRoot), + }, + Signature: slashing.SignedHeader1.Signature.String(), + }, + SignedHeader_2: &xatuethv1.SignedBeaconBlockHeaderV2{ + Message: &xatuethv1.BeaconBlockHeaderV2{ + Slot: wrapperspb.UInt64(uint64(slashing.SignedHeader2.Message.Slot)), + ProposerIndex: wrapperspb.UInt64(uint64(slashing.SignedHeader2.Message.ProposerIndex)), + ParentRoot: xatuethv1.RootAsString(slashing.SignedHeader2.Message.ParentRoot), + StateRoot: xatuethv1.RootAsString(slashing.SignedHeader2.Message.StateRoot), + BodyRoot: xatuethv1.RootAsString(slashing.SignedHeader2.Message.BodyRoot), + }, + Signature: 
slashing.SignedHeader2.Signature.String(), + }, + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockProposerSlashing{ + EthV2BeaconBlockProposerSlashing: &xatu.ClientMeta_AdditionalEthV2BeaconBlockProposerSlashingData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git a/pkg/cldata/deriver/extractors/voluntary_exit.go b/pkg/cldata/deriver/extractors/voluntary_exit.go new file mode 100644 index 000000000..4f43b4833 --- /dev/null +++ b/pkg/cldata/deriver/extractors/voluntary_exit.go @@ -0,0 +1,71 @@ +package extractors + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + deriver.Register(&deriver.DeriverSpec{ + Name: "voluntary_exit", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT, + ActivationFork: spec.DataVersionPhase0, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractVoluntaryExits, + }) +} + +// ExtractVoluntaryExits extracts voluntary exit events from a beacon block. 
+func ExtractVoluntaryExits( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + exits, err := block.VoluntaryExits() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain voluntary exits") + } + + if len(exits) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(exits)) + + for _, exit := range exits { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockVoluntaryExit{ + EthV2BeaconBlockVoluntaryExit: &xatuethv1.SignedVoluntaryExitV2{ + Message: &xatuethv1.VoluntaryExitV2{ + Epoch: wrapperspb.UInt64(uint64(exit.Message.Epoch)), + ValidatorIndex: wrapperspb.UInt64(uint64(exit.Message.ValidatorIndex)), + }, + Signature: exit.Signature.String(), + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockVoluntaryExit{ + EthV2BeaconBlockVoluntaryExit: &xatu.ClientMeta_AdditionalEthV2BeaconBlockVoluntaryExitData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git a/pkg/cldata/deriver/extractors/withdrawal.go b/pkg/cldata/deriver/extractors/withdrawal.go new file mode 100644 index 000000000..b4be5c5e8 --- /dev/null +++ b/pkg/cldata/deriver/extractors/withdrawal.go @@ -0,0 +1,70 @@ +package extractors + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + 
deriver.Register(&deriver.DeriverSpec{ + Name: "withdrawal", + CannonType: xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL, + ActivationFork: spec.DataVersionCapella, + Mode: deriver.ProcessingModeSlot, + BlockExtractor: ExtractWithdrawals, + }) +} + +// ExtractWithdrawals extracts withdrawal events from a beacon block. +func ExtractWithdrawals( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + _ cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) { + withdrawals, err := block.Withdrawals() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain withdrawals") + } + + if len(withdrawals) == 0 { + return []*xatu.DecoratedEvent{}, nil + } + + builder := deriver.NewEventBuilder(ctxProvider) + events := make([]*xatu.DecoratedEvent, 0, len(withdrawals)) + + for _, withdrawal := range withdrawals { + event, err := builder.CreateDecoratedEvent(ctx, xatu.Event_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL) + if err != nil { + return nil, err + } + + event.Data = &xatu.DecoratedEvent_EthV2BeaconBlockWithdrawal{ + EthV2BeaconBlockWithdrawal: &xatuethv1.WithdrawalV2{ + Index: &wrapperspb.UInt64Value{Value: uint64(withdrawal.Index)}, + ValidatorIndex: &wrapperspb.UInt64Value{Value: uint64(withdrawal.ValidatorIndex)}, + Address: withdrawal.Address.String(), + Amount: &wrapperspb.UInt64Value{Value: uint64(withdrawal.Amount)}, + }, + } + + event.Meta.Client.AdditionalData = &xatu.ClientMeta_EthV2BeaconBlockWithdrawal{ + EthV2BeaconBlockWithdrawal: &xatu.ClientMeta_AdditionalEthV2BeaconBlockWithdrawalData{ + Block: blockID, + }, + } + + events = append(events, event) + } + + return events, nil +} diff --git a/pkg/cldata/deriver/factory.go b/pkg/cldata/deriver/factory.go new file mode 100644 index 000000000..f2949e135 --- /dev/null +++ b/pkg/cldata/deriver/factory.go @@ -0,0 +1,99 @@ +package deriver + +import ( + "github.com/ethpandaops/xatu/pkg/cldata" + 
"github.com/ethpandaops/xatu/pkg/cldata/iterator" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/sirupsen/logrus" +) + +// DeriverFactory creates derivers from the registry. +type DeriverFactory struct { + log logrus.FieldLogger + beacon cldata.BeaconClient + ctxProvider cldata.ContextProvider +} + +// NewDeriverFactory creates a new deriver factory. +func NewDeriverFactory( + log logrus.FieldLogger, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) *DeriverFactory { + return &DeriverFactory{ + log: log, + beacon: beacon, + ctxProvider: ctxProvider, + } +} + +// Create creates a generic deriver for the given cannon type. +// Returns nil if the cannon type is not registered. +func (f *DeriverFactory) Create( + cannonType xatu.CannonType, + enabled bool, + iter iterator.Iterator, +) *GenericDeriver { + spec, ok := Get(cannonType) + if !ok { + return nil + } + + return NewGenericDeriver( + f.log, + spec, + enabled, + iter, + f.beacon, + f.ctxProvider, + ) +} + +// CreateAll creates generic derivers for all registered types. +// The enabledFunc determines if each deriver should be enabled. +func (f *DeriverFactory) CreateAll( + iterFactory func(cannonType xatu.CannonType) iterator.Iterator, + enabledFunc func(cannonType xatu.CannonType) bool, +) []*GenericDeriver { + specs := All() + derivers := make([]*GenericDeriver, 0, len(specs)) + + for _, spec := range specs { + iter := iterFactory(spec.CannonType) + if iter == nil { + continue + } + + enabled := enabledFunc(spec.CannonType) + + derivers = append(derivers, NewGenericDeriver( + f.log, + spec, + enabled, + iter, + f.beacon, + f.ctxProvider, + )) + } + + return derivers +} + +// RegisteredTypes returns all registered cannon types. 
+func RegisteredTypes() []xatu.CannonType { + specs := All() + types := make([]xatu.CannonType, 0, len(specs)) + + for _, spec := range specs { + types = append(types, spec.CannonType) + } + + return types +} + +// IsRegistered checks if a cannon type is registered. +func IsRegistered(cannonType xatu.CannonType) bool { + _, ok := Get(cannonType) + + return ok +} diff --git a/pkg/cldata/deriver/generic.go b/pkg/cldata/deriver/generic.go new file mode 100644 index 000000000..a08c2e5a1 --- /dev/null +++ b/pkg/cldata/deriver/generic.go @@ -0,0 +1,272 @@ +package deriver + +import ( + "context" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + backoff "github.com/cenkalti/backoff/v5" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/cldata/iterator" + "github.com/ethpandaops/xatu/pkg/observability" + xatuethv1 "github.com/ethpandaops/xatu/pkg/proto/eth/v1" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// GenericDeriver is a universal deriver implementation that uses the registry +// pattern to handle all deriver types with minimal boilerplate. +type GenericDeriver struct { + log logrus.FieldLogger + enabled bool + spec *DeriverSpec + iterator iterator.Iterator + beacon cldata.BeaconClient + ctx cldata.ContextProvider + onEventsCallbacks []func(ctx context.Context, events []*xatu.DecoratedEvent) error +} + +// NewGenericDeriver creates a new generic deriver from a specification. 
+func NewGenericDeriver( + log logrus.FieldLogger, + deriverSpec *DeriverSpec, + enabled bool, + iter iterator.Iterator, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) *GenericDeriver { + return &GenericDeriver{ + log: log.WithFields(logrus.Fields{ + "module": "cldata/deriver/" + deriverSpec.Name, + "type": deriverSpec.CannonType.String(), + }), + enabled: enabled, + spec: deriverSpec, + iterator: iter, + beacon: beacon, + ctx: ctxProvider, + } +} + +// CannonType returns the cannon type of the deriver. +func (d *GenericDeriver) CannonType() xatu.CannonType { + return d.spec.CannonType +} + +// Name returns the name of the deriver. +func (d *GenericDeriver) Name() string { + return d.spec.CannonType.String() +} + +// ActivationFork returns the fork at which the deriver is activated. +func (d *GenericDeriver) ActivationFork() spec.DataVersion { + return d.spec.ActivationFork +} + +// OnEventsDerived registers a callback for when events are derived. +func (d *GenericDeriver) OnEventsDerived( + _ context.Context, + fn func(ctx context.Context, events []*xatu.DecoratedEvent) error, +) { + d.onEventsCallbacks = append(d.onEventsCallbacks, fn) +} + +// Start starts the deriver. +func (d *GenericDeriver) Start(ctx context.Context) error { + if !d.enabled { + d.log.Info("Deriver disabled") + + return nil + } + + d.log.Info("Deriver enabled") + + if err := d.iterator.Start(ctx, d.ActivationFork()); err != nil { + return errors.Wrap(err, "failed to start iterator") + } + + d.run(ctx) + + return nil +} + +// Stop stops the deriver. 
+func (d *GenericDeriver) Stop(_ context.Context) error { + return nil +} + +func (d *GenericDeriver) run(rctx context.Context) { + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = 3 * time.Minute + + tracer := observability.Tracer() + + for { + select { + case <-rctx.Done(): + return + default: + operation := func() (string, error) { + ctx, span := tracer.Start(rctx, fmt.Sprintf("Derive %s", d.Name()), + trace.WithAttributes( + attribute.String("network", d.ctx.NetworkName())), + ) + defer span.End() + + time.Sleep(100 * time.Millisecond) + + if err := d.beacon.Synced(ctx); err != nil { + span.SetStatus(codes.Error, err.Error()) + + return "", err + } + + position, err := d.iterator.Next(ctx) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + + return "", err + } + + d.lookAhead(ctx, position.LookAheadEpochs) + + events, err := d.processEpoch(ctx, position.Epoch) + if err != nil { + d.log.WithError(err).WithField("epoch", position.Epoch).Error("Failed to process epoch") + span.SetStatus(codes.Error, err.Error()) + + return "", err + } + + span.AddEvent("Epoch processing complete. Sending events...") + + for _, fn := range d.onEventsCallbacks { + if err := fn(ctx, events); err != nil { + span.SetStatus(codes.Error, err.Error()) + + return "", errors.Wrap(err, "failed to send events") + } + } + + span.AddEvent("Events sent. Updating location...") + + if err := d.iterator.UpdateLocation(ctx, position); err != nil { + span.SetStatus(codes.Error, err.Error()) + + return "", err + } + + span.AddEvent("Location updated. 
Done.") + bo.Reset() + + return "", nil + } + + retryOpts := []backoff.RetryOption{ + backoff.WithBackOff(bo), + backoff.WithNotify(func(err error, timer time.Duration) { + d.log.WithError(err).WithField("next_attempt", timer).Warn("Failed to process") + }), + } + if _, err := backoff.Retry(rctx, operation, retryOpts...); err != nil { + d.log.WithError(err).Warn("Failed to process") + } + } + } +} + +func (d *GenericDeriver) lookAhead(ctx context.Context, epochs []phase0.Epoch) { + _, span := observability.Tracer().Start(ctx, d.Name()+".lookAhead") + defer span.End() + + sp, err := d.beacon.Node().Spec() + if err != nil { + d.log.WithError(err).Warn("Failed to look ahead at epoch") + + return + } + + for _, epoch := range epochs { + for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { + slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) + d.beacon.LazyLoadBeaconBlock(xatuethv1.SlotAsString(slot)) + } + } +} + +func (d *GenericDeriver) processEpoch(ctx context.Context, epoch phase0.Epoch) ([]*xatu.DecoratedEvent, error) { + ctx, span := observability.Tracer().Start(ctx, + d.Name()+".processEpoch", + //nolint:gosec // epoch numbers won't exceed int64 max in practice + trace.WithAttributes(attribute.Int64("epoch", int64(epoch))), + ) + defer span.End() + + switch d.spec.Mode { + case ProcessingModeSlot: + return d.processEpochBySlot(ctx, epoch) + case ProcessingModeEpoch: + return d.spec.EpochProcessor(ctx, epoch, d.beacon, d.ctx) + default: + return nil, fmt.Errorf("unknown processing mode: %d", d.spec.Mode) + } +} + +func (d *GenericDeriver) processEpochBySlot( + ctx context.Context, + epoch phase0.Epoch, +) ([]*xatu.DecoratedEvent, error) { + sp, err := d.beacon.Node().Spec() + if err != nil { + return nil, errors.Wrap(err, "failed to obtain spec") + } + + allEvents := make([]*xatu.DecoratedEvent, 0) + + for i := uint64(0); i <= uint64(sp.SlotsPerEpoch-1); i++ { + slot := phase0.Slot(i + uint64(epoch)*uint64(sp.SlotsPerEpoch)) + + events, err := 
d.processSlot(ctx, slot) + if err != nil { + return nil, errors.Wrapf(err, "failed to process slot %d", slot) + } + + allEvents = append(allEvents, events...) + } + + return allEvents, nil +} + +func (d *GenericDeriver) processSlot(ctx context.Context, slot phase0.Slot) ([]*xatu.DecoratedEvent, error) { + ctx, span := observability.Tracer().Start(ctx, + d.Name()+".processSlot", + //nolint:gosec // slot numbers won't exceed int64 max in practice + trace.WithAttributes(attribute.Int64("slot", int64(slot))), + ) + defer span.End() + + block, err := d.beacon.GetBeaconBlock(ctx, xatuethv1.SlotAsString(slot)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get beacon block for slot %d", slot) + } + + if block == nil { + return []*xatu.DecoratedEvent{}, nil + } + + blockIdentifier, err := GetBlockIdentifier(block, d.ctx.Wallclock()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get block identifier for slot %d", slot) + } + + return d.spec.BlockExtractor(ctx, block, blockIdentifier, d.beacon, d.ctx) +} + +// Verify GenericDeriver implements the EventDeriver interface. +var _ EventDeriver = (*GenericDeriver)(nil) diff --git a/pkg/cldata/deriver/interface.go b/pkg/cldata/deriver/interface.go new file mode 100644 index 000000000..1b6b14d5a --- /dev/null +++ b/pkg/cldata/deriver/interface.go @@ -0,0 +1,36 @@ +// Package deriver provides shared interfaces for consensus layer data derivers. +// These interfaces are used by both Cannon (historical backfill) and Horizon (real-time) modules. +package deriver + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// EventDeriver defines the interface for deriving events from consensus layer data. +// Implementations process beacon chain data and emit decorated events. +type EventDeriver interface { + // Start begins the deriver's processing loop. + // It should block until the context is cancelled or an error occurs. 
+ Start(ctx context.Context) error + + // Stop gracefully shuts down the deriver. + Stop(ctx context.Context) error + + // Name returns a human-readable identifier for the deriver. + Name() string + + // CannonType returns the CannonType that identifies the type of events this deriver produces. + // Note: For Horizon derivers, this maps to the corresponding HorizonType. + CannonType() xatu.CannonType + + // OnEventsDerived registers a callback to be invoked when events are derived. + // Multiple callbacks can be registered and will be called in order. + OnEventsDerived(ctx context.Context, fn func(ctx context.Context, events []*xatu.DecoratedEvent) error) + + // ActivationFork returns the fork version at which this deriver becomes active. + // Derivers should not process data from before their activation fork. + ActivationFork() spec.DataVersion +} diff --git a/pkg/cldata/deriver/registry.go b/pkg/cldata/deriver/registry.go new file mode 100644 index 000000000..192ba1a48 --- /dev/null +++ b/pkg/cldata/deriver/registry.go @@ -0,0 +1,94 @@ +// Package deriver provides shared interfaces and a registry-based implementation +// for consensus layer data derivers. The registry pattern allows declarative +// definition of derivers, eliminating boilerplate code. +package deriver + +import ( + "context" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// ProcessingMode defines how a deriver processes data. +type ProcessingMode int + +const ( + // ProcessingModeSlot processes data slot-by-slot within an epoch. + // Used for block-based derivers that extract data from beacon blocks. + ProcessingModeSlot ProcessingMode = iota + + // ProcessingModeEpoch processes data at the epoch level. + // Used for derivers that fetch epoch-level data (committees, duties, etc.). 
+ ProcessingModeEpoch +) + +// BlockExtractor extracts items from a beacon block for a slot-based deriver. +// Returns a slice of items that will each become a decorated event. +type BlockExtractor func( + ctx context.Context, + block *spec.VersionedSignedBeaconBlock, + blockID *xatu.BlockIdentifier, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) + +// EpochProcessor processes an entire epoch for epoch-based derivers. +// Returns all decorated events for the epoch. +type EpochProcessor func( + ctx context.Context, + epoch phase0.Epoch, + beacon cldata.BeaconClient, + ctxProvider cldata.ContextProvider, +) ([]*xatu.DecoratedEvent, error) + +// DeriverSpec defines the specification for a deriver type. +// This enables declarative registration of derivers without boilerplate. +type DeriverSpec struct { + // Name is the human-readable name for logging. + Name string + + // CannonType identifies the type of events this deriver produces. + CannonType xatu.CannonType + + // ActivationFork is the fork at which this deriver becomes active. + ActivationFork spec.DataVersion + + // Mode determines how this deriver processes data. + Mode ProcessingMode + + // BlockExtractor extracts and creates events from a block (for slot mode). + // Must be set if Mode is ProcessingModeSlot. + BlockExtractor BlockExtractor + + // EpochProcessor processes an epoch (for epoch mode). + // Must be set if Mode is ProcessingModeEpoch. + EpochProcessor EpochProcessor +} + +// registry holds all registered deriver specifications. +var registry = make(map[xatu.CannonType]*DeriverSpec) + +// Register adds a deriver specification to the registry. +func Register(s *DeriverSpec) { + registry[s.CannonType] = s +} + +// Get retrieves a deriver specification by its cannon type. +func Get(cannonType xatu.CannonType) (*DeriverSpec, bool) { + s, ok := registry[cannonType] + + return s, ok +} + +// All returns all registered deriver specifications. 
+func All() []*DeriverSpec { + specs := make([]*DeriverSpec, 0, len(registry)) + for _, spec := range registry { + specs = append(specs, spec) + } + + return specs +} diff --git a/pkg/cldata/iterator/interface.go b/pkg/cldata/iterator/interface.go new file mode 100644 index 000000000..326e8bdce --- /dev/null +++ b/pkg/cldata/iterator/interface.go @@ -0,0 +1,58 @@ +// Package iterator provides shared interfaces for position tracking iterators. +// These interfaces abstract the position management for both Cannon (epoch-based) +// and Horizon (slot-based) modules. +package iterator + +import ( + "context" + "errors" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +var ( + // ErrLocationUpToDate is returned when there is no new position to process. + ErrLocationUpToDate = errors.New("location up to date") +) + +// Direction indicates the processing direction of the iterator. +type Direction string + +const ( + // DirectionForward processes positions moving forward (toward head). + DirectionForward Direction = "forward" + // DirectionBackward processes positions moving backward (backfill). + DirectionBackward Direction = "backward" +) + +// Position represents a position in the beacon chain that can be processed. +type Position struct { + // Slot is the slot number to process (used by slot-based iterators like Horizon). + Slot phase0.Slot + // Epoch is the epoch number to process (used by epoch-based iterators like Cannon). + Epoch phase0.Epoch + // LookAheadEpochs contains upcoming epochs for pre-fetching optimization. + // Used to preload blocks for entire epochs ahead of current processing. + LookAheadEpochs []phase0.Epoch + // Direction indicates whether this is forward or backward processing. + Direction Direction +} + +// Iterator defines the interface for tracking and managing processing positions. 
+// It handles communication with the coordinator to persist progress and provides +// the next position to process. +type Iterator interface { + // Start initializes the iterator with the activation fork version. + // It should be called before Next() or UpdateLocation(). + Start(ctx context.Context, activationFork spec.DataVersion) error + + // Next returns the next position to process. + // It blocks until a position is available or returns ErrLocationUpToDate + // when caught up to head. + Next(ctx context.Context) (*Position, error) + + // UpdateLocation persists the current position after successful processing. + // This should be called after events have been successfully derived and sent. + UpdateLocation(ctx context.Context, position *Position) error +} diff --git a/pkg/horizon/block_broadcaster.go b/pkg/horizon/block_broadcaster.go new file mode 100644 index 000000000..0d7a3ca15 --- /dev/null +++ b/pkg/horizon/block_broadcaster.go @@ -0,0 +1,113 @@ +package horizon + +import ( + "context" + "sync" + + "github.com/ethpandaops/xatu/pkg/horizon/cache" + "github.com/ethpandaops/xatu/pkg/horizon/subscription" + "github.com/sirupsen/logrus" +) + +// BlockEventBroadcaster deduplicates block events and fan-outs to subscribers. +type BlockEventBroadcaster struct { + log logrus.FieldLogger + dedup *cache.DedupCache + input <-chan subscription.BlockEvent + bufferSize int + + mu sync.RWMutex + subscribers []chan subscription.BlockEvent + + done chan struct{} + wg sync.WaitGroup +} + +// NewBlockEventBroadcaster creates a new broadcaster. 
+func NewBlockEventBroadcaster( + log logrus.FieldLogger, + dedup *cache.DedupCache, + input <-chan subscription.BlockEvent, + bufferSize int, +) *BlockEventBroadcaster { + if bufferSize <= 0 { + bufferSize = 1000 + } + + return &BlockEventBroadcaster{ + log: log.WithField("component", "block_broadcaster"), + dedup: dedup, + input: input, + bufferSize: bufferSize, + done: make(chan struct{}), + } +} + +// Subscribe returns a channel that receives deduplicated block events. +func (b *BlockEventBroadcaster) Subscribe() <-chan subscription.BlockEvent { + ch := make(chan subscription.BlockEvent, b.bufferSize) + + b.mu.Lock() + b.subscribers = append(b.subscribers, ch) + b.mu.Unlock() + + return ch +} + +// Start begins processing incoming block events. +func (b *BlockEventBroadcaster) Start(ctx context.Context) { + b.wg.Add(1) + + go func() { + defer b.wg.Done() + + for { + select { + case <-ctx.Done(): + return + case <-b.done: + return + case event, ok := <-b.input: + if !ok { + return + } + + // Deduplicate by block root. + if b.dedup.Check(event.BlockRoot.String()) { + continue + } + + b.mu.RLock() + subscribers := append([]chan subscription.BlockEvent(nil), b.subscribers...) + b.mu.RUnlock() + + for i, subscriber := range subscribers { + select { + case subscriber <- event: + default: + b.log.WithFields(logrus.Fields{ + "slot": event.Slot, + "subscriber": i, + "block_root": event.BlockRoot.String(), + "event_node": event.NodeName, + "buffer_size": b.bufferSize, + }).Warn("Block event subscriber channel full, dropping event") + } + } + } + } + }() +} + +// Stop stops the broadcaster and closes subscriber channels. 
+func (b *BlockEventBroadcaster) Stop() { + close(b.done) + b.wg.Wait() + + b.mu.Lock() + for _, subscriber := range b.subscribers { + close(subscriber) + } + b.subscribers = nil + b.mu.Unlock() +} diff --git a/pkg/horizon/cache/dedup.go b/pkg/horizon/cache/dedup.go new file mode 100644 index 000000000..324cd8322 --- /dev/null +++ b/pkg/horizon/cache/dedup.go @@ -0,0 +1,143 @@ +package cache + +import ( + "time" + + "github.com/jellydator/ttlcache/v3" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // DefaultTTL is the default TTL for block deduplication. + // Set to 13 minutes to cover slightly more than 1 epoch (6.4 minutes) + // to handle delayed events from multiple beacon nodes. + DefaultTTL = 13 * time.Minute +) + +// DedupCache is a TTL-based cache for deduplicating block events by block root. +// It tracks whether a block root has been seen before to prevent duplicate processing. +type DedupCache struct { + cache *ttlcache.Cache[string, time.Time] + ttl time.Duration + metrics *Metrics +} + +// Config holds configuration for the deduplication cache. +type Config struct { + // TTL is the time-to-live for cached entries. + // After this duration, entries are automatically evicted. + TTL time.Duration `yaml:"ttl" default:"13m"` +} + +// Validate validates the configuration. +func (c *Config) Validate() error { + if c.TTL <= 0 { + c.TTL = DefaultTTL + } + + return nil +} + +// Metrics holds Prometheus metrics for the deduplication cache. +type Metrics struct { + hitsTotal prometheus.Counter + missesTotal prometheus.Counter + cacheSize prometheus.Gauge +} + +// NewMetrics creates a new Metrics instance for the dedup cache. 
+func NewMetrics(namespace string) *Metrics { + m := &Metrics{ + hitsTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "dedup_hits_total", + Help: "Total number of deduplication cache hits (duplicate blocks dropped)", + }), + missesTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "dedup_misses_total", + Help: "Total number of deduplication cache misses (new blocks processed)", + }), + cacheSize: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "dedup_cache_size", + Help: "Current number of entries in the deduplication cache", + }), + } + + prometheus.MustRegister( + m.hitsTotal, + m.missesTotal, + m.cacheSize, + ) + + return m +} + +// New creates a new DedupCache with the given configuration and metrics namespace. +func New(cfg *Config, namespace string) *DedupCache { + ttl := cfg.TTL + if ttl <= 0 { + ttl = DefaultTTL + } + + cache := ttlcache.New( + ttlcache.WithTTL[string, time.Time](ttl), + ) + + return &DedupCache{ + cache: cache, + ttl: ttl, + metrics: NewMetrics(namespace), + } +} + +// Start begins the cache cleanup goroutine. +// This should be called once when the cache is ready to be used. +func (d *DedupCache) Start() { + go d.cache.Start() +} + +// Stop stops the cache cleanup goroutine. +func (d *DedupCache) Stop() { + d.cache.Stop() +} + +// Check checks if a block root has been seen before. +// Returns true if the block root was already seen (duplicate), +// returns false if the block root is new (first occurrence). +// If the block root is new, it is automatically added to the cache. 
+func (d *DedupCache) Check(blockRoot string) bool { + // Try to get the existing entry + item := d.cache.Get(blockRoot) + if item != nil { + // Block root was already seen - this is a duplicate + d.metrics.hitsTotal.Inc() + + return true + } + + // Block root is new - add it to the cache + d.cache.Set(blockRoot, time.Now(), d.ttl) + d.metrics.missesTotal.Inc() + d.metrics.cacheSize.Set(float64(d.cache.Len())) + + return false +} + +// Size returns the current number of entries in the cache. +func (d *DedupCache) Size() int { + return d.cache.Len() +} + +// Delete removes a block root from the cache. +// This is used when a chain reorg is detected and slots need to be re-processed. +func (d *DedupCache) Delete(blockRoot string) { + d.cache.Delete(blockRoot) + d.metrics.cacheSize.Set(float64(d.cache.Len())) +} + +// TTL returns the configured TTL for cache entries. +func (d *DedupCache) TTL() time.Duration { + return d.ttl +} diff --git a/pkg/horizon/config.go b/pkg/horizon/config.go new file mode 100644 index 000000000..897e11b2d --- /dev/null +++ b/pkg/horizon/config.go @@ -0,0 +1,181 @@ +package horizon + +import ( + "errors" + "fmt" + + "github.com/ethpandaops/xatu/pkg/horizon/cache" + "github.com/ethpandaops/xatu/pkg/horizon/coordinator" + "github.com/ethpandaops/xatu/pkg/horizon/deriver" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/ethpandaops/xatu/pkg/horizon/iterator" + "github.com/ethpandaops/xatu/pkg/horizon/subscription" + "github.com/ethpandaops/xatu/pkg/observability" + "github.com/ethpandaops/xatu/pkg/output" + "github.com/ethpandaops/xatu/pkg/processor" + "github.com/sirupsen/logrus" +) + +type Config struct { + LoggingLevel string `yaml:"logging" default:"info"` + MetricsAddr string `yaml:"metricsAddr" default:":9090"` + PProfAddr *string `yaml:"pprofAddr"` + + // The name of the horizon instance + Name string `yaml:"name"` + + // Ethereum configuration (beacon node pool) + Ethereum ethereum.Config `yaml:"ethereum"` + + // 
Coordinator configuration for tracking processing locations + Coordinator coordinator.Config `yaml:"coordinator"` + + // Outputs configuration + Outputs []output.Config `yaml:"outputs"` + + // Labels configures the horizon with labels + Labels map[string]string `yaml:"labels"` + + // NTP Server to use for clock drift correction + NTPServer string `yaml:"ntpServer" default:"time.google.com"` + + // Tracing configuration + Tracing observability.TracingConfig `yaml:"tracing"` + + // Derivers configuration + Derivers deriver.Config `yaml:"derivers"` + + // DedupCache configuration for block event deduplication + DedupCache cache.Config `yaml:"dedupCache"` + + // Subscription configuration for SSE block events + Subscription subscription.Config `yaml:"subscription"` + + // Reorg configuration for chain reorg handling + Reorg subscription.ReorgConfig `yaml:"reorg"` + + // Iterator configuration for head/fill behavior + Iterators iterator.CoordinatorConfig `yaml:"iterators"` + + // EpochIterator configuration for epoch-based derivers + EpochIterator iterator.EpochIteratorConfig `yaml:"epochIterator"` +} + +func (c *Config) Validate() error { + if c.Name == "" { + return errors.New("name is required") + } + + if err := c.Ethereum.Validate(); err != nil { + return fmt.Errorf("invalid ethereum config: %w", err) + } + + if err := c.Coordinator.Validate(); err != nil { + return fmt.Errorf("invalid coordinator config: %w", err) + } + + if len(c.Outputs) == 0 { + return errors.New("at least one output sink is required") + } + + for _, out := range c.Outputs { + if err := out.Validate(); err != nil { + return fmt.Errorf("invalid output config %s: %w", out.Name, err) + } + } + + if err := c.Tracing.Validate(); err != nil { + return fmt.Errorf("invalid tracing config: %w", err) + } + + if err := c.Derivers.Validate(); err != nil { + return fmt.Errorf("invalid derivers config: %w", err) + } + + if err := c.DedupCache.Validate(); err != nil { + return fmt.Errorf("invalid dedup cache 
config: %w", err) + } + + if err := c.Subscription.Validate(); err != nil { + return fmt.Errorf("invalid subscription config: %w", err) + } + + if err := c.Reorg.Validate(); err != nil { + return fmt.Errorf("invalid reorg config: %w", err) + } + + if err := c.Iterators.Validate(); err != nil { + return fmt.Errorf("invalid iterator config: %w", err) + } + + if err := c.EpochIterator.Validate(); err != nil { + return fmt.Errorf("invalid epoch iterator config: %w", err) + } + + return nil +} + +func (c *Config) CreateSinks(log logrus.FieldLogger) ([]output.Sink, error) { + sinks := make([]output.Sink, len(c.Outputs)) + + for i, out := range c.Outputs { + if out.ShippingMethod == nil { + shippingMethod := processor.ShippingMethodSync + + out.ShippingMethod = &shippingMethod + } + + sink, err := output.NewSink(out.Name, + out.SinkType, + out.Config, + log, + out.FilterConfig, + *out.ShippingMethod, + ) + if err != nil { + return nil, err + } + + sinks[i] = sink + } + + return sinks, nil +} + +func (c *Config) ApplyOverrides(o *Override, log logrus.FieldLogger) error { + if o == nil { + return nil + } + + if o.MetricsAddr.Enabled { + log.WithField("address", o.MetricsAddr.Value).Info("Overriding metrics address") + + c.MetricsAddr = o.MetricsAddr.Value + } + + if o.CoordinatorAuth.Enabled { + log.Info("Overriding coordinator authorization header") + + if c.Coordinator.Headers == nil { + c.Coordinator.Headers = make(map[string]string) + } + + c.Coordinator.Headers["Authorization"] = o.CoordinatorAuth.Value + } + + if o.BeaconNodeURLs.Enabled { + log.Info("Overriding beacon node URLs") + } + + if o.BeaconNodeHeaders.Enabled { + log.Info("Overriding beacon node authorization headers") + } + + if o.NetworkName.Enabled { + log.WithField("network", o.NetworkName.Value).Info("Overriding network name") + } + + o.ApplyBeaconNodeOverrides(&c.Ethereum) + + return nil +} diff --git a/pkg/horizon/coordinator/client.go b/pkg/horizon/coordinator/client.go new file mode 100644 index 
000000000..8f66d4d3f --- /dev/null +++ b/pkg/horizon/coordinator/client.go @@ -0,0 +1,116 @@ +package coordinator + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" +) + +// Client is a gRPC client for the coordinator service. +type Client struct { + config *Config + log logrus.FieldLogger + + conn *grpc.ClientConn + pb xatu.CoordinatorClient +} + +// New creates a new coordinator client. +func New(config *Config, log logrus.FieldLogger) (*Client, error) { + if config == nil { + return nil, errors.New("config is required") + } + + if err := config.Validate(); err != nil { + return nil, err + } + + var opts []grpc.DialOption + + if config.TLS { + host, _, err := net.SplitHostPort(config.Address) + if err != nil { + return nil, fmt.Errorf("fail to get host from address: %w", err) + } + + opts = append(opts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, host))) + } else { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + conn, err := grpc.NewClient(config.Address, opts...) + if err != nil { + return nil, fmt.Errorf("fail to create client: %w", err) + } + + pbClient := xatu.NewCoordinatorClient(conn) + + return &Client{ + config: config, + log: log.WithField("component", "coordinator"), + conn: conn, + pb: pbClient, + }, nil +} + +// Start starts the coordinator client. +func (c *Client) Start(ctx context.Context) error { + return nil +} + +// Stop stops the coordinator client and closes the connection. +func (c *Client) Stop(ctx context.Context) error { + if err := c.conn.Close(); err != nil { + return err + } + + return nil +} + +// GetHorizonLocation retrieves the horizon location for a given type and network. 
+func (c *Client) GetHorizonLocation( + ctx context.Context, + typ xatu.HorizonType, + networkID string, +) (*xatu.HorizonLocation, error) { + req := xatu.GetHorizonLocationRequest{ + Type: typ, + NetworkId: networkID, + } + + md := metadata.New(c.config.Headers) + ctx = metadata.NewOutgoingContext(ctx, md) + + res, err := c.pb.GetHorizonLocation(ctx, &req, grpc.UseCompressor(gzip.Name)) + if err != nil { + return nil, err + } + + return res.Location, nil +} + +// UpsertHorizonLocation creates or updates a horizon location. +func (c *Client) UpsertHorizonLocation(ctx context.Context, location *xatu.HorizonLocation) error { + req := xatu.UpsertHorizonLocationRequest{ + Location: location, + } + + md := metadata.New(c.config.Headers) + ctx = metadata.NewOutgoingContext(ctx, md) + + _, err := c.pb.UpsertHorizonLocation(ctx, &req, grpc.UseCompressor(gzip.Name)) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/horizon/coordinator/config.go b/pkg/horizon/coordinator/config.go new file mode 100644 index 000000000..01f5bd861 --- /dev/null +++ b/pkg/horizon/coordinator/config.go @@ -0,0 +1,24 @@ +package coordinator + +import ( + "errors" +) + +// Config holds the configuration for the coordinator client. +type Config struct { + // Address is the gRPC address of the coordinator server. + Address string `yaml:"address"` + // Headers are optional headers to send with each request. + Headers map[string]string `yaml:"headers"` + // TLS enables TLS for the gRPC connection. + TLS bool `yaml:"tls" default:"false"` +} + +// Validate validates the coordinator configuration. 
+func (c *Config) Validate() error { + if c.Address == "" { + return errors.New("address is required") + } + + return nil +} diff --git a/pkg/horizon/deriver/adapters.go b/pkg/horizon/deriver/adapters.go new file mode 100644 index 000000000..9c7b1205c --- /dev/null +++ b/pkg/horizon/deriver/adapters.go @@ -0,0 +1,193 @@ +package deriver + +import ( + "context" + "runtime" + + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/beacon/pkg/beacon" + "github.com/ethpandaops/ethwallclock" + "github.com/ethpandaops/xatu/pkg/cldata" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/google/uuid" +) + +// BeaconClientAdapter wraps the Horizon's BeaconNodePool to implement cldata.BeaconClient. +type BeaconClientAdapter struct { + pool *ethereum.BeaconNodePool +} + +// NewBeaconClientAdapter creates a new BeaconClientAdapter. +func NewBeaconClientAdapter(pool *ethereum.BeaconNodePool) *BeaconClientAdapter { + return &BeaconClientAdapter{pool: pool} +} + +// GetBeaconBlock retrieves a beacon block by its identifier. +func (a *BeaconClientAdapter) GetBeaconBlock(ctx context.Context, identifier string) (*spec.VersionedSignedBeaconBlock, error) { + return a.pool.GetBeaconBlock(ctx, identifier) +} + +// LazyLoadBeaconBlock queues a block for background preloading. +func (a *BeaconClientAdapter) LazyLoadBeaconBlock(identifier string) { + a.pool.LazyLoadBeaconBlock(identifier) +} + +// Synced checks if the beacon node pool has at least one synced node. +func (a *BeaconClientAdapter) Synced(ctx context.Context) error { + return a.pool.Synced(ctx) +} + +// Node returns the underlying beacon node (uses first healthy node). 
+func (a *BeaconClientAdapter) Node() beacon.Node { + wrapper, err := a.pool.GetHealthyNode() + if err != nil { + return nil + } + + return wrapper.Node() +} + +// FetchBeaconBlockBlobs retrieves blob sidecars for a given block identifier. +func (a *BeaconClientAdapter) FetchBeaconBlockBlobs(ctx context.Context, identifier string) ([]*deneb.BlobSidecar, error) { + wrapper, err := a.pool.GetHealthyNode() + if err != nil { + return nil, err + } + + return wrapper.Node().FetchBeaconBlockBlobs(ctx, identifier) +} + +// FetchBeaconCommittee retrieves the beacon committees for a given epoch. +func (a *BeaconClientAdapter) FetchBeaconCommittee(ctx context.Context, epoch phase0.Epoch) ([]*v1.BeaconCommittee, error) { + return a.pool.Duties().FetchBeaconCommittee(ctx, epoch) +} + +// GetValidatorIndex looks up a validator index from the committee for a given position. +func (a *BeaconClientAdapter) GetValidatorIndex( + ctx context.Context, + epoch phase0.Epoch, + slot phase0.Slot, + committeeIndex phase0.CommitteeIndex, + position uint64, +) (phase0.ValidatorIndex, error) { + return a.pool.Duties().GetValidatorIndex(ctx, epoch, slot, committeeIndex, position) +} + +// FetchProposerDuties retrieves the proposer duties for a given epoch. +func (a *BeaconClientAdapter) FetchProposerDuties(ctx context.Context, epoch phase0.Epoch) ([]*v1.ProposerDuty, error) { + wrapper, err := a.pool.GetHealthyNode() + if err != nil { + return nil, err + } + + return wrapper.Node().FetchProposerDuties(ctx, epoch) +} + +// GetValidators retrieves validators for a given state identifier. +// Note: Horizon doesn't cache validators like Cannon does. This is a direct fetch. +func (a *BeaconClientAdapter) GetValidators(ctx context.Context, identifier string) (map[phase0.ValidatorIndex]*v1.Validator, error) { + wrapper, err := a.pool.GetHealthyNode() + if err != nil { + return nil, err + } + + // Pass nil for validatorIndices and pubkeys to fetch all validators. 
+ return wrapper.Node().FetchValidators(ctx, identifier, nil, nil) +} + +// LazyLoadValidators is a no-op for Horizon (no validator caching). +func (a *BeaconClientAdapter) LazyLoadValidators(_ string) { + // Horizon doesn't cache validators - blocks are already cached and validators + // are fetched on-demand. +} + +// DeleteValidatorsFromCache is a no-op for Horizon (no validator caching). +func (a *BeaconClientAdapter) DeleteValidatorsFromCache(_ string) { + // Horizon doesn't cache validators. +} + +// Verify BeaconClientAdapter implements cldata.BeaconClient. +var _ cldata.BeaconClient = (*BeaconClientAdapter)(nil) + +// ContextProviderAdapter wraps Horizon's metadata creation to implement cldata.ContextProvider. +type ContextProviderAdapter struct { + id uuid.UUID + name string + networkName string + networkID uint64 + wallclock *ethwallclock.EthereumBeaconChain + depositChainID uint64 + labels map[string]string +} + +// NewContextProviderAdapter creates a new ContextProviderAdapter. +func NewContextProviderAdapter( + id uuid.UUID, + name string, + networkName string, + networkID uint64, + wallclock *ethwallclock.EthereumBeaconChain, + depositChainID uint64, + labels map[string]string, +) *ContextProviderAdapter { + return &ContextProviderAdapter{ + id: id, + name: name, + networkName: networkName, + networkID: networkID, + wallclock: wallclock, + depositChainID: depositChainID, + labels: labels, + } +} + +// CreateClientMeta creates the client metadata for events. +// Unlike Cannon which pre-builds metadata, Horizon creates it fresh for each call +// to ensure accurate timestamps. 
+func (a *ContextProviderAdapter) CreateClientMeta(_ context.Context) (*xatu.ClientMeta, error) { + return &xatu.ClientMeta{ + Name: a.name, + Version: xatu.Short(), + Id: a.id.String(), + Implementation: xatu.Implementation, + Os: runtime.GOOS, + ModuleName: xatu.ModuleName_HORIZON, + ClockDrift: 0, // Horizon doesn't track clock drift currently + Ethereum: &xatu.ClientMeta_Ethereum{ + Network: &xatu.ClientMeta_Ethereum_Network{ + Name: a.networkName, + Id: a.networkID, + }, + Execution: &xatu.ClientMeta_Ethereum_Execution{}, + Consensus: &xatu.ClientMeta_Ethereum_Consensus{}, + }, + Labels: a.labels, + }, nil +} + +// NetworkName returns the network name. +func (a *ContextProviderAdapter) NetworkName() string { + return a.networkName +} + +// NetworkID returns the network ID. +func (a *ContextProviderAdapter) NetworkID() uint64 { + return a.networkID +} + +// Wallclock returns the Ethereum wallclock. +func (a *ContextProviderAdapter) Wallclock() *ethwallclock.EthereumBeaconChain { + return a.wallclock +} + +// DepositChainID returns the execution layer chain ID. +func (a *ContextProviderAdapter) DepositChainID() uint64 { + return a.depositChainID +} + +// Verify ContextProviderAdapter implements cldata.ContextProvider. +var _ cldata.ContextProvider = (*ContextProviderAdapter)(nil) diff --git a/pkg/horizon/deriver/config.go b/pkg/horizon/deriver/config.go new file mode 100644 index 000000000..ea232102e --- /dev/null +++ b/pkg/horizon/deriver/config.go @@ -0,0 +1,58 @@ +package deriver + +// Config holds configuration for all Horizon derivers. 
+type Config struct { + // Block-based derivers (real-time processing via HEAD iterator) + BeaconBlockConfig DeriverConfig `yaml:"beaconBlock"` + AttesterSlashingConfig DeriverConfig `yaml:"attesterSlashing"` + ProposerSlashingConfig DeriverConfig `yaml:"proposerSlashing"` + DepositConfig DeriverConfig `yaml:"deposit"` + WithdrawalConfig DeriverConfig `yaml:"withdrawal"` + VoluntaryExitConfig DeriverConfig `yaml:"voluntaryExit"` + BLSToExecutionChangeConfig DeriverConfig `yaml:"blsToExecutionChange"` + ExecutionTransactionConfig DeriverConfig `yaml:"executionTransaction"` + ElaboratedAttestationConfig DeriverConfig `yaml:"elaboratedAttestation"` + + // Epoch-based derivers (triggered midway through epoch via Epoch iterator) + ProposerDutyConfig DeriverConfig `yaml:"proposerDuty"` + BeaconBlobConfig DeriverConfig `yaml:"beaconBlob"` + BeaconValidatorsConfig BeaconValidatorsConfig `yaml:"beaconValidators"` + BeaconCommitteeConfig DeriverConfig `yaml:"beaconCommittee"` +} + +// DeriverConfig is the common configuration for a deriver. +type DeriverConfig struct { + Enabled bool `yaml:"enabled" default:"true"` +} + +// BeaconValidatorsConfig is the configuration for the beacon validators deriver. +type BeaconValidatorsConfig struct { + Enabled bool `yaml:"enabled" default:"true"` + ChunkSize int `yaml:"chunkSize" default:"100"` +} + +// DefaultConfig returns a Config with sensible defaults. +func DefaultConfig() *Config { + return &Config{ + // Block-based derivers. 
+ BeaconBlockConfig: DeriverConfig{Enabled: true}, + AttesterSlashingConfig: DeriverConfig{Enabled: true}, + ProposerSlashingConfig: DeriverConfig{Enabled: true}, + DepositConfig: DeriverConfig{Enabled: true}, + WithdrawalConfig: DeriverConfig{Enabled: true}, + VoluntaryExitConfig: DeriverConfig{Enabled: true}, + BLSToExecutionChangeConfig: DeriverConfig{Enabled: true}, + ExecutionTransactionConfig: DeriverConfig{Enabled: true}, + ElaboratedAttestationConfig: DeriverConfig{Enabled: true}, + // Epoch-based derivers. + ProposerDutyConfig: DeriverConfig{Enabled: true}, + BeaconBlobConfig: DeriverConfig{Enabled: true}, + BeaconValidatorsConfig: BeaconValidatorsConfig{Enabled: true, ChunkSize: 100}, + BeaconCommitteeConfig: DeriverConfig{Enabled: true}, + } +} + +// Validate validates the config. +func (c *Config) Validate() error { + return nil +} diff --git a/pkg/horizon/deriver_mapping.go b/pkg/horizon/deriver_mapping.go new file mode 100644 index 000000000..447156209 --- /dev/null +++ b/pkg/horizon/deriver_mapping.go @@ -0,0 +1,70 @@ +package horizon + +import ( + "github.com/ethpandaops/xatu/pkg/cldata/deriver" + horizonderiver "github.com/ethpandaops/xatu/pkg/horizon/deriver" + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// cannonToHorizonType maps CannonType to HorizonType for iterator creation. 
+var cannonToHorizonType = map[xatu.CannonType]xatu.HorizonType{ + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION, + xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION, + xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_PROPOSER_DUTY, + xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR, + xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_VALIDATORS, + xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE: xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_COMMITTEE, +} + +// GetHorizonType returns the HorizonType for a given CannonType. 
+func GetHorizonType(cannonType xatu.CannonType) (xatu.HorizonType, bool) { + horizonType, ok := cannonToHorizonType[cannonType] + + return horizonType, ok +} + +// IsDeriverEnabled returns whether a deriver is enabled based on config. +func IsDeriverEnabled(config *horizonderiver.Config, cannonType xatu.CannonType) bool { + switch cannonType { + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK: + return config.BeaconBlockConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING: + return config.AttesterSlashingConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING: + return config.ProposerSlashingConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT: + return config.DepositConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL: + return config.WithdrawalConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT: + return config.VoluntaryExitConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE: + return config.BLSToExecutionChangeConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION: + return config.ExecutionTransactionConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION: + return config.ElaboratedAttestationConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_PROPOSER_DUTY: + return config.ProposerDutyConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR: + return config.BeaconBlobConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_VALIDATORS: + return config.BeaconValidatorsConfig.Enabled + case xatu.CannonType_BEACON_API_ETH_V1_BEACON_COMMITTEE: + return config.BeaconCommitteeConfig.Enabled + default: + return false + } +} + +// IsEpochBased returns whether a deriver spec is epoch-based (vs slot-based). 
+func IsEpochBased(spec *deriver.DeriverSpec) bool { + return spec.Mode == deriver.ProcessingModeEpoch +} diff --git a/pkg/horizon/ethereum/beacon.go b/pkg/horizon/ethereum/beacon.go new file mode 100644 index 000000000..a963b1625 --- /dev/null +++ b/pkg/horizon/ethereum/beacon.go @@ -0,0 +1,662 @@ +package ethereum + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + ehttp "github.com/attestantio/go-eth2-client/http" + "github.com/attestantio/go-eth2-client/spec" + backoff "github.com/cenkalti/backoff/v5" + "github.com/ethpandaops/beacon/pkg/beacon" + "github.com/ethpandaops/xatu/pkg/cannon/ethereum/services" + "github.com/ethpandaops/xatu/pkg/networks" + "github.com/jellydator/ttlcache/v3" + "github.com/sirupsen/logrus" + "golang.org/x/sync/singleflight" +) + +// ErrNoHealthyNodes is returned when no healthy beacon nodes are available. +var ErrNoHealthyNodes = errors.New("no healthy beacon nodes available") + +// NodeState represents the connection state of a beacon node. +type NodeState int + +const ( + // NodeStateDisconnected indicates the node has not connected yet. + NodeStateDisconnected NodeState = iota + // NodeStateConnecting indicates the node is attempting to connect. + NodeStateConnecting + // NodeStateConnected indicates the node is connected but may not be healthy. + NodeStateConnected + // NodeStateReconnecting indicates the node is reconnecting after a failure. + NodeStateReconnecting +) + +// BeaconNodeWrapper wraps a single beacon node with its health status. +type BeaconNodeWrapper struct { + config BeaconNodeConfig + node beacon.Node + healthy bool + state NodeState + mu sync.RWMutex + log logrus.FieldLogger +} + +// IsHealthy returns whether the beacon node is healthy. +func (w *BeaconNodeWrapper) IsHealthy() bool { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.healthy +} + +// SetHealthy sets the health status of the beacon node. 
+func (w *BeaconNodeWrapper) SetHealthy(healthy bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.healthy = healthy +} + +// Name returns the name of the beacon node. +func (w *BeaconNodeWrapper) Name() string { + return w.config.Name +} + +// Node returns the underlying beacon node. +func (w *BeaconNodeWrapper) Node() beacon.Node { + return w.node +} + +// BeaconNodePool manages a pool of beacon nodes with health checking and failover. +type BeaconNodePool struct { + config *Config + log logrus.FieldLogger + metrics *Metrics + + nodes []*BeaconNodeWrapper + mu sync.RWMutex + + // Shared services across all nodes (uses first healthy node) + metadata *services.MetadataService + duties *services.DutiesService + + // Block cache shared across all nodes + sfGroup *singleflight.Group + blockCache *ttlcache.Cache[string, *spec.VersionedSignedBeaconBlock] + blockPreloadChan chan string + blockPreloadSem chan struct{} + + onReadyCallbacks []func(ctx context.Context) error + shutdownChan chan struct{} + wg sync.WaitGroup +} + +// NewBeaconNodePool creates a new BeaconNodePool with the given configuration. 
+func NewBeaconNodePool(_ context.Context, config *Config, log logrus.FieldLogger) (*BeaconNodePool, error) { + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + namespace := "xatu_horizon" + metrics := NewMetrics(namespace) + + pool := &BeaconNodePool{ + config: config, + log: log.WithField("component", "ethereum/beacon_pool"), + metrics: metrics, + nodes: make([]*BeaconNodeWrapper, 0, len(config.BeaconNodes)), + sfGroup: &singleflight.Group{}, + blockPreloadChan: make(chan string, config.BlockPreloadQueueSize), + blockPreloadSem: make(chan struct{}, config.BlockPreloadWorkers), + shutdownChan: make(chan struct{}), + } + + // Create TTL cache for blocks + pool.blockCache = ttlcache.New( + ttlcache.WithTTL[string, *spec.VersionedSignedBeaconBlock](config.BlockCacheTTL.Duration), + ttlcache.WithCapacity[string, *spec.VersionedSignedBeaconBlock](config.BlockCacheSize), + ) + + // Create beacon node wrappers for each configured node + for _, nodeCfg := range config.BeaconNodes { + wrapper, err := pool.createNodeWrapper(nodeCfg) + if err != nil { + return nil, fmt.Errorf("failed to create beacon node %s: %w", nodeCfg.Name, err) + } + + pool.nodes = append(pool.nodes, wrapper) + + metrics.SetBeaconNodeStatus(nodeCfg.Name, BeaconNodeStatusConnecting) + } + + return pool, nil +} + +// createNodeWrapper creates a new BeaconNodeWrapper for the given configuration. +func (p *BeaconNodePool) createNodeWrapper(nodeCfg BeaconNodeConfig) (*BeaconNodeWrapper, error) { + opts := *beacon. + DefaultOptions(). + DisableEmptySlotDetection(). 
+ DisablePrometheusMetrics() + + opts.GoEth2ClientParams = []ehttp.Parameter{ + ehttp.WithEnforceJSON(true), + } + + opts.HealthCheck.Interval.Duration = p.config.HealthCheckInterval.Duration + opts.HealthCheck.SuccessfulResponses = 1 + + // Disable beacon subscriptions - Horizon will handle SSE separately + opts.BeaconSubscription.Enabled = false + + node := beacon.NewNode(p.log, &beacon.Config{ + Name: nodeCfg.Name, + Addr: nodeCfg.Address, + Headers: nodeCfg.Headers, + }, "xatu_horizon", opts) + + return &BeaconNodeWrapper{ + config: nodeCfg, + node: node, + healthy: false, + log: p.log.WithField("beacon_node", nodeCfg.Name), + }, nil +} + +// Start starts the beacon node pool and all its nodes. +func (p *BeaconNodePool) Start(ctx context.Context) error { + p.log.Info("Starting beacon node pool") + + // Start block cache eviction tracking + p.blockCache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[string, *spec.VersionedSignedBeaconBlock]) { + p.log.WithField("identifier", item.Key()).WithField("reason", reason).Trace("Block evicted from cache") + }) + + go p.blockCache.Start() + + // Start block preload workers + for i := uint64(0); i < p.config.BlockPreloadWorkers; i++ { + p.wg.Add(1) + + go func() { + defer p.wg.Done() + + for { + select { + case <-p.shutdownChan: + return + case identifier := <-p.blockPreloadChan: + p.log.WithField("identifier", identifier).Trace("Preloading block") + _, _ = p.GetBeaconBlock(ctx, identifier) + } + } + }() + } + + // Start each beacon node with retry logic + for _, wrapper := range p.nodes { + p.wg.Add(1) + + go func(w *BeaconNodeWrapper) { + defer p.wg.Done() + + p.startNodeWithRetry(ctx, w) + }(wrapper) + } + + // Start health check goroutine + p.wg.Add(1) + + go p.runHealthChecks(ctx) + + // Wait for at least one node to become healthy + if err := p.waitForHealthyNode(ctx); err != nil { + return err + } + + // Initialize shared services using first healthy node + if err := 
p.initializeServices(ctx); err != nil { + return fmt.Errorf("failed to initialize services: %w", err) + } + + // Run on-ready callbacks + for _, callback := range p.onReadyCallbacks { + if err := callback(ctx); err != nil { + return fmt.Errorf("on-ready callback failed: %w", err) + } + } + + p.log.Info("Beacon node pool started") + + return nil +} + +// Stop stops the beacon node pool. +func (p *BeaconNodePool) Stop(_ context.Context) error { + p.log.Info("Stopping beacon node pool") + + close(p.shutdownChan) + p.blockCache.Stop() + p.wg.Wait() + + return nil +} + +// waitForHealthyNode waits for at least one beacon node to become healthy. +func (p *BeaconNodePool) waitForHealthyNode(ctx context.Context) error { + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + timeout := time.NewTimer(p.config.StartupTimeout.Duration) + defer timeout.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-timeout.C: + return ErrNoHealthyNodes + case <-ticker.C: + if _, err := p.GetHealthyNode(); err == nil { + return nil + } + } + } +} + +// runHealthChecks runs periodic health checks on all beacon nodes. +func (p *BeaconNodePool) runHealthChecks(ctx context.Context) { + defer p.wg.Done() + + ticker := time.NewTicker(p.config.HealthCheckInterval.Duration) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-p.shutdownChan: + return + case <-ticker.C: + p.checkAllNodesHealth() + } + } +} + +// checkAllNodesHealth checks the health of all beacon nodes. 
+func (p *BeaconNodePool) checkAllNodesHealth() {
+	for _, wrapper := range p.nodes {
+		start := time.Now()
+
+		healthy := p.checkNodeHealth(wrapper)
+		wrapper.SetHealthy(healthy)
+
+		// Record how long this node's check took, labelled per node.
+		duration := time.Since(start).Seconds()
+		p.metrics.ObserveHealthCheckDuration(wrapper.Name(), duration)
+
+		// Mirror the outcome into both the status gauge and the per-check counter.
+		if healthy {
+			p.metrics.SetBeaconNodeStatus(wrapper.Name(), BeaconNodeStatusHealthy)
+			p.metrics.IncHealthCheck(wrapper.Name(), BeaconNodeStatusHealthy)
+		} else {
+			p.metrics.SetBeaconNodeStatus(wrapper.Name(), BeaconNodeStatusUnhealthy)
+			p.metrics.IncHealthCheck(wrapper.Name(), BeaconNodeStatusUnhealthy)
+		}
+	}
+}
+
+// checkNodeHealth checks if a beacon node is healthy.
+// A node is healthy when its status and sync state are both available and
+// its reported sync distance does not exceed 10.
+func (p *BeaconNodePool) checkNodeHealth(wrapper *BeaconNodeWrapper) bool {
+	status := wrapper.node.Status()
+	if status == nil {
+		p.log.WithField("node", wrapper.Name()).Trace("Node status is nil")
+
+		return false
+	}
+
+	syncState := status.SyncState()
+	if syncState == nil {
+		p.log.WithField("node", wrapper.Name()).Trace("Node sync state is nil")
+
+		return false
+	}
+
+	// Consider healthy if sync distance is reasonable
+	// NOTE(review): 10 is a magic threshold — consider promoting it to
+	// Config alongside HealthCheckInterval so operators can tune it.
+	if syncState.SyncDistance > 10 {
+		p.log.WithField("node", wrapper.Name()).
+			WithField("sync_distance", syncState.SyncDistance).
+			Trace("Node sync distance too high")
+
+		return false
+	}
+
+	return true
+}
+
+// initializeServices initializes shared services using the first healthy node.
+func (p *BeaconNodePool) initializeServices(ctx context.Context) error {
+	healthyWrapper, err := p.GetHealthyNode()
+	if err != nil {
+		return err
+	}
+
+	// Services are bound to the first node that was healthy at startup.
+	// NOTE(review): they are not re-bound if this node later becomes
+	// unhealthy — confirm that is acceptable for metadata/duties.
+	metadata := services.NewMetadataService(p.log, healthyWrapper.node)
+	p.metadata = &metadata
+
+	if p.config.OverrideNetworkName != "" {
+		p.metadata.OverrideNetworkName(p.config.OverrideNetworkName)
+	}
+
+	duties := services.NewDutiesService(p.log, healthyWrapper.node, p.metadata)
+	p.duties = &duties
+
+	// Wait for metadata service to be ready
+	// The OnReady callback must be registered before Start so the ready
+	// signal cannot be missed. The channel is buffered so the callback
+	// never blocks; the callback only ever sends nil, so the non-nil err
+	// branch in the select below is defensive.
+	readyChan := make(chan error, 1)
+
+	p.metadata.OnReady(ctx, func(ctx context.Context) error {
+		readyChan <- nil
+
+		return nil
+	})
+
+	// Start metadata service
+	if err := p.metadata.Start(ctx); err != nil {
+		return fmt.Errorf("failed to start metadata service: %w", err)
+	}
+
+	// NOTE(review): 30s readiness timeout is hardcoded — consider making it
+	// configurable like StartupTimeout.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-readyChan:
+		if err != nil {
+			return err
+		}
+	case <-time.After(30 * time.Second):
+		return errors.New("timeout waiting for metadata service to be ready")
+	}
+
+	// Verify network
+	if p.metadata.Network.Name == networks.NetworkNameUnknown {
+		return errors.New("unknown network detected - please override the network name via config")
+	}
+
+	// Start duties service
+	if err := p.duties.Start(ctx); err != nil {
+		return fmt.Errorf("failed to start duties service: %w", err)
+	}
+
+	p.log.WithField("network", p.metadata.Network.Name).Info("Services initialized")
+
+	return nil
+}
+
+// GetHealthyNode returns any healthy beacon node.
+// It returns the first node (in configuration order) whose health flag is
+// set, or ErrNoHealthyNodes when none qualify.
+func (p *BeaconNodePool) GetHealthyNode() (*BeaconNodeWrapper, error) {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	for _, wrapper := range p.nodes {
+		if wrapper.IsHealthy() {
+			return wrapper, nil
+		}
+	}
+
+	return nil, ErrNoHealthyNodes
+}
+
+// GetAllNodes returns all beacon node wrappers.
+// Note the underlying slice is returned directly, not a copy.
+func (p *BeaconNodePool) GetAllNodes() []*BeaconNodeWrapper {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.nodes
+}
+
+// GetHealthyNodes returns all healthy beacon nodes.
+func (p *BeaconNodePool) GetHealthyNodes() []*BeaconNodeWrapper {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	out := make([]*BeaconNodeWrapper, 0, len(p.nodes))
+
+	for _, w := range p.nodes {
+		if !w.IsHealthy() {
+			continue
+		}
+
+		out = append(out, w)
+	}
+
+	return out
+}
+
+// Metadata returns the shared metadata service.
+func (p *BeaconNodePool) Metadata() *services.MetadataService {
+	return p.metadata
+}
+
+// Duties returns the shared duties service.
+func (p *BeaconNodePool) Duties() *services.DutiesService {
+	return p.duties
+}
+
+// OnReady registers a callback to be called when the pool is ready.
+func (p *BeaconNodePool) OnReady(callback func(ctx context.Context) error) {
+	p.onReadyCallbacks = append(p.onReadyCallbacks, callback)
+}
+
+// Synced checks if the pool has at least one synced beacon node and that the
+// shared metadata and duties services are initialized and ready.
+func (p *BeaconNodePool) Synced(ctx context.Context) error {
+	if _, err := p.GetHealthyNode(); err != nil {
+		return err
+	}
+
+	if p.metadata == nil {
+		return errors.New("metadata service not initialized")
+	}
+
+	if err := p.metadata.Ready(ctx); err != nil {
+		return fmt.Errorf("metadata service not ready: %w", err)
+	}
+
+	if p.duties == nil {
+		return errors.New("duties service not initialized")
+	}
+
+	if err := p.duties.Ready(ctx); err != nil {
+		return fmt.Errorf("duties service not ready: %w", err)
+	}
+
+	return nil
+}
+
+// GetBeaconBlock fetches a beacon block from any healthy node, using cache.
+func (p *BeaconNodePool) GetBeaconBlock(ctx context.Context, identifier string) (*spec.VersionedSignedBeaconBlock, error) {
+	// Check cache first
+	if item := p.blockCache.Get(identifier); item != nil {
+		p.metrics.IncBlockCacheHits(p.networkLabel())
+
+		return item.Value(), nil
+	}
+
+	p.metrics.IncBlockCacheMisses(p.networkLabel())
+
+	// Use singleflight to avoid duplicate requests for the same identifier.
+	result, err, _ := p.sfGroup.Do(identifier, func() (any, error) {
+		// Acquire semaphore to bound concurrent block fetches.
+		p.blockPreloadSem <- struct{}{}
+
+		defer func() { <-p.blockPreloadSem }()
+
+		// Get any healthy node and fetch the block
+		wrapper, err := p.GetHealthyNode()
+		if err != nil {
+			return nil, err
+		}
+
+		p.metrics.IncBlocksFetched(wrapper.Name(), p.networkLabel())
+
+		block, err := wrapper.node.FetchBlock(ctx, identifier)
+		if err != nil {
+			p.metrics.IncBlockFetchErrors(wrapper.Name(), p.networkLabel())
+
+			return nil, fmt.Errorf("failed to fetch block from %s: %w", wrapper.Name(), err)
+		}
+
+		// Cache the block
+		p.blockCache.Set(identifier, block, p.config.BlockCacheTTL.Duration)
+
+		return block, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	block, ok := result.(*spec.VersionedSignedBeaconBlock)
+	if !ok {
+		return nil, errors.New("unexpected result type from singleflight")
+	}
+
+	return block, nil
+}
+
+// networkLabel returns the network name to use as a metric label, or
+// "unknown" when the metadata service has not been initialized yet.
+// This guard matters because Start launches the block preload workers
+// (which call GetBeaconBlock) before initializeServices assigns
+// p.metadata, so an unguarded p.metadata.Network.Name dereference could
+// panic during startup.
+func (p *BeaconNodePool) networkLabel() string {
+	if p.metadata == nil {
+		return "unknown"
+	}
+
+	return string(p.metadata.Network.Name)
+}
+
+// LazyLoadBeaconBlock queues a block for preloading.
+// It never blocks: already-cached identifiers are skipped, and the request
+// is dropped when the preload queue is full.
+func (p *BeaconNodePool) LazyLoadBeaconBlock(identifier string) {
+	// Skip if already cached
+	if item := p.blockCache.Get(identifier); item != nil {
+		return
+	}
+
+	// Non-blocking send to preload channel
+	select {
+	case p.blockPreloadChan <- identifier:
+	default:
+		// Channel full, skip preloading
+	}
+}
+
+// NodeCount returns the total number of configured beacon nodes.
+func (p *BeaconNodePool) NodeCount() int {
+	return len(p.nodes)
+}
+
+// HealthyNodeCount returns the number of healthy beacon nodes.
+func (p *BeaconNodePool) HealthyNodeCount() int {
+	// Take the read lock for consistency with GetHealthyNodes and
+	// PreferNode, which also iterate p.nodes under p.mu.
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	count := 0
+
+	for _, wrapper := range p.nodes {
+		if wrapper.IsHealthy() {
+			count++
+		}
+	}
+
+	return count
+}
+
+// PreferNode returns the specified node if it's healthy, otherwise falls back to any healthy node.
+// The nodeAddress should match the Address field of a configured beacon node.
+// It returns ErrNoHealthyNodes when neither the preferred node nor any
+// fallback node is healthy.
+func (p *BeaconNodePool) PreferNode(nodeAddress string) (*BeaconNodeWrapper, error) {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	// First, try to find the preferred node
+	for _, wrapper := range p.nodes {
+		if wrapper.config.Address == nodeAddress && wrapper.IsHealthy() {
+			return wrapper, nil
+		}
+	}
+
+	// Preferred node not available, fall back to any healthy node
+	for _, wrapper := range p.nodes {
+		if wrapper.IsHealthy() {
+			p.log.WithFields(logrus.Fields{
+				"preferred": nodeAddress,
+				"fallback":  wrapper.config.Address,
+			}).Debug("Preferred node unavailable, using fallback")
+
+			return wrapper, nil
+		}
+	}
+
+	return nil, ErrNoHealthyNodes
+}
+
+// startNodeWithRetry starts a beacon node with exponential backoff retry.
+func (p *BeaconNodePool) startNodeWithRetry(ctx context.Context, wrapper *BeaconNodeWrapper) {
+	wrapper.mu.Lock()
+	wrapper.state = NodeStateConnecting
+	wrapper.mu.Unlock()
+
+	p.metrics.SetBeaconNodeStatus(wrapper.Name(), BeaconNodeStatusConnecting)
+
+	operation := func() (struct{}, error) {
+		// Abort permanently on context cancellation or pool shutdown so
+		// backoff.Retry stops instead of scheduling another attempt.
+		select {
+		case <-ctx.Done():
+			return struct{}{}, backoff.Permanent(ctx.Err())
+		case <-p.shutdownChan:
+			return struct{}{}, backoff.Permanent(errors.New("pool shutting down"))
+		default:
+		}
+
+		if err := wrapper.node.Start(ctx); err != nil {
+			wrapper.log.WithError(err).Warn("Failed to start beacon node, will retry")
+			p.metrics.SetBeaconNodeStatus(wrapper.Name(), BeaconNodeStatusUnhealthy)
+
+			return struct{}{}, err
+		}
+
+		return struct{}{}, nil
+	}
+
+	bo := backoff.NewExponentialBackOff()
+	bo.InitialInterval = 1 * time.Second
+	bo.MaxInterval = 30 * time.Second
+
+	retryOpts := []backoff.RetryOption{
+		backoff.WithBackOff(bo),
+		// Notify fires between failed attempts: log the failure and flip
+		// the wrapper into the reconnecting state.
+		backoff.WithNotify(func(err error, duration time.Duration) {
+			wrapper.log.WithError(err).WithField("next_retry", duration).
+				Warn("Beacon node connection failed, retrying")
+
+			wrapper.mu.Lock()
+			wrapper.state = NodeStateReconnecting
+			wrapper.mu.Unlock()
+		}),
+	}
+	// NOTE(review): no max-tries/max-elapsed option is set, so retries are
+	// presumably unbounded until ctx is cancelled or a Permanent error is
+	// returned — confirm this matches backoff/v5 defaults and is intended.
+	if _, err := backoff.Retry(ctx, operation, retryOpts...); err != nil {
+		// Only log if not a context cancellation or shutdown
+		if !errors.Is(err, context.Canceled) {
+			wrapper.log.WithError(err).Error("Beacon node connection permanently failed")
+		}
+
+		// Mark the node as permanently disconnected and unhealthy.
+		wrapper.mu.Lock()
+		wrapper.state = NodeStateDisconnected
+		wrapper.healthy = false
+		wrapper.mu.Unlock()
+
+		p.metrics.SetBeaconNodeStatus(wrapper.Name(), BeaconNodeStatusUnhealthy)
+
+		return
+	}
+
+	wrapper.mu.Lock()
+	wrapper.state = NodeStateConnected
+	wrapper.mu.Unlock()
+
+	wrapper.log.Info("Beacon node connected successfully")
+}
+
+// GetState returns the current connection state of the beacon node.
+func (w *BeaconNodeWrapper) GetState() NodeState { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.state +} + +// Address returns the address of the beacon node. +func (w *BeaconNodeWrapper) Address() string { + return w.config.Address +} diff --git a/pkg/horizon/ethereum/config.go b/pkg/horizon/ethereum/config.go new file mode 100644 index 000000000..0e31cd2b1 --- /dev/null +++ b/pkg/horizon/ethereum/config.go @@ -0,0 +1,76 @@ +package ethereum + +import ( + "errors" + "fmt" + "time" + + "github.com/ethpandaops/beacon/pkg/human" +) + +// BeaconNodeConfig holds configuration for a single beacon node. +type BeaconNodeConfig struct { + // Name is a human-readable name for this beacon node. + Name string `yaml:"name"` + // Address is the HTTP address of the beacon node. + Address string `yaml:"address"` + // Headers is a map of headers to send to the beacon node. + Headers map[string]string `yaml:"headers"` +} + +// Validate validates the beacon node configuration. +func (c *BeaconNodeConfig) Validate() error { + if c.Address == "" { + return errors.New("address is required") + } + + if c.Name == "" { + return errors.New("name is required") + } + + return nil +} + +// Config holds configuration for the Ethereum beacon node pool. +type Config struct { + // BeaconNodes is a list of beacon nodes to connect to. + BeaconNodes []BeaconNodeConfig `yaml:"beaconNodes"` + // OverrideNetworkName is the name of the network to use. + // If not set, the network name will be retrieved from the first healthy beacon node. + OverrideNetworkName string `yaml:"overrideNetworkName" default:""` + // StartupTimeout is the maximum time to wait for a healthy beacon node on startup. + StartupTimeout human.Duration `yaml:"startupTimeout" default:"60s"` + // HealthCheckInterval is the interval between health checks. + HealthCheckInterval human.Duration `yaml:"healthCheckInterval" default:"3s"` + // BlockCacheSize is the number of blocks to cache per beacon node. 
+ BlockCacheSize uint64 `yaml:"blockCacheSize" default:"1000"` + // BlockCacheTTL is the time to live for blocks in the cache. + BlockCacheTTL human.Duration `yaml:"blockCacheTtl" default:"1h"` + // BlockPreloadWorkers is the number of workers to use for preloading blocks. + BlockPreloadWorkers uint64 `yaml:"blockPreloadWorkers" default:"5"` + // BlockPreloadQueueSize is the size of the queue for preloading blocks. + BlockPreloadQueueSize uint64 `yaml:"blockPreloadQueueSize" default:"5000"` +} + +// Validate validates the configuration. +func (c *Config) Validate() error { + if len(c.BeaconNodes) == 0 { + return errors.New("at least one beacon node is required") + } + + for i, node := range c.BeaconNodes { + if err := node.Validate(); err != nil { + return fmt.Errorf("invalid beacon node config at index %d: %w", i, err) + } + } + + if c.HealthCheckInterval.Duration <= 0 { + c.HealthCheckInterval.Duration = 3 * time.Second + } + + if c.StartupTimeout.Duration <= 0 { + c.StartupTimeout.Duration = 60 * time.Second + } + + return nil +} diff --git a/pkg/horizon/ethereum/metrics.go b/pkg/horizon/ethereum/metrics.go new file mode 100644 index 000000000..50221dc86 --- /dev/null +++ b/pkg/horizon/ethereum/metrics.go @@ -0,0 +1,149 @@ +package ethereum + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// BeaconNodeStatus represents the health status of a beacon node. +type BeaconNodeStatus string + +const ( + // BeaconNodeStatusHealthy indicates the beacon node is healthy. + BeaconNodeStatusHealthy BeaconNodeStatus = "healthy" + // BeaconNodeStatusUnhealthy indicates the beacon node is unhealthy. + BeaconNodeStatusUnhealthy BeaconNodeStatus = "unhealthy" + // BeaconNodeStatusConnecting indicates the beacon node is connecting. + BeaconNodeStatusConnecting BeaconNodeStatus = "connecting" +) + +// Metrics holds Prometheus metrics for the beacon node pool. 
+type Metrics struct { + // beaconNodeStatus tracks the status of each beacon node (1 = status active, 0 = not). + beaconNodeStatus *prometheus.GaugeVec + + // blocksFetched tracks the total number of blocks fetched per beacon node. + blocksFetched *prometheus.CounterVec + + // blockCacheHits tracks the number of block cache hits. + blockCacheHits *prometheus.CounterVec + + // blockCacheMisses tracks the number of block cache misses. + blockCacheMisses *prometheus.CounterVec + + // blockFetchErrors tracks the number of block fetch errors. + blockFetchErrors *prometheus.CounterVec + + // healthCheckTotal tracks the total number of health checks per node. + healthCheckTotal *prometheus.CounterVec + + // healthCheckDuration tracks the duration of health checks. + healthCheckDuration *prometheus.HistogramVec +} + +// NewMetrics creates a new Metrics instance. +func NewMetrics(namespace string) *Metrics { + m := &Metrics{ + beaconNodeStatus: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "beacon_node_status", + Help: "Status of the beacon node (1 = status is active for this node)", + }, []string{"node", "status"}), + + blocksFetched: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "beacon_blocks_fetched_total", + Help: "Total number of blocks fetched from beacon nodes", + }, []string{"node", "network"}), + + blockCacheHits: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "beacon_block_cache_hits_total", + Help: "Total number of block cache hits", + }, []string{"network"}), + + blockCacheMisses: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "beacon_block_cache_misses_total", + Help: "Total number of block cache misses", + }, []string{"network"}), + + blockFetchErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "beacon_block_fetch_errors_total", + Help: "Total number of block fetch errors", + }, 
[]string{"node", "network"}), + + healthCheckTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "beacon_health_check_total", + Help: "Total number of health checks per beacon node", + }, []string{"node", "status"}), + + healthCheckDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "beacon_health_check_duration_seconds", + Help: "Duration of health checks in seconds", + Buckets: prometheus.DefBuckets, + }, []string{"node"}), + } + + prometheus.MustRegister( + m.beaconNodeStatus, + m.blocksFetched, + m.blockCacheHits, + m.blockCacheMisses, + m.blockFetchErrors, + m.healthCheckTotal, + m.healthCheckDuration, + ) + + return m +} + +// SetBeaconNodeStatus sets the status of a beacon node. +// It sets the gauge to 1 for the current status and 0 for other statuses. +func (m *Metrics) SetBeaconNodeStatus(node string, status BeaconNodeStatus) { + statuses := []BeaconNodeStatus{ + BeaconNodeStatusHealthy, + BeaconNodeStatusUnhealthy, + BeaconNodeStatusConnecting, + } + + for _, s := range statuses { + if s == status { + m.beaconNodeStatus.WithLabelValues(node, string(s)).Set(1) + } else { + m.beaconNodeStatus.WithLabelValues(node, string(s)).Set(0) + } + } +} + +// IncBlocksFetched increments the blocks fetched counter. +func (m *Metrics) IncBlocksFetched(node, network string) { + m.blocksFetched.WithLabelValues(node, network).Inc() +} + +// IncBlockCacheHits increments the block cache hits counter. +func (m *Metrics) IncBlockCacheHits(network string) { + m.blockCacheHits.WithLabelValues(network).Inc() +} + +// IncBlockCacheMisses increments the block cache misses counter. +func (m *Metrics) IncBlockCacheMisses(network string) { + m.blockCacheMisses.WithLabelValues(network).Inc() +} + +// IncBlockFetchErrors increments the block fetch errors counter. 
+func (m *Metrics) IncBlockFetchErrors(node, network string) { + m.blockFetchErrors.WithLabelValues(node, network).Inc() +} + +// IncHealthCheck increments the health check counter. +func (m *Metrics) IncHealthCheck(node string, status BeaconNodeStatus) { + m.healthCheckTotal.WithLabelValues(node, string(status)).Inc() +} + +// ObserveHealthCheckDuration observes the duration of a health check. +func (m *Metrics) ObserveHealthCheckDuration(node string, duration float64) { + m.healthCheckDuration.WithLabelValues(node).Observe(duration) +} diff --git a/pkg/horizon/horizon.go b/pkg/horizon/horizon.go new file mode 100644 index 000000000..eef77f1cf --- /dev/null +++ b/pkg/horizon/horizon.go @@ -0,0 +1,652 @@ +package horizon + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + //nolint:gosec // only exposed if pprofAddr config is set + _ "net/http/pprof" + + // Import extractors package to register all derivers via init(). + _ "github.com/ethpandaops/xatu/pkg/cldata/deriver/extractors" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + cldataderiver "github.com/ethpandaops/xatu/pkg/cldata/deriver" + cldataiterator "github.com/ethpandaops/xatu/pkg/cldata/iterator" + "github.com/ethpandaops/xatu/pkg/horizon/cache" + "github.com/ethpandaops/xatu/pkg/horizon/coordinator" + "github.com/ethpandaops/xatu/pkg/horizon/deriver" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/ethpandaops/xatu/pkg/horizon/iterator" + "github.com/ethpandaops/xatu/pkg/horizon/subscription" + "github.com/ethpandaops/xatu/pkg/observability" + "github.com/ethpandaops/xatu/pkg/output" + oxatu "github.com/ethpandaops/xatu/pkg/output/xatu" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/google/uuid" + perrors "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/sdk/trace" +) + +type 
Horizon struct { + Config *Config + + sinks []output.Sink + + log logrus.FieldLogger + + id uuid.UUID + + metrics *Metrics + + // Beacon node pool for connecting to multiple beacon nodes. + beaconPool *ethereum.BeaconNodePool + + // Coordinator client for tracking locations. + coordinatorClient *coordinator.Client + + // Deduplication cache for block events. + dedupCache *cache.DedupCache + + // Broadcaster for deduplicated block events. + blockBroadcaster *BlockEventBroadcaster + + // Block subscriptions from beacon nodes. + blockSubscription *subscription.BlockSubscription + + // Reorg subscription for chain reorg events. + reorgSubscription *subscription.ReorgSubscription + + // Reorg tracker for tagging derived events. + reorgTracker *ReorgTracker + + // Event derivers for processing block data. + eventDerivers []cldataderiver.EventDeriver + + // Dual iterators for coordinated HEAD/FILL processing. + dualIterators []*iterator.DualIterator + + shutdownFuncs []func(ctx context.Context) error + + overrides *Override +} + +func New(ctx context.Context, log logrus.FieldLogger, config *Config, overrides *Override) (*Horizon, error) { + if config == nil { + return nil, errors.New("config is required") + } + + if overrides != nil { + if err := config.ApplyOverrides(overrides, log); err != nil { + return nil, fmt.Errorf("failed to apply overrides: %w", err) + } + } + + if err := config.Validate(); err != nil { + return nil, err + } + + sinks, err := config.CreateSinks(log) + if err != nil { + return nil, err + } + + // Create beacon node pool. + beaconPool, err := ethereum.NewBeaconNodePool(ctx, &config.Ethereum, log) + if err != nil { + return nil, fmt.Errorf("failed to create beacon node pool: %w", err) + } + + // Create coordinator client. + coordinatorClient, err := coordinator.New(&config.Coordinator, log) + if err != nil { + return nil, fmt.Errorf("failed to create coordinator client: %w", err) + } + + // Create deduplication cache. 
+ dedupCache := cache.New(&config.DedupCache, "xatu_horizon") + reorgTracker := NewReorgTracker(config.DedupCache.TTL) + + return &Horizon{ + Config: config, + sinks: sinks, + log: log, + id: uuid.New(), + metrics: NewMetrics("xatu_horizon"), + beaconPool: beaconPool, + coordinatorClient: coordinatorClient, + dedupCache: dedupCache, + reorgTracker: reorgTracker, + eventDerivers: nil, // Derivers are created once the beacon pool is ready. + shutdownFuncs: make([]func(ctx context.Context) error, 0), + overrides: overrides, + }, nil +} + +func (h *Horizon) Start(ctx context.Context) error { + // Start tracing if enabled. + if h.Config.Tracing.Enabled { + h.log.Info("Tracing enabled") + + res, err := observability.NewResource(xatu.WithModule(xatu.ModuleName_HORIZON), xatu.Short()) + if err != nil { + return perrors.Wrap(err, "failed to create tracing resource") + } + + opts := []trace.TracerProviderOption{ + trace.WithSampler(trace.ParentBased(trace.TraceIDRatioBased(h.Config.Tracing.Sampling.Rate))), + } + + tracer, err := observability.NewHTTPTraceProvider(ctx, + res, + h.Config.Tracing.AsOTelOpts(), + opts..., + ) + if err != nil { + return perrors.Wrap(err, "failed to create tracing provider") + } + + shutdown, err := observability.SetupOTelSDK(ctx, tracer) + if err != nil { + return perrors.Wrap(err, "failed to setup tracing SDK") + } + + h.shutdownFuncs = append(h.shutdownFuncs, shutdown) + } + + if err := h.ServeMetrics(ctx); err != nil { + return err + } + + if h.Config.PProfAddr != nil { + if err := h.ServePProf(ctx); err != nil { + return err + } + } + + h.log. + WithField("version", xatu.Full()). + WithField("id", h.id.String()). + Info("Starting Xatu in horizon mode 🌅") + + // Start sinks. + for _, sink := range h.sinks { + if err := sink.Start(ctx); err != nil { + return err + } + } + + if err := h.ApplyOverrideBeforeStartAfterCreation(ctx); err != nil { + return fmt.Errorf("failed to apply overrides before start: %w", err) + } + + // Start dedup cache. 
+ go h.dedupCache.Start() + + // Register on-ready callback for beacon pool. + h.beaconPool.OnReady(func(ctx context.Context) error { + return h.onBeaconPoolReady(ctx) + }) + + // Start beacon pool (will call onBeaconPoolReady when healthy). + if err := h.beaconPool.Start(ctx); err != nil { + return fmt.Errorf("failed to start beacon pool: %w", err) + } + + cancel := make(chan os.Signal, 1) + signal.Notify(cancel, syscall.SIGTERM, syscall.SIGINT) + + sig := <-cancel + h.log.Printf("Caught signal: %v", sig) + + if err := h.Shutdown(ctx); err != nil { + return err + } + + return nil +} + +// onBeaconPoolReady is called when the beacon pool has at least one healthy node. +// It initializes and starts all the event derivers. +func (h *Horizon) onBeaconPoolReady(ctx context.Context) error { + h.log.Info("Beacon pool ready, initializing event derivers") + + metadata := h.beaconPool.Metadata() + networkName := string(metadata.Network.Name) + networkID := fmt.Sprintf("%d", metadata.Network.ID) + wallclock := metadata.Wallclock() + depositChainID := metadata.Spec.DepositChainID + + // Create block subscription for SSE events. + h.blockSubscription = subscription.NewBlockSubscription( + h.log, + h.beaconPool, + &h.Config.Subscription, + ) + + // Start block subscription. + if err := h.blockSubscription.Start(ctx); err != nil { + return fmt.Errorf("failed to start block subscription: %w", err) + } + + // Start block broadcaster to deduplicate and fan-out events. + h.blockBroadcaster = NewBlockEventBroadcaster( + h.log, + h.dedupCache, + h.blockSubscription.Events(), + h.Config.Subscription.BufferSize, + ) + h.blockBroadcaster.Start(ctx) + + // Create and start reorg subscription for chain reorg handling. 
+ h.reorgSubscription = subscription.NewReorgSubscription( + h.log, + h.beaconPool, + &h.Config.Reorg, + ) + + if err := h.reorgSubscription.Start(ctx); err != nil { + return fmt.Errorf("failed to start reorg subscription: %w", err) + } + + // Start goroutine to handle reorg events. + go h.handleReorgEvents(ctx) + + // Create context provider adapter for all derivers. + ctxProvider := deriver.NewContextProviderAdapter( + h.id, + h.Config.Name, + networkName, + metadata.Network.ID, + wallclock, + depositChainID, + h.Config.Labels, + ) + + // Create beacon client adapter. + beaconClient := deriver.NewBeaconClientAdapter(h.beaconPool) + + // Create derivers using the factory pattern. + // All derivers are registered via init() in the extractors package. + factory := cldataderiver.NewDeriverFactory(h.log, beaconClient, ctxProvider) + + // Create iterator factory that returns appropriate iterator based on deriver mode. + iteratorFactory := func(cannonType xatu.CannonType) cldataiterator.Iterator { + horizonType, ok := GetHorizonType(cannonType) + if !ok { + h.log.WithField("cannon_type", cannonType.String()).Warn("Unknown cannon type, skipping") + + return nil + } + + spec, ok := cldataderiver.Get(cannonType) + if !ok { + return nil + } + + if IsEpochBased(spec) { + return h.createEpochIterator(horizonType, networkID, networkName) + } + + return h.createDualIterator(horizonType, networkID, networkName) + } + + // Create enabled function that checks config. + enabledFunc := func(cannonType xatu.CannonType) bool { + return IsDeriverEnabled(&h.Config.Derivers, cannonType) + } + + // Create all derivers using factory. + genericDerivers := factory.CreateAll(iteratorFactory, enabledFunc) + + eventDerivers := make([]cldataderiver.EventDeriver, 0, len(genericDerivers)) + for _, d := range genericDerivers { + eventDerivers = append(eventDerivers, d) + } + + h.eventDerivers = eventDerivers + + // Start each deriver. 
+ for _, d := range h.eventDerivers { + // Register callback for derived events. + d.OnEventsDerived(ctx, func(ctx context.Context, events []*xatu.DecoratedEvent) error { + return h.handleNewDecoratedEvents(ctx, events) + }) + + // Start deriver in goroutine. + go func() { + if err := h.startDeriverWhenReady(ctx, d); err != nil { + h.log. + WithField("deriver", d.Name()). + WithError(err).Fatal("Failed to start deriver") + } + }() + } + + return nil +} + +// createHeadIterator creates a HEAD iterator for a specific deriver type. +func (h *Horizon) createHeadIterator( + horizonType xatu.HorizonType, + networkID string, + networkName string, + blockEvents <-chan subscription.BlockEvent, +) *iterator.HeadIterator { + return iterator.NewHeadIterator( + h.log, + h.beaconPool, + h.coordinatorClient, + horizonType, + networkID, + networkName, + blockEvents, + ) +} + +// createFillIterator creates a FILL iterator for a specific deriver type. +func (h *Horizon) createFillIterator( + horizonType xatu.HorizonType, + networkID string, + networkName string, +) *iterator.FillIterator { + return iterator.NewFillIterator( + h.log, + h.beaconPool, + h.coordinatorClient, + &h.Config.Iterators.Fill, + horizonType, + networkID, + networkName, + ) +} + +// createDualIterator creates a dual iterator that multiplexes HEAD and FILL. +func (h *Horizon) createDualIterator( + horizonType xatu.HorizonType, + networkID string, + networkName string, +) *iterator.DualIterator { + head := h.createHeadIterator(horizonType, networkID, networkName, h.blockBroadcaster.Subscribe()) + fill := h.createFillIterator(horizonType, networkID, networkName) + dual := iterator.NewDualIterator(h.log, &h.Config.Iterators, head, fill) + + h.dualIterators = append(h.dualIterators, dual) + + return dual +} + +// createEpochIterator creates an Epoch iterator for a specific deriver type. 
func (h *Horizon) createEpochIterator(
	horizonType xatu.HorizonType,
	networkID string,
	networkName string,
) *iterator.EpochIterator {
	return iterator.NewEpochIterator(
		h.log,
		h.beaconPool,
		h.coordinatorClient,
		h.Config.EpochIterator,
		horizonType,
		networkID,
		networkName,
	)
}

// startDeriverWhenReady waits for the deriver's activation fork and then starts it.
// It polls (sleeping between checks) until the fork is active, so it may block
// for a very long time; callers run it in a dedicated goroutine.
func (h *Horizon) startDeriverWhenReady(ctx context.Context, d cldataderiver.EventDeriver) error {
	for {
		// Handle derivers that require phase0 - since it's not actually a fork, it'll never appear in the spec.
		if d.ActivationFork() != spec.DataVersionPhase0 {
			fork, err := h.beaconPool.Metadata().Spec.ForkEpochs.GetByName(d.ActivationFork().String())
			if err != nil {
				// Fork unknown to the current spec: sleep out the current epoch
				// and re-check after the spec has had a chance to refresh.
				h.log.WithError(err).Errorf("unknown activation fork: %s", d.ActivationFork())

				epoch := h.beaconPool.Metadata().Wallclock().Epochs().Current()

				time.Sleep(time.Until(epoch.TimeWindow().End()))

				continue
			}

			currentEpoch := h.beaconPool.Metadata().Wallclock().Epochs().Current()

			if !fork.Active(phase0.Epoch(currentEpoch.Number())) {
				activationForkEpoch := h.beaconPool.Metadata().Wallclock().Epochs().FromNumber(uint64(fork.Epoch))

				sleepFor := time.Until(activationForkEpoch.TimeWindow().End())

				if activationForkEpoch.Number()-currentEpoch.Number() > 100000 {
					// If the fork epoch is over 100k epochs away, we are most likely dealing with a
					// placeholder fork epoch. Sleep until the end of the current fork epoch and then
					// wait for the spec to refresh.
					sleepFor = time.Until(currentEpoch.TimeWindow().End())
				}

				h.log.
					WithField("current_epoch", currentEpoch.Number()).
					WithField("activation_fork_name", d.ActivationFork()).
					WithField("activation_fork_epoch", fork.Epoch).
					WithField("estimated_time_until_fork", time.Until(activationForkEpoch.TimeWindow().Start())).
					WithField("check_again_in", sleepFor).
					Warn("Deriver required fork is not active yet")

				time.Sleep(sleepFor)

				continue
			}
		}

		h.log.
			WithField("deriver", d.Name()).
			Info("Starting horizon event deriver")

		return d.Start(ctx)
	}
}

// handleNewDecoratedEvents sends derived events to all configured sinks.
// A failure in any sink aborts delivery to the remaining sinks and is
// returned to the deriver; metrics are only recorded after all sinks succeed.
func (h *Horizon) handleNewDecoratedEvents(ctx context.Context, events []*xatu.DecoratedEvent) error {
	h.markReorgMetadata(events)

	for _, sink := range h.sinks {
		if err := sink.HandleNewDecoratedEvents(ctx, events); err != nil {
			return perrors.Wrapf(err, "failed to handle new decorated events in sink %s", sink.Name())
		}
	}

	networkName := string(h.beaconPool.Metadata().Network.Name)

	for _, event := range events {
		h.metrics.AddDecoratedEvent(1, event, networkName)
	}

	return nil
}

// handleReorgEvents handles chain reorg events by clearing affected block roots from the dedup cache.
// This allows the affected slots to be re-processed with the new canonical blocks.
// Runs until ctx is cancelled or the reorg event channel closes.
func (h *Horizon) handleReorgEvents(ctx context.Context) {
	if h.reorgSubscription == nil || !h.reorgSubscription.Enabled() {
		return
	}

	log := h.log.WithField("component", "reorg_handler")
	log.Info("Starting reorg event handler")

	for {
		select {
		case <-ctx.Done():
			log.Info("Reorg event handler stopped (context cancelled)")

			return
		case event, ok := <-h.reorgSubscription.Events():
			if !ok {
				log.Info("Reorg event handler stopped (channel closed)")

				return
			}

			log.WithFields(logrus.Fields{
				"slot":           event.Slot,
				"depth":          event.Depth,
				"old_head_block": event.OldHeadBlock.String(),
				"new_head_block": event.NewHeadBlock.String(),
				"epoch":          event.Epoch,
				"node":           event.NodeName,
			}).Info("Processing chain reorg event")

			// Record the affected slot range and roll iterator locations back
			// to the start of the reorg so the slots get re-derived.
			start, end := reorgSlotRange(event)
			if h.reorgTracker != nil {
				h.reorgTracker.AddRange(start, end)
			}

			h.rollbackReorgLocations(ctx, start)

			// Clear the old head block from dedup cache so the new canonical block can be processed.
			// The old head block root needs to be removed so that if we receive the new canonical
			// block for the same slot, it won't be deduplicated.
			h.dedupCache.Delete(event.OldHeadBlock.String())

			log.WithFields(logrus.Fields{
				"slot":          event.Slot,
				"depth":         event.Depth,
				"cleared_block": event.OldHeadBlock.String(),
				"new_canonical": event.NewHeadBlock.String(),
			}).Info("Cleared reorged block from dedup cache - slot can be re-processed")
		}
	}
}

// Shutdown stops all Horizon subsystems in reverse dependency order:
// derivers, iterators, broadcaster, subscriptions, dedup cache, beacon pool,
// sinks, then registered shutdown functions. Per-component errors are logged
// and tolerated; sink/shutdown-func errors abort and are returned.
func (h *Horizon) Shutdown(ctx context.Context) error {
	h.log.Printf("Shutting down")

	// Stop event derivers.
	for _, d := range h.eventDerivers {
		if err := d.Stop(ctx); err != nil {
			h.log.WithError(err).WithField("deriver", d.Name()).Warn("Error stopping deriver")
		}
	}

	// Stop dual iterators.
	for _, dual := range h.dualIterators {
		if err := dual.Stop(ctx); err != nil {
			h.log.WithError(err).Warn("Error stopping dual iterator")
		}
	}

	// Stop block broadcaster.
	if h.blockBroadcaster != nil {
		h.blockBroadcaster.Stop()
	}

	// Stop block subscription.
	if h.blockSubscription != nil {
		if err := h.blockSubscription.Stop(ctx); err != nil {
			h.log.WithError(err).Warn("Error stopping block subscription")
		}
	}

	// Stop reorg subscription.
	if h.reorgSubscription != nil {
		if err := h.reorgSubscription.Stop(ctx); err != nil {
			h.log.WithError(err).Warn("Error stopping reorg subscription")
		}
	}

	// Stop dedup cache.
	h.dedupCache.Stop()

	// Stop beacon pool.
	if err := h.beaconPool.Stop(ctx); err != nil {
		h.log.WithError(err).Warn("Error stopping beacon pool")
	}

	// Stop sinks.
	for _, sink := range h.sinks {
		if err := sink.Stop(ctx); err != nil {
			return err
		}
	}

	// Run shutdown functions.
	for _, fun := range h.shutdownFuncs {
		if err := fun(ctx); err != nil {
			return err
		}
	}

	return nil
}

// ApplyOverrideBeforeStartAfterCreation applies CLI/env overrides that must
// be set after the sinks are constructed but before they start, e.g. the
// xatu output authorization secret.
func (h *Horizon) ApplyOverrideBeforeStartAfterCreation(ctx context.Context) error {
	if h.overrides == nil {
		return nil
	}

	if h.overrides.XatuOutputAuth.Enabled {
		h.log.Info("Overriding output authorization on xatu sinks")

		for _, sink := range h.sinks {
			if sink.Type() == string(output.SinkTypeXatu) {
				xatuSink, ok := sink.(*oxatu.Xatu)
				if !ok {
					return perrors.New("failed to assert xatu sink")
				}

				h.log.WithField("sink_name", sink.Name()).Info("Overriding xatu output authorization")

				xatuSink.SetAuthorization(h.overrides.XatuOutputAuth.Value)
			}
		}
	}

	return nil
}

// ServeMetrics starts the Prometheus /metrics endpoint in a background
// goroutine and returns immediately.
// NOTE(review): ListenAndServe always returns a non-nil error, so Fatal here
// would also fire on a deliberate server Close — confirm the server is never
// closed elsewhere.
func (h *Horizon) ServeMetrics(_ context.Context) error {
	go func() {
		sm := http.NewServeMux()
		sm.Handle("/metrics", promhttp.Handler())

		server := &http.Server{
			Addr:              h.Config.MetricsAddr,
			ReadHeaderTimeout: 15 * time.Second,
			Handler:           sm,
		}

		h.log.Infof("Serving metrics at %s", h.Config.MetricsAddr)

		if err := server.ListenAndServe(); err != nil {
			h.log.Fatal(err)
		}
	}()

	return nil
}

// ServePProf starts the net/http/pprof debug endpoint in a background
// goroutine and returns immediately. Config.PProfAddr must be non-nil;
// callers gate on that before invoking.
func (h *Horizon) ServePProf(_ context.Context) error {
	pprofServer := &http.Server{
		Addr:              *h.Config.PProfAddr,
		ReadHeaderTimeout: 120 * time.Second,
	}

	go func() {
		h.log.Infof("Serving pprof at %s", *h.Config.PProfAddr)

		if err := pprofServer.ListenAndServe(); err != nil {
			h.log.Fatal(err)
		}
	}()

	return nil
}
diff --git a/pkg/horizon/iterator/coordinator.go b/pkg/horizon/iterator/coordinator.go
new file mode 100644
index 000000000..aea6f98b1
--- /dev/null
+++ b/pkg/horizon/iterator/coordinator.go
@@ -0,0 +1,275 @@
package iterator

import (
	"context"
	"errors"
	"sync"

	"github.com/attestantio/go-eth2-client/spec"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
)

// CoordinatorConfig holds configuration for the iterator coordinator.
+type CoordinatorConfig struct { + // Head is the configuration for the HEAD iterator. + Head HeadIteratorConfig `yaml:"head"` + // Fill is the configuration for the FILL iterator. + Fill FillIteratorConfig `yaml:"fill"` +} + +// Validate validates the configuration. +func (c *CoordinatorConfig) Validate() error { + if err := c.Head.Validate(); err != nil { + return err + } + + if err := c.Fill.Validate(); err != nil { + return err + } + + return nil +} + +// Coordinator manages the dual HEAD and FILL iterators, ensuring they run +// in separate goroutines without blocking each other. HEAD has priority for +// real-time block processing, while FILL handles consistency catch-up. +type Coordinator struct { + log logrus.FieldLogger + config *CoordinatorConfig + metrics *CoordinatorMetrics + + headIterator *HeadIterator + fillIterator *FillIterator + + // wg tracks running goroutines for graceful shutdown. + wg sync.WaitGroup + + // done signals shutdown to all goroutines. + done chan struct{} +} + +// CoordinatorMetrics tracks metrics for the iterator coordinator. +type CoordinatorMetrics struct { + headRunning prometheus.Gauge + fillRunning prometheus.Gauge +} + +// NewCoordinatorMetrics creates metrics for the coordinator. +func NewCoordinatorMetrics(namespace string) *CoordinatorMetrics { + m := &CoordinatorMetrics{ + headRunning: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "iterator_coordinator", + Name: "head_running", + Help: "Indicates if the HEAD iterator is running (1) or stopped (0)", + }), + fillRunning: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "iterator_coordinator", + Name: "fill_running", + Help: "Indicates if the FILL iterator is running (1) or stopped (0)", + }), + } + + prometheus.MustRegister( + m.headRunning, + m.fillRunning, + ) + + return m +} + +// NewCoordinator creates a new iterator coordinator. 
func NewCoordinator(
	log logrus.FieldLogger,
	config *CoordinatorConfig,
	headIterator *HeadIterator,
	fillIterator *FillIterator,
) *Coordinator {
	// A nil config yields the zero value, i.e. both iterators disabled.
	if config == nil {
		config = &CoordinatorConfig{}
	}

	return &Coordinator{
		log:          log.WithField("component", "iterator/coordinator"),
		config:       config,
		metrics:      NewCoordinatorMetrics("xatu_horizon"),
		headIterator: headIterator,
		fillIterator: fillIterator,
		done:         make(chan struct{}),
	}
}

// Start starts both iterators in their own goroutines.
// HEAD iterator runs first for priority, FILL iterator follows.
// Both iterators coordinate through the coordinator service to avoid
// processing the same slots.
func (c *Coordinator) Start(ctx context.Context, activationFork spec.DataVersion) error {
	c.log.WithField("activation_fork", activationFork.String()).
		Info("Starting dual-iterator coordinator")

	// Start HEAD iterator in its dedicated goroutine.
	// HEAD has priority and processes real-time SSE block events immediately.
	if c.config.Head.Enabled {
		if err := c.headIterator.Start(ctx, activationFork); err != nil {
			return err
		}

		c.wg.Add(1)

		go c.runHeadIterator(ctx)

		c.metrics.headRunning.Set(1)

		c.log.Info("HEAD iterator started in dedicated goroutine")
	} else {
		c.log.Warn("HEAD iterator is disabled")
	}

	// Start FILL iterator in its separate goroutine.
	// FILL runs independently and never blocks HEAD.
	if c.config.Fill.Enabled {
		if err := c.fillIterator.Start(ctx, activationFork); err != nil {
			return err
		}

		c.wg.Add(1)

		go c.runFillIterator(ctx)

		c.metrics.fillRunning.Set(1)

		c.log.Info("FILL iterator started in separate goroutine")
	} else {
		c.log.Warn("FILL iterator is disabled")
	}

	return nil
}

// runHeadIterator runs the HEAD iterator loop in its own goroutine.
// HEAD has priority - it receives real-time SSE block events and processes them immediately.
func (c *Coordinator) runHeadIterator(ctx context.Context) {
	defer c.wg.Done()
	defer c.metrics.headRunning.Set(0)

	c.log.Debug("HEAD iterator goroutine started")

	for {
		select {
		case <-ctx.Done():
			c.log.Info("HEAD iterator stopping due to context cancellation")

			return
		case <-c.done:
			c.log.Info("HEAD iterator stopping due to coordinator shutdown")

			return
		default:
			// Get next position from HEAD iterator.
			// This blocks until a block event is received from SSE.
			// NOTE(review): ctx.Done/c.done are only observed between Next()
			// calls; shutdown latency depends on Next() itself honouring ctx.
			pos, err := c.headIterator.Next(ctx)
			if err != nil {
				if errors.Is(err, context.Canceled) || errors.Is(err, ErrIteratorClosed) {
					return
				}

				// Log and continue on other errors.
				c.log.WithError(err).Debug("HEAD iterator Next() returned error")

				continue
			}

			if pos == nil {
				continue
			}

			// Position is available for processing.
			// The deriver will call UpdateLocation after processing completes.
			c.log.WithField("slot", pos.Slot).Trace("HEAD position ready for processing")
		}
	}
}

// runFillIterator runs the FILL iterator loop in its own goroutine.
// FILL runs independently and never blocks HEAD.
func (c *Coordinator) runFillIterator(ctx context.Context) {
	defer c.wg.Done()
	defer c.metrics.fillRunning.Set(0)

	c.log.Debug("FILL iterator goroutine started")

	for {
		select {
		case <-ctx.Done():
			c.log.Info("FILL iterator stopping due to context cancellation")

			return
		case <-c.done:
			c.log.Info("FILL iterator stopping due to coordinator shutdown")

			return
		default:
			// Get next position from FILL iterator.
			// This walks slots from fill_slot toward HEAD - LAG.
			pos, err := c.fillIterator.Next(ctx)
			if err != nil {
				if errors.Is(err, context.Canceled) || errors.Is(err, ErrIteratorClosed) {
					return
				}

				// Log and continue on other errors.
				c.log.WithError(err).Debug("FILL iterator Next() returned error")

				continue
			}

			if pos == nil {
				continue
			}

			// Position is available for processing.
			// The deriver will call UpdateLocation after processing completes.
			c.log.WithField("slot", pos.Slot).Trace("FILL position ready for processing")
		}
	}
}

// Stop stops both iterators and waits for goroutines to finish.
func (c *Coordinator) Stop(ctx context.Context) error {
	c.log.Info("Stopping dual-iterator coordinator")

	// Signal all goroutines to stop.
	close(c.done)

	// Stop individual iterators.
	if c.config.Head.Enabled {
		if err := c.headIterator.Stop(ctx); err != nil {
			c.log.WithError(err).Warn("Error stopping HEAD iterator")
		}
	}

	if c.config.Fill.Enabled {
		if err := c.fillIterator.Stop(ctx); err != nil {
			c.log.WithError(err).Warn("Error stopping FILL iterator")
		}
	}

	// Wait for goroutines to finish.
	c.wg.Wait()

	c.log.Info("Dual-iterator coordinator stopped")

	return nil
}

// HeadIterator returns the HEAD iterator.
func (c *Coordinator) HeadIterator() *HeadIterator {
	return c.headIterator
}

// FillIterator returns the FILL iterator.
func (c *Coordinator) FillIterator() *FillIterator {
	return c.fillIterator
}
diff --git a/pkg/horizon/iterator/dual.go b/pkg/horizon/iterator/dual.go
new file mode 100644
index 000000000..1e6ad8217
--- /dev/null
+++ b/pkg/horizon/iterator/dual.go
@@ -0,0 +1,235 @@
package iterator

import (
	"context"
	"errors"
	"sync"

	"github.com/attestantio/go-eth2-client/spec"
	cldataIterator "github.com/ethpandaops/xatu/pkg/cldata/iterator"
	"github.com/sirupsen/logrus"
)

// ErrDualIteratorClosed is returned when the dual iterator is closed.
var ErrDualIteratorClosed = errors.New("dual iterator closed")

// DualIterator multiplexes HEAD and FILL iterators with HEAD priority.
// It implements the shared cldata iterator interface so derivers can consume
// a single iterator while still getting both real-time and catch-up positions.
type DualIterator struct {
	log    logrus.FieldLogger
	config *CoordinatorConfig

	head *HeadIterator
	fill *FillIterator

	// headCh/fillCh carry positions from the producer goroutines (runHead,
	// runFill) to Next(); they are nil while the corresponding side is
	// disabled or after its channel has been drained and closed.
	headCh chan *cldataIterator.Position
	fillCh chan *cldataIterator.Position

	done chan struct{}
	wg   sync.WaitGroup
}

// NewDualIterator creates a new DualIterator.
// A nil config enables both HEAD and FILL with defaults.
func NewDualIterator(
	log logrus.FieldLogger,
	config *CoordinatorConfig,
	head *HeadIterator,
	fill *FillIterator,
) *DualIterator {
	if config == nil {
		config = &CoordinatorConfig{
			Head: HeadIteratorConfig{Enabled: true},
			Fill: FillIteratorConfig{Enabled: true},
		}
	}

	return &DualIterator{
		log:  log.WithField("component", "iterator/dual"),
		config: config,
		head: head,
		fill: fill,
		done: make(chan struct{}),
	}
}

// Start initializes both iterators and begins their processing loops.
// Each enabled side gets a small buffered channel and a pump goroutine.
func (d *DualIterator) Start(ctx context.Context, activationFork spec.DataVersion) error {
	if d.config.Head.Enabled {
		if err := d.head.Start(ctx, activationFork); err != nil {
			return err
		}

		d.headCh = make(chan *cldataIterator.Position, 16)
		d.wg.Add(1)
		go d.runHead(ctx)
	} else {
		d.log.Warn("HEAD iterator disabled")
	}

	if d.config.Fill.Enabled {
		if err := d.fill.Start(ctx, activationFork); err != nil {
			return err
		}

		d.fillCh = make(chan *cldataIterator.Position, 16)
		d.wg.Add(1)
		go d.runFill(ctx)
	} else {
		d.log.Warn("FILL iterator disabled")
	}

	return nil
}

// runHead pumps positions from the HEAD iterator into headCh until the
// iterator closes or the context/done channel fires.
func (d *DualIterator) runHead(ctx context.Context) {
	defer d.wg.Done()

	for {
		pos, err := d.head.Next(ctx)
		if err != nil {
			if errors.Is(err, context.Canceled) || errors.Is(err, ErrIteratorClosed) {
				return
			}

			d.log.WithError(err).Debug("HEAD iterator Next() returned error")
			continue
		}

		if pos == nil {
			continue
		}

		select {
		case d.headCh <- pos:
		case <-ctx.Done():
			return
		case <-d.done:
			return
		}
	}
}

// runFill pumps positions from the FILL iterator into fillCh until the
// iterator closes or the context/done channel fires.
func (d *DualIterator) runFill(ctx context.Context) {
	defer d.wg.Done()

	for {
		pos, err := d.fill.Next(ctx)
		if err != nil {
			if errors.Is(err, context.Canceled) || errors.Is(err, ErrIteratorClosed) {
				return
			}

			d.log.WithError(err).Debug("FILL iterator Next() returned error")
			continue
		}

		if pos == nil {
			continue
		}

		select {
		case d.fillCh <- pos:
		case <-ctx.Done():
			return
		case <-d.done:
			return
		}
	}
}

// Next returns the next position to process, prioritizing HEAD positions.
// It first drains headCh non-blockingly; only when HEAD has nothing pending
// does it block on both channels. A closed channel is set to nil so further
// receives on it block forever (idiomatic nil-channel trick), and once both
// channels are nil the iterator reports ErrDualIteratorClosed.
func (d *DualIterator) Next(ctx context.Context) (*cldataIterator.Position, error) {
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-d.done:
			return nil, ErrDualIteratorClosed
		default:
		}

		// Non-blocking check for HEAD priority.
		select {
		case pos, ok := <-d.headCh:
			if !ok {
				d.headCh = nil
				break
			}

			return pos, nil
		default:
		}

		// Blocking wait on both sides; nil channels never fire.
		select {
		case pos, ok := <-d.headCh:
			if !ok {
				d.headCh = nil
				break
			}

			return pos, nil
		case pos, ok := <-d.fillCh:
			if !ok {
				d.fillCh = nil
				break
			}

			return pos, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-d.done:
			return nil, ErrDualIteratorClosed
		}

		if d.headCh == nil && d.fillCh == nil {
			return nil, ErrDualIteratorClosed
		}
	}
}

// UpdateLocation persists the current position to the appropriate iterator.
// Forward positions came from HEAD, backward positions from FILL.
// NOTE(review): this checks the iterator pointer for nil, but constructors
// always set both — consider also gating on config.Head/Fill.Enabled, since
// a disabled side's iterator was never Start()ed.
func (d *DualIterator) UpdateLocation(ctx context.Context, position *cldataIterator.Position) error {
	switch position.Direction {
	case cldataIterator.DirectionForward:
		if d.head == nil {
			return errors.New("head iterator not available")
		}

		return d.head.UpdateLocation(ctx, position)
	case cldataIterator.DirectionBackward:
		if d.fill == nil {
			return errors.New("fill iterator not available")
		}

		return d.fill.UpdateLocation(ctx, position)
	default:
		return errors.New("unknown iterator direction")
	}
}

// Stop stops both iterators and waits for goroutines to finish.
// Channels are closed only after wg.Wait(), i.e. after both producer
// goroutines have exited, so no send-on-closed-channel can occur.
func (d *DualIterator) Stop(ctx context.Context) error {
	close(d.done)

	if d.head != nil {
		_ = d.head.Stop(ctx)
	}
	if d.fill != nil {
		_ = d.fill.Stop(ctx)
	}

	d.wg.Wait()

	if d.headCh != nil {
		close(d.headCh)
	}
	if d.fillCh != nil {
		close(d.fillCh)
	}

	return nil
}

// Verify DualIterator implements the Iterator interface.
var _ cldataIterator.Iterator = (*DualIterator)(nil)
diff --git a/pkg/horizon/iterator/epoch.go b/pkg/horizon/iterator/epoch.go
new file mode 100644
index 000000000..c39c58b3b
--- /dev/null
+++ b/pkg/horizon/iterator/epoch.go
@@ -0,0 +1,484 @@
package iterator

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/attestantio/go-eth2-client/spec"
	"github.com/attestantio/go-eth2-client/spec/phase0"
	"github.com/ethpandaops/xatu/pkg/horizon/coordinator"
	"github.com/ethpandaops/xatu/pkg/horizon/ethereum"
	"github.com/ethpandaops/xatu/pkg/proto/xatu"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"

	cldataIterator "github.com/ethpandaops/xatu/pkg/cldata/iterator"
)

var (
	// ErrEpochIteratorClosed is returned when the epoch iterator is closed.
	ErrEpochIteratorClosed = errors.New("epoch iterator closed")
)

// EpochIteratorConfig holds configuration for the epoch iterator.
type EpochIteratorConfig struct {
	// Enabled indicates if this iterator is enabled.
	Enabled bool `yaml:"enabled" default:"true"`
	// TriggerPercent is the percentage through an epoch at which to trigger.
	// For example, 0.5 means trigger at 50% through the epoch (midway).
	// Default is 0.5 (50%).
	TriggerPercent float64 `yaml:"triggerPercent" default:"0.5"`
}

// Validate validates the configuration.
+func (c *EpochIteratorConfig) Validate() error { + if c.TriggerPercent <= 0 || c.TriggerPercent >= 1 { + return errors.New("triggerPercent must be between 0 and 1 (exclusive)") + } + + return nil +} + +// DefaultEpochIteratorConfig returns the default epoch iterator configuration. +func DefaultEpochIteratorConfig() EpochIteratorConfig { + return EpochIteratorConfig{ + Enabled: true, + TriggerPercent: 0.5, + } +} + +// EpochIterator is an iterator that fires at a configurable point within each epoch. +// It's designed for epoch-based derivers (ProposerDuty, BeaconBlob, BeaconValidators, BeaconCommittee) +// that need to fetch data for an upcoming epoch before it starts. +// +// The iterator triggers at TriggerPercent through the current epoch (default 50%) and returns +// the NEXT epoch for processing. This allows derivers to pre-fetch epoch data before it's needed. +type EpochIterator struct { + log logrus.FieldLogger + pool *ethereum.BeaconNodePool + coordinator *coordinator.Client + cfg EpochIteratorConfig + metrics *EpochIteratorMetrics + + // horizonType is the type of deriver this iterator is for. + horizonType xatu.HorizonType + // networkID is the network identifier. + networkID string + // networkName is the human-readable network name. + networkName string + + // activationFork is the fork at which the deriver becomes active. + activationFork spec.DataVersion + + // lastProcessedEpoch tracks the last epoch we returned for processing. + lastProcessedEpoch phase0.Epoch + epochMu sync.RWMutex + initialized bool + + // done signals iterator shutdown. + done chan struct{} +} + +// EpochIteratorMetrics tracks metrics for the epoch iterator. 
+type EpochIteratorMetrics struct { + processedTotal *prometheus.CounterVec + skippedTotal *prometheus.CounterVec + positionEpoch *prometheus.GaugeVec + triggerWaitTotal *prometheus.CounterVec +} + +var ( + epochIteratorMetrics *EpochIteratorMetrics + epochIteratorMetricsOnce sync.Once +) + +// newEpochIteratorMetrics creates new metrics for the epoch iterator. +// Uses registration that doesn't panic on duplicate registration. +func newEpochIteratorMetrics(namespace string) *EpochIteratorMetrics { + epochIteratorMetricsOnce.Do(func() { + epochIteratorMetrics = &EpochIteratorMetrics{ + processedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "epoch_iterator", + Name: "processed_total", + Help: "Total number of epochs processed by the epoch iterator", + }, []string{"deriver", "network"}), + + skippedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "epoch_iterator", + Name: "skipped_total", + Help: "Total number of epochs skipped", + }, []string{"deriver", "network", "reason"}), + + positionEpoch: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "epoch_iterator", + Name: "position_epoch", + Help: "Current epoch position of the epoch iterator", + }, []string{"deriver", "network"}), + + triggerWaitTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "epoch_iterator", + Name: "trigger_wait_total", + Help: "Total number of times the iterator waited for trigger point", + }, []string{"deriver", "network"}), + } + + prometheus.MustRegister( + epochIteratorMetrics.processedTotal, + epochIteratorMetrics.skippedTotal, + epochIteratorMetrics.positionEpoch, + epochIteratorMetrics.triggerWaitTotal, + ) + }) + + return epochIteratorMetrics +} + +// NewEpochIterator creates a new epoch iterator. 
+func NewEpochIterator( + log logrus.FieldLogger, + pool *ethereum.BeaconNodePool, + coordinatorClient *coordinator.Client, + cfg EpochIteratorConfig, + horizonType xatu.HorizonType, + networkID string, + networkName string, +) *EpochIterator { + return &EpochIterator{ + log: log.WithFields(logrus.Fields{ + "component": "iterator/epoch", + "horizon_type": horizonType.String(), + }), + pool: pool, + coordinator: coordinatorClient, + cfg: cfg, + horizonType: horizonType, + networkID: networkID, + networkName: networkName, + metrics: newEpochIteratorMetrics("xatu_horizon"), + done: make(chan struct{}), + } +} + +// Start initializes the iterator with the activation fork version. +func (e *EpochIterator) Start(ctx context.Context, activationFork spec.DataVersion) error { + e.activationFork = activationFork + + // Initialize last processed epoch from coordinator. + if err := e.initializeFromCoordinator(ctx); err != nil { + e.log.WithError(err).Warn("Failed to initialize from coordinator, starting fresh") + } + + e.log.WithFields(logrus.Fields{ + "activation_fork": activationFork.String(), + "network_id": e.networkID, + "trigger_percent": e.cfg.TriggerPercent, + "last_epoch": e.lastProcessedEpoch, + }).Info("Epoch iterator started") + + return nil +} + +// initializeFromCoordinator loads the last processed epoch from the coordinator. +func (e *EpochIterator) initializeFromCoordinator(ctx context.Context) error { + location, err := e.coordinator.GetHorizonLocation(ctx, e.horizonType, e.networkID) + if err != nil { + return fmt.Errorf("failed to get horizon location: %w", err) + } + + if location == nil { + // No previous location, start from epoch 0. + e.lastProcessedEpoch = 0 + e.initialized = false + + return nil + } + + // For epoch-based derivers, we track the last processed epoch in head_slot field. + // This is a bit of a misnomer but allows reuse of the existing HorizonLocation message. 
	// head_slot field stores the last processed epoch number for epoch iterators.
	e.epochMu.Lock()
	e.lastProcessedEpoch = phase0.Epoch(location.HeadSlot)
	e.initialized = true
	e.epochMu.Unlock()

	return nil
}

// Next returns the next epoch to process.
// It waits until the trigger point within the current epoch (e.g., 50% through),
// then returns the NEXT epoch for processing.
func (e *EpochIterator) Next(ctx context.Context) (*cldataIterator.Position, error) {
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-e.done:
			return nil, ErrEpochIteratorClosed
		default:
			position, err := e.calculateNextPosition(ctx)
			if err != nil {
				if errors.Is(err, cldataIterator.ErrLocationUpToDate) {
					// Wait for the trigger point.
					if waitErr := e.waitForTriggerPoint(ctx); waitErr != nil {
						return nil, waitErr
					}

					continue
				}

				return nil, err
			}

			return position, nil
		}
	}
}

// calculateNextPosition determines the next epoch to process.
// Returns ErrLocationUpToDate when the trigger point has not been reached,
// the epoch was already processed, or the activation fork is not yet active.
func (e *EpochIterator) calculateNextPosition(ctx context.Context) (*cldataIterator.Position, error) {
	metadata := e.pool.Metadata()
	if metadata == nil {
		return nil, errors.New("metadata not available")
	}

	wallclock := metadata.Wallclock()
	currentEpoch := wallclock.Epochs().Current()

	// Calculate the trigger slot within the current epoch.
	slotsPerEpoch := uint64(metadata.Spec.SlotsPerEpoch)
	triggerSlotOffset := uint64(float64(slotsPerEpoch) * e.cfg.TriggerPercent)
	epochStartSlot := currentEpoch.Number() * slotsPerEpoch
	triggerSlot := epochStartSlot + triggerSlotOffset

	// Get the current slot.
	currentSlot := wallclock.Slots().Current()

	// If we haven't reached the trigger point yet, we're up to date.
	if currentSlot.Number() < triggerSlot {
		return nil, cldataIterator.ErrLocationUpToDate
	}

	// The epoch to process is the NEXT epoch (current + 1).
	nextEpoch := phase0.Epoch(currentEpoch.Number() + 1)

	// Check if we already processed this epoch.
	e.epochMu.RLock()
	lastProcessed := e.lastProcessedEpoch
	initialized := e.initialized
	e.epochMu.RUnlock()

	if initialized && nextEpoch <= lastProcessed {
		// Already processed, wait for next epoch.
		return nil, cldataIterator.ErrLocationUpToDate
	}

	// Check if the activation fork is active for this epoch.
	if err := e.checkActivationFork(nextEpoch); err != nil {
		e.metrics.skippedTotal.WithLabelValues(
			e.horizonType.String(),
			e.networkName,
			"pre_activation",
		).Inc()

		e.log.WithFields(logrus.Fields{
			"epoch":  nextEpoch,
			"reason": err.Error(),
		}).Trace("Skipping epoch due to activation fork")

		// Mark as processed to move forward.
		e.epochMu.Lock()
		e.lastProcessedEpoch = nextEpoch
		e.initialized = true
		e.epochMu.Unlock()

		return nil, cldataIterator.ErrLocationUpToDate
	}

	// Create position for the epoch.
	position := &cldataIterator.Position{
		Slot:            phase0.Slot(uint64(nextEpoch) * slotsPerEpoch), // First slot of the epoch.
		Epoch:           nextEpoch,
		Direction:       cldataIterator.DirectionForward,
		LookAheadEpochs: e.calculateLookAhead(nextEpoch),
	}

	e.log.WithFields(logrus.Fields{
		"epoch":         nextEpoch,
		"current_epoch": currentEpoch.Number(),
		"current_slot":  currentSlot.Number(),
	}).Debug("Returning next epoch for processing")

	return position, nil
}

// calculateLookAhead returns the epochs to look ahead for pre-fetching.
// NOTE(review): the parameter is the epoch being processed (callers pass
// nextEpoch), not the wallclock's current epoch — the name is misleading.
func (e *EpochIterator) calculateLookAhead(currentEpoch phase0.Epoch) []phase0.Epoch {
	// Look ahead by 1 epoch for pre-fetching.
	return []phase0.Epoch{currentEpoch + 1}
}

// checkActivationFork checks if the epoch is after the activation fork.
func (e *EpochIterator) checkActivationFork(epoch phase0.Epoch) error {
	// Phase0 is always active.
+ if e.activationFork == spec.DataVersionPhase0 { + return nil + } + + metadata := e.pool.Metadata() + if metadata == nil { + return errors.New("metadata not available") + } + + beaconSpec := metadata.Spec + if beaconSpec == nil { + return errors.New("spec not available") + } + + forkEpoch, err := beaconSpec.ForkEpochs.GetByName(e.activationFork.String()) + if err != nil { + return fmt.Errorf("failed to get fork epoch for %s: %w", e.activationFork.String(), err) + } + + if epoch < forkEpoch.Epoch { + return fmt.Errorf("epoch %d is before fork activation at epoch %d", epoch, forkEpoch.Epoch) + } + + return nil +} + +// waitForTriggerPoint waits until the trigger point within the current epoch. +func (e *EpochIterator) waitForTriggerPoint(ctx context.Context) error { + metadata := e.pool.Metadata() + if metadata == nil { + return errors.New("metadata not available") + } + + wallclock := metadata.Wallclock() + currentEpoch := wallclock.Epochs().Current() + + // Calculate the trigger time. + slotsPerEpoch := uint64(metadata.Spec.SlotsPerEpoch) + triggerSlotOffset := uint64(float64(slotsPerEpoch) * e.cfg.TriggerPercent) + epochStartSlot := currentEpoch.Number() * slotsPerEpoch + triggerSlot := epochStartSlot + triggerSlotOffset + + // Get the trigger slot's start time. + triggerSlotInfo := wallclock.Slots().FromNumber(triggerSlot) + triggerTime := triggerSlotInfo.TimeWindow().Start() + + // If we're past the trigger time but haven't processed, check next epoch. + now := time.Now() + if now.After(triggerTime) { + // Check if we need to wait for next epoch. + e.epochMu.RLock() + lastProcessed := e.lastProcessedEpoch + initialized := e.initialized + e.epochMu.RUnlock() + + nextEpoch := phase0.Epoch(currentEpoch.Number() + 1) + + if initialized && nextEpoch <= lastProcessed { + // We've processed this epoch, wait for next epoch's trigger point. 
+ nextEpochStart := (currentEpoch.Number() + 1) * slotsPerEpoch + nextTriggerSlot := nextEpochStart + triggerSlotOffset + nextTriggerSlotInfo := wallclock.Slots().FromNumber(nextTriggerSlot) + triggerTime = nextTriggerSlotInfo.TimeWindow().Start() + } + } + + waitDuration := time.Until(triggerTime) + if waitDuration <= 0 { + // Already past trigger time, no need to wait. + return nil + } + + e.metrics.triggerWaitTotal.WithLabelValues( + e.horizonType.String(), + e.networkName, + ).Inc() + + e.log.WithFields(logrus.Fields{ + "wait_duration": waitDuration.String(), + "trigger_time": triggerTime, + "trigger_slot": triggerSlot, + }).Debug("Waiting for epoch trigger point") + + select { + case <-ctx.Done(): + return ctx.Err() + case <-e.done: + return ErrEpochIteratorClosed + case <-time.After(waitDuration): + return nil + } +} + +// UpdateLocation persists the current position after successful processing. +func (e *EpochIterator) UpdateLocation(ctx context.Context, position *cldataIterator.Position) error { + // For epoch iterators, we store the processed epoch in the head_slot field. + // This reuses the existing HorizonLocation structure. + newLocation := &xatu.HorizonLocation{ + NetworkId: e.networkID, + Type: e.horizonType, + HeadSlot: uint64(position.Epoch), // Store epoch as "head_slot" + FillSlot: 0, // Not used for epoch iterators. + } + + if err := e.coordinator.UpsertHorizonLocation(ctx, newLocation); err != nil { + return fmt.Errorf("failed to upsert horizon location: %w", err) + } + + // Update local tracking. + e.epochMu.Lock() + e.lastProcessedEpoch = position.Epoch + e.initialized = true + e.epochMu.Unlock() + + // Update metrics. 
+ e.metrics.processedTotal.WithLabelValues( + e.horizonType.String(), + e.networkName, + ).Inc() + e.metrics.positionEpoch.WithLabelValues( + e.horizonType.String(), + e.networkName, + ).Set(float64(position.Epoch)) + + e.log.WithFields(logrus.Fields{ + "epoch": position.Epoch, + }).Debug("Updated epoch location") + + return nil +} + +// Stop stops the epoch iterator. +func (e *EpochIterator) Stop(_ context.Context) error { + close(e.done) + + e.log.Info("Epoch iterator stopped") + + return nil +} + +// HorizonType returns the horizon type this iterator is for. +func (e *EpochIterator) HorizonType() xatu.HorizonType { + return e.horizonType +} + +// LastProcessedEpoch returns the last processed epoch. +func (e *EpochIterator) LastProcessedEpoch() phase0.Epoch { + e.epochMu.RLock() + defer e.epochMu.RUnlock() + + return e.lastProcessedEpoch +} + +// Verify EpochIterator implements the Iterator interface. +var _ cldataIterator.Iterator = (*EpochIterator)(nil) diff --git a/pkg/horizon/iterator/fill.go b/pkg/horizon/iterator/fill.go new file mode 100644 index 000000000..eb437323f --- /dev/null +++ b/pkg/horizon/iterator/fill.go @@ -0,0 +1,625 @@ +package iterator + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/horizon/coordinator" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "golang.org/x/time/rate" + + cldataIterator "github.com/ethpandaops/xatu/pkg/cldata/iterator" +) + +const ( + // DefaultLagSlots is the default number of slots to stay behind HEAD. + DefaultLagSlots = 32 + // DefaultMaxBoundedSlots is the default maximum range of slots to process per cycle. + DefaultMaxBoundedSlots = 7200 + // DefaultFillRateLimit is the default rate limit in slots per second. 
+ DefaultFillRateLimit = 10.0 +) + +// FillIteratorConfig holds configuration for the FILL iterator. +type FillIteratorConfig struct { + // Enabled indicates if this iterator is enabled. + Enabled bool `yaml:"enabled" default:"true"` + // LagSlots is the number of slots to stay behind HEAD. + LagSlots uint64 `yaml:"lagSlots" default:"32"` + // MaxBoundedSlots is the maximum number of slots to process in one bounded range. + MaxBoundedSlots uint64 `yaml:"maxBoundedSlots" default:"7200"` + // RateLimit is the maximum number of slots to process per second. + RateLimit float64 `yaml:"rateLimit" default:"10.0"` +} + +// Validate validates the configuration. +func (c *FillIteratorConfig) Validate() error { + if c.LagSlots == 0 { + c.LagSlots = DefaultLagSlots + } + + if c.MaxBoundedSlots == 0 { + c.MaxBoundedSlots = DefaultMaxBoundedSlots + } + + if c.RateLimit <= 0 { + c.RateLimit = DefaultFillRateLimit + } + + return nil +} + +// FillIterator is an iterator that fills in gaps by walking slots from fill_slot toward HEAD - LAG. +// It processes historical slots that may have been missed by the HEAD iterator. +type FillIterator struct { + log logrus.FieldLogger + pool *ethereum.BeaconNodePool + coordinator *coordinator.Client + config *FillIteratorConfig + metrics *FillIteratorMetrics + + // horizonType is the type of deriver this iterator is for. + horizonType xatu.HorizonType + // networkID is the network identifier. + networkID string + // networkName is the human-readable network name. + networkName string + + // activationFork is the fork at which the deriver becomes active. + activationFork spec.DataVersion + + // currentSlot tracks the current slot being processed. + currentSlot phase0.Slot + currentSlotMu sync.RWMutex + + // limiter controls the rate of slot processing. + limiter *rate.Limiter + + // done signals iterator shutdown. + done chan struct{} + + // started indicates if the iterator has been started. 
+ started bool +} + +// FillIteratorMetrics tracks metrics for the FILL iterator. +type FillIteratorMetrics struct { + processedTotal *prometheus.CounterVec + skippedTotal *prometheus.CounterVec + positionSlot *prometheus.GaugeVec + targetSlot *prometheus.GaugeVec + slotsRemaining *prometheus.GaugeVec + rateLimitWaitTotal prometheus.Counter + cyclesCompleteTotal *prometheus.CounterVec +} + +var ( + fillIteratorMetrics *FillIteratorMetrics + fillIteratorMetricsOnce sync.Once +) + +// NewFillIteratorMetrics creates new metrics for the FILL iterator. +func NewFillIteratorMetrics(namespace string) *FillIteratorMetrics { + fillIteratorMetricsOnce.Do(func() { + fillIteratorMetrics = &FillIteratorMetrics{ + processedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "processed_total", + Help: "Total number of slots processed by the FILL iterator", + }, []string{"deriver", "network"}), + + skippedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "skipped_total", + Help: "Total number of slots skipped by the FILL iterator", + }, []string{"deriver", "network", "reason"}), + + positionSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "position_slot", + Help: "Current slot position of the FILL iterator", + }, []string{"deriver", "network"}), + + targetSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "target_slot", + Help: "Target slot the FILL iterator is working toward (HEAD - LAG)", + }, []string{"deriver", "network"}), + + slotsRemaining: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "slots_remaining", + Help: "Number of slots remaining until caught up with target", + }, []string{"deriver", "network"}), + + rateLimitWaitTotal: 
prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "rate_limit_wait_total", + Help: "Total number of times the FILL iterator waited for rate limit", + }), + + cyclesCompleteTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "fill_iterator", + Name: "cycles_complete_total", + Help: "Total number of fill cycles completed (caught up to target)", + }, []string{"deriver", "network"}), + } + + prometheus.MustRegister( + fillIteratorMetrics.processedTotal, + fillIteratorMetrics.skippedTotal, + fillIteratorMetrics.positionSlot, + fillIteratorMetrics.targetSlot, + fillIteratorMetrics.slotsRemaining, + fillIteratorMetrics.rateLimitWaitTotal, + fillIteratorMetrics.cyclesCompleteTotal, + ) + }) + + return fillIteratorMetrics +} + +// NewFillIterator creates a new FILL iterator. +func NewFillIterator( + log logrus.FieldLogger, + pool *ethereum.BeaconNodePool, + coordinatorClient *coordinator.Client, + config *FillIteratorConfig, + horizonType xatu.HorizonType, + networkID string, + networkName string, +) *FillIterator { + if config == nil { + config = &FillIteratorConfig{} + } + + _ = config.Validate() + + return &FillIterator{ + log: log.WithFields(logrus.Fields{ + "component": "iterator/fill", + "horizon_type": horizonType.String(), + }), + pool: pool, + coordinator: coordinatorClient, + config: config, + horizonType: horizonType, + networkID: networkID, + networkName: networkName, + limiter: rate.NewLimiter(rate.Limit(config.RateLimit), 1), + metrics: NewFillIteratorMetrics("xatu_horizon"), + done: make(chan struct{}), + } +} + +// Start initializes the iterator with the activation fork version. 
+func (f *FillIterator) Start(ctx context.Context, activationFork spec.DataVersion) error { + f.activationFork = activationFork + + // Initialize current slot from coordinator + location, err := f.coordinator.GetHorizonLocation(ctx, f.horizonType, f.networkID) + if err != nil { + // If location doesn't exist, we'll start from activation fork slot + f.log.WithError(err).Debug("No existing location found, will start from activation fork") + + location = nil + } + + if location != nil && location.FillSlot > 0 { + f.currentSlot = phase0.Slot(location.FillSlot) + } else { + // Start from activation fork slot + activationSlot, err := f.getActivationSlot() + if err != nil { + return fmt.Errorf("failed to get activation slot: %w", err) + } + + f.currentSlot = activationSlot + } + + f.started = true + + f.log.WithFields(logrus.Fields{ + "activation_fork": activationFork.String(), + "network_id": f.networkID, + "start_slot": f.currentSlot, + "lag_slots": f.config.LagSlots, + "rate_limit": f.config.RateLimit, + }).Info("FILL iterator started") + + return nil +} + +// Next returns the next position to process. +// It walks slots forward from fill_slot toward HEAD - LAG. +func (f *FillIterator) Next(ctx context.Context) (*cldataIterator.Position, error) { + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-f.done: + return nil, ErrIteratorClosed + default: + } + + // Get target slot (HEAD - LAG) + targetSlot, err := f.getTargetSlot(ctx) + if err != nil { + f.log.WithError(err).Warn("Failed to get target slot, will retry") + time.Sleep(time.Second) + + continue + } + + f.currentSlotMu.RLock() + currentSlot := f.currentSlot + f.currentSlotMu.RUnlock() + + // Update metrics + f.metrics.targetSlot.WithLabelValues(f.horizonType.String(), f.networkName). + Set(float64(targetSlot)) + f.metrics.positionSlot.WithLabelValues(f.horizonType.String(), f.networkName). 
+ Set(float64(currentSlot)) + + if currentSlot < targetSlot { + remaining := uint64(targetSlot) - uint64(currentSlot) + f.metrics.slotsRemaining.WithLabelValues(f.horizonType.String(), f.networkName). + Set(float64(remaining)) + } else { + f.metrics.slotsRemaining.WithLabelValues(f.horizonType.String(), f.networkName). + Set(0) + } + + // Check if we've caught up to target + if currentSlot >= targetSlot { + f.metrics.cyclesCompleteTotal.WithLabelValues(f.horizonType.String(), f.networkName).Inc() + + f.log.WithFields(logrus.Fields{ + "current_slot": currentSlot, + "target_slot": targetSlot, + }).Debug("FILL iterator caught up to target, waiting for new slots") + + // Wait before checking again + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-f.done: + return nil, ErrIteratorClosed + case <-time.After(time.Duration(12) * time.Second): // Wait roughly one slot + continue + } + } + + // Apply rate limiting + if rateLimitErr := f.limiter.Wait(ctx); rateLimitErr != nil { + if errors.Is(rateLimitErr, context.Canceled) { + return nil, rateLimitErr + } + + f.log.WithError(rateLimitErr).Warn("Rate limiter wait failed") + + continue + } + + f.metrics.rateLimitWaitTotal.Inc() + + // Check if slot is before activation fork + if forkErr := f.checkActivationFork(currentSlot); forkErr != nil { + f.metrics.skippedTotal.WithLabelValues( + f.horizonType.String(), + f.networkName, + "pre_activation", + ).Inc() + + f.log.WithFields(logrus.Fields{ + "slot": currentSlot, + "reason": forkErr.Error(), + }).Trace("Skipping slot due to activation fork") + + // Move to next slot + f.incrementCurrentSlot() + + continue + } + + // Check if slot was already processed by either HEAD or FILL iterator. + // Both iterators use the same coordinator to track progress. 
+ var alreadyProcessed bool + + alreadyProcessed, err = f.isSlotProcessedByHead(ctx, currentSlot) + if err != nil { + f.log.WithError(err).Warn("Failed to check if slot was already processed") + // Continue anyway, let the deriver handle it. + } else if alreadyProcessed { + f.metrics.skippedTotal.WithLabelValues( + f.horizonType.String(), + f.networkName, + "already_processed", + ).Inc() + + f.log.WithField("slot", currentSlot). + Trace("Skipping slot already processed by another iterator") + + // Move to next slot + f.incrementCurrentSlot() + + continue + } + + // Apply bounded range limit + if f.config.MaxBoundedSlots > 0 && currentSlot+phase0.Slot(f.config.MaxBoundedSlots) < targetSlot { + // We're too far behind, jump forward + newSlot := phase0.Slot(uint64(targetSlot) - f.config.MaxBoundedSlots) + + f.log.WithFields(logrus.Fields{ + "current_slot": currentSlot, + "new_slot": newSlot, + "target_slot": targetSlot, + "max_bounded": f.config.MaxBoundedSlots, + }).Info("FILL iterator jumping forward due to bounded range limit") + + currentSlot = f.setCurrentSlot(newSlot) + } + + // Create position for the slot + slotsPerEpoch := f.slotsPerEpoch() + position := &cldataIterator.Position{ + Slot: currentSlot, + Epoch: phase0.Epoch(uint64(currentSlot) / slotsPerEpoch), + Direction: cldataIterator.DirectionBackward, // FILL processes historical data + } + + f.log.WithFields(logrus.Fields{ + "slot": currentSlot, + "epoch": position.Epoch, + "target_slot": targetSlot, + }).Debug("Processing fill slot") + + // Advance current slot for next iteration + f.incrementCurrentSlot() + + return position, nil + } +} + +// getTargetSlot returns the target slot (HEAD - LAG). 
+func (f *FillIterator) getTargetSlot(ctx context.Context) (phase0.Slot, error) { + // Get current head slot from coordinator + location, err := f.coordinator.GetHorizonLocation(ctx, f.horizonType, f.networkID) + if err != nil { + return 0, fmt.Errorf("failed to get horizon location: %w", err) + } + + if location == nil || location.HeadSlot == 0 { + // No head slot recorded yet, use wallclock + return f.getWallclockHeadSlot() + } + + headSlot := phase0.Slot(location.HeadSlot) + + // Calculate target: HEAD - LAG + if uint64(headSlot) <= f.config.LagSlots { + return 0, nil + } + + return phase0.Slot(uint64(headSlot) - f.config.LagSlots), nil +} + +// getWallclockHeadSlot returns the current head slot based on wallclock time. +func (f *FillIterator) getWallclockHeadSlot() (phase0.Slot, error) { + metadata := f.pool.Metadata() + if metadata == nil { + return 0, errors.New("metadata not available") + } + + wallclock := metadata.Wallclock() + if wallclock == nil { + return 0, errors.New("wallclock not available") + } + + slot := wallclock.Slots().Current() + + return phase0.Slot(slot.Number()), nil +} + +// getActivationSlot returns the slot at which the activation fork started. +func (f *FillIterator) getActivationSlot() (phase0.Slot, error) { + // Phase0 is always active from slot 0 + if f.activationFork == spec.DataVersionPhase0 { + return 0, nil + } + + metadata := f.pool.Metadata() + if metadata == nil { + return 0, errors.New("metadata not available") + } + + beaconSpec := metadata.Spec + if beaconSpec == nil { + return 0, errors.New("spec not available") + } + + forkEpoch, err := beaconSpec.ForkEpochs.GetByName(f.activationFork.String()) + if err != nil { + return 0, fmt.Errorf("failed to get fork epoch for %s: %w", f.activationFork.String(), err) + } + + slotsPerEpoch := uint64(beaconSpec.SlotsPerEpoch) + + return phase0.Slot(uint64(forkEpoch.Epoch) * slotsPerEpoch), nil +} + +// setCurrentSlot atomically sets the current slot and returns the new value. 
+func (f *FillIterator) setCurrentSlot(slot phase0.Slot) phase0.Slot { + f.currentSlotMu.Lock() + defer f.currentSlotMu.Unlock() + + f.currentSlot = slot + + return slot +} + +// incrementCurrentSlot atomically increments the current slot. +func (f *FillIterator) incrementCurrentSlot() { + f.currentSlotMu.Lock() + defer f.currentSlotMu.Unlock() + + f.currentSlot++ +} + +// isSlotProcessedByHead checks if a slot has already been processed by either iterator. +// Both iterators coordinate through the coordinator service: +// - HEAD updates head_slot after processing real-time blocks +// - FILL updates fill_slot after processing historical slots +// This check primarily catches slots that HEAD processed (real-time) before FILL reached them. +func (f *FillIterator) isSlotProcessedByHead(ctx context.Context, slot phase0.Slot) (bool, error) { + location, err := f.coordinator.GetHorizonLocation(ctx, f.horizonType, f.networkID) + if err != nil { + // If location doesn't exist, no slots have been processed. + return false, nil //nolint:nilerr // Not found is not an error for this check. + } + + if location == nil { + return false, nil + } + + // Check if slot was already processed by HEAD (real-time processing). + if location.HeadSlot > 0 && uint64(slot) <= location.HeadSlot { + return true, nil + } + + // Also check fill_slot for consistency - FILL shouldn't reprocess its own work + // if it restarts from a stale position. + if location.FillSlot > 0 && uint64(slot) <= location.FillSlot { + return true, nil + } + + return false, nil +} + +// checkActivationFork checks if the slot is at or after the activation fork. 
+func (f *FillIterator) checkActivationFork(slot phase0.Slot) error { + // Phase0 is always active + if f.activationFork == spec.DataVersionPhase0 { + return nil + } + + activationSlot, err := f.getActivationSlot() + if err != nil { + return err + } + + if slot < activationSlot { + return fmt.Errorf("slot %d is before fork activation at slot %d", slot, activationSlot) + } + + return nil +} + +// UpdateLocation persists the current position after successful processing. +func (f *FillIterator) UpdateLocation(ctx context.Context, position *cldataIterator.Position) error { + // Get current location from coordinator + location, err := f.coordinator.GetHorizonLocation(ctx, f.horizonType, f.networkID) + if err != nil { + // Treat as new location if not found + location = nil + } + + // Create or update the location - only update fill_slot + var headSlot uint64 + + var fillSlot uint64 + + if location != nil { + headSlot = location.HeadSlot + // Only update fill_slot if the new position is greater + fillSlot = max(uint64(position.Slot), location.FillSlot) + } else { + // New location - initialize both + headSlot = uint64(position.Slot) + fillSlot = uint64(position.Slot) + } + + newLocation := &xatu.HorizonLocation{ + NetworkId: f.networkID, + Type: f.horizonType, + HeadSlot: headSlot, + FillSlot: fillSlot, + } + + if err := f.coordinator.UpsertHorizonLocation(ctx, newLocation); err != nil { + return fmt.Errorf("failed to upsert horizon location: %w", err) + } + + // Update metrics + f.metrics.processedTotal.WithLabelValues( + f.horizonType.String(), + f.networkName, + ).Inc() + f.metrics.positionSlot.WithLabelValues( + f.horizonType.String(), + f.networkName, + ).Set(float64(position.Slot)) + + f.log.WithFields(logrus.Fields{ + "slot": position.Slot, + "head_slot": headSlot, + "fill_slot": fillSlot, + }).Debug("Updated horizon location (fill)") + + return nil +} + +// Stop stops the FILL iterator. 
+func (f *FillIterator) Stop(_ context.Context) error { + close(f.done) + + f.log.Info("FILL iterator stopped") + + return nil +} + +func (f *FillIterator) slotsPerEpoch() uint64 { + metadata := f.pool.Metadata() + if metadata != nil && metadata.Spec != nil && metadata.Spec.SlotsPerEpoch > 0 { + return uint64(metadata.Spec.SlotsPerEpoch) + } + + return 32 +} + +// CurrentSlot returns the current slot position of the iterator. +func (f *FillIterator) CurrentSlot() phase0.Slot { + f.currentSlotMu.RLock() + defer f.currentSlotMu.RUnlock() + + return f.currentSlot +} + +// HorizonType returns the horizon type this iterator is for. +func (f *FillIterator) HorizonType() xatu.HorizonType { + return f.horizonType +} + +// Config returns the iterator configuration. +func (f *FillIterator) Config() *FillIteratorConfig { + return f.config +} + +// Verify FillIterator implements the Iterator interface. +var _ cldataIterator.Iterator = (*FillIterator)(nil) diff --git a/pkg/horizon/iterator/head.go b/pkg/horizon/iterator/head.go new file mode 100644 index 000000000..3ba3a6a21 --- /dev/null +++ b/pkg/horizon/iterator/head.go @@ -0,0 +1,432 @@ +package iterator + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/horizon/coordinator" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/ethpandaops/xatu/pkg/horizon/subscription" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + cldataIterator "github.com/ethpandaops/xatu/pkg/cldata/iterator" +) + +var ( + // ErrIteratorClosed is returned when the iterator is closed. + ErrIteratorClosed = errors.New("iterator closed") + // ErrSlotSkipped is returned when a slot should be skipped (not an error condition). 
+ ErrSlotSkipped = errors.New("slot skipped") +) + +// HeadIteratorConfig holds configuration for the HEAD iterator. +type HeadIteratorConfig struct { + // Enabled indicates if this iterator is enabled. + Enabled bool `yaml:"enabled" default:"true"` +} + +// Validate validates the configuration. +func (c *HeadIteratorConfig) Validate() error { + return nil +} + +// HeadIterator is an iterator that tracks the HEAD of the beacon chain. +// It receives real-time block events from SSE subscriptions and processes +// them in order, coordinating with the server to track progress. +type HeadIterator struct { + log logrus.FieldLogger + pool *ethereum.BeaconNodePool + coordinator *coordinator.Client + metrics *HeadIteratorMetrics + + // horizonType is the type of deriver this iterator is for. + horizonType xatu.HorizonType + // networkID is the network identifier. + networkID string + // networkName is the human-readable network name. + networkName string + + // blockEvents receives deduplicated block events from SSE. + blockEvents <-chan subscription.BlockEvent + + // activationFork is the fork at which the deriver becomes active. + activationFork spec.DataVersion + + // currentPosition tracks the last processed position. + currentPosition *cldataIterator.Position + positionMu sync.RWMutex + + // done signals iterator shutdown. + done chan struct{} +} + +// HeadIteratorMetrics tracks metrics for the HEAD iterator. +type HeadIteratorMetrics struct { + processedTotal *prometheus.CounterVec + skippedTotal *prometheus.CounterVec + lastProcessedAt *prometheus.GaugeVec + positionSlot *prometheus.GaugeVec + eventsQueuedSize prometheus.Gauge +} + +var ( + headIteratorMetrics *HeadIteratorMetrics + headIteratorMetricsOnce sync.Once +) + +// NewHeadIteratorMetrics creates new metrics for the HEAD iterator. 
+func NewHeadIteratorMetrics(namespace string) *HeadIteratorMetrics { + headIteratorMetricsOnce.Do(func() { + headIteratorMetrics = &HeadIteratorMetrics{ + processedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "head_iterator", + Name: "processed_total", + Help: "Total number of slots processed by the HEAD iterator", + }, []string{"deriver", "network"}), + + skippedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "head_iterator", + Name: "skipped_total", + Help: "Total number of slots skipped (already processed)", + }, []string{"deriver", "network", "reason"}), + + lastProcessedAt: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "head_iterator", + Name: "last_processed_at", + Help: "Unix timestamp of last processed slot", + }, []string{"deriver", "network"}), + + positionSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "head_iterator", + Name: "position_slot", + Help: "Current slot position of the HEAD iterator", + }, []string{"deriver", "network"}), + + eventsQueuedSize: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "head_iterator", + Name: "events_queued", + Help: "Number of block events queued for processing", + }), + } + + prometheus.MustRegister( + headIteratorMetrics.processedTotal, + headIteratorMetrics.skippedTotal, + headIteratorMetrics.lastProcessedAt, + headIteratorMetrics.positionSlot, + headIteratorMetrics.eventsQueuedSize, + ) + }) + + return headIteratorMetrics +} + +// NewHeadIterator creates a new HEAD iterator. 
+func NewHeadIterator( + log logrus.FieldLogger, + pool *ethereum.BeaconNodePool, + coordinatorClient *coordinator.Client, + horizonType xatu.HorizonType, + networkID string, + networkName string, + blockEvents <-chan subscription.BlockEvent, +) *HeadIterator { + return &HeadIterator{ + log: log.WithFields(logrus.Fields{ + "component": "iterator/head", + "horizon_type": horizonType.String(), + }), + pool: pool, + coordinator: coordinatorClient, + horizonType: horizonType, + networkID: networkID, + networkName: networkName, + blockEvents: blockEvents, + metrics: NewHeadIteratorMetrics("xatu_horizon"), + done: make(chan struct{}), + } +} + +// Start initializes the iterator with the activation fork version. +func (h *HeadIterator) Start(_ context.Context, activationFork spec.DataVersion) error { + h.activationFork = activationFork + + h.log.WithFields(logrus.Fields{ + "activation_fork": activationFork.String(), + "network_id": h.networkID, + }).Info("HEAD iterator started") + + return nil +} + +// Next returns the next position to process. +// It blocks until a block event is received from the SSE subscription, +// then returns the slot for processing. +func (h *HeadIterator) Next(ctx context.Context) (*cldataIterator.Position, error) { + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-h.done: + return nil, ErrIteratorClosed + case event, ok := <-h.blockEvents: + if !ok { + return nil, ErrIteratorClosed + } + + h.metrics.eventsQueuedSize.Set(float64(len(h.blockEvents))) + + // Check if we should process this slot. + position, err := h.processBlockEvent(ctx, &event) + if err != nil { + if errors.Is(err, ErrSlotSkipped) { + // Slot was skipped (duplicate or already processed), continue to next. + continue + } + + h.log.WithError(err).WithField("slot", event.Slot). 
+ Warn("Failed to process block event") + + continue + } + + return position, nil + } + } +} + +// processBlockEvent processes a block event and returns a position if it should be processed. +// Returns ErrSlotSkipped if the slot should be skipped (not an error condition). +func (h *HeadIterator) processBlockEvent(ctx context.Context, event *subscription.BlockEvent) (*cldataIterator.Position, error) { + blockRootStr := event.BlockRoot.String() + + // Check if we need to skip based on activation fork. + if err := h.checkActivationFork(event.Slot); err != nil { + h.metrics.skippedTotal.WithLabelValues( + h.horizonType.String(), + h.networkName, + "pre_activation", + ).Inc() + + h.log.WithFields(logrus.Fields{ + "slot": event.Slot, + "block_root": blockRootStr, + "reason": err.Error(), + }).Trace("Skipping block event due to activation fork") + + return nil, ErrSlotSkipped + } + + // Check coordinator to see if this slot was already processed. + alreadyProcessed, err := h.isSlotProcessed(ctx, event.Slot) + if err != nil { + return nil, fmt.Errorf("failed to check if slot is processed: %w", err) + } + + if alreadyProcessed { + h.metrics.skippedTotal.WithLabelValues( + h.horizonType.String(), + h.networkName, + "already_processed", + ).Inc() + + h.log.WithFields(logrus.Fields{ + "slot": event.Slot, + "block_root": blockRootStr, + }).Trace("Skipping already processed slot") + + return nil, ErrSlotSkipped + } + + // Create position for the slot. + slotsPerEpoch := h.slotsPerEpoch() + position := &cldataIterator.Position{ + Slot: event.Slot, + Epoch: phase0.Epoch(uint64(event.Slot) / slotsPerEpoch), + Direction: cldataIterator.DirectionForward, + } + + h.log.WithFields(logrus.Fields{ + "slot": event.Slot, + "block_root": blockRootStr, + "epoch": position.Epoch, + }).Debug("Processing block event") + + return position, nil +} + +// checkActivationFork checks if the slot is after the activation fork. 
+func (h *HeadIterator) checkActivationFork(slot phase0.Slot) error { + // Phase0 is always active. + if h.activationFork == spec.DataVersionPhase0 { + return nil + } + + metadata := h.pool.Metadata() + if metadata == nil { + return errors.New("metadata not available") + } + + beaconSpec := metadata.Spec + if beaconSpec == nil { + return errors.New("spec not available") + } + + forkEpoch, err := beaconSpec.ForkEpochs.GetByName(h.activationFork.String()) + if err != nil { + return fmt.Errorf("failed to get fork epoch for %s: %w", h.activationFork.String(), err) + } + + slotsPerEpoch := uint64(beaconSpec.SlotsPerEpoch) + forkSlot := phase0.Slot(uint64(forkEpoch.Epoch) * slotsPerEpoch) + + if slot < forkSlot { + return fmt.Errorf("slot %d is before fork activation at slot %d", slot, forkSlot) + } + + return nil +} + +// isSlotProcessed checks if a slot has already been processed by either HEAD or FILL iterator. +// Both iterators coordinate through the coordinator service: +// - HEAD updates head_slot after processing real-time blocks +// - FILL updates fill_slot after processing historical slots +// A slot is considered processed if slot <= head_slot OR slot <= fill_slot. +func (h *HeadIterator) isSlotProcessed(ctx context.Context, slot phase0.Slot) (bool, error) { + location, err := h.coordinator.GetHorizonLocation(ctx, h.horizonType, h.networkID) + if err != nil { + // If location doesn't exist, the slot hasn't been processed. + // Check if it's a "not found" error and return false. + // Otherwise, return the error. + // Note: The coordinator client should return nil location for not found. + return false, fmt.Errorf("failed to get horizon location: %w", err) + } + + if location == nil { + // No location stored yet, nothing has been processed. + return false, nil + } + + // Check if this slot was processed by HEAD (slot <= head_slot) + // or by FILL (slot <= fill_slot). + // Both iterators skip slots processed by the other to avoid duplicates. 
+ if uint64(slot) <= location.HeadSlot { + return true, nil + } + + if uint64(slot) <= location.FillSlot { + return true, nil + } + + return false, nil +} + +// UpdateLocation persists the current position after successful processing. +func (h *HeadIterator) UpdateLocation(ctx context.Context, position *cldataIterator.Position) error { + // Get current location from coordinator. + location, err := h.coordinator.GetHorizonLocation(ctx, h.horizonType, h.networkID) + if err != nil { + // Treat as new location if not found. + location = nil + } + + // Create or update the location. + var headSlot uint64 + + var fillSlot uint64 + + if location != nil { + fillSlot = location.FillSlot + + // Only update head_slot if the new position is greater. + headSlot = max(uint64(position.Slot), location.HeadSlot) + } else { + // New location - initialize both to current slot. + headSlot = uint64(position.Slot) + fillSlot = uint64(position.Slot) + } + + newLocation := &xatu.HorizonLocation{ + NetworkId: h.networkID, + Type: h.horizonType, + HeadSlot: headSlot, + FillSlot: fillSlot, + } + + if err := h.coordinator.UpsertHorizonLocation(ctx, newLocation); err != nil { + return fmt.Errorf("failed to upsert horizon location: %w", err) + } + + // Update current position. + h.positionMu.Lock() + h.currentPosition = position + h.positionMu.Unlock() + + // Update metrics. 
+ h.metrics.processedTotal.WithLabelValues( + h.horizonType.String(), + h.networkName, + ).Inc() + h.metrics.positionSlot.WithLabelValues( + h.horizonType.String(), + h.networkName, + ).Set(float64(position.Slot)) + h.metrics.lastProcessedAt.WithLabelValues( + h.horizonType.String(), + h.networkName, + ).Set(float64(time.Now().Unix())) + + h.log.WithFields(logrus.Fields{ + "slot": position.Slot, + "head_slot": headSlot, + "fill_slot": fillSlot, + }).Debug("Updated horizon location") + + return nil +} + +func (h *HeadIterator) slotsPerEpoch() uint64 { + metadata := h.pool.Metadata() + if metadata != nil && metadata.Spec != nil && metadata.Spec.SlotsPerEpoch > 0 { + return uint64(metadata.Spec.SlotsPerEpoch) + } + + return 32 +} + +// Stop stops the HEAD iterator. +func (h *HeadIterator) Stop(_ context.Context) error { + close(h.done) + + h.log.Info("HEAD iterator stopped") + + return nil +} + +// CurrentPosition returns the current position of the iterator. +func (h *HeadIterator) CurrentPosition() *cldataIterator.Position { + h.positionMu.RLock() + defer h.positionMu.RUnlock() + + return h.currentPosition +} + +// HorizonType returns the horizon type this iterator is for. +func (h *HeadIterator) HorizonType() xatu.HorizonType { + return h.horizonType +} + +// Verify HeadIterator implements the Iterator interface. 
+var _ cldataIterator.Iterator = (*HeadIterator)(nil) diff --git a/pkg/horizon/metrics.go b/pkg/horizon/metrics.go new file mode 100644 index 000000000..1ae7a61f9 --- /dev/null +++ b/pkg/horizon/metrics.go @@ -0,0 +1,86 @@ +package horizon + +import ( + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/prometheus/client_golang/prometheus" +) + +type Metrics struct { + decoratedEventTotal *prometheus.CounterVec + + // Slot tracking gauges + headSlot *prometheus.GaugeVec + fillSlot *prometheus.GaugeVec + lagSlots *prometheus.GaugeVec + + // Block processing counter + blocksDerivedTotal *prometheus.CounterVec +} + +func NewMetrics(namespace string) *Metrics { + m := &Metrics{ + decoratedEventTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "decorated_event_total", + Help: "Total number of decorated events created by horizon", + }, []string{"type", "network"}), + + headSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "head_slot", + Help: "Current HEAD slot position being processed by horizon", + }, []string{"deriver", "network"}), + + fillSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "fill_slot", + Help: "Current FILL slot position for catch-up processing", + }, []string{"deriver", "network"}), + + lagSlots: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "lag_slots", + Help: "Number of slots FILL is behind HEAD", + }, []string{"deriver", "network"}), + + blocksDerivedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "blocks_derived_total", + Help: "Total number of blocks derived by horizon", + }, []string{"deriver", "network", "iterator"}), + } + + prometheus.MustRegister( + m.decoratedEventTotal, + m.headSlot, + m.fillSlot, + m.lagSlots, + m.blocksDerivedTotal, + ) + + return m +} + +func (m *Metrics) AddDecoratedEvent(count int, eventType *xatu.DecoratedEvent, network string) { + 
m.decoratedEventTotal.WithLabelValues(eventType.Event.Name.String(), network).Add(float64(count)) +} + +// SetHeadSlot sets the current HEAD slot position for a deriver. +func (m *Metrics) SetHeadSlot(slot uint64, deriver, network string) { + m.headSlot.WithLabelValues(deriver, network).Set(float64(slot)) +} + +// SetFillSlot sets the current FILL slot position for a deriver. +func (m *Metrics) SetFillSlot(slot uint64, deriver, network string) { + m.fillSlot.WithLabelValues(deriver, network).Set(float64(slot)) +} + +// SetLagSlots sets the number of slots FILL is behind HEAD for a deriver. +func (m *Metrics) SetLagSlots(lag uint64, deriver, network string) { + m.lagSlots.WithLabelValues(deriver, network).Set(float64(lag)) +} + +// AddBlocksDerived increments the count of blocks derived. +func (m *Metrics) AddBlocksDerived(count int, deriver, network, iterator string) { + m.blocksDerivedTotal.WithLabelValues(deriver, network, iterator).Add(float64(count)) +} diff --git a/pkg/horizon/overrides.go b/pkg/horizon/overrides.go new file mode 100644 index 000000000..508edd3ad --- /dev/null +++ b/pkg/horizon/overrides.go @@ -0,0 +1,68 @@ +package horizon + +import ( + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" +) + +type Override struct { + MetricsAddr struct { + Enabled bool + Value string + } + XatuOutputAuth struct { + Enabled bool + Value string + } + CoordinatorAuth struct { + Enabled bool + Value string + } + // BeaconNodeURLs allows overriding beacon node URLs via environment variables. + // When enabled, it replaces all configured beacon nodes with a single node. + BeaconNodeURLs struct { + Enabled bool + Value string + } + // BeaconNodeHeaders allows overriding beacon node authorization headers. + BeaconNodeHeaders struct { + Enabled bool + Value string + } + // NetworkName allows overriding the network name. + NetworkName struct { + Enabled bool + Value string + } +} + +// ApplyBeaconNodeOverrides applies beacon node overrides to the config. 
+func (o *Override) ApplyBeaconNodeOverrides(cfg *ethereum.Config) { + if o == nil { + return + } + + if o.BeaconNodeURLs.Enabled && o.BeaconNodeURLs.Value != "" { + // Replace all beacon nodes with the override + cfg.BeaconNodes = []ethereum.BeaconNodeConfig{ + { + Name: "override-node", + Address: o.BeaconNodeURLs.Value, + Headers: make(map[string]string), + }, + } + } + + if o.BeaconNodeHeaders.Enabled && o.BeaconNodeHeaders.Value != "" { + for i := range cfg.BeaconNodes { + if cfg.BeaconNodes[i].Headers == nil { + cfg.BeaconNodes[i].Headers = make(map[string]string) + } + + cfg.BeaconNodes[i].Headers["Authorization"] = o.BeaconNodeHeaders.Value + } + } + + if o.NetworkName.Enabled && o.NetworkName.Value != "" { + cfg.OverrideNetworkName = o.NetworkName.Value + } +} diff --git a/pkg/horizon/reorg_helpers.go b/pkg/horizon/reorg_helpers.go new file mode 100644 index 000000000..7e0d18275 --- /dev/null +++ b/pkg/horizon/reorg_helpers.go @@ -0,0 +1,102 @@ +package horizon + +import ( + "context" + "fmt" + + "github.com/ethpandaops/xatu/pkg/horizon/subscription" + "github.com/ethpandaops/xatu/pkg/proto/xatu" + "github.com/sirupsen/logrus" +) + +var blockBasedHorizonTypes = []xatu.HorizonType{ + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION, + xatu.HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION, +} + +func reorgSlotRange(event subscription.ReorgEvent) (uint64, uint64) { 
+ end := uint64(event.Slot) + start := end + + if event.Depth > 0 { + depth := uint64(event.Depth) + if depth > end+1 { + start = 0 + } else { + start = end - (depth - 1) + } + } + + return start, end +} + +func rollbackSlot(start uint64) uint64 { + if start == 0 { + return 0 + } + + return start - 1 +} + +func (h *Horizon) rollbackReorgLocations(ctx context.Context, start uint64) { + if h.coordinatorClient == nil { + return + } + + rollback := rollbackSlot(start) + + for _, horizonType := range blockBasedHorizonTypes { + location, err := h.coordinatorClient.GetHorizonLocation(ctx, horizonType, h.networkID()) + if err != nil { + h.log.WithError(err).WithField("horizon_type", horizonType.String()). + Debug("Failed to fetch horizon location for reorg rollback") + continue + } + + if location == nil { + continue + } + + updated := false + + if location.HeadSlot > rollback { + location.HeadSlot = rollback + updated = true + } + + if location.FillSlot > rollback { + location.FillSlot = rollback + updated = true + } + + if !updated { + continue + } + + if err := h.coordinatorClient.UpsertHorizonLocation(ctx, location); err != nil { + h.log.WithError(err).WithField("horizon_type", horizonType.String()). 
+ Warn("Failed to rollback horizon location after reorg") + } else { + h.log.WithFields(logrus.Fields{ + "horizon_type": horizonType.String(), + "head_slot": location.HeadSlot, + "fill_slot": location.FillSlot, + }).Info("Rolled back horizon location after reorg") + } + } +} + +func (h *Horizon) networkID() string { + if h.beaconPool == nil || h.beaconPool.Metadata() == nil { + return "" + } + + return fmt.Sprintf("%d", h.beaconPool.Metadata().Network.ID) +} diff --git a/pkg/horizon/reorg_labels.go b/pkg/horizon/reorg_labels.go new file mode 100644 index 000000000..5c12db844 --- /dev/null +++ b/pkg/horizon/reorg_labels.go @@ -0,0 +1,81 @@ +package horizon + +import ( + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +const reorgDetectedLabelKey = "reorg_detected" + +func (h *Horizon) markReorgMetadata(events []*xatu.DecoratedEvent) { + if h.reorgTracker == nil { + return + } + + for _, event := range events { + slot, ok := slotFromDecoratedEvent(event) + if !ok { + continue + } + + if !h.reorgTracker.IsReorgSlot(slot) { + continue + } + + if event.Meta == nil || event.Meta.Client == nil { + continue + } + + if event.Meta.Client.Labels == nil { + event.Meta.Client.Labels = make(map[string]string) + } + + event.Meta.Client.Labels[reorgDetectedLabelKey] = "true" + } +} + +func slotFromDecoratedEvent(event *xatu.DecoratedEvent) (uint64, bool) { + if event == nil || event.Meta == nil || event.Meta.Client == nil { + return 0, false + } + + switch data := event.Meta.Client.AdditionalData.(type) { + case *xatu.ClientMeta_EthV2BeaconBlockV2: + return slotFromSlotV2(data.EthV2BeaconBlockV2.GetSlot()) + case *xatu.ClientMeta_EthV2BeaconBlockElaboratedAttestation: + return slotFromSlotV2(data.EthV2BeaconBlockElaboratedAttestation.GetSlot()) + case *xatu.ClientMeta_EthV2BeaconBlockDeposit: + return slotFromBlockIdentifier(data.EthV2BeaconBlockDeposit.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockWithdrawal: + return 
slotFromBlockIdentifier(data.EthV2BeaconBlockWithdrawal.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockVoluntaryExit: + return slotFromBlockIdentifier(data.EthV2BeaconBlockVoluntaryExit.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockProposerSlashing: + return slotFromBlockIdentifier(data.EthV2BeaconBlockProposerSlashing.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockAttesterSlashing: + return slotFromBlockIdentifier(data.EthV2BeaconBlockAttesterSlashing.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockBlsToExecutionChange: + return slotFromBlockIdentifier(data.EthV2BeaconBlockBlsToExecutionChange.GetBlock()) + case *xatu.ClientMeta_EthV2BeaconBlockExecutionTransaction: + return slotFromBlockIdentifier(data.EthV2BeaconBlockExecutionTransaction.GetBlock()) + default: + return 0, false + } +} + +func slotFromBlockIdentifier(block *xatu.BlockIdentifier) (uint64, bool) { + if block == nil { + return 0, false + } + + return slotFromSlotV2(block.GetSlot()) +} + +func slotFromSlotV2(slot *xatu.SlotV2) (uint64, bool) { + if slot == nil || slot.Number == nil { + return 0, false + } + + return slot.Number.Value, true +} + +// slotFromIdentifierString parses a slot identifier if it is numeric. diff --git a/pkg/horizon/reorg_tracker.go b/pkg/horizon/reorg_tracker.go new file mode 100644 index 000000000..0c12548c5 --- /dev/null +++ b/pkg/horizon/reorg_tracker.go @@ -0,0 +1,74 @@ +package horizon + +import ( + "sync" + "time" +) + +// ReorgTracker tracks slots that were affected by reorgs to annotate derived events. +type ReorgTracker struct { + mu sync.Mutex + slots map[uint64]time.Time + ttl time.Duration +} + +// NewReorgTracker creates a new tracker with the given TTL. +func NewReorgTracker(ttl time.Duration) *ReorgTracker { + if ttl <= 0 { + ttl = 13 * time.Minute + } + + return &ReorgTracker{ + slots: make(map[uint64]time.Time), + ttl: ttl, + } +} + +// AddRange marks slots in [start, end] as affected by a reorg. 
+func (r *ReorgTracker) AddRange(start, end uint64) { + if end < start { + return + } + + now := time.Now() + expiry := now.Add(r.ttl) + + r.mu.Lock() + defer r.mu.Unlock() + + r.cleanupLocked(now) + + for slot := start; slot <= end; slot++ { + r.slots[slot] = expiry + } +} + +// IsReorgSlot reports whether a slot is marked as reorg-affected. +func (r *ReorgTracker) IsReorgSlot(slot uint64) bool { + now := time.Now() + + r.mu.Lock() + defer r.mu.Unlock() + + r.cleanupLocked(now) + + expiry, ok := r.slots[slot] + if !ok { + return false + } + + if now.After(expiry) { + delete(r.slots, slot) + return false + } + + return true +} + +func (r *ReorgTracker) cleanupLocked(now time.Time) { + for slot, expiry := range r.slots { + if now.After(expiry) { + delete(r.slots, slot) + } + } +} diff --git a/pkg/horizon/subscription/block.go b/pkg/horizon/subscription/block.go new file mode 100644 index 000000000..e26f99e45 --- /dev/null +++ b/pkg/horizon/subscription/block.go @@ -0,0 +1,292 @@ +package subscription + +import ( + "context" + "errors" + "sync" + "time" + + eth2v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/beacon/pkg/beacon" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +var ErrSubscriptionClosed = errors.New("subscription closed") + +// BlockEvent represents a parsed block event from the SSE stream. +type BlockEvent struct { + // Slot is the slot number of the block. + Slot phase0.Slot + // BlockRoot is the root of the block. + BlockRoot phase0.Root + // ExecutionOptimistic indicates if the block was received before execution validation. + ExecutionOptimistic bool + // ReceivedAt is the time when the event was received. + ReceivedAt time.Time + // NodeName is the name of the beacon node that received this event. 
+ NodeName string +} + +// BlockSubscription manages SSE subscriptions to block events across multiple beacon nodes. +type BlockSubscription struct { + log logrus.FieldLogger + pool *ethereum.BeaconNodePool + metrics *Metrics + + // events channel receives parsed block events. + events chan BlockEvent + + // done channel signals subscription shutdown. + done chan struct{} + wg sync.WaitGroup + + // bufferSize is the size of the events channel buffer. + bufferSize int +} + +// Metrics tracks SSE subscription metrics. +type Metrics struct { + sseEventsTotal *prometheus.CounterVec + sseConnectionStatus *prometheus.GaugeVec + sseReconnectsTotal *prometheus.CounterVec + sseLastEventReceivedAt *prometheus.GaugeVec + sseEventProcessingDelay *prometheus.HistogramVec +} + +// NewMetrics creates metrics for SSE subscriptions. +func NewMetrics(namespace string) *Metrics { + m := &Metrics{ + sseEventsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "sse", + Name: "events_total", + Help: "Total number of SSE events received from beacon nodes", + }, []string{"node", "topic", "network"}), + + sseConnectionStatus: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "sse", + Name: "connection_status", + Help: "SSE connection status per beacon node (1=connected, 0=disconnected)", + }, []string{"node"}), + + sseReconnectsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "sse", + Name: "reconnects_total", + Help: "Total number of SSE reconnection attempts per beacon node", + }, []string{"node"}), + + sseLastEventReceivedAt: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "sse", + Name: "last_event_received_at", + Help: "Unix timestamp of last SSE event received per beacon node", + }, []string{"node", "topic"}), + + sseEventProcessingDelay: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: "sse", + 
Name: "event_processing_delay_seconds", + Help: "Time delay between slot start and event receipt", + Buckets: []float64{0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 12.0}, + }, []string{"node", "topic"}), + } + + prometheus.MustRegister( + m.sseEventsTotal, + m.sseConnectionStatus, + m.sseReconnectsTotal, + m.sseLastEventReceivedAt, + m.sseEventProcessingDelay, + ) + + return m +} + +// IncSSEEvents increments the SSE events counter. +func (m *Metrics) IncSSEEvents(node, topic, network string) { + m.sseEventsTotal.WithLabelValues(node, topic, network).Inc() +} + +// SetSSEConnectionStatus sets the SSE connection status for a node. +func (m *Metrics) SetSSEConnectionStatus(node string, connected bool) { + val := float64(0) + if connected { + val = 1 + } + + m.sseConnectionStatus.WithLabelValues(node).Set(val) +} + +// IncSSEReconnects increments the SSE reconnect counter. +func (m *Metrics) IncSSEReconnects(node string) { + m.sseReconnectsTotal.WithLabelValues(node).Inc() +} + +// SetSSELastEventReceivedAt sets the timestamp of the last received event. +func (m *Metrics) SetSSELastEventReceivedAt(node, topic string, t time.Time) { + m.sseLastEventReceivedAt.WithLabelValues(node, topic).Set(float64(t.Unix())) +} + +// ObserveSSEEventProcessingDelay records the processing delay for an event. +func (m *Metrics) ObserveSSEEventProcessingDelay(node, topic string, delay time.Duration) { + m.sseEventProcessingDelay.WithLabelValues(node, topic).Observe(delay.Seconds()) +} + +// Config holds configuration for the block subscription. +type Config struct { + // BufferSize is the size of the events channel buffer. + // Default: 1000 + BufferSize int `yaml:"bufferSize" default:"1000"` +} + +// Validate validates the configuration. +func (c *Config) Validate() error { + if c.BufferSize <= 0 { + c.BufferSize = 1000 + } + + return nil +} + +// NewBlockSubscription creates a new BlockSubscription. 
+func NewBlockSubscription( + log logrus.FieldLogger, + pool *ethereum.BeaconNodePool, + config *Config, +) *BlockSubscription { + if config == nil { + config = &Config{BufferSize: 1000} + } + + if config.BufferSize <= 0 { + config.BufferSize = 1000 + } + + return &BlockSubscription{ + log: log.WithField("component", "subscription/block"), + pool: pool, + metrics: NewMetrics("xatu_horizon"), + events: make(chan BlockEvent, config.BufferSize), + done: make(chan struct{}), + bufferSize: config.BufferSize, + } +} + +// Start starts subscribing to block events on all beacon nodes. +// This should be called after the beacon node pool is started and ready. +func (b *BlockSubscription) Start(ctx context.Context) error { + b.log.Info("Starting block subscription") + + // Get all nodes from the pool and subscribe to each. + nodes := b.pool.GetAllNodes() + if len(nodes) == 0 { + return errors.New("no beacon nodes configured") + } + + for _, wrapper := range nodes { + b.subscribeToNode(ctx, wrapper) + } + + b.log.WithField("node_count", len(nodes)).Info("Block subscription started") + + return nil +} + +// subscribeToNode subscribes to block events on a single beacon node. +func (b *BlockSubscription) subscribeToNode(ctx context.Context, wrapper *ethereum.BeaconNodeWrapper) { + nodeName := wrapper.Name() + node := wrapper.Node() + log := b.log.WithField("beacon_node", nodeName) + + // Get network name for metrics. + networkName := "unknown" + if metadata := b.pool.Metadata(); metadata != nil { + networkName = string(metadata.Network.Name) + } + + // Subscribe to block events. 
+ // The beacon library handles: + // - SSE connection management + // - Automatic reconnection with backoff + // - Parsing of SSE payloads + node.OnBlock(ctx, func(ctx context.Context, event *eth2v1.BlockEvent) error { + receivedAt := time.Now() + + log.WithFields(logrus.Fields{ + "slot": event.Slot, + "block_root": event.Block.String(), + "execution_optimistic": event.ExecutionOptimistic, + }).Trace("Received block event") + + // Update metrics. + b.metrics.IncSSEEvents(nodeName, "block", networkName) + b.metrics.SetSSELastEventReceivedAt(nodeName, "block", receivedAt) + + // Calculate processing delay if we have wallclock. + if metadata := b.pool.Metadata(); metadata != nil && metadata.Wallclock() != nil { + slotInfo := metadata.Wallclock().Slots().FromNumber(uint64(event.Slot)) + delay := receivedAt.Sub(slotInfo.TimeWindow().Start()) + b.metrics.ObserveSSEEventProcessingDelay(nodeName, "block", delay) + } + + // Emit the block event to the channel. + blockEvent := BlockEvent{ + Slot: event.Slot, + BlockRoot: event.Block, + ExecutionOptimistic: event.ExecutionOptimistic, + ReceivedAt: receivedAt, + NodeName: nodeName, + } + + select { + case b.events <- blockEvent: + // Event sent successfully. + case <-ctx.Done(): + return ctx.Err() + case <-b.done: + return ErrSubscriptionClosed + default: + // Channel is full, log and drop the event. + log.WithField("slot", event.Slot).Warn("Block event channel full, dropping event") + } + + return nil + }) + + // Subscribe to connection events for status tracking. + // The beacon library emits these when connection state changes. + node.OnFirstTimeHealthy(ctx, func(_ context.Context, _ *beacon.FirstTimeHealthyEvent) error { + log.Debug("Beacon node SSE connection established") + b.metrics.SetSSEConnectionStatus(nodeName, true) + + return nil + }) + + log.Debug("Subscribed to block events") +} + +// Events returns the channel that receives block events. 
+// Consumers should read from this channel to process incoming block events. +func (b *BlockSubscription) Events() <-chan BlockEvent { + return b.events +} + +// Stop stops the block subscription. +func (b *BlockSubscription) Stop(_ context.Context) error { + b.log.Info("Stopping block subscription") + + close(b.done) + b.wg.Wait() + + // Close events channel after all goroutines have stopped. + close(b.events) + + b.log.Info("Block subscription stopped") + + return nil +} diff --git a/pkg/horizon/subscription/reorg.go b/pkg/horizon/subscription/reorg.go new file mode 100644 index 000000000..6869ed4d7 --- /dev/null +++ b/pkg/horizon/subscription/reorg.go @@ -0,0 +1,317 @@ +package subscription + +import ( + "context" + "sync" + "time" + + eth2v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/xatu/pkg/horizon/ethereum" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +// ReorgEvent represents a parsed chain reorg event from the SSE stream. +type ReorgEvent struct { + // Slot is the slot at which the reorg occurred. + Slot phase0.Slot + // Depth is the number of slots in the reorg. + Depth uint64 + // OldHeadBlock is the block root of the old head. + OldHeadBlock phase0.Root + // NewHeadBlock is the block root of the new head. + NewHeadBlock phase0.Root + // OldHeadState is the state root of the old head. + OldHeadState phase0.Root + // NewHeadState is the state root of the new head. + NewHeadState phase0.Root + // Epoch is the epoch in which the reorg occurred. + Epoch phase0.Epoch + // ReceivedAt is the time when the event was received. + ReceivedAt time.Time + // NodeName is the name of the beacon node that received this event. + NodeName string +} + +// ReorgSubscription manages SSE subscriptions to chain reorg events across multiple beacon nodes. 
+type ReorgSubscription struct { + log logrus.FieldLogger + pool *ethereum.BeaconNodePool + metrics *ReorgMetrics + + // events channel receives parsed reorg events. + events chan ReorgEvent + + // done channel signals subscription shutdown. + done chan struct{} + wg sync.WaitGroup + + // config holds reorg subscription configuration. + config *ReorgConfig +} + +// ReorgMetrics tracks chain reorg metrics. +type ReorgMetrics struct { + reorgsTotal *prometheus.CounterVec + reorgDepth *prometheus.HistogramVec + reorgsIgnored *prometheus.CounterVec + lastReorgAt *prometheus.GaugeVec + lastReorgDepth *prometheus.GaugeVec + lastReorgSlot *prometheus.GaugeVec +} + +// NewReorgMetrics creates metrics for chain reorg subscriptions. +func NewReorgMetrics(namespace string) *ReorgMetrics { + m := &ReorgMetrics{ + reorgsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "events_total", + Help: "Total number of chain reorg events received from beacon nodes", + }, []string{"node", "network"}), + + reorgDepth: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "depth", + Help: "Histogram of chain reorg depths in slots", + Buckets: []float64{1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 24, 32, 48, 64}, + }, []string{"node", "network"}), + + reorgsIgnored: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "ignored_total", + Help: "Total number of chain reorg events ignored (depth exceeds limit)", + }, []string{"node", "network"}), + + lastReorgAt: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "last_event_at", + Help: "Unix timestamp of last chain reorg event per beacon node", + }, []string{"node", "network"}), + + lastReorgDepth: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "last_depth", + Help: "Depth (in slots) of the last 
chain reorg event per beacon node", + }, []string{"node", "network"}), + + lastReorgSlot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "reorg", + Name: "last_slot", + Help: "Slot of the last chain reorg event per beacon node", + }, []string{"node", "network"}), + } + + prometheus.MustRegister( + m.reorgsTotal, + m.reorgDepth, + m.reorgsIgnored, + m.lastReorgAt, + m.lastReorgDepth, + m.lastReorgSlot, + ) + + return m +} + +// ReorgConfig holds configuration for the reorg subscription. +type ReorgConfig struct { + // Enabled indicates if reorg handling is enabled. + Enabled bool `yaml:"enabled" default:"true"` + // MaxDepth is the maximum reorg depth to handle. Reorgs deeper than this are ignored. + // Default: 64 slots + MaxDepth uint64 `yaml:"maxDepth" default:"64"` + // BufferSize is the size of the events channel buffer. + // Default: 100 + BufferSize int `yaml:"bufferSize" default:"100"` +} + +// Validate validates the configuration. +func (c *ReorgConfig) Validate() error { + if c.MaxDepth == 0 { + c.MaxDepth = 64 + } + + if c.BufferSize <= 0 { + c.BufferSize = 100 + } + + return nil +} + +// NewReorgSubscription creates a new ReorgSubscription. +func NewReorgSubscription( + log logrus.FieldLogger, + pool *ethereum.BeaconNodePool, + config *ReorgConfig, +) *ReorgSubscription { + if config == nil { + config = &ReorgConfig{ + Enabled: true, + MaxDepth: 64, + BufferSize: 100, + } + } + + if config.MaxDepth == 0 { + config.MaxDepth = 64 + } + + if config.BufferSize <= 0 { + config.BufferSize = 100 + } + + return &ReorgSubscription{ + log: log.WithField("component", "subscription/reorg"), + pool: pool, + metrics: NewReorgMetrics("xatu_horizon"), + events: make(chan ReorgEvent, config.BufferSize), + done: make(chan struct{}), + config: config, + } +} + +// Start starts subscribing to chain reorg events on all beacon nodes. +// This should be called after the beacon node pool is started and ready. 
+func (r *ReorgSubscription) Start(ctx context.Context) error { + if !r.config.Enabled { + r.log.Info("Reorg subscription disabled") + + return nil + } + + r.log.Info("Starting reorg subscription") + + // Get all nodes from the pool and subscribe to each. + nodes := r.pool.GetAllNodes() + if len(nodes) == 0 { + r.log.Warn("No beacon nodes configured for reorg subscription") + + return nil + } + + for _, wrapper := range nodes { + r.subscribeToNode(ctx, wrapper) + } + + r.log.WithField("node_count", len(nodes)).Info("Reorg subscription started") + + return nil +} + +// subscribeToNode subscribes to chain reorg events on a single beacon node. +func (r *ReorgSubscription) subscribeToNode(ctx context.Context, wrapper *ethereum.BeaconNodeWrapper) { + nodeName := wrapper.Name() + node := wrapper.Node() + log := r.log.WithField("beacon_node", nodeName) + + // Get network name for metrics. + networkName := "unknown" + if metadata := r.pool.Metadata(); metadata != nil { + networkName = string(metadata.Network.Name) + } + + // Subscribe to chain reorg events. + // The beacon library handles: + // - SSE connection management + // - Automatic reconnection with backoff + // - Parsing of SSE payloads + node.OnChainReOrg(ctx, func(ctx context.Context, event *eth2v1.ChainReorgEvent) error { + receivedAt := time.Now() + + log.WithFields(logrus.Fields{ + "slot": event.Slot, + "depth": event.Depth, + "old_head_block": event.OldHeadBlock.String(), + "new_head_block": event.NewHeadBlock.String(), + "epoch": event.Epoch, + }).Info("Received chain reorg event") + + // Update metrics. 
+ r.metrics.reorgsTotal.WithLabelValues(nodeName, networkName).Inc() + r.metrics.reorgDepth.WithLabelValues(nodeName, networkName).Observe(float64(event.Depth)) + r.metrics.lastReorgAt.WithLabelValues(nodeName, networkName).Set(float64(receivedAt.Unix())) + r.metrics.lastReorgDepth.WithLabelValues(nodeName, networkName).Set(float64(event.Depth)) + r.metrics.lastReorgSlot.WithLabelValues(nodeName, networkName).Set(float64(event.Slot)) + + // Check if depth exceeds limit. + if event.Depth > r.config.MaxDepth { + log.WithFields(logrus.Fields{ + "slot": event.Slot, + "depth": event.Depth, + "max_depth": r.config.MaxDepth, + }).Warn("Ignoring reorg event - depth exceeds configured limit") + + r.metrics.reorgsIgnored.WithLabelValues(nodeName, networkName).Inc() + + return nil + } + + // Create reorg event. + reorgEvent := ReorgEvent{ + Slot: event.Slot, + Depth: event.Depth, + OldHeadBlock: event.OldHeadBlock, + NewHeadBlock: event.NewHeadBlock, + OldHeadState: event.OldHeadState, + NewHeadState: event.NewHeadState, + Epoch: event.Epoch, + ReceivedAt: receivedAt, + NodeName: nodeName, + } + + // Emit the reorg event to the channel. + select { + case r.events <- reorgEvent: + // Event sent successfully. + case <-ctx.Done(): + return ctx.Err() + case <-r.done: + return ErrSubscriptionClosed + default: + // Channel is full, log and drop the event. + log.WithField("slot", event.Slot).Warn("Reorg event channel full, dropping event") + } + + return nil + }) + + log.Debug("Subscribed to chain reorg events") +} + +// Events returns the channel that receives reorg events. +// Consumers should read from this channel to process incoming reorg events. +func (r *ReorgSubscription) Events() <-chan ReorgEvent { + return r.events +} + +// Stop stops the reorg subscription. +func (r *ReorgSubscription) Stop(_ context.Context) error { + r.log.Info("Stopping reorg subscription") + + close(r.done) + r.wg.Wait() + + // Close events channel after all goroutines have stopped. 
+ close(r.events) + + r.log.Info("Reorg subscription stopped") + + return nil +} + +// MaxDepth returns the configured maximum reorg depth. +func (r *ReorgSubscription) MaxDepth() uint64 { + return r.config.MaxDepth +} + +// Enabled returns whether reorg handling is enabled. +func (r *ReorgSubscription) Enabled() bool { + return r.config.Enabled +} diff --git a/pkg/proto/xatu/coordinator.pb.go b/pkg/proto/xatu/coordinator.pb.go index 1b2bad539..f35db0163 100644 --- a/pkg/proto/xatu/coordinator.pb.go +++ b/pkg/proto/xatu/coordinator.pb.go @@ -150,6 +150,87 @@ func (RelayMonitorType) EnumDescriptor() ([]byte, []int) { return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{1} } +// Horizon types - for head data collection module +// Mirrors CannonType for horizon-specific location types +type HorizonType int32 + +const ( + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT HorizonType = 0 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING HorizonType = 1 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT HorizonType = 2 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING HorizonType = 3 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE HorizonType = 4 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION HorizonType = 5 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL HorizonType = 6 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK HorizonType = 7 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR HorizonType = 8 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_PROPOSER_DUTY HorizonType = 9 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION HorizonType = 10 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_VALIDATORS HorizonType = 11 + HorizonType_HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_COMMITTEE HorizonType = 12 +) + +// Enum value maps for 
HorizonType. +var ( + HorizonType_name = map[int32]string{ + 0: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT", + 1: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING", + 2: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT", + 3: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING", + 4: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE", + 5: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION", + 6: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL", + 7: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK", + 8: "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR", + 9: "HORIZON_TYPE_BEACON_API_ETH_V1_PROPOSER_DUTY", + 10: "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION", + 11: "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_VALIDATORS", + 12: "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_COMMITTEE", + } + HorizonType_value = map[string]int32{ + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT": 0, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING": 1, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT": 2, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING": 3, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE": 4, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION": 5, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL": 6, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK": 7, + "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR": 8, + "HORIZON_TYPE_BEACON_API_ETH_V1_PROPOSER_DUTY": 9, + "HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION": 10, + "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_VALIDATORS": 11, + "HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_COMMITTEE": 12, + } +) + +func (x HorizonType) Enum() *HorizonType { + p := new(HorizonType) + *p = x + return p +} + +func (x HorizonType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + 
+func (HorizonType) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_proto_xatu_coordinator_proto_enumTypes[2].Descriptor() +} + +func (HorizonType) Type() protoreflect.EnumType { + return &file_pkg_proto_xatu_coordinator_proto_enumTypes[2] +} + +func (x HorizonType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HorizonType.Descriptor instead. +func (HorizonType) EnumDescriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{2} +} + type CreateNodeRecordsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3232,6 +3313,266 @@ func (*UpsertRelayMonitorLocationResponse) Descriptor() ([]byte, []int) { return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{52} } +// HorizonLocation stores HEAD and FILL slot positions per deriver +// Used to track progress of the Horizon head data collection module +type HorizonLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` // Network identifier (e.g., "mainnet", "holesky") + Type HorizonType `protobuf:"varint,2,opt,name=type,proto3,enum=xatu.HorizonType" json:"type,omitempty"` // Deriver type being tracked + HeadSlot uint64 `protobuf:"varint,3,opt,name=head_slot,json=headSlot,proto3" json:"head_slot,omitempty"` // Current head slot position for real-time tracking + FillSlot uint64 `protobuf:"varint,4,opt,name=fill_slot,json=fillSlot,proto3" json:"fill_slot,omitempty"` // Fill slot position for catch-up processing +} + +func (x *HorizonLocation) Reset() { + *x = HorizonLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HorizonLocation) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*HorizonLocation) ProtoMessage() {} + +func (x *HorizonLocation) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HorizonLocation.ProtoReflect.Descriptor instead. +func (*HorizonLocation) Descriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{53} +} + +func (x *HorizonLocation) GetNetworkId() string { + if x != nil { + return x.NetworkId + } + return "" +} + +func (x *HorizonLocation) GetType() HorizonType { + if x != nil { + return x.Type + } + return HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT +} + +func (x *HorizonLocation) GetHeadSlot() uint64 { + if x != nil { + return x.HeadSlot + } + return 0 +} + +func (x *HorizonLocation) GetFillSlot() uint64 { + if x != nil { + return x.FillSlot + } + return 0 +} + +type GetHorizonLocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + Type HorizonType `protobuf:"varint,2,opt,name=type,proto3,enum=xatu.HorizonType" json:"type,omitempty"` +} + +func (x *GetHorizonLocationRequest) Reset() { + *x = GetHorizonLocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHorizonLocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHorizonLocationRequest) ProtoMessage() {} + +func (x *GetHorizonLocationRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_pkg_proto_xatu_coordinator_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHorizonLocationRequest.ProtoReflect.Descriptor instead. +func (*GetHorizonLocationRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{54} +} + +func (x *GetHorizonLocationRequest) GetNetworkId() string { + if x != nil { + return x.NetworkId + } + return "" +} + +func (x *GetHorizonLocationRequest) GetType() HorizonType { + if x != nil { + return x.Type + } + return HorizonType_HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT +} + +type GetHorizonLocationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Location *HorizonLocation `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *GetHorizonLocationResponse) Reset() { + *x = GetHorizonLocationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHorizonLocationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHorizonLocationResponse) ProtoMessage() {} + +func (x *GetHorizonLocationResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHorizonLocationResponse.ProtoReflect.Descriptor instead. 
+func (*GetHorizonLocationResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{55} +} + +func (x *GetHorizonLocationResponse) GetLocation() *HorizonLocation { + if x != nil { + return x.Location + } + return nil +} + +type UpsertHorizonLocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Location *HorizonLocation `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *UpsertHorizonLocationRequest) Reset() { + *x = UpsertHorizonLocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpsertHorizonLocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpsertHorizonLocationRequest) ProtoMessage() {} + +func (x *UpsertHorizonLocationRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpsertHorizonLocationRequest.ProtoReflect.Descriptor instead. 
+func (*UpsertHorizonLocationRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{56} +} + +func (x *UpsertHorizonLocationRequest) GetLocation() *HorizonLocation { + if x != nil { + return x.Location + } + return nil +} + +type UpsertHorizonLocationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpsertHorizonLocationResponse) Reset() { + *x = UpsertHorizonLocationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpsertHorizonLocationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpsertHorizonLocationResponse) ProtoMessage() {} + +func (x *UpsertHorizonLocationResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpsertHorizonLocationResponse.ProtoReflect.Descriptor instead. 
+func (*UpsertHorizonLocationResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_xatu_coordinator_proto_rawDescGZIP(), []int{57} +} + type ExecutionNodeStatus_Capability struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3244,7 +3585,7 @@ type ExecutionNodeStatus_Capability struct { func (x *ExecutionNodeStatus_Capability) Reset() { *x = ExecutionNodeStatus_Capability{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[53] + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +3598,7 @@ func (x *ExecutionNodeStatus_Capability) String() string { func (*ExecutionNodeStatus_Capability) ProtoMessage() {} func (x *ExecutionNodeStatus_Capability) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[53] + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3299,7 +3640,7 @@ type ExecutionNodeStatus_ForkID struct { func (x *ExecutionNodeStatus_ForkID) Reset() { *x = ExecutionNodeStatus_ForkID{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[54] + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3312,7 +3653,7 @@ func (x *ExecutionNodeStatus_ForkID) String() string { func (*ExecutionNodeStatus_ForkID) ProtoMessage() {} func (x *ExecutionNodeStatus_ForkID) ProtoReflect() protoreflect.Message { - mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[54] + mi := &file_pkg_proto_xatu_coordinator_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3904,170 +4245,260 @@ var 
file_pkg_proto_xatu_coordinator_proto_rawDesc = []byte{ 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x24, 0x0a, 0x22, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0xa1, 0x05, 0x0a, 0x0a, 0x43, 0x61, - 0x6e, 0x6e, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x2d, 0x42, 0x45, 0x41, 0x43, - 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, - 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4e, - 0x54, 0x41, 0x52, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x00, 0x12, 0x34, 0x0a, 0x30, 0x42, - 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, - 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x50, 0x52, - 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, - 0x01, 0x12, 0x2a, 0x0a, 0x26, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, - 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, - 0x4f, 0x43, 0x4b, 0x5f, 0x44, 0x45, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, - 0x30, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x0f, 0x48, 0x6f, + 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, + 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x78, 
0x61, 0x74, + 0x75, 0x2e, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x6c, 0x6f, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x61, 0x0a, + 0x19, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x48, + 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x22, 0x4f, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, + 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x51, 0x0a, 0x1c, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, + 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x48, 0x6f, 0x72, 0x69, 0x7a, + 0x6f, 0x6e, 0x4c, 0x6f, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x48, 0x6f, + 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0xa1, 0x05, 0x0a, 0x0a, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x2d, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, + 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, + 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4e, 0x54, 0x41, 0x52, 0x59, + 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x00, 0x12, 0x34, 0x0a, 0x30, 0x42, 0x45, 0x41, 0x43, 0x4f, + 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, + 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x50, 0x4f, 0x53, + 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x2a, 0x0a, + 0x26, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, - 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, - 0x47, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, - 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, - 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4c, 0x53, 0x5f, 0x54, 0x4f, 0x5f, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x04, 0x12, - 0x38, 0x0a, 0x34, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, - 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, - 0x4b, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x54, 0x52, 0x41, 0x4e, - 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x2d, 0x0a, 0x29, 0x42, 0x45, 0x41, + 0x44, 0x45, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, - 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x57, 0x49, 0x54, 0x48, - 0x44, 0x52, 0x41, 0x57, 0x41, 0x4c, 0x10, 0x06, 0x12, 0x22, 0x0a, 0x1e, 0x42, 0x45, 0x41, 0x43, - 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, - 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x07, 0x12, 0x23, 0x0a, 0x1f, - 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x50, 0x52, 0x49, 0x4e, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, - 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, - 0x08, 0x12, 0x29, 0x0a, 0x25, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, - 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, - 0x4f, 0x42, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x43, 0x41, 0x52, 0x10, 0x09, 0x12, 0x23, 0x0a, 0x1f, + 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x41, 0x54, 0x54, 0x45, + 0x53, 0x54, 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, + 0x3a, 0x0a, 0x36, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, + 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, + 0x4b, 0x5f, 0x42, 0x4c, 0x53, 0x5f, 0x54, 0x4f, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x04, 0x12, 0x38, 0x0a, 0x34, 0x42, + 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, + 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x45, 0x58, + 0x45, 
0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x2d, 0x0a, 0x29, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, + 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, + 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x44, 0x52, 0x41, 0x57, + 0x41, 0x4c, 0x10, 0x06, 0x12, 0x22, 0x0a, 0x1e, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, + 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, + 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x07, 0x12, 0x23, 0x0a, 0x1f, 0x42, 0x4c, 0x4f, 0x43, + 0x4b, 0x50, 0x52, 0x49, 0x4e, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x08, 0x12, 0x29, 0x0a, + 0x25, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, + 0x56, 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x53, + 0x49, 0x44, 0x45, 0x43, 0x41, 0x52, 0x10, 0x09, 0x12, 0x23, 0x0a, 0x1f, 0x42, 0x45, 0x41, 0x43, + 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x50, 0x52, + 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x52, 0x5f, 0x44, 0x55, 0x54, 0x59, 0x10, 0x0a, 0x12, 0x39, 0x0a, + 0x35, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, + 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, + 0x45, 0x4c, 0x41, 0x42, 0x4f, 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x54, 0x54, 0x45, 0x53, + 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0b, 0x12, 0x27, 0x0a, 0x23, 0x42, 0x45, 0x41, 0x43, + 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, + 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x53, 0x10, + 0x0c, 0x12, 0x26, 0x0a, 0x22, 0x42, 0x45, 
0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, + 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, + 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x45, 0x10, 0x0d, 0x2a, 0x54, 0x0a, 0x10, 0x52, 0x65, 0x6c, + 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, + 0x17, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x4d, 0x4f, 0x4e, 0x49, 0x54, 0x4f, 0x52, 0x5f, 0x42, + 0x49, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x4c, 0x41, 0x59, 0x5f, 0x4d, 0x4f, 0x4e, 0x49, 0x54, 0x4f, 0x52, 0x5f, 0x50, 0x41, 0x59, 0x4c, + 0x4f, 0x41, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x45, 0x44, 0x10, 0x01, 0x2a, + 0xa6, 0x06, 0x0a, 0x0b, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x3e, 0x0a, 0x3a, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, - 0x31, 0x5f, 0x50, 0x52, 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x52, 0x5f, 0x44, 0x55, 0x54, 0x59, 0x10, - 0x0a, 0x12, 0x39, 0x0a, 0x35, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, - 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, - 0x4f, 0x43, 0x4b, 0x5f, 0x45, 0x4c, 0x41, 0x42, 0x4f, 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, - 0x54, 0x54, 0x45, 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0b, 0x12, 0x27, 0x0a, 0x23, + 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x56, + 0x4f, 0x4c, 0x55, 0x4e, 0x54, 0x41, 0x52, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x00, 0x12, + 0x41, 0x0a, 0x3d, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, - 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 
0x41, 0x54, - 0x4f, 0x52, 0x53, 0x10, 0x0c, 0x12, 0x26, 0x0a, 0x22, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, - 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, - 0x4e, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x45, 0x10, 0x0d, 0x2a, 0x54, 0x0a, - 0x10, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x4d, 0x4f, 0x4e, 0x49, 0x54, - 0x4f, 0x52, 0x5f, 0x42, 0x49, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x00, 0x12, 0x23, - 0x0a, 0x1f, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x4d, 0x4f, 0x4e, 0x49, 0x54, 0x4f, 0x52, 0x5f, - 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x45, - 0x44, 0x10, 0x01, 0x32, 0xfb, 0x0d, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, - 0x74, 0x6f, 0x72, 0x12, 0x56, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, - 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, - 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, - 0x78, 0x61, 0x74, 0x75, 
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, - 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x7d, 0x0a, 0x1e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x12, 0x2b, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, - 0x69, 0x6e, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, - 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x37, 0x0a, 0x33, 0x48, 0x4f, 0x52, 0x49, 
0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, + 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, + 0x4b, 0x5f, 0x44, 0x45, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x10, 0x02, 0x12, 0x41, 0x0a, 0x3d, 0x48, + 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, + 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, + 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x41, 0x54, 0x54, 0x45, 0x53, + 0x54, 0x45, 0x52, 0x5f, 0x53, 0x4c, 0x41, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x47, + 0x0a, 0x43, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, + 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4c, + 0x53, 0x5f, 0x54, 0x4f, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, + 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x04, 0x12, 0x45, 0x0a, 0x41, 0x48, 0x4f, 0x52, 0x49, 0x5a, + 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, + 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, + 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x3a, + 0x0a, 0x36, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, + 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x44, 0x52, 0x41, 0x57, 0x41, 0x4c, 0x10, 0x06, 0x12, 0x2f, 0x0a, 0x2b, 0x48, 0x4f, + 
0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, + 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, + 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x07, 0x12, 0x36, 0x0a, 0x32, 0x48, + 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, + 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, + 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x43, 0x41, + 0x52, 0x10, 0x08, 0x12, 0x30, 0x0a, 0x2c, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, + 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x50, 0x52, 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x52, 0x5f, 0x44, + 0x55, 0x54, 0x59, 0x10, 0x09, 0x12, 0x46, 0x0a, 0x42, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, + 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x32, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x45, 0x4c, 0x41, 0x42, 0x4f, 0x52, 0x41, 0x54, 0x45, 0x44, 0x5f, + 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0a, 0x12, 0x34, 0x0a, + 0x30, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, + 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, + 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, + 0x53, 0x10, 0x0b, 0x12, 0x33, 0x0a, 0x2f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x45, + 0x54, 0x48, 0x5f, 0x56, 0x31, 0x5f, 0x42, 0x45, 0x41, 0x43, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, + 0x4d, 0x49, 0x54, 0x54, 0x45, 0x45, 
0x10, 0x0c, 0x32, 0xba, 0x0f, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x56, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, + 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, - 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, + 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, - 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, + 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, - 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 
0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, + 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x86, 0x01, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x78, - 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, - 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x78, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, + 0x6e, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2b, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, + 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x73, + 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, + 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x7d, 0x0a, 0x1e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x12, 0x2b, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, - 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 
0x4e, 0x6f, 0x64, 0x65, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, - 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, + 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, + 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x86, 0x01, 0x0a, 0x21, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, + 0x73, 0x12, 0x2e, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2f, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 
0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1e, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2b, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, + 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, + 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x43, 0x6f, 0x6f, 0x72, 0x64, + 0x69, 0x6e, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, - 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, - 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, - 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, + 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4e, 0x6f, 0x64, 
0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, - 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, - 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2c, 0x2e, 0x78, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2c, + 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x78, 0x61, 0x74, - 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, + 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x11, 0x47, - 0x65, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 
0x12, 0x1e, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, - 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, - 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x14, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x61, 0x6e, - 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x78, 0x61, - 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x61, 0x6e, 0x6e, - 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x24, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, + 0x64, 0x12, 0x2c, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x56, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x14, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x61, + 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x43, 0x61, 0x6e, 0x6e, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, - 0x0a, 0x1a, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x78, - 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, - 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 
0x6f, 0x72, 0x4c, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x78, 0x61, 0x74, + 0x75, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x1a, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, + 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x27, 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, + 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x78, 0x61, 0x74, + 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x72, + 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x78, + 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x78, 0x61, 0x74, 0x75, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x65, 0x74, 0x68, 0x70, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x70, 0x73, 0x2f, 0x78, 0x61, 0x74, 0x75, - 0x2f, 0x70, 0x6b, 
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x78, 0x61, 0x74, 0x75, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x00, 0x12, 0x62, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, + 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x78, 0x61, 0x74, + 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x78, 0x61, 0x74, 0x75, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x48, 0x6f, 0x72, 0x69, + 0x7a, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x74, 0x68, 0x70, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x70, 0x73, 0x2f, + 0x78, 0x61, 0x74, 0x75, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x78, + 0x61, 0x74, 0x75, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4082,152 +4513,166 @@ func file_pkg_proto_xatu_coordinator_proto_rawDescGZIP() []byte { return file_pkg_proto_xatu_coordinator_proto_rawDescData } -var file_pkg_proto_xatu_coordinator_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_pkg_proto_xatu_coordinator_proto_msgTypes = make([]protoimpl.MessageInfo, 55) +var file_pkg_proto_xatu_coordinator_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_pkg_proto_xatu_coordinator_proto_msgTypes = make([]protoimpl.MessageInfo, 60) var file_pkg_proto_xatu_coordinator_proto_goTypes = []any{ (CannonType)(0), // 0: xatu.CannonType (RelayMonitorType)(0), // 1: xatu.RelayMonitorType - (*CreateNodeRecordsRequest)(nil), // 2: xatu.CreateNodeRecordsRequest - (*CreateNodeRecordsResponse)(nil), // 3: xatu.CreateNodeRecordsResponse - (*ListStalledExecutionNodeRecordsRequest)(nil), // 4: xatu.ListStalledExecutionNodeRecordsRequest - 
(*ListStalledExecutionNodeRecordsResponse)(nil), // 5: xatu.ListStalledExecutionNodeRecordsResponse - (*ExecutionNodeStatus)(nil), // 6: xatu.ExecutionNodeStatus - (*CreateExecutionNodeRecordStatusRequest)(nil), // 7: xatu.CreateExecutionNodeRecordStatusRequest - (*CreateExecutionNodeRecordStatusResponse)(nil), // 8: xatu.CreateExecutionNodeRecordStatusResponse - (*CoordinatedNodeRecord)(nil), // 9: xatu.CoordinatedNodeRecord - (*CoordinateExecutionNodeRecordsRequest)(nil), // 10: xatu.CoordinateExecutionNodeRecordsRequest - (*CoordinateExecutionNodeRecordsResponse)(nil), // 11: xatu.CoordinateExecutionNodeRecordsResponse - (*ConsensusNodeStatus)(nil), // 12: xatu.ConsensusNodeStatus - (*ListStalledConsensusNodeRecordsRequest)(nil), // 13: xatu.ListStalledConsensusNodeRecordsRequest - (*ListStalledConsensusNodeRecordsResponse)(nil), // 14: xatu.ListStalledConsensusNodeRecordsResponse - (*CreateConsensusNodeRecordStatusRequest)(nil), // 15: xatu.CreateConsensusNodeRecordStatusRequest - (*CreateConsensusNodeRecordStatusResponse)(nil), // 16: xatu.CreateConsensusNodeRecordStatusResponse - (*CreateConsensusNodeRecordStatusesRequest)(nil), // 17: xatu.CreateConsensusNodeRecordStatusesRequest - (*CreateConsensusNodeRecordStatusesResponse)(nil), // 18: xatu.CreateConsensusNodeRecordStatusesResponse - (*CoordinateConsensusNodeRecordsRequest)(nil), // 19: xatu.CoordinateConsensusNodeRecordsRequest - (*CoordinateConsensusNodeRecordsResponse)(nil), // 20: xatu.CoordinateConsensusNodeRecordsResponse - (*GetDiscoveryNodeRecordRequest)(nil), // 21: xatu.GetDiscoveryNodeRecordRequest - (*GetDiscoveryNodeRecordResponse)(nil), // 22: xatu.GetDiscoveryNodeRecordResponse - (*GetDiscoveryExecutionNodeRecordRequest)(nil), // 23: xatu.GetDiscoveryExecutionNodeRecordRequest - (*GetDiscoveryExecutionNodeRecordResponse)(nil), // 24: xatu.GetDiscoveryExecutionNodeRecordResponse - (*GetDiscoveryConsensusNodeRecordRequest)(nil), // 25: xatu.GetDiscoveryConsensusNodeRecordRequest - 
(*GetDiscoveryConsensusNodeRecordResponse)(nil), // 26: xatu.GetDiscoveryConsensusNodeRecordResponse - (*BackfillingCheckpointMarker)(nil), // 27: xatu.BackfillingCheckpointMarker - (*CannonLocationEthV2BeaconBlockVoluntaryExit)(nil), // 28: xatu.CannonLocationEthV2BeaconBlockVoluntaryExit - (*CannonLocationEthV2BeaconBlockProposerSlashing)(nil), // 29: xatu.CannonLocationEthV2BeaconBlockProposerSlashing - (*CannonLocationEthV2BeaconBlockDeposit)(nil), // 30: xatu.CannonLocationEthV2BeaconBlockDeposit - (*CannonLocationEthV2BeaconBlockAttesterSlashing)(nil), // 31: xatu.CannonLocationEthV2BeaconBlockAttesterSlashing - (*CannonLocationEthV2BeaconBlockBlsToExecutionChange)(nil), // 32: xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange - (*CannonLocationEthV2BeaconBlockExecutionTransaction)(nil), // 33: xatu.CannonLocationEthV2BeaconBlockExecutionTransaction - (*CannonLocationEthV2BeaconBlockWithdrawal)(nil), // 34: xatu.CannonLocationEthV2BeaconBlockWithdrawal - (*CannonLocationEthV2BeaconBlock)(nil), // 35: xatu.CannonLocationEthV2BeaconBlock - (*CannonLocationBlockprintBlockClassification)(nil), // 36: xatu.CannonLocationBlockprintBlockClassification - (*CannonLocationEthV1BeaconBlobSidecar)(nil), // 37: xatu.CannonLocationEthV1BeaconBlobSidecar - (*CannonLocationEthV1BeaconProposerDuty)(nil), // 38: xatu.CannonLocationEthV1BeaconProposerDuty - (*CannonLocationEthV2BeaconBlockElaboratedAttestation)(nil), // 39: xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation - (*CannonLocationEthV1BeaconValidators)(nil), // 40: xatu.CannonLocationEthV1BeaconValidators - (*CannonLocationEthV1BeaconCommittee)(nil), // 41: xatu.CannonLocationEthV1BeaconCommittee - (*CannonLocation)(nil), // 42: xatu.CannonLocation - (*GetCannonLocationRequest)(nil), // 43: xatu.GetCannonLocationRequest - (*GetCannonLocationResponse)(nil), // 44: xatu.GetCannonLocationResponse - (*UpsertCannonLocationRequest)(nil), // 45: xatu.UpsertCannonLocationRequest - 
(*UpsertCannonLocationResponse)(nil), // 46: xatu.UpsertCannonLocationResponse - (*RelayMonitorSlotMarker)(nil), // 47: xatu.RelayMonitorSlotMarker - (*RelayMonitorLocationBidTrace)(nil), // 48: xatu.RelayMonitorLocationBidTrace - (*RelayMonitorLocationPayloadDelivered)(nil), // 49: xatu.RelayMonitorLocationPayloadDelivered - (*RelayMonitorLocation)(nil), // 50: xatu.RelayMonitorLocation - (*GetRelayMonitorLocationRequest)(nil), // 51: xatu.GetRelayMonitorLocationRequest - (*GetRelayMonitorLocationResponse)(nil), // 52: xatu.GetRelayMonitorLocationResponse - (*UpsertRelayMonitorLocationRequest)(nil), // 53: xatu.UpsertRelayMonitorLocationRequest - (*UpsertRelayMonitorLocationResponse)(nil), // 54: xatu.UpsertRelayMonitorLocationResponse - (*ExecutionNodeStatus_Capability)(nil), // 55: xatu.ExecutionNodeStatus.Capability - (*ExecutionNodeStatus_ForkID)(nil), // 56: xatu.ExecutionNodeStatus.ForkID - (*timestamppb.Timestamp)(nil), // 57: google.protobuf.Timestamp + (HorizonType)(0), // 2: xatu.HorizonType + (*CreateNodeRecordsRequest)(nil), // 3: xatu.CreateNodeRecordsRequest + (*CreateNodeRecordsResponse)(nil), // 4: xatu.CreateNodeRecordsResponse + (*ListStalledExecutionNodeRecordsRequest)(nil), // 5: xatu.ListStalledExecutionNodeRecordsRequest + (*ListStalledExecutionNodeRecordsResponse)(nil), // 6: xatu.ListStalledExecutionNodeRecordsResponse + (*ExecutionNodeStatus)(nil), // 7: xatu.ExecutionNodeStatus + (*CreateExecutionNodeRecordStatusRequest)(nil), // 8: xatu.CreateExecutionNodeRecordStatusRequest + (*CreateExecutionNodeRecordStatusResponse)(nil), // 9: xatu.CreateExecutionNodeRecordStatusResponse + (*CoordinatedNodeRecord)(nil), // 10: xatu.CoordinatedNodeRecord + (*CoordinateExecutionNodeRecordsRequest)(nil), // 11: xatu.CoordinateExecutionNodeRecordsRequest + (*CoordinateExecutionNodeRecordsResponse)(nil), // 12: xatu.CoordinateExecutionNodeRecordsResponse + (*ConsensusNodeStatus)(nil), // 13: xatu.ConsensusNodeStatus + 
(*ListStalledConsensusNodeRecordsRequest)(nil), // 14: xatu.ListStalledConsensusNodeRecordsRequest + (*ListStalledConsensusNodeRecordsResponse)(nil), // 15: xatu.ListStalledConsensusNodeRecordsResponse + (*CreateConsensusNodeRecordStatusRequest)(nil), // 16: xatu.CreateConsensusNodeRecordStatusRequest + (*CreateConsensusNodeRecordStatusResponse)(nil), // 17: xatu.CreateConsensusNodeRecordStatusResponse + (*CreateConsensusNodeRecordStatusesRequest)(nil), // 18: xatu.CreateConsensusNodeRecordStatusesRequest + (*CreateConsensusNodeRecordStatusesResponse)(nil), // 19: xatu.CreateConsensusNodeRecordStatusesResponse + (*CoordinateConsensusNodeRecordsRequest)(nil), // 20: xatu.CoordinateConsensusNodeRecordsRequest + (*CoordinateConsensusNodeRecordsResponse)(nil), // 21: xatu.CoordinateConsensusNodeRecordsResponse + (*GetDiscoveryNodeRecordRequest)(nil), // 22: xatu.GetDiscoveryNodeRecordRequest + (*GetDiscoveryNodeRecordResponse)(nil), // 23: xatu.GetDiscoveryNodeRecordResponse + (*GetDiscoveryExecutionNodeRecordRequest)(nil), // 24: xatu.GetDiscoveryExecutionNodeRecordRequest + (*GetDiscoveryExecutionNodeRecordResponse)(nil), // 25: xatu.GetDiscoveryExecutionNodeRecordResponse + (*GetDiscoveryConsensusNodeRecordRequest)(nil), // 26: xatu.GetDiscoveryConsensusNodeRecordRequest + (*GetDiscoveryConsensusNodeRecordResponse)(nil), // 27: xatu.GetDiscoveryConsensusNodeRecordResponse + (*BackfillingCheckpointMarker)(nil), // 28: xatu.BackfillingCheckpointMarker + (*CannonLocationEthV2BeaconBlockVoluntaryExit)(nil), // 29: xatu.CannonLocationEthV2BeaconBlockVoluntaryExit + (*CannonLocationEthV2BeaconBlockProposerSlashing)(nil), // 30: xatu.CannonLocationEthV2BeaconBlockProposerSlashing + (*CannonLocationEthV2BeaconBlockDeposit)(nil), // 31: xatu.CannonLocationEthV2BeaconBlockDeposit + (*CannonLocationEthV2BeaconBlockAttesterSlashing)(nil), // 32: xatu.CannonLocationEthV2BeaconBlockAttesterSlashing + (*CannonLocationEthV2BeaconBlockBlsToExecutionChange)(nil), // 33: 
xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange + (*CannonLocationEthV2BeaconBlockExecutionTransaction)(nil), // 34: xatu.CannonLocationEthV2BeaconBlockExecutionTransaction + (*CannonLocationEthV2BeaconBlockWithdrawal)(nil), // 35: xatu.CannonLocationEthV2BeaconBlockWithdrawal + (*CannonLocationEthV2BeaconBlock)(nil), // 36: xatu.CannonLocationEthV2BeaconBlock + (*CannonLocationBlockprintBlockClassification)(nil), // 37: xatu.CannonLocationBlockprintBlockClassification + (*CannonLocationEthV1BeaconBlobSidecar)(nil), // 38: xatu.CannonLocationEthV1BeaconBlobSidecar + (*CannonLocationEthV1BeaconProposerDuty)(nil), // 39: xatu.CannonLocationEthV1BeaconProposerDuty + (*CannonLocationEthV2BeaconBlockElaboratedAttestation)(nil), // 40: xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation + (*CannonLocationEthV1BeaconValidators)(nil), // 41: xatu.CannonLocationEthV1BeaconValidators + (*CannonLocationEthV1BeaconCommittee)(nil), // 42: xatu.CannonLocationEthV1BeaconCommittee + (*CannonLocation)(nil), // 43: xatu.CannonLocation + (*GetCannonLocationRequest)(nil), // 44: xatu.GetCannonLocationRequest + (*GetCannonLocationResponse)(nil), // 45: xatu.GetCannonLocationResponse + (*UpsertCannonLocationRequest)(nil), // 46: xatu.UpsertCannonLocationRequest + (*UpsertCannonLocationResponse)(nil), // 47: xatu.UpsertCannonLocationResponse + (*RelayMonitorSlotMarker)(nil), // 48: xatu.RelayMonitorSlotMarker + (*RelayMonitorLocationBidTrace)(nil), // 49: xatu.RelayMonitorLocationBidTrace + (*RelayMonitorLocationPayloadDelivered)(nil), // 50: xatu.RelayMonitorLocationPayloadDelivered + (*RelayMonitorLocation)(nil), // 51: xatu.RelayMonitorLocation + (*GetRelayMonitorLocationRequest)(nil), // 52: xatu.GetRelayMonitorLocationRequest + (*GetRelayMonitorLocationResponse)(nil), // 53: xatu.GetRelayMonitorLocationResponse + (*UpsertRelayMonitorLocationRequest)(nil), // 54: xatu.UpsertRelayMonitorLocationRequest + (*UpsertRelayMonitorLocationResponse)(nil), // 55: 
xatu.UpsertRelayMonitorLocationResponse + (*HorizonLocation)(nil), // 56: xatu.HorizonLocation + (*GetHorizonLocationRequest)(nil), // 57: xatu.GetHorizonLocationRequest + (*GetHorizonLocationResponse)(nil), // 58: xatu.GetHorizonLocationResponse + (*UpsertHorizonLocationRequest)(nil), // 59: xatu.UpsertHorizonLocationRequest + (*UpsertHorizonLocationResponse)(nil), // 60: xatu.UpsertHorizonLocationResponse + (*ExecutionNodeStatus_Capability)(nil), // 61: xatu.ExecutionNodeStatus.Capability + (*ExecutionNodeStatus_ForkID)(nil), // 62: xatu.ExecutionNodeStatus.ForkID + (*timestamppb.Timestamp)(nil), // 63: google.protobuf.Timestamp } var file_pkg_proto_xatu_coordinator_proto_depIdxs = []int32{ - 55, // 0: xatu.ExecutionNodeStatus.capabilities:type_name -> xatu.ExecutionNodeStatus.Capability - 56, // 1: xatu.ExecutionNodeStatus.fork_id:type_name -> xatu.ExecutionNodeStatus.ForkID - 6, // 2: xatu.CreateExecutionNodeRecordStatusRequest.status:type_name -> xatu.ExecutionNodeStatus - 9, // 3: xatu.CoordinateExecutionNodeRecordsRequest.node_records:type_name -> xatu.CoordinatedNodeRecord - 57, // 4: xatu.ConsensusNodeStatus.finalized_epoch_start_date_time:type_name -> google.protobuf.Timestamp - 57, // 5: xatu.ConsensusNodeStatus.head_slot_start_date_time:type_name -> google.protobuf.Timestamp - 12, // 6: xatu.CreateConsensusNodeRecordStatusRequest.status:type_name -> xatu.ConsensusNodeStatus - 12, // 7: xatu.CreateConsensusNodeRecordStatusesRequest.statuses:type_name -> xatu.ConsensusNodeStatus - 9, // 8: xatu.CoordinateConsensusNodeRecordsRequest.node_records:type_name -> xatu.CoordinatedNodeRecord - 27, // 9: xatu.CannonLocationEthV2BeaconBlockVoluntaryExit.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 10: xatu.CannonLocationEthV2BeaconBlockProposerSlashing.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 11: xatu.CannonLocationEthV2BeaconBlockDeposit.backfilling_checkpoint_marker:type_name -> 
xatu.BackfillingCheckpointMarker - 27, // 12: xatu.CannonLocationEthV2BeaconBlockAttesterSlashing.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 13: xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 14: xatu.CannonLocationEthV2BeaconBlockExecutionTransaction.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 15: xatu.CannonLocationEthV2BeaconBlockWithdrawal.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 16: xatu.CannonLocationEthV2BeaconBlock.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 17: xatu.CannonLocationEthV1BeaconBlobSidecar.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 18: xatu.CannonLocationEthV1BeaconProposerDuty.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 19: xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 20: xatu.CannonLocationEthV1BeaconValidators.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker - 27, // 21: xatu.CannonLocationEthV1BeaconCommittee.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 61, // 0: xatu.ExecutionNodeStatus.capabilities:type_name -> xatu.ExecutionNodeStatus.Capability + 62, // 1: xatu.ExecutionNodeStatus.fork_id:type_name -> xatu.ExecutionNodeStatus.ForkID + 7, // 2: xatu.CreateExecutionNodeRecordStatusRequest.status:type_name -> xatu.ExecutionNodeStatus + 10, // 3: xatu.CoordinateExecutionNodeRecordsRequest.node_records:type_name -> xatu.CoordinatedNodeRecord + 63, // 4: xatu.ConsensusNodeStatus.finalized_epoch_start_date_time:type_name -> google.protobuf.Timestamp + 63, // 5: xatu.ConsensusNodeStatus.head_slot_start_date_time:type_name -> google.protobuf.Timestamp 
+ 13, // 6: xatu.CreateConsensusNodeRecordStatusRequest.status:type_name -> xatu.ConsensusNodeStatus + 13, // 7: xatu.CreateConsensusNodeRecordStatusesRequest.statuses:type_name -> xatu.ConsensusNodeStatus + 10, // 8: xatu.CoordinateConsensusNodeRecordsRequest.node_records:type_name -> xatu.CoordinatedNodeRecord + 28, // 9: xatu.CannonLocationEthV2BeaconBlockVoluntaryExit.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 10: xatu.CannonLocationEthV2BeaconBlockProposerSlashing.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 11: xatu.CannonLocationEthV2BeaconBlockDeposit.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 12: xatu.CannonLocationEthV2BeaconBlockAttesterSlashing.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 13: xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 14: xatu.CannonLocationEthV2BeaconBlockExecutionTransaction.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 15: xatu.CannonLocationEthV2BeaconBlockWithdrawal.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 16: xatu.CannonLocationEthV2BeaconBlock.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 17: xatu.CannonLocationEthV1BeaconBlobSidecar.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 18: xatu.CannonLocationEthV1BeaconProposerDuty.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 19: xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 20: xatu.CannonLocationEthV1BeaconValidators.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker + 28, // 21: 
xatu.CannonLocationEthV1BeaconCommittee.backfilling_checkpoint_marker:type_name -> xatu.BackfillingCheckpointMarker 0, // 22: xatu.CannonLocation.type:type_name -> xatu.CannonType - 28, // 23: xatu.CannonLocation.eth_v2_beacon_block_voluntary_exit:type_name -> xatu.CannonLocationEthV2BeaconBlockVoluntaryExit - 29, // 24: xatu.CannonLocation.eth_v2_beacon_block_proposer_slashing:type_name -> xatu.CannonLocationEthV2BeaconBlockProposerSlashing - 30, // 25: xatu.CannonLocation.eth_v2_beacon_block_deposit:type_name -> xatu.CannonLocationEthV2BeaconBlockDeposit - 31, // 26: xatu.CannonLocation.eth_v2_beacon_block_attester_slashing:type_name -> xatu.CannonLocationEthV2BeaconBlockAttesterSlashing - 32, // 27: xatu.CannonLocation.eth_v2_beacon_block_bls_to_execution_change:type_name -> xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange - 33, // 28: xatu.CannonLocation.eth_v2_beacon_block_execution_transaction:type_name -> xatu.CannonLocationEthV2BeaconBlockExecutionTransaction - 34, // 29: xatu.CannonLocation.eth_v2_beacon_block_withdrawal:type_name -> xatu.CannonLocationEthV2BeaconBlockWithdrawal - 35, // 30: xatu.CannonLocation.eth_v2_beacon_block:type_name -> xatu.CannonLocationEthV2BeaconBlock - 36, // 31: xatu.CannonLocation.blockprint_block_classification:type_name -> xatu.CannonLocationBlockprintBlockClassification - 37, // 32: xatu.CannonLocation.eth_v1_beacon_blob_sidecar:type_name -> xatu.CannonLocationEthV1BeaconBlobSidecar - 38, // 33: xatu.CannonLocation.eth_v1_beacon_proposer_duty:type_name -> xatu.CannonLocationEthV1BeaconProposerDuty - 39, // 34: xatu.CannonLocation.eth_v2_beacon_block_elaborated_attestation:type_name -> xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation - 40, // 35: xatu.CannonLocation.eth_v1_beacon_validators:type_name -> xatu.CannonLocationEthV1BeaconValidators - 41, // 36: xatu.CannonLocation.eth_v1_beacon_committee:type_name -> xatu.CannonLocationEthV1BeaconCommittee + 29, // 23: 
xatu.CannonLocation.eth_v2_beacon_block_voluntary_exit:type_name -> xatu.CannonLocationEthV2BeaconBlockVoluntaryExit + 30, // 24: xatu.CannonLocation.eth_v2_beacon_block_proposer_slashing:type_name -> xatu.CannonLocationEthV2BeaconBlockProposerSlashing + 31, // 25: xatu.CannonLocation.eth_v2_beacon_block_deposit:type_name -> xatu.CannonLocationEthV2BeaconBlockDeposit + 32, // 26: xatu.CannonLocation.eth_v2_beacon_block_attester_slashing:type_name -> xatu.CannonLocationEthV2BeaconBlockAttesterSlashing + 33, // 27: xatu.CannonLocation.eth_v2_beacon_block_bls_to_execution_change:type_name -> xatu.CannonLocationEthV2BeaconBlockBlsToExecutionChange + 34, // 28: xatu.CannonLocation.eth_v2_beacon_block_execution_transaction:type_name -> xatu.CannonLocationEthV2BeaconBlockExecutionTransaction + 35, // 29: xatu.CannonLocation.eth_v2_beacon_block_withdrawal:type_name -> xatu.CannonLocationEthV2BeaconBlockWithdrawal + 36, // 30: xatu.CannonLocation.eth_v2_beacon_block:type_name -> xatu.CannonLocationEthV2BeaconBlock + 37, // 31: xatu.CannonLocation.blockprint_block_classification:type_name -> xatu.CannonLocationBlockprintBlockClassification + 38, // 32: xatu.CannonLocation.eth_v1_beacon_blob_sidecar:type_name -> xatu.CannonLocationEthV1BeaconBlobSidecar + 39, // 33: xatu.CannonLocation.eth_v1_beacon_proposer_duty:type_name -> xatu.CannonLocationEthV1BeaconProposerDuty + 40, // 34: xatu.CannonLocation.eth_v2_beacon_block_elaborated_attestation:type_name -> xatu.CannonLocationEthV2BeaconBlockElaboratedAttestation + 41, // 35: xatu.CannonLocation.eth_v1_beacon_validators:type_name -> xatu.CannonLocationEthV1BeaconValidators + 42, // 36: xatu.CannonLocation.eth_v1_beacon_committee:type_name -> xatu.CannonLocationEthV1BeaconCommittee 0, // 37: xatu.GetCannonLocationRequest.type:type_name -> xatu.CannonType - 42, // 38: xatu.GetCannonLocationResponse.location:type_name -> xatu.CannonLocation - 42, // 39: xatu.UpsertCannonLocationRequest.location:type_name -> xatu.CannonLocation - 
47, // 40: xatu.RelayMonitorLocationBidTrace.slot_marker:type_name -> xatu.RelayMonitorSlotMarker - 47, // 41: xatu.RelayMonitorLocationPayloadDelivered.slot_marker:type_name -> xatu.RelayMonitorSlotMarker + 43, // 38: xatu.GetCannonLocationResponse.location:type_name -> xatu.CannonLocation + 43, // 39: xatu.UpsertCannonLocationRequest.location:type_name -> xatu.CannonLocation + 48, // 40: xatu.RelayMonitorLocationBidTrace.slot_marker:type_name -> xatu.RelayMonitorSlotMarker + 48, // 41: xatu.RelayMonitorLocationPayloadDelivered.slot_marker:type_name -> xatu.RelayMonitorSlotMarker 1, // 42: xatu.RelayMonitorLocation.type:type_name -> xatu.RelayMonitorType - 48, // 43: xatu.RelayMonitorLocation.bid_trace:type_name -> xatu.RelayMonitorLocationBidTrace - 49, // 44: xatu.RelayMonitorLocation.payload_delivered:type_name -> xatu.RelayMonitorLocationPayloadDelivered + 49, // 43: xatu.RelayMonitorLocation.bid_trace:type_name -> xatu.RelayMonitorLocationBidTrace + 50, // 44: xatu.RelayMonitorLocation.payload_delivered:type_name -> xatu.RelayMonitorLocationPayloadDelivered 1, // 45: xatu.GetRelayMonitorLocationRequest.type:type_name -> xatu.RelayMonitorType - 50, // 46: xatu.GetRelayMonitorLocationResponse.location:type_name -> xatu.RelayMonitorLocation - 50, // 47: xatu.UpsertRelayMonitorLocationRequest.location:type_name -> xatu.RelayMonitorLocation - 2, // 48: xatu.Coordinator.CreateNodeRecords:input_type -> xatu.CreateNodeRecordsRequest - 4, // 49: xatu.Coordinator.ListStalledExecutionNodeRecords:input_type -> xatu.ListStalledExecutionNodeRecordsRequest - 7, // 50: xatu.Coordinator.CreateExecutionNodeRecordStatus:input_type -> xatu.CreateExecutionNodeRecordStatusRequest - 10, // 51: xatu.Coordinator.CoordinateExecutionNodeRecords:input_type -> xatu.CoordinateExecutionNodeRecordsRequest - 13, // 52: xatu.Coordinator.ListStalledConsensusNodeRecords:input_type -> xatu.ListStalledConsensusNodeRecordsRequest - 15, // 53: 
xatu.Coordinator.CreateConsensusNodeRecordStatus:input_type -> xatu.CreateConsensusNodeRecordStatusRequest - 17, // 54: xatu.Coordinator.CreateConsensusNodeRecordStatuses:input_type -> xatu.CreateConsensusNodeRecordStatusesRequest - 19, // 55: xatu.Coordinator.CoordinateConsensusNodeRecords:input_type -> xatu.CoordinateConsensusNodeRecordsRequest - 21, // 56: xatu.Coordinator.GetDiscoveryNodeRecord:input_type -> xatu.GetDiscoveryNodeRecordRequest - 23, // 57: xatu.Coordinator.GetDiscoveryExecutionNodeRecord:input_type -> xatu.GetDiscoveryExecutionNodeRecordRequest - 25, // 58: xatu.Coordinator.GetDiscoveryConsensusNodeRecord:input_type -> xatu.GetDiscoveryConsensusNodeRecordRequest - 43, // 59: xatu.Coordinator.GetCannonLocation:input_type -> xatu.GetCannonLocationRequest - 45, // 60: xatu.Coordinator.UpsertCannonLocation:input_type -> xatu.UpsertCannonLocationRequest - 51, // 61: xatu.Coordinator.GetRelayMonitorLocation:input_type -> xatu.GetRelayMonitorLocationRequest - 53, // 62: xatu.Coordinator.UpsertRelayMonitorLocation:input_type -> xatu.UpsertRelayMonitorLocationRequest - 3, // 63: xatu.Coordinator.CreateNodeRecords:output_type -> xatu.CreateNodeRecordsResponse - 5, // 64: xatu.Coordinator.ListStalledExecutionNodeRecords:output_type -> xatu.ListStalledExecutionNodeRecordsResponse - 8, // 65: xatu.Coordinator.CreateExecutionNodeRecordStatus:output_type -> xatu.CreateExecutionNodeRecordStatusResponse - 11, // 66: xatu.Coordinator.CoordinateExecutionNodeRecords:output_type -> xatu.CoordinateExecutionNodeRecordsResponse - 14, // 67: xatu.Coordinator.ListStalledConsensusNodeRecords:output_type -> xatu.ListStalledConsensusNodeRecordsResponse - 16, // 68: xatu.Coordinator.CreateConsensusNodeRecordStatus:output_type -> xatu.CreateConsensusNodeRecordStatusResponse - 18, // 69: xatu.Coordinator.CreateConsensusNodeRecordStatuses:output_type -> xatu.CreateConsensusNodeRecordStatusesResponse - 20, // 70: xatu.Coordinator.CoordinateConsensusNodeRecords:output_type -> 
xatu.CoordinateConsensusNodeRecordsResponse - 22, // 71: xatu.Coordinator.GetDiscoveryNodeRecord:output_type -> xatu.GetDiscoveryNodeRecordResponse - 24, // 72: xatu.Coordinator.GetDiscoveryExecutionNodeRecord:output_type -> xatu.GetDiscoveryExecutionNodeRecordResponse - 26, // 73: xatu.Coordinator.GetDiscoveryConsensusNodeRecord:output_type -> xatu.GetDiscoveryConsensusNodeRecordResponse - 44, // 74: xatu.Coordinator.GetCannonLocation:output_type -> xatu.GetCannonLocationResponse - 46, // 75: xatu.Coordinator.UpsertCannonLocation:output_type -> xatu.UpsertCannonLocationResponse - 52, // 76: xatu.Coordinator.GetRelayMonitorLocation:output_type -> xatu.GetRelayMonitorLocationResponse - 54, // 77: xatu.Coordinator.UpsertRelayMonitorLocation:output_type -> xatu.UpsertRelayMonitorLocationResponse - 63, // [63:78] is the sub-list for method output_type - 48, // [48:63] is the sub-list for method input_type - 48, // [48:48] is the sub-list for extension type_name - 48, // [48:48] is the sub-list for extension extendee - 0, // [0:48] is the sub-list for field type_name + 51, // 46: xatu.GetRelayMonitorLocationResponse.location:type_name -> xatu.RelayMonitorLocation + 51, // 47: xatu.UpsertRelayMonitorLocationRequest.location:type_name -> xatu.RelayMonitorLocation + 2, // 48: xatu.HorizonLocation.type:type_name -> xatu.HorizonType + 2, // 49: xatu.GetHorizonLocationRequest.type:type_name -> xatu.HorizonType + 56, // 50: xatu.GetHorizonLocationResponse.location:type_name -> xatu.HorizonLocation + 56, // 51: xatu.UpsertHorizonLocationRequest.location:type_name -> xatu.HorizonLocation + 3, // 52: xatu.Coordinator.CreateNodeRecords:input_type -> xatu.CreateNodeRecordsRequest + 5, // 53: xatu.Coordinator.ListStalledExecutionNodeRecords:input_type -> xatu.ListStalledExecutionNodeRecordsRequest + 8, // 54: xatu.Coordinator.CreateExecutionNodeRecordStatus:input_type -> xatu.CreateExecutionNodeRecordStatusRequest + 11, // 55: 
xatu.Coordinator.CoordinateExecutionNodeRecords:input_type -> xatu.CoordinateExecutionNodeRecordsRequest + 14, // 56: xatu.Coordinator.ListStalledConsensusNodeRecords:input_type -> xatu.ListStalledConsensusNodeRecordsRequest + 16, // 57: xatu.Coordinator.CreateConsensusNodeRecordStatus:input_type -> xatu.CreateConsensusNodeRecordStatusRequest + 18, // 58: xatu.Coordinator.CreateConsensusNodeRecordStatuses:input_type -> xatu.CreateConsensusNodeRecordStatusesRequest + 20, // 59: xatu.Coordinator.CoordinateConsensusNodeRecords:input_type -> xatu.CoordinateConsensusNodeRecordsRequest + 22, // 60: xatu.Coordinator.GetDiscoveryNodeRecord:input_type -> xatu.GetDiscoveryNodeRecordRequest + 24, // 61: xatu.Coordinator.GetDiscoveryExecutionNodeRecord:input_type -> xatu.GetDiscoveryExecutionNodeRecordRequest + 26, // 62: xatu.Coordinator.GetDiscoveryConsensusNodeRecord:input_type -> xatu.GetDiscoveryConsensusNodeRecordRequest + 44, // 63: xatu.Coordinator.GetCannonLocation:input_type -> xatu.GetCannonLocationRequest + 46, // 64: xatu.Coordinator.UpsertCannonLocation:input_type -> xatu.UpsertCannonLocationRequest + 52, // 65: xatu.Coordinator.GetRelayMonitorLocation:input_type -> xatu.GetRelayMonitorLocationRequest + 54, // 66: xatu.Coordinator.UpsertRelayMonitorLocation:input_type -> xatu.UpsertRelayMonitorLocationRequest + 57, // 67: xatu.Coordinator.GetHorizonLocation:input_type -> xatu.GetHorizonLocationRequest + 59, // 68: xatu.Coordinator.UpsertHorizonLocation:input_type -> xatu.UpsertHorizonLocationRequest + 4, // 69: xatu.Coordinator.CreateNodeRecords:output_type -> xatu.CreateNodeRecordsResponse + 6, // 70: xatu.Coordinator.ListStalledExecutionNodeRecords:output_type -> xatu.ListStalledExecutionNodeRecordsResponse + 9, // 71: xatu.Coordinator.CreateExecutionNodeRecordStatus:output_type -> xatu.CreateExecutionNodeRecordStatusResponse + 12, // 72: xatu.Coordinator.CoordinateExecutionNodeRecords:output_type -> xatu.CoordinateExecutionNodeRecordsResponse + 15, // 73: 
xatu.Coordinator.ListStalledConsensusNodeRecords:output_type -> xatu.ListStalledConsensusNodeRecordsResponse + 17, // 74: xatu.Coordinator.CreateConsensusNodeRecordStatus:output_type -> xatu.CreateConsensusNodeRecordStatusResponse + 19, // 75: xatu.Coordinator.CreateConsensusNodeRecordStatuses:output_type -> xatu.CreateConsensusNodeRecordStatusesResponse + 21, // 76: xatu.Coordinator.CoordinateConsensusNodeRecords:output_type -> xatu.CoordinateConsensusNodeRecordsResponse + 23, // 77: xatu.Coordinator.GetDiscoveryNodeRecord:output_type -> xatu.GetDiscoveryNodeRecordResponse + 25, // 78: xatu.Coordinator.GetDiscoveryExecutionNodeRecord:output_type -> xatu.GetDiscoveryExecutionNodeRecordResponse + 27, // 79: xatu.Coordinator.GetDiscoveryConsensusNodeRecord:output_type -> xatu.GetDiscoveryConsensusNodeRecordResponse + 45, // 80: xatu.Coordinator.GetCannonLocation:output_type -> xatu.GetCannonLocationResponse + 47, // 81: xatu.Coordinator.UpsertCannonLocation:output_type -> xatu.UpsertCannonLocationResponse + 53, // 82: xatu.Coordinator.GetRelayMonitorLocation:output_type -> xatu.GetRelayMonitorLocationResponse + 55, // 83: xatu.Coordinator.UpsertRelayMonitorLocation:output_type -> xatu.UpsertRelayMonitorLocationResponse + 58, // 84: xatu.Coordinator.GetHorizonLocation:output_type -> xatu.GetHorizonLocationResponse + 60, // 85: xatu.Coordinator.UpsertHorizonLocation:output_type -> xatu.UpsertHorizonLocationResponse + 69, // [69:86] is the sub-list for method output_type + 52, // [52:69] is the sub-list for method input_type + 52, // [52:52] is the sub-list for extension type_name + 52, // [52:52] is the sub-list for extension extendee + 0, // [0:52] is the sub-list for field type_name } func init() { file_pkg_proto_xatu_coordinator_proto_init() } @@ -4873,7 +5318,7 @@ func file_pkg_proto_xatu_coordinator_proto_init() { } } file_pkg_proto_xatu_coordinator_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*ExecutionNodeStatus_Capability); i { + 
switch v := v.(*HorizonLocation); i { case 0: return &v.state case 1: @@ -4885,6 +5330,66 @@ func file_pkg_proto_xatu_coordinator_proto_init() { } } file_pkg_proto_xatu_coordinator_proto_msgTypes[54].Exporter = func(v any, i int) any { + switch v := v.(*GetHorizonLocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_xatu_coordinator_proto_msgTypes[55].Exporter = func(v any, i int) any { + switch v := v.(*GetHorizonLocationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_xatu_coordinator_proto_msgTypes[56].Exporter = func(v any, i int) any { + switch v := v.(*UpsertHorizonLocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_xatu_coordinator_proto_msgTypes[57].Exporter = func(v any, i int) any { + switch v := v.(*UpsertHorizonLocationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_xatu_coordinator_proto_msgTypes[58].Exporter = func(v any, i int) any { + switch v := v.(*ExecutionNodeStatus_Capability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_xatu_coordinator_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*ExecutionNodeStatus_ForkID); i { case 0: return &v.state @@ -4922,8 +5427,8 @@ func file_pkg_proto_xatu_coordinator_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pkg_proto_xatu_coordinator_proto_rawDesc, - NumEnums: 2, - NumMessages: 55, + NumEnums: 3, + NumMessages: 60, NumExtensions: 0, NumServices: 1, }, diff --git 
a/pkg/proto/xatu/coordinator.proto b/pkg/proto/xatu/coordinator.proto index a3a67212e..9bb678552 100644 --- a/pkg/proto/xatu/coordinator.proto +++ b/pkg/proto/xatu/coordinator.proto @@ -39,6 +39,11 @@ service Coordinator { returns (GetRelayMonitorLocationResponse) {} rpc UpsertRelayMonitorLocation(UpsertRelayMonitorLocationRequest) returns (UpsertRelayMonitorLocationResponse) {} + + rpc GetHorizonLocation(GetHorizonLocationRequest) + returns (GetHorizonLocationResponse) {} + rpc UpsertHorizonLocation(UpsertHorizonLocationRequest) + returns (UpsertHorizonLocationResponse) {} } message CreateNodeRecordsRequest { repeated string node_records = 1; } @@ -368,3 +373,45 @@ message GetRelayMonitorLocationResponse { RelayMonitorLocation location = 1; } message UpsertRelayMonitorLocationRequest { RelayMonitorLocation location = 1; } message UpsertRelayMonitorLocationResponse {} + +// Horizon types - for head data collection module +// Mirrors CannonType for horizon-specific location types +enum HorizonType { + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_VOLUNTARY_EXIT = 0; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_PROPOSER_SLASHING = 1; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_DEPOSIT = 2; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ATTESTER_SLASHING = 3; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_BLS_TO_EXECUTION_CHANGE = 4; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_EXECUTION_TRANSACTION = 5; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_WITHDRAWAL = 6; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK = 7; + HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_BLOB_SIDECAR = 8; + HORIZON_TYPE_BEACON_API_ETH_V1_PROPOSER_DUTY = 9; + HORIZON_TYPE_BEACON_API_ETH_V2_BEACON_BLOCK_ELABORATED_ATTESTATION = 10; + HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_VALIDATORS = 11; + HORIZON_TYPE_BEACON_API_ETH_V1_BEACON_COMMITTEE = 12; +} + +// HorizonLocation stores HEAD and FILL slot positions per deriver +// Used to track progress of the Horizon head data collection module +message 
HorizonLocation { + string network_id = 1; // Network identifier (e.g., "mainnet", "holesky") + HorizonType type = 2; // Deriver type being tracked + uint64 head_slot = 3; // Current head slot position for real-time tracking + uint64 fill_slot = 4; // Fill slot position for catch-up processing +} + +message GetHorizonLocationRequest { + string network_id = 1; + HorizonType type = 2; +} + +message GetHorizonLocationResponse { + HorizonLocation location = 1; +} + +message UpsertHorizonLocationRequest { + HorizonLocation location = 1; +} + +message UpsertHorizonLocationResponse {} diff --git a/pkg/proto/xatu/coordinator_grpc.pb.go b/pkg/proto/xatu/coordinator_grpc.pb.go index 2941edd73..b28dfa661 100644 --- a/pkg/proto/xatu/coordinator_grpc.pb.go +++ b/pkg/proto/xatu/coordinator_grpc.pb.go @@ -34,6 +34,8 @@ const ( Coordinator_UpsertCannonLocation_FullMethodName = "/xatu.Coordinator/UpsertCannonLocation" Coordinator_GetRelayMonitorLocation_FullMethodName = "/xatu.Coordinator/GetRelayMonitorLocation" Coordinator_UpsertRelayMonitorLocation_FullMethodName = "/xatu.Coordinator/UpsertRelayMonitorLocation" + Coordinator_GetHorizonLocation_FullMethodName = "/xatu.Coordinator/GetHorizonLocation" + Coordinator_UpsertHorizonLocation_FullMethodName = "/xatu.Coordinator/UpsertHorizonLocation" ) // CoordinatorClient is the client API for Coordinator service. 
@@ -55,6 +57,8 @@ type CoordinatorClient interface { UpsertCannonLocation(ctx context.Context, in *UpsertCannonLocationRequest, opts ...grpc.CallOption) (*UpsertCannonLocationResponse, error) GetRelayMonitorLocation(ctx context.Context, in *GetRelayMonitorLocationRequest, opts ...grpc.CallOption) (*GetRelayMonitorLocationResponse, error) UpsertRelayMonitorLocation(ctx context.Context, in *UpsertRelayMonitorLocationRequest, opts ...grpc.CallOption) (*UpsertRelayMonitorLocationResponse, error) + GetHorizonLocation(ctx context.Context, in *GetHorizonLocationRequest, opts ...grpc.CallOption) (*GetHorizonLocationResponse, error) + UpsertHorizonLocation(ctx context.Context, in *UpsertHorizonLocationRequest, opts ...grpc.CallOption) (*UpsertHorizonLocationResponse, error) } type coordinatorClient struct { @@ -215,6 +219,26 @@ func (c *coordinatorClient) UpsertRelayMonitorLocation(ctx context.Context, in * return out, nil } +func (c *coordinatorClient) GetHorizonLocation(ctx context.Context, in *GetHorizonLocationRequest, opts ...grpc.CallOption) (*GetHorizonLocationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetHorizonLocationResponse) + err := c.cc.Invoke(ctx, Coordinator_GetHorizonLocation_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *coordinatorClient) UpsertHorizonLocation(ctx context.Context, in *UpsertHorizonLocationRequest, opts ...grpc.CallOption) (*UpsertHorizonLocationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpsertHorizonLocationResponse) + err := c.cc.Invoke(ctx, Coordinator_UpsertHorizonLocation_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // CoordinatorServer is the server API for Coordinator service. // All implementations must embed UnimplementedCoordinatorServer // for forward compatibility. 
@@ -234,6 +258,8 @@ type CoordinatorServer interface { UpsertCannonLocation(context.Context, *UpsertCannonLocationRequest) (*UpsertCannonLocationResponse, error) GetRelayMonitorLocation(context.Context, *GetRelayMonitorLocationRequest) (*GetRelayMonitorLocationResponse, error) UpsertRelayMonitorLocation(context.Context, *UpsertRelayMonitorLocationRequest) (*UpsertRelayMonitorLocationResponse, error) + GetHorizonLocation(context.Context, *GetHorizonLocationRequest) (*GetHorizonLocationResponse, error) + UpsertHorizonLocation(context.Context, *UpsertHorizonLocationRequest) (*UpsertHorizonLocationResponse, error) mustEmbedUnimplementedCoordinatorServer() } @@ -289,6 +315,12 @@ func (UnimplementedCoordinatorServer) GetRelayMonitorLocation(context.Context, * func (UnimplementedCoordinatorServer) UpsertRelayMonitorLocation(context.Context, *UpsertRelayMonitorLocationRequest) (*UpsertRelayMonitorLocationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpsertRelayMonitorLocation not implemented") } +func (UnimplementedCoordinatorServer) GetHorizonLocation(context.Context, *GetHorizonLocationRequest) (*GetHorizonLocationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHorizonLocation not implemented") +} +func (UnimplementedCoordinatorServer) UpsertHorizonLocation(context.Context, *UpsertHorizonLocationRequest) (*UpsertHorizonLocationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpsertHorizonLocation not implemented") +} func (UnimplementedCoordinatorServer) mustEmbedUnimplementedCoordinatorServer() {} func (UnimplementedCoordinatorServer) testEmbeddedByValue() {} @@ -580,6 +612,42 @@ func _Coordinator_UpsertRelayMonitorLocation_Handler(srv interface{}, ctx contex return interceptor(ctx, in, info, handler) } +func _Coordinator_GetHorizonLocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetHorizonLocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CoordinatorServer).GetHorizonLocation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Coordinator_GetHorizonLocation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CoordinatorServer).GetHorizonLocation(ctx, req.(*GetHorizonLocationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Coordinator_UpsertHorizonLocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpsertHorizonLocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CoordinatorServer).UpsertHorizonLocation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Coordinator_UpsertHorizonLocation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CoordinatorServer).UpsertHorizonLocation(ctx, req.(*UpsertHorizonLocationRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Coordinator_ServiceDesc is the grpc.ServiceDesc for Coordinator service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -647,6 +715,14 @@ var Coordinator_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpsertRelayMonitorLocation", Handler: _Coordinator_UpsertRelayMonitorLocation_Handler, }, + { + MethodName: "GetHorizonLocation", + Handler: _Coordinator_GetHorizonLocation_Handler, + }, + { + MethodName: "UpsertHorizonLocation", + Handler: _Coordinator_UpsertHorizonLocation_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/proto/xatu/coordinator.proto", diff --git a/pkg/proto/xatu/module.pb.go b/pkg/proto/xatu/module.pb.go index 76ee79d06..82a103316 100644 --- a/pkg/proto/xatu/module.pb.go +++ b/pkg/proto/xatu/module.pb.go @@ -35,6 +35,7 @@ const ( ModuleName_TYSM ModuleName = 8 ModuleName_SIDECAR ModuleName = 9 ModuleName_RPC_SNOOPER ModuleName = 10 + ModuleName_HORIZON ModuleName = 11 ) // Enum value maps for ModuleName. @@ -51,6 +52,7 @@ var ( 8: "TYSM", 9: "SIDECAR", 10: "RPC_SNOOPER", + 11: "HORIZON", } ModuleName_value = map[string]int32{ "UNSPECIFIED": 0, @@ -64,6 +66,7 @@ var ( "TYSM": 8, "SIDECAR": 9, "RPC_SNOOPER": 10, + "HORIZON": 11, } ) @@ -99,7 +102,7 @@ var File_pkg_proto_xatu_module_proto protoreflect.FileDescriptor var file_pkg_proto_xatu_module_proto_rawDesc = []byte{ 0x0a, 0x1b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x78, 0x61, 0x74, 0x75, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x78, - 0x61, 0x74, 0x75, 0x2a, 0xab, 0x01, 0x0a, 0x0a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, + 0x61, 0x74, 0x75, 0x2a, 0xb8, 0x01, 0x0a, 0x0a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, @@ 
-110,10 +113,11 @@ var file_pkg_proto_xatu_module_proto_rawDesc = []byte{ 0x4d, 0x4f, 0x4e, 0x49, 0x54, 0x4f, 0x52, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x54, 0x59, 0x53, 0x4d, 0x10, 0x08, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x49, 0x44, 0x45, 0x43, 0x41, 0x52, 0x10, 0x09, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x50, 0x43, 0x5f, 0x53, 0x4e, 0x4f, 0x4f, 0x50, 0x45, 0x52, 0x10, - 0x0a, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x65, 0x74, 0x68, 0x70, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x70, 0x73, 0x2f, 0x78, 0x61, 0x74, 0x75, - 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x78, 0x61, 0x74, 0x75, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0a, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0x0b, 0x42, 0x2c, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x74, 0x68, + 0x70, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x70, 0x73, 0x2f, 0x78, 0x61, 0x74, 0x75, 0x2f, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x78, 0x61, 0x74, 0x75, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/xatu/module.proto b/pkg/proto/xatu/module.proto index 60b06d499..f3efdb4da 100644 --- a/pkg/proto/xatu/module.proto +++ b/pkg/proto/xatu/module.proto @@ -17,4 +17,5 @@ enum ModuleName { TYSM = 8; SIDECAR = 9; RPC_SNOOPER = 10; + HORIZON = 11; } \ No newline at end of file diff --git a/pkg/server/persistence/horizon/location.go b/pkg/server/persistence/horizon/location.go new file mode 100644 index 000000000..99e33d6e5 --- /dev/null +++ b/pkg/server/persistence/horizon/location.go @@ -0,0 +1,55 @@ +package horizon + +import ( + "fmt" + "time" + + "github.com/ethpandaops/xatu/pkg/proto/xatu" +) + +// Location represents a Horizon location record in the database. +type Location struct { + // LocationID is the location id. + LocationID any `json:"locationId" db:"location_id"` + // CreateTime is the timestamp of when the record was created. 
+ CreateTime time.Time `json:"createTime" db:"create_time" fieldopt:"omitempty"` + // UpdateTime is the timestamp of when the record was updated. + UpdateTime time.Time `json:"updateTime" db:"update_time" fieldopt:"omitempty"` + // NetworkID is the network id of the location. + NetworkID string `json:"networkId" db:"network_id"` + // Type is the type of the location. + Type string `json:"type" db:"type"` + // HeadSlot is the current head slot position for real-time tracking. + HeadSlot uint64 `json:"headSlot" db:"head_slot"` + // FillSlot is the fill slot position for catch-up processing. + FillSlot uint64 `json:"fillSlot" db:"fill_slot"` +} + +// Marshal marshals a proto HorizonLocation message into the Location fields. +func (l *Location) Marshal(msg *xatu.HorizonLocation) error { + if msg == nil { + return fmt.Errorf("horizon location message is nil") + } + + l.NetworkID = msg.NetworkId + l.Type = msg.Type.String() + l.HeadSlot = msg.HeadSlot + l.FillSlot = msg.FillSlot + + return nil +} + +// Unmarshal unmarshals the Location into a proto HorizonLocation message. 
+func (l *Location) Unmarshal() (*xatu.HorizonLocation, error) { + horizonType, ok := xatu.HorizonType_value[l.Type] + if !ok { + return nil, fmt.Errorf("unknown horizon type: %s", l.Type) + } + + return &xatu.HorizonLocation{ + NetworkId: l.NetworkID, + Type: xatu.HorizonType(horizonType), + HeadSlot: l.HeadSlot, + FillSlot: l.FillSlot, + }, nil +} diff --git a/pkg/server/persistence/horizon_location.go b/pkg/server/persistence/horizon_location.go new file mode 100644 index 000000000..6ba2088fa --- /dev/null +++ b/pkg/server/persistence/horizon_location.go @@ -0,0 +1,104 @@ +package persistence + +import ( + "context" + "errors" + "time" + + perrors "github.com/pkg/errors" + + "github.com/ethpandaops/xatu/pkg/server/persistence/horizon" + "github.com/huandu/go-sqlbuilder" +) + +var horizonLocationStruct = sqlbuilder.NewStruct(new(horizon.Location)).For(sqlbuilder.PostgreSQL) + +var ErrHorizonLocationNotFound = errors.New("horizon location not found") + +func (c *Client) UpsertHorizonLocation(ctx context.Context, location *horizon.Location) error { + if location.LocationID == nil { + location.LocationID = sqlbuilder.Raw("DEFAULT") + } + + location.CreateTime = time.Now() + location.UpdateTime = time.Now() + + ub := horizonLocationStruct.InsertInto("horizon_location", location) + + sqlQuery, args := ub.Build() + sqlQuery += " ON CONFLICT ON CONSTRAINT horizon_location_unique DO UPDATE SET update_time = EXCLUDED.update_time, head_slot = EXCLUDED.head_slot, fill_slot = EXCLUDED.fill_slot" + + c.log.WithField("sql", sqlQuery).WithField("args", args).Debug("UpsertHorizonLocation") + + _, err := c.db.ExecContext(ctx, sqlQuery, args...) + + return err +} + +func (c *Client) GetHorizonLocationByID(ctx context.Context, id int64) (*horizon.Location, error) { + sb := horizonLocationStruct.SelectFrom("horizon_location") + sb.Where(sb.E("location_id", id)) + + sql, args := sb.Build() + + rows, err := c.db.QueryContext(ctx, sql, args...) 
+ if err != nil { + return nil, perrors.Wrap(err, "db query failed") + } + + defer rows.Close() + + var locations []*horizon.Location + + for rows.Next() { + var location horizon.Location + + err = rows.Scan(horizonLocationStruct.Addr(&location)...) + if err != nil { + return nil, perrors.Wrap(err, "db scan failed") + } + + locations = append(locations, &location) + } + + if len(locations) != 1 { + return nil, ErrHorizonLocationNotFound + } + + return locations[0], nil +} + +// GetHorizonLocationByNetworkIDAndType gets location by network id and type. +func (c *Client) GetHorizonLocationByNetworkIDAndType(ctx context.Context, networkID, typ string) (*horizon.Location, error) { + sb := horizonLocationStruct.SelectFrom("horizon_location") + sb.Where(sb.E("network_id", networkID)) + sb.Where(sb.E("type", typ)) + + sql, args := sb.Build() + + rows, err := c.db.QueryContext(ctx, sql, args...) + if err != nil { + return nil, perrors.Wrap(err, "db query failed") + } + + defer rows.Close() + + var locations []*horizon.Location + + for rows.Next() { + var location horizon.Location + + err = rows.Scan(horizonLocationStruct.Addr(&location)...) 
+ if err != nil { + return nil, perrors.Wrap(err, "db scan failed") + } + + locations = append(locations, &location) + } + + if len(locations) != 1 { + return nil, ErrHorizonLocationNotFound + } + + return locations[0], nil +} diff --git a/pkg/server/service/coordinator/client.go b/pkg/server/service/coordinator/client.go index e35c8c437..173094e15 100644 --- a/pkg/server/service/coordinator/client.go +++ b/pkg/server/service/coordinator/client.go @@ -17,6 +17,7 @@ import ( "github.com/ethpandaops/xatu/pkg/server/geoip/lookup" "github.com/ethpandaops/xatu/pkg/server/persistence" "github.com/ethpandaops/xatu/pkg/server/persistence/cannon" + "github.com/ethpandaops/xatu/pkg/server/persistence/horizon" "github.com/ethpandaops/xatu/pkg/server/persistence/node" "github.com/ethpandaops/xatu/pkg/server/persistence/relaymonitor" n "github.com/ethpandaops/xatu/pkg/server/service/coordinator/node" @@ -823,6 +824,66 @@ func (c *Client) UpsertRelayMonitorLocation(ctx context.Context, req *xatu.Upser return &xatu.UpsertRelayMonitorLocationResponse{}, nil } +func (c *Client) GetHorizonLocation(ctx context.Context, req *xatu.GetHorizonLocationRequest) (*xatu.GetHorizonLocationResponse, error) { + if c.config.Auth.Enabled != nil && *c.config.Auth.Enabled { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Unauthenticated, "missing metadata") + } + + if err := c.validateAuth(ctx, md); err != nil { + return nil, err + } + } + + location, err := c.persistence.GetHorizonLocationByNetworkIDAndType(ctx, req.NetworkId, req.Type.Enum().String()) + if err != nil && err != persistence.ErrHorizonLocationNotFound { + return nil, status.Error(codes.Internal, perrors.Wrap(err, "failed to get horizon location from db").Error()) + } + + rsp := &xatu.GetHorizonLocationResponse{} + + if location == nil { + return rsp, nil + } + + protoLoc, err := location.Unmarshal() + if err != nil { + return nil, status.Error(codes.Internal, perrors.Wrap(err, "failed to 
unmarshal horizon location").Error()) + } + + return &xatu.GetHorizonLocationResponse{ + Location: protoLoc, + }, nil +} + +func (c *Client) UpsertHorizonLocation(ctx context.Context, req *xatu.UpsertHorizonLocationRequest) (*xatu.UpsertHorizonLocationResponse, error) { + if c.config.Auth.Enabled != nil && *c.config.Auth.Enabled { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Unauthenticated, "missing metadata") + } + + if err := c.validateAuth(ctx, md); err != nil { + return nil, err + } + } + + newLocation := &horizon.Location{} + + err := newLocation.Marshal(req.Location) + if err != nil { + return nil, status.Error(codes.InvalidArgument, perrors.Wrap(err, "failed to marshal horizon location").Error()) + } + + err = c.persistence.UpsertHorizonLocation(ctx, newLocation) + if err != nil { + return nil, status.Error(codes.Internal, perrors.Wrap(err, "failed to upsert horizon location to db").Error()) + } + + return &xatu.UpsertHorizonLocationResponse{}, nil +} + func (c *Client) secureRandomInt(input int) (int, error) { if input <= 0 { return 0, fmt.Errorf("invalid range for random int: %d", input) diff --git a/scripts/e2e-horizon-test.sh b/scripts/e2e-horizon-test.sh new file mode 100755 index 000000000..3f4713f0b --- /dev/null +++ b/scripts/e2e-horizon-test.sh @@ -0,0 +1,656 @@ +#!/bin/bash +# +# Horizon E2E Test Script +# +# This script runs an end-to-end test of the Horizon module using Kurtosis +# to spin up a local Ethereum testnet with all consensus clients. 
+# +# The test verifies that data flows through the entire pipeline: +# Beacon Nodes (via SSE) -> Horizon -> Xatu Server -> Kafka -> Vector -> ClickHouse +# +# Usage: +# ./scripts/e2e-horizon-test.sh [--quick] [--skip-build] [--skip-cleanup] +# +# Options: +# --quick Run quick test (1 epoch, ~7 minutes instead of ~15 minutes) +# --skip-build Skip building the xatu image (use existing image) +# --skip-cleanup Don't cleanup on exit (useful for debugging) +# +# Prerequisites: +# - Docker and Docker Compose +# - Kurtosis CLI (https://docs.kurtosis.com/install/) +# - clickhouse-client (optional, will use docker exec if not available) + +set -euo pipefail + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +ENCLAVE_NAME="horizon-e2e" +XATU_IMAGE="ethpandaops/xatu:local" +DOCKER_NETWORK="xatu_xatu-net" + +# Timing configuration +QUICK_MODE=false +SKIP_BUILD=false +SKIP_CLEANUP=false +WAIT_EPOCHS=2 +SECONDS_PER_SLOT=12 +SLOTS_PER_EPOCH=32 +GENESIS_DELAY=120 +FORK_DISABLED_EPOCH=18446744073709551615 + +KURTOSIS_ARGS_FILE="" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --quick) + QUICK_MODE=true + WAIT_EPOCHS=1 + shift + ;; + --skip-build) + SKIP_BUILD=true + shift + ;; + --skip-cleanup) + SKIP_CLEANUP=true + shift + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +# Calculate wait time +EPOCH_DURATION=$((SLOTS_PER_EPOCH * SECONDS_PER_SLOT)) +WAIT_TIME=$((WAIT_EPOCHS * EPOCH_DURATION + 60)) # Add 60s buffer for processing + +# Color output helpers +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +log_header() { + echo "" + echo -e "${BLUE}============================================${NC}" + echo -e 
"${BLUE} $1${NC}" + echo -e "${BLUE}============================================${NC}" +} + +# Cleanup function +cleanup() { + if [ "$SKIP_CLEANUP" = true ]; then + log_warn "Skipping cleanup (--skip-cleanup specified)" + log_info "To clean up manually:" + log_info " kurtosis enclave stop $ENCLAVE_NAME && kurtosis enclave rm $ENCLAVE_NAME" + log_info " docker stop xatu-horizon && docker rm xatu-horizon" + log_info " docker compose -f $REPO_ROOT/docker-compose.yml down -v" + return + fi + + log_header "Cleaning up" + + # Stop and remove Horizon container + if docker ps -a --format '{{.Names}}' | grep -q "^xatu-horizon$"; then + log_info "Stopping Horizon container..." + docker stop xatu-horizon 2>/dev/null || true + docker rm xatu-horizon 2>/dev/null || true + fi + + # Stop Kurtosis enclave + if kurtosis enclave ls 2>/dev/null | grep -q "$ENCLAVE_NAME"; then + log_info "Stopping Kurtosis enclave..." + kurtosis enclave stop "$ENCLAVE_NAME" 2>/dev/null || true + kurtosis enclave rm "$ENCLAVE_NAME" 2>/dev/null || true + fi + + # Stop docker-compose + log_info "Stopping docker-compose stack..." + docker compose -f "$REPO_ROOT/docker-compose.yml" down -v 2>/dev/null || true + + if [ -n "$KURTOSIS_ARGS_FILE" ] && [ -f "$KURTOSIS_ARGS_FILE" ]; then + rm -f "$KURTOSIS_ARGS_FILE" + fi + + log_success "Cleanup complete" +} + +# Set up trap for cleanup on exit +trap cleanup EXIT + +# Execute ClickHouse query +execute_query() { + local query="$1" + if command -v clickhouse-client &> /dev/null; then + clickhouse-client -h localhost --port 9000 -u default -d default -q "$query" 2>/dev/null + else + docker exec xatu-clickhouse-01 clickhouse-client -q "$query" 2>/dev/null + fi +} + +# Wait for ClickHouse to be ready +wait_for_clickhouse() { + log_info "Waiting for ClickHouse to be ready..." + local max_attempts=60 + local attempt=0 + + while ! 
execute_query "SELECT 1" &>/dev/null; do + attempt=$((attempt + 1)) + if [ $attempt -ge $max_attempts ]; then + log_error "ClickHouse not ready after $max_attempts attempts" + return 1 + fi + sleep 2 + done + log_success "ClickHouse is ready" +} + +# Wait for Postgres to be ready and run migrations +wait_for_postgres() { + log_info "Waiting for PostgreSQL to be ready..." + local max_attempts=60 + local attempt=0 + + while ! docker exec xatu-postgres pg_isready -U user &>/dev/null; do + attempt=$((attempt + 1)) + if [ $attempt -ge $max_attempts ]; then + log_error "PostgreSQL not ready after $max_attempts attempts" + return 1 + fi + sleep 2 + done + + # Wait for horizon_location table to be created + log_info "Waiting for horizon_location table..." + attempt=0 + while ! docker exec xatu-postgres psql -U user -d xatu -c "SELECT 1 FROM horizon_location LIMIT 1" &>/dev/null; do + attempt=$((attempt + 1)) + if [ $attempt -ge $max_attempts ]; then + log_error "horizon_location table not created after $max_attempts attempts" + return 1 + fi + sleep 2 + done + + log_success "PostgreSQL is ready with horizon_location table" +} + +# Get beacon node container names from Kurtosis +get_beacon_nodes() { + local inspect_output + if ! inspect_output=$(kurtosis enclave inspect --full-uuids "$ENCLAVE_NAME" 2>/dev/null); then + log_error "Failed to inspect Kurtosis enclave: $ENCLAVE_NAME" + return 1 + fi + + echo "$inspect_output" | \ + awk '{ + uuid = "" + name = "" + for (i = 1; i <= NF; i++) { + if (length($i) == 32 && $i ~ /^[0-9a-f]+$/) { uuid = $i } + } + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i !~ /validator/) { name = $i } + } + if (uuid != "" && name != "") { print name " " name "--" uuid } + }' | \ + head -n 6 +} + +# Connect Kurtosis containers to xatu network +connect_networks() { + log_info "Connecting Kurtosis beacon nodes to xatu network..." 
+ + local beacon_nodes + beacon_nodes=$(get_beacon_nodes) + if [ -z "$beacon_nodes" ]; then + log_error "No beacon nodes found in Kurtosis enclave output" + return 1 + fi + + local connected=0 + while read -r name container; do + [ -z "$name" ] && continue + if docker network connect --alias "$name" "$DOCKER_NETWORK" "$container" 2>/dev/null; then + log_info " Connected: $container (alias: $name)" + connected=$((connected + 1)) + else + log_warn " Already connected or failed: $container" + fi + done <<< "$beacon_nodes" + + if [ $connected -eq 0 ]; then + log_error "Failed to connect any beacon nodes to $DOCKER_NETWORK" + return 1 + fi +} + +# Generate Horizon config with actual beacon node URLs +generate_horizon_config() { + local config_file="$1" + + log_info "Generating Horizon configuration..." + + # Get beacon node info from Kurtosis + local lighthouse_container prysm_container teku_container lodestar_container nimbus_container grandine_container + + local inspect_output + if ! inspect_output=$(kurtosis enclave inspect --full-uuids "$ENCLAVE_NAME" 2>/dev/null); then + log_error "Failed to inspect Kurtosis enclave: $ENCLAVE_NAME" + return 1 + fi + + lighthouse_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /lighthouse/ && $i !~ /validator/) { print $i; exit } + } + }') + prysm_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /prysm/ && $i !~ /validator/) { print $i; exit } + } + }') + teku_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /teku/ && $i !~ /validator/) { print $i; exit } + } + }') + lodestar_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /lodestar/ && $i !~ /validator/) { print $i; exit } + } + }') + nimbus_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /nimbus/ && $i !~ /validator/) { print 
$i; exit } + } + }') + grandine_container=$(echo "$inspect_output" | awk '{ + for (i = 1; i <= NF; i++) { + if ($i ~ /^cl-/ && $i ~ /grandine/ && $i !~ /validator/) { print $i; exit } + } + }') + + if [ -z "${lighthouse_container}${prysm_container}${teku_container}${lodestar_container}${nimbus_container}${grandine_container}" ]; then + log_error "No beacon node services found in Kurtosis enclave output" + return 1 + fi + + cat > "$config_file" <> "$config_file" <> "$config_file" <> "$config_file" <> "$config_file" <> "$config_file" <> "$config_file" <> "$config_file" < 1 + ) + ") + if [ -n "$duplicates" ] && [ "$duplicates" -eq 0 ]; then + log_success " No duplicate blocks found (deduplication working)" + else + log_error " Found $duplicates duplicate block entries" + failed=$((failed + 1)) + fi + + # Query 3: Check for slot gaps (if we have enough blocks) + log_info "Checking for slot gaps..." + total=$((total + 1)) + local min_slot max_slot expected_count actual_count + min_slot=$(execute_query "SELECT MIN(slot) FROM beacon_api_eth_v2_beacon_block FINAL WHERE meta_client_module = 'HORIZON'") + max_slot=$(execute_query "SELECT MAX(slot) FROM beacon_api_eth_v2_beacon_block FINAL WHERE meta_client_module = 'HORIZON'") + + if [ -n "$min_slot" ] && [ -n "$max_slot" ] && [ "$min_slot" != "$max_slot" ]; then + expected_count=$((max_slot - min_slot + 1)) + actual_count=$(execute_query "SELECT COUNT(DISTINCT slot) FROM beacon_api_eth_v2_beacon_block FINAL WHERE meta_client_module = 'HORIZON'") + + if [ "$actual_count" -ge "$((expected_count - 2))" ]; then # Allow 2 slot tolerance for missed blocks + log_success " Slots coverage: $actual_count / $expected_count (min: $min_slot, max: $max_slot)" + else + log_warn " Potential gaps: $actual_count / $expected_count slots covered" + fi + else + log_warn " Not enough data to check for gaps" + fi + + # Query 4: Check execution transactions + log_info "Checking for execution transactions..." 
+ total=$((total + 1)) + local tx_count + tx_count=$(execute_query "SELECT COUNT(*) FROM beacon_api_eth_v2_beacon_block_execution_transaction FINAL WHERE meta_client_module = 'HORIZON'") + if [ -n "$tx_count" ] && [ "$tx_count" -gt 0 ]; then + log_success " Found $tx_count execution transactions" + else + log_warn " No execution transactions (may be normal for empty blocks)" + fi + + # Query 5: Check elaborated attestations + log_info "Checking for elaborated attestations..." + total=$((total + 1)) + local attestation_count + attestation_count=$(execute_query "SELECT COUNT(*) FROM beacon_api_eth_v2_beacon_block_elaborated_attestation FINAL WHERE meta_client_module = 'HORIZON'") + if [ -n "$attestation_count" ] && [ "$attestation_count" -gt 0 ]; then + log_success " Found $attestation_count elaborated attestations" + else + log_error " No elaborated attestations found" + failed=$((failed + 1)) + fi + + # Query 6: Check proposer duties + log_info "Checking for proposer duties..." + total=$((total + 1)) + local duty_count + duty_count=$(execute_query "SELECT COUNT(*) FROM beacon_api_eth_v1_proposer_duty FINAL WHERE meta_client_module = 'HORIZON'") + if [ -n "$duty_count" ] && [ "$duty_count" -gt 0 ]; then + log_success " Found $duty_count proposer duties" + else + log_error " No proposer duties found" + failed=$((failed + 1)) + fi + + # Query 7: Check beacon committees + log_info "Checking for beacon committees..." + total=$((total + 1)) + local committee_count + committee_count=$(execute_query "SELECT COUNT(*) FROM beacon_api_eth_v1_beacon_committee FINAL WHERE meta_client_module = 'HORIZON'") + if [ -n "$committee_count" ] && [ "$committee_count" -gt 0 ]; then + log_success " Found $committee_count beacon committees" + else + log_error " No beacon committees found" + failed=$((failed + 1)) + fi + + # Summary + log_header "Validation Summary" + + local passed=$((total - failed)) + if [ $failed -eq 0 ]; then + log_success "All $total checks passed!" 
+ return 0 + else + log_error "$failed of $total checks failed" + return 1 + fi +} + +# Main execution +main() { + log_header "Horizon E2E Test" + log_info "Mode: $([ "$QUICK_MODE" = true ] && echo 'Quick (1 epoch)' || echo 'Full (2 epochs)')" + log_info "Wait time: ~$((WAIT_TIME / 60)) minutes" + + cd "$REPO_ROOT" + + # Step 1: Build xatu image + if [ "$SKIP_BUILD" = false ]; then + log_header "Building Xatu Image" + docker build -t "$XATU_IMAGE" . + log_success "Image built: $XATU_IMAGE" + else + log_warn "Skipping build (--skip-build specified)" + fi + + # Step 2: Start docker-compose stack + log_header "Starting Xatu Stack" + docker compose up --detach --quiet-pull + wait_for_clickhouse + wait_for_postgres + log_success "Xatu stack is running" + + # Step 3: Start Kurtosis network + log_header "Starting Kurtosis Network" + local genesis_time + genesis_time=$(( $(date +%s) + GENESIS_DELAY )) + KURTOSIS_ARGS_FILE="$(mktemp /tmp/horizon-test-XXXXXX)" + awk -v genesis_time="$genesis_time" -v fork_disabled_epoch="$FORK_DISABLED_EPOCH" ' + /genesis_delay:/ { + print + print " genesis_time: " genesis_time + print " fulu_fork_epoch: " fork_disabled_epoch + next + } + /deneb_fork_epoch:/ { print " deneb_fork_epoch: " fork_disabled_epoch; next } + /electra_fork_epoch:/ { print " electra_fork_epoch: " fork_disabled_epoch; next } + /fulu_fork_epoch:/ { print " fulu_fork_epoch: " fork_disabled_epoch; next } + { print } + ' "$REPO_ROOT/deploy/kurtosis/horizon-test.yaml" > "$KURTOSIS_ARGS_FILE" + + kurtosis run github.com/ethpandaops/ethereum-package \ + --args-file "$KURTOSIS_ARGS_FILE" \ + --enclave "$ENCLAVE_NAME" + log_success "Kurtosis network started" + + # Step 4: Wait for genesis + log_info "Waiting for genesis (120 seconds based on genesis_delay)..." 
+ sleep 130 + + # Step 5: Connect networks + log_header "Connecting Networks" + connect_networks + + # Step 6: Generate and start Horizon + log_header "Starting Horizon" + local horizon_config="/tmp/horizon-e2e-config.yaml" + generate_horizon_config "$horizon_config" + + docker run -d \ + --name xatu-horizon \ + --network "$DOCKER_NETWORK" \ + -v "$horizon_config:/etc/xatu/config.yaml:ro" \ + "$XATU_IMAGE" \ + horizon --config /etc/xatu/config.yaml + + log_info "Waiting for Horizon to start..." + sleep 10 + + # Show Horizon logs + log_info "Horizon initial logs:" + docker logs xatu-horizon 2>&1 | head -n 20 + + # Step 7: Wait for data collection + log_header "Collecting Data" + log_info "Waiting $((WAIT_TIME / 60)) minutes for $WAIT_EPOCHS epoch(s)..." + + local elapsed=0 + local check_interval=30 + while [ $elapsed -lt $WAIT_TIME ]; do + sleep $check_interval + elapsed=$((elapsed + check_interval)) + + # Show progress + local remaining=$((WAIT_TIME - elapsed)) + log_info "Progress: $((elapsed / 60))m elapsed, ~$((remaining / 60))m remaining" + + # Quick check for blocks + local current_blocks + current_blocks=$(execute_query "SELECT COUNT(*) FROM beacon_api_eth_v2_beacon_block FINAL WHERE meta_client_module = 'HORIZON'" 2>/dev/null || echo "0") + log_info " Current block count: $current_blocks" + + # Show recent Horizon logs if no blocks yet + if [ "$current_blocks" = "0" ]; then + log_info " Recent Horizon logs:" + docker logs --tail 5 xatu-horizon 2>&1 | sed 's/^/ /' + fi + done + + # Step 8: Run validation + if run_validation; then + log_header "TEST PASSED" + exit 0 + else + log_header "TEST FAILED" + + # Show debugging info + log_info "Horizon logs (last 50 lines):" + docker logs --tail 50 xatu-horizon 2>&1 + + log_info "xatu-server logs (last 20 lines):" + docker logs --tail 20 xatu-server 2>&1 + + exit 1 + fi +} + +main diff --git a/scripts/e2e-horizon-validate.sql b/scripts/e2e-horizon-validate.sql new file mode 100644 index 000000000..ee2ccc357 --- 
/dev/null +++ b/scripts/e2e-horizon-validate.sql @@ -0,0 +1,274 @@ +-- ============================================================================ +-- Horizon E2E Validation Queries +-- ============================================================================ +-- This file contains SQL queries to validate that the Horizon module is +-- working correctly during E2E tests. +-- +-- Usage: +-- cat scripts/e2e-horizon-validate.sql | clickhouse-client -h localhost +-- +-- Or run individual queries: +-- docker exec xatu-clickhouse-01 clickhouse-client --query "" +-- +-- All queries filter by meta_client_module = 'HORIZON' to verify data +-- specifically came from the Horizon module (not Cannon or other sources). +-- ============================================================================ + + +-- ============================================================================ +-- QUERY 1: Count beacon blocks by slot (check for duplicates) +-- ============================================================================ +-- Expected: Each slot should have at most 1 block (or 0 for missed slots). +-- If duplicates exist (cnt > 1), deduplication is not working properly. +-- Result should be empty if deduplication is working correctly. +-- ============================================================================ +SELECT + 'DUPLICATE_BLOCKS' as check_name, + slot, + block_root, + COUNT(*) as cnt +FROM beacon_api_eth_v2_beacon_block FINAL +WHERE meta_client_module = 'HORIZON' +GROUP BY slot, block_root +HAVING cnt > 1 +ORDER BY slot DESC +LIMIT 20; + + +-- ============================================================================ +-- QUERY 2: Verify no gaps in slot sequence (FILL iterator working) +-- ============================================================================ +-- Expected: No gaps greater than 1 slot between consecutive blocks. +-- Gaps of exactly 1 are normal (consecutive slots). 
+-- Large gaps (>1) indicate the FILL iterator may not be catching up properly. +-- Note: Some gaps may be acceptable if slots were missed (no block proposed). +-- ============================================================================ +WITH slots AS ( + SELECT DISTINCT slot + FROM beacon_api_eth_v2_beacon_block FINAL + WHERE meta_client_module = 'HORIZON' + ORDER BY slot +) +SELECT + 'SLOT_GAPS' as check_name, + slot as current_slot, + lagInFrame(slot, 1) OVER (ORDER BY slot) as previous_slot, + slot - lagInFrame(slot, 1) OVER (ORDER BY slot) as gap +FROM slots +WHERE slot - lagInFrame(slot, 1) OVER (ORDER BY slot) > 1 + AND lagInFrame(slot, 1) OVER (ORDER BY slot) IS NOT NULL +ORDER BY slot +LIMIT 20; + + +-- ============================================================================ +-- QUERY 3: Verify events have module_name = HORIZON +-- ============================================================================ +-- Expected: All events should have meta_client_module = 'HORIZON'. +-- This query shows a sample of blocks to confirm the module name is set. +-- ============================================================================ +SELECT + 'MODULE_VERIFICATION' as check_name, + slot, + block_root, + meta_client_module, + meta_client_name +FROM beacon_api_eth_v2_beacon_block FINAL +WHERE meta_client_module = 'HORIZON' +ORDER BY slot DESC +LIMIT 10; + + +-- ============================================================================ +-- QUERY 4: Count events per deriver type +-- ============================================================================ +-- Expected: Non-zero counts for most deriver types if blocks were processed. 
+-- beacon_block: Should always have data +-- elaborated_attestation: Should have data (attestations in every block) +-- execution_transaction: May be 0 if no transactions in test blocks +-- attester_slashing, proposer_slashing: Often 0 (rare events) +-- deposit, withdrawal, voluntary_exit, bls_to_execution_change: May be 0 +-- ============================================================================ +SELECT + 'EVENTS_PER_DERIVER' as check_name, + event_type, + event_count +FROM ( + SELECT 'beacon_block' as event_type, COUNT(*) as event_count + FROM beacon_api_eth_v2_beacon_block FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'attester_slashing', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_attester_slashing FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'proposer_slashing', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_proposer_slashing FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'deposit', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_deposit FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'withdrawal', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_withdrawal FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'voluntary_exit', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_voluntary_exit FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'bls_to_execution_change', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_bls_to_execution_change FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'execution_transaction', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_execution_transaction FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'elaborated_attestation', COUNT(*) + FROM beacon_api_eth_v2_beacon_block_elaborated_attestation FINAL + WHERE meta_client_module = 'HORIZON' + + UNION ALL + + SELECT 'proposer_duty', COUNT(*) + FROM beacon_api_eth_v1_proposer_duty FINAL + WHERE meta_client_module = 
'HORIZON'
+
+    UNION ALL
+
+    SELECT 'beacon_blob', COUNT(*)
+    FROM beacon_api_eth_v1_beacon_blob_sidecar FINAL
+    WHERE meta_client_module = 'HORIZON'
+
+    UNION ALL
+
+    SELECT 'beacon_validators', COUNT(*)
+    FROM beacon_api_eth_v1_beacon_validators FINAL
+    WHERE meta_client_module = 'HORIZON'
+
+    UNION ALL
+
+    SELECT 'beacon_committee', COUNT(*)
+    FROM beacon_api_eth_v1_beacon_committee FINAL
+    WHERE meta_client_module = 'HORIZON'
+)
+ORDER BY event_count DESC;
+
+
+-- ============================================================================
+-- QUERY 5: Slot coverage summary
+-- ============================================================================
+-- Expected: Shows the range of slots covered and total unique slots.
+-- coverage_percent: Should be close to 100% if no missed slots.
+-- actual_slots: Should roughly equal expected_slots (max_slot - min_slot + 1).
+-- ============================================================================
+SELECT
+    'SLOT_COVERAGE' as check_name,
+    MIN(slot) as min_slot,
+    MAX(slot) as max_slot,
+    MAX(slot) - MIN(slot) + 1 as expected_slots,
+    COUNT(DISTINCT slot) as actual_slots,
+    ROUND(COUNT(DISTINCT slot) * 100.0 / (MAX(slot) - MIN(slot) + 1), 2) as coverage_percent
+FROM beacon_api_eth_v2_beacon_block FINAL
+WHERE meta_client_module = 'HORIZON';
+
+
+-- ============================================================================
+-- QUERY 6: Block latency analysis
+-- ============================================================================
+-- Expected: Shows how quickly Horizon processed blocks after they were produced.
+-- Low latency indicates HEAD iterator is working in real-time.
+-- Higher latency may indicate FILL iterator backfilling historical data.
+-- ============================================================================
+SELECT
+    'BLOCK_LATENCY' as check_name,
+    COUNT(*) as total_blocks,
+    ROUND(AVG(toUnixTimestamp(meta_client_event_date_time) - toUnixTimestamp(slot_start_date_time)), 2) as avg_latency_seconds,
+    MIN(toUnixTimestamp(meta_client_event_date_time) - toUnixTimestamp(slot_start_date_time)) as min_latency_seconds,
+    MAX(toUnixTimestamp(meta_client_event_date_time) - toUnixTimestamp(slot_start_date_time)) as max_latency_seconds
+FROM beacon_api_eth_v2_beacon_block FINAL
+WHERE meta_client_module = 'HORIZON'
+  AND slot_start_date_time IS NOT NULL; -- NOTE(review): a no-op unless this column is Nullable in the schema — confirm
+
+
+-- ============================================================================
+-- QUERY 7: Events per beacon node (multi-node validation)
+-- ============================================================================
+-- Expected: If Horizon is connected to multiple beacon nodes, events should
+-- still be deduplicated (total should match single-node processing).
+-- This query shows how many deduplicated blocks each client name accounts for
+-- ============================================================================
+SELECT
+    'EVENTS_BY_NODE' as check_name,
+    meta_client_name,
+    COUNT(*) as block_count
+FROM beacon_api_eth_v2_beacon_block FINAL
+WHERE meta_client_module = 'HORIZON'
+GROUP BY meta_client_name
+ORDER BY block_count DESC;
+
+
+-- ============================================================================
+-- QUERY 8: Recent blocks (sanity check)
+-- ============================================================================
+-- Expected: Shows the 10 most recent blocks processed by Horizon.
+-- Useful for quick visual verification that data is flowing.
+-- ============================================================================
+SELECT
+    'RECENT_BLOCKS' as check_name,
+    slot,
+    LEFT(block_root, 16) as block_root_prefix,
+    meta_client_name,
+    meta_client_event_date_time
+FROM beacon_api_eth_v2_beacon_block FINAL
+WHERE meta_client_module = 'HORIZON'
+ORDER BY slot DESC
+LIMIT 10;
+
+
+-- ============================================================================
+-- VALIDATION SUMMARY
+-- ============================================================================
+-- This final query provides a pass/fail summary for automated testing.
+-- All checks should return 1 (pass) for a successful E2E test.
+-- ============================================================================
+SELECT
+    'VALIDATION_SUMMARY' as check_name,
+    -- Check 1: Has beacon blocks
+    (SELECT COUNT(*) > 0 FROM beacon_api_eth_v2_beacon_block FINAL WHERE meta_client_module = 'HORIZON') as has_beacon_blocks,
+    -- Check 2: No duplicate blocks (by slot+block_root)
+    (SELECT COUNT(*) = 0 FROM (
+        SELECT slot, block_root, COUNT(*) as cnt
+        FROM beacon_api_eth_v2_beacon_block FINAL
+        WHERE meta_client_module = 'HORIZON'
+        GROUP BY slot, block_root
+        HAVING cnt > 1
+    )) as no_duplicates,
+    -- Check 3: Has elaborated attestations
+    (SELECT COUNT(*) > 0 FROM beacon_api_eth_v2_beacon_block_elaborated_attestation FINAL WHERE meta_client_module = 'HORIZON') as has_attestations,
+    -- Check 4: Has proposer duties
+    (SELECT COUNT(*) > 0 FROM beacon_api_eth_v1_proposer_duty FINAL WHERE meta_client_module = 'HORIZON') as has_proposer_duties,
+    -- Check 5: Has beacon committees
+    (SELECT COUNT(*) > 0 FROM beacon_api_eth_v1_beacon_committee FINAL WHERE meta_client_module = 'HORIZON') as has_committees,
+    -- Check 6: Reasonable slot coverage (>90%) — same metric as Query 5's coverage_percent
+    (SELECT COUNT(DISTINCT slot) * 100.0 / (MAX(slot) - MIN(slot) + 1) > 90
+     FROM beacon_api_eth_v2_beacon_block FINAL
+     WHERE meta_client_module = 'HORIZON') as good_coverage;