diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000..41cdcc5
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,67 @@
+version: 2.1
+jobs:
+ build_and_test:
+ docker:
+ - image: ${CIPHEROWL_ECR_URL}/cipherowl/circleci:46f1d6a4caf201933914e8813b9c32b2f746d5f8
+ working_directory: ~/chainstorage
+ steps:
+ - checkout
+ - setup_remote_docker:
+ version: default
+ - run:
+ name: Install dependencies
+ command: |
+ set +x
+ make bootstrap
+ - run:
+ name: Run unit tests
+ command: "make test"
+ - run:
+ name: Run integration tests
+ command: |
+ set +x
+
+ docker compose -f docker-compose-testing.yml up -d --force-recreate
+ sleep 10
+
+ # Due to how the remote docker engine works with docker-compose
+ # in circleci, we have to run our integration tests from
+ # a remote container so that the tests can access network services
+ # spun up by docker-compose.
+ docker create -v /home/circleci --name chainstorage alpine:3.21 /bin/true
+ # docker cp /home/circleci/go chainstorage:/home/circleci/go
+ docker cp /home/circleci/chainstorage chainstorage:/home/circleci/chainstorage
+
+ docker run --network chainstorage_default \
+ --volumes-from chainstorage \
+ -w /home/circleci/chainstorage \
+ -e CHAINSTORAGE_AWS_POSTGRES_HOST=postgres \
+ -e CHAINSTORAGE_AWS_POSTGRES_PORT=5432 \
+ -e CHAINSTORAGE_AWS_POSTGRES_USER=postgres \
+ -e CHAINSTORAGE_AWS_POSTGRES_PASSWORD=postgres \
+ -e CHAINSTORAGE_AWS_POSTGRES_DATABASE=postgres \
+ ${CIPHEROWL_ECR_URL}/cipherowl/circleci:46f1d6a4caf201933914e8813b9c32b2f746d5f8 \
+ /bin/bash -c "sudo chown -R circleci:circleci ~/ && make bootstrap && \
+ echo '🔧 Test databases already set up by init script' && \
+ TEST_TYPE=integration go test ./... -v -p=1 -parallel=1 -timeout=15m -failfast -run=TestIntegration"
+ - run:
+ name: Run functional tests
+ command: |
+ set +x
+ echo "functional tests skipped"
+
+ # docker run --network chainstorage_default \
+ # --volumes-from chainstorage \
+ # -w /home/circleci/chainstorage \
+ # ${CIPHEROWL_ECR_URL}/cipherowl/circleci:46f1d6a4caf201933914e8813b9c32b2f746d5f8 \
+ # /bin/bash -c "sudo chown -R circleci:circleci ~/ && make bootstrap && TEST_TYPE=functional go test ./... -v -p=1 -parallel=1 -timeout=45m -failfast -run=TestIntegration"
+
+ docker compose -f docker-compose-testing.yml down
+
+workflows:
+ version: 2
+ default:
+ jobs:
+ - build_and_test:
+ name: build_and_test
+ context: cipherowl_build_context
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index f944dfb..b9e540d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,5 +28,8 @@
# pg
postgres_*
+# ignore all files in dev-data/
+dev-data/
+
# ignore binaries
/scripts/bin
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..49aa57b
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,173 @@
+# ChainStorage Development Guide
+
+This guide helps Claude understand the ChainStorage project structure and common development tasks.
+
+## Project Overview
+
+ChainStorage is a blockchain data storage and processing system that:
+- Continuously replicates blockchain changes (new blocks)
+- Acts as a distributed file system for blockchain data
+- Stores raw data in horizontally-scalable storage (S3 + DynamoDB)
+- Supports multiple blockchains: Ethereum, Bitcoin, Solana, Polygon, etc.
+- Can serve up to 1,500 blocks per second in production
+
+## Key Commands
+
+### Testing
+```bash
+# Run all unit tests
+make test
+
+# Run specific package tests
+make test TARGET=internal/blockchain/...
+
+# Run integration tests
+make integration TARGET=internal/storage/...
+
+# Run functional tests (requires secrets.yml)
+make functional TARGET=internal/workflow/...
+```
+
+### Linting and Type Checking
+```bash
+# Run linter (includes go vet, errcheck, ineffassign)
+make lint
+# Note: May encounter errors with Go versions > 1.24
+
+# No separate typecheck command - type checking happens during build
+```
+
+### Building
+```bash
+# Initial setup (once)
+make bootstrap
+
+# Build everything
+make build
+
+# Generate protobuf files
+make proto
+
+# Generate configs from templates
+make config
+```
+
+### Local Development
+```bash
+# Start local infrastructure (LocalStack)
+make localstack
+
+# Start server (Ethereum mainnet)
+make server
+
+# Start server (other networks)
+make server CHAINSTORAGE_CONFIG=ethereum_goerli
+```
+
+## Project Structure
+
+```
+/cmd/ - Command line tools
+ /admin/ - Admin CLI tool
+ /api/ - API server
+ /server/ - Main server
+ /worker/ - Worker processes
+
+/internal/ - Core implementation
+ /blockchain/ - Blockchain clients and parsers
+ /client/ - Chain-specific clients
+ /parser/ - Chain-specific parsers
+ /storage/ - Storage implementations
+ /blobstorage/ - S3/GCS storage
+ /metastorage/ - DynamoDB/Firestore storage
+ /workflow/ - Temporal workflows
+ /activity/ - Workflow activities
+
+/config/ - Generated configurations
+/config_templates/ - Configuration templates
+/protos/ - Protocol buffer definitions
+/sdk/ - Go SDK for consumers
+```
+
+## Key Workflows
+
+1. **Backfiller**: Backfills historical blocks
+2. **Poller**: Polls for new blocks continuously
+3. **Streamer**: Streams blocks in real-time
+4. **Monitor**: Monitors system health
+5. **Benchmarker**: Benchmarks performance
+
+## Environment Variables
+
+- `CHAINSTORAGE_NAMESPACE`: Service namespace (default: chainstorage)
+- `CHAINSTORAGE_CONFIG`: Format: `{blockchain}_{network}` (e.g., ethereum_mainnet)
+- `CHAINSTORAGE_ENVIRONMENT`: Environment (local/development/production)
+
+## Common Tasks
+
+### Adding Support for New Blockchain
+1. Create config templates in `/config_templates/chainstorage/{blockchain}/{network}/`
+2. Implement client in `/internal/blockchain/client/{blockchain}/`
+3. Implement parser in `/internal/blockchain/parser/{blockchain}/`
+4. Run `make config` to generate configs
+5. Add tests
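+
+Step 2 above boils down to implementing the chain client interface and exposing a constructor the factory can register. A minimal sketch, assuming hypothetical interfaces (the real ones live in `/internal/blockchain/client/` and differ):
+
+```go
+// Package mychain sketches a new chain client. Names are illustrative;
+// mirror the actual interfaces in internal/blockchain/client.
+package mychain
+
+import "context"
+
+// Client is a stand-in for the chain client interface the factory builds.
+type Client interface {
+	GetLatestHeight(ctx context.Context) (uint64, error)
+	GetBlockByHeight(ctx context.Context, height uint64) ([]byte, error)
+}
+
+type client struct {
+	endpoint string
+}
+
+// NewClient is the constructor the dependency-injection factory registers
+// for this blockchain.
+func NewClient(endpoint string) Client {
+	return &client{endpoint: endpoint}
+}
+
+func (c *client) GetLatestHeight(ctx context.Context) (uint64, error) {
+	return 0, nil // placeholder: query the node's tip height
+}
+
+func (c *client) GetBlockByHeight(ctx context.Context, height uint64) ([]byte, error) {
+	return nil, nil // placeholder: fetch and return the raw block
+}
+```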
+
+### Debugging LocalStack Services
+```bash
+# Check S3 files
+aws s3 --no-sign-request --region local --endpoint-url http://localhost:4566 ls --recursive example-chainstorage-ethereum-mainnet-dev/
+
+# Check DynamoDB
+aws dynamodb --no-sign-request --region local --endpoint-url http://localhost:4566 scan --table-name example_chainstorage_blocks_ethereum_mainnet
+
+# Check SQS DLQ
+aws sqs --no-sign-request --region local --endpoint-url http://localhost:4566 receive-message --queue-url "http://localhost:4566/000000000000/example_chainstorage_blocks_ethereum_mainnet_dlq"
+```
+
+### Working with Temporal Workflows
+```bash
+# Start backfiller
+go run ./cmd/admin workflow start --workflow backfiller --input '{"StartHeight": 11000000, "EndHeight": 11000100}' --blockchain ethereum --network mainnet --env local
+
+# Start poller
+go run ./cmd/admin workflow start --workflow poller --input '{"Tag": 0, "MaxBlocksToSync": 100}' --blockchain ethereum --network mainnet --env local
+
+# Check workflow status
+tctl --address localhost:7233 --namespace chainstorage-ethereum-mainnet workflow show --workflow_id workflow.backfiller
+```
+
+## Important Notes
+
+1. **Always run tests before committing**: Use `make test` and `make lint`
+2. **Config generation**: After modifying templates, run `make config`
+3. **Secrets**: Never commit `secrets.yml` files (used for endpoint configurations)
+4. **Endpoint groups**: Master (sticky) for canonical chain, Slave (round-robin) for data ingestion
+5. **Parser types**: Native (default), Mesh, or Raw
+
+## Dependencies
+
+- Go 1.24 (required - newer versions may cause lint errors)
+- Protobuf 29
+- Temporal (workflow engine)
+- LocalStack (local AWS services)
+- Docker & Docker Compose
+
+## Architecture Insights
+
+### Client Architecture
+- **Multi-endpoint system**: Master (primary), Slave (load distribution), Validator, Consensus
+- **Protocol support**: JSON-RPC (most chains) and REST API (Rosetta)
+- **Shared implementations**: EVM chains (Polygon, BSC, Arbitrum) share Ethereum client code
+- **Factory pattern**: Each blockchain has a client factory registered with dependency injection
+
+### Key Design Patterns
+1. **Interceptor Pattern**: Wraps clients for instrumentation and parsing
+2. **Option Pattern**: Modifies client behavior (e.g., WithBestEffort())
+3. **Batch Processing**: Configurable batch sizes for performance
+4. **Error Handling**: Standardized errors with network-specific handling
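+
+As a concrete illustration of the Option Pattern, a minimal functional-options sketch; the real `WithBestEffort()` lives in the client packages and its fields may differ:
+
+```go
+package client
+
+type callOptions struct {
+	bestEffort bool
+	batchSize  int
+}
+
+// Option mutates per-call client behavior.
+type Option func(*callOptions)
+
+// WithBestEffort tolerates partial failures instead of failing the call.
+func WithBestEffort() Option {
+	return func(o *callOptions) { o.bestEffort = true }
+}
+
+// WithBatchSize overrides the default batch size for one call.
+func WithBatchSize(n int) Option {
+	return func(o *callOptions) { o.batchSize = n }
+}
+
+func applyOptions(opts ...Option) callOptions {
+	o := callOptions{batchSize: 100} // defaults
+	for _, opt := range opts {
+		opt(&o)
+	}
+	return o
+}
+```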
+
+### Supported Blockchains
+- **EVM-based**: Ethereum, Polygon, BSC, Arbitrum, Optimism, Base, Fantom, Avalanche
+- **Bitcoin-based**: Bitcoin, Bitcoin Cash, Dogecoin, Litecoin
+- **Other**: Solana, Aptos, Tron
+- **Special**: Ethereum Beacon Chain support
\ No newline at end of file
diff --git a/COMPONENT_DIAGRAM.md b/COMPONENT_DIAGRAM.md
new file mode 100644
index 0000000..1c979cf
--- /dev/null
+++ b/COMPONENT_DIAGRAM.md
@@ -0,0 +1,365 @@
+# ChainStorage Component Diagram
+
+## Overview
+
+ChainStorage is a distributed blockchain data storage and processing system that continuously replicates blockchain data and serves it through APIs.
+
+## High-Level Architecture (Simplified)
+
+```mermaid
+graph LR
+ %% External
+ BC[Blockchain<br/>Nodes]
+ ExtClients[External<br/>Clients]
+
+ %% Core Components
+ API[API Server]
+ WF[Workflow<br/>Engine]
+ BCC[Blockchain<br/>Clients]
+ P[Parsers]
+
+ %% Storage
+ BS[Blob<br/>Storage]
+ MS[Meta<br/>Storage]
+
+ %% Flow
+ ExtClients -->|REST/gRPC| API
+ API --> BS
+ API --> MS
+
+ WF -->|Fetch| BCC
+ BCC -->|Raw Data| BC
+ BCC --> P
+ P --> BS
+ P --> MS
+
+ %% Styling with Nord palette
+ style BC fill:#d8dee9,stroke:#2e3440,stroke-width:2px,color:#2e3440
+ style ExtClients fill:#d8dee9,stroke:#2e3440,stroke-width:2px,color:#2e3440
+ style API fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style WF fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style BCC fill:#4c566a,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style P fill:#4c566a,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style BS fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style MS fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+```
+
+## Detailed Architecture
+
+```mermaid
+graph TB
+ %% External Systems
+ subgraph EXT["🌐 External Systems"]
+ BC[Blockchain Nodes<br/>Bitcoin/Ethereum/Solana/etc]
+ Client[External Clients<br/>SDK/REST/gRPC]
+ Temporal[Temporal Server]
+ AWS[AWS Services<br/>S3/DynamoDB/SQS]
+ GCP[GCP Services<br/>GCS/Firestore]
+ end
+
+ %% Entry Points
+ subgraph ENTRY["🚀 Entry Points (cmd/)"]
+ API[API Server<br/>REST/gRPC]
+ Server[Main Server<br/>Workflow Manager]
+ Worker[Worker<br/>Activity Executor]
+ Admin[Admin CLI]
+ Cron[Cron Jobs]
+ end
+
+ %% Storage Layer
+ subgraph STORAGE["💾 Storage Layer"]
+ BlobStorage[BlobStorage<br/>Raw Block Data]
+ MetaStorage[MetaStorage<br/>Metadata & Indexes]
+ end
+
+ %% Blockchain Layer
+ subgraph BLOCKCHAIN["⛓️ Blockchain Layer"]
+ ClientFactory[Client Factory]
+ subgraph CLIENTS["Multi-Endpoint Clients"]
+ Master[Master Client<br/>Sticky Sessions]
+ Slave[Slave Client<br/>Load Balanced]
+ Validator[Validator Client]
+ Consensus[Consensus Client]
+ end
+
+ subgraph PARSERS["Parsers"]
+ NativeParser[Native Parser]
+ RosettaParser[Rosetta Parser]
+ TrustlessValidator[Trustless Validator]
+ end
+ end
+
+ %% Workflow Engine
+ subgraph WORKFLOW["⚙️ Workflow Engine"]
+ subgraph WORKFLOWS["Workflows"]
+ Backfiller[Backfiller<br/>Historical Blocks]
+ Poller[Poller<br/>New Blocks]
+ Streamer[Streamer<br/>Real-time]
+ Monitor[Monitor<br/>Health Check]
+ CrossValidator[Cross Validator]
+ Replicator[Replicator]
+ end
+
+ subgraph ACTIVITIES["Activities"]
+ Extractor[Extractor]
+ Loader[Loader]
+ Syncer[Syncer]
+ Reader[Reader]
+ EventLoader[Event Loader]
+ end
+ end
+
+ %% Connections
+ Client -->|REST/gRPC| API
+ API -->|Read| BlobStorage
+ API -->|Query| MetaStorage
+
+ Server -->|Schedule| Temporal
+ Worker -->|Execute| Temporal
+ Temporal -->|Orchestrate| WORKFLOWS
+
+ WORKFLOWS -->|Execute| ACTIVITIES
+ ACTIVITIES -->|Use| ClientFactory
+ ClientFactory -->|Create| Master
+ ClientFactory -->|Create| Slave
+ ClientFactory -->|Create| Validator
+ ClientFactory -->|Create| Consensus
+
+ Master -->|Fetch| BC
+ Slave -->|Fetch| BC
+ Validator -->|Validate| BC
+ Consensus -->|Check| BC
+
+ ACTIVITIES -->|Parse| NativeParser
+ NativeParser -->|Convert| RosettaParser
+ ACTIVITIES -->|Validate| TrustlessValidator
+
+ ACTIVITIES -->|Store Raw| BlobStorage
+ ACTIVITIES -->|Store Meta| MetaStorage
+ BlobStorage -->|S3/GCS| AWS
+ BlobStorage -->|S3/GCS| GCP
+ MetaStorage -->|DynamoDB| AWS
+ MetaStorage -->|Firestore| GCP
+
+ Admin -->|Manual Ops| WORKFLOWS
+ Admin -->|Direct Access| BlobStorage
+ Admin -->|Direct Access| MetaStorage
+
+ Cron -->|Scheduled| WORKFLOWS
+
+ %% Styling with Nord palette
+ classDef external fill:#d8dee9,stroke:#2e3440,stroke-width:2px,color:#2e3440
+ classDef entry fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ classDef storage fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ classDef workflow fill:#4c566a,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ classDef blockchain fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ classDef subgraphStyle fill:#d8dee9,stroke:#2e3440,stroke-width:1px,color:#2e3440
+
+ class BC,Client,Temporal,AWS,GCP external
+ class API,Server,Worker,Admin,Cron entry
+ class BlobStorage,MetaStorage storage
+ class Backfiller,Poller,Streamer,Monitor,CrossValidator,Replicator,Extractor,Loader,Syncer,Reader,EventLoader workflow
+ class ClientFactory,Master,Slave,Validator,Consensus,NativeParser,RosettaParser,TrustlessValidator blockchain
+ class EXT,ENTRY,STORAGE,BLOCKCHAIN,WORKFLOW,WORKFLOWS,ACTIVITIES,CLIENTS,PARSERS subgraphStyle
+```
+
+## Component Breakdown
+
+### Workflow Engine Detail
+
+```mermaid
+graph TB
+ T[Temporal Server]
+
+ subgraph Workflows
+ BF[Backfiller]
+ PL[Poller]
+ ST[Streamer]
+ MN[Monitor]
+ CV[Cross Validator]
+ RP[Replicator]
+ end
+
+ subgraph Activities
+ EX[Extractor]
+ LD[Loader]
+ SY[Syncer]
+ RD[Reader]
+ EL[Event Loader]
+ end
+
+ T --> Workflows
+ Workflows --> Activities
+
+ style T fill:#d8dee9,stroke:#2e3440,stroke-width:2px,color:#2e3440
+ style Workflows fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style Activities fill:#4c566a,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+```
+
+### Blockchain Client Architecture
+
+```mermaid
+graph LR
+ CF[Client Factory]
+
+ subgraph Endpoints
+ M[Master<br/>Sticky Sessions]
+ S[Slave<br/>Load Balanced]
+ V[Validator]
+ C[Consensus]
+ end
+
+ subgraph Parsers
+ NP[Native Parser]
+ RP[Rosetta Parser]
+ TV[Trustless Validator]
+ end
+
+ CF --> Endpoints
+ Endpoints --> Parsers
+
+ style CF fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style Endpoints fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style Parsers fill:#4c566a,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+```
+
+### Storage Architecture
+
+```mermaid
+graph TB
+ subgraph BlobStorage
+ S3[AWS S3]
+ GCS[Google Cloud Storage]
+ end
+
+ subgraph MetaStorage
+ DDB[DynamoDB]
+ FS[Firestore]
+ end
+
+ BS[Blob Storage<br/>Interface] --> S3
+ BS --> GCS
+
+ MS[Meta Storage<br/>Interface] --> DDB
+ MS --> FS
+
+ style BlobStorage fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style MetaStorage fill:#434c5e,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style BS fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+ style MS fill:#3b4252,stroke:#2e3440,stroke-width:2px,color:#d8dee9
+```
+
+## Component Details
+
+### Entry Points
+
+1. **API Server** (`cmd/api`)
+ - Serves REST and gRPC endpoints
+ - Provides block data, events, and metadata
+ - Handles authentication and rate limiting
+
+2. **Main Server** (`cmd/server`)
+ - Manages Temporal workflows
+ - Coordinates blockchain data ingestion
+ - Handles workflow scheduling
+
+3. **Worker** (`cmd/worker`)
+ - Executes Temporal activities
+ - Performs actual blockchain data fetching
+ - Handles parsing and validation
+
+4. **Admin CLI** (`cmd/admin`)
+ - Manual workflow management
+ - Direct storage operations
+ - Debugging and maintenance
+
+5. **Cron** (`cmd/cron`)
+ - Scheduled maintenance tasks
+ - Periodic health checks
+ - Cleanup operations
+
+### Storage Layer
+
+1. **BlobStorage**
+ - Stores compressed raw block data
+ - Supports S3 (AWS) and GCS (GCP)
+ - Provides presigned URLs for direct access
+ - Handles block versioning
+
+2. **MetaStorage**
+ - Stores block metadata (height, hash, timestamp)
+ - Maintains transaction indexes
+ - Tracks events (block added/removed)
+ - Supports DynamoDB (AWS) and Firestore (GCP)
+
+### Blockchain Layer
+
+1. **Client System**
+ - **Master**: Primary endpoint with sticky sessions
+ - **Slave**: Load-balanced endpoints for ingestion
+ - **Validator**: Data validation endpoints
+ - **Consensus**: Consensus verification endpoints
+
+2. **Parser System**
+ - **Native Parser**: Blockchain-specific parsing
+ - **Rosetta Parser**: Standardized Rosetta format
+ - **Trustless Validator**: Cryptographic validation
+
+### Workflow Engine
+
+1. **Core Workflows**
+ - **Backfiller**: Historical block ingestion
+ - **Poller**: Continuous new block polling
+ - **Streamer**: Real-time block streaming
+ - **Monitor**: System health monitoring
+ - **Cross Validator**: Cross-chain validation
+ - **Replicator**: Data replication
+
+2. **Activities**
+ - **Extractor**: Fetches raw blocks
+ - **Loader**: Stores processed blocks
+ - **Syncer**: Synchronizes blockchain state
+ - **Reader**: Reads stored data
+ - **Event Loader**: Processes blockchain events
+
+## Data Flow
+
+1. **Ingestion Flow**
+ ```
+ Blockchain Node → Client → Parser → Validator → Storage
+ ```
+
+2. **Query Flow**
+ ```
+ External Client → API Server → Storage → Response
+ ```
+
+3. **Workflow Flow**
+ ```
+ Temporal → Workflow → Activity → Client → Blockchain
+ ↓
+ Storage
+ ```
+
+## Key Design Patterns
+
+- **Factory Pattern**: Dynamic blockchain client/parser creation
+- **Interceptor Pattern**: Request/response instrumentation
+- **Module Pattern**: Clean separation of concerns
+- **Dependency Injection**: Using Uber FX framework
+- **Option Pattern**: Flexible configuration
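+
+To make the dependency-injection point concrete, a minimal Uber FX wiring sketch with hypothetical constructors (the real modules are assembled across the internal packages):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.uber.org/fx"
+)
+
+type BlobStorage struct{}
+type MetaStorage struct{}
+
+func NewBlobStorage() *BlobStorage { return &BlobStorage{} }
+func NewMetaStorage() *MetaStorage { return &MetaStorage{} }
+
+// run receives fully-constructed dependencies from the FX graph.
+func run(b *BlobStorage, m *MetaStorage) {
+	fmt.Println("storage layer wired:", b != nil && m != nil)
+}
+
+func main() {
+	fx.New(
+		fx.Provide(NewBlobStorage, NewMetaStorage), // constructors form the graph
+		fx.Invoke(run),                             // entry point
+	).Run()
+}
+```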
+
+## Supported Blockchains
+
+- **EVM**: Ethereum, Polygon, BSC, Arbitrum, Optimism, Base, Fantom, Avalanche
+- **Bitcoin-based**: Bitcoin, Bitcoin Cash, Dogecoin, Litecoin
+- **Others**: Solana, Aptos, Tron
+- **Special**: Ethereum Beacon Chain
+
+## Scalability Features
+
+- Horizontal scaling via distributed storage
+- Multi-endpoint load balancing
+- Temporal workflow orchestration
+- Configurable batch processing
+- Support for 1,500+ blocks/second
\ No newline at end of file
diff --git a/README.md b/README.md
index 2b33483..7a1e2fd 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,10 @@
- [Overriding the Configuration](#overriding-the-configuration)
- [Development](#development)
- [Running Server](#running-server)
+ - [Running with PostgreSQL](#running-with-postgresql)
+ - [PostgreSQL Setup](#postgresql-setup)
+ - [Local Development Setup](#local-development-setup)
- [AWS localstack](#aws-localstack)
- [Temporal Workflow](#temporal-workflow)
- [Failover](#failover)
@@ -55,33 +59,52 @@ It aims to provide an efficient and flexible way to access the on-chain data:
- Flexibility is improved by decoupling data interpretation from data ingestion. ChainStorage stores the raw data, and the parsing is deferred until the data is consumed. The parsers are shipped as part of the SDK and run on the consumer side. Thanks to the ELT (Extract, Load, Transform) architecture, we can quickly iterate on the parser without ingesting the data from the blockchain again.
## Quick Start
-
-Make sure your local go version is 1.22 by running the following commands:
-```shell
-brew install go@1.22
-brew unlink go
-brew link go@1.22
-
-brew install protobuf@25.2
-brew unlink protobuf
-brew link protobuf
-
-```
-
-To set up for the first time (only done once):
+This section will guide you through setting up ChainStorage on your local machine for development.
+
+### Prerequisites
+1. **Go (version 1.24):**
+ ```shell
+ brew install go@1.24
+ brew unlink go
+ brew link go@1.24
+ ```
+ Verify your Go installation:
+ ```shell
+ go version
+ ```
+2. **Protocol Buffer Compiler (protobuf):** Used for code generation based on `.proto` files.
+ ```shell
+ brew install protobuf@29
+ brew unlink protobuf
+ brew link protobuf@29
+ ```
+ Verify your installation:
+ ```shell
+ protoc --version
+ ```
+
+### Initial Setup
+
+This command (run only once) installs necessary Go tools for development, like linters and code generators.
```shell
make bootstrap
```
-Rebuild everything:
+### Build the Project
+
+This command compiles the ChainStorage Go programs.
```shell
make build
```
+You'll run this command whenever you make changes to the Go source code.
-Generate Protos:
+### Generate Protocol Buffers
+
+ChainStorage uses Protocol Buffers to define data structures. This command generates Go code from those definitions.
```shell
make proto
```
+You'll need to run this if you change any `.proto` files (usually located in the `protos/` directory).
## Command Line
@@ -119,14 +142,24 @@ All sub-commands require the `blockchain`, `env`, `network` flags.
### Block Command
-Fetch a block from ethereum mainnet:
+This command allows you to fetch and inspect individual blocks from a specified blockchain and network.
+
+**Usage Example:**
+
+Fetch block #46147 from Ethereum mainnet, using your local configuration:
```shell
-go run ./cmd/admin block --blockchain ethereum --network mainnet --env local --height 46147
+go run ./cmd/admin block --blockchain ethereum --network mainnet --env local --height 46147
```
+* `block`: The command to fetch block data.
+* `--blockchain ethereum --network mainnet --env local`: These flags specify the target (Ethereum mainnet) and the configuration environment (local).
+* `--height 46147`: The specific block number you want to retrieve.
+
+You can also fetch blocks from other supported blockchains and networks by changing the flag values:
-Fetch a block from ethereum goerli:
+Fetch a block from Ethereum Goerli testnet:
```shell
-go run ./cmd/admin block --blockchain ethereum --network goerli --env local --height 46147
+# Assuming Goerli is configured and data is available
+go run ./cmd/admin block --blockchain ethereum --network goerli --env local --height 12345
```
### Backfill Command (development)
@@ -187,19 +220,44 @@ make functional TARGET=internal/blockchain/... TEST_FILTER=TestIntegrationPolygo
```
## Configuration
-
+Configuration in ChainStorage tells the system:
+1. Which blockchain to connect to (like Ethereum, Bitcoin, etc.)
+2. How to connect to that blockchain (which nodes/servers to use)
+3. Where to store the data it collects
+4. How to process and manage the data
### Dependency overview
To understand the structure and elements of ChainStorage's config, it's important to comprehend its dependencies.
-- **Temporal**: Temporal is a workflow engine that orchestrates the data ingestion workflow. It calls ChainStorage service endpoint to complete various of tasks.
-- **Blob storage** - current implementation is on AWS S3, and the local service is provied by localstack
-- **Key value storage** - current implemnentation is based on dynamodb and the local service is provied by localstack
-- **Dead Letter queue** - current implementation is on SQS and the local service is provied by localstack
+ChainStorage needs several services to work properly:
+
+1. **Blockchain Nodes**: These are servers that maintain a copy of the blockchain. ChainStorage connects to these to get blockchain data.
+ - Example: An Ethereum node that provides information about Ethereum transactions and blocks
+
+2. **Storage Systems**:
+ - **Blob storage** - the current implementation is on AWS S3, and the local service is provided by localstack
+ - **Key value storage** - the current implementation is based on DynamoDB, and the local service is provided by localstack
+ - **Dead Letter queue** - the current implementation is on SQS, and the local service is provided by localstack
+
+3. **Workflow Engine** (Temporal): Temporal is a workflow engine that orchestrates the data ingestion workflow. It calls the ChainStorage service endpoints to complete various tasks.
+
### Template location and generated config
The config template directory is in `config_templates/config`, which `make config` reads this directory and generates the config into the `config/chainstorage` directory.
+### Creating New Configurations
+
+Every new asset in ChainStorage consists of ChainStorage configuration files.
+These configuration files are generated from `.template.yml` template files using:
+
+```shell
+make config
+```
+
+These templates live under a directory dedicated to storing the config templates,
+in a structure that mirrors the final layout of the `config`
+directories. All configurations from this directory are generated into the
+respective final config directories.
### Environment Variables
@@ -218,19 +276,6 @@ The directory structure is as follows: `config/{namespace}/{blockchain}/{network
This env var controls the `{environment}` in which the service is deployed. Possible values include `production`
, `development`, and `local` (which is also the default value).
-### Creating New Configurations
-
-Every new asset in ChainStorage consists of ChainStorage configuration files.
-These configuration files are generated from `.template.yml` template files using:
-
-```shell
-make config
-```
-
-these templates will be under a directory dedicated to storing the config templates
-in a structure that mirrors the final config structure of the `config`
-directories. All configurations from this directory will be generated within the final
-respective config directories
### Template Format and Inheritance
@@ -325,7 +370,9 @@ chain:
```
### Overriding the Configuration
+You can override configurations in two ways:
+1. **Environment Variables**:
You may override any configuration using an environment variable. The environment variable should be prefixed with
"CHAINSTORAGE_". For nested dictionary, use underscore to separate the keys.
@@ -333,10 +380,31 @@ For example, you may override the endpoint group config at runtime by injecting
* master: CHAINSTORAGE_CHAIN_CLIENT_MASTER_ENDPOINT_GROUP
* slave: CHAINSTORAGE_CHAIN_CLIENT_SLAVE_ENDPOINT_GROUP
-Alternatively, you may override the configuration by creating `secrets.yml` within the same directory. Its attributes
+**Security Best Practice - PostgreSQL Credentials:**
+For sensitive data like database passwords, always use environment variables instead of hardcoding them in config files:
+```shell
+# PostgreSQL credentials (never put these in config files!)
+export CHAINSTORAGE_AWS_POSTGRES_USER="your_username"
+export CHAINSTORAGE_AWS_POSTGRES_PASSWORD="your_secure_password"
+
+# Storage type configuration
+export CHAINSTORAGE_STORAGE_TYPE_META="POSTGRES"
+```
+
+2. **secrets.yml**: Alternatively, you may override the configuration by creating `secrets.yml` within the same directory. Its attributes
will be merged into the runtime configuration and take the highest precedence. Note that this file may contain
credentials and is excluded from check-in by `.gitignore`.
+Example `config/chainstorage/ethereum/mainnet/.secrets.yml`:
+```yaml
+storage_type:
+ meta: POSTGRES
+aws:
+ postgres:
+ user: your_username
+ password: your_secure_password
+```
+
## Development
### Running Server
@@ -362,6 +430,211 @@ make server
make server CHAINSTORAGE_CONFIG=ethereum_goerli
```
+### Running with PostgreSQL
+
+ChainStorage supports PostgreSQL as an alternative to DynamoDB for metadata storage. Here's how to set it up:
+
+#### 1. Start PostgreSQL Database
+
+You can use Docker to run PostgreSQL locally:
+
+```shell
+# Start PostgreSQL container
+docker run --name chainstorage-postgres \
+ -e POSTGRES_USER=temporal \
+ -e POSTGRES_PASSWORD=temporal \
+ -e POSTGRES_DB=postgres \
+ -p 5432:5432 \
+ -d postgres:13
+```
+
+Or add it to your existing docker-compose setup.
+
+#### 2. Configure Meta Storage Type
+
+Create or modify your local config to use PostgreSQL instead of DynamoDB. You have two options:
+
+**Option A: Create a local secrets file (recommended for development)**
+
+Create `config/chainstorage/{blockchain}/{network}/.secrets.yml` (e.g., `config/chainstorage/ethereum/mainnet/.secrets.yml`):
+
+```yaml
+storage_type:
+ meta: POSTGRES
+```
+
+**Option B: Set via environment variable**
+
+```shell
+export CHAINSTORAGE_STORAGE_TYPE_META=POSTGRES
+```
+
+#### 3. Set PostgreSQL Credentials
+
+Since PostgreSQL credentials should not be hardcoded in config files, set them via environment variables:
+
+```shell
+# PostgreSQL connection details
+export CHAINSTORAGE_AWS_POSTGRES_USER="temporal"
+export CHAINSTORAGE_AWS_POSTGRES_PASSWORD="temporal"
+export CHAINSTORAGE_AWS_POSTGRES_HOST="localhost"
+export CHAINSTORAGE_AWS_POSTGRES_PORT="5432"
+export CHAINSTORAGE_AWS_POSTGRES_SSL_MODE="disable"  # the local Docker container does not enable TLS
+```
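+
+If the server fails to start, the following standalone check verifies that the credentials and container are reachable. It is not part of the repo and assumes the `github.com/lib/pq` driver:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"os"
+
+	_ "github.com/lib/pq" // Postgres driver
+)
+
+func main() {
+	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=postgres sslmode=disable",
+		os.Getenv("CHAINSTORAGE_AWS_POSTGRES_HOST"),
+		os.Getenv("CHAINSTORAGE_AWS_POSTGRES_PORT"),
+		os.Getenv("CHAINSTORAGE_AWS_POSTGRES_USER"),
+		os.Getenv("CHAINSTORAGE_AWS_POSTGRES_PASSWORD"),
+	)
+	db, err := sql.Open("postgres", dsn)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+	if err := db.Ping(); err != nil {
+		log.Fatal(err) // wrong credentials or container not ready
+	}
+	fmt.Println("postgres is reachable")
+}
+```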
+
+#### 4. Run the Server
+
+Now start the server with PostgreSQL configuration:
+
+```shell
+# Method 1: Using exported environment variables
+make server
+
+# Method 2: Setting environment variables inline
+CHAINSTORAGE_STORAGE_TYPE_META=POSTGRES \
+CHAINSTORAGE_AWS_POSTGRES_USER="temporal" \
+CHAINSTORAGE_AWS_POSTGRES_PASSWORD="temporal" \
+make server
+```
+
+#### PostgreSQL Configuration Reference
+
+The following environment variables can be used to configure PostgreSQL:
+
+| Environment Variable | Config Path | Description | Default |
+|---------------------|-------------|-------------|---------|
+| `CHAINSTORAGE_AWS_POSTGRES_HOST` | `aws.postgres.host` | PostgreSQL hostname | `localhost` |
+| `CHAINSTORAGE_AWS_POSTGRES_PORT` | `aws.postgres.port` | PostgreSQL port | `5432` |
+| `CHAINSTORAGE_AWS_POSTGRES_USER` | `aws.postgres.user` | PostgreSQL username | (required) |
+| `CHAINSTORAGE_AWS_POSTGRES_PASSWORD` | `aws.postgres.password` | PostgreSQL password | (required) |
+| `CHAINSTORAGE_AWS_POSTGRES_DATABASE` | `aws.postgres.database` | Database name | `chainstorage_{blockchain}_{network}` |
+| `CHAINSTORAGE_AWS_POSTGRES_SSL_MODE` | `aws.postgres.ssl_mode` | SSL mode | `require` |
+| `CHAINSTORAGE_AWS_POSTGRES_MAX_CONNECTIONS` | `aws.postgres.max_connections` | Maximum connection pool size | `25` |
+| `CHAINSTORAGE_AWS_POSTGRES_MIN_CONNECTIONS` | `aws.postgres.min_connections` | Minimum connection pool size | `5` |
+| `CHAINSTORAGE_AWS_POSTGRES_CONNECT_TIMEOUT` | `aws.postgres.connect_timeout` | Connection establishment timeout | `30s` |
+| `CHAINSTORAGE_AWS_POSTGRES_STATEMENT_TIMEOUT` | `aws.postgres.statement_timeout` | Statement/transaction timeout | `60s` |
+| `CHAINSTORAGE_STORAGE_TYPE_META` | `storage_type.meta` | Meta storage type | `DYNAMODB` |
+
+#### Database Schema
+
+ChainStorage will automatically create the necessary database schema and run migrations when it starts up. The database will contain tables for:
+- `block_metadata` - Block metadata and headers
+- `canonical_blocks` - Canonical chain state
+- `block_events` - Blockchain event log
+
+### PostgreSQL Setup
+
+ChainStorage supports PostgreSQL as an alternative to DynamoDB for metadata storage with role-based access for enhanced security.
+
+#### Local Development
+
+**Quick Start:**
+```bash
+# Start PostgreSQL with automatic database initialization
+docker-compose -f docker-compose-local-dev.yml up -d chainstorage-postgres
+```
+
+This automatically creates:
+- Shared `chainstorage_worker` and `chainstorage_server` roles
+- Databases for all supported networks (ethereum_mainnet, bitcoin_mainnet, etc.)
+- Proper permissions (worker: read-write, server: read-only)
+
+**Default Credentials:**
+- Worker: `chainstorage_worker` / `worker_password`
+- Server: `chainstorage_server` / `server_password`
+
+**Manual Setup:**
+```bash
+chainstorage admin setup-postgres \
+ --blockchain ethereum \
+ --network mainnet \
+ --env local \
+ --master-user postgres \
+ --master-password postgres \
+ --worker-password worker_password \
+ --server-password server_password
+```
+
+#### Production/Development Setup
+
+In production, databases are initialized using the `db-init` command:
+
+```bash
+# Connect to admin pod
+kubectl exec -it deploy/chainstorage-admin-dev-console -c chainstorage-admin -- /bin/bash
+
+# Initialize database for ethereum-mainnet
+./admin db-init --blockchain ethereum --network mainnet --env dev
+```
+
+The `db-init` command:
+1. Reads master credentials from environment variables (injected by Kubernetes)
+2. Fetches network-specific credentials from AWS Secrets Manager (`chainstorage/db-creds/{env}`)
+3. Creates the database (e.g., `chainstorage_ethereum_mainnet`)
+4. Creates network-specific users with passwords from the secret
+5. Grants appropriate permissions
+
+#### Database Naming Convention
+
+Databases follow the pattern: `chainstorage_{blockchain}_{network}`
+
+Examples:
+- `chainstorage_ethereum_mainnet`
+- `chainstorage_bitcoin_mainnet`
+- `chainstorage_polygon_testnet`
+
+Note: Hyphens in blockchain/network names are replaced with underscores.
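+
+A sketch of the naming rule, using a hypothetical helper that is not part of the codebase:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// databaseName applies the convention above: hyphens become underscores.
+func databaseName(blockchain, network string) string {
+	name := fmt.Sprintf("chainstorage_%s_%s", blockchain, network)
+	return strings.ReplaceAll(name, "-", "_")
+}
+
+func main() {
+	fmt.Println(databaseName("ethereum", "mainnet")) // chainstorage_ethereum_mainnet
+}
+```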
+
+### Local Development Setup
+
+#### Complete Local Environment
+
+Start the full local development stack:
+
+```shell
+# Start all services (PostgreSQL, Temporal, LocalStack)
+make localstack
+
+# Load environment variables
+source scripts/postgres-roles-local.env
+```
+
+#### Available Commands
+
+**Database Operations:**
+```shell
+# Set up PostgreSQL database and roles for a new network
+go run ./cmd/admin setup-postgres \
+ --blockchain ethereum \
+ --network mainnet \
+ --env local \
+ --master-user postgres \
+ --master-password postgres \
+ --host localhost \
+ --port 5433
+
+# Initialize databases from AWS Secrets Manager (production)
+go run ./cmd/admin db-init \
+ --secret-name chainstorage/db-init/prod \
+ --aws-region us-east-1
+
+# Migrate data from DynamoDB to PostgreSQL
+chainstorage admin migrate-dynamodb-to-postgres \
+ --blockchain ethereum \
+ --network mainnet \
+ --env local \
+ --start-height 1000000 \
+ --end-height 1001000
+```
+
+#### Command Reference
+
+| Command | Description | Example |
+|---------|-------------|---------|
+| `setup-postgres` | Create database and roles | `setup-postgres --master-user postgres --master-password postgres` |
+| `db-init` | Initialize from AWS Secrets Manager | `db-init --blockchain ethereum --network mainnet --env dev` |
+| `migrate-dynamodb-to-postgres` | Migrate data from DynamoDB to PostgreSQL | `migrate-dynamodb-to-postgres --start-height 1000000 --end-height 1001000` |
+
### AWS localstack
Check S3 files:
@@ -412,6 +685,37 @@ Start the streamer workflow:
go run ./cmd/admin workflow start --workflow streamer --input '{}' --blockchain ethereum --network goerli --env local
```
+Start the migrator workflow (event-driven migration from DynamoDB to PostgreSQL):
+```shell
+# Migrate events by sequence range
+go run ./cmd/admin workflow start --workflow migrator --input '{"StartEventSequence": 1, "EndEventSequence": 1000, "Tag": 2, "EventTag": 3, "BatchSize": 500, "Parallelism": 2, "CheckpointSize": 10000}' --blockchain ethereum --network mainnet --env local
+
+# Auto-resume from last migrated position
+go run ./cmd/admin workflow start --workflow migrator --input '{"StartEventSequence": 0, "EndEventSequence": 100000, "Tag": 2, "EventTag": 3, "AutoResume": true}' --blockchain ethereum --network mainnet --env local
+
+# Continuous sync mode with auto-resume
+go run ./cmd/admin workflow start --workflow migrator --input '{"StartEventSequence": 0, "EndEventSequence": 0, "Tag": 2, "EventTag": 3, "AutoResume": true, "ContinuousSync": true, "BatchSize": 500, "Parallelism": 2, "CheckpointSize": 10000}' --blockchain ethereum --network mainnet --env local
+
+# Custom batch size and parallelism for large migrations
+go run ./cmd/admin workflow start --workflow migrator --input '{"StartEventSequence": 1000000, "EndEventSequence": 2000000, "Tag": 1, "EventTag": 0, "BatchSize": 10000, "Parallelism": 16, "CheckpointSize": 100000}' --blockchain ethereum --network mainnet --env local
+```
+Note: The migrator uses an event-driven architecture where events are fetched by sequence number and blocks are extracted from BLOCK_ADDED events. This ensures data consistency and proper handling of blockchain reorganizations.
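+
+In code, that event-driven extraction looks roughly like the following sketch with hypothetical types:
+
+```go
+package migrator
+
+import "context"
+
+type EventType int
+
+const (
+	BlockAdded EventType = iota
+	BlockRemoved
+)
+
+type Event struct {
+	Sequence int64
+	Type     EventType
+	Height   uint64
+	Hash     string
+}
+
+type EventSource interface {
+	EventsBySequence(ctx context.Context, from, to int64) ([]Event, error)
+}
+
+// blocksToMigrate extracts the block references carried by BLOCK_ADDED
+// events; BLOCK_REMOVED entries record reorgs and add nothing new.
+func blocksToMigrate(ctx context.Context, src EventSource, from, to int64) ([]Event, error) {
+	events, err := src.EventsBySequence(ctx, from, to)
+	if err != nil {
+		return nil, err
+	}
+	var added []Event
+	for _, e := range events {
+		if e.Type == BlockAdded {
+			added = append(added, e)
+		}
+	}
+	return added, nil
+}
+```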
+
+Start the cross validator workflow:
+```shell
+go run ./cmd/admin workflow start --workflow cross_validator --input '{"StartHeight": 15500000, "Tag": 0}' --blockchain ethereum --network mainnet --env local
+```
+
+Start the event backfiller workflow:
+```shell
+go run ./cmd/admin workflow start --workflow event_backfiller --input '{"Tag": 0, "EventTag": 0, "StartSequence": 1000, "EndSequence": 2000}' --blockchain ethereum --network mainnet --env local
+```
+
+Start the replicator workflow:
+```shell
+go run ./cmd/admin workflow start --workflow replicator --input '{"Tag": 0, "StartHeight": 1000000, "EndHeight": 1001000}' --blockchain ethereum --network mainnet --env local
+```
+
Stop the monitor workflow:
```shell
go run ./cmd/admin workflow stop --workflow monitor --blockchain ethereum --network mainnet --env local
@@ -427,7 +731,7 @@ Using Temporal CLI to check the status of the workflow:
brew install tctl
tctl --address localhost:7233 --namespace chainstorage-ethereum-mainnet workflow show --workflow_id workflow.backfiller
-````
+```
## Failover
@@ -556,6 +860,36 @@ and out of order, the logical ordering guarantee is preserved.
6. Update watermark once all the batches have been processed.
7. Repeat above steps.
+## Data Migration Tool
+
+Tool to migrate blockchain data from DynamoDB to PostgreSQL with complete reorg support and data integrity preservation.
+
+### Overview
+
+The migration tool performs a comprehensive transfer of blockchain data:
+- **Block metadata** from DynamoDB to PostgreSQL (`block_metadata` + `canonical_blocks` tables)
+- **Events** from DynamoDB to PostgreSQL (`block_events` table)
+- **Complete reorg data** including both canonical and non-canonical blocks
+- **Event ID-based migration** for efficient sequential processing
+
+**Critical Requirements**:
+1. Block metadata **must** be migrated before events (foreign key dependencies)
+2. Migration preserves complete blockchain history including all reorg blocks
+3. Canonical block identification is maintained through migration ordering
+
+### Basic Usage
+
+```bash
+# Migrate both blocks and events for a height range
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --tag=1 \
+ --event-tag=0
+```
## Contact Us
-We have set up a Discord server soon. Here is the link to join (limited 10) https://discord.com/channels/1079683467018764328/1079683467786334220.
+We have set up a Discord server. Here is the link to join (limited to 10): https://discord.com/channels/1079683467018764328/1079683467786334220.
\ No newline at end of file
diff --git a/SEARCHABILITY_PROPOSAL.md b/SEARCHABILITY_PROPOSAL.md
new file mode 100644
index 0000000..237bbbe
--- /dev/null
+++ b/SEARCHABILITY_PROPOSAL.md
@@ -0,0 +1,440 @@
+# ChainStorage Searchability Enhancement Proposal
+## Leveraging Delta Lake and Uniform Format for Advanced Blockchain Analytics
+
+### Executive Summary
+
+This proposal outlines a comprehensive enhancement to ChainStorage's architecture by introducing Delta Lake with Uniform format support to dramatically improve blockchain data searchability and analytics capabilities. By adopting a lakehouse architecture, ChainStorage can maintain its high-throughput ingestion while enabling complex analytical queries through multiple query engines.
+
+### Current Limitations
+
+ChainStorage's current architecture faces several searchability constraints:
+
+1. **No Transaction Content Search**: Only exact hash lookups supported
+2. **No Address-Based Queries**: Cannot find transactions by sender/receiver
+3. **No Token Transfer Analytics**: ERC20/721 transfers not indexed
+4. **No Event Log Search**: Smart contract events not queryable
+5. **Limited Time-Based Queries**: Only indirect via block height
+6. **No Cross-Block Analytics**: Each block queried independently
+7. **No Columnar Storage**: Inefficient for analytical workloads
+
+### Proposed Architecture
+
+```mermaid
+graph TB
+ subgraph "Data Ingestion"
+ BC[Blockchain Nodes]
+ CS[ChainStorage Workers]
+ end
+
+ subgraph "Delta Lake Layer"
+ subgraph "Bronze Tables"
+ RawBlocks[Raw Blocks
Delta Table]
+ RawTxs[Raw Transactions
Delta Table]
+ RawLogs[Raw Event Logs
Delta Table]
+ end
+
+ subgraph "Silver Tables"
+ Blocks[Enriched Blocks
Delta Table]
+ Txs[Parsed Transactions
Delta Table]
+ TokenTransfers[Token Transfers
Delta Table]
+ Events[Decoded Events
Delta Table]
+ Addresses[Address Activity
Delta Table]
+ end
+
+ subgraph "Gold Tables"
+ DailyStats[Daily Statistics
Delta Table]
+ AddressBalances[Address Balances
Delta Table]
+ TokenMetrics[Token Metrics
Delta Table]
+ end
+ end
+
+ subgraph "Query Layer"
+ subgraph "Uniform Format"
+ IcebergMeta[Iceberg Metadata]
+ HudiMeta[Hudi Metadata]
+ end
+
+ subgraph "Query Engines"
+ Spark[Apache Spark]
+ Presto[Presto/Trino]
+ Athena[AWS Athena]
+ Dremio[Dremio]
+ API[ChainStorage API]
+ end
+ end
+
+ BC --> CS
+ CS --> RawBlocks
+ CS --> RawTxs
+ CS --> RawLogs
+
+ RawBlocks --> Blocks
+ RawTxs --> Txs
+ RawLogs --> Events
+
+ Txs --> TokenTransfers
+ Txs --> Addresses
+ Events --> TokenTransfers
+
+ Blocks --> DailyStats
+ TokenTransfers --> TokenMetrics
+ Addresses --> AddressBalances
+
+ Bronze --> IcebergMeta
+ Silver --> IcebergMeta
+ Gold --> IcebergMeta
+
+ IcebergMeta --> Spark
+ IcebergMeta --> Presto
+ IcebergMeta --> Athena
+ IcebergMeta --> Dremio
+
+ Engines --> API
+
+ style Bronze fill:#cd7f32,stroke:#2e3440,color:#fff
+ style Silver fill:#c0c0c0,stroke:#2e3440,color:#000
+ style Gold fill:#ffd700,stroke:#2e3440,color:#000
+```
+
+### Implementation Details
+
+#### 1. Bronze Layer (Raw Data Ingestion)
+
+```sql
+-- Raw Blocks Table
+CREATE TABLE IF NOT EXISTS raw_blocks (
+ blockchain STRING,
+ network STRING,
+ block_height BIGINT,
+ block_hash STRING,
+ parent_hash STRING,
+ block_timestamp TIMESTAMP,
+ raw_data BINARY, -- Compressed protobuf
+ ingestion_timestamp TIMESTAMP,
+ -- Partitioning for efficient queries
+ year INT GENERATED ALWAYS AS (YEAR(block_timestamp)),
+ month INT GENERATED ALWAYS AS (MONTH(block_timestamp)),
+ day INT GENERATED ALWAYS AS (DAY(block_timestamp))
+) USING DELTA
+PARTITIONED BY (blockchain, network, year, month, day)
+TBLPROPERTIES (
+ 'delta.enableIcebergCompatV2' = 'true',
+ 'delta.universalFormat.enabledFormats' = 'iceberg',
+ 'delta.autoOptimize.optimizeWrite' = 'true',
+ 'delta.autoOptimize.autoCompact' = 'true'
+);
+
+-- Raw Transactions Table
+CREATE TABLE IF NOT EXISTS raw_transactions (
+ blockchain STRING,
+ network STRING,
+ block_height BIGINT,
+ block_hash STRING,
+ tx_hash STRING,
+ tx_index INT,
+ raw_tx BINARY, -- Compressed transaction data
+ -- Partitioning
+ year INT,
+ month INT,
+ day INT
+) USING DELTA
+PARTITIONED BY (blockchain, network, year, month, day)
+TBLPROPERTIES (
+ 'delta.enableIcebergCompatV2' = 'true',
+ 'delta.universalFormat.enabledFormats' = 'iceberg'
+);
+```
+
+#### 2. Silver Layer (Parsed and Enriched Data)
+
+```sql
+-- Parsed Transactions Table
+CREATE TABLE IF NOT EXISTS transactions (
+ blockchain STRING,
+ network STRING,
+ block_height BIGINT,
+ block_timestamp TIMESTAMP,
+ tx_hash STRING,
+ tx_index INT,
+ from_address STRING,
+ to_address STRING,
+ value DECIMAL(38, 0),
+ gas_price BIGINT,
+ gas_used BIGINT,
+ status INT,
+ input_data STRING,
+ contract_address STRING,
+ method_id STRING, -- First 4 bytes of input
+ -- Time partitioning
+ year INT,
+ month INT,
+ day INT
+) USING DELTA
+PARTITIONED BY (blockchain, network, year, month, day)
+CLUSTERED BY (from_address, to_address) INTO 100 BUCKETS
+TBLPROPERTIES (
+ 'delta.enableIcebergCompatV2' = 'true',
+ 'delta.universalFormat.enabledFormats' = 'iceberg',
+ 'delta.dataSkippingNumIndexedCols' = '8'
+);
+
+-- Token Transfers Table
+CREATE TABLE IF NOT EXISTS token_transfers (
+ blockchain STRING,
+ network STRING,
+ block_height BIGINT,
+ block_timestamp TIMESTAMP,
+ tx_hash STRING,
+ log_index INT,
+ token_address STRING,
+ token_type STRING, -- ERC20, ERC721, ERC1155
+ from_address STRING,
+ to_address STRING,
+ value DECIMAL(38, 0),
+ token_id BIGINT, -- For NFTs
+ -- Partitioning
+ year INT,
+ month INT
+) USING DELTA
+PARTITIONED BY (blockchain, network, year, month)
+CLUSTERED BY (token_address) INTO 50 BUCKETS;
+
+-- Smart Contract Events Table
+CREATE TABLE IF NOT EXISTS contract_events (
+ blockchain STRING,
+ network STRING,
+ block_height BIGINT,
+ block_timestamp TIMESTAMP,
+ tx_hash STRING,
+ log_index INT,
+ contract_address STRING,
+ event_signature STRING,
+ topic0 STRING,
+ topic1 STRING,
+ topic2 STRING,
+ topic3 STRING,
+ data STRING,
+ decoded_event MAP<STRING, STRING>, -- JSON decoded parameters
+ -- Partitioning
+ year INT,
+ month INT,
+ day INT
+) USING DELTA
+PARTITIONED BY (blockchain, network, year, month, day)
+CLUSTERED BY (contract_address, event_signature) INTO 100 BUCKETS;
+```
+
+#### 3. Gold Layer (Aggregated Analytics)
+
+```sql
+-- Daily Statistics
+CREATE TABLE IF NOT EXISTS daily_statistics (
+ blockchain STRING,
+ network STRING,
+ date DATE,
+ total_blocks BIGINT,
+ total_transactions BIGINT,
+ total_value DECIMAL(38, 0),
+ unique_addresses BIGINT,
+ total_gas_used BIGINT,
+ avg_gas_price DECIMAL(18, 9),
+ active_contracts BIGINT,
+ token_transfer_count BIGINT,
+ year INT GENERATED ALWAYS AS (YEAR(date))
+) USING DELTA
+PARTITIONED BY (blockchain, network, year);
+
+-- Address Activity Summary
+CREATE TABLE IF NOT EXISTS address_activity (
+ blockchain STRING,
+ network STRING,
+ address STRING,
+ first_seen_block BIGINT,
+ last_seen_block BIGINT,
+ total_sent_txs BIGINT,
+ total_received_txs BIGINT,
+ total_value_sent DECIMAL(38, 0),
+ total_value_received DECIMAL(38, 0),
+ unique_interacted_addresses BIGINT,
+ last_updated TIMESTAMP
+) USING DELTA
+PARTITIONED BY (blockchain, network)
+CLUSTERED BY (address) INTO 1000 BUCKETS;
+```
+
+### Query Examples
+
+#### 1. Find All Transactions for an Address
+```sql
+SELECT * FROM transactions
+WHERE blockchain = 'ethereum'
+ AND network = 'mainnet'
+ AND (from_address = '0x123...' OR to_address = '0x123...')
+ AND block_timestamp >= '2024-01-01'
+ORDER BY block_timestamp DESC;
+```
+
+#### 2. Token Transfer Analytics
+```sql
+SELECT
+ token_address,
+ COUNT(*) as transfer_count,
+ COUNT(DISTINCT from_address) as unique_senders,
+ COUNT(DISTINCT to_address) as unique_receivers,
+ SUM(value) as total_volume
+FROM token_transfers
+WHERE blockchain = 'ethereum'
+ AND network = 'mainnet'
+ AND block_timestamp >= CURRENT_DATE - INTERVAL 7 DAYS
+GROUP BY token_address
+ORDER BY transfer_count DESC
+LIMIT 100;
+```
+
+#### 3. Smart Contract Event Search
+```sql
+SELECT * FROM contract_events
+WHERE blockchain = 'ethereum'
+ AND contract_address = '0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48' -- USDC
+ AND event_signature = 'Transfer(address,address,uint256)'
+ AND topic1 = '0x000...' -- From address
+ AND block_timestamp >= '2024-01-01';
+```
+
+#### 4. Cross-Block Analytics
+```sql
+WITH daily_gas AS (
+ SELECT
+ DATE(block_timestamp) as date,
+ AVG(gas_price) as avg_gas_price,
+ MAX(gas_price) as max_gas_price,
+ PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY gas_price) as median_gas_price
+ FROM transactions
+ WHERE blockchain = 'ethereum'
+ AND block_timestamp >= CURRENT_DATE - INTERVAL 30 DAYS
+ GROUP BY DATE(block_timestamp)
+)
+SELECT * FROM daily_gas ORDER BY date DESC;
+```
+
+### Integration with ChainStorage
+
+#### 1. Minimal Changes to Existing Code
+
+```go
+// New writer interface for Delta tables
+type DeltaWriter interface {
+ WriteBlock(ctx context.Context, block *api.Block) error
+ WriteTransactions(ctx context.Context, txs []*api.Transaction) error
+ WriteEvents(ctx context.Context, events []*api.Event) error
+}
+
+// Implementation using Delta-RS or Spark
+type deltaWriterImpl struct {
+ sparkSession *spark.Session
+ batchSize int
+}
+
+// Add to existing workflow activities
+func (a *Activity) ExtractAndLoad(ctx context.Context, block *api.Block) error {
+ // Existing blob storage write
+ if err := a.blobStorage.Write(ctx, block); err != nil {
+ return err
+ }
+
+ // New: Write to Delta tables
+ if a.deltaWriter != nil {
+ if err := a.deltaWriter.WriteBlock(ctx, block); err != nil {
+ // Log but don't fail - Delta write is async
+ a.logger.Warn("failed to write to delta", zap.Error(err))
+ }
+ }
+
+ return nil
+}
+```
+
+#### 2. Streaming Updates
+
+```go
+// Use Delta's streaming capabilities to propagate Bronze rows into the
+// Silver layer. Sketch against a hypothetical Go Spark binding; the
+// started query is fire-and-forget here, so no handle is retained.
+func (d *deltaWriterImpl) StartStreaming(ctx context.Context) {
+	// Tail raw_blocks and hand each 10-second micro-batch to processBatch.
+	d.sparkSession.
+		ReadStream().
+		Format("delta").
+		Load("raw_blocks").
+		WriteStream().
+		Trigger(spark.ProcessingTime("10 seconds")).
+		ForEachBatch(d.processBatch).
+		Start()
+}
+```
+
+### Benefits of This Approach
+
+#### 1. **Multi-Engine Query Support**
+- Spark for complex analytics
+- Presto/Trino for interactive queries
+- AWS Athena for serverless analytics
+- Existing ChainStorage API continues to work
+
+#### 2. **Improved Performance**
+- Columnar storage format (Parquet)
+- Predicate pushdown and column pruning
+- Z-order clustering for common query patterns
+- Automatic file compaction
+
+#### 3. **Advanced Analytics Capabilities**
+- Time-travel queries to any point in history
+- ACID transactions for consistent reads
+- Schema evolution support
+- Incremental processing with CDC
+
+#### 4. **Cost Optimization**
+- Data compression (10-100x reduction)
+- Efficient storage with deduplication
+- Query only required columns
+- Automatic data lifecycle management
+
+#### 5. **Operational Benefits**
+- No changes to existing ChainStorage workflows
+- Gradual migration path
+- Backward compatibility maintained
+- Single storage layer for all formats
+
+### Implementation Roadmap
+
+#### Phase 1: Foundation (Month 1-2)
+- Set up Delta Lake infrastructure
+- Implement basic Bronze layer tables
+- Create DeltaWriter interface
+- Add to Backfiller workflow
+
+#### Phase 2: Core Tables (Month 2-3)
+- Implement Silver layer parsing
+- Create transaction and event tables
+- Add clustering and optimization
+- Enable Uniform format
+
+#### Phase 3: Analytics (Month 3-4)
+- Build Gold layer aggregations
+- Implement streaming updates
+- Add query API endpoints
+- Create example dashboards
+
+#### Phase 4: Production (Month 4-5)
+- Performance optimization
+- Migration of historical data
+- Monitoring and alerting
+- Documentation and training
+
+### Conclusion
+
+By adopting Delta Lake with Uniform format, ChainStorage can evolve from a pure storage system to a comprehensive blockchain analytics platform. This approach:
+
+1. **Preserves existing functionality** while adding new capabilities
+2. **Enables multiple query engines** through Uniform format
+3. **Dramatically improves query performance** with columnar storage
+4. **Supports advanced analytics** use cases
+5. **Reduces storage costs** through compression and deduplication
+
+The lakehouse architecture provides the best of both worlds: the reliability and performance of a data warehouse with the flexibility and cost-effectiveness of a data lake.
\ No newline at end of file
diff --git a/cmd/admin/README_migrate.md b/cmd/admin/README_migrate.md
new file mode 100644
index 0000000..a2e5b45
--- /dev/null
+++ b/cmd/admin/README_migrate.md
@@ -0,0 +1,318 @@
+# Data Migration Tool
+
+Tool to migrate blockchain data from DynamoDB to PostgreSQL with complete reorg support and data integrity preservation.
+
+## Overview
+
+The migration tool performs a comprehensive transfer of blockchain data:
+- **Block metadata** from DynamoDB to PostgreSQL (`block_metadata` + `canonical_blocks` tables)
+- **Events** from DynamoDB to PostgreSQL (`block_events` table)
+- **Complete reorg data** including both canonical and non-canonical blocks
+- **Event ID-based migration** for efficient sequential processing
+
+**Critical Requirements**:
+1. Block metadata **must** be migrated before events (foreign key dependencies)
+2. Migration preserves complete blockchain history including all reorg blocks
+3. Canonical block identification is maintained through migration ordering
+
+## Architecture
+
+### Advanced Migration Strategy
+The tool uses a sophisticated **height-by-height approach** that:
+
+1. **Queries ALL blocks** at each height from DynamoDB (canonical + non-canonical)
+2. **Migrates non-canonical blocks first** to preserve reorg history
+3. **Migrates canonical block last** to ensure proper canonicality in PostgreSQL
+4. **Uses event ID ranges** for efficient event migration
+
+### Reorg Handling Design
+- **DynamoDB**: Stores both actual block hash entries and "canonical" markers
+- **PostgreSQL**: Uses separate `block_metadata` (all blocks) and `canonical_blocks` (canonical only) tables
+- **Migration**: Preserves complete reorg history while maintaining canonical block identification
+
+## Basic Usage
+
+```bash
+# Migrate both blocks and events for a height range
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --tag=1 \
+ --event-tag=0
+```
+
+## Command Line Flags
+
+### Basic Parameters
+| Flag | Required | Description | Default |
+|------|----------|-------------|---------|
+| `--start-height` | ✅ | Start block height (inclusive) | - |
+| `--end-height` | ✅ | End block height (exclusive) | - |
+| `--env` | ✅ | Environment (local/development/production) | - |
+| `--blockchain` | ✅ | Blockchain name (e.g., ethereum, base) | - |
+| `--network` | ✅ | Network name (e.g., mainnet, testnet) | - |
+| `--tag` | | Block tag for migration | 1 |
+| `--event-tag` | | Event tag for migration | 0 |
+
+### Performance & Batch Parameters
+| Flag | Required | Description | Default |
+|------|----------|-------------|---------|
+| `--batch-size` | | Number of blocks to process in each workflow batch | 100 |
+| `--mini-batch-size` | | Number of blocks to process in each activity mini-batch | batch-size/10 |
+| `--checkpoint-size` | | Number of blocks to process before creating a workflow checkpoint | 10000 |
+| `--parallelism` | | Number of parallel workers for processing mini-batches | 1 |
+| `--backoff-interval` | | Time duration to wait between batches (e.g., '1s', '500ms') | - |
+
+### Migration Mode Parameters
+| Flag | Required | Description | Default |
+|------|----------|-------------|---------|
+| `--skip-blocks` | | Skip block migration (events only) | false |
+| `--skip-events` | | Skip event migration (blocks only) | false |
+| `--continuous-sync` | | Enable continuous sync mode (workflow only, not supported in direct mode) | false |
+| `--sync-interval` | | Time duration to wait between continuous sync cycles (e.g., '1m', '30s') | 1m |
+
+## Continuous Sync Mode
+
+The migrator supports **continuous sync mode** for real-time data synchronization. This mode is designed for workflow-based migrations and enables:
+
+- **Infinite loop operation**: Automatically restarts migration when current batch completes
+- **Dynamic end height**: Sets new StartHeight to current EndHeight and resets EndHeight to 0 (sync to latest)
+- **Configurable sync intervals**: Wait duration between continuous sync cycles
+- **Automatic workflow continuation**: Uses Temporal's ContinueAsNewError for seamless restarts
+
+### Continuous Sync Process
+1. Complete current migration batch (StartHeight → EndHeight)
+2. Set new StartHeight = previous EndHeight
+3. Reset EndHeight = 0 (query latest block from source)
+4. Wait for SyncInterval duration
+5. Restart workflow with new parameters using ContinueAsNewError
+6. Repeat indefinitely
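+
+This cycle maps onto Temporal's `ContinueAsNewError` roughly as in the sketch below; `MigratorRequest` and `runMigration` are illustrative stand-ins for the real workflow types:
+
+```go
+package migrator
+
+import (
+	"time"
+
+	"go.temporal.io/sdk/workflow"
+)
+
+// MigratorRequest stands in for the real workflow input.
+type MigratorRequest struct {
+	StartHeight    uint64
+	EndHeight      uint64 // 0 means "sync to latest"
+	ContinuousSync bool
+	SyncInterval   time.Duration
+}
+
+// runMigration is a placeholder for the batch logic; it returns the height
+// the next cycle should start from.
+func runMigration(ctx workflow.Context, req MigratorRequest) (uint64, error) {
+	return req.EndHeight, nil
+}
+
+func MigratorWorkflow(ctx workflow.Context, req MigratorRequest) error {
+	lastMigrated, err := runMigration(ctx, req) // step 1
+	if err != nil {
+		return err
+	}
+	if !req.ContinuousSync {
+		return nil // one-shot migration
+	}
+	req.StartHeight = lastMigrated // step 2
+	req.EndHeight = 0              // step 3
+	if err := workflow.Sleep(ctx, req.SyncInterval); err != nil { // step 4
+		return err
+	}
+	// steps 5-6: restart with fresh history instead of looping in place
+	return workflow.NewContinueAsNewError(ctx, MigratorWorkflow, req)
+}
+```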
+
+### Validation Rules for Continuous Sync
+- `EndHeight` must be 0 OR greater than `StartHeight` when `ContinuousSync` is enabled
+- When `EndHeight = 0`, the tool automatically queries the latest block from DynamoDB
+- `SyncInterval` defaults to 1 minute if not specified or invalid
+
+### Continuous Sync Examples
+
+```bash
+# Basic continuous sync - syncs every minute from height 1000000 to latest
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --tag=2 \
+ --event-tag=3 \
+ --continuous-sync
+
+# High-performance continuous sync with custom parameters
+go run cmd/admin/*.go migrate \
+ --env=production \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=18000000 \
+ --tag=1 \
+ --event-tag=0 \
+ --continuous-sync \
+ --sync-interval=30s \
+ --batch-size=500 \
+ --mini-batch-size=50 \
+ --parallelism=4 \
+ --checkpoint-size=5000
+```
+
+**Note**: Continuous sync is only available when using the Temporal workflow system. The direct migration command will show a warning and perform a one-time migration if `--continuous-sync` is specified.
+
+## Migration Phases
+
+### Phase 1: Height-by-Height Block Migration
+For each height in the range:
+
+1. **Query ALL blocks** at height from DynamoDB using direct table queries
+2. **Separate canonical vs non-canonical** blocks client-side
+3. **Migrate non-canonical blocks first** (preserves reorg history)
+4. **Migrate canonical block last** (ensures canonicality in PostgreSQL)
+
+```sql
+-- DynamoDB Query Pattern:
+-- All blocks: BlockPid = "{tag}-{height}"
+-- Canonical: BlockPid = "{tag}-{height}" AND BlockRid = "canonical"
+-- Non-canonical: BlockPid = "{tag}-{height}" AND BlockRid != "canonical"
+```
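+
+The same ordering, condensed from `migrateBlocksAtHeight` in `cmd/admin/migrate.go` (not-found handling and logging elided):
+
+```go
+// Reorg blocks go into block_metadata only; persisting the canonical block
+// last is what marks it canonical in PostgreSQL.
+func (m *DataMigrator) migrateOneHeight(ctx context.Context, tag uint32, height uint64) error {
+    blockPid := fmt.Sprintf("%d-%d", tag, height)
+
+    nonCanonical, err := m.getNonCanonicalBlocksAtHeight(ctx, blockPid)
+    if err != nil {
+        return err
+    }
+    if len(nonCanonical) > 0 {
+        if err := m.destStorage.PersistBlockMetas(ctx, false, nonCanonical, nil); err != nil {
+            return err
+        }
+    }
+
+    canonical, err := m.getCanonicalBlockAtHeight(ctx, blockPid)
+    if err != nil {
+        return err
+    }
+    return m.destStorage.PersistBlockMetas(ctx, true, []*api.BlockMetadata{canonical}, nil)
+}
+```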
+
+### Phase 2: Event ID-Based Migration
+1. **Determine event ID range** from start/end heights
+2. **Migrate events sequentially** by event ID in batches
+3. **Establish foreign key relationships** to migrated block metadata
+4. **Handle missing events gracefully** (logged as debug)
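+
+A condensed version of the event loop in `migrateEvents` (`cmd/admin/migrate.go`); the real code also skips not-found ranges and logs progress:
+
+```go
+// Copy events across in fixed-size event ID batches.
+func copyEvents(ctx context.Context, src, dst metastorage.MetaStorage,
+    eventTag uint32, startEventId, endEventId, batchSize int64) error {
+    for cur := startEventId; cur <= endEventId; cur += batchSize {
+        last := cur + batchSize - 1
+        if last > endEventId {
+            last = endEventId
+        }
+        // The end bound of GetEventsByEventIdRange is exclusive.
+        events, err := src.GetEventsByEventIdRange(ctx, eventTag, cur, last+1)
+        if err != nil {
+            return err
+        }
+        if err := dst.AddEventEntries(ctx, eventTag, events); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+```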
+
+**CRITICAL REQUIREMENT for Events-Only Migration:**
+When using `--skip-blocks` (events-only migration), the corresponding block metadata **must already exist** in PostgreSQL. Events depend on block metadata through foreign key constraints (`block_events.block_metadata_id` → `block_metadata.id`).
+
+If block metadata is missing, the migration will fail with an error. To resolve this:
+1. First run migration with `--skip-events` to migrate block metadata
+2. Then run migration with `--skip-blocks` to migrate events
+
+```bash
+# Step 1: Migrate blocks first
+go run ./cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --skip-events
+
+# Step 2: Migrate events (now that block metadata exists)
+go run ./cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --skip-blocks
+```
+
+## PostgreSQL Schema Design
+
+### Block Storage Tables
+```sql
+-- All blocks ever observed (append-only)
+CREATE TABLE block_metadata (
+ id BIGSERIAL PRIMARY KEY,
+ height BIGINT NOT NULL,
+ tag INT NOT NULL,
+ hash VARCHAR(66),
+ parent_hash VARCHAR(66),
+ object_key_main VARCHAR(255),
+ timestamp TIMESTAMPTZ NOT NULL,
+ skipped BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+-- Canonical block tracking (current "winner" at each height)
+CREATE TABLE canonical_blocks (
+ height BIGINT NOT NULL,
+ block_metadata_id BIGINT NOT NULL,
+ tag INT NOT NULL,
+ PRIMARY KEY (height, tag),
+ FOREIGN KEY (block_metadata_id) REFERENCES block_metadata (id)
+);
+```
+
+### Event Storage Table
+```sql
+-- Blockchain state change events (append-only)
+CREATE TABLE block_events (
+ event_tag INT NOT NULL DEFAULT 0,
+ event_sequence BIGINT NOT NULL,
+ event_type event_type_enum NOT NULL,
+ block_metadata_id BIGINT NOT NULL,
+ height BIGINT NOT NULL,
+ hash VARCHAR(66),
+ PRIMARY KEY (event_tag, event_sequence),
+ FOREIGN KEY (block_metadata_id) REFERENCES block_metadata (id)
+);
+```
+
+## Complete Reorg Support
+
+### DynamoDB Storage Pattern
+For a height with reorgs, DynamoDB contains:
+```
+BlockPid: "1-12345", BlockRid: "0xabc123..." (non-canonical block)
+BlockPid: "1-12345", BlockRid: "0xdef456..." (another non-canonical block)
+BlockPid: "1-12345", BlockRid: "canonical" (canonical marker pointing to winner)
+```
+
+### Migration Process
+1. **Query all blocks** at height: `BlockPid = "1-12345"`
+2. **Filter canonical vs non-canonical** client-side based on `BlockRid`
+3. **Migrate non-canonical first**: All reorg blocks → `block_metadata` only
+4. **Migrate canonical last**: Winner block → `block_metadata` + `canonical_blocks`
+
+### PostgreSQL Result
+```sql
+-- block_metadata table (ALL blocks)
+id | height | hash | ...
+1 | 12345 | 0xabc123... | ... (non-canonical)
+2 | 12345 | 0xdef456... | ... (non-canonical)
+3 | 12345 | 0x789abc... | ... (canonical)
+
+-- canonical_blocks table (canonical only)
+height | block_metadata_id | tag
+12345 | 3 | 1 (points to canonical block)
+```
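+
+Reading this layout back is a join. A hypothetical lookup (not part of the migrator) that resolves the canonical hash at a height:
+
+```go
+package schemaexample
+
+import (
+    "database/sql"
+    "fmt"
+
+    _ "github.com/lib/pq" // postgres driver
+)
+
+// canonicalHash returns the hash of the canonical block at (height, tag),
+// assuming the block_metadata / canonical_blocks schema shown earlier.
+func canonicalHash(db *sql.DB, height int64, tag int) (string, error) {
+    const q = `
+        SELECT bm.hash
+        FROM canonical_blocks cb
+        JOIN block_metadata bm ON bm.id = cb.block_metadata_id
+        WHERE cb.height = $1 AND cb.tag = $2`
+    var hash string
+    if err := db.QueryRow(q, height, tag).Scan(&hash); err != nil {
+        return "", fmt.Errorf("canonical lookup at height %d: %w", height, err)
+    }
+    return hash, nil
+}
+```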
+
+## Schema Mapping Details
+
+### DynamoDB → PostgreSQL Block Metadata
+```
+DynamoDB BlockMetaDataDDBEntry → PostgreSQL Tables
+├── Hash → block_metadata.hash
+├── ParentHash → block_metadata.parent_hash
+├── Height → block_metadata.height
+├── Tag → block_metadata.tag
+├── ObjectKeyMain → block_metadata.object_key_main
+├── Timestamp → block_metadata.timestamp
+├── Skipped → block_metadata.skipped
+└── (canonical status) → canonical_blocks.block_metadata_id (if canonical)
+```
+
+### DynamoDB → PostgreSQL Events
+```
+DynamoDB VersionedEventDDBEntry → PostgreSQL block_events
+├── EventId → event_sequence
+├── BlockHeight → height
+├── BlockHash → hash
+├── EventTag → event_tag
+├── EventType → event_type
+├── Sequence → event_sequence
+└── (block reference) → block_metadata_id (via foreign key)
+```
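+
+Because of the foreign key, each event insert needs the `id` of the already-migrated block row. A hypothetical resolution step, using `database/sql` as in the sketch above (the actual logic lives inside the postgres storage layer and may differ):
+
+```go
+// lookupBlockMetadataID resolves block_events.block_metadata_id for an event,
+// assuming Phase 1 already migrated the matching block_metadata row.
+func lookupBlockMetadataID(db *sql.DB, height int64, hash string, tag int) (int64, error) {
+    const q = `
+        SELECT id FROM block_metadata
+        WHERE height = $1 AND hash = $2 AND tag = $3`
+    var id int64
+    if err := db.QueryRow(q, height, hash, tag).Scan(&id); err != nil {
+        return 0, fmt.Errorf("no block metadata for height %d hash %s: %w", height, hash, err)
+    }
+    return id, nil
+}
+```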
+
+## Usage Examples
+
+### Complete Migration
+```bash
+# Migrate both blocks and events with full reorg support
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=18000000 \
+ --end-height=18001000 \
+ --tag=1 \
+ --event-tag=3
+```
+
+### Block-Only Migration
+```bash
+# Migrate only block metadata (useful for preparing for event migration)
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=base \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --skip-events
+```
+
+### Event-Only Migration
+```bash
+# IMPORTANT: Block metadata must already exist in PostgreSQL!
+# Run this ONLY after blocks have been migrated for this height range
+
+# Migrate only events (requires blocks already migrated)
+go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=polygon \
+ --network=mainnet \
+ --start-height=50000000 \
+ --end-height=50001000 \
+ --skip-blocks \
+ --event-tag=2
+```
\ No newline at end of file
diff --git a/cmd/admin/common.go b/cmd/admin/common.go
index 6234752..20c8009 100644
--- a/cmd/admin/common.go
+++ b/cmd/admin/common.go
@@ -281,7 +281,7 @@ func confirm(prompt string) bool {
return true
}
- fmt.Printf(prompt)
+ fmt.Print(prompt)
response, err := bufio.NewReader(os.Stdin).ReadString('\n')
if err != nil {
logger.Error("failed to read from console", zap.Error(err))
diff --git a/cmd/admin/db_init.go b/cmd/admin/db_init.go
new file mode 100644
index 0000000..d0ef916
--- /dev/null
+++ b/cmd/admin/db_init.go
@@ -0,0 +1,473 @@
+package main
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/secretsmanager"
+ "github.com/lib/pq"
+ _ "github.com/lib/pq"
+ "github.com/pressly/goose/v3"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+// Credentials are stored as a flat JSON secret and read directly by key.
+
+func newDBInitCommand() *cobra.Command {
+ var (
+ awsRegion string
+ dryRun bool
+ )
+
+ cmd := &cobra.Command{
+ Use: "db-init",
+ Short: "Initialize database and users for a specific network from AWS Secrets Manager",
+ Long: `Initialize PostgreSQL database and users for a specific blockchain network based on configuration stored in AWS Secrets Manager.
+
+This command MUST be run from the ChainStorage admin pod, which has the master PostgreSQL credentials
+injected as environment variables through Kubernetes secrets.
+
+The command:
+1. Uses master credentials from environment variables (CHAINSTORAGE_AWS_POSTGRES_*)
+2. Fetches network-specific credentials from AWS Secrets Manager
+3. Creates a database for the specified network
+4. Creates network-specific server (read-only) and worker (read-write) users
+5. Sets up proper permissions for each role
+6. Is idempotent - can be run multiple times safely
+
+Required environment variables (automatically available in admin pod):
+- CHAINSTORAGE_AWS_POSTGRES_HOST: PostgreSQL cluster endpoint
+- CHAINSTORAGE_AWS_POSTGRES_PORT: PostgreSQL port
+- CHAINSTORAGE_AWS_POSTGRES_USER: Master username
+- CHAINSTORAGE_AWS_POSTGRES_PASSWORD: Master password
+
+Example usage (run from admin pod):
+ # Initialize database for ethereum-mainnet
+ ./admin db-init --blockchain ethereum --network mainnet --env dev
+
+ # Dry run to preview changes
+ ./admin db-init --blockchain ethereum --network mainnet --env dev --dry-run
+
+ # Use specific AWS region
+ ./admin db-init --blockchain ethereum --network mainnet --env prod --aws-region us-west-2`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Use commonFlags from common.go for blockchain, network, and env
+ return runDBInit(commonFlags.blockchain, commonFlags.network, commonFlags.env, awsRegion, dryRun)
+ },
+ }
+
+ cmd.Flags().StringVar(&awsRegion, "aws-region", "us-east-1", "AWS region")
+ cmd.Flags().BoolVar(&dryRun, "dry-run", false, "Preview changes without applying them")
+
+ return cmd
+}
+
+func runDBInit(blockchain, network, env, awsRegion string, dryRun bool) error {
+ ctx := context.Background()
+ logger := log.WithPackage(logger)
+
+ logger.Info("Starting database initialization",
+ zap.String("blockchain", blockchain),
+ zap.String("network", network),
+ zap.String("environment", env),
+ zap.String("region", awsRegion),
+ zap.Bool("dry_run", dryRun))
+
+ // Get master credentials from environment variables (as set in admin pod deployment)
+ masterHost := os.Getenv("CHAINSTORAGE_AWS_POSTGRES_HOST")
+ masterPortStr := os.Getenv("CHAINSTORAGE_AWS_POSTGRES_PORT")
+ masterUser := os.Getenv("CHAINSTORAGE_AWS_POSTGRES_USER")
+ masterPassword := os.Getenv("CHAINSTORAGE_AWS_POSTGRES_PASSWORD")
+
+ if masterHost == "" || masterPortStr == "" || masterUser == "" || masterPassword == "" {
+ return xerrors.New("missing required environment variables: CHAINSTORAGE_AWS_POSTGRES_HOST, CHAINSTORAGE_AWS_POSTGRES_PORT, CHAINSTORAGE_AWS_POSTGRES_USER, CHAINSTORAGE_AWS_POSTGRES_PASSWORD - ensure this command is run from the admin pod")
+ }
+
+ var masterPort int
+ if _, err := fmt.Sscanf(masterPortStr, "%d", &masterPort); err != nil {
+ return xerrors.Errorf("invalid port number: %s", masterPortStr)
+ }
+
+ // Construct secret name
+ secretName := fmt.Sprintf("chainstorage/db-creds/%s", env)
+
+ // Fetch secret from AWS Secrets Manager
+ secretData, err := fetchSecret(ctx, secretName, awsRegion)
+ if err != nil {
+ return xerrors.Errorf("failed to fetch secret: %w", err)
+ }
+
+ // Construct lookup keys for this network
+ // Replace hyphens with underscores for the key lookup
+ networkKey := strings.ReplaceAll(fmt.Sprintf("%s_%s", blockchain, network), "-", "_")
+
+ // Extract network-specific values from flat secret
+ dbName, err := getStringFromSecret(secretData, fmt.Sprintf("%s_database_name", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get database name: %w", err)
+ }
+
+ serverUsername, err := getStringFromSecret(secretData, fmt.Sprintf("%s_server_username", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get server username: %w", err)
+ }
+
+ serverPassword, err := getStringFromSecret(secretData, fmt.Sprintf("%s_server_password", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get server password: %w", err)
+ }
+
+ workerUsername, err := getStringFromSecret(secretData, fmt.Sprintf("%s_worker_username", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get worker username: %w", err)
+ }
+
+ workerPassword, err := getStringFromSecret(secretData, fmt.Sprintf("%s_worker_password", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get worker password: %w", err)
+ }
+
+ logger.Info("Successfully fetched credentials from AWS Secrets Manager",
+ zap.String("database", dbName),
+ zap.String("server_user", serverUsername),
+ zap.String("worker_user", workerUsername))
+
+ if dryRun {
+ logger.Info("DRY RUN MODE - No changes will be made",
+ zap.String("database", dbName),
+ zap.String("server_user", serverUsername),
+ zap.String("worker_user", workerUsername))
+ return nil
+ }
+
+ // Connect to PostgreSQL as master user
+ masterDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=postgres sslmode=require",
+ masterHost, masterPort, masterUser, masterPassword)
+
+ db, err := sql.Open("postgres", masterDSN)
+ if err != nil {
+ return xerrors.Errorf("failed to connect to database: %w", err)
+ }
+ defer func() {
+ if closeErr := db.Close(); closeErr != nil {
+ logger.Warn("Failed to close database connection", zap.Error(closeErr))
+ }
+ }()
+
+ // Set connection pool settings
+ db.SetMaxOpenConns(5)
+ db.SetMaxIdleConns(2)
+ db.SetConnMaxLifetime(30 * time.Second)
+
+ // Test connection
+ if err := db.PingContext(ctx); err != nil {
+ return xerrors.Errorf("failed to ping database: %w", err)
+ }
+
+ logger.Info("Successfully connected to PostgreSQL cluster")
+
+ // Create users
+ logger.Info("Creating users")
+
+ if err := createUser(db, workerUsername, workerPassword, true, logger); err != nil {
+ return xerrors.Errorf("failed to create worker user %s: %w", workerUsername, err)
+ }
+ logger.Info("Created/verified worker user", zap.String("username", workerUsername))
+
+ if err := createUser(db, serverUsername, serverPassword, false, logger); err != nil {
+ return xerrors.Errorf("failed to create server user %s: %w", serverUsername, err)
+ }
+ logger.Info("Created/verified server user", zap.String("username", serverUsername))
+
+ // Create database
+ logger.Info("Creating database")
+ if err := createDatabase(db, dbName, workerUsername, logger); err != nil {
+ return xerrors.Errorf("failed to create database %s: %w", dbName, err)
+ }
+ logger.Info("Created/verified database", zap.String("database", dbName))
+
+ // Run migrations on the network database
+ logger.Info("Running migrations")
+ if err := runMigrations(ctx, masterHost, masterPort, masterUser, masterPassword, dbName, logger); err != nil {
+ return xerrors.Errorf("failed to run migrations: %w", err)
+ }
+ logger.Info("Migrations completed successfully")
+
+ // Grant permissions
+ logger.Info("Setting up permissions")
+
+ // Grant CONNECT permission to server user
+ if err := grantConnectPermission(db, dbName, serverUsername, logger); err != nil {
+ return xerrors.Errorf("failed to grant CONNECT permission on %s to %s: %w",
+ dbName, serverUsername, err)
+ }
+
+ // Connect to the network database to grant permissions
+ netDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=require",
+ masterHost, masterPort, masterUser, masterPassword, dbName)
+
+ netDB, err := sql.Open("postgres", netDSN)
+ if err != nil {
+ return xerrors.Errorf("failed to connect to database %s: %w", dbName, err)
+ }
+ defer func() {
+ if closeErr := netDB.Close(); closeErr != nil {
+ logger.Warn("Failed to close network database connection", zap.Error(closeErr))
+ }
+ }()
+
+ // Grant full access to worker user (owner)
+ if err := grantFullAccess(netDB, workerUsername, logger); err != nil {
+ return xerrors.Errorf("failed to grant full access on %s to %s: %w", dbName, workerUsername, err)
+ }
+ logger.Info("Granted full access to worker user", zap.String("username", workerUsername))
+
+ // Grant permissions to server user
+ if err := grantReadOnlyAccess(netDB, serverUsername, workerUsername, logger); err != nil {
+ return xerrors.Errorf("failed to grant permissions on %s: %w", dbName, err)
+ }
+ logger.Info("Granted read-only access to server user", zap.String("username", serverUsername))
+
+ logger.Info("Database initialization completed successfully",
+ zap.String("network", fmt.Sprintf("%s-%s", blockchain, network)),
+ zap.String("database", dbName),
+ zap.String("worker_user", workerUsername),
+ zap.String("server_user", serverUsername))
+
+ logger.Info("Next steps",
+ zap.String("step1", "Deploy chainstorage worker pods to start data ingestion"),
+ zap.String("step2", "Deploy chainstorage server pods for API access"),
+ zap.String("step3", "Monitor logs for successful connections"))
+
+ return nil
+}
+
+func fetchSecret(ctx context.Context, secretName, region string) (map[string]interface{}, error) {
+ cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load AWS config: %w", err)
+ }
+
+ client := secretsmanager.NewFromConfig(cfg)
+
+ input := &secretsmanager.GetSecretValueInput{
+ SecretId: &secretName,
+ }
+
+ result, err := client.GetSecretValue(ctx, input)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get secret value: %w", err)
+ }
+
+ // Parse the flat JSON secret
+ var secretData map[string]interface{}
+ if err := json.Unmarshal([]byte(*result.SecretString), &secretData); err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal secret: %w", err)
+ }
+
+ return secretData, nil
+}
+
+func getStringFromSecret(secret map[string]interface{}, key string) (string, error) {
+ value, ok := secret[key]
+ if !ok {
+ return "", xerrors.Errorf("key %s not found in secret", key)
+ }
+
+ strValue, ok := value.(string)
+ if !ok {
+ return "", xerrors.Errorf("value for key %s is not a string", key)
+ }
+
+ return strValue, nil
+}
+
+func createUser(db *sql.DB, username, password string, canCreateDB bool, logger *zap.Logger) error {
+ // Check if user exists
+ var exists bool
+ query := `SELECT EXISTS(SELECT 1 FROM pg_user WHERE usename = $1)`
+ if err := db.QueryRow(query, username).Scan(&exists); err != nil {
+ return xerrors.Errorf("failed to check if user exists: %w", err)
+ }
+
+ if exists {
+ logger.Info("User already exists, updating password", zap.String("username", username))
+ // Update password for existing user
+ alterQuery := fmt.Sprintf("ALTER USER %s WITH PASSWORD %s",
+ pq.QuoteIdentifier(username), pq.QuoteLiteral(password))
+ if _, err := db.Exec(alterQuery); err != nil {
+ return xerrors.Errorf("failed to update user password: %w", err)
+ }
+ return nil
+ }
+
+ // Create user with proper quoting
+ createQuery := fmt.Sprintf("CREATE USER %s WITH LOGIN PASSWORD %s",
+ pq.QuoteIdentifier(username), pq.QuoteLiteral(password))
+
+ if canCreateDB {
+ createQuery += " CREATEDB"
+ }
+
+ if _, err := db.Exec(createQuery); err != nil {
+ return xerrors.Errorf("failed to create user: %w", err)
+ }
+
+ return nil
+}
+
+func createDatabase(db *sql.DB, dbName, owner string, logger *zap.Logger) error {
+ // Check if database exists
+ var exists bool
+ query := `SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = $1)`
+ if err := db.QueryRow(query, dbName).Scan(&exists); err != nil {
+ return xerrors.Errorf("failed to check if database exists: %w", err)
+ }
+
+ if exists {
+ logger.Info("Database already exists, checking ownership", zap.String("database", dbName))
+ // Check current owner
+ var currentOwner string
+ ownerQuery := `SELECT pg_get_userbyid(datdba) FROM pg_database WHERE datname = $1`
+ if err := db.QueryRow(ownerQuery, dbName).Scan(&currentOwner); err != nil {
+ return xerrors.Errorf("failed to get database owner: %w", err)
+ }
+
+ if currentOwner == owner {
+ logger.Info("Database already owned by expected owner", zap.String("database", dbName), zap.String("owner", owner))
+ return nil
+ } else {
+ logger.Info("Transferring database ownership", zap.String("database", dbName), zap.String("current_owner", currentOwner), zap.String("new_owner", owner))
+ // Transfer ownership
+ alterQuery := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s",
+ pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner))
+ if _, err := db.Exec(alterQuery); err != nil {
+ return xerrors.Errorf("failed to transfer database ownership: %w", err)
+ }
+ return nil
+ }
+ }
+
+ // Create database with master user as owner first, then transfer ownership
+ createQuery := fmt.Sprintf("CREATE DATABASE %s ENCODING 'UTF8'",
+ pq.QuoteIdentifier(dbName))
+
+ if _, err := db.Exec(createQuery); err != nil {
+ return xerrors.Errorf("failed to create database: %w", err)
+ }
+
+ // Transfer ownership to the specified owner
+ alterQuery := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s",
+ pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner))
+
+ if _, err := db.Exec(alterQuery); err != nil {
+ return xerrors.Errorf("failed to transfer database ownership: %w", err)
+ }
+
+ return nil
+}
+
+func grantConnectPermission(db *sql.DB, dbName, username string, logger *zap.Logger) error {
+ // Grant CONNECT permission on database
+ grantQuery := fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s",
+ pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(username))
+
+ if _, err := db.Exec(grantQuery); err != nil {
+ // Re-granting an existing privilege succeeds silently in PostgreSQL,
+ // so any error here indicates a real failure
+ return xerrors.Errorf("failed to grant connect permission: %w", err)
+ }
+
+ return nil
+}
+
+func grantReadOnlyAccess(db *sql.DB, readUser, ownerUser string, logger *zap.Logger) error {
+ queries := []string{
+ fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", pq.QuoteIdentifier(readUser)),
+ fmt.Sprintf("GRANT SELECT ON ALL TABLES IN SCHEMA public TO %s", pq.QuoteIdentifier(readUser)),
+ fmt.Sprintf("GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO %s", pq.QuoteIdentifier(readUser)),
+ // Set default privileges for future objects created by the owner
+ fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR USER %s IN SCHEMA public GRANT SELECT ON TABLES TO %s",
+ pq.QuoteIdentifier(ownerUser), pq.QuoteIdentifier(readUser)),
+ fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR USER %s IN SCHEMA public GRANT SELECT ON SEQUENCES TO %s",
+ pq.QuoteIdentifier(ownerUser), pq.QuoteIdentifier(readUser)),
+ }
+
+ for _, q := range queries {
+ if _, err := db.Exec(q); err != nil {
+ // Some permissions might already exist, log but don't fail
+ logger.Warn("Failed to grant read-only permission (continuing)", zap.Error(err))
+ }
+ }
+
+ return nil
+}
+
+func grantFullAccess(db *sql.DB, username string, logger *zap.Logger) error {
+ queries := []string{
+ fmt.Sprintf("GRANT ALL PRIVILEGES ON SCHEMA public TO %s", pq.QuoteIdentifier(username)),
+ fmt.Sprintf("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO %s", pq.QuoteIdentifier(username)),
+ fmt.Sprintf("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO %s", pq.QuoteIdentifier(username)),
+ fmt.Sprintf("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO %s", pq.QuoteIdentifier(username)),
+ // Set default privileges for future objects
+ fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO %s", pq.QuoteIdentifier(username)),
+ fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO %s", pq.QuoteIdentifier(username)),
+ fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON FUNCTIONS TO %s", pq.QuoteIdentifier(username)),
+ }
+
+ for _, q := range queries {
+ if _, err := db.Exec(q); err != nil {
+ // Some permissions might already exist, log but don't fail
+ logger.Warn("Failed to grant full access permission (continuing)", zap.Error(err))
+ }
+ }
+
+ return nil
+}
+
+func runMigrations(ctx context.Context, host string, port int, user, password, dbName string, logger *zap.Logger) error {
+ // Connect to the network database to run migrations
+ migrationDSN := fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s sslmode=require",
+ host, port, dbName, user, password)
+
+ migrationDB, err := sql.Open("postgres", migrationDSN)
+ if err != nil {
+ return xerrors.Errorf("failed to connect to database for migrations: %w", err)
+ }
+ defer func() {
+ if closeErr := migrationDB.Close(); closeErr != nil {
+ logger.Warn("Failed to close migration database connection", zap.Error(closeErr))
+ }
+ }()
+
+ // Test connection
+ if err := migrationDB.PingContext(ctx); err != nil {
+ return xerrors.Errorf("failed to ping migration database: %w", err)
+ }
+
+ // Set dialect for goose
+ if err := goose.SetDialect("postgres"); err != nil {
+ return xerrors.Errorf("failed to set goose dialect: %w", err)
+ }
+
+ // Run migrations using the file system path
+ migrationsDir := "/app/migrations"
+ if err := goose.UpContext(ctx, migrationDB, migrationsDir); err != nil {
+ return xerrors.Errorf("failed to run migrations: %w", err)
+ }
+
+ return nil
+}
+
+func init() {
+ rootCmd.AddCommand(newDBInitCommand())
+}
diff --git a/cmd/admin/db_migrate.go b/cmd/admin/db_migrate.go
new file mode 100644
index 0000000..4f3196b
--- /dev/null
+++ b/cmd/admin/db_migrate.go
@@ -0,0 +1,209 @@
+package main
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "time"
+
+ _ "github.com/lib/pq"
+ "github.com/pressly/goose/v3"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+func newDBMigrateCommand() *cobra.Command {
+ var (
+ masterUser string
+ masterPassword string
+ host string
+ port int
+ sslMode string
+ connectTimeout time.Duration
+ dryRun bool
+ dbName string
+ )
+
+ cmd := &cobra.Command{
+ Use: "db-migrate",
+ Short: "Run PostgreSQL schema migrations",
+ Long: `Run PostgreSQL schema migrations using admin/master credentials.
+
+This command MUST be run from the ChainStorage admin pod or with master credentials.
+It applies any pending schema migrations to the database.
+
+The migrations are tracked via the goose_db_version table, ensuring idempotency.
+Running this command multiple times is safe - it will only apply new migrations.
+
+Required flags:
+- --master-user: PostgreSQL master/admin username
+- --master-password: PostgreSQL master/admin password
+
+Optional flags:
+- --host: PostgreSQL host (default: from environment)
+- --port: PostgreSQL port (default: 5432)
+- --db-name: Database name (default: chainstorage_{blockchain}_{network})
+- --ssl-mode: SSL mode (default: require)
+- --dry-run: Show pending migrations without applying them
+
+Example usage:
+ # Run migrations for ethereum-mainnet (from admin pod)
+ ./admin db-migrate --blockchain ethereum --network mainnet --env dev --master-user postgres --master-password <password>
+
+ # Dry run to see pending migrations
+ ./admin db-migrate --blockchain ethereum --network mainnet --env dev --master-user postgres --master-password <password> --dry-run
+
+ # Use custom database name
+ ./admin db-migrate --blockchain ethereum --network mainnet --env dev --master-user postgres --master-password <password> --db-name my_custom_db
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runDBMigrate(masterUser, masterPassword, host, port, dbName, sslMode, connectTimeout, dryRun)
+ },
+ }
+
+ cmd.Flags().StringVar(&masterUser, "master-user", "", "PostgreSQL master/admin username (required)")
+ cmd.Flags().StringVar(&masterPassword, "master-password", "", "PostgreSQL master/admin password (required)")
+ cmd.Flags().StringVar(&host, "host", "", "PostgreSQL host (uses environment if not specified)")
+ cmd.Flags().IntVar(&port, "port", 5432, "PostgreSQL port")
+ cmd.Flags().StringVar(&dbName, "db-name", "", "Database name (default: chainstorage_{blockchain}_{network})")
+ cmd.Flags().StringVar(&sslMode, "ssl-mode", "require", "SSL mode (disable, require, verify-ca, verify-full)")
+ cmd.Flags().DurationVar(&connectTimeout, "connect-timeout", 30*time.Second, "Connection timeout")
+ cmd.Flags().BoolVar(&dryRun, "dry-run", false, "Show pending migrations without applying them")
+
+ _ = cmd.MarkFlagRequired("master-user")
+ _ = cmd.MarkFlagRequired("master-password")
+
+ return cmd
+}
+
+func runDBMigrate(masterUser, masterPassword, host string, port int, dbName, sslMode string, connectTimeout time.Duration, dryRun bool) error {
+ ctx := context.Background()
+ logger := log.WithPackage(logger)
+
+ // Determine host from environment if not provided
+ if host == "" {
+ host = getEnvOrDefault("CHAINSTORAGE_AWS_POSTGRES_HOST", "localhost")
+ }
+
+ // Determine database name if not provided
+ if dbName == "" {
+ dbName = fmt.Sprintf("chainstorage_%s_%s", commonFlags.blockchain, commonFlags.network)
+ dbName = replaceHyphensWithUnderscores(dbName)
+ }
+
+ logger.Info("Starting database migration",
+ zap.String("blockchain", commonFlags.blockchain),
+ zap.String("network", commonFlags.network),
+ zap.String("env", commonFlags.env),
+ zap.String("host", host),
+ zap.Int("port", port),
+ zap.String("database", dbName),
+ zap.Bool("dry_run", dryRun),
+ )
+
+ // Build connection config
+ cfg := &config.PostgresConfig{
+ Host: host,
+ Port: port,
+ Database: dbName,
+ User: masterUser,
+ Password: masterPassword,
+ SSLMode: sslMode,
+ ConnectTimeout: connectTimeout,
+ }
+
+ // Connect to database
+ dsn := fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s sslmode=%s connect_timeout=%d",
+ cfg.Host, cfg.Port, cfg.Database, cfg.User, cfg.Password, cfg.SSLMode, int(cfg.ConnectTimeout.Seconds()))
+
+ db, err := sql.Open("postgres", dsn)
+ if err != nil {
+ return xerrors.Errorf("failed to open database connection: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
+
+ if err := db.PingContext(ctx); err != nil {
+ return xerrors.Errorf("failed to ping database: %w", err)
+ }
+
+ logger.Info("Successfully connected to database")
+
+ // Set up goose
+ goose.SetBaseFS(postgres.GetEmbeddedMigrations())
+ if err := goose.SetDialect("postgres"); err != nil {
+ return xerrors.Errorf("failed to set goose dialect: %w", err)
+ }
+
+ if dryRun {
+ // Show pending migrations
+ logger.Info("Checking for pending migrations (dry run)")
+
+ currentVersion, err := goose.GetDBVersion(db)
+ if err != nil {
+ return xerrors.Errorf("failed to get current database version: %w", err)
+ }
+
+ logger.Info("Current database version", zap.Int64("version", currentVersion))
+
+ migrations, err := goose.CollectMigrations("db/migrations", 0, 9999999)
+ if err != nil {
+ return xerrors.Errorf("failed to collect migrations: %w", err)
+ }
+
+ fmt.Printf("\n📋 Migration Status:\n")
+ fmt.Printf("Current version: %d\n\n", currentVersion)
+
+ hasPending := false
+ for _, migration := range migrations {
+ if migration.Version > currentVersion {
+ fmt.Printf(" [PENDING] %d: %s\n", migration.Version, migration.Source)
+ hasPending = true
+ } else {
+ fmt.Printf(" [APPLIED] %d: %s\n", migration.Version, migration.Source)
+ }
+ }
+
+ if !hasPending {
+ fmt.Printf("\n✅ No pending migrations. Database is up to date!\n")
+ } else {
+ fmt.Printf("\n⚠️ Pending migrations found. Run without --dry-run to apply them.\n")
+ }
+
+ return nil
+ }
+
+ // Apply migrations
+ logger.Info("Applying database migrations")
+ if err := goose.UpContext(ctx, db, "db/migrations"); err != nil {
+ return xerrors.Errorf("failed to run migrations: %w", err)
+ }
+
+ currentVersion, err := goose.GetDBVersion(db)
+ if err != nil {
+ return xerrors.Errorf("failed to get current database version: %w", err)
+ }
+
+ logger.Info("Migrations completed successfully", zap.Int64("current_version", currentVersion))
+ fmt.Printf("\n✅ Database migrations completed successfully!\n")
+ fmt.Printf("Current version: %d\n", currentVersion)
+
+ return nil
+}
+
+func getEnvOrDefault(key, defaultValue string) string {
+ if value := os.Getenv(key); value != "" {
+ return value
+ }
+ return defaultValue
+}
+
+func init() {
+ rootCmd.AddCommand(newDBMigrateCommand())
+}
diff --git a/cmd/admin/env_set.go b/cmd/admin/env_set.go
new file mode 100644
index 0000000..db5b1a9
--- /dev/null
+++ b/cmd/admin/env_set.go
@@ -0,0 +1,186 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+func newEnvSetCommand() *cobra.Command {
+ var (
+ awsRegion string
+ quiet bool
+ )
+
+ cmd := &cobra.Command{
+ Use: "env-set",
+ Short: "Set environment variables for PostgreSQL credentials from AWS Secrets Manager",
+ Long: `Set environment variables for PostgreSQL credentials from AWS Secrets Manager.
+
+This command fetches database credentials from AWS Secrets Manager and outputs export commands
+that can be evaluated in your shell to set environment variables.
+
+To use this command and set the environment variables in your current shell session:
+ eval $(./admin env-set --blockchain ethereum --network mainnet --env dev --quiet)
+
+Example usage:
+ # Set environment variables for ethereum mainnet (worker role)
+ eval $(./admin env-set --blockchain ethereum --network mainnet --env dev --quiet)
+
+The command will output the following environment variables:
+ - CHAINSTORAGE_AWS_POSTGRES_USER
+ - CHAINSTORAGE_AWS_POSTGRES_PASSWORD
+ - CHAINSTORAGE_AWS_POSTGRES_DATABASE
+ - CHAINSTORAGE_AWS_POSTGRES_HOST (from master credentials)
+ - CHAINSTORAGE_AWS_POSTGRES_PORT (from master credentials)
+ - CHAINSTORAGE_AWS_POSTGRES_SSL_MODE (default: require)`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Use worker role by default
+ role := "worker"
+ return runEnvSet(commonFlags.blockchain, commonFlags.network, commonFlags.env, awsRegion, role, quiet)
+ },
+ }
+
+ cmd.Flags().StringVar(&awsRegion, "aws-region", "us-east-1", "AWS region for Secrets Manager")
+ cmd.Flags().BoolVar(&quiet, "quiet", false, "Suppress log output (use with eval)")
+
+ return cmd
+}
+
+type EnvVars struct {
+ User string `json:"CHAINSTORAGE_AWS_POSTGRES_USER"`
+ Password string `json:"CHAINSTORAGE_AWS_POSTGRES_PASSWORD"`
+ Database string `json:"CHAINSTORAGE_AWS_POSTGRES_DATABASE"`
+ Host string `json:"CHAINSTORAGE_AWS_POSTGRES_HOST"`
+ Port string `json:"CHAINSTORAGE_AWS_POSTGRES_PORT"`
+ SSLMode string `json:"CHAINSTORAGE_AWS_POSTGRES_SSL_MODE"`
+}
+
+func runEnvSet(blockchain, network, env, awsRegion, role string, quiet bool) error {
+ ctx := context.Background()
+ logger := log.WithPackage(logger)
+
+ if !quiet {
+ logger.Info("Setting environment variables",
+ zap.String("blockchain", blockchain),
+ zap.String("network", network),
+ zap.String("environment", env),
+ zap.String("aws_region", awsRegion),
+ zap.String("role", role))
+ }
+
+ // Get master credentials from environment variables
+ masterHost := os.Getenv("CHAINSTORAGE_CLUSTER_ENDPOINT")
+ masterPortStr := os.Getenv("CHAINSTORAGE_CLUSTER_PORT")
+
+ if masterHost == "" || masterPortStr == "" {
+ return xerrors.New("missing required environment variables: CHAINSTORAGE_CLUSTER_ENDPOINT, CHAINSTORAGE_CLUSTER_PORT")
+ }
+
+ // Construct secret name
+ secretName := fmt.Sprintf("chainstorage/db-creds/%s", env)
+
+ // Fetch secret from AWS Secrets Manager
+ secretData, err := fetchSecret(ctx, secretName, awsRegion)
+ if err != nil {
+ return xerrors.Errorf("failed to fetch secret: %w", err)
+ }
+
+ // Construct lookup keys for this network
+ // Replace hyphens with underscores for the key lookup
+ networkKey := strings.ReplaceAll(fmt.Sprintf("%s_%s", blockchain, network), "-", "_")
+
+ // Extract network-specific values from flat secret
+ dbName, err := getStringFromSecret(secretData, fmt.Sprintf("%s_database_name", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get database name: %w", err)
+ }
+
+ var username, password string
+ if role == "worker" {
+ username, err = getStringFromSecret(secretData, fmt.Sprintf("%s_worker_username", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get worker username: %w", err)
+ }
+ password, err = getStringFromSecret(secretData, fmt.Sprintf("%s_worker_password", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get worker password: %w", err)
+ }
+ } else {
+ username, err = getStringFromSecret(secretData, fmt.Sprintf("%s_server_username", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get server username: %w", err)
+ }
+ password, err = getStringFromSecret(secretData, fmt.Sprintf("%s_server_password", networkKey))
+ if err != nil {
+ return xerrors.Errorf("failed to get server password: %w", err)
+ }
+ }
+
+ if !quiet {
+ logger.Info("Successfully fetched credentials from AWS Secrets Manager",
+ zap.String("database", dbName),
+ zap.String("user", username))
+ }
+
+ // Create environment variables
+ envVars := EnvVars{
+ User: username,
+ Password: password,
+ Database: dbName,
+ Host: masterHost,
+ Port: masterPortStr,
+ SSLMode: "require", // Default SSL mode
+ }
+
+ // Set environment variables in this process. Note: these do not persist to
+ // the parent shell; callers must eval the export lines printed below.
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_USER", envVars.User); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_USER: %w", err)
+ }
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_PASSWORD", envVars.Password); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_PASSWORD: %w", err)
+ }
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_DATABASE", envVars.Database); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_DATABASE: %w", err)
+ }
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_HOST", envVars.Host); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_HOST: %w", err)
+ }
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_PORT", envVars.Port); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_PORT: %w", err)
+ }
+ if err := os.Setenv("CHAINSTORAGE_AWS_POSTGRES_SSL_MODE", envVars.SSLMode); err != nil {
+ return xerrors.Errorf("failed to set CHAINSTORAGE_AWS_POSTGRES_SSL_MODE: %w", err)
+ }
+
+ // Output environment variables for reference
+ // Shell export format (for sourcing in bash)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_USER=\"%s\"\n", envVars.User)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_PASSWORD=\"%s\"\n", envVars.Password)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_DATABASE=\"%s\"\n", envVars.Database)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_HOST=\"%s\"\n", envVars.Host)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_PORT=\"%s\"\n", envVars.Port)
+ fmt.Printf("export CHAINSTORAGE_AWS_POSTGRES_SSL_MODE=\"%s\"\n", envVars.SSLMode)
+
+ if !quiet {
+ logger.Info("Environment variables set successfully",
+ zap.String("blockchain", blockchain),
+ zap.String("network", network),
+ zap.String("role", role),
+ zap.String("database", dbName),
+ zap.String("user", username))
+ }
+
+ return nil
+}
+
+func init() {
+ rootCmd.AddCommand(newEnvSetCommand())
+}
diff --git a/cmd/admin/migrate.go b/cmd/admin/migrate.go
new file mode 100644
index 0000000..4446692
--- /dev/null
+++ b/cmd/admin/migrate.go
@@ -0,0 +1,733 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ awssdk "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+ "github.com/spf13/cobra"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/aws"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage"
+ dynamodb_storage "github.com/coinbase/chainstorage/internal/storage/metastorage/dynamodb"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/dynamodb/model"
+ postgres_storage "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+var (
+ migrateFlags struct {
+ startHeight uint64
+ endHeight uint64
+ eventTag uint32
+ tag uint32
+ batchSize int
+ miniBatchSize int
+ checkpointSize int
+ parallelism int
+ skipEvents bool
+ skipBlocks bool
+ continuousSync bool
+ syncInterval string
+ backoffInterval string
+ autoResume bool
+ }
+)
+
+var (
+ migrateCmd = &cobra.Command{
+ Use: "migrate",
+ Short: "Migrate data from DynamoDB to PostgreSQL with optional continuous sync",
+ Long: `Migrate block metadata and events from DynamoDB to PostgreSQL.
+
+Block Migration:
+- Handles reorgs by migrating non-canonical blocks first, then canonical blocks last
+- Captures complete reorg data by querying DynamoDB directly for all blocks at each height
+- Maintains canonical block identification in PostgreSQL through migration order
+
+Event Migration:
+- Uses event ID-based iteration for efficient migration
+- Gets first event ID from start height, last event ID from end height
+- Migrates events sequentially by event ID range in batches
+- Event IDs in DynamoDB correspond directly to event sequences in PostgreSQL
+
+Continuous Sync Mode:
+- Enables infinite loop mode for real-time data synchronization
+- When enabled and current batch completes:
+ - Sets new StartHeight to current EndHeight
+ - Resets EndHeight to 0 (meaning "sync to latest")
+ - Waits for SyncInterval duration
+ - Restarts migration with new parameters
+- Validation: EndHeight must be 0 OR greater than StartHeight when ContinuousSync is enabled
+
+Performance Parameters:
+- BatchSize: Number of blocks to process in each workflow batch
+- MiniBatchSize: Number of blocks to process in each activity mini-batch (for parallelism)
+- CheckpointSize: Number of blocks to process before creating a workflow checkpoint
+- Parallelism: Number of parallel workers for processing mini-batches
+
+End Height:
+- If --end-height is not provided, the tool will automatically query the latest block
+ from DynamoDB and use that as the end height (exclusive)
+- Note: Auto-detection is only available in the migrate command, not in workflow mode
+
+Note: Block metadata must be migrated before events since events reference blocks via foreign keys.
+
+Examples:
+ # Migrate blocks and events from height 1000000 to latest block (auto-detected)
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --tag=2 \
+ --event-tag=3
+
+ # Migrate specific height range with custom batch sizes
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=100 \
+ --end-height=152 \
+ --tag=2 \
+ --event-tag=3 \
+ --batch-size=50 \
+ --mini-batch-size=10 \
+ --parallelism=4
+
+ # Continuous sync mode - syncs continuously with 30 second intervals
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --tag=2 \
+ --event-tag=3 \
+ --continuous-sync \
+ --sync-interval=30s \
+ --batch-size=100 \
+ --mini-batch-size=20 \
+ --parallelism=2
+
+ # Migrate blocks only (skip events)
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --tag=2 \
+ --event-tag=3 \
+ --skip-events
+
+ # Migrate events only (requires blocks to exist first)
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=1001000 \
+ --tag=2 \
+ --event-tag=3 \
+ --skip-blocks \
+ --backoff-interval=1s
+
+ # High throughput migration with checkpoints
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --start-height=1000000 \
+ --end-height=2000000 \
+ --tag=2 \
+ --event-tag=3 \
+ --batch-size=1000 \
+ --mini-batch-size=100 \
+ --checkpoint-size=10000 \
+ --parallelism=8
+
+ # Auto-resume from where previous migration left off
+ go run cmd/admin/*.go migrate \
+ --env=local \
+ --blockchain=ethereum \
+ --network=mainnet \
+ --auto-resume \
+ --tag=2 \
+ --event-tag=3`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Validate flag combinations
+ if !migrateFlags.autoResume && migrateFlags.startHeight == 0 {
+ return xerrors.New("start-height is required unless --auto-resume is enabled")
+ }
+ if migrateFlags.autoResume && migrateFlags.startHeight != 0 {
+ return xerrors.New("cannot specify both --auto-resume and --start-height (auto-resume will determine start height)")
+ }
+
+ var deps struct {
+ fx.In
+ Config *config.Config
+ Session *session.Session
+ Params fxparams.Params
+ }
+
+ app := startApp(
+ aws.Module,
+ storage.Module,
+ fx.Populate(&deps),
+ )
+ defer func() {
+ app.Close()
+ }()
+
+ // Create DynamoDB storage directly
+ dynamoDBParams := dynamodb_storage.Params{
+ Params: deps.Params,
+ Session: deps.Session,
+ }
+ sourceResult, err := dynamodb_storage.NewMetaStorage(dynamoDBParams)
+ if err != nil {
+ return xerrors.Errorf("failed to create DynamoDB storage: %w", err)
+ }
+
+ // Create PostgreSQL storage directly
+ postgresParams := postgres_storage.Params{
+ Params: deps.Params,
+ }
+ destResult, err := postgres_storage.NewMetaStorage(postgresParams)
+ if err != nil {
+ return xerrors.Errorf("failed to create PostgreSQL storage: %w", err)
+ }
+
+ // Note: Validation will happen after end height auto-detection
+
+ if migrateFlags.batchSize <= 0 {
+ migrateFlags.batchSize = 100
+ }
+
+ if migrateFlags.miniBatchSize <= 0 {
+ migrateFlags.miniBatchSize = migrateFlags.batchSize / 10
+ if migrateFlags.miniBatchSize <= 0 {
+ migrateFlags.miniBatchSize = 10
+ }
+ }
+
+ if migrateFlags.checkpointSize <= 0 {
+ migrateFlags.checkpointSize = 10000
+ }
+
+ if migrateFlags.parallelism <= 0 {
+ migrateFlags.parallelism = 1
+ }
+
+ // Both skip flags cannot be true
+ if migrateFlags.skipEvents && migrateFlags.skipBlocks {
+ return xerrors.New("cannot skip both events and blocks - nothing to migrate")
+ }
+
+ // Validate continuous sync parameters
+ if migrateFlags.continuousSync {
+ logger.Warn("WARNING: Continuous sync is not supported in direct migration mode")
+ logger.Warn("Continuous sync is only available when using the migrator workflow")
+ logger.Warn("This tool will perform a one-time migration and exit")
+
+ if migrateFlags.endHeight != 0 && migrateFlags.endHeight <= migrateFlags.startHeight {
+ return xerrors.Errorf("with continuous sync enabled, end height (%d) must be 0 OR greater than start height (%d)",
+ migrateFlags.endHeight, migrateFlags.startHeight)
+ }
+ }
+
+ // Warn about skip-blocks requirements
+ if migrateFlags.skipBlocks && !migrateFlags.skipEvents {
+ logger.Warn("IMPORTANT: Using --skip-blocks (events-only migration)")
+ logger.Warn("Block metadata MUST already exist in PostgreSQL for the specified height range")
+ logger.Warn("If block metadata is missing, the migration will fail with foreign key errors")
+ logger.Warn("To fix: First migrate blocks with --skip-events, then migrate events with --skip-blocks")
+
+ prompt := "Are you sure block metadata already exists in PostgreSQL for this range? (y/N): "
+ if !confirm(prompt) {
+ logger.Info("Migration cancelled - migrate blocks first with --skip-events")
+ return nil
+ }
+ }
+
+ ctx := context.Background()
+
+ // Handle auto-resume functionality
+ if migrateFlags.autoResume && migrateFlags.startHeight == 0 {
+ logger.Info("AutoResume enabled, querying PostgreSQL destination for latest migrated block")
+ latestBlock, err := destResult.MetaStorage.GetLatestBlock(ctx, migrateFlags.tag)
+ if err != nil {
+ // Check if it's a "not found" error, which means no blocks migrated yet
+ errStr := strings.ToLower(err.Error())
+ if strings.Contains(errStr, "not found") || strings.Contains(errStr, "no rows") {
+ logger.Info("Auto-resume: no blocks found in PostgreSQL destination, starting from beginning")
+ migrateFlags.startHeight = 0
+ } else {
+ return xerrors.Errorf("failed to get latest block height from PostgreSQL: %w", err)
+ }
+ } else {
+ // Resume from the next block after the latest migrated block
+ migrateFlags.startHeight = latestBlock.Height + 1
+ logger.Info("Auto-resume: found latest block in PostgreSQL destination",
+ zap.Uint64("latestHeight", latestBlock.Height),
+ zap.Uint64("resumeFromHeight", migrateFlags.startHeight))
+ }
+ }
+
+ // Handle end height - if not provided, query latest block from DynamoDB
+ if migrateFlags.endHeight == 0 {
+ logger.Info("No end height provided, querying latest block from DynamoDB...")
+
+ // Query latest block from DynamoDB
+ latestBlock, err := sourceResult.MetaStorage.GetLatestBlock(ctx, migrateFlags.tag)
+ if err != nil {
+ return xerrors.Errorf("failed to get latest block from DynamoDB: %w", err)
+ }
+
+ migrateFlags.endHeight = latestBlock.Height + 1 // Make it exclusive
+ logger.Info("Found latest block in DynamoDB",
+ zap.Uint64("latestHeight", latestBlock.Height),
+ zap.Uint64("endHeight", migrateFlags.endHeight),
+ zap.String("latestHash", latestBlock.Hash))
+ }
+
+ // Validate flags after end height auto-detection and auto-resume
+ if !migrateFlags.continuousSync && migrateFlags.startHeight >= migrateFlags.endHeight {
+ // Special case: if auto-resume found we're already caught up
+ if migrateFlags.autoResume {
+ logger.Info("Auto-resume detected: already caught up, no migration needed",
+ zap.Uint64("startHeight", migrateFlags.startHeight),
+ zap.Uint64("endHeight", migrateFlags.endHeight))
+ return nil // Successfully completed with no work to do
+ }
+ return xerrors.Errorf("startHeight (%d) must be less than endHeight (%d)",
+ migrateFlags.startHeight, migrateFlags.endHeight)
+ }
+ // Create DynamoDB client for direct queries
+ dynamoClient := dynamodb.New(deps.Session)
+ blockTable := deps.Config.AWS.DynamoDB.BlockTable
+
+ migrator := &DataMigrator{
+ sourceStorage: sourceResult.MetaStorage,
+ destStorage: destResult.MetaStorage,
+ config: deps.Config,
+ logger: logger,
+ dynamoClient: dynamoClient,
+ blockTable: blockTable,
+ }
+
+ // Confirmation prompt
+ prompt := fmt.Sprintf("This will migrate data from height %d to %d. Continue? (y/N): ",
+ migrateFlags.startHeight, migrateFlags.endHeight)
+ if !confirm(prompt) {
+ logger.Info("Migration cancelled")
+ return nil
+ }
+
+ migrateParams := MigrationParams{
+ StartHeight: migrateFlags.startHeight,
+ EndHeight: migrateFlags.endHeight,
+ EventTag: migrateFlags.eventTag,
+ Tag: migrateFlags.tag,
+ BatchSize: migrateFlags.batchSize,
+ MiniBatchSize: migrateFlags.miniBatchSize,
+ CheckpointSize: migrateFlags.checkpointSize,
+ Parallelism: migrateFlags.parallelism,
+ SkipEvents: migrateFlags.skipEvents,
+ SkipBlocks: migrateFlags.skipBlocks,
+ ContinuousSync: migrateFlags.continuousSync,
+ SyncInterval: migrateFlags.syncInterval,
+ BackoffInterval: migrateFlags.backoffInterval,
+ AutoResume: migrateFlags.autoResume,
+ }
+
+ return migrator.Migrate(ctx, migrateParams)
+ },
+ }
+)
+
+type MigrationParams struct {
+ StartHeight uint64
+ EndHeight uint64
+ EventTag uint32
+ Tag uint32
+ BatchSize int
+ MiniBatchSize int
+ CheckpointSize int
+ Parallelism int
+ SkipEvents bool
+ SkipBlocks bool
+ ContinuousSync bool
+ SyncInterval string
+ BackoffInterval string
+ AutoResume bool
+}
+
+type DataMigrator struct {
+ sourceStorage metastorage.MetaStorage
+ destStorage metastorage.MetaStorage
+ config *config.Config
+ logger *zap.Logger
+ // Direct DynamoDB access for querying all blocks
+ dynamoClient *dynamodb.DynamoDB
+ blockTable string
+}
+
+func (m *DataMigrator) Migrate(ctx context.Context, params MigrationParams) error {
+ m.logger.Info("Starting migration",
+ zap.Uint64("startHeight", params.StartHeight),
+ zap.Uint64("endHeight", params.EndHeight),
+ zap.Bool("skipBlocks", params.SkipBlocks),
+ zap.Bool("skipEvents", params.SkipEvents))
+
+ startTime := time.Now()
+
+ // Phase 1: Migrate block metadata FIRST (required for foreign key references)
+ if !params.SkipBlocks {
+ if err := m.migrateBlocksPerHeight(ctx, params); err != nil {
+ return xerrors.Errorf("failed to migrate blocks: %w", err)
+ }
+ }
+
+ // Phase 2: Migrate events AFTER blocks (depends on block metadata foreign keys)
+ if !params.SkipEvents {
+ if err := m.migrateEvents(ctx, params); err != nil {
+ return xerrors.Errorf("failed to migrate events: %w", err)
+ }
+ }
+
+ duration := time.Since(startTime)
+ m.logger.Info("Migration completed successfully",
+ zap.Duration("duration", duration),
+ zap.Uint64("heightRange", params.EndHeight-params.StartHeight))
+
+ return nil
+}
+
+func (m *DataMigrator) migrateBlocksPerHeight(ctx context.Context, params MigrationParams) error {
+ m.logger.Info("Starting height-by-height block metadata migration with complete reorg support")
+
+ totalHeights := params.EndHeight - params.StartHeight
+ processedHeights := uint64(0)
+ totalNonCanonicalBlocks := 0
+
+ for height := params.StartHeight; height < params.EndHeight; height++ {
+ nonCanonicalCount, err := m.migrateBlocksAtHeight(ctx, params, height)
+ if err != nil {
+ return xerrors.Errorf("failed to migrate blocks at height %d: %w", height, err)
+ }
+
+ totalNonCanonicalBlocks += nonCanonicalCount
+ processedHeights++
+
+ // Progress logging every 100 heights
+ if processedHeights%100 == 0 {
+ percentage := float64(processedHeights) / float64(totalHeights) * 100
+ m.logger.Info("Block migration progress",
+ zap.Uint64("processed", processedHeights),
+ zap.Uint64("total", totalHeights),
+ zap.Float64("percentage", percentage),
+ zap.Int("totalNonCanonicalBlocks", totalNonCanonicalBlocks))
+ }
+ }
+
+ m.logger.Info("Height-by-height block metadata migration completed",
+ zap.Int("totalNonCanonicalBlocks", totalNonCanonicalBlocks))
+ return nil
+}
+
+func (m *DataMigrator) migrateBlocksAtHeight(ctx context.Context, params MigrationParams, height uint64) (int, error) {
+ blockPid := fmt.Sprintf("%d-%d", params.Tag, height)
+
+ // Phase 1: Get and persist non-canonical blocks first
+ // Query: BlockPid = "{tag}-{height}" AND BlockRid != "canonical"
+ nonCanonicalBlocks, err := m.getNonCanonicalBlocksAtHeight(ctx, blockPid)
+ if err != nil && !xerrors.Is(err, storage.ErrItemNotFound) {
+ return 0, xerrors.Errorf("failed to get non-canonical blocks at height %d: %w", height, err)
+ }
+
+ nonCanonicalCount := len(nonCanonicalBlocks)
+ if nonCanonicalCount > 0 {
+ m.logger.Debug("Found non-canonical (reorg) blocks at height",
+ zap.Uint64("height", height),
+ zap.Int("count", nonCanonicalCount))
+
+ // Persist non-canonical blocks FIRST
+ err = m.destStorage.PersistBlockMetas(ctx, false, nonCanonicalBlocks, nil)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to persist non-canonical blocks at height %d: %w", height, err)
+ }
+ }
+
+ // Phase 2: Get and persist canonical block LAST
+ // Query: BlockPid = "{tag}-{height}" AND BlockRid = "canonical"
+ canonicalBlock, err := m.getCanonicalBlockAtHeight(ctx, blockPid)
+ if err != nil {
+ if xerrors.Is(err, storage.ErrItemNotFound) {
+ m.logger.Debug("No canonical block found at height", zap.Uint64("height", height))
+ return nonCanonicalCount, nil
+ }
+ return 0, xerrors.Errorf("failed to get canonical block at height %d: %w", height, err)
+ }
+
+ m.logger.Debug("Found canonical block at height",
+ zap.Uint64("height", height),
+ zap.String("hash", canonicalBlock.Hash),
+ zap.Int("reorgBlockCount", nonCanonicalCount))
+
+ // Persist canonical block LAST - this ensures it becomes canonical in PostgreSQL
+ err = m.destStorage.PersistBlockMetas(ctx, true, []*api.BlockMetadata{canonicalBlock}, nil)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to persist canonical block at height %d: %w", height, err)
+ }
+
+ return nonCanonicalCount, nil
+}
+
+func (m *DataMigrator) getNonCanonicalBlocksAtHeight(ctx context.Context, blockPid string) ([]*api.BlockMetadata, error) {
+ // Query DynamoDB for ALL blocks at this height: BlockPid = blockPid
+ // Then filter out the canonical one client-side
+ input := &dynamodb.QueryInput{
+ TableName: awssdk.String(m.blockTable),
+ KeyConditionExpression: awssdk.String("block_pid = :blockPid"),
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":blockPid": {
+ S: awssdk.String(blockPid),
+ },
+ },
+ ConsistentRead: awssdk.Bool(true),
+ }
+
+ result, err := m.dynamoClient.QueryWithContext(ctx, input)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to query blocks at height: %w", err)
+ }
+
+ // Filter out canonical blocks client-side
+ var nonCanonicalBlocks []*api.BlockMetadata
+ for _, item := range result.Items {
+ var blockEntry model.BlockMetaDataDDBEntry
+ err := dynamodbattribute.UnmarshalMap(item, &blockEntry)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal DynamoDB item: %w", err)
+ }
+
+ // Skip canonical blocks (BlockRid = "canonical")
+ if blockEntry.BlockRid == "canonical" {
+ continue
+ }
+
+ nonCanonicalBlocks = append(nonCanonicalBlocks, model.BlockMetadataToProto(&blockEntry))
+ }
+
+ return nonCanonicalBlocks, nil
+}
+
+func (m *DataMigrator) getCanonicalBlockAtHeight(ctx context.Context, blockPid string) (*api.BlockMetadata, error) {
+ // Query DynamoDB directly: BlockPid = blockPid AND BlockRid = "canonical"
+ input := &dynamodb.QueryInput{
+ TableName: awssdk.String(m.blockTable),
+ KeyConditionExpression: awssdk.String("block_pid = :blockPid AND block_rid = :canonical"),
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":blockPid": {
+ S: awssdk.String(blockPid),
+ },
+ ":canonical": {
+ S: awssdk.String("canonical"),
+ },
+ },
+ ConsistentRead: awssdk.Bool(true),
+ }
+
+ result, err := m.dynamoClient.QueryWithContext(ctx, input)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to query canonical block: %w", err)
+ }
+
+ if len(result.Items) == 0 {
+ return nil, storage.ErrItemNotFound
+ }
+
+ if len(result.Items) > 1 {
+ return nil, xerrors.Errorf("multiple canonical blocks found for %s", blockPid)
+ }
+
+ var blockEntry model.BlockMetaDataDDBEntry
+ err = dynamodbattribute.UnmarshalMap(result.Items[0], &blockEntry)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal canonical block: %w", err)
+ }
+
+ return model.BlockMetadataToProto(&blockEntry), nil
+}
+
+func (m *DataMigrator) migrateEvents(ctx context.Context, params MigrationParams) error {
+ m.logger.Info("Starting event ID-based migration")
+
+ // Step 1: Get the first event ID at start height
+ startEventId, err := m.sourceStorage.GetFirstEventIdByBlockHeight(ctx, params.EventTag, params.StartHeight)
+ if err != nil {
+ if xerrors.Is(err, storage.ErrItemNotFound) {
+ m.logger.Info("No events found at start height", zap.Uint64("startHeight", params.StartHeight))
+ return nil
+ }
+ return xerrors.Errorf("failed to get first event ID at start height %d: %w", params.StartHeight, err)
+ }
+
+ // Step 2: Find the last event ID within the height range [startHeight, endHeight)
+ var endEventId int64
+ if params.EndHeight > params.StartHeight {
+ endEventId, err = m.findLastEventIdInRange(ctx, params.EventTag, params.StartHeight, params.EndHeight)
+ if err != nil {
+ return xerrors.Errorf("failed to find last event ID in range [%d, %d): %w", params.StartHeight, params.EndHeight, err)
+ }
+
+ // If the last event found precedes startEventId, fall back to migrating just the starting event
+ if endEventId < startEventId {
+ endEventId = startEventId
+ }
+ } else {
+ endEventId = startEventId
+ }
+
+ m.logger.Info("Event ID range determined",
+ zap.Int64("startEventId", startEventId),
+ zap.Int64("endEventId", endEventId),
+ zap.Int64("totalEvents", endEventId-startEventId+1))
+
+ if endEventId < startEventId {
+ m.logger.Info("No events to migrate (end event ID < start event ID)")
+ return nil
+ }
+
+ // Step 3: Migrate events by event ID range in batches
+ totalEvents := endEventId - startEventId + 1
+ processedEvents := int64(0)
+ batchSize := int64(params.BatchSize)
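+ // Example (illustrative numbers): startEventId=100, endEventId=349, batchSize=100
+ // yields inclusive batches [100,199], [200,299], [300,349]; the storage call
+ // below takes an exclusive upper bound, hence batchEndEventId+1.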
+
+ for currentEventId := startEventId; currentEventId <= endEventId; currentEventId += batchSize {
+ // Calculate the end of this batch
+ batchEndEventId := currentEventId + batchSize - 1
+ if batchEndEventId > endEventId {
+ batchEndEventId = endEventId
+ }
+
+ // Get events in this range from DynamoDB
+ sourceEvents, err := m.sourceStorage.GetEventsByEventIdRange(ctx, params.EventTag, currentEventId, batchEndEventId+1)
+ if err != nil {
+ if xerrors.Is(err, storage.ErrItemNotFound) {
+ m.logger.Debug("No events found in event ID range",
+ zap.Int64("startEventId", currentEventId),
+ zap.Int64("endEventId", batchEndEventId))
+ processedEvents += batchEndEventId - currentEventId + 1
+ continue
+ }
+ return xerrors.Errorf("failed to get events in range [%d, %d]: %w", currentEventId, batchEndEventId, err)
+ }
+
+ if len(sourceEvents) == 0 {
+ processedEvents += batchEndEventId - currentEventId + 1
+ continue
+ }
+
+ m.logger.Debug("Migrating event batch",
+ zap.Int("count", len(sourceEvents)),
+ zap.Int64("startEventId", currentEventId),
+ zap.Int64("endEventId", batchEndEventId))
+
+ // Migrate this batch to PostgreSQL
+ err = m.destStorage.AddEventEntries(ctx, params.EventTag, sourceEvents)
+ if err != nil {
+ return xerrors.Errorf("failed to add events batch [%d, %d] to PostgreSQL: %w", currentEventId, batchEndEventId, err)
+ }
+
+ processedEvents += int64(len(sourceEvents))
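+ // Note: processedEvents advances by the full batch width when a range is empty
+ // (above) and by the number of rows returned otherwise, so it tracks progress
+ // through the event-ID space rather than an exact migrated-row count.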
+
+ // Log progress on multiples of 1000 and once more at completion
+ if processedEvents%1000 == 0 || processedEvents == totalEvents {
+ percentage := float64(processedEvents) / float64(totalEvents) * 100
+ m.logger.Info("Event migration progress",
+ zap.Int64("processed", processedEvents),
+ zap.Int64("total", totalEvents),
+ zap.Float64("percentage", percentage))
+ }
+ }
+
+ m.logger.Info("Event ID-based migration completed",
+ zap.Int64("totalEventsMigrated", processedEvents))
+ return nil
+}
+
+// findLastEventIdInRange finds the maximum event ID within the specified height range
+// by searching backwards from endHeight-1 until an event is found or startHeight is reached
+func (m *DataMigrator) findLastEventIdInRange(ctx context.Context, eventTag uint32, startHeight, endHeight uint64) (int64, error) {
+ m.logger.Debug("Finding last event ID in height range",
+ zap.Uint64("startHeight", startHeight),
+ zap.Uint64("endHeight", endHeight))
+
+ // Search backwards from endHeight-1 down to startHeight to find the last event.
+ // Decrement at the top of the loop body: a "height >= startHeight" loop
+ // condition would underflow uint64 (and never terminate) when startHeight is 0.
+ for height := endHeight; height > startHeight; {
+ height--
+ events, err := m.sourceStorage.GetEventsByBlockHeight(ctx, eventTag, height)
+ if err != nil {
+ if xerrors.Is(err, storage.ErrItemNotFound) {
+ // No events at this height, continue searching backwards
+ m.logger.Debug("No events found at height", zap.Uint64("height", height))
+ continue
+ }
+ return 0, xerrors.Errorf("failed to get events at height %d: %w", height, err)
+ }
+
+ // Find the maximum event ID at this height
+ var maxEventId int64 = -1
+ for _, event := range events {
+ if event.EventId > maxEventId {
+ maxEventId = event.EventId
+ }
+ }
+
+ if maxEventId >= 0 {
+ m.logger.Debug("Found last event in range",
+ zap.Uint64("height", height),
+ zap.Int64("eventId", maxEventId))
+ return maxEventId, nil
+ }
+ }
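+ // In practice the first height probed usually has events, so this backwards
+ // walk terminates almost immediately; the worst case is linear in the range.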
+
+ // No events found in the entire range
+ m.logger.Debug("No events found in the specified height range")
+ return -1, storage.ErrItemNotFound
+}
+
+func init() {
+ migrateCmd.Flags().Uint64Var(&migrateFlags.startHeight, "start-height", 0, "start block height (inclusive)")
+ migrateCmd.Flags().Uint64Var(&migrateFlags.endHeight, "end-height", 0, "end block height (exclusive, optional - if not provided, will query latest block from DynamoDB)")
+ migrateCmd.Flags().Uint32Var(&migrateFlags.eventTag, "event-tag", 0, "event tag for migration")
+ migrateCmd.Flags().Uint32Var(&migrateFlags.tag, "tag", 1, "block tag for migration")
+ migrateCmd.Flags().IntVar(&migrateFlags.batchSize, "batch-size", 100, "number of blocks to process in each workflow batch")
+ migrateCmd.Flags().IntVar(&migrateFlags.miniBatchSize, "mini-batch-size", 0, "number of blocks to process in each activity mini-batch (default: batch-size/10)")
+ migrateCmd.Flags().IntVar(&migrateFlags.checkpointSize, "checkpoint-size", 10000, "number of blocks to process before creating a workflow checkpoint")
+ migrateCmd.Flags().IntVar(&migrateFlags.parallelism, "parallelism", 1, "number of parallel workers for processing mini-batches")
+ migrateCmd.Flags().BoolVar(&migrateFlags.skipEvents, "skip-events", false, "skip event migration (blocks only)")
+ migrateCmd.Flags().BoolVar(&migrateFlags.skipBlocks, "skip-blocks", false, "skip block migration (events only)")
+ migrateCmd.Flags().BoolVar(&migrateFlags.continuousSync, "continuous-sync", false, "enable continuous sync mode (infinite loop, workflow only)")
+ migrateCmd.Flags().StringVar(&migrateFlags.syncInterval, "sync-interval", "1m", "time duration to wait between continuous sync cycles (e.g., '1m', '30s')")
+ migrateCmd.Flags().StringVar(&migrateFlags.backoffInterval, "backoff-interval", "", "time duration to wait between batches (e.g., '1s', '500ms')")
+ migrateCmd.Flags().BoolVar(&migrateFlags.autoResume, "auto-resume", false, "automatically determine start height from latest block in PostgreSQL destination")
+
+ // start-height is required unless auto-resume is enabled
+ // end-height is optional - if not provided, will query latest block from DynamoDB
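+ // Illustrative invocation (values are hypothetical):
+ //   chainstorage admin migrate --blockchain ethereum --network mainnet --env local \
+ //     --start-height 17000000 --batch-size 100 --parallelism 4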
+
+ rootCmd.AddCommand(migrateCmd)
+}
diff --git a/cmd/admin/postgres.go b/cmd/admin/postgres.go
new file mode 100644
index 0000000..a76d924
--- /dev/null
+++ b/cmd/admin/postgres.go
@@ -0,0 +1,199 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
+)
+
+const (
+ masterUserFlag = "master-user"
+ masterPassFlag = "master-password"
+ hostFlag = "host"
+ portFlag = "port"
+ workerUserFlag = "worker-user"
+ workerPassFlag = "worker-password"
+ serverUserFlag = "server-user"
+ serverPassFlag = "server-password"
+ sslModeFlag = "ssl-mode"
+ dbNameFlag = "db-name"
+ connectTimeoutFlag = "connect-timeout"
+)
+
+func newPostgresCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "setup-postgres",
+ Short: "Create database and roles for a new network in PostgreSQL.",
+ Long: `Create database and roles for a new network in PostgreSQL.
+
+This command connects to PostgreSQL using master/admin credentials and creates:
+1. A worker role (read/write permissions)
+2. A server role (read-only permissions)
+3. A database owned by the worker role
+4. Proper permissions for both roles
+
+Example usage:
+ # Set up database for ethereum-mainnet
+ chainstorage admin setup-postgres --blockchain ethereum --network mainnet --env local --master-user postgres --master-password mypassword
+
+ # Set up database with custom name
+ chainstorage admin setup-postgres --blockchain ethereum --network mainnet --env local --db-name my_custom_db --master-user admin --master-password secret
+
+ # Use with custom PostgreSQL instance
+ chainstorage admin setup-postgres --blockchain bitcoin --network mainnet --env development --host mydb.example.com --port 5433 --ssl-mode require`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ app := startApp()
+ defer app.Close()
+
+ // Parse all flags
+ masterUser, err := cmd.Flags().GetString(masterUserFlag)
+ if err != nil {
+ return err
+ }
+ masterPassword, err := cmd.Flags().GetString(masterPassFlag)
+ if err != nil {
+ return err
+ }
+ host, err := cmd.Flags().GetString(hostFlag)
+ if err != nil {
+ return err
+ }
+ port, err := cmd.Flags().GetInt(portFlag)
+ if err != nil {
+ return err
+ }
+ workerUser, err := cmd.Flags().GetString(workerUserFlag)
+ if err != nil {
+ return err
+ }
+ workerPassword, err := cmd.Flags().GetString(workerPassFlag)
+ if err != nil {
+ return err
+ }
+ serverUser, err := cmd.Flags().GetString(serverUserFlag)
+ if err != nil {
+ return err
+ }
+ serverPassword, err := cmd.Flags().GetString(serverPassFlag)
+ if err != nil {
+ return err
+ }
+ sslMode, err := cmd.Flags().GetString(sslModeFlag)
+ if err != nil {
+ return err
+ }
+ dbName, err := cmd.Flags().GetString(dbNameFlag)
+ if err != nil {
+ return err
+ }
+ connectTimeout, err := cmd.Flags().GetDuration(connectTimeoutFlag)
+ if err != nil {
+ return err
+ }
+
+ // Validation
+ if masterUser == "" {
+ return xerrors.New("master-user is required")
+ }
+ if masterPassword == "" {
+ return xerrors.New("master-password is required")
+ }
+ if host == "" {
+ return xerrors.New("host is required")
+ }
+ if port <= 0 || port > 65535 {
+ return xerrors.New("port must be between 1 and 65535")
+ }
+ if workerUser == "" {
+ return xerrors.New("worker-user is required")
+ }
+ if workerPassword == "" {
+ return xerrors.New("worker-password is required")
+ }
+ if serverUser == "" {
+ return xerrors.New("server-user is required")
+ }
+ if serverPassword == "" {
+ return xerrors.New("server-password is required")
+ }
+ if workerUser == serverUser {
+ return xerrors.New("worker-user and server-user must be different")
+ }
+
+ // Determine database name using global blockchain and network flags
+ if dbName == "" {
+ // Use global flags from common.go (commonFlags.blockchain and commonFlags.network)
+ // e.g., blockchain="ethereum", network="mainnet" -> "chainstorage_ethereum_mainnet"
+ dbName = fmt.Sprintf("chainstorage_%s_%s", commonFlags.blockchain, commonFlags.network)
+ // Replace hyphens with underscores for valid database name
+ dbName = replaceHyphensWithUnderscores(dbName)
+ }
+
+ // Build master config
+ masterCfg := &config.PostgresConfig{
+ Host: host,
+ Port: port,
+ Database: "postgres", // Always connect to postgres database first
+ User: masterUser,
+ Password: masterPassword,
+ SSLMode: sslMode,
+ ConnectTimeout: connectTimeout,
+ }
+
+ fmt.Printf("🚀 Setting up PostgreSQL for chainstorage...\n")
+ fmt.Printf(" Database: %s\n", dbName)
+ fmt.Printf(" Worker role: %s\n", workerUser)
+ fmt.Printf(" Server role: %s\n", serverUser)
+ fmt.Printf(" Host: %s:%d\n", host, port)
+ fmt.Printf(" Blockchain: %s\n", commonFlags.blockchain)
+ fmt.Printf(" Network: %s\n", commonFlags.network)
+ fmt.Printf(" Environment: %s\n", commonFlags.env)
+ fmt.Printf("\n")
+
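+ // SetupDatabase performs the role and database creation enumerated in the help
+ // text above; roughly (illustrative SQL, the real statements live in the
+ // postgres package):
+ //   CREATE ROLE <worker> LOGIN PASSWORD '...';
+ //   CREATE ROLE <server> LOGIN PASSWORD '...';
+ //   CREATE DATABASE <db> OWNER <worker>;
+ //   GRANT SELECT ON ALL TABLES IN SCHEMA public TO <server>;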
+ return postgres.SetupDatabase(context.Background(), masterCfg, workerUser, workerPassword, serverUser, serverPassword, dbName)
+ },
+ }
+
+ // Define flags with reasonable defaults
+ cmd.Flags().String(masterUserFlag, "postgres", "Master/admin user for PostgreSQL")
+ cmd.Flags().String(masterPassFlag, "", "Master/admin password for PostgreSQL")
+ cmd.Flags().String(hostFlag, "localhost", "PostgreSQL host")
+ cmd.Flags().Int(portFlag, 5432, "PostgreSQL port")
+ cmd.Flags().String(workerUserFlag, "chainstorage_worker", "Name for the read/write worker role")
+ cmd.Flags().String(workerPassFlag, "", "Password for the worker role")
+ cmd.Flags().String(serverUserFlag, "chainstorage_server", "Name for the read-only server role")
+ cmd.Flags().String(serverPassFlag, "", "Password for the server role")
+ cmd.Flags().String(dbNameFlag, "", "Directly specify the database name to create (overrides default naming)")
+ cmd.Flags().String(sslModeFlag, "disable", "PostgreSQL SSL mode (disable, require, verify-ca, verify-full)")
+ cmd.Flags().Duration(connectTimeoutFlag, 30*time.Second, "PostgreSQL connection timeout")
+
+ // Mark required flags. MarkFlagRequired only fails when the named flag is
+ // undefined, which is a programming error; returning nil here would hand a nil
+ // command to rootCmd.AddCommand and crash later, so fail loudly instead.
+ for _, flag := range []string{masterPassFlag, workerPassFlag, serverPassFlag} {
+ if err := cmd.MarkFlagRequired(flag); err != nil {
+ panic(err)
+ }
+ }
+
+ return cmd
+}
+
+// replaceHyphensWithUnderscores converts network names like "ethereum-mainnet" to "ethereum_mainnet"
+// for valid PostgreSQL database naming
+func replaceHyphensWithUnderscores(s string) string {
+ return strings.ReplaceAll(s, "-", "_")
+}
+
+func init() {
+ rootCmd.AddCommand(newPostgresCommand())
+}
diff --git a/cmd/admin/workflow.go b/cmd/admin/workflow.go
index 274cf55..1830fd8 100644
--- a/cmd/admin/workflow.go
+++ b/cmd/admin/workflow.go
@@ -48,6 +48,8 @@ type executors struct {
CrossValidator *workflow.CrossValidator
EventBackfiller *workflow.EventBackfiller
Replicator *workflow.Replicator
+ Migrator *workflow.Migrator
+ Runtime cadence.Runtime
}
var (
@@ -72,6 +74,14 @@ var (
},
}
+ listWorkflowCmd = &cobra.Command{
+ Use: "list",
+ Short: "list running workflows",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return listWorkflows()
+ },
+ }
+
workflowFlags struct {
workflow string
workflowID string
@@ -86,11 +96,37 @@ func init() {
workflowCmd.AddCommand(startWorkflowCmd)
workflowCmd.AddCommand(stopWorkflowCmd)
+ workflowCmd.AddCommand(listWorkflowCmd)
rootCmd.AddCommand(workflowCmd)
}
+func listWorkflows() error {
+ app, executors, err := initApp()
+ if err != nil {
+ return xerrors.Errorf("failed to init app: %w", err)
+ }
+ defer app.Close()
+ ctx := context.Background()
+ // list all open workflows in the configured Cadence domain
+ workflows, err := executors.Runtime.ListOpenWorkflows(ctx, app.Config().Cadence.Domain, 0)
+ if err != nil {
+ return xerrors.Errorf("failed to list workflows: %w", err)
+ }
+ logger.Info("\nlisting all workflows: ")
+ // print one line per open workflow execution; the loop variable is named wf
+ // to avoid shadowing the imported workflow package
+ for _, wf := range workflows.Executions {
+ logger.Info("\nworkflow",
+ zap.String("workflowID", wf.Execution.GetWorkflowId()),
+ zap.String("runID", wf.Execution.GetRunId()),
+ zap.String("type", wf.Type.GetName()),
+ zap.Time("startTime", wf.StartTime.AsTime()),
+ zap.String("status", wf.Status.String()),
+ )
+ }
+ return nil
+}
func startWorkflow() error {
workflowIdentity := workflow.GetWorkflowIdentify(workflowFlags.workflow)
+ workflowId := workflowFlags.workflowID
if workflowIdentity == workflow.UnknownIdentity {
return xerrors.Errorf("invalid workflow: %v", workflowFlags.workflow)
}
@@ -101,7 +137,7 @@ func startWorkflow() error {
}
defer app.Close()
- ctx := context.Background()
+ ctx := context.WithValue(context.Background(), "workflowId", workflowId)
workflowIdentityString, err := workflowIdentity.String()
if err != nil {
return xerrors.Errorf("error parsing workflowIdentity: %w", err)
@@ -166,6 +202,12 @@ func startWorkflow() error {
return xerrors.Errorf("error converting to request type")
}
run, err = executors.Replicator.Execute(ctx, &request)
+ case workflow.MigratorIdentity:
+ request, ok := req.(workflow.MigratorRequest)
+ if !ok {
+ return xerrors.Errorf("error converting to request type")
+ }
+ run, err = executors.Migrator.Execute(ctx, &request)
default:
return xerrors.Errorf("unsupported workflow identity: %v", workflowIdentity)
}
@@ -236,6 +278,8 @@ func stopWorkflow() error {
err = executors.EventBackfiller.StopWorkflow(ctx, workflowIdentityString, reason)
case workflow.ReplicatorIdentity:
err = executors.Replicator.StopWorkflow(ctx, workflowIdentityString, reason)
+ case workflow.MigratorIdentity:
+ err = executors.Migrator.StopWorkflow(ctx, workflowIdentityString, reason)
default:
return xerrors.Errorf("unsupported workflow identity: %v", workflowIdentity)
}
diff --git a/config/chainstorage/abstract/mainnet/base.yml b/config/chainstorage/abstract/mainnet/base.yml
new file mode 100644
index 0000000..bf5fa27
--- /dev/null
+++ b/config/chainstorage/abstract/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_abstract_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_abstract_mainnet
+ transaction_table: example_chainstorage_transactions_table_abstract_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_abstract_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_abstract_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_abstract_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_abstract_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-abstract-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_ABSTRACT
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_ABSTRACT_MAINNET
+config_name: abstract_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-abstract-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/abstract/mainnet/development.yml b/config/chainstorage/abstract/mainnet/development.yml
new file mode 100644
index 0000000..3f69d99
--- /dev/null
+++ b/config/chainstorage/abstract/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-abstract-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/abstract/mainnet/local.yml b/config/chainstorage/abstract/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/abstract/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/abstract/mainnet/production.yml b/config/chainstorage/abstract/mainnet/production.yml
new file mode 100644
index 0000000..28e1581
--- /dev/null
+++ b/config/chainstorage/abstract/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-abstract-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/aptos/mainnet/base.yml b/config/chainstorage/aptos/mainnet/base.yml
index a7e0514..d9e665c 100644
--- a/config/chainstorage/aptos/mainnet/base.yml
+++ b/config/chainstorage/aptos/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_aptos_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_aptos_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_aptos_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_aptos_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_aptos_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -149,6 +160,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/arbitrum/mainnet/base.yml b/config/chainstorage/arbitrum/mainnet/base.yml
index 8185923..2aa0f52 100644
--- a/config/chainstorage/arbitrum/mainnet/base.yml
+++ b/config/chainstorage/arbitrum/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_arbitrum_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_arbitrum_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_arbitrum_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_arbitrum_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_arbitrum_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -153,6 +164,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/avacchain/mainnet/base.yml b/config/chainstorage/avacchain/mainnet/base.yml
index 20f3f43..6e4f0d4 100644
--- a/config/chainstorage/avacchain/mainnet/base.yml
+++ b/config/chainstorage/avacchain/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_avacchain_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_avacchain_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_avacchain_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_avacchain_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_avacchain_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -150,6 +161,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/avacchain/mainnet/development.yml b/config/chainstorage/avacchain/mainnet/development.yml
index e0058a3..4dbab42 100644
--- a/config/chainstorage/avacchain/mainnet/development.yml
+++ b/config/chainstorage/avacchain/mainnet/development.yml
@@ -11,3 +11,17 @@ server:
workflows:
cross_validator:
validation_start_height: 16000000
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config/chainstorage/base/goerli/base.yml b/config/chainstorage/base/goerli/base.yml
index 20f8e79..0423358 100644
--- a/config/chainstorage/base/goerli/base.yml
+++ b/config/chainstorage/base/goerli/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_base_goerli
versioned_event_table: example_chainstorage_versioned_block_events_base_goerli
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_base_goerli
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_base_goerli
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_base_goerli_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -151,6 +162,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/base/mainnet/base.yml b/config/chainstorage/base/mainnet/base.yml
index ce2169a..4e171bc 100644
--- a/config/chainstorage/base/mainnet/base.yml
+++ b/config/chainstorage/base/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_base_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_base_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_base_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_base_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_base_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -152,6 +163,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/base/mainnet/development.yml b/config/chainstorage/base/mainnet/development.yml
index af8e4a8..df41924 100644
--- a/config/chainstorage/base/mainnet/development.yml
+++ b/config/chainstorage/base/mainnet/development.yml
@@ -9,3 +9,17 @@ server:
workflows:
cross_validator:
validation_percentage: 20
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config/chainstorage/bitcoin/mainnet/base.yml b/config/chainstorage/bitcoin/mainnet/base.yml
index aeaee06..92d2ee1 100644
--- a/config/chainstorage/bitcoin/mainnet/base.yml
+++ b/config/chainstorage/bitcoin/mainnet/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_bitcoin_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_bitcoin_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_bitcoin_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_bitcoin_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_bitcoin_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -152,6 +163,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/bitcoincash/mainnet/base.yml b/config/chainstorage/bitcoincash/mainnet/base.yml
new file mode 100644
index 0000000..0d12d01
--- /dev/null
+++ b/config/chainstorage/bitcoincash/mainnet/base.yml
@@ -0,0 +1,265 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 5
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 30m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_bitcoincash_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_bitcoincash_mainnet
+ event_table: example_chainstorage_block_events_bitcoincash_mainnet
+ event_table_height_index: example_chainstorage_block_events_by_height_bitcoincash_mainnet
+ transaction_table: example_chainstorage_transactions_table_bitcoincash_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_bitcoincash_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_bitcoincash_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_bitcoincash_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_bitcoincash_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-bitcoincash-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 10m
+ blockchain: BLOCKCHAIN_BITCOINCASH
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ tx_batch_size: 100
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 0
+ stable: 0
+ feature:
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 2
+ network: NETWORK_BITCOINCASH_MAINNET
+config_name: bitcoincash_mainnet
+cron:
+ block_range_size: 2
+ disable_dlq_processor: true
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-bitcoincash-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 5
+ block_time_delta: 1h
+ event_height_delta: 5
+ event_time_delta: 1h
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ out_of_sync_node_distance: 10
+ tier: 2
+ time_since_last_block: 1h15m
+ time_since_last_event: 1h15m
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 21
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 4
+ task_list: default
+ validation_percentage: 10
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 250
+ event_gap_limit: 300
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 15m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 10s
+ checkpoint_size: 1000
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 5
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: false
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 10s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/bitcoincash/mainnet/development.yml b/config/chainstorage/bitcoincash/mainnet/development.yml
new file mode 100644
index 0000000..45916cd
--- /dev/null
+++ b/config/chainstorage/bitcoincash/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-bitcoincash-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/bitcoincash/mainnet/local.yml b/config/chainstorage/bitcoincash/mainnet/local.yml
new file mode 100644
index 0000000..e1199fb
--- /dev/null
+++ b/config/chainstorage/bitcoincash/mainnet/local.yml
@@ -0,0 +1,38 @@
+# This file is generated by "make config". DO NOT EDIT.
+chain:
+ client:
+ master:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+ validator:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
+workflows:
+ monitor:
+ failover_enabled: false
+ poller:
+ failover_enabled: false
diff --git a/config/chainstorage/bitcoincash/mainnet/production.yml b/config/chainstorage/bitcoincash/mainnet/production.yml
new file mode 100644
index 0000000..f4f5848
--- /dev/null
+++ b/config/chainstorage/bitcoincash/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-bitcoincash-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/bsc/mainnet/base.yml b/config/chainstorage/bsc/mainnet/base.yml
index f0aabd8..e09be77 100644
--- a/config/chainstorage/bsc/mainnet/base.yml
+++ b/config/chainstorage/bsc/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_bsc_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_bsc_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_bsc_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_bsc_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_bsc_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -152,6 +163,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/dogecoin/mainnet/base.yml b/config/chainstorage/dogecoin/mainnet/base.yml
index 164a3e5..6d575ca 100644
--- a/config/chainstorage/dogecoin/mainnet/base.yml
+++ b/config/chainstorage/dogecoin/mainnet/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_dogecoin_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_dogecoin_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_dogecoin_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_dogecoin_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_dogecoin_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -156,6 +167,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/goerli/base.yml b/config/chainstorage/ethereum/goerli/base.yml
index e80904c..baab7bf 100644
--- a/config/chainstorage/ethereum/goerli/base.yml
+++ b/config/chainstorage/ethereum/goerli/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_ethereum_goerli
versioned_event_table: example_chainstorage_versioned_block_events_ethereum_goerli
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereum_goerli
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereum_goerli
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereum_goerli_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -153,6 +164,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/holesky/base.yml b/config/chainstorage/ethereum/holesky/base.yml
index d77a380..b03b42e 100644
--- a/config/chainstorage/ethereum/holesky/base.yml
+++ b/config/chainstorage/ethereum/holesky/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_ethereum_holesky
versioned_event_table: example_chainstorage_versioned_block_events_ethereum_holesky
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereum_holesky
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereum_holesky
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereum_holesky_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -149,6 +160,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/holesky/beacon/base.yml b/config/chainstorage/ethereum/holesky/beacon/base.yml
index 5c16c71..e3049fa 100644
--- a/config/chainstorage/ethereum/holesky/beacon/base.yml
+++ b/config/chainstorage/ethereum/holesky/beacon/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_ethereum_holesky_beacon
versioned_event_table: example_chainstorage_versioned_block_events_ethereum_holesky_beacon
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereum_holesky_beacon
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereum_holesky
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereum_holesky_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -150,6 +161,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/mainnet/base.yml b/config/chainstorage/ethereum/mainnet/base.yml
index 1d0ae70..fa483ec 100644
--- a/config/chainstorage/ethereum/mainnet/base.yml
+++ b/config/chainstorage/ethereum/mainnet/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_ethereum_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_ethereum_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereum_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereum_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereum_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -155,6 +166,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/mainnet/beacon/base.yml b/config/chainstorage/ethereum/mainnet/beacon/base.yml
index 9a0f30a..7b8db4c 100644
--- a/config/chainstorage/ethereum/mainnet/beacon/base.yml
+++ b/config/chainstorage/ethereum/mainnet/beacon/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_ethereum_mainnet_beacon
versioned_event_table: example_chainstorage_versioned_block_events_ethereum_mainnet_beacon
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereum_mainnet_beacon
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereum_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereum_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -150,6 +161,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/ethereum/mainnet/development.yml b/config/chainstorage/ethereum/mainnet/development.yml
index 422fe06..4a10a4d 100644
--- a/config/chainstorage/ethereum/mainnet/development.yml
+++ b/config/chainstorage/ethereum/mainnet/development.yml
@@ -28,6 +28,20 @@ sla:
time_since_last_block: 3m
time_since_last_event: 3m
workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
failover_enabled: false
poller:
diff --git a/config/chainstorage/ethereumclassic/mainnet/base.yml b/config/chainstorage/ethereumclassic/mainnet/base.yml
new file mode 100644
index 0000000..9ae3234
--- /dev/null
+++ b/config/chainstorage/ethereumclassic/mainnet/base.yml
@@ -0,0 +1,271 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: example-chainstorage-ethereumclassic-mainnet-dev
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_ethereumclassic_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_ethereumclassic_mainnet
+ event_table: example_chainstorage_block_events_ethereumclassic_mainnet
+ event_table_height_index: example_chainstorage_block_events_by_height_ethclassic_main
+ transaction_table: example_chainstorage_transactions_table_ethereumclassic_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_ethereumclassic_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_ethereumclassic_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_ethereumclassic_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_ethereumclassic_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-ethereumclassic-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 12s
+ blockchain: BLOCKCHAIN_ETHEREUMCLASSIC
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 3
+ stable: 3
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 12
+ network: NETWORK_ETHEREUMCLASSIC_MAINNET
+config_name: ethereumclassic_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-ethereumclassic-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 10
+ block_time_delta: 2m
+ event_height_delta: 10
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 10
+ tier: 1
+ time_since_last_block: 2m
+ time_since_last_event: 2m
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 24
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 1000
+ checkpoint_size: 1000
+ parallelism: 4
+ task_list: default
+ validation_percentage: 1
+ validation_start_height: 15500000
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 1s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/ethereumclassic/mainnet/development.yml b/config/chainstorage/ethereumclassic/mainnet/development.yml
new file mode 100644
index 0000000..1d691b8
--- /dev/null
+++ b/config/chainstorage/ethereumclassic/mainnet/development.yml
@@ -0,0 +1,48 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-ethereumclassic-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+chain:
+ event_tag:
+ latest: 2
+ stable: 0
+ feature:
+ default_stable_event: false
+server:
+ bind_address: 0.0.0.0:9090
+sla:
+ block_height_delta: 12
+ block_time_delta: 3m
+ event_height_delta: 12
+ event_time_delta: 3m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - streamer/event_tag=1
+ - streamer/event_tag=2
+ - cross_validator
+ out_of_sync_node_distance: 12
+ time_since_last_block: 3m
+ time_since_last_event: 3m
+workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ failover_enabled: false
+ poller:
+ failover_enabled: false
diff --git a/config/chainstorage/ethereumclassic/mainnet/local.yml b/config/chainstorage/ethereumclassic/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/ethereumclassic/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/ethereumclassic/mainnet/production.yml b/config/chainstorage/ethereumclassic/mainnet/production.yml
new file mode 100644
index 0000000..3fef5f3
--- /dev/null
+++ b/config/chainstorage/ethereumclassic/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-ethereumclassic-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/fantom/mainnet/base.yml b/config/chainstorage/fantom/mainnet/base.yml
index 4002878..481cc7c 100644
--- a/config/chainstorage/fantom/mainnet/base.yml
+++ b/config/chainstorage/fantom/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_fantom_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_fantom_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_fantom_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_fantom_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_fantom_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -149,6 +160,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/litecoin/mainnet/base.yml b/config/chainstorage/litecoin/mainnet/base.yml
new file mode 100644
index 0000000..5406ffe
--- /dev/null
+++ b/config/chainstorage/litecoin/mainnet/base.yml
@@ -0,0 +1,264 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 5
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 30m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_litecoin_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_litecoin_mainnet
+ event_table: example_chainstorage_block_events_litecoin_mainnet
+ event_table_height_index: example_chainstorage_block_events_by_height_litecoin_mainnet
+ transaction_table: example_chainstorage_transactions_table_litecoin_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_litecoin_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_litecoin_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_litecoin_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_litecoin_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-litecoin-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 10m
+ blockchain: BLOCKCHAIN_LITECOIN
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 0
+ stable: 0
+ feature:
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 2
+ network: NETWORK_LITECOIN_MAINNET
+config_name: litecoin_mainnet
+cron:
+ block_range_size: 2
+ disable_dlq_processor: true
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-litecoin-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 5
+ block_time_delta: 1h
+ event_height_delta: 5
+ event_time_delta: 1h
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ out_of_sync_node_distance: 10
+ tier: 2
+ time_since_last_block: 1h15m
+ time_since_last_event: 1h15m
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 21
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 4
+ task_list: default
+ validation_percentage: 10
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 250
+ event_gap_limit: 300
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 15m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 10s
+ checkpoint_size: 1000
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 5
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: false
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 10s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/litecoin/mainnet/development.yml b/config/chainstorage/litecoin/mainnet/development.yml
new file mode 100644
index 0000000..4d9b76c
--- /dev/null
+++ b/config/chainstorage/litecoin/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-litecoin-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/litecoin/mainnet/local.yml b/config/chainstorage/litecoin/mainnet/local.yml
new file mode 100644
index 0000000..9f5cd26
--- /dev/null
+++ b/config/chainstorage/litecoin/mainnet/local.yml
@@ -0,0 +1,38 @@
+# This file is generated by "make config". DO NOT EDIT.
+chain:
+ client:
+ master:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+ validator:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ rps: 1
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
+workflows:
+ monitor:
+ failover_enabled: false
+ poller:
+ failover_enabled: false
diff --git a/config/chainstorage/litecoin/mainnet/production.yml b/config/chainstorage/litecoin/mainnet/production.yml
new file mode 100644
index 0000000..3c3c7f0
--- /dev/null
+++ b/config/chainstorage/litecoin/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-litecoin-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/megaeth/mainnet/base.yml b/config/chainstorage/megaeth/mainnet/base.yml
new file mode 100644
index 0000000..b68234b
--- /dev/null
+++ b/config/chainstorage/megaeth/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_megaeth_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_megaeth_mainnet
+ transaction_table: example_chainstorage_transactions_table_megaeth_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_megaeth_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_megaeth_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_megaeth_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_megaeth_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-megaeth-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_MEGAETH
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_MEGAETH_MAINNET
+config_name: megaeth_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-megaeth-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/megaeth/mainnet/development.yml b/config/chainstorage/megaeth/mainnet/development.yml
new file mode 100644
index 0000000..e00e7b0
--- /dev/null
+++ b/config/chainstorage/megaeth/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-megaeth-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/megaeth/mainnet/local.yml b/config/chainstorage/megaeth/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/megaeth/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/megaeth/mainnet/production.yml b/config/chainstorage/megaeth/mainnet/production.yml
new file mode 100644
index 0000000..d3c8108
--- /dev/null
+++ b/config/chainstorage/megaeth/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-megaeth-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/monad/mainnet/base.yml b/config/chainstorage/monad/mainnet/base.yml
new file mode 100644
index 0000000..218758a
--- /dev/null
+++ b/config/chainstorage/monad/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_monad_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_monad_mainnet
+ transaction_table: example_chainstorage_transactions_table_monad_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_monad_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_monad_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_monad_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_monad_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-monad-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_MONAD
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_MONAD_MAINNET
+config_name: monad_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-monad-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/monad/mainnet/development.yml b/config/chainstorage/monad/mainnet/development.yml
new file mode 100644
index 0000000..61c6690
--- /dev/null
+++ b/config/chainstorage/monad/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-monad-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/monad/mainnet/local.yml b/config/chainstorage/monad/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/monad/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/monad/mainnet/production.yml b/config/chainstorage/monad/mainnet/production.yml
new file mode 100644
index 0000000..7e476a5
--- /dev/null
+++ b/config/chainstorage/monad/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-monad-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/optimism/mainnet/base.yml b/config/chainstorage/optimism/mainnet/base.yml
index 7f5c93f..e2ca588 100644
--- a/config/chainstorage/optimism/mainnet/base.yml
+++ b/config/chainstorage/optimism/mainnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_optimism_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_optimism_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_optimism_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_optimism_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_optimism_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -149,6 +160,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/plasma/mainnet/base.yml b/config/chainstorage/plasma/mainnet/base.yml
new file mode 100644
index 0000000..c1081fc
--- /dev/null
+++ b/config/chainstorage/plasma/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_plasma_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_plasma_mainnet
+ transaction_table: example_chainstorage_transactions_table_plasma_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_plasma_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_plasma_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_plasma_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_plasma_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-plasma-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_PLASMA
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_PLASMA_MAINNET
+config_name: plasma_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-plasma-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/plasma/mainnet/development.yml b/config/chainstorage/plasma/mainnet/development.yml
new file mode 100644
index 0000000..0e0031f
--- /dev/null
+++ b/config/chainstorage/plasma/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-plasma-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/plasma/mainnet/local.yml b/config/chainstorage/plasma/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/plasma/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/plasma/mainnet/production.yml b/config/chainstorage/plasma/mainnet/production.yml
new file mode 100644
index 0000000..e9e40b6
--- /dev/null
+++ b/config/chainstorage/plasma/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-plasma-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/polygon/mainnet/base.yml b/config/chainstorage/polygon/mainnet/base.yml
index 626538d..e2e8f03 100644
--- a/config/chainstorage/polygon/mainnet/base.yml
+++ b/config/chainstorage/polygon/mainnet/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_polygon_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_polygon_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_polygon_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_polygon_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_polygon_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -158,6 +169,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/polygon/mainnet/development.yml b/config/chainstorage/polygon/mainnet/development.yml
index 78784b6..dc87202 100644
--- a/config/chainstorage/polygon/mainnet/development.yml
+++ b/config/chainstorage/polygon/mainnet/development.yml
@@ -13,3 +13,17 @@ server:
workflows:
cross_validator:
validation_percentage: 20
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config/chainstorage/polygon/testnet/base.yml b/config/chainstorage/polygon/testnet/base.yml
index 7c28545..314de88 100644
--- a/config/chainstorage/polygon/testnet/base.yml
+++ b/config/chainstorage/polygon/testnet/base.yml
@@ -22,6 +22,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_polygon_testnet
versioned_event_table: example_chainstorage_versioned_block_events_polygon_testnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_polygon_testnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_polygon_testnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_polygon_testnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -152,6 +163,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/polygon/testnet/development.yml b/config/chainstorage/polygon/testnet/development.yml
index 51a6eca..20d2f3d 100644
--- a/config/chainstorage/polygon/testnet/development.yml
+++ b/config/chainstorage/polygon/testnet/development.yml
@@ -11,5 +11,19 @@ server:
workflows:
cross_validator:
validation_percentage: 10
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
poller:
session_enabled: true
diff --git a/config/chainstorage/seismic/mainnet/base.yml b/config/chainstorage/seismic/mainnet/base.yml
new file mode 100644
index 0000000..bf99593
--- /dev/null
+++ b/config/chainstorage/seismic/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_seismic_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_seismic_mainnet
+ transaction_table: example_chainstorage_transactions_table_seismic_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_seismic_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_seismic_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_seismic_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_seismic_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-seismic-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_SEISMIC
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_SEISMIC_MAINNET
+config_name: seismic_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-seismic-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/seismic/mainnet/development.yml b/config/chainstorage/seismic/mainnet/development.yml
new file mode 100644
index 0000000..8af1399
--- /dev/null
+++ b/config/chainstorage/seismic/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-seismic-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/seismic/mainnet/local.yml b/config/chainstorage/seismic/mainnet/local.yml
new file mode 100644
index 0000000..d7af1a2
--- /dev/null
+++ b/config/chainstorage/seismic/mainnet/local.yml
@@ -0,0 +1,39 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
+aws:
+ storage:
+ data_compression: ZSTD
+chain:
+ custom_params:
+ src20_aes_key: ""
+ feature:
+ block_validation_enabled: false
+ block_start_height: 1
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group:
+ endpoints:
+ - name: monda-jsonrpc-m
+ rps: 1
+ url: https://lyron.seismicdev.net/rpc
+ weight: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: monda-jsonrpc-s
+ rps: 1
+ url: https://lyron.seismicdev.net/rpc
+ weight: 1
+ validator:
+ endpoint_group: ""
\ No newline at end of file
diff --git a/config/chainstorage/seismic/mainnet/production.yml b/config/chainstorage/seismic/mainnet/production.yml
new file mode 100644
index 0000000..01dc335
--- /dev/null
+++ b/config/chainstorage/seismic/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-seismic-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/seismic/testnet/base.yml b/config/chainstorage/seismic/testnet/base.yml
new file mode 100644
index 0000000..9b11035
--- /dev/null
+++ b/config/chainstorage/seismic/testnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_seismic_testnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_seismic_testnet
+ transaction_table: example_chainstorage_transactions_table_seismic_testnet
+ versioned_event_table: example_chainstorage_versioned_block_events_seismic_testnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_seismic_testnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_seismic_testnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_seismic_testnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-seismic-testnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_SEISMIC
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_SEISMIC_TESTNET
+config_name: seismic_testnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-seismic-testnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/seismic/testnet/development.yml b/config/chainstorage/seismic/testnet/development.yml
new file mode 100644
index 0000000..3bab82b
--- /dev/null
+++ b/config/chainstorage/seismic/testnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-seismic-testnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/seismic/testnet/local.yml b/config/chainstorage/seismic/testnet/local.yml
new file mode 100644
index 0000000..d7af1a2
--- /dev/null
+++ b/config/chainstorage/seismic/testnet/local.yml
@@ -0,0 +1,39 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
+aws:
+ storage:
+ data_compression: ZSTD
+chain:
+ custom_params:
+ src20_aes_key: ""
+ feature:
+ block_validation_enabled: false
+ block_start_height: 1
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group:
+ endpoints:
+ - name: monda-jsonrpc-m
+ rps: 1
+ url: https://lyron.seismicdev.net/rpc
+ weight: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: monda-jsonrpc-s
+ rps: 1
+ url: https://lyron.seismicdev.net/rpc
+ weight: 1
+ validator:
+ endpoint_group: ""
\ No newline at end of file
diff --git a/config/chainstorage/seismic/testnet/production.yml b/config/chainstorage/seismic/testnet/production.yml
new file mode 100644
index 0000000..88cd229
--- /dev/null
+++ b/config/chainstorage/seismic/testnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-seismic-testnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
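(Not part of the diff.) Every file added under `config/chainstorage/` carries the "generated by make config. DO NOT EDIT." banner, so hand edits belong in the corresponding `config_templates/` file, with the checked-in YAML regenerated afterwards. A plausible edit loop, with the template path inferred from the banner and the template diffs later in this change:

```bash
# Hypothetical workflow, inferred from the "DO NOT EDIT" banners:
# change the template, then regenerate the checked-in YAML.
$EDITOR config_templates/config/chainstorage/seismic/testnet/base.template.yml
make config
git diff config/chainstorage/seismic/testnet/   # review the regenerated files
```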
diff --git a/config/chainstorage/solana/mainnet/base.yml b/config/chainstorage/solana/mainnet/base.yml
index 2a8e8f7..e86f24f 100644
--- a/config/chainstorage/solana/mainnet/base.yml
+++ b/config/chainstorage/solana/mainnet/base.yml
@@ -24,6 +24,17 @@ aws:
transaction_table: example_chainstorage_transactions_table_solana_mainnet
versioned_event_table: example_chainstorage_versioned_block_events_solana_mainnet
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_solana_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_solana_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_solana_mainnet_worker
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -152,6 +163,20 @@ workflows:
task_list: default
workflow_identity: workflow.event_backfiller
workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
monitor:
activity_retry:
backoff_coefficient: 2
diff --git a/config/chainstorage/solana/mainnet/development.yml b/config/chainstorage/solana/mainnet/development.yml
index 99839d6..f2581d8 100644
--- a/config/chainstorage/solana/mainnet/development.yml
+++ b/config/chainstorage/solana/mainnet/development.yml
@@ -11,5 +11,19 @@ chain:
server:
bind_address: 0.0.0.0:9090
workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
poller:
num_blocks_to_skip: 10
diff --git a/config/chainstorage/story/mainnet/base.yml b/config/chainstorage/story/mainnet/base.yml
new file mode 100644
index 0000000..5f1fa59
--- /dev/null
+++ b/config/chainstorage/story/mainnet/base.yml
@@ -0,0 +1,268 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: ""
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_story_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_story_mainnet
+ transaction_table: example_chainstorage_transactions_table_story_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_story_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_story_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_story_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_story_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: GZIP
+cadence:
+ address: ""
+ domain: chainstorage-story-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 1
+ stable: 1
+ block_time: 2s
+ blockchain: BLOCKCHAIN_STORY
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 1
+ stable: 1
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 10
+ network: NETWORK_STORY_MAINNET
+config_name: story_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-story-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ event_height_delta: 60
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ time_since_last_event: 2m30s
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 20m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 20
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ batch_size: 100
+ checkpoint_size: 1000
+ parallelism: 10
+ task_list: default
+ validation_percentage: 100
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 0s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 0s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/story/mainnet/development.yml b/config/chainstorage/story/mainnet/development.yml
new file mode 100644
index 0000000..9e7331c
--- /dev/null
+++ b/config/chainstorage/story/mainnet/development.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-story-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config/chainstorage/story/mainnet/local.yml b/config/chainstorage/story/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/story/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/story/mainnet/production.yml b/config/chainstorage/story/mainnet/production.yml
new file mode 100644
index 0000000..50fe8f1
--- /dev/null
+++ b/config/chainstorage/story/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-story-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
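(Not part of the diff.) The story mainnet trio illustrates the layering used for every chain in this change: `base.yml` holds chain-wide defaults with placeholders (empty `bucket`, empty `cadence.address`), while `development.yml`, `production.yml`, and `local.yml` supply the environment-specific values — assuming, as the structure suggests, that the loader merges the environment file over `base.yml`. A quick way to eyeball one override:

```bash
# Sketch: compare the Cadence/Temporal address across the layered files.
grep -n '^  address:' config/chainstorage/story/mainnet/*.yml
# base.yml:        address: ""                              (placeholder)
# development.yml: address: temporal-dev.example.com:7233
# production.yml:  address: temporal.example.com:7233
```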
diff --git a/config/chainstorage/tron/mainnet/base.yml b/config/chainstorage/tron/mainnet/base.yml
new file mode 100644
index 0000000..63baa58
--- /dev/null
+++ b/config/chainstorage/tron/mainnet/base.yml
@@ -0,0 +1,273 @@
+# This file is generated by "make config". DO NOT EDIT.
+api:
+ auth: ""
+ max_num_block_files: 1000
+ max_num_blocks: 50
+ num_workers: 10
+ rate_limit:
+ global_rps: 3000
+ per_client_rps: 2000
+ streaming_batch_size: 50
+ streaming_interval: 1s
+ streaming_max_no_event_time: 10m
+aws:
+ aws_account: development
+ bucket: example-chainstorage-tron-mainnet-dev
+ dlq:
+ delay_secs: 900
+ name: example_chainstorage_blocks_tron_mainnet_dlq
+ visibility_timeout_secs: 600
+ dynamodb:
+ block_table: example_chainstorage_blocks_tron_mainnet
+ event_table: example_chainstorage_block_events_tron_mainnet
+ event_table_height_index: example_chainstorage_block_events_by_height_tron_mainnet
+ transaction_table: example_chainstorage_transactions_table_tron_mainnet
+ versioned_event_table: example_chainstorage_versioned_block_events_tron_mainnet
+ versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_tron_mainnet
+ postgres:
+ connect_timeout: 30s
+ database: chainstorage_tron_mainnet
+ host: localhost
+ max_connections: 25
+ min_connections: 5
+ password: ""
+ port: 5433
+ ssl_mode: require
+ statement_timeout: 60s
+ user: cs_tron_mainnet_worker
+ presigned_url_expiration: 30m
+ region: us-east-1
+ storage:
+ data_compression: ZSTD
+cadence:
+ address: ""
+ domain: chainstorage-tron-mainnet
+ retention_period: 7
+ tls:
+ enabled: true
+ validate_hostname: true
+chain:
+ block_start_height: 0
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 12s
+ blockchain: BLOCKCHAIN_TRON
+ client:
+ additional:
+ endpoint_group: ""
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ event_tag:
+ latest: 3
+ stable: 3
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ default_stable_event: true
+ rosetta_parser: true
+ irreversible_distance: 12
+ network: NETWORK_TRON_MAINNET
+config_name: tron_mainnet
+cron:
+ block_range_size: 4
+functional_test: ""
+gcp:
+ presigned_url_expiration: 30m
+ project: development
+sdk:
+ auth_header: ""
+ auth_token: ""
+ chainstorage_address: https://example-chainstorage-tron-mainnet
+ num_workers: 10
+ restful: true
+server:
+ bind_address: localhost:9090
+sla:
+ block_height_delta: 10
+ block_time_delta: 2m
+ event_height_delta: 10
+ event_time_delta: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+ out_of_sync_node_distance: 10
+ tier: 1
+ time_since_last_block: 2m
+ time_since_last_event: 2m
+workflows:
+ backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 2500
+ checkpoint_size: 5000
+ max_reprocessed_per_batch: 30
+ mini_batch_size: 1
+ num_concurrent_extractors: 24
+ task_list: default
+ workflow_identity: workflow.backfiller
+ workflow_run_timeout: 24h
+ benchmarker:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ child_workflow_execution_start_to_close_timeout: 60m
+ task_list: default
+ workflow_identity: workflow.benchmarker
+ workflow_run_timeout: 24h
+ cross_validator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 8
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 1000
+ checkpoint_size: 1000
+ parallelism: 4
+ task_list: default
+ validation_percentage: 1
+ validation_start_height: 15500000
+ workflow_identity: workflow.cross_validator
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ event_backfiller:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 250
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.event_backfiller
+ workflow_run_timeout: 24h
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 10s
+ batch_size: 50
+ block_gap_limit: 3000
+ checkpoint_size: 500
+ event_gap_limit: 300
+ failover_enabled: true
+ parallelism: 4
+ task_list: default
+ workflow_identity: workflow.monitor
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ poller:
+ activity_heartbeat_timeout: 2m
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 6
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ backoff_interval: 1s
+ checkpoint_size: 1000
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ fast_sync: false
+ liveness_check_enabled: true
+ liveness_check_interval: 1m
+ liveness_check_violation_limit: 10
+ max_blocks_to_sync_per_cycle: 100
+ parallelism: 10
+ session_creation_timeout: 2m
+ session_enabled: true
+ task_list: default
+ workflow_identity: workflow.poller
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 6
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ replicator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 10m
+ batch_size: 1000
+ checkpoint_size: 10000
+ mini_batch_size: 100
+ parallelism: 10
+ task_list: default
+ workflow_identity: workflow.replicator
+ workflow_run_timeout: 24h
+ streamer:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 5
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 2m
+ backoff_interval: 1s
+ batch_size: 500
+ checkpoint_size: 500
+ task_list: default
+ workflow_identity: workflow.streamer
+ workflow_retry:
+ backoff_coefficient: 1
+ initial_interval: 30s
+ maximum_attempts: 3
+ maximum_interval: 30s
+ workflow_run_timeout: 24h
+ workers:
+ - task_list: default
diff --git a/config/chainstorage/tron/mainnet/development.yml b/config/chainstorage/tron/mainnet/development.yml
new file mode 100644
index 0000000..7840da7
--- /dev/null
+++ b/config/chainstorage/tron/mainnet/development.yml
@@ -0,0 +1,48 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: development
+ bucket: example-chainstorage-tron-mainnet-dev
+cadence:
+ address: temporal-dev.example.com:7233
+chain:
+ event_tag:
+ latest: 2
+ stable: 0
+ feature:
+ default_stable_event: false
+server:
+ bind_address: 0.0.0.0:9090
+sla:
+ block_height_delta: 12
+ block_time_delta: 3m
+ event_height_delta: 12
+ event_time_delta: 3m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - streamer/event_tag=1
+ - streamer/event_tag=2
+ - cross_validator
+ out_of_sync_node_distance: 12
+ time_since_last_block: 3m
+ time_since_last_event: 3m
+workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ monitor:
+ failover_enabled: false
+ poller:
+ failover_enabled: false
diff --git a/config/chainstorage/tron/mainnet/local.yml b/config/chainstorage/tron/mainnet/local.yml
new file mode 100644
index 0000000..cc1d22d
--- /dev/null
+++ b/config/chainstorage/tron/mainnet/local.yml
@@ -0,0 +1,10 @@
+# This file is generated by "make config". DO NOT EDIT.
+gcp:
+ project: chainstorage-local
+sdk:
+ chainstorage_address: localhost:9090
+ restful: false
+storage_type:
+ blob: S3
+ dlq: SQS
+ meta: DYNAMODB
diff --git a/config/chainstorage/tron/mainnet/production.yml b/config/chainstorage/tron/mainnet/production.yml
new file mode 100644
index 0000000..632b861
--- /dev/null
+++ b/config/chainstorage/tron/mainnet/production.yml
@@ -0,0 +1,8 @@
+# This file is generated by "make config". DO NOT EDIT.
+aws:
+ aws_account: production
+ bucket: example-chainstorage-tron-mainnet-prod
+cadence:
+ address: temporal.example.com:7233
+server:
+ bind_address: 0.0.0.0:9090
diff --git a/config_templates/config/base.template.yml b/config_templates/config/base.template.yml
index ccb956f..4dca46b 100644
--- a/config_templates/config/base.template.yml
+++ b/config_templates/config/base.template.yml
@@ -21,6 +21,17 @@ aws:
versioned_event_table: example_chainstorage_versioned_block_events_{{blockchain}}_{{network}}
versioned_event_table_block_index: example_chainstorage_versioned_block_events_by_block_id_{{blockchain}}_{{network}}
transaction_table: example_chainstorage_transactions_table_{{blockchain}}_{{network}}
+ postgres:
+ host: localhost # Override with CHAINSTORAGE_AWS_POSTGRES_HOST for non-dev environments
+ port: 5433
+ user: "cs_{{blockchain}}_{{network}}_worker" # Dynamically generated username based on blockchain and network
+ password: "" # Set via CHAINSTORAGE_AWS_POSTGRES_PASSWORD env var for security
+ database: chainstorage_{{blockchain}}_{{network}}
+ ssl_mode: require # Use 'require' for production, 'disable' for local development
+ max_connections: 25 # Connection pool maximum size
+ min_connections: 5 # Connection pool minimum size
+ connect_timeout: 30s # Connection establishment timeout
+ statement_timeout: 60s # Individual statement/transaction timeout
presigned_url_expiration: 30m
region: us-east-1
storage:
@@ -222,5 +233,19 @@ workflows:
task_list: default
workflow_run_timeout: 24h
workflow_identity: workflow.replicator
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
workers:
- task_list: default
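(Not part of the diff.) The new `postgres` block in the base template documents its own override points: the inline comments name `CHAINSTORAGE_AWS_POSTGRES_HOST` and `CHAINSTORAGE_AWS_POSTGRES_PASSWORD`, and the remaining fields presumably follow the same `CHAINSTORAGE_AWS_POSTGRES_*` naming. A deploy-time sketch under that assumption:

```bash
# Sketch: point a non-dev worker at a managed Postgres instance.
# HOST and PASSWORD are named in the template comments above; the other
# variable names are assumed to follow the same pattern.
export CHAINSTORAGE_AWS_POSTGRES_HOST=postgres.internal.example.com
export CHAINSTORAGE_AWS_POSTGRES_PORT=5432
export CHAINSTORAGE_AWS_POSTGRES_PASSWORD="$DB_PASSWORD_FROM_SECRET_STORE"  # never commit this
```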
diff --git a/config_templates/config/chainstorage/abstract/mainnet/base.template.yml b/config_templates/config/chainstorage/abstract/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/abstract/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/abstract/mainnet/development.template.yml b/config_templates/config/chainstorage/abstract/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/abstract/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/abstract/mainnet/local.template.yml b/config_templates/config/chainstorage/abstract/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/abstract/mainnet/production.template.yml b/config_templates/config/chainstorage/abstract/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/aptos/mainnet/base.template.yml b/config_templates/config/chainstorage/aptos/mainnet/base.template.yml
index 72a5263..836357c 100644
--- a/config_templates/config/chainstorage/aptos/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/aptos/mainnet/base.template.yml
@@ -27,3 +27,17 @@ workflows:
max_blocks_to_sync_per_cycle: 300
streamer:
backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/arbitrum/mainnet/base.template.yml b/config_templates/config/chainstorage/arbitrum/mainnet/base.template.yml
index 8773533..06375a6 100644
--- a/config_templates/config/chainstorage/arbitrum/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/arbitrum/mainnet/base.template.yml
@@ -36,3 +36,17 @@ workflows:
validation_start_height: 22207816 # CREL nodes do not support arb_trace API which was used before the NITRO upgrade
validation_percentage: 1
irreversible_distance: 500
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/avacchain/mainnet/base.template.yml b/config_templates/config/chainstorage/avacchain/mainnet/base.template.yml
index 14ee6d1..7765449 100644
--- a/config_templates/config/chainstorage/avacchain/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/avacchain/mainnet/base.template.yml
@@ -31,3 +31,17 @@ workflows:
cross_validator:
batch_size: 1000
validation_percentage: 1
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/avacchain/mainnet/development.template.yml b/config_templates/config/chainstorage/avacchain/mainnet/development.template.yml
index a554dd5..cfb7d46 100644
--- a/config_templates/config/chainstorage/avacchain/mainnet/development.template.yml
+++ b/config_templates/config/chainstorage/avacchain/mainnet/development.template.yml
@@ -3,3 +3,17 @@ chain:
workflows:
cross_validator:
validation_start_height: 16000000
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/base/goerli/base.template.yml b/config_templates/config/chainstorage/base/goerli/base.template.yml
index 4d1b3c2..eacd006 100644
--- a/config_templates/config/chainstorage/base/goerli/base.template.yml
+++ b/config_templates/config/chainstorage/base/goerli/base.template.yml
@@ -23,3 +23,17 @@ workflows:
session_enabled: true
streamer:
backoff_interval: 2s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/base/mainnet/base.template.yml b/config_templates/config/chainstorage/base/mainnet/base.template.yml
index 01e2129..6297d5d 100644
--- a/config_templates/config/chainstorage/base/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/base/mainnet/base.template.yml
@@ -38,3 +38,17 @@ workflows:
failover_enabled: true
streamer:
backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/base/mainnet/development.template.yml b/config_templates/config/chainstorage/base/mainnet/development.template.yml
index 35e9ee6..3ba4847 100644
--- a/config_templates/config/chainstorage/base/mainnet/development.template.yml
+++ b/config_templates/config/chainstorage/base/mainnet/development.template.yml
@@ -1,3 +1,17 @@
workflows:
cross_validator:
validation_percentage: 20
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/bitcoin/mainnet/base.template.yml b/config_templates/config/chainstorage/bitcoin/mainnet/base.template.yml
index 5b7a492..abdc116 100644
--- a/config_templates/config/chainstorage/bitcoin/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/bitcoin/mainnet/base.template.yml
@@ -41,3 +41,17 @@ workflows:
parallelism: 10
streamer:
backoff_interval: 10s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/bitcoincash/mainnet/base.template.yml b/config_templates/config/chainstorage/bitcoincash/mainnet/base.template.yml
new file mode 100644
index 0000000..c7f8d27
--- /dev/null
+++ b/config_templates/config/chainstorage/bitcoincash/mainnet/base.template.yml
@@ -0,0 +1,59 @@
+api:
+ max_num_blocks: 5
+ streaming_max_no_event_time: 30m
+aws:
+ dynamodb:
+ event_table: example_chainstorage_block_events_{{blockchain}}_{{network}}
+ event_table_height_index: example_chainstorage_block_events_by_height_{{blockchain}}_{{network}}
+chain:
+ client:
+ tx_batch_size: 100
+ block_tag:
+ latest: 2
+ stable: 2
+ event_tag:
+ latest: 0
+ stable: 0
+ block_time: 10m
+ irreversible_distance: 2
+ feature:
+ rosetta_parser: true
+cron:
+ block_range_size: 2
+ disable_dlq_processor: true
+sla:
+ block_height_delta: 5
+ block_time_delta: 1h
+ out_of_sync_node_distance: 10
+ tier: 2
+ time_since_last_block: 1h15m
+ event_height_delta: 5
+ event_time_delta: 1h
+ time_since_last_event: 1h15m
+workflows:
+ backfiller:
+ num_concurrent_extractors: 21
+ monitor:
+ checkpoint_size: 250
+ poller:
+ activity_heartbeat_timeout: 15m
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 10s
+ max_blocks_to_sync_per_cycle: 5
+ parallelism: 10
+ streamer:
+ backoff_interval: 10s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/bitcoincash/mainnet/development.template.yml b/config_templates/config/chainstorage/bitcoincash/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/bitcoincash/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/bitcoincash/mainnet/local.template.yml b/config_templates/config/chainstorage/bitcoincash/mainnet/local.template.yml
new file mode 100644
index 0000000..cb48576
--- /dev/null
+++ b/config_templates/config/chainstorage/bitcoincash/mainnet/local.template.yml
@@ -0,0 +1,30 @@
+
+chain:
+ client:
+ master:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+ rps: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+ rps: 1
+ validator:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/9fab7739b91042e7903fca6001e81b23
+ weight: 1
+ rps: 1
+workflows:
+ poller:
+ failover_enabled: false
+ monitor:
+ failover_enabled: false
+
diff --git a/config_templates/config/chainstorage/bitcoincash/mainnet/production.template.yml b/config_templates/config/chainstorage/bitcoincash/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/bsc/mainnet/base.template.yml b/config_templates/config/chainstorage/bsc/mainnet/base.template.yml
index 362040a..4677444 100644
--- a/config_templates/config/chainstorage/bsc/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/bsc/mainnet/base.template.yml
@@ -23,6 +23,20 @@ workflows:
backoff_interval: 3s
streamer:
backoff_interval: 1s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
chain:
block_tag:
latest: 2
diff --git a/config_templates/config/chainstorage/dogecoin/mainnet/base.template.yml b/config_templates/config/chainstorage/dogecoin/mainnet/base.template.yml
index 855eeda..6ed28ec 100644
--- a/config_templates/config/chainstorage/dogecoin/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/dogecoin/mainnet/base.template.yml
@@ -32,3 +32,17 @@ workflows:
parallelism: 10
max_blocks_to_sync_per_cycle: 50
session_enabled: true
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/ethereum/goerli/base.template.yml b/config_templates/config/chainstorage/ethereum/goerli/base.template.yml
index 50ec4b0..5f4c1cc 100644
--- a/config_templates/config/chainstorage/ethereum/goerli/base.template.yml
+++ b/config_templates/config/chainstorage/ethereum/goerli/base.template.yml
@@ -29,3 +29,17 @@ workflows:
session_enabled: true
consensus_validation: true
consensus_validation_muted: true
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/ethereum/holesky/base.template.yml b/config_templates/config/chainstorage/ethereum/holesky/base.template.yml
index 553a2d0..d4a31a3 100644
--- a/config_templates/config/chainstorage/ethereum/holesky/base.template.yml
+++ b/config_templates/config/chainstorage/ethereum/holesky/base.template.yml
@@ -19,3 +19,17 @@ workflows:
num_concurrent_extractors: 24
poller:
session_enabled: true
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/ethereum/holesky/beacon/base.template.yml b/config_templates/config/chainstorage/ethereum/holesky/beacon/base.template.yml
index f7f9909..96d60ae 100644
--- a/config_templates/config/chainstorage/ethereum/holesky/beacon/base.template.yml
+++ b/config_templates/config/chainstorage/ethereum/holesky/beacon/base.template.yml
@@ -29,3 +29,17 @@ workflows:
session_enabled: true
monitor:
irreversible_distance: 10
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/ethereum/mainnet/beacon/base.template.yml b/config_templates/config/chainstorage/ethereum/mainnet/beacon/base.template.yml
index 60d14f5..f11bd22 100644
--- a/config_templates/config/chainstorage/ethereum/mainnet/beacon/base.template.yml
+++ b/config_templates/config/chainstorage/ethereum/mainnet/beacon/base.template.yml
@@ -30,3 +30,17 @@ workflows:
failover_enabled: true
monitor:
failover_enabled: true
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/ethereum/mainnet/development.template.yml b/config_templates/config/chainstorage/ethereum/mainnet/development.template.yml
index 401fee1..86f80ac 100644
--- a/config_templates/config/chainstorage/ethereum/mainnet/development.template.yml
+++ b/config_templates/config/chainstorage/ethereum/mainnet/development.template.yml
@@ -22,6 +22,20 @@ sla:
- streamer/event_tag=2
- cross_validator
workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
poller:
failover_enabled: false
monitor:
diff --git a/config_templates/config/chainstorage/ethereumclassic/mainnet/base.template.yml b/config_templates/config/chainstorage/ethereumclassic/mainnet/base.template.yml
new file mode 100644
index 0000000..23d7115
--- /dev/null
+++ b/config_templates/config/chainstorage/ethereumclassic/mainnet/base.template.yml
@@ -0,0 +1,56 @@
+aws:
+ aws_account: development
+ bucket: example-chainstorage-ethereumclassic-mainnet-dev
+ dlq:
+ name: example_chainstorage_blocks_ethereumclassic_mainnet_dlq
+ dynamodb:
+ block_table: example_chainstorage_blocks_ethereumclassic_mainnet
+ event_table: example_chainstorage_block_events_ethereumclassic_mainnet
+ event_table_height_index: example_chainstorage_block_events_by_height_ethclassic_main
+chain:
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 12s
+ event_tag:
+ latest: 3
+ stable: 3
+ irreversible_distance: 12
+ feature:
+ rosetta_parser: true
+ default_stable_event: true
+ block_validation_enabled: true
+ block_validation_muted: true
+sla:
+ block_height_delta: 10
+ block_time_delta: 2m
+ out_of_sync_node_distance: 10
+ tier: 1
+ time_since_last_block: 2m
+ event_height_delta: 10
+ event_time_delta: 2m
+ time_since_last_event: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ checkpoint_size: 5000
+ num_concurrent_extractors: 24
+ cross_validator:
+ batch_size: 1000
+ validation_start_height: 15500000
+ validation_percentage: 1
+ poller:
+ parallelism: 10
+ failover_enabled: true
+ session_enabled: true
+ backoff_interval: 1s
+ consensus_validation: true
+ consensus_validation_muted: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 1s
diff --git a/config_templates/config/chainstorage/ethereumclassic/mainnet/development.template.yml b/config_templates/config/chainstorage/ethereumclassic/mainnet/development.template.yml
new file mode 100644
index 0000000..86f80ac
--- /dev/null
+++ b/config_templates/config/chainstorage/ethereumclassic/mainnet/development.template.yml
@@ -0,0 +1,42 @@
+chain:
+ event_tag:
+ latest: 2
+ stable: 0
+ feature:
+ default_stable_event: false
+aws:
+ aws_account: development
+sla:
+ block_height_delta: 12
+ block_time_delta: 3m
+ out_of_sync_node_distance: 12
+ time_since_last_block: 3m
+ event_height_delta: 12
+ event_time_delta: 3m
+ time_since_last_event: 3m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - streamer/event_tag=1
+ - streamer/event_tag=2
+ - cross_validator
+workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ poller:
+ failover_enabled: false
+ monitor:
+ failover_enabled: false
diff --git a/config_templates/config/chainstorage/ethereumclassic/mainnet/local.template.yml b/config_templates/config/chainstorage/ethereumclassic/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/ethereumclassic/mainnet/production.template.yml b/config_templates/config/chainstorage/ethereumclassic/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/fantom/mainnet/base.template.yml b/config_templates/config/chainstorage/fantom/mainnet/base.template.yml
index 7356fb1..5364281 100644
--- a/config_templates/config/chainstorage/fantom/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/fantom/mainnet/base.template.yml
@@ -23,3 +23,17 @@ workflows:
fast_sync: true
streamer:
backoff_interval: 1s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/litecoin/mainnet/base.template.yml b/config_templates/config/chainstorage/litecoin/mainnet/base.template.yml
new file mode 100644
index 0000000..abdc116
--- /dev/null
+++ b/config_templates/config/chainstorage/litecoin/mainnet/base.template.yml
@@ -0,0 +1,57 @@
+api:
+ max_num_blocks: 5
+ streaming_max_no_event_time: 30m
+aws:
+ dynamodb:
+ event_table: example_chainstorage_block_events_{{blockchain}}_{{network}}
+ event_table_height_index: example_chainstorage_block_events_by_height_{{blockchain}}_{{network}}
+chain:
+ block_tag:
+ latest: 2
+ stable: 2
+ event_tag:
+ latest: 0
+ stable: 0
+ block_time: 10m
+ irreversible_distance: 2
+ feature:
+ rosetta_parser: true
+cron:
+ block_range_size: 2
+ disable_dlq_processor: true
+sla:
+ block_height_delta: 5
+ block_time_delta: 1h
+ out_of_sync_node_distance: 10
+ tier: 2
+ time_since_last_block: 1h15m
+ event_height_delta: 5
+ event_time_delta: 1h
+ time_since_last_event: 1h15m
+workflows:
+ backfiller:
+ num_concurrent_extractors: 21
+ monitor:
+ checkpoint_size: 250
+ poller:
+ activity_heartbeat_timeout: 15m
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 10s
+ max_blocks_to_sync_per_cycle: 5
+ parallelism: 10
+ streamer:
+ backoff_interval: 10s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/litecoin/mainnet/development.template.yml b/config_templates/config/chainstorage/litecoin/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/litecoin/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/litecoin/mainnet/local.template.yml b/config_templates/config/chainstorage/litecoin/mainnet/local.template.yml
new file mode 100644
index 0000000..214634d
--- /dev/null
+++ b/config_templates/config/chainstorage/litecoin/mainnet/local.template.yml
@@ -0,0 +1,30 @@
+
+chain:
+ client:
+ master:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+ rps: 1
+ slave:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+ rps: 1
+ validator:
+ endpoint_group:
+ endpoints:
+ - name: getblock
+ url: https://go.getblock.io/50d006b05722430b940d0c63e47ff893
+ weight: 1
+ rps: 1
+workflows:
+ poller:
+ failover_enabled: false
+ monitor:
+ failover_enabled: false
+
diff --git a/config_templates/config/chainstorage/litecoin/mainnet/production.template.yml b/config_templates/config/chainstorage/litecoin/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/megaeth/mainnet/base.template.yml b/config_templates/config/chainstorage/megaeth/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/megaeth/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/megaeth/mainnet/development.template.yml b/config_templates/config/chainstorage/megaeth/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/megaeth/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/megaeth/mainnet/local.template.yml b/config_templates/config/chainstorage/megaeth/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/megaeth/mainnet/production.template.yml b/config_templates/config/chainstorage/megaeth/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/monad/mainnet/base.template.yml b/config_templates/config/chainstorage/monad/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/monad/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/monad/mainnet/development.template.yml b/config_templates/config/chainstorage/monad/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/monad/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/monad/mainnet/local.template.yml b/config_templates/config/chainstorage/monad/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/monad/mainnet/production.template.yml b/config_templates/config/chainstorage/monad/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/optimism/mainnet/base.template.yml b/config_templates/config/chainstorage/optimism/mainnet/base.template.yml
index be52a5d..f2ad072 100644
--- a/config_templates/config/chainstorage/optimism/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/optimism/mainnet/base.template.yml
@@ -29,3 +29,17 @@ workflows:
monitor:
backoff_interval: 0s
parallelism: 10
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/plasma/mainnet/base.template.yml b/config_templates/config/chainstorage/plasma/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/plasma/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/plasma/mainnet/development.template.yml b/config_templates/config/chainstorage/plasma/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/plasma/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/plasma/mainnet/local.template.yml b/config_templates/config/chainstorage/plasma/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/plasma/mainnet/production.template.yml b/config_templates/config/chainstorage/plasma/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/polygon/mainnet/development.template.yml b/config_templates/config/chainstorage/polygon/mainnet/development.template.yml
index 1f9dc3b..6eeab52 100644
--- a/config_templates/config/chainstorage/polygon/mainnet/development.template.yml
+++ b/config_templates/config/chainstorage/polygon/mainnet/development.template.yml
@@ -7,3 +7,17 @@ chain:
workflows:
cross_validator:
validation_percentage: 20
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/polygon/testnet/base.template.yml b/config_templates/config/chainstorage/polygon/testnet/base.template.yml
index 7586aec..ad4896f 100644
--- a/config_templates/config/chainstorage/polygon/testnet/base.template.yml
+++ b/config_templates/config/chainstorage/polygon/testnet/base.template.yml
@@ -31,3 +31,17 @@ workflows:
checkpoint_size: 500
validation_start_height: 37000000
validation_percentage: 20
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/polygon/testnet/development.template.yml b/config_templates/config/chainstorage/polygon/testnet/development.template.yml
index fbc164f..97e0561 100644
--- a/config_templates/config/chainstorage/polygon/testnet/development.template.yml
+++ b/config_templates/config/chainstorage/polygon/testnet/development.template.yml
@@ -8,3 +8,17 @@ workflows:
session_enabled: true
cross_validator:
validation_percentage: 10
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/seismic/mainnet/base.template.yml b/config_templates/config/chainstorage/seismic/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/seismic/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/seismic/mainnet/development.template.yml b/config_templates/config/chainstorage/seismic/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/seismic/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/seismic/mainnet/local.template.yml b/config_templates/config/chainstorage/seismic/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/seismic/mainnet/production.template.yml b/config_templates/config/chainstorage/seismic/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/seismic/testnet/base.template.yml b/config_templates/config/chainstorage/seismic/testnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/seismic/testnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/seismic/testnet/development.template.yml b/config_templates/config/chainstorage/seismic/testnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/seismic/testnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/seismic/testnet/local.template.yml b/config_templates/config/chainstorage/seismic/testnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/seismic/testnet/production.template.yml b/config_templates/config/chainstorage/seismic/testnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/solana/mainnet/base.template.yml b/config_templates/config/chainstorage/solana/mainnet/base.template.yml
index 600ea96..51ac081 100644
--- a/config_templates/config/chainstorage/solana/mainnet/base.template.yml
+++ b/config_templates/config/chainstorage/solana/mainnet/base.template.yml
@@ -53,3 +53,17 @@ workflows:
session_enabled: true
streamer:
backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/solana/mainnet/development.template.yml b/config_templates/config/chainstorage/solana/mainnet/development.template.yml
index 8eba3a7..15ce33a 100644
--- a/config_templates/config/chainstorage/solana/mainnet/development.template.yml
+++ b/config_templates/config/chainstorage/solana/mainnet/development.template.yml
@@ -7,3 +7,17 @@ chain:
workflows:
poller:
num_blocks_to_skip: 10
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/story/mainnet/base.template.yml b/config_templates/config/chainstorage/story/mainnet/base.template.yml
new file mode 100644
index 0000000..6297d5d
--- /dev/null
+++ b/config_templates/config/chainstorage/story/mainnet/base.template.yml
@@ -0,0 +1,54 @@
+chain:
+ block_time: 2s
+ feature:
+ block_validation_enabled: true
+ block_validation_muted: true
+ rosetta_parser: true
+ irreversible_distance: 10
+sla:
+ block_height_delta: 60
+ block_time_delta: 2m
+ out_of_sync_node_distance: 60
+ tier: 1
+ time_since_last_block: 2m30s
+ event_height_delta: 60
+ event_time_delta: 2m
+ time_since_last_event: 2m30s
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ num_concurrent_extractors: 20
+ activity_start_to_close_timeout: 20m
+ cross_validator:
+ backoff_interval: 1s
+ parallelism: 10
+ validation_percentage: 100
+ poller:
+ backoff_interval: 0s
+ consensus_validation: true
+ consensus_validation_muted: true
+ failover_enabled: true
+ parallelism: 10
+ session_enabled: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 0s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/story/mainnet/development.template.yml b/config_templates/config/chainstorage/story/mainnet/development.template.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/config_templates/config/chainstorage/story/mainnet/development.template.yml
@@ -0,0 +1 @@
+
diff --git a/config_templates/config/chainstorage/story/mainnet/local.template.yml b/config_templates/config/chainstorage/story/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/story/mainnet/production.template.yml b/config_templates/config/chainstorage/story/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/tron/mainnet/base.template.yml b/config_templates/config/chainstorage/tron/mainnet/base.template.yml
new file mode 100644
index 0000000..d324572
--- /dev/null
+++ b/config_templates/config/chainstorage/tron/mainnet/base.template.yml
@@ -0,0 +1,83 @@
+aws:
+ aws_account: development
+ bucket: example-chainstorage-{{blockchain}}-{{network}}-dev
+ dlq:
+ name: example_chainstorage_blocks_{{blockchain}}_{{network}}_dlq
+ dynamodb:
+ event_table: example_chainstorage_block_events_{{blockchain}}_{{network}}
+ event_table_height_index: example_chainstorage_block_events_by_height_{{blockchain}}_{{network}}
+ storage:
+ data_compression: ZSTD
+chain:
+ client:
+ consensus:
+ endpoint_group: ""
+ http_timeout: 0s
+ master:
+ endpoint_group: ""
+ slave:
+ endpoint_group: ""
+ validator:
+ endpoint_group: ""
+ additional:
+ endpoint_group: ""
+ block_tag:
+ latest: 2
+ stable: 2
+ block_time: 12s
+ event_tag:
+ latest: 3
+ stable: 3
+ irreversible_distance: 12
+ feature:
+ rosetta_parser: true
+ default_stable_event: true
+ block_validation_enabled: true
+ block_validation_muted: true
+sla:
+ block_height_delta: 10
+ block_time_delta: 2m
+ out_of_sync_node_distance: 10
+ tier: 1
+ time_since_last_block: 2m
+ event_height_delta: 10
+ event_time_delta: 2m
+ time_since_last_event: 2m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - cross_validator
+workflows:
+ backfiller:
+ checkpoint_size: 5000
+ num_concurrent_extractors: 24
+ cross_validator:
+ batch_size: 1000
+ validation_start_height: 15500000
+ validation_percentage: 1
+ poller:
+ parallelism: 10
+ failover_enabled: true
+ session_enabled: true
+ backoff_interval: 1s
+ consensus_validation: true
+ consensus_validation_muted: true
+ monitor:
+ failover_enabled: true
+ streamer:
+ backoff_interval: 1s
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
diff --git a/config_templates/config/chainstorage/tron/mainnet/development.template.yml b/config_templates/config/chainstorage/tron/mainnet/development.template.yml
new file mode 100644
index 0000000..86f80ac
--- /dev/null
+++ b/config_templates/config/chainstorage/tron/mainnet/development.template.yml
@@ -0,0 +1,42 @@
+chain:
+ event_tag:
+ latest: 2
+ stable: 0
+ feature:
+ default_stable_event: false
+aws:
+ aws_account: development
+sla:
+ block_height_delta: 12
+ block_time_delta: 3m
+ out_of_sync_node_distance: 12
+ time_since_last_block: 3m
+ event_height_delta: 12
+ event_time_delta: 3m
+ time_since_last_event: 3m
+ expected_workflows:
+ - monitor
+ - poller
+ - streamer
+ - streamer/event_tag=1
+ - streamer/event_tag=2
+ - cross_validator
+workflows:
+ migrator:
+ activity_retry:
+ backoff_coefficient: 2
+ initial_interval: 10s
+ maximum_attempts: 3
+ maximum_interval: 3m
+ activity_schedule_to_close_timeout: 1h
+ activity_start_to_close_timeout: 30m
+ backoff_interval: 5s
+ batch_size: 1000
+ checkpoint_size: 5000
+ task_list: default
+ workflow_identity: workflow.migrator
+ workflow_run_timeout: 24h
+ poller:
+ failover_enabled: false
+ monitor:
+ failover_enabled: false
diff --git a/config_templates/config/chainstorage/tron/mainnet/local.template.yml b/config_templates/config/chainstorage/tron/mainnet/local.template.yml
new file mode 100644
index 0000000..e69de29
diff --git a/config_templates/config/chainstorage/tron/mainnet/production.template.yml b/config_templates/config/chainstorage/tron/mainnet/production.template.yml
new file mode 100644
index 0000000..e69de29
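Note: the {{blockchain}} and {{network}} placeholders in the tron base template (bucket, dlq, and dynamodb names) are filled in when the templates are rendered per chain and network. A hypothetical sketch of that substitution, assuming simple mustache-style replacement; the repo's actual config generator may work differently:

import re

def render(template: str, blockchain: str, network: str) -> str:
    # Replace {{blockchain}}/{{network}} tokens with concrete values.
    values = {"blockchain": blockchain, "network": network}
    return re.sub(r"\{\{(\w+)\}\}", lambda m: values[m.group(1)], template)

print(render("example_chainstorage_block_events_{{blockchain}}_{{network}}", "tron", "mainnet"))
# -> example_chainstorage_block_events_tron_mainnet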
diff --git a/docker-compose-local-dev.yml b/docker-compose-local-dev.yml
new file mode 100644
index 0000000..e7d6c9e
--- /dev/null
+++ b/docker-compose-local-dev.yml
@@ -0,0 +1,126 @@
+version: "3"
+volumes:
+ localstack_data: {}
+ temporal_postgres_data: {}
+ chainstorage_postgres_data: {}
+services:
+ localstack:
+ image: localstack/localstack:3.1.0
+ ports:
+ - 4566:4566
+ - 4510-4559:4510-4559 # external services port range
+ environment:
+ - DEBUG=1
+ - DOCKER_HOST=unix:///var/run/docker.sock
+ - AWS_DEFAULT_REGION=us-east-1
+ - AWS_ACCESS_KEY_ID=requirednotused
+ - AWS_SECRET_ACCESS_KEY=requirednotused
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ - ./bin/localstack:/docker-entrypoint-initaws.d
+ - ./bin/localstack/policies:/policies
+ - ./dev-data/localstack:/var/lib/localstack
+ temporal-postgres:
+ image: postgres:15.6-alpine
+ ports:
+ - ${TEMPORAL_POSTGRES_PORT:-5432}:5432
+ environment:
+ - POSTGRES_USER=temporal
+ - POSTGRES_PASSWORD=temporal
+ - POSTGRES_DB=temporal
+ volumes:
+ - ./dev-data/temporal-pg:/var/lib/postgresql/data
+ - ./scripts/init-temporal-postgres.sh:/docker-entrypoint-initdb.d/init-temporal-postgres.sh
+
+ chainstorage-postgres:
+ image: postgres:15.6-alpine
+ ports:
+ - ${CHAINSTORAGE_POSTGRES_PORT:-5433}:5432
+ environment:
+ - POSTGRES_USER=postgres
+ - POSTGRES_PASSWORD=postgres
+ - POSTGRES_DB=postgres
+ # Shared passwords for the per-network roles created by the init script
+ - CHAINSTORAGE_WORKER_PASSWORD=worker_password
+ - CHAINSTORAGE_SERVER_PASSWORD=server_password
+ volumes:
+ - ./dev-data/chainstorage-pg:/var/lib/postgresql/data
+ - ./scripts/init-local-postgres.sh:/docker-entrypoint-initdb.d/init-local-postgres.sh
+
+ temporal:
+ image: temporalio/auto-setup:1.22.4
+ ports:
+ - 7233:7233
+ labels:
+ service_group: temporal
+ environment:
+ - DB=postgresql
+ - DB_PORT=5432
+ - POSTGRES_USER=temporal
+ - POSTGRES_PWD=temporal
+ - POSTGRES_SEEDS=temporal-postgres
+ - USE_HOSTNAME_IP=true
+ restart: always
+ depends_on:
+ - temporal-postgres
+ temporal-ui:
+ image: temporalio/ui:2.23.0
+ ports:
+ - 8088:8080
+ labels:
+ service_group: temporal-web
+ environment:
+ - TEMPORAL_CSRF_COOKIE_INSECURE=true
+ - TEMPORAL_ADDRESS=temporal:7233
+ depends_on:
+ - temporal
+ temporal-admin-tools:
+ depends_on:
+ - temporal
+ environment:
+ - TEMPORAL_ADDRESS=temporal:7233
+ - TEMPORAL_CLI_ADDRESS=temporal:7233
+ image: temporalio/admin-tools:1.22.0
+ stdin_open: true
+ tty: true
+ s3manager:
+ image: cloudlena/s3manager:latest
+ ports:
+ - 8081:8080
+ environment:
+ - ENDPOINT=localstack:4566
+ - REGION=us-east-1
+ - ACCESS_KEY_ID=requirednotused
+ - SECRET_ACCESS_KEY=requirednotused
+ - USE_SSL=false
+ depends_on:
+ - localstack
+ dynamodb-admin:
+ image: aaronshaf/dynamodb-admin:4.6.1
+ ports:
+ - 8082:8001
+ environment:
+ - DYNAMO_ENDPOINT=localstack:4566
+ - REGION=us-east-1
+ - ACCESS_KEY_ID=requirednotused
+ - SECRET_ACCESS_KEY=requirednotused
+ - USE_SSL=false
+ depends_on:
+ - localstack
+ # chainstorage-server:
+ # image: pika-chainstorage
+ # command: ["/app/server"]
+ # ports:
+ # - 9090:9090
+ # environment:
+ # - CHAINSTORAGE_CONFIG=tron_mainnet
+ # - CHAINSTORAGE_SDK_AUTH_HEADER=cb-nft-api-token
+ # - CHAINSTORAGE_SDK_AUTH_TOKEN=123321
+ # depends_on:
+ # - localstack
+ # - temporal
+ # restart: always
+ # network_mode: host
+
+# SETUP: run 'docker compose -f docker-compose-local-dev.yml up -d'.
+# Databases, roles, and permissions are created automatically by the init scripts.
\ No newline at end of file
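Note: with the stack from docker-compose-local-dev.yml running, LocalStack serves the S3 and DynamoDB APIs on localhost:4566. A minimal sketch for poking at it, assuming boto3 is installed (boto3 is not part of this change; credentials mirror the placeholder values in the compose file):

import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="requirednotused",
    aws_secret_access_key="requirednotused",
)
print(s3.list_buckets()["Buckets"])  # buckets created by the LocalStack init scripts, if any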
diff --git a/docker-compose-testing.yml b/docker-compose-testing.yml
index db72801..80346d0 100644
--- a/docker-compose-testing.yml
+++ b/docker-compose-testing.yml
@@ -2,7 +2,7 @@ version: "3"
services:
localstack:
- image: localstack/localstack:2.3.2
+ image: localstack/localstack:3.1.0
ports:
- 4566:4566
- 4510-4559:4510-4559 # external services port range
@@ -14,3 +14,23 @@ services:
- AWS_SECRET_ACCESS_KEY=requirednotused
volumes:
- /var/run/docker.sock:/var/run/docker.sock
+ postgres:
+ image: postgres:13
+ ports:
+ - "5433:5432"
+ environment:
+ - POSTGRES_USER=postgres
+ - POSTGRES_PASSWORD=postgres
+ - POSTGRES_DB=postgres
+ # Shared passwords for the per-network roles created by the init script
+ - CHAINSTORAGE_WORKER_PASSWORD=worker_password
+ - CHAINSTORAGE_SERVER_PASSWORD=server_password
+ command: >
+ postgres
+ -c ssl=on
+ -c ssl_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
+ -c ssl_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
+ volumes:
+ # Creates roles and databases automatically
+ - ./scripts/init-local-postgres.sh:/docker-entrypoint-initdb.d/init-local-postgres.sh
+ restart: always
\ No newline at end of file
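Note: the testing compose file now starts Postgres with ssl=on (using the image's snakeoil certificate) on host port 5433. A minimal connectivity check, assuming psycopg2 is available (not part of this change); credentials mirror the POSTGRES_* values above:

import psycopg2

conn = psycopg2.connect(
    host="localhost",
    port=5433,
    user="postgres",
    password="postgres",
    dbname="postgres",
    sslmode="require",  # encrypts the connection; 'require' does not verify the snakeoil cert
)
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()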
diff --git a/gen/src/python/coinbase/c3/common/common_pb2.py b/gen/src/python/coinbase/c3/common/common_pb2.py
new file mode 100644
index 0000000..26dd40c
--- /dev/null
+++ b/gen/src/python/coinbase/c3/common/common_pb2.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/c3/common/common.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/c3/common/common.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x63oinbase/c3/common/common.proto\x12\x12\x63oinbase.c3.common*\x9f\x03\n\nBlockchain\x12\x16\n\x12\x42LOCKCHAIN_UNKNOWN\x10\x00\x12\x15\n\x11\x42LOCKCHAIN_SOLANA\x10\x0b\x12\x16\n\x12\x42LOCKCHAIN_BITCOIN\x10\x10\x12\x17\n\x13\x42LOCKCHAIN_ETHEREUM\x10\x11\x12\x1a\n\x16\x42LOCKCHAIN_BITCOINCASH\x10\x12\x12\x17\n\x13\x42LOCKCHAIN_LITECOIN\x10\x13\x12\x17\n\x13\x42LOCKCHAIN_DOGECOIN\x10\x1a\x12\x13\n\x0f\x42LOCKCHAIN_TRON\x10\x1e\x12\x12\n\x0e\x42LOCKCHAIN_BSC\x10\x1f\x12\x18\n\x14\x42LOCKCHAIN_AVACCHAIN\x10 \x12\x16\n\x12\x42LOCKCHAIN_POLYGON\x10#\x12\x17\n\x13\x42LOCKCHAIN_OPTIMISM\x10\'\x12\x17\n\x13\x42LOCKCHAIN_ARBITRUM\x10)\x12\x14\n\x10\x42LOCKCHAIN_APTOS\x10/\x12\x15\n\x11\x42LOCKCHAIN_FANTOM\x10\x33\x12\x13\n\x0f\x42LOCKCHAIN_BASE\x10\x38\x12\x14\n\x10\x42LOCKCHAIN_STORY\x10<*\xd5\x07\n\x07Network\x12\x13\n\x0fNETWORK_UNKNOWN\x10\x00\x12\x1a\n\x16NETWORK_SOLANA_MAINNET\x10\x16\x12\x1a\n\x16NETWORK_SOLANA_TESTNET\x10\x17\x12\x1b\n\x17NETWORK_BITCOIN_MAINNET\x10!\x12\x1b\n\x17NETWORK_BITCOIN_TESTNET\x10\"\x12\x1c\n\x18NETWORK_ETHEREUM_MAINNET\x10#\x12\x1c\n\x18NETWORK_ETHEREUM_TESTNET\x10$\x12\x1f\n\x1bNETWORK_BITCOINCASH_MAINNET\x10%\x12\x1f\n\x1bNETWORK_BITCOINCASH_TESTNET\x10&\x12\x1c\n\x18NETWORK_LITECOIN_MAINNET\x10\'\x12\x1c\n\x18NETWORK_LITECOIN_TESTNET\x10(\x12\x18\n\x14NETWORK_TRON_MAINNET\x10@\x12\x18\n\x14NETWORK_TRON_TESTNET\x10\x41\x12\x1b\n\x17NETWORK_ETHEREUM_GOERLI\x10\x42\x12\x1c\n\x18NETWORK_DOGECOIN_MAINNET\x10\x38\x12\x1c\n\x18NETWORK_DOGECOIN_TESTNET\x10\x39\x12\x17\n\x13NETWORK_BSC_MAINNET\x10\x46\x12\x17\n\x13NETWORK_BSC_TESTNET\x10G\x12\x1d\n\x19NETWORK_AVACCHAIN_MAINNET\x10H\x12\x1d\n\x19NETWORK_AVACCHAIN_TESTNET\x10I\x12\x1b\n\x17NETWORK_POLYGON_MAINNET\x10N\x12\x1b\n\x17NETWORK_POLYGON_TESTNET\x10O\x12\x1c\n\x18NETWORK_OPTIMISM_MAINNET\x10V\x12\x1c\n\x18NETWORK_OPTIMISM_TESTNET\x10W\x12\x1c\n\x18NETWORK_ARBITRUM_MAINNET\x10[\x12\x1c\n\x18NETWORK_ARBITRUM_TESTNET\x10\\\x12\x19\n\x15NETWORK_APTOS_MAINNET\x10g\x12\x19\n\x15NETWORK_APTOS_TESTNET\x10h\x12\x1a\n\x16NETWORK_FANTOM_MAINNET\x10o\x12\x1a\n\x16NETWORK_FANTOM_TESTNET\x10p\x12\x18\n\x14NETWORK_BASE_MAINNET\x10{\x12\x17\n\x13NETWORK_BASE_GOERLI\x10}\x12\x1d\n\x18NETWORK_ETHEREUM_HOLESKY\x10\x88\x01\x12\x1a\n\x15NETWORK_STORY_MAINNET\x10\x8c\x01b\x06proto3')
diff --git a/gen/src/python/coinbase/chainstorage/api_pb2.py b/gen/src/python/coinbase/chainstorage/api_pb2.py
new file mode 100644
index 0000000..eda3b19
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/api_pb2.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/api.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/api.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from coinbase.chainstorage import blockchain_pb2 as coinbase_dot_chainstorage_dot_blockchain__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x63oinbase/chainstorage/api.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a&coinbase/chainstorage/blockchain.proto\"\xf3\x01\n\tBlockFile\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0c\n\x04hash\x18\x02 \x01(\t\x12\x13\n\x0bparent_hash\x18\x03 \x01(\t\x12\x0e\n\x06height\x18\x04 \x01(\x04\x12\x10\n\x08\x66ile_url\x18\x05 \x01(\t\x12\x15\n\rparent_height\x18\x06 \x01(\x04\x12\x0f\n\x07skipped\x18\x07 \x01(\x08\x12\x37\n\x0b\x63ompression\x18\x08 \x01(\x0e\x32\".coinbase.chainstorage.Compression\x12\x33\n\x0f\x62lock_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xfb\x01\n\x0f\x42lockchainEvent\x12\x14\n\x08sequence\x18\x01 \x01(\tB\x02\x18\x01\x12\x39\n\x04type\x18\x02 \x01(\x0e\x32+.coinbase.chainstorage.BlockchainEvent.Type\x12\x35\n\x05\x62lock\x18\x03 \x01(\x0b\x32&.coinbase.chainstorage.BlockIdentifier\x12\x11\n\tevent_tag\x18\x04 \x01(\r\x12\x14\n\x0csequence_num\x18\x05 \x01(\x03\"7\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x42LOCK_ADDED\x10\x01\x12\x11\n\rBLOCK_REMOVED\x10\x02\"$\n\x15GetLatestBlockRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\"\x87\x01\n\x16GetLatestBlockResponse\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0c\n\x04hash\x18\x02 \x01(\t\x12\x13\n\x0bparent_hash\x18\x03 \x01(\t\x12\x0e\n\x06height\x18\x04 \x01(\x04\x12-\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"@\n\x13GetBlockFileRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\x0c\n\x04hash\x18\x03 \x01(\t\"F\n\x14GetBlockFileResponse\x12.\n\x04\x66ile\x18\x01 \x01(\x0b\x32 .coinbase.chainstorage.BlockFile\"T\n\x1bGetBlockFilesByRangeRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x14\n\x0cstart_height\x18\x02 \x01(\x04\x12\x12\n\nend_height\x18\x03 \x01(\x04\"O\n\x1cGetBlockFilesByRangeResponse\x12/\n\x05\x66iles\x18\x01 \x03(\x0b\x32 .coinbase.chainstorage.BlockFile\"?\n\x12GetRawBlockRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\x0c\n\x04hash\x18\x03 \x01(\t\"B\n\x13GetRawBlockResponse\x12+\n\x05\x62lock\x18\x01 \x01(\x0b\x32\x1c.coinbase.chainstorage.Block\"S\n\x1aGetRawBlocksByRangeRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x14\n\x0cstart_height\x18\x02 \x01(\x04\x12\x12\n\nend_height\x18\x03 \x01(\x04\"K\n\x1bGetRawBlocksByRangeResponse\x12,\n\x06\x62locks\x18\x01 \x03(\x0b\x32\x1c.coinbase.chainstorage.Block\"B\n\x15GetNativeBlockRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\x0c\n\x04hash\x18\x03 \x01(\t\"K\n\x16GetNativeBlockResponse\x12\x31\n\x05\x62lock\x18\x01 \x01(\x0b\x32\".coinbase.chainstorage.NativeBlock\"V\n\x1dGetNativeBlocksByRangeRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x14\n\x0cstart_height\x18\x02 \x01(\x04\x12\x12\n\nend_height\x18\x03 \x01(\x04\"T\n\x1eGetNativeBlocksByRangeResponse\x12\x32\n\x06\x62locks\x18\x01 \x03(\x0b\x32\".coinbase.chainstorage.NativeBlock\"C\n\x16GetRosettaBlockRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\x0c\n\x04hash\x18\x03 \x01(\t\"M\n\x17GetRosettaBlockResponse\x12\x32\n\x05\x62lock\x18\x01 \x01(\x0b\x32#.coinbase.chainstorage.RosettaBlock\"W\n\x1eGetRosettaBlocksByRangeRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x14\n\x0cstart_height\x18\x02 \x01(\x04\x12\x12\n\nend_height\x18\x03 \x01(\x04\"V\n\x1fGetRosettaBlocksByRangeResponse\x12\x33\n\x06\x62locks\x18\x01 \x03(\x0b\x32#.coinbase.chainstorage.RosettaBlock\"w\n\x12\x43hainEventsRequest\x12\"\n\x1ainitial_position_in_stream\x18\x01 
\x01(\t\x12\x14\n\x08sequence\x18\x02 \x01(\tB\x02\x18\x01\x12\x11\n\tevent_tag\x18\x03 \x01(\r\x12\x14\n\x0csequence_num\x18\x04 \x01(\x03\"L\n\x13\x43hainEventsResponse\x12\x35\n\x05\x65vent\x18\x01 \x01(\x0b\x32&.coinbase.chainstorage.BlockchainEvent\"\x92\x01\n\x15GetChainEventsRequest\x12\x14\n\x08sequence\x18\x01 \x01(\tB\x02\x18\x01\x12\"\n\x1ainitial_position_in_stream\x18\x02 \x01(\t\x12\x16\n\x0emax_num_events\x18\x03 \x01(\x04\x12\x11\n\tevent_tag\x18\x04 \x01(\r\x12\x14\n\x0csequence_num\x18\x05 \x01(\x03\"P\n\x16GetChainEventsResponse\x12\x36\n\x06\x65vents\x18\x01 \x03(\x0b\x32&.coinbase.chainstorage.BlockchainEvent\"\x19\n\x17GetChainMetadataRequest\"\xd1\x01\n\x18GetChainMetadataResponse\x12\x18\n\x10latest_block_tag\x18\x08 \x01(\r\x12\x18\n\x10stable_block_tag\x18\t \x01(\r\x12\x18\n\x10latest_event_tag\x18\n \x01(\r\x12\x18\n\x10stable_event_tag\x18\x0b \x01(\r\x12\x1a\n\x12\x62lock_start_height\x18\x0c \x01(\x04\x12\x1d\n\x15irreversible_distance\x18\r \x01(\x04\x12\x12\n\nblock_time\x18\x0e \x01(\t\"\x83\x01\n\x1dGetVersionedChainEventRequest\x12\x16\n\x0e\x66rom_event_tag\x18\x01 \x01(\r\x12\x19\n\rfrom_sequence\x18\x02 \x01(\tB\x02\x18\x01\x12\x14\n\x0cto_event_tag\x18\x03 \x01(\r\x12\x19\n\x11\x66rom_sequence_num\x18\x04 \x01(\x03\"W\n\x1eGetVersionedChainEventResponse\x12\x35\n\x05\x65vent\x18\x01 \x01(\x0b\x32&.coinbase.chainstorage.BlockchainEvent\"E\n\x1cGetBlockByTransactionRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x18\n\x10transaction_hash\x18\x02 \x01(\t\"W\n\x1dGetBlockByTransactionResponse\x12\x36\n\x06\x62locks\x18\x01 \x03(\x0b\x32&.coinbase.chainstorage.BlockIdentifier\"D\n\x1bGetNativeTransactionRequest\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x18\n\x10transaction_hash\x18\x02 \x01(\t\"^\n\x1cGetNativeTransactionResponse\x12>\n\x0ctransactions\x18\x01 \x03(\x0b\x32(.coinbase.chainstorage.NativeTransaction\"l\n\x1eGetVerifiedAccountStateRequest\x12J\n\x03req\x18\x01 \x01(\x0b\x32=.coinbase.chainstorage.InternalGetVerifiedAccountStateRequest\"h\n\x1fGetVerifiedAccountStateResponse\x12\x45\n\x08response\x18\x01 
\x01(\x0b\x32\x33.coinbase.chainstorage.ValidateAccountStateResponse*+\n\x0b\x43ompression\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04GZIP\x10\x01\x12\x08\n\x04ZSTD\x10\x02*+\n\x0fInitialPosition\x12\x0c\n\x08\x45\x41RLIEST\x10\x00\x12\n\n\x06LATEST\x10\x01\x32\xaa\x0f\n\x0c\x43hainStorage\x12m\n\x0eGetLatestBlock\x12,.coinbase.chainstorage.GetLatestBlockRequest\x1a-.coinbase.chainstorage.GetLatestBlockResponse\x12g\n\x0cGetBlockFile\x12*.coinbase.chainstorage.GetBlockFileRequest\x1a+.coinbase.chainstorage.GetBlockFileResponse\x12\x7f\n\x14GetBlockFilesByRange\x12\x32.coinbase.chainstorage.GetBlockFilesByRangeRequest\x1a\x33.coinbase.chainstorage.GetBlockFilesByRangeResponse\x12\x64\n\x0bGetRawBlock\x12).coinbase.chainstorage.GetRawBlockRequest\x1a*.coinbase.chainstorage.GetRawBlockResponse\x12|\n\x13GetRawBlocksByRange\x12\x31.coinbase.chainstorage.GetRawBlocksByRangeRequest\x1a\x32.coinbase.chainstorage.GetRawBlocksByRangeResponse\x12m\n\x0eGetNativeBlock\x12,.coinbase.chainstorage.GetNativeBlockRequest\x1a-.coinbase.chainstorage.GetNativeBlockResponse\x12\x85\x01\n\x16GetNativeBlocksByRange\x12\x34.coinbase.chainstorage.GetNativeBlocksByRangeRequest\x1a\x35.coinbase.chainstorage.GetNativeBlocksByRangeResponse\x12p\n\x0fGetRosettaBlock\x12-.coinbase.chainstorage.GetRosettaBlockRequest\x1a..coinbase.chainstorage.GetRosettaBlockResponse\x12\x88\x01\n\x17GetRosettaBlocksByRange\x12\x35.coinbase.chainstorage.GetRosettaBlocksByRangeRequest\x1a\x36.coinbase.chainstorage.GetRosettaBlocksByRangeResponse\x12l\n\x11StreamChainEvents\x12).coinbase.chainstorage.ChainEventsRequest\x1a*.coinbase.chainstorage.ChainEventsResponse0\x01\x12m\n\x0eGetChainEvents\x12,.coinbase.chainstorage.GetChainEventsRequest\x1a-.coinbase.chainstorage.GetChainEventsResponse\x12s\n\x10GetChainMetadata\x12..coinbase.chainstorage.GetChainMetadataRequest\x1a/.coinbase.chainstorage.GetChainMetadataResponse\x12\x85\x01\n\x16GetVersionedChainEvent\x12\x34.coinbase.chainstorage.GetVersionedChainEventRequest\x1a\x35.coinbase.chainstorage.GetVersionedChainEventResponse\x12\x82\x01\n\x15GetBlockByTransaction\x12\x33.coinbase.chainstorage.GetBlockByTransactionRequest\x1a\x34.coinbase.chainstorage.GetBlockByTransactionResponse\x12\x7f\n\x14GetNativeTransaction\x12\x32.coinbase.chainstorage.GetNativeTransactionRequest\x1a\x33.coinbase.chainstorage.GetNativeTransactionResponse\x12\x88\x01\n\x17GetVerifiedAccountState\x12\x35.coinbase.chainstorage.GetVerifiedAccountStateRequest\x1a\x36.coinbase.chainstorage.GetVerifiedAccountStateResponseB?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.api_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_BLOCKCHAINEVENT'].fields_by_name['sequence']._loaded_options = None
+ _globals['_BLOCKCHAINEVENT'].fields_by_name['sequence']._serialized_options = b'\030\001'
+ _globals['_CHAINEVENTSREQUEST'].fields_by_name['sequence']._loaded_options = None
+ _globals['_CHAINEVENTSREQUEST'].fields_by_name['sequence']._serialized_options = b'\030\001'
+ _globals['_GETCHAINEVENTSREQUEST'].fields_by_name['sequence']._loaded_options = None
+ _globals['_GETCHAINEVENTSREQUEST'].fields_by_name['sequence']._serialized_options = b'\030\001'
+ _globals['_GETVERSIONEDCHAINEVENTREQUEST'].fields_by_name['from_sequence']._loaded_options = None
+ _globals['_GETVERSIONEDCHAINEVENTREQUEST'].fields_by_name['from_sequence']._serialized_options = b'\030\001'
+ _globals['_COMPRESSION']._serialized_start=3485
+ _globals['_COMPRESSION']._serialized_end=3528
+ _globals['_INITIALPOSITION']._serialized_start=3530
+ _globals['_INITIALPOSITION']._serialized_end=3573
+ _globals['_BLOCKFILE']._serialized_start=132
+ _globals['_BLOCKFILE']._serialized_end=375
+ _globals['_BLOCKCHAINEVENT']._serialized_start=378
+ _globals['_BLOCKCHAINEVENT']._serialized_end=629
+ _globals['_BLOCKCHAINEVENT_TYPE']._serialized_start=574
+ _globals['_BLOCKCHAINEVENT_TYPE']._serialized_end=629
+ _globals['_GETLATESTBLOCKREQUEST']._serialized_start=631
+ _globals['_GETLATESTBLOCKREQUEST']._serialized_end=667
+ _globals['_GETLATESTBLOCKRESPONSE']._serialized_start=670
+ _globals['_GETLATESTBLOCKRESPONSE']._serialized_end=805
+ _globals['_GETBLOCKFILEREQUEST']._serialized_start=807
+ _globals['_GETBLOCKFILEREQUEST']._serialized_end=871
+ _globals['_GETBLOCKFILERESPONSE']._serialized_start=873
+ _globals['_GETBLOCKFILERESPONSE']._serialized_end=943
+ _globals['_GETBLOCKFILESBYRANGEREQUEST']._serialized_start=945
+ _globals['_GETBLOCKFILESBYRANGEREQUEST']._serialized_end=1029
+ _globals['_GETBLOCKFILESBYRANGERESPONSE']._serialized_start=1031
+ _globals['_GETBLOCKFILESBYRANGERESPONSE']._serialized_end=1110
+ _globals['_GETRAWBLOCKREQUEST']._serialized_start=1112
+ _globals['_GETRAWBLOCKREQUEST']._serialized_end=1175
+ _globals['_GETRAWBLOCKRESPONSE']._serialized_start=1177
+ _globals['_GETRAWBLOCKRESPONSE']._serialized_end=1243
+ _globals['_GETRAWBLOCKSBYRANGEREQUEST']._serialized_start=1245
+ _globals['_GETRAWBLOCKSBYRANGEREQUEST']._serialized_end=1328
+ _globals['_GETRAWBLOCKSBYRANGERESPONSE']._serialized_start=1330
+ _globals['_GETRAWBLOCKSBYRANGERESPONSE']._serialized_end=1405
+ _globals['_GETNATIVEBLOCKREQUEST']._serialized_start=1407
+ _globals['_GETNATIVEBLOCKREQUEST']._serialized_end=1473
+ _globals['_GETNATIVEBLOCKRESPONSE']._serialized_start=1475
+ _globals['_GETNATIVEBLOCKRESPONSE']._serialized_end=1550
+ _globals['_GETNATIVEBLOCKSBYRANGEREQUEST']._serialized_start=1552
+ _globals['_GETNATIVEBLOCKSBYRANGEREQUEST']._serialized_end=1638
+ _globals['_GETNATIVEBLOCKSBYRANGERESPONSE']._serialized_start=1640
+ _globals['_GETNATIVEBLOCKSBYRANGERESPONSE']._serialized_end=1724
+ _globals['_GETROSETTABLOCKREQUEST']._serialized_start=1726
+ _globals['_GETROSETTABLOCKREQUEST']._serialized_end=1793
+ _globals['_GETROSETTABLOCKRESPONSE']._serialized_start=1795
+ _globals['_GETROSETTABLOCKRESPONSE']._serialized_end=1872
+ _globals['_GETROSETTABLOCKSBYRANGEREQUEST']._serialized_start=1874
+ _globals['_GETROSETTABLOCKSBYRANGEREQUEST']._serialized_end=1961
+ _globals['_GETROSETTABLOCKSBYRANGERESPONSE']._serialized_start=1963
+ _globals['_GETROSETTABLOCKSBYRANGERESPONSE']._serialized_end=2049
+ _globals['_CHAINEVENTSREQUEST']._serialized_start=2051
+ _globals['_CHAINEVENTSREQUEST']._serialized_end=2170
+ _globals['_CHAINEVENTSRESPONSE']._serialized_start=2172
+ _globals['_CHAINEVENTSRESPONSE']._serialized_end=2248
+ _globals['_GETCHAINEVENTSREQUEST']._serialized_start=2251
+ _globals['_GETCHAINEVENTSREQUEST']._serialized_end=2397
+ _globals['_GETCHAINEVENTSRESPONSE']._serialized_start=2399
+ _globals['_GETCHAINEVENTSRESPONSE']._serialized_end=2479
+ _globals['_GETCHAINMETADATAREQUEST']._serialized_start=2481
+ _globals['_GETCHAINMETADATAREQUEST']._serialized_end=2506
+ _globals['_GETCHAINMETADATARESPONSE']._serialized_start=2509
+ _globals['_GETCHAINMETADATARESPONSE']._serialized_end=2718
+ _globals['_GETVERSIONEDCHAINEVENTREQUEST']._serialized_start=2721
+ _globals['_GETVERSIONEDCHAINEVENTREQUEST']._serialized_end=2852
+ _globals['_GETVERSIONEDCHAINEVENTRESPONSE']._serialized_start=2854
+ _globals['_GETVERSIONEDCHAINEVENTRESPONSE']._serialized_end=2941
+ _globals['_GETBLOCKBYTRANSACTIONREQUEST']._serialized_start=2943
+ _globals['_GETBLOCKBYTRANSACTIONREQUEST']._serialized_end=3012
+ _globals['_GETBLOCKBYTRANSACTIONRESPONSE']._serialized_start=3014
+ _globals['_GETBLOCKBYTRANSACTIONRESPONSE']._serialized_end=3101
+ _globals['_GETNATIVETRANSACTIONREQUEST']._serialized_start=3103
+ _globals['_GETNATIVETRANSACTIONREQUEST']._serialized_end=3171
+ _globals['_GETNATIVETRANSACTIONRESPONSE']._serialized_start=3173
+ _globals['_GETNATIVETRANSACTIONRESPONSE']._serialized_end=3267
+ _globals['_GETVERIFIEDACCOUNTSTATEREQUEST']._serialized_start=3269
+ _globals['_GETVERIFIEDACCOUNTSTATEREQUEST']._serialized_end=3377
+ _globals['_GETVERIFIEDACCOUNTSTATERESPONSE']._serialized_start=3379
+ _globals['_GETVERIFIEDACCOUNTSTATERESPONSE']._serialized_end=3483
+ _globals['_CHAINSTORAGE']._serialized_start=3576
+ _globals['_CHAINSTORAGE']._serialized_end=5538
+# @@protoc_insertion_point(module_scope)
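Note: the generated message classes in api_pb2.py can be used standalone, independent of the gRPC stubs. A small sketch, assuming gen/src/python is on PYTHONPATH; the field names (tag, start_height, end_height) come from the serialized descriptor above:

from coinbase.chainstorage import api_pb2

req = api_pb2.GetBlockFilesByRangeRequest(tag=2, start_height=100, end_height=200)
data = req.SerializeToString()  # protobuf wire-format bytes
parsed = api_pb2.GetBlockFilesByRangeRequest.FromString(data)
assert parsed.end_height == 200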
diff --git a/gen/src/python/coinbase/chainstorage/api_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/api_pb2_grpc.py
new file mode 100644
index 0000000..080639f
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/api_pb2_grpc.py
@@ -0,0 +1,742 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+from coinbase.chainstorage import api_pb2 as coinbase_dot_chainstorage_dot_api__pb2
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/api_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
+
+class ChainStorageStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetLatestBlock = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetLatestBlock',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockResponse.FromString,
+ _registered_method=True)
+ self.GetBlockFile = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetBlockFile',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileResponse.FromString,
+ _registered_method=True)
+ self.GetBlockFilesByRange = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetBlockFilesByRange',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeResponse.FromString,
+ _registered_method=True)
+ self.GetRawBlock = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetRawBlock',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockResponse.FromString,
+ _registered_method=True)
+ self.GetRawBlocksByRange = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetRawBlocksByRange',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeResponse.FromString,
+ _registered_method=True)
+ self.GetNativeBlock = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetNativeBlock',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockResponse.FromString,
+ _registered_method=True)
+ self.GetNativeBlocksByRange = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetNativeBlocksByRange',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeResponse.FromString,
+ _registered_method=True)
+ self.GetRosettaBlock = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetRosettaBlock',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockResponse.FromString,
+ _registered_method=True)
+ self.GetRosettaBlocksByRange = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetRosettaBlocksByRange',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeResponse.FromString,
+ _registered_method=True)
+ self.StreamChainEvents = channel.unary_stream(
+ '/coinbase.chainstorage.ChainStorage/StreamChainEvents',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.ChainEventsRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.ChainEventsResponse.FromString,
+ _registered_method=True)
+ self.GetChainEvents = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetChainEvents',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsResponse.FromString,
+ _registered_method=True)
+ self.GetChainMetadata = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetChainMetadata',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataResponse.FromString,
+ _registered_method=True)
+ self.GetVersionedChainEvent = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetVersionedChainEvent',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventResponse.FromString,
+ _registered_method=True)
+ self.GetBlockByTransaction = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetBlockByTransaction',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionResponse.FromString,
+ _registered_method=True)
+ self.GetNativeTransaction = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetNativeTransaction',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionResponse.FromString,
+ _registered_method=True)
+ self.GetVerifiedAccountState = channel.unary_unary(
+ '/coinbase.chainstorage.ChainStorage/GetVerifiedAccountState',
+ request_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateRequest.SerializeToString,
+ response_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateResponse.FromString,
+ _registered_method=True)
+
+
+class ChainStorageServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def GetLatestBlock(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetBlockFile(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetBlockFilesByRange(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetRawBlock(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetRawBlocksByRange(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetNativeBlock(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetNativeBlocksByRange(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetRosettaBlock(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetRosettaBlocksByRange(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def StreamChainEvents(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetChainEvents(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetChainMetadata(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetVersionedChainEvent(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetBlockByTransaction(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetNativeTransaction(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetVerifiedAccountState(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ChainStorageServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetLatestBlock': grpc.unary_unary_rpc_method_handler(
+ servicer.GetLatestBlock,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockResponse.SerializeToString,
+ ),
+ 'GetBlockFile': grpc.unary_unary_rpc_method_handler(
+ servicer.GetBlockFile,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileResponse.SerializeToString,
+ ),
+ 'GetBlockFilesByRange': grpc.unary_unary_rpc_method_handler(
+ servicer.GetBlockFilesByRange,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeResponse.SerializeToString,
+ ),
+ 'GetRawBlock': grpc.unary_unary_rpc_method_handler(
+ servicer.GetRawBlock,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockResponse.SerializeToString,
+ ),
+ 'GetRawBlocksByRange': grpc.unary_unary_rpc_method_handler(
+ servicer.GetRawBlocksByRange,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeResponse.SerializeToString,
+ ),
+ 'GetNativeBlock': grpc.unary_unary_rpc_method_handler(
+ servicer.GetNativeBlock,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockResponse.SerializeToString,
+ ),
+ 'GetNativeBlocksByRange': grpc.unary_unary_rpc_method_handler(
+ servicer.GetNativeBlocksByRange,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeResponse.SerializeToString,
+ ),
+ 'GetRosettaBlock': grpc.unary_unary_rpc_method_handler(
+ servicer.GetRosettaBlock,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockResponse.SerializeToString,
+ ),
+ 'GetRosettaBlocksByRange': grpc.unary_unary_rpc_method_handler(
+ servicer.GetRosettaBlocksByRange,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeResponse.SerializeToString,
+ ),
+ 'StreamChainEvents': grpc.unary_stream_rpc_method_handler(
+ servicer.StreamChainEvents,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.ChainEventsRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.ChainEventsResponse.SerializeToString,
+ ),
+ 'GetChainEvents': grpc.unary_unary_rpc_method_handler(
+ servicer.GetChainEvents,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsResponse.SerializeToString,
+ ),
+ 'GetChainMetadata': grpc.unary_unary_rpc_method_handler(
+ servicer.GetChainMetadata,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataResponse.SerializeToString,
+ ),
+ 'GetVersionedChainEvent': grpc.unary_unary_rpc_method_handler(
+ servicer.GetVersionedChainEvent,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventResponse.SerializeToString,
+ ),
+ 'GetBlockByTransaction': grpc.unary_unary_rpc_method_handler(
+ servicer.GetBlockByTransaction,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionResponse.SerializeToString,
+ ),
+ 'GetNativeTransaction': grpc.unary_unary_rpc_method_handler(
+ servicer.GetNativeTransaction,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionResponse.SerializeToString,
+ ),
+ 'GetVerifiedAccountState': grpc.unary_unary_rpc_method_handler(
+ servicer.GetVerifiedAccountState,
+ request_deserializer=coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateRequest.FromString,
+ response_serializer=coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'coinbase.chainstorage.ChainStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('coinbase.chainstorage.ChainStorage', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ChainStorage(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def GetLatestBlock(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetLatestBlock',
+ coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetLatestBlockResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetBlockFile(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetBlockFile',
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockFileResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetBlockFilesByRange(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetBlockFilesByRange',
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockFilesByRangeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetRawBlock(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetRawBlock',
+ coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetRawBlockResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetRawBlocksByRange(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetRawBlocksByRange',
+ coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetRawBlocksByRangeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetNativeBlock(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetNativeBlock',
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlockResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetNativeBlocksByRange(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetNativeBlocksByRange',
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeBlocksByRangeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetRosettaBlock(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetRosettaBlock',
+ coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlockResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetRosettaBlocksByRange(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetRosettaBlocksByRange',
+ coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetRosettaBlocksByRangeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def StreamChainEvents(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/StreamChainEvents',
+ coinbase_dot_chainstorage_dot_api__pb2.ChainEventsRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.ChainEventsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetChainEvents(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetChainEvents',
+ coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetChainEventsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetChainMetadata(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetChainMetadata',
+ coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetChainMetadataResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetVersionedChainEvent(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetVersionedChainEvent',
+ coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetVersionedChainEventResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetBlockByTransaction(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetBlockByTransaction',
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetBlockByTransactionResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetNativeTransaction(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetNativeTransaction',
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetNativeTransactionResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetVerifiedAccountState(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/coinbase.chainstorage.ChainStorage/GetVerifiedAccountState',
+ coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateRequest.SerializeToString,
+ coinbase_dot_chainstorage_dot_api__pb2.GetVerifiedAccountStateResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
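
The generated `ChainStorage` class above exposes each RPC as a static helper built on `grpc.experimental`, which the generated code itself flags as an experimental API. A minimal usage sketch follows; the target address is an assumption for illustration, not part of the diff:

```python
# Hypothetical sketch: calling the generated experimental client API.
# 'localhost:9090' is an assumed local endpoint.
from coinbase.chainstorage import api_pb2, api_pb2_grpc

request = api_pb2.GetLatestBlockRequest()
response = api_pb2_grpc.ChainStorage.GetLatestBlock(
    request,
    target='localhost:9090',
    insecure=True,  # plaintext channel; only suitable for local development
)
print(response)
```

For long-lived clients, the conventional pattern of creating a `grpc.Channel` once and wrapping it in the generated `ChainStorageStub` keeps connection management explicit instead of relying on the experimental per-call helpers.
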
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2.py
new file mode 100644
index 0000000..a983aff
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_aptos.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_aptos.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,coinbase/chainstorage/blockchain_aptos.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1e\n\rAptosBlobdata\x12\r\n\x05\x62lock\x18\x01 \x01(\x0c\"\x7f\n\nAptosBlock\x12\x32\n\x06header\x18\x01 \x01(\x0b\x32\".coinbase.chainstorage.AptosHeader\x12=\n\x0ctransactions\x18\x02 \x03(\x0b\x32\'.coinbase.chainstorage.AptosTransaction\"g\n\x0b\x41ptosHeader\x12\x14\n\x0c\x62lock_height\x18\x01 \x01(\x04\x12\x12\n\nblock_hash\x18\x02 \x01(\t\x12.\n\nblock_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd5\x05\n\x10\x41ptosTransaction\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12\x14\n\x0c\x62lock_height\x18\x02 \x01(\x04\x12-\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x39\n\x04info\x18\x04 \x01(\x0b\x32+.coinbase.chainstorage.AptosTransactionInfo\x12\x45\n\x04type\x18\x05 \x01(\x0e\x32\x37.coinbase.chainstorage.AptosTransaction.TransactionType\x12N\n\x0e\x62lock_metadata\x18\x64 \x01(\x0b\x32\x34.coinbase.chainstorage.AptosBlockMetadataTransactionH\x00\x12\x41\n\x07genesis\x18\x65 \x01(\x0b\x32..coinbase.chainstorage.AptosGenesisTransactionH\x00\x12R\n\x10state_checkpoint\x18\x66 \x01(\x0b\x32\x36.coinbase.chainstorage.AptosStateCheckpointTransactionH\x00\x12;\n\x04user\x18g \x01(\x0b\x32+.coinbase.chainstorage.AptosUserTransactionH\x00\x12\x45\n\tvalidator\x18h \x01(\x0b\x32\x30.coinbase.chainstorage.AptosValidatorTransactionH\x00\"r\n\x0fTransactionType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07GENESIS\x10\x01\x12\x12\n\x0e\x42LOCK_METADATA\x10\x02\x12\x14\n\x10STATE_CHECKPOINT\x10\x03\x12\x08\n\x04USER\x10\x04\x12\r\n\tVALIDATOR\x10\x05\x42\n\n\x08txn_data\"\xad\x02\n\x14\x41ptosTransactionInfo\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x19\n\x11state_change_hash\x18\x02 \x01(\t\x12\x17\n\x0f\x65vent_root_hash\x18\x03 \x01(\t\x12\x1f\n\x15state_checkpoint_hash\x18\x04 \x01(\tH\x00\x12\x10\n\x08gas_used\x18\x05 \x01(\x04\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x11\n\tvm_status\x18\x07 \x01(\t\x12\x1d\n\x15\x61\x63\x63umulator_root_hash\x18\x08 \x01(\t\x12;\n\x07\x63hanges\x18\t \x03(\x0b\x32*.coinbase.chainstorage.AptosWriteSetChangeB \n\x1eoptional_state_checkpoint_hash\"\x95\x05\n\x13\x41ptosWriteSetChange\x12=\n\x04type\x18\x01 \x01(\x0e\x32/.coinbase.chainstorage.AptosWriteSetChange.Type\x12\x41\n\rdelete_module\x18\x64 \x01(\x0b\x32(.coinbase.chainstorage.AptosDeleteModuleH\x00\x12\x45\n\x0f\x64\x65lete_resource\x18\x65 \x01(\x0b\x32*.coinbase.chainstorage.AptosDeleteResourceH\x00\x12H\n\x11\x64\x65lete_table_item\x18\x66 \x01(\x0b\x32+.coinbase.chainstorage.AptosDeleteTableItemH\x00\x12?\n\x0cwrite_module\x18g \x01(\x0b\x32\'.coinbase.chainstorage.AptosWriteModuleH\x00\x12\x43\n\x0ewrite_resource\x18h \x01(\x0b\x32).coinbase.chainstorage.AptosWriteResourceH\x00\x12\x46\n\x10write_table_item\x18i \x01(\x0b\x32*.coinbase.chainstorage.AptosWriteTableItemH\x00\"\x92\x01\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x11\n\rDELETE_MODULE\x10\x01\x12\x13\n\x0f\x44\x45LETE_RESOURCE\x10\x02\x12\x15\n\x11\x44\x45LETE_TABLE_ITEM\x10\x03\x12\x10\n\x0cWRITE_MODULE\x10\x04\x12\x12\n\x0eWRITE_RESOURCE\x10\x05\x12\x14\n\x10WRITE_TABLE_ITEM\x10\x06\x42\x08\n\x06\x63hange\"v\n\x11\x41ptosDeleteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\t\x12\x38\n\x06module\x18\x03 \x01(\x0b\x32(.coinbase.chainstorage.AptosMoveModuleId\"P\n\x13\x41ptosDeleteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\t\x12\x10\n\x08resource\x18\x03 \x01(\t\"\x86\x01\n\x14\x41ptosDeleteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x39\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32+.coinbase.chainstorage.AptosDeleteTableData\"5\n\x14\x41ptosDeleteTableData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t\"y\n\x10\x41ptosWriteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\t\x12<\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32..coinbase.chainstorage.AptosMoveModuleBytecode\"]\n\x12\x41ptosWriteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\t\x12\x10\n\x08type_str\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t\"\x97\x01\n\x13\x41ptosWriteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\r\n\x05value\x18\x04 \x01(\t\x12<\n\x04\x64\x61ta\x18\x05 \x01(\x0b\x32..coinbase.chainstorage.AptosWriteTableItemData\"[\n\x17\x41ptosWriteTableItemData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x12\n\nvalue_type\x18\x04 \x01(\t\"\xd4\x01\n\x1d\x41ptosBlockMetadataTransaction\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65poch\x18\x02 \x01(\x04\x12\r\n\x05round\x18\x03 \x01(\x04\x12\x31\n\x06\x65vents\x18\x04 \x03(\x0b\x32!.coinbase.chainstorage.AptosEvent\x12#\n\x1bprevious_block_votes_bitvec\x18\x05 \x01(\x0c\x12\x10\n\x08proposer\x18\x06 \x01(\t\x12\x1f\n\x17\x66\x61iled_proposer_indices\x18\x07 \x03(\r\"t\n\nAptosEvent\x12\x31\n\x03key\x18\x01 \x01(\x0b\x32$.coinbase.chainstorage.AptosEventKey\x12\x17\n\x0fsequence_number\x18\x02 \x01(\x04\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t\"A\n\rAptosEventKey\x12\x17\n\x0f\x63reation_number\x18\x01 \x01(\x04\x12\x17\n\x0f\x61\x63\x63ount_address\x18\x02 \x01(\t\"!\n\x1f\x41ptosStateCheckpointTransaction\"\x83\x01\n\x17\x41ptosGenesisTransaction\x12\x35\n\x07payload\x18\x01 \x01(\x0b\x32$.coinbase.chainstorage.AptosWriteSet\x12\x31\n\x06\x65vents\x18\x02 \x03(\x0b\x32!.coinbase.chainstorage.AptosEvent\"\xb4\x02\n\rAptosWriteSet\x12\x41\n\x0ewrite_set_type\x18\x01 \x01(\x0e\x32).coinbase.chainstorage.AptosWriteSet.Type\x12\x46\n\x10script_write_set\x18\x64 \x01(\x0b\x32*.coinbase.chainstorage.AptosScriptWriteSetH\x00\x12\x46\n\x10\x64irect_write_set\x18\x65 \x01(\x0b\x32*.coinbase.chainstorage.AptosDirectWriteSetH\x00\"C\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x14\n\x10SCRIPT_WRITE_SET\x10\x01\x12\x14\n\x10\x44IRECT_WRITE_SET\x10\x02\x42\x0b\n\twrite_set\"d\n\x13\x41ptosScriptWriteSet\x12\x12\n\nexecute_as\x18\x01 \x01(\t\x12\x39\n\x06script\x18\x02 \x01(\x0b\x32).coinbase.chainstorage.AptosScriptPayload\"\x8e\x01\n\x13\x41ptosDirectWriteSet\x12\x44\n\x10write_set_change\x18\x01 \x03(\x0b\x32*.coinbase.chainstorage.AptosWriteSetChange\x12\x31\n\x06\x65vents\x18\x02 \x03(\x0b\x32!.coinbase.chainstorage.AptosEvent\"\x8e\x01\n\x14\x41ptosUserTransaction\x12\x43\n\x07request\x18\x01 \x01(\x0b\x32\x32.coinbase.chainstorage.AptosUserTransactionRequest\x12\x31\n\x06\x65vents\x18\x02 \x03(\x0b\x32!.coinbase.chainstorage.AptosEvent\"\xb0\x02\n\x1b\x41ptosUserTransactionRequest\x12\x0e\n\x06sender\x18\x01 \x01(\t\x12\x17\n\x0fsequence_number\x18\x02 \x01(\x04\x12\x16\n\x0emax_gas_amount\x18\x03 \x01(\x04\x12\x16\n\x0egas_unit_price\x18\x04 \x01(\x04\x12=\n\x19\x65xpiration_timestamp_secs\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x07payload\x18\x06 \x01(\x0b\x32..coinbase.chainstorage.AptosTransactionPayload\x12\x38\n\tsignature\x18\x07 \x01(\x0b\x32%.coinbase.chainstorage.AptosSignature\"\xf7\x04\n\x17\x41ptosTransactionPayload\x12\x41\n\x04type\x18\x01 \x01(\x0e\x32\x33.coinbase.chainstorage.AptosTransactionPayload.Type\x12R\n\x16\x65ntry_function_payload\x18\x64 \x01(\x0b\x32\x30.coinbase.chainstorage.AptosEntryFunctionPayloadH\x00\x12\x43\n\x0escript_payload\x18\x65 \x01(\x0b\x32).coinbase.chainstorage.AptosScriptPayloadH\x00\x12P\n\x15module_bundle_payload\x18\x66 \x01(\x0b\x32/.coinbase.chainstorage.AptosModuleBundlePayloadH\x00\x12H\n\x11write_set_payload\x18g \x01(\x0b\x32+.coinbase.chainstorage.AptosWriteSetPayloadH\x00\x12G\n\x10multisig_payload\x18h \x01(\x0b\x32+.coinbase.chainstorage.AptosMultisigPayloadH\x00\"\x8f\x01\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x1a\n\x16\x45NTRY_FUNCTION_PAYLOAD\x10\x01\x12\x12\n\x0eSCRIPT_PAYLOAD\x10\x02\x12\x19\n\x15MODULE_BUNDLE_PAYLOAD\x10\x03\x12\x15\n\x11WRITE_SET_PAYLOAD\x10\x04\x12\x14\n\x10MULTISIG_PAYLOAD\x10\x05\x42\t\n\x07payload\"\x85\x01\n\x19\x41ptosEntryFunctionPayload\x12=\n\x08\x66unction\x18\x01 \x01(\x0b\x32+.coinbase.chainstorage.AptosEntryFunctionId\x12\x16\n\x0etype_arguments\x18\x02 \x03(\t\x12\x11\n\targuments\x18\x03 \x03(\x0c\"g\n\x14\x41ptosEntryFunctionId\x12\x38\n\x06module\x18\x01 \x01(\x0b\x32(.coinbase.chainstorage.AptosMoveModuleId\x12\x15\n\rfunction_name\x18\x02 \x01(\t\"2\n\x11\x41ptosMoveModuleId\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"}\n\x12\x41ptosScriptPayload\x12<\n\x04\x63ode\x18\x01 \x01(\x0b\x32..coinbase.chainstorage.AptosMoveScriptBytecode\x12\x16\n\x0etype_arguments\x18\x02 \x03(\t\x12\x11\n\targuments\x18\x03 \x03(\x0c\"b\n\x17\x41ptosMoveScriptBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\t\x12\x35\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32(.coinbase.chainstorage.AptosMoveFunction\"\xab\x02\n\x11\x41ptosMoveFunction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\nvisibility\x18\x02 \x01(\x0e\x32-.coinbase.chainstorage.AptosMoveFunction.Type\x12\x10\n\x08is_entry\x18\x03 \x01(\x08\x12U\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x38.coinbase.chainstorage.AptosMoveFunctionGenericTypeParam\x12\x0e\n\x06params\x18\x05 \x03(\t\x12\x0e\n\x06return\x18\x06 \x03(\t\"<\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07PRIVATE\x10\x01\x12\n\n\x06PUBLIC\x10\x02\x12\n\n\x06\x46RIEND\x10\x03\"8\n!AptosMoveFunctionGenericTypeParam\x12\x13\n\x0b\x63onstraints\x18\x01 \x03(\t\"[\n\x18\x41ptosModuleBundlePayload\x12?\n\x07modules\x18\x01 \x03(\x0b\x32..coinbase.chainstorage.AptosMoveModuleBytecode\"`\n\x17\x41ptosMoveModuleBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\t\x12\x33\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32&.coinbase.chainstorage.AptosMoveModule\"\xe9\x01\n\x0f\x41ptosMoveModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x39\n\x07\x66riends\x18\x03 \x03(\x0b\x32(.coinbase.chainstorage.AptosMoveModuleId\x12\x43\n\x11\x65xposed_functions\x18\x04 \x03(\x0b\x32(.coinbase.chainstorage.AptosMoveFunction\x12\x37\n\x07structs\x18\x05 \x03(\x0b\x32&.coinbase.chainstorage.AptosMoveStruct\"\xd7\x01\n\x0f\x41ptosMoveStruct\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tis_native\x18\x02 \x01(\x08\x12\x11\n\tabilities\x18\x03 \x03(\t\x12S\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x36.coinbase.chainstorage.AptosMoveStructGenericTypeParam\x12;\n\x06\x66ields\x18\x05 \x03(\x0b\x32+.coinbase.chainstorage.AptosMoveStructField\"6\n\x1f\x41ptosMoveStructGenericTypeParam\x12\x13\n\x0b\x63onstraints\x18\x01 \x03(\t\"2\n\x14\x41ptosMoveStructField\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"O\n\x14\x41ptosWriteSetPayload\x12\x37\n\twrite_set\x18\x01 \x01(\x0b\x32$.coinbase.chainstorage.AptosWriteSet\"\xa7\x01\n\x14\x41ptosMultisigPayload\x12\x18\n\x10multisig_address\x18\x01 \x01(\t\x12U\n\x13transaction_payload\x18\x02 \x01(\x0b\x32\x36.coinbase.chainstorage.AptosMultisigTransactionPayloadH\x00\x42\x1e\n\x1coptional_transaction_payload\"\x80\x02\n\x1f\x41ptosMultisigTransactionPayload\x12I\n\x04type\x18\x01 \x01(\x0e\x32;.coinbase.chainstorage.AptosMultisigTransactionPayload.Type\x12R\n\x16\x65ntry_function_payload\x18\x64 \x01(\x0b\x32\x30.coinbase.chainstorage.AptosEntryFunctionPayloadH\x00\"3\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x1a\n\x16\x45NTRY_FUNCTION_PAYLOAD\x10\x01\x42\t\n\x07payload\"\xa8\x04\n\x0e\x41ptosSignature\x12\x38\n\x04type\x18\x01 \x01(\x0e\x32*.coinbase.chainstorage.AptosSignature.Type\x12?\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32,.coinbase.chainstorage.AptosEd25519SignatureH\x00\x12J\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32\x31.coinbase.chainstorage.AptosMultiEd25519SignatureH\x00\x12\x46\n\x0bmulti_agent\x18\x04 \x01(\x0b\x32/.coinbase.chainstorage.AptosMultiAgentSignatureH\x00\x12\x42\n\tfee_payer\x18\x05 \x01(\x0b\x32-.coinbase.chainstorage.AptosFeePayerSignatureH\x00\x12J\n\rsingle_sender\x18\x06 \x01(\x0b\x32\x31.coinbase.chainstorage.AptosSingleSenderSignatureH\x00\"j\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07\x45\x44\x32\x35\x35\x31\x39\x10\x01\x12\x11\n\rMULTI_ED25519\x10\x02\x12\x0f\n\x0bMULTI_AGENT\x10\x03\x12\r\n\tFEE_PAYER\x10\x04\x12\x11\n\rSINGLE_SENDER\x10\x05\x42\x0b\n\tsignature\">\n\x15\x41ptosEd25519Signature\x12\x12\n\npublic_key\x18\x01 \x01(\t\x12\x11\n\tsignature\x18\x02 \x01(\t\"t\n\x1a\x41ptosMultiEd25519Signature\x12\x13\n\x0bpublic_keys\x18\x01 \x03(\t\x12\x12\n\nsignatures\x18\x02 \x03(\t\x12\x11\n\tthreshold\x18\x03 \x01(\r\x12\x1a\n\x12public_key_indices\x18\x04 \x01(\t\"\xc5\x01\n\x18\x41ptosMultiAgentSignature\x12<\n\x06sender\x18\x01 \x01(\x0b\x32,.coinbase.chainstorage.AptosAccountSignature\x12\"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12G\n\x11secondary_signers\x18\x03 \x03(\x0b\x32,.coinbase.chainstorage.AptosAccountSignature\"\xa6\x02\n\x16\x41ptosFeePayerSignature\x12<\n\x06sender\x18\x01 \x01(\x0b\x32,.coinbase.chainstorage.AptosAccountSignature\x12\"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12G\n\x11secondary_signers\x18\x03 \x03(\x0b\x32,.coinbase.chainstorage.AptosAccountSignature\x12\x46\n\x10\x66\x65\x65_payer_signer\x18\x04 \x01(\x0b\x32,.coinbase.chainstorage.AptosAccountSignature\x12\x19\n\x11\x66\x65\x65_payer_address\x18\x05 \x01(\t\"C\n\x1a\x41ptosSingleSenderSignature\x12\x12\n\npublic_key\x18\x01 \x01(\t\x12\x11\n\tsignature\x18\x02 \x01(\t\"@\n\x17\x41ptosSingleKeySignature\x12\x12\n\npublic_key\x18\x01 \x01(\t\x12\x11\n\tsignature\x18\x02 \x01(\t\"^\n\x16\x41ptosMultiKeySignature\x12\x13\n\x0bpublic_keys\x18\x01 \x03(\t\x12\x12\n\nsignatures\x18\x02 \x03(\t\x12\x1b\n\x13signatures_required\x18\x03 \x01(\r\"\xda\x03\n\x15\x41ptosAccountSignature\x12?\n\x04type\x18\x01 \x01(\x0e\x32\x31.coinbase.chainstorage.AptosAccountSignature.Type\x12?\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32,.coinbase.chainstorage.AptosEd25519SignatureH\x00\x12J\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32\x31.coinbase.chainstorage.AptosMultiEd25519SignatureH\x00\x12\x44\n\nsingle_key\x18\x05 \x01(\x0b\x32..coinbase.chainstorage.AptosSingleKeySignatureH\x00\x12\x42\n\tmulti_key\x18\x06 \x01(\x0b\x32-.coinbase.chainstorage.AptosMultiKeySignatureH\x00\"\\\n\x04Type\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07\x45\x44\x32\x35\x35\x31\x39\x10\x01\x12\x11\n\rMULTI_ED25519\x10\x02\x12\x0e\n\nSINGLE_KEY\x10\x04\x12\r\n\tMULTI_KEY\x10\x05\"\x04\x08\x03\x10\x03\x42\x0b\n\tsignature\"N\n\x19\x41ptosValidatorTransaction\x12\x31\n\x06\x65vents\x18\x01 \x03(\x0b\x32!.coinbase.chainstorage.AptosEventB?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_aptos_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_APTOSBLOBDATA']._serialized_start=104
+ _globals['_APTOSBLOBDATA']._serialized_end=134
+ _globals['_APTOSBLOCK']._serialized_start=136
+ _globals['_APTOSBLOCK']._serialized_end=263
+ _globals['_APTOSHEADER']._serialized_start=265
+ _globals['_APTOSHEADER']._serialized_end=368
+ _globals['_APTOSTRANSACTION']._serialized_start=371
+ _globals['_APTOSTRANSACTION']._serialized_end=1096
+ _globals['_APTOSTRANSACTION_TRANSACTIONTYPE']._serialized_start=970
+ _globals['_APTOSTRANSACTION_TRANSACTIONTYPE']._serialized_end=1084
+ _globals['_APTOSTRANSACTIONINFO']._serialized_start=1099
+ _globals['_APTOSTRANSACTIONINFO']._serialized_end=1400
+ _globals['_APTOSWRITESETCHANGE']._serialized_start=1403
+ _globals['_APTOSWRITESETCHANGE']._serialized_end=2064
+ _globals['_APTOSWRITESETCHANGE_TYPE']._serialized_start=1908
+ _globals['_APTOSWRITESETCHANGE_TYPE']._serialized_end=2054
+ _globals['_APTOSDELETEMODULE']._serialized_start=2066
+ _globals['_APTOSDELETEMODULE']._serialized_end=2184
+ _globals['_APTOSDELETERESOURCE']._serialized_start=2186
+ _globals['_APTOSDELETERESOURCE']._serialized_end=2266
+ _globals['_APTOSDELETETABLEITEM']._serialized_start=2269
+ _globals['_APTOSDELETETABLEITEM']._serialized_end=2403
+ _globals['_APTOSDELETETABLEDATA']._serialized_start=2405
+ _globals['_APTOSDELETETABLEDATA']._serialized_end=2458
+ _globals['_APTOSWRITEMODULE']._serialized_start=2460
+ _globals['_APTOSWRITEMODULE']._serialized_end=2581
+ _globals['_APTOSWRITERESOURCE']._serialized_start=2583
+ _globals['_APTOSWRITERESOURCE']._serialized_end=2676
+ _globals['_APTOSWRITETABLEITEM']._serialized_start=2679
+ _globals['_APTOSWRITETABLEITEM']._serialized_end=2830
+ _globals['_APTOSWRITETABLEITEMDATA']._serialized_start=2832
+ _globals['_APTOSWRITETABLEITEMDATA']._serialized_end=2923
+ _globals['_APTOSBLOCKMETADATATRANSACTION']._serialized_start=2926
+ _globals['_APTOSBLOCKMETADATATRANSACTION']._serialized_end=3138
+ _globals['_APTOSEVENT']._serialized_start=3140
+ _globals['_APTOSEVENT']._serialized_end=3256
+ _globals['_APTOSEVENTKEY']._serialized_start=3258
+ _globals['_APTOSEVENTKEY']._serialized_end=3323
+ _globals['_APTOSSTATECHECKPOINTTRANSACTION']._serialized_start=3325
+ _globals['_APTOSSTATECHECKPOINTTRANSACTION']._serialized_end=3358
+ _globals['_APTOSGENESISTRANSACTION']._serialized_start=3361
+ _globals['_APTOSGENESISTRANSACTION']._serialized_end=3492
+ _globals['_APTOSWRITESET']._serialized_start=3495
+ _globals['_APTOSWRITESET']._serialized_end=3803
+ _globals['_APTOSWRITESET_TYPE']._serialized_start=3723
+ _globals['_APTOSWRITESET_TYPE']._serialized_end=3790
+ _globals['_APTOSSCRIPTWRITESET']._serialized_start=3805
+ _globals['_APTOSSCRIPTWRITESET']._serialized_end=3905
+ _globals['_APTOSDIRECTWRITESET']._serialized_start=3908
+ _globals['_APTOSDIRECTWRITESET']._serialized_end=4050
+ _globals['_APTOSUSERTRANSACTION']._serialized_start=4053
+ _globals['_APTOSUSERTRANSACTION']._serialized_end=4195
+ _globals['_APTOSUSERTRANSACTIONREQUEST']._serialized_start=4198
+ _globals['_APTOSUSERTRANSACTIONREQUEST']._serialized_end=4502
+ _globals['_APTOSTRANSACTIONPAYLOAD']._serialized_start=4505
+ _globals['_APTOSTRANSACTIONPAYLOAD']._serialized_end=5136
+ _globals['_APTOSTRANSACTIONPAYLOAD_TYPE']._serialized_start=4982
+ _globals['_APTOSTRANSACTIONPAYLOAD_TYPE']._serialized_end=5125
+ _globals['_APTOSENTRYFUNCTIONPAYLOAD']._serialized_start=5139
+ _globals['_APTOSENTRYFUNCTIONPAYLOAD']._serialized_end=5272
+ _globals['_APTOSENTRYFUNCTIONID']._serialized_start=5274
+ _globals['_APTOSENTRYFUNCTIONID']._serialized_end=5377
+ _globals['_APTOSMOVEMODULEID']._serialized_start=5379
+ _globals['_APTOSMOVEMODULEID']._serialized_end=5429
+ _globals['_APTOSSCRIPTPAYLOAD']._serialized_start=5431
+ _globals['_APTOSSCRIPTPAYLOAD']._serialized_end=5556
+ _globals['_APTOSMOVESCRIPTBYTECODE']._serialized_start=5558
+ _globals['_APTOSMOVESCRIPTBYTECODE']._serialized_end=5656
+ _globals['_APTOSMOVEFUNCTION']._serialized_start=5659
+ _globals['_APTOSMOVEFUNCTION']._serialized_end=5958
+ _globals['_APTOSMOVEFUNCTION_TYPE']._serialized_start=5898
+ _globals['_APTOSMOVEFUNCTION_TYPE']._serialized_end=5958
+ _globals['_APTOSMOVEFUNCTIONGENERICTYPEPARAM']._serialized_start=5960
+ _globals['_APTOSMOVEFUNCTIONGENERICTYPEPARAM']._serialized_end=6016
+ _globals['_APTOSMODULEBUNDLEPAYLOAD']._serialized_start=6018
+ _globals['_APTOSMODULEBUNDLEPAYLOAD']._serialized_end=6109
+ _globals['_APTOSMOVEMODULEBYTECODE']._serialized_start=6111
+ _globals['_APTOSMOVEMODULEBYTECODE']._serialized_end=6207
+ _globals['_APTOSMOVEMODULE']._serialized_start=6210
+ _globals['_APTOSMOVEMODULE']._serialized_end=6443
+ _globals['_APTOSMOVESTRUCT']._serialized_start=6446
+ _globals['_APTOSMOVESTRUCT']._serialized_end=6661
+ _globals['_APTOSMOVESTRUCTGENERICTYPEPARAM']._serialized_start=6663
+ _globals['_APTOSMOVESTRUCTGENERICTYPEPARAM']._serialized_end=6717
+ _globals['_APTOSMOVESTRUCTFIELD']._serialized_start=6719
+ _globals['_APTOSMOVESTRUCTFIELD']._serialized_end=6769
+ _globals['_APTOSWRITESETPAYLOAD']._serialized_start=6771
+ _globals['_APTOSWRITESETPAYLOAD']._serialized_end=6850
+ _globals['_APTOSMULTISIGPAYLOAD']._serialized_start=6853
+ _globals['_APTOSMULTISIGPAYLOAD']._serialized_end=7020
+ _globals['_APTOSMULTISIGTRANSACTIONPAYLOAD']._serialized_start=7023
+ _globals['_APTOSMULTISIGTRANSACTIONPAYLOAD']._serialized_end=7279
+ _globals['_APTOSMULTISIGTRANSACTIONPAYLOAD_TYPE']._serialized_start=4982
+ _globals['_APTOSMULTISIGTRANSACTIONPAYLOAD_TYPE']._serialized_end=5033
+ _globals['_APTOSSIGNATURE']._serialized_start=7282
+ _globals['_APTOSSIGNATURE']._serialized_end=7834
+ _globals['_APTOSSIGNATURE_TYPE']._serialized_start=7715
+ _globals['_APTOSSIGNATURE_TYPE']._serialized_end=7821
+ _globals['_APTOSED25519SIGNATURE']._serialized_start=7836
+ _globals['_APTOSED25519SIGNATURE']._serialized_end=7898
+ _globals['_APTOSMULTIED25519SIGNATURE']._serialized_start=7900
+ _globals['_APTOSMULTIED25519SIGNATURE']._serialized_end=8016
+ _globals['_APTOSMULTIAGENTSIGNATURE']._serialized_start=8019
+ _globals['_APTOSMULTIAGENTSIGNATURE']._serialized_end=8216
+ _globals['_APTOSFEEPAYERSIGNATURE']._serialized_start=8219
+ _globals['_APTOSFEEPAYERSIGNATURE']._serialized_end=8513
+ _globals['_APTOSSINGLESENDERSIGNATURE']._serialized_start=8515
+ _globals['_APTOSSINGLESENDERSIGNATURE']._serialized_end=8582
+ _globals['_APTOSSINGLEKEYSIGNATURE']._serialized_start=8584
+ _globals['_APTOSSINGLEKEYSIGNATURE']._serialized_end=8648
+ _globals['_APTOSMULTIKEYSIGNATURE']._serialized_start=8650
+ _globals['_APTOSMULTIKEYSIGNATURE']._serialized_end=8744
+ _globals['_APTOSACCOUNTSIGNATURE']._serialized_start=8747
+ _globals['_APTOSACCOUNTSIGNATURE']._serialized_end=9221
+ _globals['_APTOSACCOUNTSIGNATURE_TYPE']._serialized_start=9116
+ _globals['_APTOSACCOUNTSIGNATURE_TYPE']._serialized_end=9208
+ _globals['_APTOSVALIDATORTRANSACTION']._serialized_start=9223
+ _globals['_APTOSVALIDATORTRANSACTION']._serialized_end=9301
+# @@protoc_insertion_point(module_scope)
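
The `*_pb2` modules like the one above only define message classes from the serialized descriptor; they contain no service code. A small round-trip sketch of the `AptosBlock` message generated here (field values are made up for illustration):

```python
# Hypothetical sketch: building and re-parsing an AptosBlock message.
from coinbase.chainstorage import blockchain_aptos_pb2 as aptos_pb2

block = aptos_pb2.AptosBlock()
block.header.block_height = 42
block.header.block_hash = '0xabc'  # made-up value
txn = block.transactions.add()     # repeated AptosTransaction field
txn.version = 1
txn.type = aptos_pb2.AptosTransaction.USER

data = block.SerializeToString()
decoded = aptos_pb2.AptosBlock.FromString(data)
assert decoded.header.block_height == 42
```
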
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2_grpc.py
new file mode 100644
index 0000000..5e3fb7e
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_aptos_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_aptos_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
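
This guard raises at import time whenever the installed `grpcio` is older than the 1.71.0 the stubs were generated against. One way to keep the runtime consistent with the generated code is to pin dependencies to match; the snippet below is an assumption about the project's requirements file, mirroring `GRPC_GENERATED_VERSION` above and the "Protobuf Python Version: 5.29.0" headers in the `*_pb2` files:

```
# e.g. requirements.txt (assumed pins)
grpcio>=1.71.0
protobuf>=5.29.0
```
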
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2.py
new file mode 100644
index 0000000..a1dd4f2
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_bitcoin.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_bitcoin.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.coinbase/chainstorage/blockchain_bitcoin.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\"c\n\x0f\x42itcoinBlobdata\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12@\n\x12input_transactions\x18\x02 \x03(\x0b\x32$.coinbase.chainstorage.RepeatedBytes\"\x1d\n\rRepeatedBytes\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"\x8a\x03\n\rBitcoinHeader\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x15\n\rstripped_size\x18\x03 \x01(\x04\x12\x0c\n\x04size\x18\x04 \x01(\x04\x12\x0e\n\x06weight\x18\x05 \x01(\x04\x12\x0e\n\x06height\x18\x06 \x01(\x04\x12\x0f\n\x07version\x18\x07 \x01(\x04\x12\x13\n\x0bversion_hex\x18\x08 \x01(\t\x12\x13\n\x0bmerkle_root\x18\t \x01(\t\x12\x0c\n\x04time\x18\n \x01(\x04\x12\x13\n\x0bmedian_time\x18\x0b \x01(\x04\x12\r\n\x05nonce\x18\x0c \x01(\x04\x12\x0c\n\x04\x62its\x18\r \x01(\t\x12\x12\n\ndifficulty\x18\x0e \x01(\t\x12\x12\n\nchain_work\x18\x0f \x01(\t\x12\x1e\n\x16number_of_transactions\x18\x10 \x01(\x04\x12\x1b\n\x13previous_block_hash\x18\x11 \x01(\t\x12\x17\n\x0fnext_block_hash\x18\x12 \x01(\t\x12-\n\ttimestamp\x18\x13 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xde\x03\n\x12\x42itcoinTransaction\x12\x0b\n\x03hex\x18\x02 \x01(\t\x12\x16\n\x0etransaction_id\x18\x03 \x01(\t\x12\x0c\n\x04hash\x18\x04 \x01(\t\x12\x0c\n\x04size\x18\x05 \x01(\x04\x12\x14\n\x0cvirtual_size\x18\x06 \x01(\x04\x12\x0e\n\x06weight\x18\x07 \x01(\x04\x12\x0f\n\x07version\x18\x08 \x01(\x04\x12\x11\n\tlock_time\x18\t \x01(\x04\x12>\n\x06inputs\x18\n \x03(\x0b\x32..coinbase.chainstorage.BitcoinTransactionInput\x12@\n\x07outputs\x18\x0b \x03(\x0b\x32/.coinbase.chainstorage.BitcoinTransactionOutput\x12\x12\n\nblock_hash\x18\x0c \x01(\t\x12\x12\n\nblock_time\x18\x0e \x01(\x04\x12\x0c\n\x04time\x18\x0f \x01(\x04\x12\x13\n\x0bis_coinbase\x18\x10 \x01(\x08\x12\r\n\x05index\x18\x11 \x01(\x04\x12\x13\n\x0binput_count\x18\x12 \x01(\x04\x12\x14\n\x0coutput_count\x18\x13 \x01(\x04\x12\x13\n\x0binput_value\x18\x14 \x01(\x04\x12\x14\n\x0coutput_value\x18\x15 \x01(\x04\x12\x0b\n\x03\x66\x65\x65\x18\x16 \x01(\x04\"\xb3\x02\n\x17\x42itcoinTransactionInput\x12\x10\n\x08\x63oinbase\x18\x01 \x01(\t\x12\x16\n\x0etransaction_id\x18\x02 \x01(\t\x12\x19\n\x11\x66rom_output_index\x18\x03 \x01(\x04\x12G\n\x10script_signature\x18\x04 \x01(\x0b\x32-.coinbase.chainstorage.BitcoinScriptSignature\x12\x10\n\x08sequence\x18\x05 \x01(\x04\x12#\n\x1btransaction_input_witnesses\x18\x06 \x03(\t\x12\x44\n\x0b\x66rom_output\x18\x07 \x01(\x0b\x32/.coinbase.chainstorage.BitcoinTransactionOutput\x12\r\n\x05index\x18\x08 \x01(\x04\"7\n\x16\x42itcoinScriptSignature\x12\x10\n\x08\x61ssembly\x18\x01 \x01(\t\x12\x0b\n\x03hex\x18\x02 \x01(\t\"\x82\x01\n\x18\x42itcoinTransactionOutput\x12\r\n\x05index\x18\x02 \x01(\x04\x12H\n\x11script_public_key\x18\x03 \x01(\x0b\x32-.coinbase.chainstorage.BitcoinScriptPublicKey\x12\r\n\x05value\x18\x04 \x01(\x04\"V\n\x16\x42itcoinScriptPublicKey\x12\x10\n\x08\x61ssembly\x18\x01 \x01(\t\x12\x0b\n\x03hex\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x06 \x01(\t\"\x85\x01\n\x0c\x42itcoinBlock\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.coinbase.chainstorage.BitcoinHeader\x12?\n\x0ctransactions\x18\x02 \x03(\x0b\x32).coinbase.chainstorage.BitcoinTransactionB?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_bitcoin_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_BITCOINBLOBDATA']._serialized_start=106
+ _globals['_BITCOINBLOBDATA']._serialized_end=205
+ _globals['_REPEATEDBYTES']._serialized_start=207
+ _globals['_REPEATEDBYTES']._serialized_end=236
+ _globals['_BITCOINHEADER']._serialized_start=239
+ _globals['_BITCOINHEADER']._serialized_end=633
+ _globals['_BITCOINTRANSACTION']._serialized_start=636
+ _globals['_BITCOINTRANSACTION']._serialized_end=1114
+ _globals['_BITCOINTRANSACTIONINPUT']._serialized_start=1117
+ _globals['_BITCOINTRANSACTIONINPUT']._serialized_end=1424
+ _globals['_BITCOINSCRIPTSIGNATURE']._serialized_start=1426
+ _globals['_BITCOINSCRIPTSIGNATURE']._serialized_end=1481
+ _globals['_BITCOINTRANSACTIONOUTPUT']._serialized_start=1484
+ _globals['_BITCOINTRANSACTIONOUTPUT']._serialized_end=1614
+ _globals['_BITCOINSCRIPTPUBLICKEY']._serialized_start=1616
+ _globals['_BITCOINSCRIPTPUBLICKEY']._serialized_end=1702
+ _globals['_BITCOINBLOCK']._serialized_start=1705
+ _globals['_BITCOINBLOCK']._serialized_end=1838
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2_grpc.py
new file mode 100644
index 0000000..bb2d32c
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_bitcoin_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_bitcoin_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2.py
new file mode 100644
index 0000000..1db6478
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_ethereum_beacon.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_ethereum_beacon.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from coinbase.chainstorage import blockchain_ethereum_pb2 as coinbase_dot_chainstorage_dot_blockchain__ethereum__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6coinbase/chainstorage/blockchain_ethereum_beacon.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a/coinbase/chainstorage/blockchain_ethereum.proto\"L\n\x16\x45thereumBeaconBlobdata\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\r\n\x05\x62lock\x18\x02 \x01(\x0c\x12\r\n\x05\x62lobs\x18\x04 \x01(\x0cJ\x04\x08\x03\x10\x04\"\xd0\x01\n\x13\x45thereumBeaconBlock\x12@\n\x06header\x18\x01 \x01(\x0b\x32\x30.coinbase.chainstorage.EthereumBeaconBlockHeader\x12=\n\x05\x62lock\x18\x02 \x01(\x0b\x32..coinbase.chainstorage.EthereumBeaconBlockData\x12\x38\n\x05\x62lobs\x18\x03 \x03(\x0b\x32).coinbase.chainstorage.EthereumBeaconBlob\"\xad\x01\n\x19\x45thereumBeaconBlockHeader\x12\x0c\n\x04slot\x18\x01 \x01(\x04\x12\x16\n\x0eproposer_index\x18\x02 \x01(\x04\x12\x13\n\x0bparent_root\x18\x03 \x01(\t\x12\x12\n\nstate_root\x18\x04 \x01(\t\x12\x11\n\tbody_root\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x0c\n\x04root\x18\x07 \x01(\t\x12\r\n\x05\x65poch\x18\x08 \x01(\x04\"\xc0\x04\n\x17\x45thereumBeaconBlockData\x12=\n\x07version\x18\x01 \x01(\x0e\x32,.coinbase.chainstorage.EthereumBeaconVersion\x12\x0c\n\x04slot\x18\x02 \x01(\x04\x12\x16\n\x0eproposer_index\x18\x03 \x01(\x04\x12\x13\n\x0bparent_root\x18\x04 \x01(\t\x12\x12\n\nstate_root\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12H\n\x0cphase0_block\x18\x64 \x01(\x0b\x32\x30.coinbase.chainstorage.EthereumBeaconBlockPhase0H\x00\x12H\n\x0c\x61ltair_block\x18\x65 \x01(\x0b\x32\x30.coinbase.chainstorage.EthereumBeaconBlockAltairH\x00\x12N\n\x0f\x62\x65llatrix_block\x18\x66 \x01(\x0b\x32\x33.coinbase.chainstorage.EthereumBeaconBlockBellatrixH\x00\x12J\n\rcapella_block\x18g \x01(\x0b\x32\x31.coinbase.chainstorage.EthereumBeaconBlockCapellaH\x00\x12\x46\n\x0b\x64\x65neb_block\x18h \x01(\x0b\x32/.coinbase.chainstorage.EthereumBeaconBlockDenebH\x00\x42\x0c\n\nblock_data\"t\n\x19\x45thereumBeaconBlockPhase0\x12\x15\n\rrandao_reveal\x18\x01 \x01(\t\x12@\n\teth1_data\x18\x02 \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconEth1Data\"t\n\x19\x45thereumBeaconBlockAltair\x12\x15\n\rrandao_reveal\x18\x01 \x01(\t\x12@\n\teth1_data\x18\x02 \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconEth1Data\"\xd2\x01\n\x1c\x45thereumBeaconBlockBellatrix\x12\x15\n\rrandao_reveal\x18\x01 \x01(\t\x12@\n\teth1_data\x18\x02 \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconEth1Data\x12Y\n\x11\x65xecution_payload\x18\x03 \x01(\x0b\x32>.coinbase.chainstorage.EthereumBeaconExecutionPayloadBellatrix\"\xce\x01\n\x1a\x45thereumBeaconBlockCapella\x12\x15\n\rrandao_reveal\x18\x01 \x01(\t\x12@\n\teth1_data\x18\x02 \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconEth1Data\x12W\n\x11\x65xecution_payload\x18\x03 \x01(\x0b\x32<.coinbase.chainstorage.EthereumBeaconExecutionPayloadCapella\"\xe8\x01\n\x18\x45thereumBeaconBlockDeneb\x12\x15\n\rrandao_reveal\x18\x01 \x01(\t\x12@\n\teth1_data\x18\x02 \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconEth1Data\x12U\n\x11\x65xecution_payload\x18\x03 \x01(\x0b\x32:.coinbase.chainstorage.EthereumBeaconExecutionPayloadDeneb\x12\x1c\n\x14\x62lob_kzg_commitments\x18\x04 \x03(\t\"Y\n\x16\x45thereumBeaconEth1Data\x12\x14\n\x0c\x64\x65posit_root\x18\x01 \x01(\t\x12\x15\n\rdeposit_count\x18\x02 \x01(\x04\x12\x12\n\nblock_hash\x18\x03 \x01(\t\"\xeb\x02\n\'EthereumBeaconExecutionPayloadBellatrix\x12\x13\n\x0bparent_hash\x18\x01 \x01(\t\x12\x15\n\rfee_recipient\x18\x02 \x01(\t\x12\x12\n\nstate_root\x18\x03 \x01(\t\x12\x15\n\rreceipts_root\x18\x04 \x01(\t\x12\x12\n\nlogs_bloom\x18\x05 \x01(\t\x12\x13\n\x0bprev_randao\x18\x06 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x07 \x01(\x04\x12\x11\n\tgas_limit\x18\x08 \x01(\x04\x12\x10\n\x08gas_used\x18\t \x01(\x04\x12-\n\ttimestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nextra_data\x18\x0b \x01(\t\x12\x18\n\x10\x62\x61se_fee_per_gas\x18\x0c \x01(\t\x12\x12\n\nblock_hash\x18\r \x01(\t\x12\x14\n\x0ctransactions\x18\x0e \x03(\x0c\"\xa9\x03\n%EthereumBeaconExecutionPayloadCapella\x12\x13\n\x0bparent_hash\x18\x01 \x01(\t\x12\x15\n\rfee_recipient\x18\x02 \x01(\t\x12\x12\n\nstate_root\x18\x03 \x01(\t\x12\x15\n\rreceipts_root\x18\x04 \x01(\t\x12\x12\n\nlogs_bloom\x18\x05 \x01(\t\x12\x13\n\x0bprev_randao\x18\x06 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x07 \x01(\x04\x12\x11\n\tgas_limit\x18\x08 \x01(\x04\x12\x10\n\x08gas_used\x18\t \x01(\x04\x12-\n\ttimestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nextra_data\x18\x0b \x01(\t\x12\x18\n\x10\x62\x61se_fee_per_gas\x18\x0c \x01(\t\x12\x12\n\nblock_hash\x18\r \x01(\t\x12\x14\n\x0ctransactions\x18\x0e \x03(\x0c\x12>\n\x0bwithdrawals\x18\x0f \x03(\x0b\x32).coinbase.chainstorage.EthereumWithdrawal\"\xd7\x03\n#EthereumBeaconExecutionPayloadDeneb\x12\x13\n\x0bparent_hash\x18\x01 \x01(\t\x12\x15\n\rfee_recipient\x18\x02 \x01(\t\x12\x12\n\nstate_root\x18\x03 \x01(\t\x12\x15\n\rreceipts_root\x18\x04 \x01(\t\x12\x12\n\nlogs_bloom\x18\x05 \x01(\t\x12\x13\n\x0bprev_randao\x18\x06 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x07 \x01(\x04\x12\x11\n\tgas_limit\x18\x08 \x01(\x04\x12\x10\n\x08gas_used\x18\t \x01(\x04\x12-\n\ttimestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nextra_data\x18\x0b \x01(\t\x12\x18\n\x10\x62\x61se_fee_per_gas\x18\x0c \x01(\t\x12\x12\n\nblock_hash\x18\r \x01(\t\x12\x14\n\x0ctransactions\x18\x0e \x03(\x0c\x12>\n\x0bwithdrawals\x18\x0f \x03(\x0b\x32).coinbase.chainstorage.EthereumWithdrawal\x12\x15\n\rblob_gas_used\x18\x10 \x01(\x04\x12\x17\n\x0f\x65xcess_blob_gas\x18\x11 \x01(\x04\"\xa7\x01\n\x12\x45thereumBeaconBlob\x12\x0c\n\x04slot\x18\x01 \x01(\x04\x12\x13\n\x0bparent_root\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x04\x12\x0c\n\x04\x62lob\x18\x04 \x01(\x0c\x12\x16\n\x0ekzg_commitment\x18\x05 \x01(\t\x12\x11\n\tkzg_proof\x18\x06 \x01(\t\x12&\n\x1ekzg_commitment_inclusion_proof\x18\x07 \x03(\t*c\n\x15\x45thereumBeaconVersion\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PHASE0\x10\x01\x12\n\n\x06\x41LTAIR\x10\x02\x12\r\n\tBELLATRIX\x10\x03\x12\x0b\n\x07\x43\x41PELLA\x10\x04\x12\t\n\x05\x44\x45NEB\x10\x05\x42?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_ethereum_beacon_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_ETHEREUMBEACONVERSION']._serialized_start=3629
+ _globals['_ETHEREUMBEACONVERSION']._serialized_end=3728
+ _globals['_ETHEREUMBEACONBLOBDATA']._serialized_start=163
+ _globals['_ETHEREUMBEACONBLOBDATA']._serialized_end=239
+ _globals['_ETHEREUMBEACONBLOCK']._serialized_start=242
+ _globals['_ETHEREUMBEACONBLOCK']._serialized_end=450
+ _globals['_ETHEREUMBEACONBLOCKHEADER']._serialized_start=453
+ _globals['_ETHEREUMBEACONBLOCKHEADER']._serialized_end=626
+ _globals['_ETHEREUMBEACONBLOCKDATA']._serialized_start=629
+ _globals['_ETHEREUMBEACONBLOCKDATA']._serialized_end=1205
+ _globals['_ETHEREUMBEACONBLOCKPHASE0']._serialized_start=1207
+ _globals['_ETHEREUMBEACONBLOCKPHASE0']._serialized_end=1323
+ _globals['_ETHEREUMBEACONBLOCKALTAIR']._serialized_start=1325
+ _globals['_ETHEREUMBEACONBLOCKALTAIR']._serialized_end=1441
+ _globals['_ETHEREUMBEACONBLOCKBELLATRIX']._serialized_start=1444
+ _globals['_ETHEREUMBEACONBLOCKBELLATRIX']._serialized_end=1654
+ _globals['_ETHEREUMBEACONBLOCKCAPELLA']._serialized_start=1657
+ _globals['_ETHEREUMBEACONBLOCKCAPELLA']._serialized_end=1863
+ _globals['_ETHEREUMBEACONBLOCKDENEB']._serialized_start=1866
+ _globals['_ETHEREUMBEACONBLOCKDENEB']._serialized_end=2098
+ _globals['_ETHEREUMBEACONETH1DATA']._serialized_start=2100
+ _globals['_ETHEREUMBEACONETH1DATA']._serialized_end=2189
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADBELLATRIX']._serialized_start=2192
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADBELLATRIX']._serialized_end=2555
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADCAPELLA']._serialized_start=2558
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADCAPELLA']._serialized_end=2983
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADDENEB']._serialized_start=2986
+ _globals['_ETHEREUMBEACONEXECUTIONPAYLOADDENEB']._serialized_end=3457
+ _globals['_ETHEREUMBEACONBLOB']._serialized_start=3460
+ _globals['_ETHEREUMBEACONBLOB']._serialized_end=3627
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2_grpc.py
new file mode 100644
index 0000000..3a93896
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_beacon_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_ethereum_beacon_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2.py
new file mode 100644
index 0000000..10eebcc
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_ethereum.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_ethereum.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/coinbase/chainstorage/blockchain_ethereum.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb6\x01\n\x10\x45thereumBlobdata\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x1c\n\x14transaction_receipts\x18\x02 \x03(\x0c\x12\x1a\n\x12transaction_traces\x18\x03 \x03(\x0c\x12\x0e\n\x06uncles\x18\x04 \x03(\x0c\x12:\n\x07polygon\x18\x64 \x01(\x0b\x32\'.coinbase.chainstorage.PolygonExtraDataH\x00\x42\x0c\n\nextra_data\"\"\n\x10PolygonExtraData\x12\x0e\n\x06\x61uthor\x18\x01 \x01(\x0c\"\xbf\x01\n\rEthereumBlock\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.coinbase.chainstorage.EthereumHeader\x12@\n\x0ctransactions\x18\x02 \x03(\x0b\x32*.coinbase.chainstorage.EthereumTransaction\x12\x35\n\x06uncles\x18\x03 \x03(\x0b\x32%.coinbase.chainstorage.EthereumHeader\"]\n\x12\x45thereumWithdrawal\x12\r\n\x05index\x18\x01 \x01(\x04\x12\x17\n\x0fvalidator_index\x18\x02 \x01(\x04\x12\x0f\n\x07\x61\x64\x64ress\x18\x03 \x01(\t\x12\x0e\n\x06\x61mount\x18\x04 \x01(\x04\"\x92\x06\n\x0e\x45thereumHeader\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x13\n\x0bparent_hash\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x04\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0ctransactions\x18\x05 \x03(\t\x12\r\n\x05nonce\x18\x06 \x01(\t\x12\x13\n\x0bsha3_uncles\x18\x07 \x01(\t\x12\x12\n\nlogs_bloom\x18\x08 \x01(\t\x12\x19\n\x11transactions_root\x18\t \x01(\t\x12\x12\n\nstate_root\x18\n \x01(\t\x12\x15\n\rreceipts_root\x18\x0b \x01(\t\x12\r\n\x05miner\x18\x0c \x01(\t\x12\x12\n\ndifficulty\x18\r \x01(\x04\x12\x18\n\x10total_difficulty\x18\x0e \x01(\t\x12\x12\n\nextra_data\x18\x0f \x01(\t\x12\x0c\n\x04size\x18\x10 \x01(\x04\x12\x11\n\tgas_limit\x18\x11 \x01(\x04\x12\x10\n\x08gas_used\x18\x12 \x01(\x04\x12\x0e\n\x06uncles\x18\x13 \x03(\t\x12\x1a\n\x10\x62\x61se_fee_per_gas\x18\x14 \x01(\x04H\x00\x12\x10\n\x08mix_hash\x18\x15 \x01(\t\x12>\n\x0bwithdrawals\x18\x16 \x03(\x0b\x32).coinbase.chainstorage.EthereumWithdrawal\x12\x18\n\x10withdrawals_root\x18\x17 \x01(\t\x12\x10\n\x06\x61uthor\x18\x18 \x01(\tH\x01\x12\x17\n\rblob_gas_used\x18\x19 \x01(\x04H\x02\x12\x19\n\x0f\x65xcess_blob_gas\x18\x1a \x01(\x04H\x03\x12 \n\x18parent_beacon_block_root\x18\x1b \x01(\t\x12\x18\n\x10\x62lock_extra_data\x18\x1c \x01(\tB\x1b\n\x19optional_base_fee_per_gasB\x19\n\x17optional_polygon_authorB\x18\n\x16optional_blob_gas_usedB\x1a\n\x18optional_excess_blob_gas\"B\n\x19\x45thereumTransactionAccess\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x14\n\x0cstorage_keys\x18\x02 \x03(\t\"f\n\x1d\x45thereumTransactionAccessList\x12\x45\n\x0b\x61\x63\x63\x65ss_list\x18\x01 \x03(\x0b\x32\x30.coinbase.chainstorage.EthereumTransactionAccess\"\x99\x08\n\x13\x45thereumTransaction\x12\x12\n\nblock_hash\x18\x01 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x02 \x01(\x04\x12\x0c\n\x04\x66rom\x18\x03 \x01(\t\x12\x0b\n\x03gas\x18\x04 \x01(\x04\x12\x11\n\tgas_price\x18\x05 \x01(\x04\x12\x0c\n\x04hash\x18\x06 \x01(\t\x12\r\n\x05input\x18\x07 \x01(\t\x12\r\n\x05nonce\x18\x08 \x01(\x04\x12\n\n\x02to\x18\t \x01(\t\x12\r\n\x05index\x18\n \x01(\x04\x12\r\n\x05value\x18\x0b \x01(\t\x12\x42\n\x07receipt\x18\x0c \x01(\x0b\x32\x31.coinbase.chainstorage.EthereumTransactionReceipt\x12\x45\n\x0ftoken_transfers\x18\x0e \x03(\x0b\x32,.coinbase.chainstorage.EthereumTokenTransfer\x12\x0c\n\x04type\x18\x0f \x01(\x04\x12\x19\n\x0fmax_fee_per_gas\x18\x10 \x01(\x04H\x00\x12\"\n\x18max_priority_fee_per_gas\x18\x11 \x01(\x04H\x01\x12W\n\x17transaction_access_list\x18\x12 \x01(\x0b\x32\x34.coinbase.chainstorage.EthereumTransactionAccessListH\x02\x12R\n\x10\x66lattened_traces\x18\x13 \x03(\x0b\x32\x38.coinbase.chainstorage.EthereumTransactionFlattenedTrace\x12\x33\n\x0f\x62lock_timestamp\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1e\n\x14priority_fee_per_gas\x18\x15 \x01(\x04H\x03\x12\x0e\n\x04mint\x18\x16 \x01(\tH\x04\x12\t\n\x01v\x18\x17 \x01(\t\x12\t\n\x01r\x18\x18 \x01(\t\x12\t\n\x01s\x18\x19 \x01(\t\x12\x12\n\x08\x63hain_id\x18\x1a \x01(\x04H\x05\x12\x13\n\x0bsource_hash\x18\x1b \x01(\t\x12\x14\n\x0cis_system_tx\x18\x1c \x01(\x08\x12\x1e\n\x14max_fee_per_blob_gas\x18\x1d \x01(\tH\x06\x12\x1d\n\x15\x62lob_versioned_hashes\x18\x1e \x03(\tB\x1a\n\x18optional_max_fee_per_gasB#\n!optional_max_priority_fee_per_gasB\"\n optional_transaction_access_listB\x1f\n\x1doptional_priority_fee_per_gasB\x0f\n\roptional_mintB\x13\n\x11optional_chain_idB\x1f\n\x1doptional_max_fee_per_blob_gas\"\xc6\t\n\x1a\x45thereumTransactionReceipt\x12\x18\n\x10transaction_hash\x18\x01 \x01(\t\x12\x19\n\x11transaction_index\x18\x02 \x01(\x04\x12\x12\n\nblock_hash\x18\x03 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x04 \x01(\x04\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\x12\x1b\n\x13\x63umulative_gas_used\x18\x07 \x01(\x04\x12\x10\n\x08gas_used\x18\x08 \x01(\x04\x12\x18\n\x10\x63ontract_address\x18\t \x01(\t\x12\x35\n\x04logs\x18\n \x03(\x0b\x32\'.coinbase.chainstorage.EthereumEventLog\x12\x12\n\nlogs_bloom\x18\x0b \x01(\t\x12\x0c\n\x04root\x18\x0c \x01(\t\x12\x10\n\x06status\x18\x0e \x01(\x04H\x00\x12\x0c\n\x04type\x18\x0f \x01(\x04\x12\x1b\n\x13\x65\x66\x66\x65\x63tive_gas_price\x18\x10 \x01(\x04\x12R\n\x0bl1_fee_info\x18\x11 \x01(\x0b\x32;.coinbase.chainstorage.EthereumTransactionReceipt.L1FeeInfoH\x01\x12\x17\n\rdeposit_nonce\x18\x12 \x01(\x04H\x02\x12!\n\x17\x64\x65posit_receipt_version\x18\x13 \x01(\x04H\x03\x12\x18\n\x0e\x62lob_gas_price\x18\x14 \x01(\x04H\x04\x12\x17\n\rblob_gas_used\x18\x15 \x01(\x04H\x05\x12\r\n\x03\x66\x65\x65\x18\x16 \x01(\x04H\x06\x12\x11\n\x07net_fee\x18\x17 \x01(\x04H\x07\x12\x13\n\tnet_usage\x18\x18 \x01(\x04H\x08\x12\x16\n\x0c\x65nergy_usage\x18\x19 \x01(\x04H\t\x12\x14\n\nenergy_fee\x18\x1a \x01(\x04H\n\x12\x1d\n\x13origin_energy_usage\x18\x1b \x01(\x04H\x0b\x12\x1c\n\x12\x65nergy_usage_total\x18\x1c \x01(\x04H\x0c\x12\x1e\n\x14\x65nergy_penalty_total\x18\x1d \x01(\x04H\r\x1a]\n\tL1FeeInfo\x12\x13\n\x0bl1_gas_used\x18\x01 \x01(\x04\x12\x14\n\x0cl1_gas_price\x18\x02 \x01(\x04\x12\x0e\n\x06l1_fee\x18\x03 \x01(\x04\x12\x15\n\rl1_fee_scalar\x18\x04 \x01(\tB\x11\n\x0foptional_statusB\x16\n\x14optional_l1_fee_infoB\x18\n\x16optional_deposit_nonceB\"\n optional_deposit_receipt_versionB\x19\n\x17optional_blob_gas_priceB\x18\n\x16optional_blob_gas_usedB\x0e\n\x0coptional_feeB\x12\n\x10optional_net_feeB\x14\n\x12optional_net_usageB\x17\n\x15optional_energy_usageB\x15\n\x13optional_energy_feeB\x1e\n\x1coptional_origin_energy_usageB\x1d\n\x1boptional_energy_usage_totalB\x1f\n\x1doptional_energy_penalty_totalJ\x04\x08\r\x10\x0e\"\xc4\x01\n\x10\x45thereumEventLog\x12\x0f\n\x07removed\x18\x01 \x01(\x08\x12\x11\n\tlog_index\x18\x02 \x01(\x04\x12\x18\n\x10transaction_hash\x18\x03 \x01(\t\x12\x19\n\x11transaction_index\x18\x04 \x01(\x04\x12\x12\n\nblock_hash\x18\x05 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x06 \x01(\x04\x12\x0f\n\x07\x61\x64\x64ress\x18\x07 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x08 \x01(\t\x12\x0e\n\x06topics\x18\t \x03(\t\"\xde\x01\n\x18\x45thereumTransactionTrace\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0c\n\x04\x66rom\x18\x03 \x01(\t\x12\n\n\x02to\x18\x04 \x01(\t\x12\r\n\x05value\x18\x05 \x01(\t\x12\x0b\n\x03gas\x18\x06 \x01(\x04\x12\x10\n\x08gas_used\x18\x07 \x01(\x04\x12\r\n\x05input\x18\x08 \x01(\t\x12\x0e\n\x06output\x18\t \x01(\t\x12>\n\x05\x63\x61lls\x18\n \x03(\x0b\x32/.coinbase.chainstorage.EthereumTransactionTrace\"\xf9\x02\n!EthereumTransactionFlattenedTrace\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0c\n\x04\x66rom\x18\x03 \x01(\t\x12\n\n\x02to\x18\x04 \x01(\t\x12\r\n\x05value\x18\x05 \x01(\t\x12\x0b\n\x03gas\x18\x06 \x01(\x04\x12\x10\n\x08gas_used\x18\x07 \x01(\x04\x12\r\n\x05input\x18\x08 \x01(\t\x12\x0e\n\x06output\x18\t \x01(\t\x12\x11\n\tsubtraces\x18\n \x01(\x04\x12\x15\n\rtrace_address\x18\x0b \x03(\x04\x12\x12\n\ntrace_type\x18\x0c \x01(\t\x12\x11\n\tcall_type\x18\r \x01(\t\x12\x10\n\x08trace_id\x18\x0e \x01(\t\x12\x0e\n\x06status\x18\x0f \x01(\x04\x12\x12\n\nblock_hash\x18\x10 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\x11 \x01(\x04\x12\x18\n\x10transaction_hash\x18\x12 \x01(\t\x12\x19\n\x11transaction_index\x18\x13 \x01(\x04\"\xe5\x02\n\x15\x45thereumTokenTransfer\x12\x15\n\rtoken_address\x18\x01 \x01(\t\x12\x14\n\x0c\x66rom_address\x18\x02 \x01(\t\x12\x12\n\nto_address\x18\x03 \x01(\t\x12\r\n\x05value\x18\x04 \x01(\t\x12\x19\n\x11transaction_index\x18\x05 \x01(\x04\x12\x18\n\x10transaction_hash\x18\x06 \x01(\t\x12\x11\n\tlog_index\x18\x07 \x01(\x04\x12\x12\n\nblock_hash\x18\x08 \x01(\t\x12\x14\n\x0c\x62lock_number\x18\t \x01(\x04\x12:\n\x05\x65rc20\x18\x64 \x01(\x0b\x32).coinbase.chainstorage.ERC20TokenTransferH\x00\x12<\n\x06\x65rc721\x18\x65 \x01(\x0b\x32*.coinbase.chainstorage.ERC721TokenTransferH\x00\x42\x10\n\x0etoken_transfer\"M\n\x12\x45RC20TokenTransfer\x12\x14\n\x0c\x66rom_address\x18\x01 \x01(\t\x12\x12\n\nto_address\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"Q\n\x13\x45RC721TokenTransfer\x12\x14\n\x0c\x66rom_address\x18\x01 \x01(\t\x12\x12\n\nto_address\x18\x02 \x01(\t\x12\x10\n\x08token_id\x18\x03 \x01(\t\"2\n\x19\x45thereumAccountStateProof\x12\x15\n\raccount_proof\x18\x01 \x01(\x0c\",\n\x12\x45thereumExtraInput\x12\x16\n\x0e\x65rc20_contract\x18\x01 \x01(\t\"V\n\x1c\x45thereumAccountStateResponse\x12\r\n\x05nonce\x18\x01 \x01(\x04\x12\x14\n\x0cstorage_hash\x18\x02 \x01(\t\x12\x11\n\tcode_hash\x18\x03 \x01(\tB?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_ethereum_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_ETHEREUMBLOBDATA']._serialized_start=108
+ _globals['_ETHEREUMBLOBDATA']._serialized_end=290
+ _globals['_POLYGONEXTRADATA']._serialized_start=292
+ _globals['_POLYGONEXTRADATA']._serialized_end=326
+ _globals['_ETHEREUMBLOCK']._serialized_start=329
+ _globals['_ETHEREUMBLOCK']._serialized_end=520
+ _globals['_ETHEREUMWITHDRAWAL']._serialized_start=522
+ _globals['_ETHEREUMWITHDRAWAL']._serialized_end=615
+ _globals['_ETHEREUMHEADER']._serialized_start=618
+ _globals['_ETHEREUMHEADER']._serialized_end=1404
+ _globals['_ETHEREUMTRANSACTIONACCESS']._serialized_start=1406
+ _globals['_ETHEREUMTRANSACTIONACCESS']._serialized_end=1472
+ _globals['_ETHEREUMTRANSACTIONACCESSLIST']._serialized_start=1474
+ _globals['_ETHEREUMTRANSACTIONACCESSLIST']._serialized_end=1576
+ _globals['_ETHEREUMTRANSACTION']._serialized_start=1579
+ _globals['_ETHEREUMTRANSACTION']._serialized_end=2628
+ _globals['_ETHEREUMTRANSACTIONRECEIPT']._serialized_start=2631
+ _globals['_ETHEREUMTRANSACTIONRECEIPT']._serialized_end=3853
+ _globals['_ETHEREUMTRANSACTIONRECEIPT_L1FEEINFO']._serialized_start=3394
+ _globals['_ETHEREUMTRANSACTIONRECEIPT_L1FEEINFO']._serialized_end=3487
+ _globals['_ETHEREUMEVENTLOG']._serialized_start=3856
+ _globals['_ETHEREUMEVENTLOG']._serialized_end=4052
+ _globals['_ETHEREUMTRANSACTIONTRACE']._serialized_start=4055
+ _globals['_ETHEREUMTRANSACTIONTRACE']._serialized_end=4277
+ _globals['_ETHEREUMTRANSACTIONFLATTENEDTRACE']._serialized_start=4280
+ _globals['_ETHEREUMTRANSACTIONFLATTENEDTRACE']._serialized_end=4657
+ _globals['_ETHEREUMTOKENTRANSFER']._serialized_start=4660
+ _globals['_ETHEREUMTOKENTRANSFER']._serialized_end=5017
+ _globals['_ERC20TOKENTRANSFER']._serialized_start=5019
+ _globals['_ERC20TOKENTRANSFER']._serialized_end=5096
+ _globals['_ERC721TOKENTRANSFER']._serialized_start=5098
+ _globals['_ERC721TOKENTRANSFER']._serialized_end=5179
+ _globals['_ETHEREUMACCOUNTSTATEPROOF']._serialized_start=5181
+ _globals['_ETHEREUMACCOUNTSTATEPROOF']._serialized_end=5231
+ _globals['_ETHEREUMEXTRAINPUT']._serialized_start=5233
+ _globals['_ETHEREUMEXTRAINPUT']._serialized_end=5277
+ _globals['_ETHEREUMACCOUNTSTATERESPONSE']._serialized_start=5279
+ _globals['_ETHEREUMACCOUNTSTATERESPONSE']._serialized_end=5365
+# @@protoc_insertion_point(module_scope)
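For orientation, a minimal sketch of consuming the module above: decoding an `EthereumBlock` from raw protobuf bytes. The file path is hypothetical; in practice the bytes would come from ChainStorage's native-block APIs:

```python
# Sketch only: parse an EthereumBlock with the generated classes.
from coinbase.chainstorage import blockchain_ethereum_pb2 as eth_pb2

raw = open("native_block.bin", "rb").read()  # hypothetical input file
block = eth_pb2.EthereumBlock()
block.ParseFromString(raw)
print(block.header.number, block.header.hash, len(block.transactions))
```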
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2_grpc.py
new file mode 100644
index 0000000..7312787
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_ethereum_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_ethereum_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_pb2.py
new file mode 100644
index 0000000..fda9c0d
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_pb2.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from coinbase.c3.common import common_pb2 as coinbase_dot_c3_dot_common_dot_common__pb2
+from coinbase.crypto.rosetta.types import block_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_block__pb2
+from coinbase.crypto.rosetta.types import transaction_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_transaction__pb2
+from coinbase.chainstorage import blockchain_bitcoin_pb2 as coinbase_dot_chainstorage_dot_blockchain__bitcoin__pb2
+from coinbase.chainstorage import blockchain_aptos_pb2 as coinbase_dot_chainstorage_dot_blockchain__aptos__pb2
+from coinbase.chainstorage import blockchain_solana_pb2 as coinbase_dot_chainstorage_dot_blockchain__solana__pb2
+from coinbase.chainstorage import blockchain_rosetta_pb2 as coinbase_dot_chainstorage_dot_blockchain__rosetta__pb2
+from coinbase.chainstorage import blockchain_ethereum_pb2 as coinbase_dot_chainstorage_dot_blockchain__ethereum__pb2
+from coinbase.chainstorage import blockchain_ethereum_beacon_pb2 as coinbase_dot_chainstorage_dot_blockchain__ethereum__beacon__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&coinbase/chainstorage/blockchain.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1f\x63oinbase/c3/common/common.proto\x1a)coinbase/crypto/rosetta/types/block.proto\x1a/coinbase/crypto/rosetta/types/transaction.proto\x1a.coinbase/chainstorage/blockchain_bitcoin.proto\x1a,coinbase/chainstorage/blockchain_aptos.proto\x1a-coinbase/chainstorage/blockchain_solana.proto\x1a.coinbase/chainstorage/blockchain_rosetta.proto\x1a/coinbase/chainstorage/blockchain_ethereum.proto\x1a\x36\x63oinbase/chainstorage/blockchain_ethereum_beacon.proto\"\x9a\x05\n\x05\x42lock\x12\x32\n\nblockchain\x18\x01 \x01(\x0e\x32\x1e.coinbase.c3.common.Blockchain\x12,\n\x07network\x18\x02 \x01(\x0e\x32\x1b.coinbase.c3.common.Network\x12\x36\n\x08metadata\x18\x03 \x01(\x0b\x32$.coinbase.chainstorage.BlockMetadata\x12H\n\x14transaction_metadata\x18\x04 \x01(\x0b\x32*.coinbase.chainstorage.TransactionMetadata\x12\x34\n\nside_chain\x18\x05 \x01(\x0e\x32 .coinbase.chainstorage.SideChain\x12;\n\x08\x65thereum\x18\x64 \x01(\x0b\x32\'.coinbase.chainstorage.EthereumBlobdataH\x00\x12\x39\n\x07\x62itcoin\x18\x65 \x01(\x0b\x32&.coinbase.chainstorage.BitcoinBlobdataH\x00\x12\x39\n\x07rosetta\x18\x66 \x01(\x0b\x32&.coinbase.chainstorage.RosettaBlobdataH\x00\x12\x37\n\x06solana\x18g \x01(\x0b\x32%.coinbase.chainstorage.SolanaBlobdataH\x00\x12\x35\n\x05\x61ptos\x18h \x01(\x0b\x32$.coinbase.chainstorage.AptosBlobdataH\x00\x12H\n\x0f\x65thereum_beacon\x18i \x01(\x0b\x32-.coinbase.chainstorage.EthereumBeaconBlobdataH\x00\x42\n\n\x08\x62lobdata\"|\n\x0f\x42lockIdentifier\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\x0b\n\x03tag\x18\x03 \x01(\r\x12\x0f\n\x07skipped\x18\x04 \x01(\x08\x12-\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xbf\x01\n\rBlockMetadata\x12\x0b\n\x03tag\x18\x01 \x01(\r\x12\x0c\n\x04hash\x18\x02 \x01(\t\x12\x13\n\x0bparent_hash\x18\x03 \x01(\t\x12\x0e\n\x06height\x18\x04 \x01(\x04\x12\x17\n\x0fobject_key_main\x18\x05 \x01(\t\x12\x15\n\rparent_height\x18\x06 \x01(\x04\x12\x0f\n\x07skipped\x18\x07 \x01(\x08\x12-\n\ttimestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"+\n\x13TransactionMetadata\x12\x14\n\x0ctransactions\x18\x01 \x03(\t\"C\n\x0cRosettaBlock\x12\x33\n\x05\x62lock\x18\x01 \x01(\x0b\x32$.coinbase.crypto.rosetta.types.Block\"\xf6\x05\n\x0bNativeBlock\x12\x32\n\nblockchain\x18\x01 \x01(\x0e\x32\x1e.coinbase.c3.common.Blockchain\x12,\n\x07network\x18\x02 \x01(\x0e\x32\x1b.coinbase.c3.common.Network\x12\x0b\n\x03tag\x18\x03 \x01(\r\x12\x0c\n\x04hash\x18\x04 \x01(\t\x12\x13\n\x0bparent_hash\x18\x05 \x01(\t\x12\x0e\n\x06height\x18\x06 \x01(\x04\x12-\n\ttimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10num_transactions\x18\x08 \x01(\x04\x12\x15\n\rparent_height\x18\t \x01(\x04\x12\x0f\n\x07skipped\x18\n \x01(\x08\x12\x34\n\nside_chain\x18\x0b \x01(\x0e\x32 .coinbase.chainstorage.SideChain\x12\x38\n\x08\x65thereum\x18\x64 \x01(\x0b\x32$.coinbase.chainstorage.EthereumBlockH\x00\x12\x36\n\x07\x62itcoin\x18\x65 \x01(\x0b\x32#.coinbase.chainstorage.BitcoinBlockH\x00\x12\x37\n\x07rosetta\x18\x66 \x01(\x0b\x32$.coinbase.crypto.rosetta.types.BlockH\x00\x12\x34\n\x06solana\x18g \x01(\x0b\x32\".coinbase.chainstorage.SolanaBlockH\x00\x12\x32\n\x05\x61ptos\x18h \x01(\x0b\x32!.coinbase.chainstorage.AptosBlockH\x00\x12\x39\n\tsolana_v2\x18i \x01(\x0b\x32$.coinbase.chainstorage.SolanaBlockV2H\x00\x12\x45\n\x0f\x65thereum_beacon\x18j \x01(\x0b\x32*.coinbase.chainstorage.EthereumBeaconBlockH\x00\x42\x07\n\x05\x62lock\"\xbd\x04\n\x11NativeTransaction\x12\x32\n\nblockchain\x18\x01 \x01(\x0e\x32\x1e.coinbase.c3.common.Blockchain\x12,\n\x07network\x18\x02 \x01(\x0e\x32\x1b.coinbase.c3.common.Network\x12\x0b\n\x03tag\x18\x03 \x01(\r\x12\x18\n\x10transaction_hash\x18\x04 \x01(\t\x12\x14\n\x0c\x62lock_height\x18\x05 \x01(\x04\x12\x12\n\nblock_hash\x18\x06 \x01(\t\x12\x33\n\x0f\x62lock_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x08\x65thereum\x18\x64 \x01(\x0b\x32*.coinbase.chainstorage.EthereumTransactionH\x00\x12<\n\x07\x62itcoin\x18\x65 \x01(\x0b\x32).coinbase.chainstorage.BitcoinTransactionH\x00\x12=\n\x07rosetta\x18\x66 \x01(\x0b\x32*.coinbase.crypto.rosetta.types.TransactionH\x00\x12:\n\x06solana\x18g \x01(\x0b\x32(.coinbase.chainstorage.SolanaTransactionH\x00\x12\x38\n\x05\x61ptos\x18h \x01(\x0b\x32\'.coinbase.chainstorage.AptosTransactionH\x00\x42\r\n\x0btransaction\"k\n\x17GetAccountProofResponse\x12\x44\n\x08\x65thereum\x18\x64 \x01(\x0b\x32\x30.coinbase.chainstorage.EthereumAccountStateProofH\x00\x42\n\n\x08response\"\xeb\x01\n\x1bValidateAccountStateRequest\x12R\n\x0b\x61\x63\x63ount_req\x18\x01 \x01(\x0b\x32=.coinbase.chainstorage.InternalGetVerifiedAccountStateRequest\x12\x31\n\x05\x62lock\x18\x02 \x01(\x0b\x32\".coinbase.chainstorage.NativeBlock\x12\x45\n\raccount_proof\x18\x03 \x01(\x0b\x32..coinbase.chainstorage.GetAccountProofResponse\"\xb2\x01\n&InternalGetVerifiedAccountStateRequest\x12\x0f\n\x07\x61\x63\x63ount\x18\x01 \x01(\t\x12\x0b\n\x03tag\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\x04\x12\x0c\n\x04hash\x18\x04 \x01(\t\x12=\n\x08\x65thereum\x18\x64 \x01(\x0b\x32).coinbase.chainstorage.EthereumExtraInputH\x00\x42\r\n\x0b\x65xtra_input\"\x84\x01\n\x1cValidateAccountStateResponse\x12\x0f\n\x07\x62\x61lance\x18\x01 \x01(\t\x12G\n\x08\x65thereum\x18\x64 \x01(\x0b\x32\x33.coinbase.chainstorage.EthereumAccountStateResponseH\x00\x42\n\n\x08response\"W\n\x1bValidateRosettaBlockRequest\x12\x38\n\x0cnative_block\x18\x01 \x01(\x0b\x32\".coinbase.chainstorage.NativeBlock*m\n\tSideChain\x12\x12\n\x0eSIDECHAIN_NONE\x10\x00\x12%\n!SIDECHAIN_ETHEREUM_MAINNET_BEACON\x10\x01\x12%\n!SIDECHAIN_ETHEREUM_HOLESKY_BEACON\x10\x02\x42?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_SIDECHAIN']._serialized_start=3709
+ _globals['_SIDECHAIN']._serialized_end=3818
+ _globals['_BLOCK']._serialized_start=518
+ _globals['_BLOCK']._serialized_end=1184
+ _globals['_BLOCKIDENTIFIER']._serialized_start=1186
+ _globals['_BLOCKIDENTIFIER']._serialized_end=1310
+ _globals['_BLOCKMETADATA']._serialized_start=1313
+ _globals['_BLOCKMETADATA']._serialized_end=1504
+ _globals['_TRANSACTIONMETADATA']._serialized_start=1506
+ _globals['_TRANSACTIONMETADATA']._serialized_end=1549
+ _globals['_ROSETTABLOCK']._serialized_start=1551
+ _globals['_ROSETTABLOCK']._serialized_end=1618
+ _globals['_NATIVEBLOCK']._serialized_start=1621
+ _globals['_NATIVEBLOCK']._serialized_end=2379
+ _globals['_NATIVETRANSACTION']._serialized_start=2382
+ _globals['_NATIVETRANSACTION']._serialized_end=2955
+ _globals['_GETACCOUNTPROOFRESPONSE']._serialized_start=2957
+ _globals['_GETACCOUNTPROOFRESPONSE']._serialized_end=3064
+ _globals['_VALIDATEACCOUNTSTATEREQUEST']._serialized_start=3067
+ _globals['_VALIDATEACCOUNTSTATEREQUEST']._serialized_end=3302
+ _globals['_INTERNALGETVERIFIEDACCOUNTSTATEREQUEST']._serialized_start=3305
+ _globals['_INTERNALGETVERIFIEDACCOUNTSTATEREQUEST']._serialized_end=3483
+ _globals['_VALIDATEACCOUNTSTATERESPONSE']._serialized_start=3486
+ _globals['_VALIDATEACCOUNTSTATERESPONSE']._serialized_end=3618
+ _globals['_VALIDATEROSETTABLOCKREQUEST']._serialized_start=3620
+ _globals['_VALIDATEROSETTABLOCKREQUEST']._serialized_end=3707
+# @@protoc_insertion_point(module_scope)
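The `Block` message defined above is the storage envelope: chain-specific payloads sit behind its `blobdata` oneof. A minimal dispatch sketch, again with a hypothetical byte source:

```python
# Sketch only: inspect the envelope and branch on the blobdata oneof.
from coinbase.chainstorage import blockchain_pb2

block = blockchain_pb2.Block()
block.ParseFromString(open("block.bin", "rb").read())  # hypothetical input
print(block.metadata.height, block.metadata.hash)
if block.WhichOneof("blobdata") == "ethereum":
    header_bytes = block.ethereum.header  # opaque raw bytes (EthereumBlobdata.header)
```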
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_pb2_grpc.py
new file mode 100644
index 0000000..1a065aa
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2.py
new file mode 100644
index 0000000..372f7ee
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_rosetta.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_rosetta.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.coinbase/chainstorage/blockchain_rosetta.proto\x12\x15\x63oinbase.chainstorage\"P\n\x0fRosettaBlobdata\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x1a\n\x12other_transactions\x18\x02 \x03(\x0c\x12\x11\n\traw_block\x18\x03 \x01(\x0c\x42?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_rosetta_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_ROSETTABLOBDATA']._serialized_start=73
+ _globals['_ROSETTABLOBDATA']._serialized_end=153
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2_grpc.py
new file mode 100644
index 0000000..c005a5f
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_rosetta_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_rosetta_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2.py b/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2.py
new file mode 100644
index 0000000..d49cfff
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/chainstorage/blockchain_solana.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/chainstorage/blockchain_solana.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-coinbase/chainstorage/blockchain_solana.proto\x12\x15\x63oinbase.chainstorage\x1a\x1fgoogle/protobuf/timestamp.proto\" \n\x0eSolanaBlobdata\x12\x0e\n\x06header\x18\x01 \x01(\x0c\"\xb8\x01\n\x0bSolanaBlock\x12\x33\n\x06header\x18\x01 \x01(\x0b\x32#.coinbase.chainstorage.SolanaHeader\x12>\n\x0ctransactions\x18\x02 \x03(\x0b\x32(.coinbase.chainstorage.SolanaTransaction\x12\x34\n\x07rewards\x18\x03 \x03(\x0b\x32#.coinbase.chainstorage.SolanaReward\"\xbc\x01\n\rSolanaBlockV2\x12\x33\n\x06header\x18\x01 \x01(\x0b\x32#.coinbase.chainstorage.SolanaHeader\x12@\n\x0ctransactions\x18\x02 \x03(\x0b\x32*.coinbase.chainstorage.SolanaTransactionV2\x12\x34\n\x07rewards\x18\x03 \x03(\x0b\x32#.coinbase.chainstorage.SolanaReward\"\xa8\x01\n\x0cSolanaHeader\x12\x12\n\nblock_hash\x18\x01 \x01(\t\x12\x1b\n\x13previous_block_hash\x18\x02 \x01(\t\x12\x0c\n\x04slot\x18\x03 \x01(\x04\x12\x13\n\x0bparent_slot\x18\x04 \x01(\x04\x12.\n\nblock_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x62lock_height\x18\x06 \x01(\x04\"\xba\x01\n\x11SolanaTransaction\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\x12@\n\x07payload\x18\x02 \x01(\x0b\x32/.coinbase.chainstorage.SolanaTransactionPayload\x12:\n\x04meta\x18\x03 \x01(\x0b\x32,.coinbase.chainstorage.SolanaTransactionMeta\x12\x0f\n\x07version\x18\x04 \x01(\x05\"\xc0\x01\n\x13SolanaTransactionV2\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\x12\x42\n\x07payload\x18\x02 \x01(\x0b\x32\x31.coinbase.chainstorage.SolanaTransactionPayloadV2\x12<\n\x04meta\x18\x03 \x01(\x0b\x32..coinbase.chainstorage.SolanaTransactionMetaV2\x12\x0f\n\x07version\x18\x04 \x01(\x05\"\x84\x03\n\x15SolanaTransactionMeta\x12\x0b\n\x03\x65rr\x18\x01 \x01(\t\x12\x0b\n\x03\x66\x65\x65\x18\x02 \x01(\x04\x12\x14\n\x0cpre_balances\x18\x03 \x03(\x04\x12\x15\n\rpost_balances\x18\x04 \x03(\x04\x12\x45\n\x12pre_token_balances\x18\x05 \x03(\x0b\x32).coinbase.chainstorage.SolanaTokenBalance\x12\x46\n\x13post_token_balances\x18\x06 \x03(\x0b\x32).coinbase.chainstorage.SolanaTokenBalance\x12I\n\x12inner_instructions\x18\x07 \x03(\x0b\x32-.coinbase.chainstorage.SolanaInnerInstruction\x12\x14\n\x0clog_messages\x18\x08 \x03(\t\x12\x34\n\x07rewards\x18\t \x03(\x0b\x32#.coinbase.chainstorage.SolanaReward\"\x88\x03\n\x17SolanaTransactionMetaV2\x12\x0b\n\x03\x65rr\x18\x01 \x01(\t\x12\x0b\n\x03\x66\x65\x65\x18\x02 \x01(\x04\x12\x14\n\x0cpre_balances\x18\x03 \x03(\x04\x12\x15\n\rpost_balances\x18\x04 \x03(\x04\x12\x45\n\x12pre_token_balances\x18\x05 \x03(\x0b\x32).coinbase.chainstorage.SolanaTokenBalance\x12\x46\n\x13post_token_balances\x18\x06 \x03(\x0b\x32).coinbase.chainstorage.SolanaTokenBalance\x12K\n\x12inner_instructions\x18\x07 \x03(\x0b\x32/.coinbase.chainstorage.SolanaInnerInstructionV2\x12\x14\n\x0clog_messages\x18\x08 \x03(\t\x12\x34\n\x07rewards\x18\t \x03(\x0b\x32#.coinbase.chainstorage.SolanaReward\"\x88\x01\n\x12SolanaTokenBalance\x12\x15\n\raccount_index\x18\x01 \x01(\x04\x12\x0c\n\x04mint\x18\x02 \x01(\t\x12>\n\x0ctoken_amount\x18\x03 \x01(\x0b\x32(.coinbase.chainstorage.SolanaTokenAmount\x12\r\n\x05owner\x18\x04 \x01(\t\"O\n\x11SolanaTokenAmount\x12\x0e\n\x06\x61mount\x18\x01 \x01(\t\x12\x10\n\x08\x64\x65\x63imals\x18\x02 \x01(\x04\x12\x18\n\x10ui_amount_string\x18\x03 \x01(\t\"g\n\x16SolanaInnerInstruction\x12\r\n\x05index\x18\x01 \x01(\x04\x12>\n\x0cinstructions\x18\x02 \x03(\x0b\x32(.coinbase.chainstorage.SolanaInstruction\"k\n\x18SolanaInnerInstructionV2\x12\r\n\x05index\x18\x01 \x01(\x04\x12@\n\x0cinstructions\x18\x02 \x03(\x0b\x32*.coinbase.chainstorage.SolanaInstructionV2\"\x88\x01\n\x0cSolanaReward\x12\x0e\n\x06pubkey\x18\x01 \x01(\x0c\x12\x10\n\x08lamports\x18\x02 \x01(\x03\x12\x14\n\x0cpost_balance\x18\x03 \x01(\x04\x12\x13\n\x0breward_type\x18\x04 \x01(\t\x12\x14\n\ncommission\x18\x05 \x01(\x04H\x00\x42\x15\n\x13optional_commission\"e\n\x18SolanaTransactionPayload\x12\x12\n\nsignatures\x18\x01 \x03(\t\x12\x35\n\x07message\x18\x02 \x01(\x0b\x32$.coinbase.chainstorage.SolanaMessage\"i\n\x1aSolanaTransactionPayloadV2\x12\x12\n\nsignatures\x18\x01 \x03(\t\x12\x37\n\x07message\x18\x02 \x01(\x0b\x32&.coinbase.chainstorage.SolanaMessageV2\"\xde\x01\n\rSolanaMessage\x12:\n\x06header\x18\x01 \x01(\x0b\x32*.coinbase.chainstorage.SolanaMessageHeader\x12\x19\n\x11recent_block_hash\x18\x03 \x01(\t\x12>\n\x0cinstructions\x18\x04 \x03(\x0b\x32(.coinbase.chainstorage.SolanaInstruction\x12\x36\n\x08\x61\x63\x63ounts\x18\x05 \x03(\x0b\x32$.coinbase.chainstorage.SolanaAccount\"\xf1\x01\n\x0fSolanaMessageV2\x12\x37\n\x0c\x61\x63\x63ount_keys\x18\x01 \x03(\x0b\x32!.coinbase.chainstorage.AccountKey\x12H\n\x15\x61\x64\x64ress_table_lookups\x18\x02 \x03(\x0b\x32).coinbase.chainstorage.AddressTableLookup\x12@\n\x0cinstructions\x18\x03 \x03(\x0b\x32*.coinbase.chainstorage.SolanaInstructionV2\x12\x19\n\x11recent_block_hash\x18\x04 \x01(\t\"N\n\nAccountKey\x12\x0e\n\x06pubkey\x18\x01 \x01(\t\x12\x0e\n\x06signer\x18\x02 \x01(\x08\x12\x0e\n\x06source\x18\x03 \x01(\t\x12\x10\n\x08writable\x18\x04 \x01(\x08\"]\n\x12\x41\x64\x64ressTableLookup\x12\x13\n\x0b\x61\x63\x63ount_key\x18\x01 \x01(\t\x12\x18\n\x10readonly_indexes\x18\x02 \x03(\x04\x12\x18\n\x10writable_indexes\x18\x03 \x03(\x04\"\x84\x01\n\x13SolanaMessageHeader\x12\x1f\n\x17num_required_signatures\x18\x01 \x01(\x04\x12$\n\x1cnum_readonly_signed_accounts\x18\x02 \x01(\x04\x12&\n\x1enum_readonly_unsigned_accounts\x18\x03 \x01(\x04\"w\n\x11SolanaInstruction\x12\x18\n\x10program_id_index\x18\x01 \x01(\x04\x12\x10\n\x08\x61\x63\x63ounts\x18\x02 \x03(\x04\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\x12\n\nprogram_id\x18\x04 \x01(\t\x12\x14\n\x0c\x61\x63\x63ount_keys\x18\x05 \x03(\t\"6\n\x14SolanaRawInstruction\x12\x10\n\x08\x61\x63\x63ounts\x18\x01 \x03(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xec\x07\n\x13SolanaInstructionV2\x12\x35\n\x07program\x18\x01 \x01(\x0e\x32$.coinbase.chainstorage.SolanaProgram\x12\x12\n\nprogram_id\x18\x02 \x01(\t\x12\x46\n\x0fraw_instruction\x18\x64 \x01(\x0b\x32+.coinbase.chainstorage.SolanaRawInstructionH\x00\x12^\n\x1c\x61\x64\x64ress_lookup_table_program\x18\x65 \x01(\x0b\x32\x36.coinbase.chainstorage.SolanaAddressLookupTableProgramH\x00\x12K\n\x12\x62pf_loader_program\x18\x66 \x01(\x0b\x32-.coinbase.chainstorage.SolanaBpfLoaderProgramH\x00\x12\x62\n\x1e\x62pf_upgradeable_loader_program\x18g \x01(\x0b\x32\x38.coinbase.chainstorage.SolanaBpfUpgradeableLoaderProgramH\x00\x12@\n\x0cvote_program\x18h \x01(\x0b\x32(.coinbase.chainstorage.SolanaVoteProgramH\x00\x12\x44\n\x0esystem_program\x18i \x01(\x0b\x32*.coinbase.chainstorage.SolanaSystemProgramH\x00\x12\x42\n\rstake_program\x18j \x01(\x0b\x32).coinbase.chainstorage.SolanaStakeProgramH\x00\x12G\n\x10spl_memo_program\x18k \x01(\x0b\x32+.coinbase.chainstorage.SolanaSplMemoProgramH\x00\x12I\n\x11spl_token_program\x18l \x01(\x0b\x32,.coinbase.chainstorage.SolanaSplTokenProgramH\x00\x12R\n\x16spl_token_2022_program\x18m \x01(\x0b\x32\x30.coinbase.chainstorage.SolanaSplToken2022ProgramH\x00\x12m\n$spl_associated_token_account_program\x18n \x01(\x0b\x32=.coinbase.chainstorage.SolanaSplAssociatedTokenAccountProgramH\x00\x42\x0e\n\x0cprogram_data\"\xf6\x01\n\x1fSolanaAddressLookupTableProgram\x12`\n\x10instruction_type\x18\x01 \x01(\x0e\x32\x46.coinbase.chainstorage.SolanaAddressLookupTableProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\"\x1e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x42\r\n\x0binstruction\"\xe4\x01\n\x16SolanaBpfLoaderProgram\x12W\n\x10instruction_type\x18\x01 \x01(\x0e\x32=.coinbase.chainstorage.SolanaBpfLoaderProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\"\x1e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x42\r\n\x0binstruction\"\xfa\x01\n!SolanaBpfUpgradeableLoaderProgram\x12\x62\n\x10instruction_type\x18\x01 \x01(\x0e\x32H.coinbase.chainstorage.SolanaBpfUpgradeableLoaderProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\"\x1e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x42\r\n\x0binstruction\"\xe4\x04\n\x11SolanaVoteProgram\x12R\n\x10instruction_type\x18\x01 \x01(\x0e\x32\x38.coinbase.chainstorage.SolanaVoteProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\x12L\n\ninitialize\x18\x65 \x01(\x0b\x32\x36.coinbase.chainstorage.SolanaVoteInitializeInstructionH\x00\x12@\n\x04vote\x18\x66 \x01(\x0b\x32\x30.coinbase.chainstorage.SolanaVoteVoteInstructionH\x00\x12H\n\x08withdraw\x18g \x01(\x0b\x32\x34.coinbase.chainstorage.SolanaVoteWithdrawInstructionH\x00\x12g\n\x19\x63ompact_update_vote_state\x18h \x01(\x0b\x32\x42.coinbase.chainstorage.SolanaVoteCompactUpdateVoteStateInstructionH\x00\"e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nINITIALIZE\x10\x01\x12\x08\n\x04VOTE\x10\x02\x12\x0c\n\x08WITHDRAW\x10\x03\x12\x1d\n\x19\x43OMPACT_UPDATE_VOTE_STATE\x10\x04\x42\r\n\x0binstruction\"\xa0\x05\n\x13SolanaSystemProgram\x12T\n\x10instruction_type\x18\x01 \x01(\x0e\x32:.coinbase.chainstorage.SolanaSystemProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\x12U\n\x0e\x63reate_account\x18\x65 \x01(\x0b\x32;.coinbase.chainstorage.SolanaSystemCreateAccountInstructionH\x00\x12J\n\x08transfer\x18\x66 \x01(\x0b\x32\x36.coinbase.chainstorage.SolanaSystemTransferInstructionH\x00\x12g\n\x18\x63reate_account_with_seed\x18g \x01(\x0b\x32\x43.coinbase.chainstorage.SolanaSystemCreateAccountWithSeedInstructionH\x00\x12\\\n\x12transfer_with_seed\x18h \x01(\x0b\x32>.coinbase.chainstorage.SolanaSystemTransferWithSeedInstructionH\x00\"v\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x12\n\x0e\x43REATE_ACCOUNT\x10\x01\x12\x0c\n\x08TRANSFER\x10\x02\x12\x1c\n\x18\x43REATE_ACCOUNT_WITH_SEED\x10\x03\x12\x16\n\x12TRANSFER_WITH_SEED\x10\x04\x42\r\n\x0binstruction\"\xec\x05\n\x12SolanaStakeProgram\x12S\n\x10instruction_type\x18\x01 \x01(\x0e\x32\x39.coinbase.chainstorage.SolanaStakeProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\x12M\n\ninitialize\x18\x65 \x01(\x0b\x32\x37.coinbase.chainstorage.SolanaStakeInitializeInstructionH\x00\x12I\n\x08\x64\x65legate\x18\x66 \x01(\x0b\x32\x35.coinbase.chainstorage.SolanaStakeDelegateInstructionH\x00\x12M\n\ndeactivate\x18g \x01(\x0b\x32\x37.coinbase.chainstorage.SolanaStakeDeactivateInstructionH\x00\x12\x43\n\x05merge\x18h \x01(\x0b\x32\x32.coinbase.chainstorage.SolanaStakeMergeInstructionH\x00\x12\x43\n\x05split\x18i \x01(\x0b\x32\x32.coinbase.chainstorage.SolanaStakeSplitInstructionH\x00\x12I\n\x08withdraw\x18j \x01(\x0b\x32\x35.coinbase.chainstorage.SolanaStakeWithdrawInstructionH\x00\"p\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nINITIALIZE\x10\x01\x12\x0c\n\x08\x44\x45LEGATE\x10\x02\x12\x0e\n\nDEACTIVATE\x10\x03\x12\t\n\x05MERGE\x10\x04\x12\t\n\x05SPLIT\x10\x05\x12\x0c\n\x08WITHDRAW\x10\x06\x42\r\n\x0binstruction\"\xde\x01\n\x14SolanaSplMemoProgram\x12U\n\x10instruction_type\x18\x01 \x01(\x0e\x32;.coinbase.chainstorage.SolanaSplMemoProgram.InstructionType\x12?\n\x04memo\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaSplMemoInstructionH\x00\"\x1f\n\x0fInstructionType\x12\x0c\n\x08SPL_MEMO\x10\x00\x42\r\n\x0binstruction\"\xce\x04\n\x15SolanaSplTokenProgram\x12V\n\x10instruction_type\x18\x01 \x01(\x0e\x32<.coinbase.chainstorage.SolanaSplTokenProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\x12\x63\n\x15get_account_data_size\x18\x65 \x01(\x0b\x32\x42.coinbase.chainstorage.SolanaSplTokenGetAccountDataSizeInstructionH\x00\x12n\n\x1ainitialize_immutable_owner\x18\x66 \x01(\x0b\x32H.coinbase.chainstorage.SolanaSplTokenInitializeImmutableOwnerInstructionH\x00\x12L\n\x08transfer\x18g \x01(\x0b\x32\x38.coinbase.chainstorage.SolanaSplTokenTransferInstructionH\x00\"g\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x19\n\x15GET_ACCOUNT_DATA_SIZE\x10\x01\x12\x1e\n\x1aINITIALIZE_IMMUTABLE_OWNER\x10\x02\x12\x0c\n\x08TRANSFER\x10\x03\x42\r\n\x0binstruction\"\xea\x01\n\x19SolanaSplToken2022Program\x12Z\n\x10instruction_type\x18\x01 \x01(\x0e\x32@.coinbase.chainstorage.SolanaSplToken2022Program.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\"\x1e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x42\r\n\x0binstruction\"\x84\x02\n&SolanaSplAssociatedTokenAccountProgram\x12g\n\x10instruction_type\x18\x01 \x01(\x0e\x32M.coinbase.chainstorage.SolanaSplAssociatedTokenAccountProgram.InstructionType\x12\x42\n\x07unknown\x18\x64 \x01(\x0b\x32/.coinbase.chainstorage.SolanaUnknownInstructionH\x00\"\x1e\n\x0fInstructionType\x12\x0b\n\x07UNKNOWN\x10\x00\x42\r\n\x0binstruction\"(\n\x18SolanaUnknownInstruction\x12\x0c\n\x04info\x18\x01 \x01(\x0c\"\xbd\x01\n\x1fSolanaVoteInitializeInstruction\x12\x14\n\x0cvote_account\x18\x01 \x01(\t\x12\x13\n\x0brent_sysvar\x18\x02 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x03 \x01(\t\x12\x0c\n\x04node\x18\x04 \x01(\t\x12\x18\n\x10\x61uthorized_voter\x18\x05 \x01(\t\x12\x1d\n\x15\x61uthorized_withdrawer\x18\x06 \x01(\t\x12\x12\n\ncommission\x18\x07 \x01(\r\"\x94\x02\n\x19SolanaVoteVoteInstruction\x12\x14\n\x0cvote_account\x18\x01 \x01(\t\x12\x1a\n\x12slot_hashes_sysvar\x18\x02 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x03 \x01(\t\x12\x16\n\x0evote_authority\x18\x04 \x01(\t\x12\x43\n\x04vote\x18\x05 \x01(\x0b\x32\x35.coinbase.chainstorage.SolanaVoteVoteInstruction.Vote\x1aR\n\x04Vote\x12\r\n\x05slots\x18\x01 \x03(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\t\x12-\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"x\n\x1dSolanaVoteWithdrawInstruction\x12\x14\n\x0cvote_account\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x02 \x01(\t\x12\x1a\n\x12withdraw_authority\x18\x03 \x01(\t\x12\x10\n\x08lamports\x18\x04 \x01(\x04\"\xbc\x03\n+SolanaVoteCompactUpdateVoteStateInstruction\x12\x14\n\x0cvote_account\x18\x01 \x01(\t\x12\x16\n\x0evote_authority\x18\x02 \x01(\t\x12m\n\x11vote_state_update\x18\x03 \x01(\x0b\x32R.coinbase.chainstorage.SolanaVoteCompactUpdateVoteStateInstruction.VoteStateUpdate\x1a\x33\n\x07Lockout\x12\x1a\n\x12\x63onfirmation_count\x18\x01 \x01(\x04\x12\x0c\n\x04slot\x18\x02 \x01(\x04\x1a\xba\x01\n\x0fVoteStateUpdate\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\\\n\x08lockouts\x18\x02 \x03(\x0b\x32J.coinbase.chainstorage.SolanaVoteCompactUpdateVoteStateInstruction.Lockout\x12\x0c\n\x04root\x18\x03 \x01(\x04\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"{\n$SolanaSystemCreateAccountInstruction\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x13\n\x0bnew_account\x18\x02 \x01(\t\x12\x10\n\x08lamports\x18\x03 \x01(\x04\x12\r\n\x05space\x18\x04 \x01(\x04\x12\r\n\x05owner\x18\x05 \x01(\t\"X\n\x1fSolanaSystemTransferInstruction\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x02 \x01(\t\x12\x10\n\x08lamports\x18\x03 \x01(\x04\"\x9f\x01\n,SolanaSystemCreateAccountWithSeedInstruction\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x13\n\x0bnew_account\x18\x02 \x01(\t\x12\x0c\n\x04\x62\x61se\x18\x03 \x01(\t\x12\x0c\n\x04seed\x18\x04 \x01(\t\x12\x10\n\x08lamports\x18\x05 \x01(\x04\x12\r\n\x05space\x18\x06 \x01(\x04\x12\r\n\x05owner\x18\x07 \x01(\t\"\xa0\x01\n\'SolanaSystemTransferWithSeedInstruction\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x13\n\x0bsource_base\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x03 \x01(\t\x12\x10\n\x08lamports\x18\x04 \x01(\x04\x12\x13\n\x0bsource_seed\x18\x05 \x01(\t\x12\x14\n\x0csource_owner\x18\x06 \x01(\t\"\xec\x02\n SolanaStakeInitializeInstruction\x12\x15\n\rstake_account\x18\x01 \x01(\t\x12\x13\n\x0brent_sysvar\x18\x02 \x01(\t\x12V\n\nauthorized\x18\x03 \x01(\x0b\x32\x42.coinbase.chainstorage.SolanaStakeInitializeInstruction.Authorized\x12N\n\x06lockup\x18\x04 \x01(\x0b\x32>.coinbase.chainstorage.SolanaStakeInitializeInstruction.Lockup\x1a\x30\n\nAuthorized\x12\x0e\n\x06staker\x18\x01 \x01(\t\x12\x12\n\nwithdrawer\x18\x02 \x01(\t\x1a\x42\n\x06Lockup\x12\x16\n\x0eunix_timestamp\x18\x01 \x01(\x03\x12\r\n\x05\x65poch\x18\x02 \x01(\x04\x12\x11\n\tcustodian\x18\x03 \x01(\t\"\xb8\x01\n\x1eSolanaStakeDelegateInstruction\x12\x15\n\rstake_account\x18\x01 \x01(\t\x12\x14\n\x0cvote_account\x18\x02 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x03 \x01(\t\x12\x1c\n\x14stake_history_sysvar\x18\x04 \x01(\t\x12\x1c\n\x14stake_config_account\x18\x05 \x01(\t\x12\x17\n\x0fstake_authority\x18\x06 \x01(\t\"h\n SolanaStakeDeactivateInstruction\x12\x15\n\rstake_account\x18\x01 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x02 \x01(\t\x12\x17\n\x0fstake_authority\x18\x03 \x01(\t\"\x8f\x01\n\x1bSolanaStakeMergeInstruction\x12\x13\n\x0b\x64\x65stination\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x03 \x01(\t\x12\x1c\n\x14stake_history_sysvar\x18\x04 \x01(\t\x12\x17\n\x0fstake_authority\x18\x05 \x01(\t\"z\n\x1bSolanaStakeSplitInstruction\x12\x15\n\rstake_account\x18\x01 \x01(\t\x12\x19\n\x11new_split_account\x18\x02 \x01(\t\x12\x17\n\x0fstake_authority\x18\x03 \x01(\t\x12\x10\n\x08lamports\x18\x04 \x01(\x04\"\xae\x01\n\x1eSolanaStakeWithdrawInstruction\x12\x15\n\rstake_account\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x02 \x01(\t\x12\x14\n\x0c\x63lock_sysvar\x18\x03 \x01(\t\x12\x1c\n\x14stake_history_sysvar\x18\x04 \x01(\t\x12\x1a\n\x12withdraw_authority\x18\x05 \x01(\t\x12\x10\n\x08lamports\x18\x06 \x01(\x04\"(\n\x18SolanaSplMemoInstruction\x12\x0c\n\x04memo\x18\x01 \x01(\t\"T\n+SolanaSplTokenGetAccountDataSizeInstruction\x12\x0c\n\x04mint\x18\x01 \x01(\t\x12\x17\n\x0f\x65xtension_types\x18\x02 \x03(\t\"D\n1SolanaSplTokenInitializeImmutableOwnerInstruction\x12\x0f\n\x07\x61\x63\x63ount\x18\x01 \x01(\t\"k\n!SolanaSplTokenTransferInstruction\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x02 \x01(\t\x12\x11\n\tauthority\x18\x03 \x01(\t\x12\x0e\n\x06\x61mount\x18\x04 \x01(\t\"E\n\rSolanaAccount\x12\x12\n\npublic_key\x18\x01 \x01(\t\x12\x0e\n\x06signer\x18\x02 \x01(\x08\x12\x10\n\x08writable\x18\x03 \x01(\x08*\xe0\x01\n\rSolanaProgram\x12\x07\n\x03RAW\x10\x00\x12\x18\n\x14\x41\x44\x44RESS_LOOKUP_TABLE\x10\x01\x12\x0e\n\nBPF_Loader\x10\x02\x12\x1a\n\x16\x42PF_UPGRADEABLE_Loader\x10\x03\x12\x08\n\x04VOTE\x10\x04\x12\n\n\x06SYSTEM\x10\x05\x12\t\n\x05STAKE\x10\x06\x12\x0c\n\x08SPL_MEMO\x10\x07\x12\r\n\tSPL_TOKEN\x10\x08\x12\x12\n\x0eSPL_TOKEN_2022\x10\t\x12 \n\x1cSPL_ASSOCIATED_TOKEN_ACCOUNT\x10\n\x12\x0c\n\x08UNPARSED\x10\x0b\x42?Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorageb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.chainstorage.blockchain_solana_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'Z=github.com/coinbase/chainstorage/protos/coinbase/chainstorage'
+ _globals['_SOLANAPROGRAM']._serialized_start=11804
+ _globals['_SOLANAPROGRAM']._serialized_end=12028
+ _globals['_SOLANABLOBDATA']._serialized_start=105
+ _globals['_SOLANABLOBDATA']._serialized_end=137
+ _globals['_SOLANABLOCK']._serialized_start=140
+ _globals['_SOLANABLOCK']._serialized_end=324
+ _globals['_SOLANABLOCKV2']._serialized_start=327
+ _globals['_SOLANABLOCKV2']._serialized_end=515
+ _globals['_SOLANAHEADER']._serialized_start=518
+ _globals['_SOLANAHEADER']._serialized_end=686
+ _globals['_SOLANATRANSACTION']._serialized_start=689
+ _globals['_SOLANATRANSACTION']._serialized_end=875
+ _globals['_SOLANATRANSACTIONV2']._serialized_start=878
+ _globals['_SOLANATRANSACTIONV2']._serialized_end=1070
+ _globals['_SOLANATRANSACTIONMETA']._serialized_start=1073
+ _globals['_SOLANATRANSACTIONMETA']._serialized_end=1461
+ _globals['_SOLANATRANSACTIONMETAV2']._serialized_start=1464
+ _globals['_SOLANATRANSACTIONMETAV2']._serialized_end=1856
+ _globals['_SOLANATOKENBALANCE']._serialized_start=1859
+ _globals['_SOLANATOKENBALANCE']._serialized_end=1995
+ _globals['_SOLANATOKENAMOUNT']._serialized_start=1997
+ _globals['_SOLANATOKENAMOUNT']._serialized_end=2076
+ _globals['_SOLANAINNERINSTRUCTION']._serialized_start=2078
+ _globals['_SOLANAINNERINSTRUCTION']._serialized_end=2181
+ _globals['_SOLANAINNERINSTRUCTIONV2']._serialized_start=2183
+ _globals['_SOLANAINNERINSTRUCTIONV2']._serialized_end=2290
+ _globals['_SOLANAREWARD']._serialized_start=2293
+ _globals['_SOLANAREWARD']._serialized_end=2429
+ _globals['_SOLANATRANSACTIONPAYLOAD']._serialized_start=2431
+ _globals['_SOLANATRANSACTIONPAYLOAD']._serialized_end=2532
+ _globals['_SOLANATRANSACTIONPAYLOADV2']._serialized_start=2534
+ _globals['_SOLANATRANSACTIONPAYLOADV2']._serialized_end=2639
+ _globals['_SOLANAMESSAGE']._serialized_start=2642
+ _globals['_SOLANAMESSAGE']._serialized_end=2864
+ _globals['_SOLANAMESSAGEV2']._serialized_start=2867
+ _globals['_SOLANAMESSAGEV2']._serialized_end=3108
+ _globals['_ACCOUNTKEY']._serialized_start=3110
+ _globals['_ACCOUNTKEY']._serialized_end=3188
+ _globals['_ADDRESSTABLELOOKUP']._serialized_start=3190
+ _globals['_ADDRESSTABLELOOKUP']._serialized_end=3283
+ _globals['_SOLANAMESSAGEHEADER']._serialized_start=3286
+ _globals['_SOLANAMESSAGEHEADER']._serialized_end=3418
+ _globals['_SOLANAINSTRUCTION']._serialized_start=3420
+ _globals['_SOLANAINSTRUCTION']._serialized_end=3539
+ _globals['_SOLANARAWINSTRUCTION']._serialized_start=3541
+ _globals['_SOLANARAWINSTRUCTION']._serialized_end=3595
+ _globals['_SOLANAINSTRUCTIONV2']._serialized_start=3598
+ _globals['_SOLANAINSTRUCTIONV2']._serialized_end=4602
+ _globals['_SOLANAADDRESSLOOKUPTABLEPROGRAM']._serialized_start=4605
+ _globals['_SOLANAADDRESSLOOKUPTABLEPROGRAM']._serialized_end=4851
+ _globals['_SOLANAADDRESSLOOKUPTABLEPROGRAM_INSTRUCTIONTYPE']._serialized_start=4806
+ _globals['_SOLANAADDRESSLOOKUPTABLEPROGRAM_INSTRUCTIONTYPE']._serialized_end=4836
+ _globals['_SOLANABPFLOADERPROGRAM']._serialized_start=4854
+ _globals['_SOLANABPFLOADERPROGRAM']._serialized_end=5082
+ _globals['_SOLANABPFLOADERPROGRAM_INSTRUCTIONTYPE']._serialized_start=4806
+ _globals['_SOLANABPFLOADERPROGRAM_INSTRUCTIONTYPE']._serialized_end=4836
+ _globals['_SOLANABPFUPGRADEABLELOADERPROGRAM']._serialized_start=5085
+ _globals['_SOLANABPFUPGRADEABLELOADERPROGRAM']._serialized_end=5335
+ _globals['_SOLANABPFUPGRADEABLELOADERPROGRAM_INSTRUCTIONTYPE']._serialized_start=4806
+ _globals['_SOLANABPFUPGRADEABLELOADERPROGRAM_INSTRUCTIONTYPE']._serialized_end=4836
+ _globals['_SOLANAVOTEPROGRAM']._serialized_start=5338
+ _globals['_SOLANAVOTEPROGRAM']._serialized_end=5950
+ _globals['_SOLANAVOTEPROGRAM_INSTRUCTIONTYPE']._serialized_start=5834
+ _globals['_SOLANAVOTEPROGRAM_INSTRUCTIONTYPE']._serialized_end=5935
+ _globals['_SOLANASYSTEMPROGRAM']._serialized_start=5953
+ _globals['_SOLANASYSTEMPROGRAM']._serialized_end=6625
+ _globals['_SOLANASYSTEMPROGRAM_INSTRUCTIONTYPE']._serialized_start=6492
+ _globals['_SOLANASYSTEMPROGRAM_INSTRUCTIONTYPE']._serialized_end=6610
+ _globals['_SOLANASTAKEPROGRAM']._serialized_start=6628
+ _globals['_SOLANASTAKEPROGRAM']._serialized_end=7376
+ _globals['_SOLANASTAKEPROGRAM_INSTRUCTIONTYPE']._serialized_start=7249
+ _globals['_SOLANASTAKEPROGRAM_INSTRUCTIONTYPE']._serialized_end=7361
+ _globals['_SOLANASPLMEMOPROGRAM']._serialized_start=7379
+ _globals['_SOLANASPLMEMOPROGRAM']._serialized_end=7601
+ _globals['_SOLANASPLMEMOPROGRAM_INSTRUCTIONTYPE']._serialized_start=7555
+ _globals['_SOLANASPLMEMOPROGRAM_INSTRUCTIONTYPE']._serialized_end=7586
+ _globals['_SOLANASPLTOKENPROGRAM']._serialized_start=7604
+ _globals['_SOLANASPLTOKENPROGRAM']._serialized_end=8194
+ _globals['_SOLANASPLTOKENPROGRAM_INSTRUCTIONTYPE']._serialized_start=8076
+ _globals['_SOLANASPLTOKENPROGRAM_INSTRUCTIONTYPE']._serialized_end=8179
+ _globals['_SOLANASPLTOKEN2022PROGRAM']._serialized_start=8197
+ _globals['_SOLANASPLTOKEN2022PROGRAM']._serialized_end=8431
+ _globals['_SOLANASPLTOKEN2022PROGRAM_INSTRUCTIONTYPE']._serialized_start=4806
+ _globals['_SOLANASPLTOKEN2022PROGRAM_INSTRUCTIONTYPE']._serialized_end=4836
+ _globals['_SOLANASPLASSOCIATEDTOKENACCOUNTPROGRAM']._serialized_start=8434
+ _globals['_SOLANASPLASSOCIATEDTOKENACCOUNTPROGRAM']._serialized_end=8694
+ _globals['_SOLANASPLASSOCIATEDTOKENACCOUNTPROGRAM_INSTRUCTIONTYPE']._serialized_start=4806
+ _globals['_SOLANASPLASSOCIATEDTOKENACCOUNTPROGRAM_INSTRUCTIONTYPE']._serialized_end=4836
+ _globals['_SOLANAUNKNOWNINSTRUCTION']._serialized_start=8696
+ _globals['_SOLANAUNKNOWNINSTRUCTION']._serialized_end=8736
+ _globals['_SOLANAVOTEINITIALIZEINSTRUCTION']._serialized_start=8739
+ _globals['_SOLANAVOTEINITIALIZEINSTRUCTION']._serialized_end=8928
+ _globals['_SOLANAVOTEVOTEINSTRUCTION']._serialized_start=8931
+ _globals['_SOLANAVOTEVOTEINSTRUCTION']._serialized_end=9207
+ _globals['_SOLANAVOTEVOTEINSTRUCTION_VOTE']._serialized_start=9125
+ _globals['_SOLANAVOTEVOTEINSTRUCTION_VOTE']._serialized_end=9207
+ _globals['_SOLANAVOTEWITHDRAWINSTRUCTION']._serialized_start=9209
+ _globals['_SOLANAVOTEWITHDRAWINSTRUCTION']._serialized_end=9329
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION']._serialized_start=9332
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION']._serialized_end=9776
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION_LOCKOUT']._serialized_start=9536
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION_LOCKOUT']._serialized_end=9587
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION_VOTESTATEUPDATE']._serialized_start=9590
+ _globals['_SOLANAVOTECOMPACTUPDATEVOTESTATEINSTRUCTION_VOTESTATEUPDATE']._serialized_end=9776
+ _globals['_SOLANASYSTEMCREATEACCOUNTINSTRUCTION']._serialized_start=9778
+ _globals['_SOLANASYSTEMCREATEACCOUNTINSTRUCTION']._serialized_end=9901
+ _globals['_SOLANASYSTEMTRANSFERINSTRUCTION']._serialized_start=9903
+ _globals['_SOLANASYSTEMTRANSFERINSTRUCTION']._serialized_end=9991
+ _globals['_SOLANASYSTEMCREATEACCOUNTWITHSEEDINSTRUCTION']._serialized_start=9994
+ _globals['_SOLANASYSTEMCREATEACCOUNTWITHSEEDINSTRUCTION']._serialized_end=10153
+ _globals['_SOLANASYSTEMTRANSFERWITHSEEDINSTRUCTION']._serialized_start=10156
+ _globals['_SOLANASYSTEMTRANSFERWITHSEEDINSTRUCTION']._serialized_end=10316
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION']._serialized_start=10319
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION']._serialized_end=10683
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION_AUTHORIZED']._serialized_start=10567
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION_AUTHORIZED']._serialized_end=10615
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION_LOCKUP']._serialized_start=10617
+ _globals['_SOLANASTAKEINITIALIZEINSTRUCTION_LOCKUP']._serialized_end=10683
+ _globals['_SOLANASTAKEDELEGATEINSTRUCTION']._serialized_start=10686
+ _globals['_SOLANASTAKEDELEGATEINSTRUCTION']._serialized_end=10870
+ _globals['_SOLANASTAKEDEACTIVATEINSTRUCTION']._serialized_start=10872
+ _globals['_SOLANASTAKEDEACTIVATEINSTRUCTION']._serialized_end=10976
+ _globals['_SOLANASTAKEMERGEINSTRUCTION']._serialized_start=10979
+ _globals['_SOLANASTAKEMERGEINSTRUCTION']._serialized_end=11122
+ _globals['_SOLANASTAKESPLITINSTRUCTION']._serialized_start=11124
+ _globals['_SOLANASTAKESPLITINSTRUCTION']._serialized_end=11246
+ _globals['_SOLANASTAKEWITHDRAWINSTRUCTION']._serialized_start=11249
+ _globals['_SOLANASTAKEWITHDRAWINSTRUCTION']._serialized_end=11423
+ _globals['_SOLANASPLMEMOINSTRUCTION']._serialized_start=11425
+ _globals['_SOLANASPLMEMOINSTRUCTION']._serialized_end=11465
+ _globals['_SOLANASPLTOKENGETACCOUNTDATASIZEINSTRUCTION']._serialized_start=11467
+ _globals['_SOLANASPLTOKENGETACCOUNTDATASIZEINSTRUCTION']._serialized_end=11551
+ _globals['_SOLANASPLTOKENINITIALIZEIMMUTABLEOWNERINSTRUCTION']._serialized_start=11553
+ _globals['_SOLANASPLTOKENINITIALIZEIMMUTABLEOWNERINSTRUCTION']._serialized_end=11621
+ _globals['_SOLANASPLTOKENTRANSFERINSTRUCTION']._serialized_start=11623
+ _globals['_SOLANASPLTOKENTRANSFERINSTRUCTION']._serialized_end=11730
+ _globals['_SOLANAACCOUNT']._serialized_start=11732
+ _globals['_SOLANAACCOUNT']._serialized_end=11801
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2_grpc.py b/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2_grpc.py
new file mode 100644
index 0000000..443a7c3
--- /dev/null
+++ b/gen/src/python/coinbase/chainstorage/blockchain_solana_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/chainstorage/blockchain_solana_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2.py
new file mode 100644
index 0000000..14b3a61
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/account_identifer.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/account_identifer.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5coinbase/crypto/rosetta/types/account_identifer.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\"\x87\x02\n\x11\x41\x63\x63ountIdentifier\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12H\n\x0bsub_account\x18\x02 \x01(\x0b\x32\x33.coinbase.crypto.rosetta.types.SubAccountIdentifier\x12P\n\x08metadata\x18\x03 \x03(\x0b\x32>.coinbase.crypto.rosetta.types.AccountIdentifier.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\"\xc3\x01\n\x14SubAccountIdentifier\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12S\n\x08metadata\x18\x02 \x03(\x0b\x32\x41.coinbase.crypto.rosetta.types.SubAccountIdentifier.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\x42GZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.account_identifer_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_ACCOUNTIDENTIFIER_METADATAENTRY']._loaded_options = None
+ _globals['_ACCOUNTIDENTIFIER_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_SUBACCOUNTIDENTIFIER_METADATAENTRY']._loaded_options = None
+ _globals['_SUBACCOUNTIDENTIFIER_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_ACCOUNTIDENTIFIER']._serialized_start=116
+ _globals['_ACCOUNTIDENTIFIER']._serialized_end=379
+ _globals['_ACCOUNTIDENTIFIER_METADATAENTRY']._serialized_start=310
+ _globals['_ACCOUNTIDENTIFIER_METADATAENTRY']._serialized_end=379
+ _globals['_SUBACCOUNTIDENTIFIER']._serialized_start=382
+ _globals['_SUBACCOUNTIDENTIFIER']._serialized_end=577
+ _globals['_SUBACCOUNTIDENTIFIER_METADATAENTRY']._serialized_start=310
+ _globals['_SUBACCOUNTIDENTIFIER_METADATAENTRY']._serialized_end=379
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2_grpc.py
new file mode 100644
index 0000000..c9e38c2
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/account_identifer_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/account_identifer_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2.py
new file mode 100644
index 0000000..adfbb1e
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/amount.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/amount.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*coinbase/crypto/rosetta/types/amount.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\"\xe0\x01\n\x06\x41mount\x12\r\n\x05value\x18\x01 \x01(\t\x12\x39\n\x08\x63urrency\x18\x02 \x01(\x0b\x32\'.coinbase.crypto.rosetta.types.Currency\x12\x45\n\x08metadata\x18\x03 \x03(\x0b\x32\x33.coinbase.crypto.rosetta.types.Amount.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\"\xbc\x01\n\x08\x43urrency\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x10\n\x08\x64\x65\x63imals\x18\x02 \x01(\x05\x12G\n\x08metadata\x18\x03 \x03(\x0b\x32\x35.coinbase.crypto.rosetta.types.Currency.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\x42GZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.amount_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_AMOUNT_METADATAENTRY']._loaded_options = None
+ _globals['_AMOUNT_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_CURRENCY_METADATAENTRY']._loaded_options = None
+ _globals['_CURRENCY_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_AMOUNT']._serialized_start=105
+ _globals['_AMOUNT']._serialized_end=329
+ _globals['_AMOUNT_METADATAENTRY']._serialized_start=260
+ _globals['_AMOUNT_METADATAENTRY']._serialized_end=329
+ _globals['_CURRENCY']._serialized_start=332
+ _globals['_CURRENCY']._serialized_end=520
+ _globals['_CURRENCY_METADATAENTRY']._serialized_start=260
+ _globals['_CURRENCY_METADATAENTRY']._serialized_end=329
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2_grpc.py
new file mode 100644
index 0000000..f1de77a
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/amount_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/amount_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/block_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/block_pb2.py
new file mode 100644
index 0000000..eeeed72
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/block_pb2.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/block.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/block.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from coinbase.crypto.rosetta.types import transaction_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_transaction__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)coinbase/crypto/rosetta/types/block.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a/coinbase/crypto/rosetta/types/transaction.proto\"\xba\x03\n\x05\x42lock\x12H\n\x10\x62lock_identifier\x18\x02 \x01(\x0b\x32..coinbase.crypto.rosetta.types.BlockIdentifier\x12O\n\x17parent_block_identifier\x18\x03 \x01(\x0b\x32..coinbase.crypto.rosetta.types.BlockIdentifier\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12@\n\x0ctransactions\x18\x05 \x03(\x0b\x32*.coinbase.crypto.rosetta.types.Transaction\x12\x44\n\x08metadata\x18\x06 \x03(\x0b\x32\x32.coinbase.crypto.rosetta.types.Block.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01J\x04\x08\x01\x10\x02R\x12network_identifier\".\n\x0f\x42lockIdentifier\x12\r\n\x05index\x18\x01 \x01(\x03\x12\x0c\n\x04hash\x18\x02 \x01(\tBGZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.block_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_BLOCK_METADATAENTRY']._loaded_options = None
+ _globals['_BLOCK_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_BLOCK']._serialized_start=186
+ _globals['_BLOCK']._serialized_end=628
+ _globals['_BLOCK_METADATAENTRY']._serialized_start=533
+ _globals['_BLOCK_METADATAENTRY']._serialized_end=602
+ _globals['_BLOCKIDENTIFIER']._serialized_start=630
+ _globals['_BLOCKIDENTIFIER']._serialized_end=676
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/block_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/block_pb2_grpc.py
new file mode 100644
index 0000000..59b58d1
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/block_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/block_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2.py
new file mode 100644
index 0000000..6a595fe
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/coin_change.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/coin_change.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/coinbase/crypto/rosetta/types/coin_change.proto\x12\x1d\x63oinbase.crypto.rosetta.types\"\xec\x01\n\nCoinChange\x12\x46\n\x0f\x63oin_identifier\x18\x01 \x01(\x0b\x32-.coinbase.crypto.rosetta.types.CoinIdentifier\x12I\n\x0b\x63oin_action\x18\x02 \x01(\x0e\x32\x34.coinbase.crypto.rosetta.types.CoinChange.CoinAction\"K\n\nCoinAction\x12\x1b\n\x17\x43OIN_ACTION_UNSPECIFIED\x10\x00\x12\x10\n\x0c\x43OIN_CREATED\x10\x01\x12\x0e\n\nCOIN_SPENT\x10\x02\"$\n\x0e\x43oinIdentifier\x12\x12\n\nidentifier\x18\x01 \x01(\tBGZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.coin_change_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_COINCHANGE']._serialized_start=83
+ _globals['_COINCHANGE']._serialized_end=319
+ _globals['_COINCHANGE_COINACTION']._serialized_start=244
+ _globals['_COINCHANGE_COINACTION']._serialized_end=319
+ _globals['_COINIDENTIFIER']._serialized_start=321
+ _globals['_COINIDENTIFIER']._serialized_end=357
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2_grpc.py
new file mode 100644
index 0000000..203a5ff
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/coin_change_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/coin_change_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2.py
new file mode 100644
index 0000000..736cf3b
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/network_identifier.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/network_identifier.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6coinbase/crypto/rosetta/types/network_identifier.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\"\x8d\x01\n\x11NetworkIdentifier\x12\x12\n\nblockchain\x18\x01 \x01(\t\x12\x0f\n\x07network\x18\x02 \x01(\t\x12S\n\x16sub_network_identifier\x18\x03 \x01(\x0b\x32\x33.coinbase.crypto.rosetta.types.SubNetworkIdentifier\"\xc3\x01\n\x14SubNetworkIdentifier\x12\x0f\n\x07network\x18\x01 \x01(\t\x12S\n\x08metadata\x18\x02 \x03(\x0b\x32\x41.coinbase.crypto.rosetta.types.SubNetworkIdentifier.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\x42GZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.network_identifier_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_SUBNETWORKIDENTIFIER_METADATAENTRY']._loaded_options = None
+ _globals['_SUBNETWORKIDENTIFIER_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_NETWORKIDENTIFIER']._serialized_start=117
+ _globals['_NETWORKIDENTIFIER']._serialized_end=258
+ _globals['_SUBNETWORKIDENTIFIER']._serialized_start=261
+ _globals['_SUBNETWORKIDENTIFIER']._serialized_end=456
+ _globals['_SUBNETWORKIDENTIFIER_METADATAENTRY']._serialized_start=387
+ _globals['_SUBNETWORKIDENTIFIER_METADATAENTRY']._serialized_end=456
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2_grpc.py
new file mode 100644
index 0000000..60bbd29
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/network_identifier_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/network_identifier_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2.py
new file mode 100644
index 0000000..2834ca4
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/operation.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/operation.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from coinbase.crypto.rosetta.types import account_identifer_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_account__identifer__pb2
+from coinbase.crypto.rosetta.types import amount_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_amount__pb2
+from coinbase.crypto.rosetta.types import coin_change_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_coin__change__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-coinbase/crypto/rosetta/types/operation.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\x1a\x35\x63oinbase/crypto/rosetta/types/account_identifer.proto\x1a*coinbase/crypto/rosetta/types/amount.proto\x1a/coinbase/crypto/rosetta/types/coin_change.proto\"\x96\x04\n\tOperation\x12P\n\x14operation_identifier\x18\x01 \x01(\x0b\x32\x32.coinbase.crypto.rosetta.types.OperationIdentifier\x12N\n\x12related_operations\x18\x02 \x03(\x0b\x32\x32.coinbase.crypto.rosetta.types.OperationIdentifier\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x0e\n\x06status\x18\x04 \x01(\t\x12\x41\n\x07\x61\x63\x63ount\x18\x05 \x01(\x0b\x32\x30.coinbase.crypto.rosetta.types.AccountIdentifier\x12\x35\n\x06\x61mount\x18\x06 \x01(\x0b\x32%.coinbase.crypto.rosetta.types.Amount\x12>\n\x0b\x63oin_change\x18\x07 \x01(\x0b\x32).coinbase.crypto.rosetta.types.CoinChange\x12H\n\x08metadata\x18\x08 \x03(\x0b\x32\x36.coinbase.crypto.rosetta.types.Operation.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\";\n\x13OperationIdentifier\x12\r\n\x05index\x18\x01 \x01(\x03\x12\x15\n\rnetwork_index\x18\x02 \x01(\x03\x42GZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.operation_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_OPERATION_METADATAENTRY']._loaded_options = None
+ _globals['_OPERATION_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_OPERATION']._serialized_start=256
+ _globals['_OPERATION']._serialized_end=790
+ _globals['_OPERATION_METADATAENTRY']._serialized_start=721
+ _globals['_OPERATION_METADATAENTRY']._serialized_end=790
+ _globals['_OPERATIONIDENTIFIER']._serialized_start=792
+ _globals['_OPERATIONIDENTIFIER']._serialized_end=851
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2_grpc.py
new file mode 100644
index 0000000..4318eee
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/operation_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/operation_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2.py b/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2.py
new file mode 100644
index 0000000..1d3fe1c
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: coinbase/crypto/rosetta/types/transaction.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'coinbase/crypto/rosetta/types/transaction.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from coinbase.crypto.rosetta.types import operation_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_operation__pb2
+from coinbase.crypto.rosetta.types import network_identifier_pb2 as coinbase_dot_crypto_dot_rosetta_dot_types_dot_network__identifier__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/coinbase/crypto/rosetta/types/transaction.proto\x12\x1d\x63oinbase.crypto.rosetta.types\x1a\x19google/protobuf/any.proto\x1a-coinbase/crypto/rosetta/types/operation.proto\x1a\x36\x63oinbase/crypto/rosetta/types/network_identifier.proto\"\x85\x03\n\x0bTransaction\x12T\n\x16transaction_identifier\x18\x01 \x01(\x0b\x32\x34.coinbase.crypto.rosetta.types.TransactionIdentifier\x12<\n\noperations\x18\x02 \x03(\x0b\x32(.coinbase.crypto.rosetta.types.Operation\x12O\n\x14related_transactions\x18\x03 \x03(\x0b\x32\x31.coinbase.crypto.rosetta.types.RelatedTransaction\x12J\n\x08metadata\x18\x04 \x03(\x0b\x32\x38.coinbase.crypto.rosetta.types.Transaction.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\"%\n\x15TransactionIdentifier\x12\x0c\n\x04hash\x18\x01 \x01(\t\"\xcb\x02\n\x12RelatedTransaction\x12L\n\x12network_identifier\x18\x01 \x01(\x0b\x32\x30.coinbase.crypto.rosetta.types.NetworkIdentifier\x12T\n\x16transaction_identifier\x18\x02 \x01(\x0b\x32\x34.coinbase.crypto.rosetta.types.TransactionIdentifier\x12N\n\tdirection\x18\x03 \x01(\x0e\x32;.coinbase.crypto.rosetta.types.RelatedTransaction.Direction\"A\n\tDirection\x12\x19\n\x15\x44IRECTION_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46ORWARD\x10\x01\x12\x0c\n\x08\x42\x41\x43KWARD\x10\x02\x42GZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/typesb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'coinbase.crypto.rosetta.types.transaction_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'ZEgithub.com/coinbase/chainstorage/protos/coinbase/crypto/rosetta/types'
+ _globals['_TRANSACTION_METADATAENTRY']._loaded_options = None
+ _globals['_TRANSACTION_METADATAENTRY']._serialized_options = b'8\001'
+ _globals['_TRANSACTION']._serialized_start=213
+ _globals['_TRANSACTION']._serialized_end=602
+ _globals['_TRANSACTION_METADATAENTRY']._serialized_start=533
+ _globals['_TRANSACTION_METADATAENTRY']._serialized_end=602
+ _globals['_TRANSACTIONIDENTIFIER']._serialized_start=604
+ _globals['_TRANSACTIONIDENTIFIER']._serialized_end=641
+ _globals['_RELATEDTRANSACTION']._serialized_start=644
+ _globals['_RELATEDTRANSACTION']._serialized_end=975
+ _globals['_RELATEDTRANSACTION_DIRECTION']._serialized_start=910
+ _globals['_RELATEDTRANSACTION_DIRECTION']._serialized_end=975
+# @@protoc_insertion_point(module_scope)
diff --git a/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2_grpc.py b/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2_grpc.py
new file mode 100644
index 0000000..affdefa
--- /dev/null
+++ b/gen/src/python/coinbase/crypto/rosetta/types/transaction_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.71.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in coinbase/crypto/rosetta/types/transaction_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/go.mod b/go.mod
index 3b728fc..e55ad8d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,18 +1,22 @@
module github.com/coinbase/chainstorage
-go 1.22.0
+go 1.24.0
+
+toolchain go1.24.11
require (
cloud.google.com/go/firestore v1.14.0
cloud.google.com/go/storage v1.37.0
github.com/VividCortex/ewma v1.2.0
- github.com/aws/aws-sdk-go v1.50.4
+ github.com/aws/aws-sdk-go v1.55.7
+ github.com/aws/aws-sdk-go-v2/config v1.27.0
+ github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/cenkalti/backoff/v4 v4.2.1
- github.com/coinbase/rosetta-sdk-go v0.8.3
+ github.com/coinbase/rosetta-sdk-go v0.8.9
github.com/coinbase/rosetta-sdk-go/types v1.0.0
- github.com/ethereum/go-ethereum v1.13.11
+ github.com/ethereum/go-ethereum v1.16.7
github.com/fatih/color v1.16.0
github.com/gagliardetto/solana-go v1.8.4
github.com/go-playground/validator/v10 v10.17.0
@@ -31,36 +35,59 @@ require (
github.com/smira/go-statsd v1.3.3
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.18.2
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
github.com/uber-go/tally/v4 v4.1.10
github.com/valyala/fasttemplate v1.2.2
- go.temporal.io/api v1.27.0
- go.temporal.io/sdk v1.26.0-rc.2.0.20240214221834-30da688037d1
+ go.temporal.io/api v1.49.1
+ go.temporal.io/sdk v1.35.0
go.temporal.io/sdk/contrib/tally v0.2.0
go.uber.org/atomic v1.11.0
go.uber.org/fx v1.20.1
go.uber.org/mock v0.4.0
go.uber.org/zap v1.26.0
- golang.org/x/crypto v0.32.0
+ golang.org/x/crypto v0.37.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
- golang.org/x/net v0.34.0
- golang.org/x/sync v0.10.0
- golang.org/x/text v0.21.0
+ golang.org/x/net v0.39.0
+ golang.org/x/sync v0.13.0
+ golang.org/x/text v0.24.0
golang.org/x/time v0.5.0
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
google.golang.org/api v0.158.0
- google.golang.org/grpc v1.61.0
- google.golang.org/protobuf v1.34.2
+ google.golang.org/grpc v1.66.0
+ google.golang.org/protobuf v1.36.5
gopkg.in/DataDog/dd-trace-go.v1 v1.59.1
gopkg.in/yaml.v2 v2.4.0
logur.dev/adapter/zap v0.5.0
logur.dev/logur v0.17.0
)
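+// nexus-rpc/sdk-go is an indirect dependency pulled in by the Temporal SDK upgrade (assumed; it is marked indirect below).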
+require github.com/nexus-rpc/sdk-go v0.3.0 // indirect
+
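+// lib/pq: pure-Go Postgres driver for database/sql.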
+require github.com/lib/pq v1.10.9
+
+require (
+ github.com/SeismicSystems/aes v0.0.0-20251119232201-ff6734fc5e0e
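+ // pressly/goose: SQL schema migration runner.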
+ github.com/pressly/goose/v3 v3.14.0
+)
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.25.1 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.0 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.1 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.1 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 // indirect
+ github.com/aws/smithy-go v1.20.1 // indirect
+)
+
require (
cloud.google.com/go v0.112.0 // indirect
- cloud.google.com/go/compute v1.23.3 // indirect
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
cloud.google.com/go/longrunning v0.5.4 // indirect
contrib.go.opencensus.io/exporter/stackdriver v0.13.4 // indirect
@@ -77,7 +104,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.17.0 // indirect
github.com/blendle/zapdriver v1.3.1 // indirect
- github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd // indirect
+ github.com/btcsuite/btcd v0.24.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -109,18 +136,18 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/s2a-go v0.1.7 // indirect
- github.com/google/uuid v1.5.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
- github.com/holiman/uint256 v1.3.2 // indirect
+ github.com/holiman/uint256 v1.3.2
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/compress v1.17.11
github.com/leodido/go-urn v1.2.4 // indirect
github.com/logrusorgru/aurora v2.0.3+incompatible // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -133,19 +160,18 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect
- github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/mr-tron/base58 v1.2.0
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
- github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
+ github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
+ github.com/prometheus/procfs v0.11.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
@@ -182,13 +208,12 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
- golang.org/x/oauth2 v0.16.0 // indirect
- golang.org/x/sys v0.29.0 // indirect
- golang.org/x/term v0.28.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
+ golang.org/x/oauth2 v0.27.0 // indirect
+ golang.org/x/sys v0.36.0 // indirect
+ golang.org/x/term v0.31.0 // indirect
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect
@@ -198,3 +223,5 @@ require (
replace github.com/gogo/protobuf v1.3.3 => github.com/gogo/protobuf v1.3.2
replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101500.0
+
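+// rosetta-sdk-go was renamed upstream to mesh-sdk-go (see go.sum); redirect the old module path to the renamed module.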
+replace github.com/coinbase/rosetta-sdk-go => github.com/coinbase/mesh-sdk-go v0.8.9
diff --git a/go.sum b/go.sum
index 4246d1c..31f739a 100644
--- a/go.sum
+++ b/go.sum
@@ -15,10 +15,8 @@ cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnP
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -70,6 +68,8 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/SeismicSystems/aes v0.0.0-20251119232201-ff6734fc5e0e h1:46c0e75cmuSvi7DEpshhvGIaFJbegu/mqhBHWwGnHxc=
+github.com/SeismicSystems/aes v0.0.0-20251119232201-ff6734fc5e0e/go.mod h1:pxLMRBExL594NpfDc/kt70zHYNqOI4N0fX7rLT+kTSs=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -89,8 +89,36 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.29.5/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
-github.com/aws/aws-sdk-go v1.50.4 h1:jJNhxunBgfjmCSjMZ3INwQ19ZN3RoGEZfgSCUYF/NZw=
-github.com/aws/aws-sdk-go v1.50.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
+github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.25.1 h1:P7hU6A5qEdmajGwvae/zDkOq+ULLC9tQBTwqqiwFGpI=
+github.com/aws/aws-sdk-go-v2 v1.25.1/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo=
+github.com/aws/aws-sdk-go-v2/config v1.27.0 h1:J5sdGCAHuWKIXLeXiqr8II/adSvetkx0qdZwdbXXpb0=
+github.com/aws/aws-sdk-go-v2/config v1.27.0/go.mod h1:cfh8v69nuSUohNFMbIISP2fhmblGmYEOKs5V53HiHnk=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.0 h1:lMW2x6sKBsiAJrpi1doOXqWFyEPoE886DTb1X0wb7So=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.0/go.mod h1:uT41FIH8cCIxOdUYIL0PYyHlL1NoneDuDSCwg5VE/5o=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.1 h1:evvi7FbTAoFxdP/mixmP7LIYzQWAmzBcwNB/es9XPNc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.1/go.mod h1:rH61DT6FDdikhPghymripNUCsf+uVF4Cnk4c4DBKH64=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.1 h1:RAnaIrbxPtlXNVI/OIlh1sidTQ3e1qM6LRjs7N0bE0I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.1/go.mod h1:nbgAGkH5lk0RZRMh6A4K/oG6Xj11eC/1CyDow+DUAFI=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0/go.mod h1:SxIkWpByiGbhbHYTo9CMTUnx2G4p4ZQMrDPcRRy//1c=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0/go.mod h1:l8gPU5RYGOFHJqWEpPMoRTP0VoaWQSkJdKo+hwWnnDA=
+github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.0 h1:Xf3s55N9cqKvFK6D70zCXvXXN4ZovTCy7glL+gUhLEc=
+github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.0/go.mod h1:RA3ERghFSivbTf0Sbsxv/grUuLMcyAjm0F/PylJMmEs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 h1:u6OkVDxtBPnxPkZ9/63ynEe+8kHbtS5IfaC4PzVxzWM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.19.0/go.mod h1:YqbU3RS/pkDVu+v+Nwxvn0i1WB0HkNWEePWbmODEbbs=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 h1:6DL0qu5+315wbsAEEmzK+P9leRwNbkp+lGjPC+CEvb8=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0/go.mod h1:olUAyg+FaoFaL/zFaeQQONjOZ9HXoxgvI/c7mQTYz7M=
+github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 h1:cjTRjh700H36MQ8M0LnDn33W3JmwC77mdxIIyPWCdpM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.27.0/go.mod h1:nXfOBMWPokIbOY+Gi7a1psWMSvskUCemZzI+SMB7Akc=
+github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
+github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -106,8 +134,9 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf
github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
-github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
+github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
+github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
@@ -148,10 +177,10 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=
-github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coinbase/rosetta-sdk-go v0.8.3 h1:IYqd+Ser5NVh0s7p8p2Ir82iCvi75E1l0NH2H4NEr0Y=
-github.com/coinbase/rosetta-sdk-go v0.8.3/go.mod h1:ChOHc+BNq7zqJDDkui0DA124GOvlAiRbdgAc1U9GMDQ=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/coinbase/mesh-sdk-go v0.8.9 h1:4paJktpDY7e5ghWSnSa5QHOXDdKTSlSwDZzbm1JT2tI=
+github.com/coinbase/mesh-sdk-go v0.8.9/go.mod h1:xIu+9M4EN/WkAy/H67lP8iu+/Fy3Wbyihmv8L+XacWM=
github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA=
github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c=
github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A=
@@ -203,8 +232,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
-github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
+github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/ethereum-optimism/op-geth v1.101500.0 h1:pe/bYceb/w26Kz31/GGlFBrFQjQOuFHm8o2MV5W7n0g=
github.com/ethereum-optimism/op-geth v1.101500.0/go.mod h1:OMpyVMMy5zpAAHlR5s/aGbXRk+7cIKczUEIJj54APbY=
github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=
@@ -345,8 +374,8 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -364,8 +393,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -420,6 +449,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -447,6 +478,8 @@ github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzW
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -504,6 +537,8 @@ github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hz
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
+github.com/nexus-rpc/sdk-go v0.3.0 h1:Y3B0kLYbMhd4C2u00kcYajvmOrfozEtTV/nHSnV57jA=
+github.com/nexus-rpc/sdk-go v0.3.0/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@@ -526,7 +561,6 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
@@ -541,6 +575,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pressly/goose/v3 v3.14.0 h1:gNrFLLDF+fujdq394rcdYK3WPxp3VKWifTajlZwInJM=
+github.com/pressly/goose/v3 v3.14.0/go.mod h1:uwSpREK867PbIsdE9GS6pRk1LUPB7gwMkmvk9/hbIMA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -566,8 +602,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
+github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg=
@@ -575,6 +611,8 @@ github.com/prysmaticlabs/gohashtree v0.0.3-alpha h1:1EVinCWdb3Lorq7xn8DYQHf48nCc
github.com/prysmaticlabs/gohashtree v0.0.3-alpha/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
github.com/prysmaticlabs/prysm/v4 v4.1.0 h1:fJWyCzeDgAD/4RGxqnZN0StrFQgZ0MXjpGSWkipV9zw=
github.com/prysmaticlabs/prysm/v4 v4.1.0/go.mod h1:+o907dc4mwEE0wJkQ8RrzCroC+q2WCzdCLtikwonw8c=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA=
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -659,8 +697,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
@@ -741,11 +779,11 @@ go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.temporal.io/api v1.5.0/go.mod h1:BqKxEJJYdxb5dqf0ODfzfMxh8UEQ5L3zKS51FiIYYkA=
-go.temporal.io/api v1.27.0 h1:M7a7p3A/gIKEMAYVBQD+/2hfzh/hC0410393TwD20Ko=
-go.temporal.io/api v1.27.0/go.mod h1:iASB2zPPR+FtFKn5w7/hF7AG2dkvkW7TTMAqL06tz0g=
+go.temporal.io/api v1.49.1 h1:CdiIohibamF4YP9k261DjrzPVnuomRoh1iC//gZ1puA=
+go.temporal.io/api v1.49.1/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM=
go.temporal.io/sdk v1.12.0/go.mod h1:lSp3lH1lI0TyOsus0arnO3FYvjVXBZGi/G7DjnAnm6o=
-go.temporal.io/sdk v1.26.0-rc.2.0.20240214221834-30da688037d1 h1:TJAj59PR+Ek0Z1dQSBx50MDxPeQsMZdaRl71w6QK3VU=
-go.temporal.io/sdk v1.26.0-rc.2.0.20240214221834-30da688037d1/go.mod h1:HDr8fIWJ/HF8dJwTPgOayI8PYB5WoVIxUMjzE78M2ng=
+go.temporal.io/sdk v1.35.0 h1:lRNAQ5As9rLgYa7HBvnmKyzxLcdElTuoFJ0FXM/AsLQ=
+go.temporal.io/sdk v1.35.0/go.mod h1:1q5MuLc2MEJ4lneZTHJzpVebW2oZnyxoIOWX3oFVebw=
go.temporal.io/sdk/contrib/tally v0.2.0 h1:XnTJIQcjOv+WuCJ1u8Ve2nq+s2H4i/fys34MnWDRrOo=
go.temporal.io/sdk/contrib/tally v0.2.0/go.mod h1:1kpSuCms/tHeJQDPuuKkaBsMqfHnIIRnCtUYlPNXxuE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -799,8 +837,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -837,6 +875,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -874,15 +914,15 @@ golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -895,8 +935,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -955,14 +995,14 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -974,8 +1014,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1027,6 +1067,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
+golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1053,8 +1095,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1081,10 +1121,10 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed h1:J6izYgfBXAI3xTKLgxzTmUltdYaLsuBxFCgDHWJ/eXg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1102,8 +1142,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
-google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
+google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
+google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1117,8 +1157,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/DataDog/dd-trace-go.v1 v1.59.1 h1:rXPzybNgv7r9dHCLlQqcThiAL5q2gvAQgFYYjpgwR/k=
gopkg.in/DataDog/dd-trace-go.v1 v1.59.1/go.mod h1:/4wpnyM3/ncN1CY1kqIm84mw8N/QVcHeckn13u8ZJis=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@@ -1165,6 +1205,26 @@ logur.dev/adapter/zap v0.5.0/go.mod h1:fpjTeoSkN05hrUviBkIe/u0CKWTh1PBxWQLLFgnWh
logur.dev/logur v0.16.1/go.mod h1:DyA5B+b6WjjCcnpE1+HGtTLh2lXooxRq+JmAwXMRK08=
logur.dev/logur v0.17.0 h1:lwFZk349ZBY7KhonJFLshP/VhfFa6BxOjHxNnPHnEyc=
logur.dev/logur v0.17.0/go.mod h1:DyA5B+b6WjjCcnpE1+HGtTLh2lXooxRq+JmAwXMRK08=
+lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
+lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
+modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
+modernc.org/ccgo/v3 v3.16.14 h1:af6KNtFgsVmnDYrWk3PQCS9XT6BXe7o3ZFJKkIKvXNQ=
+modernc.org/ccgo/v3 v3.16.14/go.mod h1:mPDSujUIaTNWQSG4eqKw+atqLOEbma6Ncsa94WbC9zo=
+modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM=
+modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak=
+modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
+modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
+modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o=
+modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.24.0 h1:EsClRIWHGhLTCX44p+Ri/JLD+vFGo0QGjasg2/F9TlI=
+modernc.org/sqlite v1.24.0/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
+modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/aws/config.go b/internal/aws/config.go
index 9f69120..27a7156 100644
--- a/internal/aws/config.go
+++ b/internal/aws/config.go
@@ -23,7 +23,8 @@ func NewConfig(params ConfigParams) *aws.Config {
if params.Config.AWS.IsLocalStack {
cfg.Credentials = credentials.NewStaticCredentials("THESE", "ARE", "IGNORED")
cfg.S3ForcePathStyle = aws.Bool(true)
- cfg.Endpoint = aws.String("http://localhost:4566")
+ // TODO: figure out how to set the endpoint dynamically.
+ cfg.Endpoint = aws.String("http://localstack:4566")
}
return cfg
}
diff --git a/internal/blockchain/client/bitcoin/bitcoin.go b/internal/blockchain/client/bitcoin/bitcoin.go
index 0e68ab5..94814ba 100644
--- a/internal/blockchain/client/bitcoin/bitcoin.go
+++ b/internal/blockchain/client/bitcoin/bitcoin.go
@@ -44,9 +44,6 @@ const (
bitcoinErrCodeInvalidParameter = -8
bitcoinErrMessageBlockNotFound = "Block not found"
bitcoinErrMessageBlockOutOfRange = "Block height out of range"
-
- // batch size
- bitcoinGetInputTransactionsBatchSize = 100
)
var _ internal.Client = (*bitcoinClient)(nil)
@@ -298,28 +295,35 @@ func (b *bitcoinClient) getInputTransactions(
) ([][][]byte, error) {
transactions := header.Transactions
blockHash := header.Hash.Value()
+ txBatchSize := b.config.Chain.Client.TxBatchSize
+ // Use a set to deduplicate input transaction IDs while preserving order
+ inputTransactionIDSet := make(map[string]bool)
var inputTransactionIDs []string
- // TODO: dedupe for inputTransactionIDs
for _, tx := range transactions {
for _, input := range tx.Inputs {
inputTransactionID := input.Identifier.Value()
// coinbase transaction does not have txid
- if inputTransactionID != "" {
+ if inputTransactionID != "" && !inputTransactionIDSet[inputTransactionID] {
+ inputTransactionIDSet[inputTransactionID] = true
inputTransactionIDs = append(inputTransactionIDs, inputTransactionID)
}
}
}
- numTransactions := len(inputTransactionIDs)
- inputTransactionsMap := make(map[string][]byte, numTransactions)
+ numTransactionSet := len(inputTransactionIDSet)
+ inputTransactionsMap := make(map[string][]byte, numTransactionSet)
+
+ b.logger.Debug(
+ "getting input transactions>>>",
+ zap.Int("numTransactions", numTransactionSet),
+ zap.Int("txBatchSize", txBatchSize),
+ )
// batch of batchCalls to getrawtransaction in order to fetch input transaction data
- for batchStart := 0; batchStart < numTransactions; batchStart += bitcoinGetInputTransactionsBatchSize {
- batchEnd := batchStart + bitcoinGetInputTransactionsBatchSize
- if batchEnd > numTransactions {
- batchEnd = numTransactions
- }
+ for batchStart := 0; batchStart < numTransactionSet; batchStart += txBatchSize {
+ batchEnd := min(batchStart+txBatchSize, numTransactionSet)
batchParams := make([]jsonrpc.Params, batchEnd-batchStart)
for i, transactionID := range inputTransactionIDs[batchStart:batchEnd] {
diff --git a/internal/blockchain/client/ethereum/abstract.go b/internal/blockchain/client/ethereum/abstract.go
new file mode 100644
index 0000000..671787a
--- /dev/null
+++ b/internal/blockchain/client/ethereum/abstract.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewAbstractClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Reuse the Ethereum client factory since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/ethereum.go b/internal/blockchain/client/ethereum/ethereum.go
index 2afe1d7..5a07043 100644
--- a/internal/blockchain/client/ethereum/ethereum.go
+++ b/internal/blockchain/client/ethereum/ethereum.go
@@ -34,6 +34,7 @@ type (
config *config.Config
logger *zap.Logger
client jsonrpc.Client
+ tracer EthereumBlockTracer
dlq dlq.DLQ
validate *validator.Validate
metrics *ethereumClientMetrics
@@ -42,6 +43,10 @@ type (
commitmentLevel types.CommitmentLevel
}
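+ // EthereumBlockTracer lets chain-specific clients (e.g. TronClient) override how block traces are
+ // fetched; when unset, EthereumClient falls back to its own getBlockTraces.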
+ EthereumBlockTracer interface {
+ getBlockTraces(ctx context.Context, tag uint32, block *ethereum.EthereumBlockLit) ([][]byte, error)
+ }
+
EthereumClientOption func(client *EthereumClient)
ethereumClientMetrics struct {
@@ -490,8 +495,13 @@ func (c *EthereumClient) getBlockFromHeader(ctx context.Context, tag uint32, hea
if err != nil {
return nil, xerrors.Errorf("failed to fetch transaction receipts for block %v: %w", height, err)
}
-
- transactionTraces, err := c.getBlockTraces(ctx, tag, headerResult.header)
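+ // Prefer the injected tracer (used by chains like Tron); fall back to this client's own implementation.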
+ var tracer EthereumBlockTracer
+ if c.tracer != nil {
+ tracer = c.tracer
+ } else {
+ tracer = c
+ }
+ transactionTraces, err := tracer.getBlockTraces(ctx, tag, headerResult.header)
if err != nil {
return nil, xerrors.Errorf("failed to fetch traces for block %v: %w", height, err)
}
@@ -1234,8 +1244,13 @@ func (c *EthereumClient) UpgradeBlock(ctx context.Context, block *api.Block, new
if err != nil {
return nil, xerrors.Errorf("failed to fetch header result for block %v: %w", height, err)
}
-
- transactionTraces, err := c.getBlockTraces(ctx, newTag, headerResult.header)
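+ // Prefer the injected tracer (used by chains like Tron); fall back to this client's own implementation.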
+ var tracer EthereumBlockTracer
+ if c.tracer != nil {
+ tracer = c.tracer
+ } else {
+ tracer = c
+ }
+ transactionTraces, err := tracer.getBlockTraces(ctx, newTag, headerResult.header)
if err != nil {
return nil, xerrors.Errorf("failed to fetch traces for block %v: %w", height, err)
}
diff --git a/internal/blockchain/client/ethereum/ethereumclassic.go b/internal/blockchain/client/ethereum/ethereumclassic.go
new file mode 100644
index 0000000..e304a5b
--- /dev/null
+++ b/internal/blockchain/client/ethereum/ethereumclassic.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewEthereumClassicClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Ethereum Classic shares the same data schema as Ethereum since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/megaeth.go b/internal/blockchain/client/ethereum/megaeth.go
new file mode 100644
index 0000000..50f09cd
--- /dev/null
+++ b/internal/blockchain/client/ethereum/megaeth.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewMegaethClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Reuse the Ethereum client factory since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/module.go b/internal/blockchain/client/ethereum/module.go
index 2c1d65e..8bb05f0 100644
--- a/internal/blockchain/client/ethereum/module.go
+++ b/internal/blockchain/client/ethereum/module.go
@@ -39,5 +39,37 @@ var Module = fx.Options(
Name: "polygon",
Target: NewPolygonClientFactory,
}),
+ fx.Provide(fx.Annotated{
+ Name: "tron",
+ Target: NewTronClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "story",
+ Target: NewStoryClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "ethereumclassic",
+ Target: NewEthereumClassicClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "plasma",
+ Target: NewPlasmaClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "monad",
+ Target: NewMonadClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "abstract",
+ Target: NewAbstractClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "megaeth",
+ Target: NewMegaethClientFactory,
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "seismic",
+ Target: NewSeismicClientFactory,
+ }),
beacon.Module,
)
diff --git a/internal/blockchain/client/ethereum/monad.go b/internal/blockchain/client/ethereum/monad.go
new file mode 100644
index 0000000..fdd1f90
--- /dev/null
+++ b/internal/blockchain/client/ethereum/monad.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewMonadClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Reuse the Ethereum client factory since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/plasma.go b/internal/blockchain/client/ethereum/plasma.go
new file mode 100644
index 0000000..f4e2281
--- /dev/null
+++ b/internal/blockchain/client/ethereum/plasma.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewPlasmaClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Plasma shares the same data schema as Ethereum since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/seismic.go b/internal/blockchain/client/ethereum/seismic.go
new file mode 100644
index 0000000..133d334
--- /dev/null
+++ b/internal/blockchain/client/ethereum/seismic.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewSeismicClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Seismic shares the same data schema as Ethereum since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/story.go b/internal/blockchain/client/ethereum/story.go
new file mode 100644
index 0000000..df4eb6a
--- /dev/null
+++ b/internal/blockchain/client/ethereum/story.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+)
+
+func NewStoryClientFactory(params internal.JsonrpcClientParams) internal.ClientFactory {
+ // Story shares the same data schema as Ethereum since it is an EVM chain.
+ return NewEthereumClientFactory(params)
+}
diff --git a/internal/blockchain/client/ethereum/story_test.go b/internal/blockchain/client/ethereum/story_test.go
new file mode 100644
index 0000000..59dd721
--- /dev/null
+++ b/internal/blockchain/client/ethereum/story_test.go
@@ -0,0 +1 @@
+package ethereum
diff --git a/internal/blockchain/client/ethereum/tron.go b/internal/blockchain/client/ethereum/tron.go
new file mode 100644
index 0000000..f821814
--- /dev/null
+++ b/internal/blockchain/client/ethereum/tron.go
@@ -0,0 +1,288 @@
+package ethereum
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "time"
+
+ "github.com/go-playground/validator/v10"
+ "go.uber.org/fx"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+ "github.com/coinbase/chainstorage/internal/blockchain/jsonrpc"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/ethereum"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/ethereum/types"
+ "github.com/coinbase/chainstorage/internal/blockchain/restapi"
+ "github.com/coinbase/chainstorage/internal/dlq"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+type (
+ TronClient struct {
+ *EthereumClient
+ additionalClient restapi.Client
+ }
+
+ TronClientParams struct {
+ fx.In
+ fxparams.Params
+ MasterClient jsonrpc.Client `name:"master"`
+ SlaveClient jsonrpc.Client `name:"slave"`
+ ValidatorClient jsonrpc.Client `name:"validator"`
+ ConsensusClient jsonrpc.Client `name:"consensus"`
+ AdditionalClient restapi.Client `name:"additional"`
+ DLQ dlq.DLQ
+ }
+
+ tronApiClientFactory struct {
+ masterClient jsonrpc.Client
+ slaveClient jsonrpc.Client
+ validatorClient jsonrpc.Client
+ consensusClient jsonrpc.Client
+ clientFactory TronApiClientFactoryFn
+ }
+
+ TronApiClientFactoryFn func(client jsonrpc.Client) internal.Client
+)
+
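+// tronBlockNumRequestData is the JSON POST body shared by the /wallet/getblockbynum and
+// /wallet/gettransactioninfobyblocknum endpoints.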
+type tronBlockNumRequestData struct {
+ Num uint64 `json:"num"`
+}
+
+var tronTxInfoMethod = &restapi.RequestMethod{
+ Name: "GetTransactionInfoByBlockNum",
+ ParamsPath: "/wallet/gettransactioninfobyblocknum", // parameters are sent in the POST body, not the URL
+ Timeout: 6 * time.Second,
+ HTTPMethod: http.MethodPost,
+}
+
+var tronBlockTxMethod = &restapi.RequestMethod{
+ Name: "GetBlockByNum",
+ ParamsPath: "/wallet/getblockbynum",
+ Timeout: 6 * time.Second,
+ HTTPMethod: http.MethodPost,
+}
+
+func NewTronApiClientFactory(params TronClientParams, clientFactory TronApiClientFactoryFn) internal.ClientFactory {
+ return &tronApiClientFactory{
+ masterClient: params.MasterClient,
+ slaveClient: params.SlaveClient,
+ validatorClient: params.ValidatorClient,
+ consensusClient: params.ConsensusClient,
+ clientFactory: clientFactory,
+ }
+}
+
+func (f *tronApiClientFactory) Master() internal.Client {
+ return f.clientFactory(f.masterClient)
+}
+
+func (f *tronApiClientFactory) Slave() internal.Client {
+ return f.clientFactory(f.slaveClient)
+}
+
+func (f *tronApiClientFactory) Validator() internal.Client {
+ return f.clientFactory(f.validatorClient)
+}
+
+func (f *tronApiClientFactory) Consensus() internal.Client {
+ return f.clientFactory(f.consensusClient)
+}
+
+// Tron shares the same data schema as Ethereum since it is an EVM chain, but its traces are retrieved through a separate restapi client that is independent of the main jsonrpc client.
+// We therefore need a dedicated factory that builds a TronClient and wires additionalClient to that restapi client.
+func NewTronClientFactory(params TronClientParams) internal.ClientFactory {
+ return NewTronApiClientFactory(params, func(client jsonrpc.Client) internal.Client {
+ logger := log.WithPackage(params.Logger)
+ ethClient := &EthereumClient{
+ config: params.Config,
+ logger: logger,
+ client: client,
+ dlq: params.DLQ,
+ validate: validator.New(),
+ metrics: newEthereumClientMetrics(params.Metrics),
+ nodeType: types.EthereumNodeType_ARCHIVAL,
+ traceType: types.TraceType_GETH,
+ commitmentLevel: types.CommitmentLevelLatest,
+ }
+ result := &TronClient{
+ EthereumClient: ethClient,
+ additionalClient: params.AdditionalClient,
+ }
+ result.tracer = result
+ return result
+ })
+}
+
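+// makeTronHttpCall marshals requestData to JSON and POSTs it to the given wallet API method on the
+// additional (REST) client.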
+func (c *TronClient) makeTronHttpCall(ctx context.Context, httpMethod *restapi.RequestMethod, requestData tronBlockNumRequestData) ([]byte, error) {
+ postData, err := json.Marshal(requestData)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to Marshal Tron requestData: %w", err)
+ }
+ response, err := c.additionalClient.Call(ctx, httpMethod, postData)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to call Tron API: %w", err)
+ }
+ return response, nil
+}
+
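+// getBlockTxByNum fetches the raw block, including its transactions, via /wallet/getblockbynum.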
+func (c *TronClient) getBlockTxByNum(ctx context.Context, blockNumber uint64) ([]byte, error) {
+ requestData := tronBlockNumRequestData{
+ Num: blockNumber,
+ }
+ result, err := c.makeTronHttpCall(ctx, tronBlockTxMethod, requestData)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get Tron block: %w", err)
+ }
+ return result, nil
+}
+
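+// getBlockTxInfoByNum fetches per-transaction execution info via /wallet/gettransactioninfobyblocknum.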
+func (c *TronClient) getBlockTxInfoByNum(ctx context.Context, blockNumber uint64) ([]byte, error) {
+ requestData := tronBlockNumRequestData{
+ Num: blockNumber,
+ }
+ response, err := c.makeTronHttpCall(ctx, tronTxInfoMethod, requestData)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get Tron transaction info: %w", err)
+ }
+ return response, nil
+}
+
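+// getBlockTraces implements EthereumBlockTracer for Tron: it joins each transaction's contract type
+// (taken from the block) onto the per-transaction info records, which are then stored as traces.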
+func (c *TronClient) getBlockTraces(ctx context.Context, tag uint32, block *ethereum.EthereumBlockLit) ([][]byte, error) {
+ blockNumber := block.Number.Value()
+
+ // Get block transactions to extract types
+ blockTxData, err := c.getBlockTxByNum(ctx, blockNumber)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get block transactions: %w", err)
+ }
+
+ // Get transaction info
+ txInfoResponse, err := c.getBlockTxInfoByNum(ctx, blockNumber)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get transaction info: %w", err)
+ }
+
+ // Parse block data to extract transaction types by txID
+ txTypeMap, err := c.extractTransactionTypes(blockTxData)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to extract transaction types: %w", err)
+ }
+
+ // Merge txInfo with transaction types
+ results, err := c.mergeTxInfoWithTypes(txInfoResponse, txTypeMap)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to merge txInfo with types: %w", err)
+ }
+
+ return results, nil
+}
+
+// mergeTxInfoWithTypes parses txInfo response and adds transaction types based on txID matching
+func (c *TronClient) mergeTxInfoWithTypes(txInfoResponse []byte, txTypeMap map[string]string) ([][]byte, error) {
+ // Parse txInfo response as array
+ var txInfoArray []json.RawMessage
+ if err := json.Unmarshal(txInfoResponse, &txInfoArray); err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal TronTxInfo: %w", err)
+ }
+
+ // Merge each txInfo with its corresponding type
+ results := make([][]byte, 0, len(txInfoArray))
+ for _, txInfoBytes := range txInfoArray {
+ // Parse txInfo as map to allow dynamic field addition
+ var txInfo map[string]interface{}
+ if err := json.Unmarshal(txInfoBytes, &txInfo); err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal txInfo: %w", err)
+ }
+
+ // Extract txID from txInfo (every transaction must have txID)
+ txID, ok := txInfo["id"].(string)
+ if !ok {
+ return nil, xerrors.Errorf("txInfo id is not a string or is missing: %+v", txInfo)
+ }
+ // Add transaction type if found
+ if txType, exists := txTypeMap[txID]; exists {
+ txInfo["type"] = txType
+ }
+
+ // Re-serialize the modified txInfo
+ modifiedBytes, err := json.Marshal(txInfo)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to marshal modified txInfo: %w", err)
+ }
+
+ results = append(results, modifiedBytes)
+ }
+
+ return results, nil
+}
+
+// extractTransactionTypes extracts transaction types from block data, indexed by txID
+func (c *TronClient) extractTransactionTypes(blockTxData []byte) (map[string]string, error) {
+ if len(blockTxData) == 0 {
+ return make(map[string]string), nil
+ }
+
+ // Parse the block data
+ var blockData map[string]interface{}
+ if err := json.Unmarshal(blockTxData, &blockData); err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal block data: %w", err)
+ }
+
+ txTypeMap := make(map[string]string)
+
+ // Extract transactions array
+ transactions, ok := blockData["transactions"].([]any)
+ if !ok {
+ return txTypeMap, nil // No transactions in block
+ }
+ // Extract the txID and contract type from each transaction (every transaction must have a txID)
+ for _, tx := range transactions {
+ txMap, ok := tx.(map[string]any)
+ if !ok {
+ return nil, xerrors.Errorf("failed to assert transaction as map[string]interface{}: %+v", tx)
+ }
+
+ txID, ok := txMap["txID"].(string)
+ if !ok {
+ return nil, xerrors.Errorf("transaction is missing txID or it's not a string: %+v", txMap)
+ }
+
+ rawDataVal, ok := txMap["raw_data"]
+ if !ok {
+ continue // Or return an error if raw_data is always expected
+ }
+ rawData, ok := rawDataVal.(map[string]any)
+ if !ok {
+ return nil, xerrors.Errorf("raw_data is not a map: %+v", rawDataVal)
+ }
+
+ contractsVal, ok := rawData["contract"]
+ if !ok {
+ continue
+ }
+ contracts, ok := contractsVal.([]any)
+ if !ok || len(contracts) == 0 {
+ continue
+ }
+
+ contract, ok := contracts[0].(map[string]any)
+ if !ok {
+ return nil, xerrors.Errorf("contract is not a map: %+v", contracts[0])
+ }
+
+ txType, ok := contract["type"].(string)
+ if !ok {
+ return nil, xerrors.Errorf("contract type is not a string: %+v", contract["type"])
+ }
+
+ txTypeMap[txID] = txType
+ }
+
+ return txTypeMap, nil
+}
diff --git a/internal/blockchain/client/ethereum/tron_test.go b/internal/blockchain/client/ethereum/tron_test.go
new file mode 100644
index 0000000..6c4de28
--- /dev/null
+++ b/internal/blockchain/client/ethereum/tron_test.go
@@ -0,0 +1,305 @@
+package ethereum
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "go.uber.org/fx"
+ "go.uber.org/mock/gomock"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/client/internal"
+ "github.com/coinbase/chainstorage/internal/blockchain/jsonrpc"
+ jsonrpcmocks "github.com/coinbase/chainstorage/internal/blockchain/jsonrpc/mocks"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser"
+ "github.com/coinbase/chainstorage/internal/blockchain/restapi"
+ restapimocks "github.com/coinbase/chainstorage/internal/blockchain/restapi/mocks"
+ "github.com/coinbase/chainstorage/internal/dlq"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ "github.com/coinbase/chainstorage/protos/coinbase/c3/common"
+)
+
+type (
+ tronClientTestSuite struct {
+ suite.Suite
+
+ ctrl *gomock.Controller
+ app testapp.TestApp
+ rpcClient *jsonrpcmocks.MockClient
+ restClient *restapimocks.MockClient
+ client internal.Client
+ }
+)
+
+const (
+ tronTestTag = uint32(2)
+ tronTestHeight = ethereumHeight
+ fixtureBlockTxInfoResponse = `
+ [
+ {
+ "blockNumber": 11322000,
+ "contractResult": [
+ ""
+ ],
+ "blockTimeStamp": 1725466323000,
+ "receipt": {
+ "net_usage": 268
+ },
+ "id": "baa42c87b7c764c548fa37e61e9764415fd4a79d7e073d4f92a456698002016b"
+ },
+ {
+ "id": "f5365847bff6e48d0c6bc23eee276343d2987efd9876c3c1bf597225e3d69991",
+ "blockNumber": 11322000,
+ "internal_transactions": [
+ {
+ "hash": "27b42aff06882822a0c84211121e5f98c06a9b074ee84a085c998397b8b2da3a",
+ "caller_address": "4158baea0b354f7b333b3b1563c849e979ae4e2002",
+ "transferTo_address": "41eed9e56a5cddaa15ef0c42984884a8afcf1bdebb",
+ "callValueInfo": [
+ {}
+ ],
+ "note": "63616c6c"
+ },
+ {
+ "hash": "3e2b8ca208f6c899afdc74b772a4504cdd6704bbeff6d34045351c9ad83f478d",
+ "caller_address": "4158baea0b354f7b333b3b1563c849e979ae4e2002",
+ "transferTo_address": "41f5a6eae2fb24b0bda6288e346982fc14e094c19a",
+ "callValueInfo": [
+ {
+ "callValue": 405000000
+ }
+ ],
+ "note": "63616c6c"
+ }
+ ]
+ }
+ ]
+ `
+ fixtureBlockTxResponse = `
+ {
+ "blockID": "000000000408a36cd32a8e674045e96f895b7708b85fa5141f3c8fd92eb497a8",
+ "block_header": {
+ "raw_data": {
+ "number": 67674988,
+ "txTrieRoot": "08203d3094277ae2bfc9981bde29400a87ab5bc6b9aa807ce42d96a2de5ea109",
+ "witness_address": "417f5e5aca5332ce5e18414d7f85bb62097cefa453",
+ "parentHash": "000000000408a36b033d241520fd155cb0351a27bce043ddb7799ec2790ca1ee",
+ "version": 31,
+ "timestamp": 1733675346000
+ },
+ "witness_signature": "241ea0cb69f7e3dd1436c03c557f2b4a005f6ca315d968a11c1115324f795f2875883db4c91e74a0638c9100a4ced4196080fdaf6077881936636e6169d0fbb801"
+ },
+ "transactions": [
+ {
+ "ret": [
+ {
+ "contractRet": "SUCCESS"
+ }
+ ],
+ "signature": [
+ "e93160d1df484382923db34f02ca196a7dbbd7342948214a739fdf4b96896cfc2ca4d9b706c2393c3f83d269f31901aee43ae8e50a04579b6636c03c321da8e000"
+ ],
+ "txID": "0xbaa42c87b7c764c548fa37e61e9764415fd4a79d7e073d4f92a456698002016b",
+ "raw_data": {
+ "contract": [
+ {
+ "parameter": {
+ "value": {
+ "amount": 6,
+ "owner_address": "4174d7980a2a3e48e3a863365542e92ab8d646e0aa",
+ "to_address": "41c3d34597fb01e25d4d8e7ef34ccdd0f05bf85473"
+ },
+ "type_url": "type.googleapis.com/protocol.TransferContract"
+ },
+ "type": "TransferContract"
+ }
+ ],
+ "ref_block_bytes": "a358",
+ "ref_block_hash": "c773a99ac91938c6",
+ "expiration": 1733675400000,
+ "timestamp": 1733675342465
+ },
+ "raw_data_hex": "0a02a3582208c773a99ac91938c640c0f6ecb8ba325a65080112610a2d747970652e676f6f676c65617069732e636f6d2f70726f746f636f6c2e5472616e73666572436f6e747261637412300a154174d7980a2a3e48e3a863365542e92ab8d646e0aa121541c3d34597fb01e25d4d8e7ef34ccdd0f05bf8547318067081b5e9b8ba32"
+ },
+ {
+ "ret": [
+ {
+ "contractRet": "SUCCESS"
+ }
+ ],
+ "signature": [
+ "27a73675483a50fa52972e7c5ffe33e931ffad1a037a79c3ff2ab97cf08315b557a723d3fc87d5a1e81165775fdfba4b14c7aab31ccd74d06d36bcdd4615f18a1c"
+ ],
+ "txID": "f5365847bff6e48d0c6bc23eee276343d2987efd9876c3c1bf597225e3d69991",
+ "raw_data": {
+ "contract": [
+ {
+ "parameter": {
+ "value": {
+ "balance": 248167143,
+ "receiver_address": "414c614b77e81392450d88f9e339481843abae0e34",
+ "owner_address": "410611d6c1d784930f53979f06f10306251919e1ea"
+ },
+ "type_url": "type.googleapis.com/protocol.UnDelegateResourceContract"
+ },
+ "type": "UnDelegateResourceContract"
+ }
+ ],
+ "ref_block_bytes": "a343",
+ "ref_block_hash": "92d31415885cc680",
+ "expiration": 1733761442497,
+ "fee_limit": 30000000,
+ "timestamp": 1733675342497
+ },
+ "raw_data_hex": "0a02a343220892d31415885cc68040c1c5f0e1ba325a72083a126e0a37747970652e676f6f676c65617069732e636f6d2f70726f746f636f6c2e556e44656c65676174655265736f75726365436f6e747261637412330a15410611d6c1d784930f53979f06f10306251919e1ea18e7f5aa762215414c614b77e81392450d88f9e339481843abae0e3470a1b5e9b8ba3290018087a70e"
+ }
+ ]
+ }
+ `
+)
+
+func TestTronClientTestSuite(t *testing.T) {
+ suite.Run(t, new(tronClientTestSuite))
+}
+
+func (s *tronClientTestSuite) SetupTest() {
+ s.ctrl = gomock.NewController(s.T())
+ s.rpcClient = jsonrpcmocks.NewMockClient(s.ctrl)
+ s.restClient = restapimocks.NewMockClient(s.ctrl)
+
+ var result internal.ClientParams
+ s.app = testapp.New(
+ s.T(),
+ testapp.WithBlockchainNetwork(common.Blockchain_BLOCKCHAIN_TRON, common.Network_NETWORK_TRON_MAINNET),
+ Module,
+ testTronApiModule(s.rpcClient, s.restClient),
+ fx.Populate(&result),
+ )
+
+ s.client = result.Master
+ s.NotNil(s.client)
+}
+
+func (s *tronClientTestSuite) TearDownTest() {
+ s.app.Close()
+ s.ctrl.Finish()
+}
+
+func testTronApiModule(rpcClient *jsonrpcmocks.MockClient, restClient *restapimocks.MockClient) fx.Option {
+ return fx.Options(
+ internal.Module,
+ fx.Provide(fx.Annotated{
+ Name: "master",
+ Target: func() jsonrpc.Client { return rpcClient },
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "slave",
+ Target: func() jsonrpc.Client { return rpcClient },
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "validator",
+ Target: func() jsonrpc.Client { return rpcClient },
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "consensus",
+ Target: func() jsonrpc.Client { return rpcClient },
+ }),
+ fx.Provide(fx.Annotated{
+ Name: "additional",
+ Target: func() restapi.Client { return restClient },
+ }),
+ fx.Provide(dlq.NewNop),
+ fx.Provide(parser.NewNop),
+ )
+}
+
+func (s *tronClientTestSuite) TestTronClient_New() {
+ require := testutil.Require(s.T())
+
+ var tronClientResult TronClientParams
+ var clientResult internal.ClientParams
+ app := testapp.New(
+ s.T(),
+ Module,
+ internal.Module,
+ jsonrpc.Module,
+ restapi.Module,
+ testapp.WithBlockchainNetwork(common.Blockchain_BLOCKCHAIN_TRON, common.Network_NETWORK_TRON_MAINNET),
+ fx.Provide(dlq.NewNop),
+ fx.Provide(parser.NewNop),
+ fx.Populate(&tronClientResult),
+ fx.Populate(&clientResult),
+ )
+ defer app.Close()
+
+ require.NotNil(s.client)
+ require.NotNil(tronClientResult.AdditionalClient)
+ s.NotNil(clientResult.Master)
+ s.NotNil(clientResult.Slave)
+ s.NotNil(clientResult.Validator)
+ s.NotNil(clientResult.Consensus)
+}
+
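+// TestTronClient_GetBlockByHeight reuses the shared Ethereum fixtures for the jsonrpc calls and the
+// Tron fixtures above for the wallet API calls.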
+func (s *tronClientTestSuite) TestTronClient_GetBlockByHeight() {
+ require := testutil.Require(s.T())
+ // mock block jsonrpc request --------------------
+ blockResponse := &jsonrpc.Response{
+ Result: json.RawMessage(fixtureBlock),
+ }
+ s.rpcClient.EXPECT().Call(
+ gomock.Any(), ethGetBlockByNumberMethod, jsonrpc.Params{
+ "0xacc290",
+ true,
+ },
+ ).Return(blockResponse, nil)
+ // mock TxReceipt jsonrpc request --------------------
+ receiptResponse := []*jsonrpc.Response{
+ {Result: json.RawMessage(fixtureReceipt)},
+ {Result: json.RawMessage(fixtureReceipt)},
+ }
+ s.rpcClient.EXPECT().BatchCall(
+ gomock.Any(), ethGetTransactionReceiptMethod, gomock.Any(),
+ ).Return(receiptResponse, nil)
+
+ // mock BlockTxInfo restapi request --------------------
+ blockTxInfoPostData := tronBlockNumRequestData{Num: ethereumHeight}
+ postData, _ := json.Marshal(blockTxInfoPostData)
+ txr := json.RawMessage(fixtureBlockTxInfoResponse)
+ s.restClient.EXPECT().Call(gomock.Any(), tronTxInfoMethod, postData).Return(txr, nil)
+
+ // mock BlockTx restapi request --------------------
+ blockTxPostData := tronBlockNumRequestData{Num: ethereumHeight}
+ postData, _ = json.Marshal(blockTxPostData)
+ txr = json.RawMessage(fixtureBlockTxResponse)
+ s.restClient.EXPECT().Call(gomock.Any(), tronBlockTxMethod, postData).Return(txr, nil)
+
+ block, err := s.client.GetBlockByHeight(context.Background(), tronTestTag, tronTestHeight)
+ require.NoError(err)
+ require.Equal(common.Blockchain_BLOCKCHAIN_TRON, block.Blockchain)
+ require.Equal(common.Network_NETWORK_TRON_MAINNET, block.Network)
+
+ metadata := block.Metadata
+ require.NotNil(metadata)
+ require.Equal(ethereumHash, metadata.Hash)
+ require.Equal(ethereumParentHash, metadata.ParentHash)
+ require.Equal(ethereumHeight, metadata.Height)
+ require.Equal(ethereumParentHeight, metadata.ParentHeight)
+ require.Equal(tronTestTag, metadata.Tag)
+
+ blobdata := block.GetEthereum()
+ require.NotNil(blobdata)
+ require.NotNil(blobdata.Header)
+ require.Equal(2, len(blobdata.TransactionReceipts))
+ require.NotNil(blobdata.TransactionTraces)
+ require.Equal(2, len(blobdata.TransactionTraces))
+ require.NotNil(blobdata.TransactionTraces[0])
+ require.NotNil(blobdata.TransactionTraces[1])
+ require.Nil(blobdata.Uncles)
+}
+
+// TODO: add test case for TronClient.getBlockTraces
diff --git a/internal/blockchain/client/internal/client.go b/internal/blockchain/client/internal/client.go
index 4a0d9c1..72785bb 100644
--- a/internal/blockchain/client/internal/client.go
+++ b/internal/blockchain/client/internal/client.go
@@ -54,22 +54,30 @@ type (
Params struct {
fx.In
fxparams.Params
- Parser parser.Parser
- Bitcoin ClientFactory `name:"bitcoin" optional:"true"`
- Bsc ClientFactory `name:"bsc" optional:"true"`
- Ethereum ClientFactory `name:"ethereum" optional:"true"`
- Rosetta ClientFactory `name:"rosetta" optional:"true"`
- Solana ClientFactory `name:"solana" optional:"true"`
- Polygon ClientFactory `name:"polygon" optional:"true"`
- Avacchain ClientFactory `name:"avacchain" optional:"true"`
- Arbitrum ClientFactory `name:"arbitrum" optional:"true"`
- Optimism ClientFactory `name:"optimism" optional:"true"`
- Base ClientFactory `name:"base" optional:"true"`
- Fantom ClientFactory `name:"fantom" optional:"true"`
- Aptos ClientFactory `name:"aptos" optional:"true"`
- EthereumBeacon ClientFactory `name:"ethereum/beacon" optional:"true"`
- CosmosStaking ClientFactory `name:"cosmos/staking" optional:"true"`
- CardanoStaking ClientFactory `name:"cardano/staking" optional:"true"`
+ Parser parser.Parser
+ Bitcoin ClientFactory `name:"bitcoin" optional:"true"`
+ Bsc ClientFactory `name:"bsc" optional:"true"`
+ Ethereum ClientFactory `name:"ethereum" optional:"true"`
+ Rosetta ClientFactory `name:"rosetta" optional:"true"`
+ Solana ClientFactory `name:"solana" optional:"true"`
+ Polygon ClientFactory `name:"polygon" optional:"true"`
+ Avacchain ClientFactory `name:"avacchain" optional:"true"`
+ Arbitrum ClientFactory `name:"arbitrum" optional:"true"`
+ Optimism ClientFactory `name:"optimism" optional:"true"`
+ Base ClientFactory `name:"base" optional:"true"`
+ Fantom ClientFactory `name:"fantom" optional:"true"`
+ Aptos ClientFactory `name:"aptos" optional:"true"`
+ EthereumBeacon ClientFactory `name:"ethereum/beacon" optional:"true"`
+ CosmosStaking ClientFactory `name:"cosmos/staking" optional:"true"`
+ CardanoStaking ClientFactory `name:"cardano/staking" optional:"true"`
+ Tron ClientFactory `name:"tron" optional:"true"`
+ Story ClientFactory `name:"story" optional:"true"`
+ EthereumClassic ClientFactory `name:"ethereumclassic" optional:"true"`
+ Plasma ClientFactory `name:"plasma" optional:"true"`
+ Monad ClientFactory `name:"monad" optional:"true"`
+ Abstract ClientFactory `name:"abstract" optional:"true"`
+ Megaeth ClientFactory `name:"megaeth" optional:"true"`
+ Seismic ClientFactory `name:"seismic" optional:"true"`
}
ClientParams struct {
@@ -111,7 +119,7 @@ func NewClient(params Params) (Result, error) {
sidechain := params.Config.Chain.Sidechain
if sidechain == api.SideChain_SIDECHAIN_NONE {
switch blockchain {
- case common.Blockchain_BLOCKCHAIN_BITCOIN:
+ case common.Blockchain_BLOCKCHAIN_BITCOIN, common.Blockchain_BLOCKCHAIN_BITCOINCASH, common.Blockchain_BLOCKCHAIN_LITECOIN:
factory = params.Bitcoin
case common.Blockchain_BLOCKCHAIN_BSC:
factory = params.Bsc
@@ -133,6 +141,22 @@ func NewClient(params Params) (Result, error) {
factory = params.Base
case common.Blockchain_BLOCKCHAIN_APTOS:
factory = params.Aptos
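+ // The EVM-compatible chains below mostly reuse the Ethereum client factory; Tron wraps it
+ // with an additional REST client for traces.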
+ case common.Blockchain_BLOCKCHAIN_TRON:
+ factory = params.Tron
+ case common.Blockchain_BLOCKCHAIN_STORY:
+ factory = params.Story
+ case common.Blockchain_BLOCKCHAIN_ETHEREUMCLASSIC:
+ factory = params.EthereumClassic
+ case common.Blockchain_BLOCKCHAIN_PLASMA:
+ factory = params.Plasma
+ case common.Blockchain_BLOCKCHAIN_MONAD:
+ factory = params.Monad
+ case common.Blockchain_BLOCKCHAIN_ABSTRACT:
+ factory = params.Abstract
+ case common.Blockchain_BLOCKCHAIN_MEGAETH:
+ factory = params.Megaeth
+ case common.Blockchain_BLOCKCHAIN_SEISMIC:
+ factory = params.Seismic
default:
if params.Config.IsRosetta() {
factory = params.Rosetta
diff --git a/internal/blockchain/endpoints/endpoint_provider.go b/internal/blockchain/endpoints/endpoint_provider.go
index cc8abe2..ebef59b 100644
--- a/internal/blockchain/endpoints/endpoint_provider.go
+++ b/internal/blockchain/endpoints/endpoint_provider.go
@@ -5,6 +5,8 @@ import (
"fmt"
"math/rand"
"net/http"
+ "os"
+ "regexp"
"github.com/uber-go/tally/v4"
"go.uber.org/fx"
@@ -50,10 +52,11 @@ type (
EndpointProviderResult struct {
fx.Out
- Master EndpointProvider `name:"master"`
- Slave EndpointProvider `name:"slave"`
- Validator EndpointProvider `name:"validator"`
- Consensus EndpointProvider `name:"consensus"`
+ Master EndpointProvider `name:"master"`
+ Slave EndpointProvider `name:"slave"`
+ Validator EndpointProvider `name:"validator"`
+ Consensus EndpointProvider `name:"consensus"`
+ Additional EndpointProvider `name:"additional"`
}
)
@@ -73,15 +76,17 @@ type (
)
const (
- masterEndpointGroupName = "master"
- slaveEndpointGroupName = "slave"
- validatorEndpointGroupName = "validator"
- consensusEndpointGroupName = "consensus"
- contextKeyFailover = "failover:"
+ masterEndpointGroupName = "master"
+ slaveEndpointGroupName = "slave"
+ validatorEndpointGroupName = "validator"
+ consensusEndpointGroupName = "consensus"
+ contextKeyFailover = "failover:"
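+ // The "additional" group backs auxiliary REST endpoints (e.g. Tron's wallet API) alongside the jsonrpc groups.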
+ additionalEndpointGroupName = "additional"
)
var (
ErrFailoverUnavailable = xerrors.New("no endpoint is available for failover")
+ placeholderVarRE = regexp.MustCompile(`\{([A-Za-z_][A-Za-z0-9_]*)\}`)
)
func NewEndpointProvider(params EndpointProviderParams) (EndpointProviderResult, error) {
@@ -116,12 +121,16 @@ func NewEndpointProvider(params EndpointProviderParams) (EndpointProviderResult,
return EndpointProviderResult{}, xerrors.Errorf("failed to create consensus endpoint provider with slave endpoints: %w", err)
}
}
-
+ additional, err := newEndpointProvider(logger, params.Config, scope, ¶ms.Config.Chain.Client.Additional.EndpointGroup, additionalEndpointGroupName)
+ if err != nil {
+ return EndpointProviderResult{}, xerrors.Errorf("failed to create additional endpoint provider: %w", err)
+ }
return EndpointProviderResult{
- Master: master,
- Slave: slave,
- Validator: validator,
- Consensus: consensus,
+ Master: master,
+ Slave: slave,
+ Validator: validator,
+ Consensus: consensus,
+ Additional: additional,
}, nil
}
@@ -207,6 +216,15 @@ func newEndpoint(
endpoint *config.Endpoint,
endpointConfig *config.EndpointConfig,
) (*Endpoint, error) {
+ // Expand URL placeholders before the URL is used anywhere else (e.g. in the sticky session cookie hash).
+ if endpoint.Url != "" {
+ expanded, err := expandEndpointURL(endpoint.Url)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to expand endpoint url for %q: %w", endpoint.Name, err)
+ }
+ endpoint.Url = expanded
+ }
+
var opts []ClientOption
//TODO: check if this is still needed
@@ -286,6 +304,38 @@ func newEndpoint(
}, nil
}
+// expandEndpointURL expands placeholders in the URL using environment variables.
+//
+// Supported format:
+// - {VAR}: custom syntax (e.g. {URL_TOKEN})
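+// For example, "https://host/v2/{URL_TOKEN}" with URL_TOKEN=abc expands to "https://host/v2/abc".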
+//
+// It returns an error if any placeholder is left unresolved.
+func expandEndpointURL(raw string) (string, error) {
+ missing := make(map[string]struct{})
+ expanded := placeholderVarRE.ReplaceAllStringFunc(raw, func(m string) string {
+ sub := placeholderVarRE.FindStringSubmatch(m)
+ // sub is always [full, group1] when regex matches, but keep it defensive.
+ if len(sub) != 2 {
+ return m
+ }
+
+ key := sub[1]
+ val, ok := os.LookupEnv(key)
+ if !ok {
+ missing[key] = struct{}{}
+ return m
+ }
+ return val
+ })
+
+ if len(missing) > 0 {
+ // Keep the expanded URL in the error for debugging (it will still contain {VAR} markers).
+ return "", xerrors.Errorf("endpoint url contains unresolved placeholder(s): %v", expanded)
+ }
+
+ return expanded, nil
+}
+
func getStickySessionValue(cfg *config.Config) string {
headerValue := fmt.Sprintf("%v-%v-%v", consts.ServiceName, cfg.Chain.Network.GetName(), cfg.Env())
return utils.GenerateSha256HashString(headerValue)
diff --git a/internal/blockchain/endpoints/endpoint_provider_test.go b/internal/blockchain/endpoints/endpoint_provider_test.go
index 4c165cc..d029c57 100644
--- a/internal/blockchain/endpoints/endpoint_provider_test.go
+++ b/internal/blockchain/endpoints/endpoint_provider_test.go
@@ -2,6 +2,7 @@ package endpoints
import (
"context"
+ "errors"
"fmt"
"math"
"net/http/cookiejar"
@@ -14,7 +15,6 @@ import (
"go.uber.org/fx/fxtest"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
- "golang.org/x/xerrors"
"github.com/coinbase/chainstorage/internal/config"
"github.com/coinbase/chainstorage/internal/utils/testutil"
@@ -73,6 +73,47 @@ func TestEndpointProvider(t *testing.T) {
}
}
+func TestEndpointProvider_UrlExpandEnv_Curly(t *testing.T) {
+ require := testutil.Require(t)
+
+ t.Setenv("URL_TOKEN", "abc")
+
+ logger := zaptest.NewLogger(t)
+ cfg, err := config.New()
+ require.NoError(err)
+
+ endpointGroup := &config.EndpointGroup{
+ Endpoints: []config.Endpoint{
+ {Name: "e1", Url: "https://example.com/v2/{URL_TOKEN}", Weight: 1},
+ },
+ }
+
+ ctx := context.Background()
+ provider, err := newEndpointProvider(logger, cfg, tally.NoopScope, endpointGroup, "master")
+ require.NoError(err)
+
+ pick, err := provider.GetEndpoint(ctx)
+ require.NoError(err)
+ require.Equal("https://example.com/v2/abc", pick.Config.Url)
+}
+
+func TestEndpointProvider_UrlExpandEnv_MissingVar(t *testing.T) {
+ require := testutil.Require(t)
+
+ logger := zaptest.NewLogger(t)
+ cfg, err := config.New()
+ require.NoError(err)
+
+ endpointGroup := &config.EndpointGroup{
+ Endpoints: []config.Endpoint{
+ {Name: "e1", Url: "https://example.com/v2/{MISSING_TOKEN}", Weight: 1},
+ },
+ }
+
+ _, err = newEndpointProvider(logger, cfg, tally.NoopScope, endpointGroup, "master")
+ require.ErrorContains(err, "failed to expand endpoint url for \"e1\": endpoint url contains unresolved placeholder(s): https://example.com/v2/{MISSING_TOKEN}")
+}
+
func TestEndpointProvider_WithFailover(t *testing.T) {
require := testutil.Require(t)
@@ -212,7 +253,7 @@ func TestEndpointProvider_EmptyEndpoints(t *testing.T) {
_, err = ep.WithFailoverContext(ctx)
require.Equal([]string{"foo"}, getActiveEndpoints(ctx, ep))
require.Error(err)
- require.True(xerrors.Is(err, ErrFailoverUnavailable))
+ require.True(errors.Is(err, ErrFailoverUnavailable))
}
func TestEndpointProvider_StickySessionCookieHash(t *testing.T) {
diff --git a/internal/blockchain/parser/bitcoin/bitcoin_native.go b/internal/blockchain/parser/bitcoin/bitcoin_native.go
index 7ec6d91..ec2d77f 100644
--- a/internal/blockchain/parser/bitcoin/bitcoin_native.go
+++ b/internal/blockchain/parser/bitcoin/bitcoin_native.go
@@ -35,6 +35,10 @@ const (
bitcoinScriptTypeNullData string = "nulldata"
bitcoinScriptTypeWitnessUnknown string = "witness_unknown"
bitcoinScriptTypeWitnessV1Taproot string = "witness_v1_taproot"
+ bitcoinScriptTypeAnchor string = "anchor"
+ // TODO: create a Litecoin parser for LTC addresses.
+ bitcoinScriptTypeMwebPegin string = "witness_mweb_pegin"
+ bitcoinScriptTypeMwebHogaddr string = "witness_mweb_hogaddr"
)
type (
@@ -190,10 +194,13 @@ func validateBitcoinScriptPubKey(sl validator.StructLevel) {
}
}
// Types that we expect to be able to parse address for
- case bitcoinScriptTypePubKeyHash, bitcoinScriptTypeScriptHash, bitcoinScriptTypeWitnessV0PubKeyHash, bitcoinScriptTypeWitnessV0ScriptHash, bitcoinScriptTypeWitnessUnknown, bitcoinScriptTypeWitnessV1Taproot:
+ // https://github.com/bitcoin/bitcoin/commit/455fca86cfada1823aa28615b5683f9dc73dbb9a
+ case bitcoinScriptTypePubKeyHash, bitcoinScriptTypeScriptHash, bitcoinScriptTypeWitnessV0PubKeyHash, bitcoinScriptTypeWitnessV0ScriptHash, bitcoinScriptTypeWitnessUnknown, bitcoinScriptTypeWitnessV1Taproot, bitcoinScriptTypeAnchor:
if len(address) == 0 {
sl.ReportError(address, "Address[main]", "Address[main]", "bspk_a", "")
- }
+ }
+ // Litecoin MWEB script types; address parsing is not validated for these.
+ case bitcoinScriptTypeMwebPegin, bitcoinScriptTypeMwebHogaddr:
+ // https://github.com/litecoin-project/litecoin/blob/cd1660afaf5b31a80e797668b12b5b3933844842/src/script/standard.cpp#L60
default:
sl.ReportError(address, "Address[unsupported]", "Address[unsupported]", "bspk_as", "")
}
diff --git a/internal/blockchain/parser/ethereum/abstract_native.go b/internal/blockchain/parser/ethereum/abstract_native.go
new file mode 100644
index 0000000..a7be06e
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/abstract_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewAbstractNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Reuse the Ethereum native parser since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/abstract_validator.go b/internal/blockchain/parser/ethereum/abstract_validator.go
new file mode 100644
index 0000000..228820f
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/abstract_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewAbstractValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/ethereum_native.go b/internal/blockchain/parser/ethereum/ethereum_native.go
index 6dabbf4..5cf8bda 100644
--- a/internal/blockchain/parser/ethereum/ethereum_native.go
+++ b/internal/blockchain/parser/ethereum/ethereum_native.go
@@ -238,18 +238,20 @@ type (
}
ethereumNativeParserImpl struct {
- Logger *zap.Logger
- validate *validator.Validate
- nodeType types.EthereumNodeType
- traceType types.TraceType
- config *config.Config
- metrics *ethereumNativeParserMetrics
+ Logger *zap.Logger
+ validate *validator.Validate
+ nodeType types.EthereumNodeType
+ traceType types.TraceType
+ config *config.Config
+ metrics *ethereumNativeParserMetrics
+ src20Parser SRC20TokenTransferParser // Optional; only Seismic sets this.
}
ethereumParserOptions struct {
nodeType types.EthereumNodeType
traceType types.TraceType
checksumAddress bool
+ src20Parser SRC20TokenTransferParser
}
nestedParityTrace struct {
@@ -288,6 +290,7 @@ const (
parseFailure = "parse_failure"
arbitrumNITROUpgradeBlockNumber = 22_207_818
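+ // Tron's JSON-RPC returns this fixed zero placeholder for some quantity fields (e.g. nonce); it is treated as 0 when unmarshaling.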
+ tronNoncePlaceHolder = "0x0000000000000000"
)
func (v EthereumHexString) MarshalJSON() ([]byte, error) {
@@ -331,7 +334,7 @@ func (v *EthereumQuantity) UnmarshalJSON(input []byte) error {
return xerrors.Errorf("failed to unmarshal EthereumQuantity into string: %w", err)
}
- if s == "" {
+ if s == "" || s == tronNoncePlaceHolder {
*v = 0
return nil
}
@@ -448,12 +451,13 @@ func NewEthereumNativeParser(params internal.ParserParams, opts ...internal.Pars
}
return &ethereumNativeParserImpl{
- Logger: log.WithPackage(params.Logger),
- validate: validator.New(),
- nodeType: options.nodeType,
- traceType: options.traceType,
- config: params.Config,
- metrics: newEthereumNativeParserMetrics(params.Metrics),
+ Logger: log.WithPackage(params.Logger),
+ validate: validator.New(),
+ nodeType: options.nodeType,
+ traceType: options.traceType,
+ config: params.Config,
+ metrics: newEthereumNativeParserMetrics(params.Metrics),
+ src20Parser: options.src20Parser,
}, nil
}
@@ -492,6 +496,15 @@ func WithEthereumChecksumAddress() internal.ParserFactoryOption {
}
}
+// WithSRC20Parser sets the SRC20 token transfer parser for Seismic chain.
+func WithSRC20Parser(parser SRC20TokenTransferParser) internal.ParserFactoryOption {
+ return func(options any) {
+ if v, ok := options.(*ethereumParserOptions); ok {
+ v.src20Parser = parser
+ }
+ }
+}
+
func (p *ethereumNativeParserImpl) ParseBlock(ctx context.Context, rawBlock *api.Block) (*api.NativeBlock, error) {
metadata := rawBlock.GetMetadata()
if metadata == nil {
@@ -570,14 +583,26 @@ func (p *ethereumNativeParserImpl) ParseBlock(ctx context.Context, rawBlock *api
if numTransactions != len(tokenTransfers) {
return nil, xerrors.Errorf("unexpected number of token transfers: expected=%v actual=%v", numTransactions, len(tokenTransfers))
}
-
transactionToFlattenedTracesMap := make(map[string][]*api.EthereumTransactionFlattenedTrace, 0)
if isParityTrace {
- if err := p.parseTransactionFlattenedParityTraces(blobdata, transactionToFlattenedTracesMap); err != nil {
- return nil, xerrors.Errorf("failed to parse transaction parity traces: %w", err)
+ if p.config.Blockchain() != common.Blockchain_BLOCKCHAIN_TRON {
+ if err := p.parseTransactionFlattenedParityTraces(blobdata, transactionToFlattenedTracesMap); err != nil {
+ return nil, xerrors.Errorf("failed to parse transaction parity traces: %w", err)
+ }
+ }
+ }
+ // Post-process Tron block data: convert hashes and account addresses, and set flattened traces.
+ if p.config.Blockchain() == common.Blockchain_BLOCKCHAIN_TRON {
+ if err := postProcessTronBlock(
+ blobdata,
+ header,
+ transactions,
+ transactionReceipts,
+ tokenTransfers,
+ transactionToFlattenedTracesMap); err != nil {
+ return nil, xerrors.Errorf("failed to post process tron block: %w", err)
}
}
-
for i, transaction := range transactions {
transaction.Receipt = transactionReceipts[i]
transaction.TokenTransfers = tokenTransfers[i]
@@ -606,8 +631,8 @@ func (p *ethereumNativeParserImpl) ParseBlock(ctx context.Context, rawBlock *api
Blockchain: rawBlock.Blockchain,
Network: rawBlock.Network,
Tag: metadata.Tag,
- Hash: metadata.Hash,
- ParentHash: metadata.ParentHash,
+ Hash: header.Hash,
+ ParentHash: header.ParentHash,
Height: metadata.Height,
ParentHeight: metadata.ParentHeight,
Timestamp: header.Timestamp,
@@ -897,7 +922,7 @@ func (p *ethereumNativeParserImpl) parseTransactionReceipts(blobdata *api.Ethere
}
// Field effectiveGasPrice is added to the eth_getTransactionReceipt call for EIP-1559.
- // Pre-London, it is equal to the transaction’s gasPrice.
+ // Pre-London, it is equal to the transaction's gasPrice.
// Post-London, it is equal to the actual gas price paid for inclusion.
// Since it's hard to backfill all old blocks, set `effectiveGasPrice` as gasPrice for Pre-London blocks.
// Ref: https://hackmd.io/@timbeiko/1559-json-rpc
@@ -1244,7 +1269,6 @@ func (p *ethereumNativeParserImpl) parseTokenTransfers(transactionReceipts []*ap
if len(eventLog.Topics) == 3 && eventLog.Topics[0] == TransferEventTopic {
// Parse ERC-20 token
// https://ethereum.org/en/developers/docs/standards/tokens/erc-20/
-
tokenTransfer, err := p.parseERC20TokenTransfer(eventLog)
if err != nil {
return nil, xerrors.Errorf("failed to parse erc20 token transfer: %w", err)
@@ -1262,6 +1286,17 @@ func (p *ethereumNativeParserImpl) parseTokenTransfers(transactionReceipts []*ap
if tokenTransfer != nil {
tokenTransfers = append(tokenTransfers, tokenTransfer)
}
+ } else if p.src20Parser != nil && len(eventLog.Topics) == 4 && eventLog.Topics[0] == SRCTransferEventTopic {
+ // Parse SRC-20 token (Seismic only)
+ // Outer topic check avoids unnecessary function calls
+ // SRCTransferEventTopic is exported from seismic_native.go
+ tokenTransfer, err := p.src20Parser.ParseSRC20TokenTransfer(eventLog)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to parse src20 token transfer: %w", err)
+ }
+ if tokenTransfer != nil {
+ tokenTransfers = append(tokenTransfers, tokenTransfer)
+ }
}
}
diff --git a/internal/blockchain/parser/ethereum/ethereumclassic_native.go b/internal/blockchain/parser/ethereum/ethereumclassic_native.go
new file mode 100644
index 0000000..a26fca6
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/ethereumclassic_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewEthereumClassicNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Ethereum Classic shares the same data schema as Ethereum since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/ethereumclassic_validator.go b/internal/blockchain/parser/ethereum/ethereumclassic_validator.go
new file mode 100644
index 0000000..2b2834d
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/ethereumclassic_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewEthereumClassicValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/megaeth_native.go b/internal/blockchain/parser/ethereum/megaeth_native.go
new file mode 100644
index 0000000..2ab8c7f
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/megaeth_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewMegaethNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Reuse the Ethereum native parser since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/megaeth_validator.go b/internal/blockchain/parser/ethereum/megaeth_validator.go
new file mode 100644
index 0000000..fea6128
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/megaeth_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewMegaethValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/module.go b/internal/blockchain/parser/ethereum/module.go
index 8290ee8..794b927 100644
--- a/internal/blockchain/parser/ethereum/module.go
+++ b/internal/blockchain/parser/ethereum/module.go
@@ -36,5 +36,29 @@ var Module = fx.Options(
Build(),
internal.NewParserBuilder("fantom", NewFantomNativeParser).
Build(),
+ internal.NewParserBuilder("tron", NewTronNativeParser).
+ SetValidatorFactory(NewBaseValidator).
+ Build(),
+ internal.NewParserBuilder("story", NewStoryNativeParser).
+ SetValidatorFactory(NewStoryValidator).
+ Build(),
+ internal.NewParserBuilder("ethereumclassic", NewEthereumClassicNativeParser).
+ SetValidatorFactory(NewEthereumClassicValidator).
+ Build(),
+ internal.NewParserBuilder("plasma", NewPlasmaNativeParser).
+ SetValidatorFactory(NewPlasmaValidator).
+ Build(),
+ internal.NewParserBuilder("monad", NewMonadNativeParser).
+ SetValidatorFactory(NewMonadValidator).
+ Build(),
+ internal.NewParserBuilder("abstract", NewAbstractNativeParser).
+ SetValidatorFactory(NewAbstractValidator).
+ Build(),
+ internal.NewParserBuilder("megaeth", NewMegaethNativeParser).
+ SetValidatorFactory(NewMegaethValidator).
+ Build(),
+ internal.NewParserBuilder("seismic", NewSeismicNativeParser).
+ SetValidatorFactory(NewSeismicValidator).
+ Build(),
beacon.Module,
)
diff --git a/internal/blockchain/parser/ethereum/monad_native.go b/internal/blockchain/parser/ethereum/monad_native.go
new file mode 100644
index 0000000..8fb7437
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/monad_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewMonadNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Reuse the Ethereum native parser since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/monad_validator.go b/internal/blockchain/parser/ethereum/monad_validator.go
new file mode 100644
index 0000000..c62cac5
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/monad_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewMonadValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/plasma_native.go b/internal/blockchain/parser/ethereum/plasma_native.go
new file mode 100644
index 0000000..c05818a
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/plasma_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewPlasmaNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Plasma shares the same data schema as Ethereum since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/plasma_validator.go b/internal/blockchain/parser/ethereum/plasma_validator.go
new file mode 100644
index 0000000..87453d6
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/plasma_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewPlasmaValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/seismic_native.go b/internal/blockchain/parser/ethereum/seismic_native.go
new file mode 100644
index 0000000..65db9c7
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/seismic_native.go
@@ -0,0 +1,178 @@
+package ethereum
+
+import (
+ "crypto/cipher"
+ "encoding/hex"
+ "os"
+ "strings"
+
+ "github.com/SeismicSystems/aes"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+const (
+ // SRCTransferEventTopic is the event signature for SRC20 Transfer events.
+ // Exported for use in ethereum_native.go for the outer topic check.
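+ // Presumably keccak256("Transfer(address,address,bytes32,bytes)"), matching the Transfer event in the SRC20 ABI below.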
+ SRCTransferEventTopic = "0x80ffa007a69623ef13594f5e8178eee6c4ef2d0cba74c08329e879f695b7d3f6"
+
+ src20Abi = `[
+ {
+ "type": "event",
+ "name": "Approval",
+ "inputs": [
+ {"name": "owner", "type": "address", "indexed": true, "internalType": "address"},
+ {"name": "spender", "type": "address", "indexed": true, "internalType": "address"},
+ {"name": "encryptKeyHash", "type": "bytes32", "indexed": true, "internalType": "bytes32"},
+ {"name": "encryptedAmount", "type": "bytes", "indexed": false, "internalType": "bytes"}
+ ],
+ "anonymous": false
+ },
+ {
+ "type": "event",
+ "name": "Transfer",
+ "inputs": [
+ {"name": "from", "type": "address", "indexed": true, "internalType": "address"},
+ {"name": "to", "type": "address", "indexed": true, "internalType": "address"},
+ {"name": "encryptKeyHash", "type": "bytes32", "indexed": true, "internalType": "bytes32"},
+ {"name": "encryptedAmount", "type": "bytes", "indexed": false, "internalType": "bytes"}
+ ],
+ "anonymous": false
+ }
+ ]`
+)
+
+// SRC20TokenTransferParser handles parsing of Seismic's encrypted SRC20 token transfers.
+// Interface defined for testability and mock support.
+type SRC20TokenTransferParser interface {
+ ParseSRC20TokenTransfer(eventLog *api.EthereumEventLog) (*api.EthereumTokenTransfer, error)
+}
+
+// seismicSRC20Parser implements SRC20TokenTransferParser for Seismic chain.
+type seismicSRC20Parser struct {
+ src20ABI *abi.ABI
+ aesGCM cipher.AEAD
+}
+
+// NewSeismicSRC20Parser creates a new SRC20 token transfer parser.
+// aesKeyHex should be a hex-encoded AES key (with or without "0x" prefix).
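+// Note: the decoded key is assumed to be a valid AES key length (16, 24, or 32 bytes); CreateAESGCM should reject anything else.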
+func NewSeismicSRC20Parser(aesKeyHex string) (SRC20TokenTransferParser, error) {
+ if aesKeyHex == "" {
+ return nil, xerrors.New("SRC20 AES key is required")
+ }
+
+ // Parse ABI
+ contractAbi, err := abi.JSON(strings.NewReader(src20Abi))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to parse SRC20 ABI: %w", err)
+ }
+
+ // Decode AES key
+ aesKey, err := hex.DecodeString(strings.TrimPrefix(aesKeyHex, "0x"))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decode AES key: %w", err)
+ }
+
+ // Create AES-GCM cipher
+ aesGCM, err := aes.CreateAESGCM(aesKey)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create AES-GCM cipher: %w", err)
+ }
+
+ return &seismicSRC20Parser{
+ src20ABI: &contractAbi,
+ aesGCM: aesGCM,
+ }, nil
+}
+
+func (p *seismicSRC20Parser) ParseSRC20TokenTransfer(eventLog *api.EthereumEventLog) (*api.EthereumTokenTransfer, error) {
+ // Defensive check: although the outer layer already checks the topic, we verify here for safety
+ if len(eventLog.Topics) != 4 || eventLog.Topics[0] != SRCTransferEventTopic {
+ return nil, nil
+ }
+
+ // Parse event data
+ var transferEvent struct {
+ EncryptedAmount []byte
+ }
+
+ logData, err := hex.DecodeString(strings.TrimPrefix(eventLog.Data, "0x"))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decode log data: %w", err)
+ }
+
+ if err := p.src20ABI.UnpackIntoInterface(&transferEvent, "Transfer", logData); err != nil {
+ return nil, xerrors.Errorf("failed to unpack Transfer event: %w", err)
+ }
+
+ // Decrypt amount
+ value, err := aes.DecryptAESGCM(transferEvent.EncryptedAmount, p.aesGCM)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decrypt amount: %w", err)
+ }
+
+ // Clean addresses from indexed topics
+ tokenAddress, err := internal.CleanAddress(eventLog.Address)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to clean token address for src20: %w", err)
+ }
+ fromAddress, err := internal.CleanAddress(eventLog.Topics[1])
+ if err != nil {
+ return nil, xerrors.Errorf("failed to clean from address for src20: %w", err)
+ }
+ toAddress, err := internal.CleanAddress(eventLog.Topics[2])
+ if err != nil {
+ return nil, xerrors.Errorf("failed to clean to address for src20: %w", err)
+ }
+
+ valueStr := value.String()
+
+ return &api.EthereumTokenTransfer{
+ TokenAddress: tokenAddress,
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ Value: valueStr,
+ TransactionHash: eventLog.TransactionHash,
+ TransactionIndex: eventLog.TransactionIndex,
+ LogIndex: eventLog.LogIndex,
+ BlockHash: eventLog.BlockHash,
+ BlockNumber: eventLog.BlockNumber,
+ TokenTransfer: &api.EthereumTokenTransfer_Erc20{
+ Erc20: &api.ERC20TokenTransfer{
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ Value: valueStr,
+ },
+ },
+ }, nil
+}
+
+// NewSeismicNativeParser creates a new Seismic native parser.
+// It extends the Ethereum parser with SRC20 token transfer parsing capability.
+func NewSeismicNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Get AES key from config, with fallback to environment variable.
+ // This keeps config.New() generic while handling Seismic-specific logic here.
+ aesKey := ""
+ if params.Config.Chain.CustomParams != nil {
+ aesKey = params.Config.Chain.CustomParams["src20_aes_key"]
+ }
+ if aesKey == "" {
+ aesKey = os.Getenv("SRC20_AES_KEY")
+ }
+
+ // If AES key is available, create SRC20 parser
+ if aesKey != "" {
+ src20Parser, err := NewSeismicSRC20Parser(aesKey)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create SRC20 parser: %w", err)
+ }
+ opts = append(opts, WithSRC20Parser(src20Parser))
+ } else {
+ params.Logger.Warn("SRC20_AES_KEY is not configured for Seismic parser; SRC20 token transfers will be skipped.")
+ }
+
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/seismic_validator.go b/internal/blockchain/parser/ethereum/seismic_validator.go
new file mode 100644
index 0000000..265343b
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/seismic_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewSeismicValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/story_native.go b/internal/blockchain/parser/ethereum/story_native.go
new file mode 100644
index 0000000..e46de48
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/story_native.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewStoryNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Story shares the same data schema as Ethereum since it's an EVM chain.
+ return NewEthereumNativeParser(params, opts...)
+}
diff --git a/internal/blockchain/parser/ethereum/story_validator.go b/internal/blockchain/parser/ethereum/story_validator.go
new file mode 100644
index 0000000..3a8d38a
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/story_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewStoryValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/ethereum/story_validator_test.go b/internal/blockchain/parser/ethereum/story_validator_test.go
new file mode 100644
index 0000000..dc62f19
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/story_validator_test.go
@@ -0,0 +1,3 @@
+package ethereum
+
+// TODO: implement tests for the Story validator.
diff --git a/internal/blockchain/parser/ethereum/tron_native.go b/internal/blockchain/parser/ethereum/tron_native.go
new file mode 100644
index 0000000..4261ba7
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/tron_native.go
@@ -0,0 +1,385 @@
+package ethereum
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "strconv"
+ "strings"
+
+ "golang.org/x/xerrors"
+
+ "github.com/mr-tron/base58"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/ethereum/types"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+// TronContractTypeMap maps Tron contract type strings to their corresponding enum values.
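+// The numeric values appear to mirror Tron's Transaction.Contract.ContractType protobuf enum (which skips 7).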
+var TronContractTypeMap = map[string]uint64{
+ "AccountCreateContract": 0,
+ "TransferContract": 1,
+ "TransferAssetContract": 2,
+ "VoteAssetContract": 3,
+ "VoteWitnessContract": 4,
+ "WitnessCreateContract": 5,
+ "AssetIssueContract": 6,
+ "WitnessUpdateContract": 8,
+ "ParticipateAssetIssueContract": 9,
+ "AccountUpdateContract": 10,
+ "FreezeBalanceContract": 11,
+ "UnfreezeBalanceContract": 12,
+ "WithdrawBalanceContract": 13,
+ "UnfreezeAssetContract": 14,
+ "UpdateAssetContract": 15,
+ "ProposalCreateContract": 16,
+ "ProposalApproveContract": 17,
+ "ProposalDeleteContract": 18,
+ "SetAccountIdContract": 19,
+ "CustomContract": 20,
+ "CreateSmartContract": 30,
+ "TriggerSmartContract": 31,
+ "GetContract": 32,
+ "UpdateSettingContract": 33,
+ "ExchangeCreateContract": 41,
+ "ExchangeInjectContract": 42,
+ "ExchangeWithdrawContract": 43,
+ "ExchangeTransactionContract": 44,
+ "UpdateEnergyLimitContract": 45,
+ "AccountPermissionUpdateContract": 46,
+ "ClearABIContract": 48,
+ "UpdateBrokerageContract": 49,
+ "ShieldedTransferContract": 51,
+ "MarketSellAssetContract": 52,
+ "MarketCancelOrderContract": 53,
+ "FreezeBalanceV2Contract": 54,
+ "UnfreezeBalanceV2Contract": 55,
+ "WithdrawExpireUnfreezeContract": 56,
+ "DelegateResourceContract": 57,
+ "UnDelegateResourceContract": 58,
+ "CancelAllUnfreezeV2Contract": 59,
+}
+
+var TronTraceCallTypeMap = map[string]bool{
+ "CALL": true,
+ "CREATE": true,
+ "CREATE2": true,
+ "STATICCALL": true,
+ "CALLCODE": true,
+ "DELEGATECALL": true,
+}
+
+const TronContractTypeUnknown = 999
+
+type TronCallValueInfo struct {
+ CallValue int64 `json:"callValue"`
+ TokenId string `json:"tokenId"`
+}
+
+type TronTransactionInfo struct {
+ InternalTransactions []TronInternalTransaction `json:"internal_transactions"`
+ Id string `json:"id"`
+ BlockNumber uint64 `json:"blockNumber"`
+ TransactionHash string `json:"transactionHash"`
+ Fee uint64 `json:"fee"`
+ Receipt TronReceipt `json:"receipt"`
+ Type string `json:"type"`
+}
+
+type TronReceipt struct {
+ Result string `json:"result"`
+ // Bandwidth is represented as either net_fee or net_usage; only one will be present in the response.
+ NetFee uint64 `json:"net_fee"`
+ NetUsage uint64 `json:"net_usage"`
+ EnergyUsage uint64 `json:"energy_usage"`
+ EnergyFee uint64 `json:"energy_fee"`
+ OriginEnergyUsage uint64 `json:"origin_energy_usage"`
+ EnergyUsageTotal uint64 `json:"energy_usage_total"`
+ EnergyPenaltyTotal uint64 `json:"energy_penalty_total"`
+}
+
+type TronInternalTransaction struct {
+ Hash string `json:"hash"`
+ CallerAddress string `json:"caller_address"`
+ TransferToAddress string `json:"transferTo_address"`
+ CallValueInfo []TronCallValueInfo `json:"callValueInfo"`
+ Note string `json:"note"`
+ Rejected bool `json:"rejected"`
+}
+
+func NewTronNativeParser(params internal.ParserParams, opts ...internal.ParserFactoryOption) (internal.NativeParser, error) {
+ // Tron shares the same data schema as Ethereum since it's an EVM-compatible chain, except that parity trace parsing is skipped; traces are built from internal transactions in post-processing.
+ opts = append(opts, WithEthereumNodeType(types.EthereumNodeType_ARCHIVAL), WithTraceType(types.TraceType_PARITY))
+ return NewEthereumNativeParser(params, opts...)
+}
+
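+// convertInternalTransactionToTrace maps a Tron internal transaction onto the Ethereum
+// flattened-trace schema. The trace type is derived from the hex-decoded note; known EVM
+// call types are normalized to TraceType "CALL", and only native (empty TokenId) call
+// values contribute to Value.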
+func convertInternalTransactionToTrace(itx *TronInternalTransaction) *api.EthereumTransactionFlattenedTrace {
+ // only keep native values, ignore TRC10 token values
+ var nativeTokenValue int64
+ for _, callValueInfoItem := range itx.CallValueInfo {
+ if callValueInfoItem.TokenId == "" {
+ // If TokenId is empty, it means this is a native token transfer
+ nativeTokenValue += callValueInfoItem.CallValue
+ }
+ }
+ var note string
+ noteBytes, err := hex.DecodeString(itx.Note)
+ if err != nil {
+ note = ""
+ } else {
+ note = string(noteBytes)
+ }
+ rawType := strings.ToUpper(note)
+ trace := &api.EthereumTransactionFlattenedTrace{
+ Type: rawType,
+ TraceType: rawType,
+ From: hexToTronAddress(itx.CallerAddress),
+ To: hexToTronAddress(itx.TransferToAddress),
+ Value: strconv.FormatInt(nativeTokenValue, 10),
+ TraceId: itx.Hash,
+ CallValueInfo: convertTronCallValueInfo(itx.CallValueInfo),
+ }
+
+ if TronTraceCallTypeMap[rawType] {
+ trace.CallType = rawType
+ trace.TraceType = "CALL"
+ }
+ if itx.Rejected {
+ trace.Error = "Internal transaction is executed failed"
+ trace.Status = 0
+ } else {
+ trace.Status = 1
+ }
+
+ return trace
+}
+
+func convertTronCallValueInfo(callValueInfo []TronCallValueInfo) []*api.CallValueInfo {
+ result := make([]*api.CallValueInfo, len(callValueInfo))
+ for i, info := range callValueInfo {
+ result[i] = &api.CallValueInfo{
+ TokenId: info.TokenId,
+ CallValue: info.CallValue,
+ }
+ }
+ return result
+}
+
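+// parseTronTxInfo enriches each receipt with Tron-specific fee, bandwidth, and energy
+// fields, converts internal transactions into flattened traces keyed by transaction
+// hash, and sets the contract type on each transaction.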
+func parseTronTxInfo(
+ blobData *api.EthereumBlobdata,
+ header *api.EthereumHeader,
+ transactionToFlattenedTracesMap map[string][]*api.EthereumTransactionFlattenedTrace,
+ txReceipts []*api.EthereumTransactionReceipt,
+ transactions []*api.EthereumTransaction,
+) error {
+ if len(blobData.TransactionTraces) == 0 {
+ return nil
+ }
+
+ // Ensure we have matching number of receipts and traces
+ if len(blobData.TransactionTraces) != len(txReceipts) {
+ return xerrors.Errorf(
+ "mismatch between number of transaction traces (%d) and receipts (%d)",
+ len(blobData.TransactionTraces),
+ len(txReceipts),
+ )
+ }
+
+ for txIndex, rawTxInfo := range blobData.TransactionTraces {
+ var txInfo TronTransactionInfo
+ if err := json.Unmarshal(rawTxInfo, &txInfo); err != nil {
+ return xerrors.Errorf("failed to parse transaction trace at index %d: %w", txIndex, err)
+ }
+
+ traceTransactionHash := txInfo.Id
+ txIdx := uint64(txIndex)
+ fee := txInfo.Fee
+ receipt := txInfo.Receipt
+ // 1. Enrich txReceipt with the fee and net_fee (bandwidth) fields from transactionInfo.receipt.
+ txReceipt := txReceipts[txIndex]
+ if fee != 0 {
+ txReceipt.OptionalFee = &api.EthereumTransactionReceipt_Fee{
+ Fee: fee,
+ }
+ }
+ if receipt.NetFee != 0 {
+ txReceipt.OptionalNetFee = &api.EthereumTransactionReceipt_NetFee{
+ NetFee: receipt.NetFee,
+ }
+ }
+ if receipt.NetUsage != 0 {
+ txReceipt.OptionalNetUsage = &api.EthereumTransactionReceipt_NetUsage{
+ NetUsage: receipt.NetUsage,
+ }
+ }
+ if receipt.EnergyUsage != 0 {
+ txReceipt.OptionalEnergyUsage = &api.EthereumTransactionReceipt_EnergyUsage{
+ EnergyUsage: receipt.EnergyUsage,
+ }
+ }
+ if receipt.EnergyFee != 0 {
+ txReceipt.OptionalEnergyFee = &api.EthereumTransactionReceipt_EnergyFee{
+ EnergyFee: receipt.EnergyFee,
+ }
+ }
+ if receipt.OriginEnergyUsage != 0 {
+ txReceipt.OptionalOriginEnergyUsage = &api.EthereumTransactionReceipt_OriginEnergyUsage{
+ OriginEnergyUsage: receipt.OriginEnergyUsage,
+ }
+ }
+ if receipt.EnergyUsageTotal != 0 {
+ txReceipt.OptionalEnergyUsageTotal = &api.EthereumTransactionReceipt_EnergyUsageTotal{
+ EnergyUsageTotal: receipt.EnergyUsageTotal,
+ }
+ }
+ if receipt.EnergyPenaltyTotal != 0 {
+ txReceipt.OptionalEnergyPenaltyTotal = &api.EthereumTransactionReceipt_EnergyPenaltyTotal{
+ EnergyPenaltyTotal: receipt.EnergyPenaltyTotal,
+ }
+ }
+
+ // 2. Map internalTransactions to flattened traces.
+ internalTxs := txInfo.InternalTransactions
+ traces := make([]*api.EthereumTransactionFlattenedTrace, len(internalTxs))
+ for i, internalTx := range internalTxs {
+ trace := convertInternalTransactionToTrace(&internalTx)
+ trace.BlockHash = toTronHash(header.Hash)
+ trace.BlockNumber = header.Number
+ trace.TransactionHash = traceTransactionHash
+ trace.TransactionIndex = txIdx
+ traces[i] = trace
+ }
+ transactionToFlattenedTracesMap[traceTransactionHash] = traces
+
+ // Set the contract type on the transaction.
+ tx := transactions[txIndex]
+ if typeValue, exists := TronContractTypeMap[txInfo.Type]; exists {
+ tx.Type = typeValue
+ } else {
+ // Fall back to TronContractTypeUnknown (999) when the type is not in the map.
+ tx.Type = TronContractTypeUnknown
+ }
+ }
+ return nil
+}
+
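+// toTronHash strips the 0x prefix; Tron represents hashes as bare hex strings.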
+func toTronHash(hexHash string) string {
+ return strings.ReplaceAll(hexHash, "0x", "")
+}
+
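+// hexToTronAddress converts a hex (EVM-style) address to a base58check Tron address:
+// prepend the 0x41 version byte, append the first 4 bytes of a double SHA-256 checksum,
+// then base58-encode. For example, 0x8b0359acac03bac62cbf89c4b787cb10b3c3f513 encodes
+// to TNeEwWHXLLUgEtfzTnYN8wtVenGxuMzZCE (see the test fixtures).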
+func hexToTronAddress(hexAddress string) string {
+ if hexAddress == "" {
+ return ""
+ }
+
+ // Ensure consistent format by cleaning the hex address
+ hexAddress = strings.ToLower(hexAddress)
+ if strings.HasPrefix(hexAddress, "0x") {
+ hexAddress = "41" + hexAddress[2:]
+ } else if !strings.HasPrefix(hexAddress, "41") {
+ hexAddress = "41" + hexAddress
+ }
+
+ // Decode hex string to bytes
+ rawBytes, err := hex.DecodeString(hexAddress)
+ if err != nil {
+ // If unable to decode, return the original address to avoid data loss
+ return hexAddress
+ }
+
+ // Compute double SHA-256 checksum
+ hash1 := sha256.Sum256(rawBytes)
+ hash2 := sha256.Sum256(hash1[:])
+ checksum := hash2[:4] // First 4 bytes as checksum
+ // Append checksum to the raw bytes
+ fullBytes := append(rawBytes, checksum...)
+ // Base58Check encode
+ tronAddress := base58.Encode(fullBytes)
+
+ return tronAddress
+}
+
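+// convertTokenTransfer rewrites a token transfer's addresses into base58 Tron form and
+// its hashes into the 0x-less Tron form, including the nested ERC20/ERC721 payloads.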
+func convertTokenTransfer(data *api.EthereumTokenTransfer) {
+ data.TokenAddress = hexToTronAddress(data.TokenAddress)
+ data.FromAddress = hexToTronAddress(data.FromAddress)
+ data.ToAddress = hexToTronAddress(data.ToAddress)
+
+ data.TransactionHash = toTronHash(data.TransactionHash)
+ data.BlockHash = toTronHash(data.BlockHash)
+
+ switch v := data.TokenTransfer.(type) {
+ case *api.EthereumTokenTransfer_Erc20:
+ if v.Erc20 != nil {
+ v.Erc20.FromAddress = hexToTronAddress(v.Erc20.FromAddress)
+ v.Erc20.ToAddress = hexToTronAddress(v.Erc20.ToAddress)
+ }
+ case *api.EthereumTokenTransfer_Erc721:
+ if v.Erc721 != nil {
+ v.Erc721.FromAddress = hexToTronAddress(v.Erc721.FromAddress)
+ v.Erc721.ToAddress = hexToTronAddress(v.Erc721.ToAddress)
+ }
+ }
+}
+
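+// postProcessTronBlock converts all hashes and addresses in the parsed block (header,
+// transactions, receipts, logs, and token transfers) to Tron's native formats and builds
+// flattened traces from the Tron transaction info.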
+func postProcessTronBlock(
+ blobData *api.EthereumBlobdata,
+ header *api.EthereumHeader,
+ transactions []*api.EthereumTransaction,
+ txReceipts []*api.EthereumTransactionReceipt,
+ tokenTransfers [][]*api.EthereumTokenTransfer,
+ transactionToFlattenedTracesMap map[string][]*api.EthereumTransactionFlattenedTrace,
+) error {
+ if err := parseTronTxInfo(blobData, header, transactionToFlattenedTracesMap, txReceipts, transactions); err != nil {
+ return xerrors.Errorf("failed to parse transaction parity traces: %w", err)
+ }
+
+ header.Hash = toTronHash(header.Hash)
+ header.ParentHash = toTronHash(header.ParentHash)
+ header.TransactionsRoot = toTronHash(header.TransactionsRoot)
+ header.Miner = hexToTronAddress(header.Miner)
+
+ for i := range header.Transactions {
+ header.Transactions[i] = toTronHash(header.Transactions[i])
+ }
+
+ for _, tx := range transactions {
+ tx.BlockHash = toTronHash(tx.BlockHash)
+ tx.Hash = toTronHash(tx.Hash)
+ if tx.From != "" {
+ tx.From = hexToTronAddress(tx.From)
+ }
+ if tx.To != "" {
+ tx.To = hexToTronAddress(tx.To)
+ }
+ }
+
+ for _, txR := range txReceipts {
+ txR.TransactionHash = toTronHash(txR.TransactionHash)
+ txR.BlockHash = toTronHash(txR.BlockHash)
+ if txR.From != "" {
+ txR.From = hexToTronAddress(txR.From)
+ }
+ if txR.To != "" {
+ txR.To = hexToTronAddress(txR.To)
+ }
+ if txR.Logs != nil {
+ for _, txLog := range txR.Logs {
+ txLog.TransactionHash = toTronHash(txLog.TransactionHash)
+ txLog.BlockHash = toTronHash(txLog.BlockHash)
+ txLog.Address = hexToTronAddress(txLog.Address)
+ }
+ }
+ }
+ for _, txTokenTransfers := range tokenTransfers {
+ for _, tokenTransfer := range txTokenTransfers {
+ convertTokenTransfer(tokenTransfer)
+ }
+ }
+ return nil
+}
diff --git a/internal/blockchain/parser/ethereum/tron_native_test.go b/internal/blockchain/parser/ethereum/tron_native_test.go
new file mode 100644
index 0000000..d914e1e
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/tron_native_test.go
@@ -0,0 +1,590 @@
+package ethereum
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "go.uber.org/fx"
+ "go.uber.org/mock/gomock"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+ "github.com/coinbase/chainstorage/internal/utils/fixtures"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ "github.com/coinbase/chainstorage/protos/coinbase/c3/common"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+type tronParserTestSuite struct {
+ suite.Suite
+
+ ctrl *gomock.Controller
+ testapp testapp.TestApp
+ parser internal.Parser
+}
+
+func TestTronParserTestSuite(t *testing.T) {
+ suite.Run(t, new(tronParserTestSuite))
+}
+
+func (s *tronParserTestSuite) SetupTest() {
+ s.ctrl = gomock.NewController(s.T())
+
+ var parser internal.Parser
+ s.testapp = testapp.New(
+ s.T(),
+ Module,
+ internal.Module,
+ testapp.WithBlockchainNetwork(common.Blockchain_BLOCKCHAIN_TRON, common.Network_NETWORK_TRON_MAINNET),
+ fx.Populate(&parser),
+ )
+
+ s.parser = parser
+ s.NotNil(s.parser)
+}
+
+func (s *tronParserTestSuite) TearDownTest() {
+ s.testapp.Close()
+ s.ctrl.Finish()
+}
+
+func (s *tronParserTestSuite) TestParseTronBlock() {
+ require := testutil.Require(s.T())
+
+ fixtureHeader := fixtures.MustReadFile("parser/tron/raw_block_header.json")
+
+ rawReceipts, err := s.fixtureParsingHelper("parser/tron/raw_block_tx_receipt.json")
+ require.NoError(err)
+
+ rawTraces, err := s.fixtureParsingHelper("parser/tron/raw_block_trace_tx_info.json")
+ require.NoError(err)
+
+ block := &api.Block{
+ Blockchain: common.Blockchain_BLOCKCHAIN_TRON,
+ Network: common.Network_NETWORK_TRON_MAINNET,
+ Metadata: &api.BlockMetadata{
+ Tag: 2,
+ Hash: "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ ParentHash: "0x0000000004034f5b43c5934257b3d1f1a313bba4af0a4dd2f778fda9e641b615",
+ Height: 0x4034f5c,
+ },
+ Blobdata: &api.Block_Ethereum{
+ Ethereum: &api.EthereumBlobdata{
+ Header: fixtureHeader,
+ TransactionReceipts: rawReceipts,
+ TransactionTraces: rawTraces,
+ },
+ },
+ }
+
+ expectedHeader := &api.EthereumHeader{
+ Hash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ ParentHash: "0000000004034f5b43c5934257b3d1f1a313bba4af0a4dd2f778fda9e641b615",
+ Number: 0x4034F5C,
+ Timestamp: &timestamppb.Timestamp{Seconds: 1732627338},
+ Transactions: []string{
+ "d581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ },
+ Nonce: "0x0000000000000000",
+ Sha3Uncles: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ LogsBloom: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ TransactionsRoot: "d270690faa58558c2b03ae600334f71f9d5a0ad42d7313852fb3742e8576eec9",
+ StateRoot: "0x",
+ ReceiptsRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ Miner: "TNeEwWHXLLUgEtfzTnYN8wtVenGxuMzZCE",
+ TotalDifficulty: "0",
+ ExtraData: "0x",
+ Size: 0x1a366,
+ GasLimit: 0x2b3b43dc6,
+ GasUsed: 0xb1006d,
+ MixHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ OptionalBaseFeePerGas: &api.EthereumHeader_BaseFeePerGas{
+ BaseFeePerGas: uint64(0),
+ },
+ }
+ expectedFlattenedTraces := []*api.EthereumTransactionFlattenedTrace{
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Value: "200",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "499bdbdfaae021dd510c70b433bc48d88d8ca6e0b7aee13ce6d726114e365aaf",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ CallValue: 100,
+ },
+ {
+ CallValue: 100,
+ },
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM",
+ Value: "1000",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "997225b56440a9bd172f05f44a663830b72093a12502551cda99b0bc7c60cbc1",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ TokenId: "1004777",
+ CallValue: 1000000000000000,
+ },
+ {
+ CallValue: 1000,
+ },
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM",
+ Value: "0",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "7ac8dd16dede5c512330f5033c8fd6f5390d742aa51b805f805098109eb54fe9",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ TokenId: "1004777",
+ CallValue: 1000,
+ },
+ {
+ TokenId: "1004777",
+ CallValue: 100,
+ },
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5",
+ Value: "100000",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "cf6f699d9bdae8aa25fae310a06bb60a29a7812548cf3c1d83c737fd1a22c0ee",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ TokenId: "1004777",
+ CallValue: 100,
+ },
+ {
+ CallValue: 100000,
+ },
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5",
+ To: "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5",
+ Value: "0",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "95787b9a6558c7b6b624d0c1bece9723a7f4c3d414010b6ac105ae5f5aebffbc",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {},
+ },
+ },
+ {
+ Type: "UNDELEGATERESOURCEOFENERGY",
+ From: "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5",
+ To: "TGzjkw66CtL49eKiQFDwJDuXG9HSQd69p2",
+ Value: "822996311610",
+ TraceType: "UNDELEGATERESOURCEOFENERGY",
+ CallType: "",
+ TraceId: "14526162e31d969ef0dca9b902d51ecc0ffab87dc936dce62022f368119043af",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ CallValue: 822994311610,
+ },
+ {
+ CallValue: 2000000,
+ },
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM",
+ Value: "0",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "8e088220a26ca8d794786e78096e71259cf8744cccdc4f07a8129aa8ee29bb98",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {},
+ },
+ },
+ {
+ Type: "CALL",
+ From: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ To: "TNXC2YCSxhdxsVqhqu3gYZYme6n4i6T1C1",
+ Value: "1424255258",
+ TraceType: "CALL",
+ CallType: "CALL",
+ TraceId: "83b1d41ba953aab4da6e474147f647599ea53bb3213306897127b57e85ddd1ca",
+ Status: 1,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 0x4034F5C,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 1,
+ CallValueInfo: []*api.CallValueInfo{
+ {
+ CallValue: 1424255258,
+ },
+ },
+ },
+ }
+
+ expectedTransactions := []*api.EthereumTransaction{
+ {
+ Hash: "d581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ From: "TDQFomPihdhP8Jzr2LMpdcXgg9qxKfZZmD",
+ To: "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t",
+ Index: 0,
+ Type: 1,
+ Receipt: &api.EthereumTransactionReceipt{
+ TransactionHash: "d581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ From: "TDQFomPihdhP8Jzr2LMpdcXgg9qxKfZZmD",
+ To: "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t",
+ CumulativeGasUsed: 130285,
+ GasUsed: 130285,
+ LogsBloom: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ EffectiveGasPrice: 210,
+ OptionalStatus: &api.EthereumTransactionReceipt_Status{Status: 1},
+ TransactionIndex: 0,
+ OptionalNetUsage: &api.EthereumTransactionReceipt_NetUsage{
+ NetUsage: 345,
+ },
+ OptionalEnergyUsage: &api.EthereumTransactionReceipt_EnergyUsage{
+ EnergyUsage: 130285,
+ },
+ OptionalEnergyUsageTotal: &api.EthereumTransactionReceipt_EnergyUsageTotal{
+ EnergyUsageTotal: 130285,
+ },
+ OptionalEnergyPenaltyTotal: &api.EthereumTransactionReceipt_EnergyPenaltyTotal{
+ EnergyPenaltyTotal: 100635,
+ },
+ Logs: []*api.EthereumEventLog{
+ {
+ TransactionHash: "d581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ Address: "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t",
+ Data: "0x0000000000000000000000000000000000000000000000000000000000027165",
+ TransactionIndex: 0,
+ LogIndex: 0,
+ Topics: []string{
+ "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "0x00000000000000000000000025a51e3e65287539b8d4eb559cbca4488a08bb00",
+ "0x0000000000000000000000009dc5da2b3c502661c8448ba88bacf7f0b22272ad",
+ },
+ },
+ },
+ },
+ },
+ {
+ Hash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ From: "TNXC2YCSxhdxsVqhqu3gYZYme6n4i6T1C1",
+ To: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Index: 69,
+ Type: 2,
+ Receipt: &api.EthereumTransactionReceipt{
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ From: "TNXC2YCSxhdxsVqhqu3gYZYme6n4i6T1C1",
+ To: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ CumulativeGasUsed: 1432695,
+ GasUsed: 74135,
+ LogsBloom: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ EffectiveGasPrice: 210,
+ OptionalStatus: &api.EthereumTransactionReceipt_Status{Status: 1},
+ TransactionIndex: 69,
+ OptionalFee: &api.EthereumTransactionReceipt_Fee{
+ Fee: 379,
+ },
+ OptionalNetFee: &api.EthereumTransactionReceipt_NetFee{
+ NetFee: 379,
+ },
+ OptionalEnergyUsage: &api.EthereumTransactionReceipt_EnergyUsage{
+ EnergyUsage: 68976,
+ },
+ OptionalOriginEnergyUsage: &api.EthereumTransactionReceipt_OriginEnergyUsage{
+ OriginEnergyUsage: 5159,
+ },
+ OptionalEnergyUsageTotal: &api.EthereumTransactionReceipt_EnergyUsageTotal{
+ EnergyUsageTotal: 74135,
+ },
+ Logs: []*api.EthereumEventLog{
+ {
+ LogIndex: 16,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 69,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ Address: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Data: "0x00000000000000000000000000000000000000000000000000000001f9873bc7000000000000000000000000000000000000000000000000093732ae413feb69000000000000000000000000000000000000000000000000093732b42dd59ebe0000000000000000000000000000000000000000000000000000801f33d9f651000000000000000000000000000000000000000000000000000000000036b158",
+ Topics: []string{
+ "0xda6e3523d5765dedff9534b488c7e508318178571c144293451989755e9379e7",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ },
+ },
+ {
+ LogIndex: 17,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 69,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ Address: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Data: "0x000000000000000000000000000000000000000000000000093732a856669e8f000000000000000000000000000000000000000000000000093732b42dd59ebe000000000000000000000000000000000000000000000000000000bf9e4899ba000000000000000000000000000000000000000000000000000000000000a3810000000000000000000000000000000000000000000000000000000000000000",
+ Topics: []string{
+ "0x74fed619850adf4ba83cfb92b9566b424e3de6de4d9a7adc3b1909ea58421a55",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ },
+ },
+ {
+ LogIndex: 18,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 69,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ // Address: "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ Address: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Data: "0x000000000000000000000000000000000000000000000000000000bf9e4899ba",
+ Topics: []string{
+ "0xf2def54ec5eba61fd8f18d019c7beaf6a47df317fb798b3263ad69ec227c9261",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ },
+ },
+ {
+ LogIndex: 19,
+ TransactionHash: "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ TransactionIndex: 69,
+ BlockHash: "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ BlockNumber: 67325788,
+ Address: "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd",
+ Data: "0x000000000000000000000000000000000000000000000000000000bf9e4899ba0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000c032ffd0000000000000000000000000000000000000000000000000000000054e4691a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000093732b42dd59ebe",
+ Topics: []string{
+ "0xf7e21d5bf17851f93ab7bda7e390841620f59dfbe9d86add32824f33bd40d3f5",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ nativeBlock, err := s.parser.ParseNativeBlock(context.Background(), block)
+ require.NoError(err)
+ require.Equal(common.Blockchain_BLOCKCHAIN_TRON, nativeBlock.Blockchain)
+ require.Equal(common.Network_NETWORK_TRON_MAINNET, nativeBlock.Network)
+ actualBlock := nativeBlock.GetEthereum()
+ require.NotNil(actualBlock)
+ require.Equal(expectedHeader, actualBlock.Header)
+
+ require.Equal(2, len(actualBlock.Transactions))
+
+ require.Equal(8, len(actualBlock.Transactions[1].FlattenedTraces))
+
+ tx := actualBlock.Transactions[1]
+ for i, trace := range tx.FlattenedTraces {
+ expectedTrace := expectedFlattenedTraces[i]
+ require.Equal(expectedTrace.Type, trace.Type)
+ require.Equal(expectedTrace.From, trace.From)
+ require.Equal(expectedTrace.To, trace.To)
+ require.Equal(expectedTrace.Value, trace.Value)
+ require.Equal(expectedTrace.TraceType, trace.TraceType)
+ require.Equal(expectedTrace.CallType, trace.CallType)
+ require.Equal(expectedTrace.TraceId, trace.TraceId)
+ require.Equal(expectedTrace.Status, trace.Status)
+ require.Equal(expectedTrace.BlockHash, trace.BlockHash)
+ require.Equal(expectedTrace.BlockNumber, trace.BlockNumber)
+ require.Equal(expectedTrace.TransactionHash, trace.TransactionHash)
+ require.Equal(expectedTrace.TransactionIndex, trace.TransactionIndex)
+ }
+ require.Equal(expectedFlattenedTraces, tx.FlattenedTraces)
+
+ for i, tx := range actualBlock.Transactions {
+ expectedTx := expectedTransactions[i]
+ require.Equal(expectedTx.Hash, tx.Hash)
+ require.Equal(expectedTx.From, tx.From)
+ require.Equal(expectedTx.To, tx.To)
+ require.Equal(expectedTx.Index, tx.Index)
+ require.Equal(expectedTx.Type, tx.Type)
+ require.Equal(expectedTx.Receipt.From, tx.Receipt.From)
+ require.Equal(expectedTx.Receipt.To, tx.Receipt.To)
+ require.Equal(expectedTx.Receipt.TransactionHash, tx.Receipt.TransactionHash)
+ require.Equal(expectedTx.Receipt.TransactionIndex, tx.Receipt.TransactionIndex)
+ require.Equal(expectedTx.Receipt.BlockHash, tx.Receipt.BlockHash)
+ require.Equal(expectedTx.Receipt.BlockNumber, tx.Receipt.BlockNumber)
+ require.Equal(expectedTx.Receipt.CumulativeGasUsed, tx.Receipt.CumulativeGasUsed)
+ require.Equal(expectedTx.Receipt.GasUsed, tx.Receipt.GasUsed)
+ require.Equal(expectedTx.Receipt.LogsBloom, tx.Receipt.LogsBloom)
+ require.Equal(expectedTx.Receipt.EffectiveGasPrice, tx.Receipt.EffectiveGasPrice)
+ require.Equal(expectedTx.Receipt.Logs, tx.Receipt.Logs)
+
+ if expectedTx.Receipt.GetOptionalFee() != nil {
+ require.NotNil(tx.Receipt.GetOptionalFee())
+ require.Equal(expectedTx.Receipt.GetFee(), tx.Receipt.GetFee())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalFee())
+ }
+ if expectedTx.Receipt.GetOptionalNetFee() != nil {
+ require.NotNil(tx.Receipt.GetOptionalNetFee())
+ require.Equal(expectedTx.Receipt.GetNetFee(), tx.Receipt.GetNetFee())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalNetFee())
+ }
+ if expectedTx.Receipt.GetOptionalNetUsage() != nil {
+ require.NotNil(tx.Receipt.GetOptionalNetUsage())
+ require.Equal(expectedTx.Receipt.GetNetUsage(), tx.Receipt.GetNetUsage())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalNetUsage())
+ }
+ if expectedTx.Receipt.GetOptionalEnergyUsage() != nil {
+ require.NotNil(tx.Receipt.GetOptionalEnergyUsage())
+ require.Equal(expectedTx.Receipt.GetEnergyUsage(), tx.Receipt.GetEnergyUsage())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalEnergyUsage())
+ }
+ if expectedTx.Receipt.GetOptionalEnergyUsageTotal() != nil {
+ require.NotNil(tx.Receipt.GetOptionalEnergyUsageTotal())
+ require.Equal(expectedTx.Receipt.GetEnergyUsageTotal(), tx.Receipt.GetEnergyUsageTotal())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalEnergyUsageTotal())
+ }
+ if expectedTx.Receipt.GetOptionalEnergyPenaltyTotal() != nil {
+ require.NotNil(tx.Receipt.GetOptionalEnergyPenaltyTotal())
+ require.Equal(expectedTx.Receipt.GetEnergyPenaltyTotal(), tx.Receipt.GetEnergyPenaltyTotal())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalEnergyPenaltyTotal())
+ }
+ if expectedTx.Receipt.GetOptionalOriginEnergyUsage() != nil {
+ require.NotNil(tx.Receipt.GetOptionalOriginEnergyUsage())
+ require.Equal(expectedTx.Receipt.GetOriginEnergyUsage(), tx.Receipt.GetOriginEnergyUsage())
+ } else {
+ require.Nil(tx.Receipt.GetOptionalOriginEnergyUsage())
+ }
+ }
+}
+
+// fixtureParsingHelper reads a JSON fixture containing an array and returns
+// each element as raw bytes.
+func (s *tronParserTestSuite) fixtureParsingHelper(filePath string) ([][]byte, error) {
+ fixtureParityTrace, err := fixtures.ReadFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ var tmpItems []json.RawMessage
+ if err := json.Unmarshal(fixtureParityTrace, &tmpItems); err != nil {
+ return nil, err
+ }
+
+ items := make([][]byte, len(tmpItems))
+ for i, item := range tmpItems {
+ items[i] = item
+ }
+ return items, nil
+}
+
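+// toTronHash normalizes a hash string by stripping an optional "0x" prefix,
+// since Tron represents block and transaction hashes as bare hex.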
+func (s *tronParserTestSuite) TestToTronHash() {
+ require := testutil.Require(s.T())
+
+ testCases := []struct {
+ input string
+ expected string
+ comment string
+ }{
+ {"0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87", "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87", "with 0x prefix"},
+ {"0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87", "0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87", "without 0x prefix"},
+ {"0xABCDEF1234567890", "ABCDEF1234567890", "uppercase hex"},
+ {"", "", "empth string"},
+ {"0x", "", "only 0x prefix"},
+ }
+
+ for _, tc := range testCases {
+ result := toTronHash(tc.input)
+ require.Equal(tc.expected, result, tc.comment)
+ }
+}
+
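+// hexToTronAddress converts a hex-encoded address into Tron's Base58Check
+// form: a Tron address is the version byte 0x41 prepended to the 20-byte
+// EVM-style address, Base58Check-encoded with a 4-byte double-SHA256
+// checksum. This is why inputs with a "0x" prefix, a "41" prefix, or no
+// prefix at all map to the same T-address in the cases below.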
+func (s *tronParserTestSuite) TestHexToTronAddress() {
+ require := testutil.Require(s.T())
+ testCases := []struct {
+ input string
+ expected string
+ comment string
+ }{
+ {"0x8b0359acac03bac62cbf89c4b787cb10b3c3f513", "TNeEwWHXLLUgEtfzTnYN8wtVenGxuMzZCE", "with 0x prefix"},
+ {"0xc60a6f5c81431c97ed01b61698b6853557f3afd4", "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd", "with 0x prefix"},
+ {"0x4d12f87c18a914dddbc2b27f378ad126a79b76b6", "TGzjkw66CtL49eKiQFDwJDuXG9HSQd69p2", "with 0x prefix"},
+ {"0xe8667633c747066c70672c58207cc745a9860527", "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM", "with 0x prefix"},
+ {"0x89ae01b878dffc8088222adf1fb08ebadfeea53a", "TNXC2YCSxhdxsVqhqu3gYZYme6n4i6T1C1", "with 0x prefix"},
+
+ {"418b0359acac03bac62cbf89c4b787cb10b3c3f513", "TNeEwWHXLLUgEtfzTnYN8wtVenGxuMzZCE", "without 0x but have 41 prefix"},
+ {"41c60a6f5c81431c97ed01b61698b6853557f3afd4", "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd", "without 0x but have 41 prefix"},
+ {"414d12f87c18a914dddbc2b27f378ad126a79b76b6", "TGzjkw66CtL49eKiQFDwJDuXG9HSQd69p2", "without 0x but have 41 prefix"},
+ {"41e8667633c747066c70672c58207cc745a9860527", "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM", "without 0x but have 41 prefix"},
+ {"4189ae01b878dffc8088222adf1fb08ebadfeea53a", "TNXC2YCSxhdxsVqhqu3gYZYme6n4i6T1C1", "without 0x but have 41 prefix"},
+
+ {"c64e69acde1c7b16c2a3efcdbbdaa96c3644c2b3", "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5", "without 0x and 41 prefix"},
+ {"a614f803b6fd780986a42c78ec9c7f77e6ded13c", "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t", "without 0x and 41 prefix"},
+
+ {"", "", "empty string"},
+ }
+
+ for _, tc := range testCases {
+ result := hexToTronAddress(tc.input)
+ require.Equal(tc.expected, result, tc.comment)
+ }
+}
diff --git a/internal/blockchain/parser/ethereum/tron_validator.go b/internal/blockchain/parser/ethereum/tron_validator.go
new file mode 100644
index 0000000..2ee7ab4
--- /dev/null
+++ b/internal/blockchain/parser/ethereum/tron_validator.go
@@ -0,0 +1,10 @@
+package ethereum
+
+import (
+ "github.com/coinbase/chainstorage/internal/blockchain/parser/internal"
+)
+
+func NewTronValidator(params internal.ParserParams) internal.TrustlessValidator {
+ // Reuse the same implementation as Ethereum.
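+ // Tron blocks are parsed into the Ethereum native format, so the Ethereum
+ // trustless validation logic is expected to apply unchanged.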
+ return NewEthereumValidator(params)
+}
diff --git a/internal/blockchain/parser/internal/parser.go b/internal/blockchain/parser/internal/parser.go
index 945b488..c582201 100644
--- a/internal/blockchain/parser/internal/parser.go
+++ b/internal/blockchain/parser/internal/parser.go
@@ -45,22 +45,30 @@ type (
Params struct {
fx.In
fxparams.Params
- Aleo ParserFactory `name:"aleo" optional:"true"`
- Bitcoin ParserFactory `name:"bitcoin" optional:"true"`
- Bsc ParserFactory `name:"bsc" optional:"true"`
- Ethereum ParserFactory `name:"ethereum" optional:"true"`
- Rosetta ParserFactory `name:"rosetta" optional:"true"`
- Solana ParserFactory `name:"solana" optional:"true"`
- Polygon ParserFactory `name:"polygon" optional:"true"`
- Avacchain ParserFactory `name:"avacchain" optional:"true"`
- Arbitrum ParserFactory `name:"arbitrum" optional:"true"`
- Optimism ParserFactory `name:"optimism" optional:"true"`
- Fantom ParserFactory `name:"fantom" optional:"true"`
- Base ParserFactory `name:"base" optional:"true"`
- Aptos ParserFactory `name:"aptos" optional:"true"`
- EthereumBeacon ParserFactory `name:"ethereum/beacon" optional:"true"`
- CosmosStaking ParserFactory `name:"cosmos/staking" optional:"true"`
- CardanoStaking ParserFactory `name:"cardano/staking" optional:"true"`
+ Aleo ParserFactory `name:"aleo" optional:"true"`
+ Bitcoin ParserFactory `name:"bitcoin" optional:"true"`
+ Bsc ParserFactory `name:"bsc" optional:"true"`
+ Ethereum ParserFactory `name:"ethereum" optional:"true"`
+ Rosetta ParserFactory `name:"rosetta" optional:"true"`
+ Solana ParserFactory `name:"solana" optional:"true"`
+ Polygon ParserFactory `name:"polygon" optional:"true"`
+ Avacchain ParserFactory `name:"avacchain" optional:"true"`
+ Arbitrum ParserFactory `name:"arbitrum" optional:"true"`
+ Optimism ParserFactory `name:"optimism" optional:"true"`
+ Fantom ParserFactory `name:"fantom" optional:"true"`
+ Base ParserFactory `name:"base" optional:"true"`
+ Aptos ParserFactory `name:"aptos" optional:"true"`
+ EthereumBeacon ParserFactory `name:"ethereum/beacon" optional:"true"`
+ CosmosStaking ParserFactory `name:"cosmos/staking" optional:"true"`
+ CardanoStaking ParserFactory `name:"cardano/staking" optional:"true"`
+ Tron ParserFactory `name:"tron" optional:"true"`
+ Story ParserFactory `name:"story" optional:"true"`
+ EthereumClassic ParserFactory `name:"ethereumclassic" optional:"true"`
+ Plasma ParserFactory `name:"plasma" optional:"true"`
+ Monad ParserFactory `name:"monad" optional:"true"`
+ Abstract ParserFactory `name:"abstract" optional:"true"`
+ Megaeth ParserFactory `name:"megaeth" optional:"true"`
+ Seismic ParserFactory `name:"seismic" optional:"true"`
}
ParserParams struct {
@@ -82,7 +90,7 @@ func NewParser(params Params) (Parser, error) {
sidechain := params.Config.Chain.Sidechain
if sidechain == api.SideChain_SIDECHAIN_NONE {
switch blockchain {
- case common.Blockchain_BLOCKCHAIN_BITCOIN:
+ case common.Blockchain_BLOCKCHAIN_BITCOIN, common.Blockchain_BLOCKCHAIN_BITCOINCASH, common.Blockchain_BLOCKCHAIN_LITECOIN:
factory = params.Bitcoin
case common.Blockchain_BLOCKCHAIN_BSC:
factory = params.Bsc
@@ -104,6 +112,22 @@ func NewParser(params Params) (Parser, error) {
factory = params.Fantom
case common.Blockchain_BLOCKCHAIN_APTOS:
factory = params.Aptos
+ case common.Blockchain_BLOCKCHAIN_TRON:
+ factory = params.Tron
+ case common.Blockchain_BLOCKCHAIN_STORY:
+ factory = params.Story
+ case common.Blockchain_BLOCKCHAIN_ETHEREUMCLASSIC:
+ factory = params.EthereumClassic
+ case common.Blockchain_BLOCKCHAIN_PLASMA:
+ factory = params.Plasma
+ case common.Blockchain_BLOCKCHAIN_MONAD:
+ factory = params.Monad
+ case common.Blockchain_BLOCKCHAIN_ABSTRACT:
+ factory = params.Abstract
+ case common.Blockchain_BLOCKCHAIN_MEGAETH:
+ factory = params.Megaeth
+ case common.Blockchain_BLOCKCHAIN_SEISMIC:
+ factory = params.Seismic
default:
if params.Config.IsRosetta() {
factory = params.Rosetta
diff --git a/internal/blockchain/restapi/client.go b/internal/blockchain/restapi/client.go
index 2b27f4f..68cc1dc 100644
--- a/internal/blockchain/restapi/client.go
+++ b/internal/blockchain/restapi/client.go
@@ -46,15 +46,17 @@ type (
Slave endpoints.EndpointProvider `name:"slave"`
Validator endpoints.EndpointProvider `name:"validator"`
Consensus endpoints.EndpointProvider `name:"consensus"`
+ Additional endpoints.EndpointProvider `name:"additional"`
HTTPClient HTTPClient `optional:"true"` // Injected by unit test.
}
ClientResult struct {
fx.Out
- Master Client `name:"master"`
- Slave Client `name:"slave"`
- Validator Client `name:"validator"`
- Consensus Client `name:"consensus"`
+ Master Client `name:"master"`
+ Slave Client `name:"slave"`
+ Validator Client `name:"validator"`
+ Consensus Client `name:"consensus"`
+ Additional Client `name:"additional"`
}
HTTPError struct {
@@ -66,6 +68,7 @@ type (
// The 'Name' is just used for annotation.
// For example, in Aptos, the 'ParamsPath' for block 1 will be: "/blocks/by_height/1?with_transactions=true".
RequestMethod struct {
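+ // HTTPMethod is the HTTP verb used for the request, e.g. http.MethodGet or http.MethodPost.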
+ HTTPMethod string
Name string
ParamsPath string
Timeout time.Duration
@@ -106,12 +109,16 @@ func New(params ClientParams) (ClientResult, error) {
if err != nil {
return ClientResult{}, xerrors.Errorf("failed to create consensus client: %w", err)
}
-
+ additional, err := newClient(params, params.Additional)
+ if err != nil {
+ return ClientResult{}, xerrors.Errorf("failed to create additional client: %w", err)
+ }
return ClientResult{
- Master: master,
- Slave: slave,
- Validator: validator,
- Consensus: consensus,
+ Master: master,
+ Slave: slave,
+ Validator: validator,
+ Consensus: consensus,
+ Additional: additional,
}, nil
}
@@ -171,9 +178,7 @@ func (c *clientImpl) makeHTTPRequest(ctx context.Context, method *RequestMethod,
ctx, cancel := context.WithTimeout(ctx, method.Timeout)
defer cancel()
-
- // TODO: will handle both GET and POST.
- request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, bytes.NewReader(requestBody))
+ request, err := http.NewRequestWithContext(ctx, method.HTTPMethod, url, bytes.NewReader(requestBody))
if err != nil {
err = c.sanitizedError(err)
return nil, xerrors.Errorf("failed to create request: %w", err)
diff --git a/internal/blockchain/restapi/client_test.go b/internal/blockchain/restapi/client_test.go
index e28be5a..4586bed 100644
--- a/internal/blockchain/restapi/client_test.go
+++ b/internal/blockchain/restapi/client_test.go
@@ -112,7 +112,7 @@ func TestCall_RequestError(t *testing.T) {
require.Nil(response)
require.Error(err)
- require.Contains(err.Error(), "method=&{hello path 5ns}")
+ require.Contains(err.Error(), "method=&{ hello path 5ns}")
require.Contains(err.Error(), "requestBody=[]")
require.Contains(err.Error(), "endpoint=node_name")
@@ -165,7 +165,7 @@ func TestCall_RequestError_FailedWithRetry(t *testing.T) {
require.Nil(response)
require.Error(err)
- require.Contains(err.Error(), "method=&{hello path 5ns}")
+ require.Contains(err.Error(), "method=&{ hello path 5ns}")
require.Contains(err.Error(), "requestBody=[]")
require.Contains(err.Error(), "endpoint=node_name")
@@ -181,6 +181,54 @@ func TestCall_RequestError_FailedWithRetry(t *testing.T) {
require.Equal("block_not_found", errOut.ErrorCode)
}
+func TestCall_RequestMethod(t *testing.T) {
+ require := testutil.Require(t)
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ httpClient := restapimocks.NewMockHTTPClient(ctrl)
+ // Construct a REST API response with request method in body
+ httpClient.EXPECT().Do(gomock.Any()).DoAndReturn(func(req *http.Request) (*http.Response, error) {
+ method := req.Method
+ body := ioutil.NopCloser(strings.NewReader(`{"method": "` + method + `"}`))
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: body,
+ }, nil
+ }).Times(2)
+
+ var params clientParams
+ app := testapp.New(
+ t,
+ withDummyEndpoints(),
+ fx.Provide(restapi.New),
+ fx.Provide(func() restapi.HTTPClient {
+ return httpClient
+ }),
+ fx.Populate(¶ms),
+ )
+ defer app.Close()
+
+ client := params.Master
+ require.NotNil(client)
+
+ methods := []string{http.MethodGet, http.MethodPost}
+ for _, method := range methods {
+ response, err := client.Call(context.Background(),
+ &restapi.RequestMethod{Name: "hello", ParamsPath: "path", HTTPMethod: method, Timeout: time.Duration(5)},
+ nil)
+ require.NoError(err)
+ // assert the right method
+ var responseData map[string]string
+ err = json.Unmarshal(response, &responseData)
+ require.NoError(err)
+ require.NotEmpty(responseData)
+ require.Equal(method, responseData["method"])
+ }
+}
+
func TestCall_RequestError_SucceededAfterRetries(t *testing.T) {
require := testutil.Require(t)
@@ -263,7 +311,7 @@ func TestCall_RequestError_WithCustomizedAttempts(t *testing.T) {
require.Error(err)
require.Nil(response)
- require.Contains(err.Error(), "method=&{hello path 5ns}")
+ require.Contains(err.Error(), "method=&{ hello path 5ns}")
require.Contains(err.Error(), "requestBody=[]")
require.Contains(err.Error(), "endpoint=node_name")
diff --git a/internal/config/config.go b/internal/config/config.go
index 6b30220..bef6585 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -40,6 +40,7 @@ type (
SLA SLAConfig `mapstructure:"sla"`
FunctionalTest FunctionalTestConfig `mapstructure:"functional_test"`
StatsD *StatsDConfig `mapstructure:"statsd"`
+ Prometheus *PrometheusConfig `mapstructure:"prometheus"`
namespace string
env Env
@@ -69,6 +70,9 @@ type (
IrreversibleDistance uint64 `mapstructure:"irreversible_distance" validate:"required"`
Rosetta RosettaConfig `mapstructure:"rosetta"`
BlockTime time.Duration `mapstructure:"block_time" validate:"required"`
+ // CustomParams stores chain-specific custom parameters as key-value pairs.
+ // For example, Seismic uses "src20_aes_key" for encrypted token transfer parsing.
+ CustomParams map[string]string `mapstructure:"custom_params"`
}
ClientConfig struct {
@@ -76,8 +80,10 @@ type (
Slave JSONRPCConfig `mapstructure:"slave"`
Validator JSONRPCConfig `mapstructure:"validator"`
Consensus JSONRPCConfig `mapstructure:"consensus"`
+ Additional JSONRPCConfig `mapstructure:"additional"`
Retry ClientRetryConfig `mapstructure:"retry"`
HttpTimeout time.Duration `mapstructure:"http_timeout"`
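+ // TxBatchSize is the batch size for transaction fetching; it defaults to 100.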
+ TxBatchSize int `mapstructure:"tx_batch_size"`
}
JSONRPCConfig struct {
@@ -114,15 +120,33 @@ type (
}
AwsConfig struct {
- Region string `mapstructure:"region" validate:"required"`
- Bucket string `mapstructure:"bucket" validate:"required"`
- DynamoDB DynamoDBConfig `mapstructure:"dynamodb" validate:"required"`
- IsLocalStack bool `mapstructure:"local_stack"`
- IsResetLocal bool `mapstructure:"reset_local"`
- PresignedUrlExpiration time.Duration `mapstructure:"presigned_url_expiration" validate:"required"`
- DLQ SQSConfig `mapstructure:"dlq"`
- Storage StorageConfig `mapstructure:"storage"`
- AWSAccount AWSAccount `mapstructure:"aws_account" validate:"required"`
+ Region string `mapstructure:"region" validate:"required"`
+ Bucket string `mapstructure:"bucket" validate:"required"`
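+ // At least one of Postgres or DynamoDB must be configured; the
+ // required_without tags below enforce this during validation.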
+ Postgres *PostgresConfig `mapstructure:"postgres" validate:"required_without=DynamoDB"`
+ DynamoDB *DynamoDBConfig `mapstructure:"dynamodb" validate:"required_without=Postgres"`
+ IsLocalStack bool `mapstructure:"local_stack"`
+ IsResetLocal bool `mapstructure:"reset_local"`
+ PresignedUrlExpiration time.Duration `mapstructure:"presigned_url_expiration" validate:"required"`
+ DLQ SQSConfig `mapstructure:"dlq"`
+ Storage StorageConfig `mapstructure:"storage"`
+ AWSAccount AWSAccount `mapstructure:"aws_account" validate:"required"`
+ }
+
+ PostgresConfig struct {
+ Host string `mapstructure:"host" validate:"required"`
+ Port int `mapstructure:"port" validate:"required"`
+ Database string `mapstructure:"database" validate:"required"`
+ User string `mapstructure:"user"`
+ Password string `mapstructure:"password"`
+ SSLMode string `mapstructure:"ssl_mode" validate:"required"`
+ MaxConnections int `mapstructure:"max_connections"`
+ MinConnections int `mapstructure:"min_connections"`
+ MaxIdleTime time.Duration `mapstructure:"max_idle_time"`
+ MaxLifetime time.Duration `mapstructure:"max_lifetime"`
+ ConnectTimeout time.Duration `mapstructure:"connect_timeout"`
+ StatementTimeout time.Duration `mapstructure:"statement_timeout"`
+ Schema string `mapstructure:"schema"`
+ TablePrefix string `mapstructure:"table_prefix"`
}
GcpConfig struct {
@@ -173,6 +197,7 @@ type (
CrossValidator CrossValidatorWorkflowConfig `mapstructure:"cross_validator"`
EventBackfiller EventBackfillerWorkflowConfig `mapstructure:"event_backfiller"`
Replicator ReplicatorWorkflowConfig `mapstructure:"replicator"`
+ Migrator MigratorWorkflowConfig `mapstructure:"migrator"`
}
WorkerConfig struct {
@@ -281,6 +306,18 @@ type (
BackoffInterval time.Duration `mapstructure:"backoff_interval"`
}
+ MigratorWorkflowConfig struct {
+ WorkflowConfig `mapstructure:",squash"`
+ BatchSize uint64 `mapstructure:"batch_size" validate:"required"`
+ MiniBatchSize uint64 `mapstructure:"mini_batch_size"`
+ CheckpointSize uint64 `mapstructure:"checkpoint_size" validate:"required"`
+ Parallelism int `mapstructure:"parallelism"`
+ BackoffInterval time.Duration `mapstructure:"backoff_interval"`
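+ // ContinuousSync, SyncInterval, and AutoResume control continuous operation:
+ // when ContinuousSync is set, the migrator is expected to re-run every
+ // SyncInterval, and AutoResume lets it restart from the last checkpoint
+ // (semantics inferred from the field names).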
+ ContinuousSync bool `mapstructure:"continuous_sync"`
+ SyncInterval time.Duration `mapstructure:"sync_interval"`
+ AutoResume bool `mapstructure:"auto_resume"`
+ }
+
RosettaConfig struct {
Blockchain string `mapstructure:"blockchain"`
Network string `mapstructure:"network" validate:"required_with=Blockchain"`
@@ -417,6 +454,24 @@ type (
Prefix string `mapstructure:"prefix"`
}
+ PrometheusConfig struct {
+ // Port is the port to listen on for the metrics server.
+ Port int `mapstructure:"port" validate:"required"`
+ // MetricsPath is the path to listen on for the metrics server.
+ MetricsPath string `mapstructure:"metrics_path"`
+ // Namespace is the namespace for the metrics.
+ Namespace string `mapstructure:"namespace"`
+ // GlobalLabels are labels that are applied to all metrics.
+ GlobalLabels map[string]string `mapstructure:"global_labels"`
+ // DefaultHistogramBuckets are the default buckets for histogram metrics
+ // if not specified in HistogramBuckets.
+ DefaultHistogramBuckets []float64 `mapstructure:"default_histogram_buckets"`
+ // HistogramBuckets are custom buckets for specific histogram metrics.
+ // This allows for more granular control over the histogram buckets on a
+ // per-metric basis.
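+ // For example (hypothetical metric name):
+ // histogram_buckets: {"request_latency": [0.01, 0.1, 1, 10]}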
+ HistogramBuckets map[string][]float64 `mapstructure:"histogram_buckets"`
+ }
+
ConfigOption func(options *configOptions)
Env string
@@ -469,6 +524,7 @@ var (
"UNSPECIFIED": 0,
"DYNAMODB": 1,
"FIRESTORE": 2,
+ "POSTGRES": 3,
}
DLQType_value = map[string]int32{
@@ -505,6 +561,7 @@ const (
MetaStorageType_UNSPECIFIED MetaStorageType = 0
MetaStorageType_DYNAMODB MetaStorageType = 1
MetaStorageType_FIRESTORE MetaStorageType = 2
+ MetaStorageType_POSTGRES MetaStorageType = 3
DLQType_UNSPECIFIED DLQType = 0
DLQType_SQS DLQType = 1
@@ -569,6 +626,7 @@ func New(opts ...ConfigOption) (*Config, error) {
v.SetDefault("aws.local_stack", true)
v.SetDefault("aws.reset_local", true)
}
+ v.SetDefault("chain.client.tx_batch_size", 100)
// Read the data in base.yml
if err := v.ReadConfig(configReader); err != nil {
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index a189f59..90399a9 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -8,6 +8,8 @@ import (
"testing"
"time"
+ "github.com/go-playground/validator/v10"
+
"github.com/coinbase/chainstorage/internal/config"
"github.com/coinbase/chainstorage/internal/utils/fixtures"
"github.com/coinbase/chainstorage/internal/utils/testapp"
@@ -145,23 +147,49 @@ func TestDerivedConfigValues(t *testing.T) {
normalizedConfigName := strings.ReplaceAll(configName, "_", "-")
// Verify template derived configs.
- dynamoDB := config.DynamoDBConfig{
- BlockTable: fmt.Sprintf("example_chainstorage_blocks_%v", configName),
- EventTable: cfg.AWS.DynamoDB.EventTable,
- EventTableHeightIndex: cfg.AWS.DynamoDB.EventTableHeightIndex,
- VersionedEventTable: fmt.Sprintf("example_chainstorage_versioned_block_events_%v", configName),
- VersionedEventTableBlockIndex: fmt.Sprintf("example_chainstorage_versioned_block_events_by_block_id_%v", configName),
- TransactionTable: cfg.AWS.DynamoDB.TransactionTable,
- // Skip DynamoDB.Arn verification
- Arn: "",
+ var dynamoDBPtr *config.DynamoDBConfig
+ if cfg.AWS.DynamoDB != nil {
+ dynamoDB := config.DynamoDBConfig{
+ BlockTable: fmt.Sprintf("example_chainstorage_blocks_%v", configName),
+ EventTable: cfg.AWS.DynamoDB.EventTable,
+ EventTableHeightIndex: cfg.AWS.DynamoDB.EventTableHeightIndex,
+ VersionedEventTable: fmt.Sprintf("example_chainstorage_versioned_block_events_%v", configName),
+ VersionedEventTableBlockIndex: fmt.Sprintf("example_chainstorage_versioned_block_events_by_block_id_%v", configName),
+ TransactionTable: cfg.AWS.DynamoDB.TransactionTable,
+ // Skip DynamoDB.Arn verification
+ Arn: "",
+ }
+ dynamoDBPtr = &dynamoDB
+ }
+
+ var postgresPtr *config.PostgresConfig
+ if cfg.AWS.Postgres != nil {
+ postgres := config.PostgresConfig{
+ Host: cfg.AWS.Postgres.Host,
+ Port: cfg.AWS.Postgres.Port,
+ Database: cfg.AWS.Postgres.Database,
+ User: cfg.AWS.Postgres.User,
+ Password: cfg.AWS.Postgres.Password,
+ SSLMode: cfg.AWS.Postgres.SSLMode,
+ MaxConnections: cfg.AWS.Postgres.MaxConnections,
+ MinConnections: cfg.AWS.Postgres.MinConnections,
+ MaxIdleTime: cfg.AWS.Postgres.MaxIdleTime,
+ MaxLifetime: cfg.AWS.Postgres.MaxLifetime,
+ ConnectTimeout: cfg.AWS.Postgres.ConnectTimeout,
+ StatementTimeout: cfg.AWS.Postgres.StatementTimeout,
+ Schema: cfg.AWS.Postgres.Schema,
+ TablePrefix: cfg.AWS.Postgres.TablePrefix,
+ }
+ postgresPtr = &postgres
}
expectedAWS := config.AwsConfig{
Region: "us-east-1",
Bucket: fmt.Sprintf("example-chainstorage-%v-%v", normalizedConfigName, cfg.AwsEnv()),
- DynamoDB: dynamoDB,
- IsLocalStack: true,
- IsResetLocal: true,
+ DynamoDB: dynamoDBPtr,
+ Postgres: postgresPtr,
+ IsLocalStack: cfg.AWS.IsLocalStack,
+ IsResetLocal: cfg.AWS.IsResetLocal,
PresignedUrlExpiration: 30 * time.Minute,
DLQ: config.SQSConfig{
Name: fmt.Sprintf("example_chainstorage_blocks_%v_dlq", configName),
@@ -1069,3 +1097,97 @@ func TestDefaultHttpTimeout(t *testing.T) {
require.NoError(err)
require.Equal(0*time.Second, cfg.Chain.Client.HttpTimeout)
}
+
+func TestDefaultTxBatchSize(t *testing.T) {
+ require := testutil.Require(t)
+
+ cfg, err := config.New()
+ require.NoError(err)
+ require.Equal(100, cfg.Chain.Client.TxBatchSize)
+}
+
+func TestValidateAWSStorageConfig(t *testing.T) {
+ // Validate the required_without relationship between the Postgres and
+ // DynamoDB storage configs.
+ require := testutil.Require(t)
+
+ cfg, err := config.New()
+ require.NoError(err)
+
+ // Test 1: DynamoDB config when Postgres is nil
+ cfg.AWS.Postgres = nil
+ cfg.AWS.DynamoDB = &config.DynamoDBConfig{
+ BlockTable: "block_table",
+ VersionedEventTable: "versioned_event_table",
+ VersionedEventTableBlockIndex: "versioned_event_table_block_index",
+ }
+
+ // Validate the config - this should NOT fail even though Postgres is nil
+ // and its fields (Host, Port, Database, SSLMode) are marked as required
+ validate := validator.New()
+ err = validate.Struct(&cfg.AWS)
+ require.NoError(err, "Validation should not fail when Postgres is nil")
+
+ require.Equal("block_table", cfg.AWS.DynamoDB.BlockTable)
+ require.Equal("versioned_event_table", cfg.AWS.DynamoDB.VersionedEventTable)
+ require.Equal("versioned_event_table_block_index", cfg.AWS.DynamoDB.VersionedEventTableBlockIndex)
+ cfg.AWS.DynamoDB = &config.DynamoDBConfig{
+ BlockTable: "block_table",
+ VersionedEventTable: "versioned_event_table",
+ VersionedEventTableBlockIndex: "versioned_event_table_block_index",
+ }
+ require.Equal("block_table", cfg.AWS.DynamoDB.BlockTable)
+ require.Equal("versioned_event_table", cfg.AWS.DynamoDB.VersionedEventTable)
+ require.Equal("versioned_event_table_block_index", cfg.AWS.DynamoDB.VersionedEventTableBlockIndex)
+ // Test 2: Postgres config when DynamoDB is nil - with valid config
+ cfg.AWS.DynamoDB = nil
+ cfg.AWS.Postgres = &config.PostgresConfig{
+ Host: "localhost",
+ Port: 5432,
+ Database: "chainstorage",
+ SSLMode: "disable",
+ }
+ err = validate.Struct(&cfg.AWS)
+ require.NoError(err, "Validation should pass with valid Postgres config")
+ require.Equal("localhost", cfg.AWS.Postgres.Host)
+ require.Equal(5432, cfg.AWS.Postgres.Port)
+ require.Equal("chainstorage", cfg.AWS.Postgres.Database)
+ require.Equal("disable", cfg.AWS.Postgres.SSLMode)
+
+ // Test 3: Postgres config with missing required fields should fail validation
+ cfg.AWS.DynamoDB = nil
+ cfg.AWS.Postgres = &config.PostgresConfig{
+ Host: "localhost",
+ Port: 5432,
+ // Missing Database and SSLMode which are required
+ }
+ err = validate.Struct(&cfg.AWS)
+ require.Error(err, "Validation should fail when Postgres is not nil but has missing required fields")
+ // Test 4: Both configs valid when both are not nil
+ cfg.AWS.DynamoDB = &config.DynamoDBConfig{
+ BlockTable: "block_table",
+ VersionedEventTable: "versioned_event_table",
+ VersionedEventTableBlockIndex: "versioned_event_table_block_index",
+ }
+ cfg.AWS.Postgres = &config.PostgresConfig{
+ Host: "localhost",
+ Port: 5432,
+ Database: "chainstorage",
+ SSLMode: "disable",
+ }
+ err = validate.Struct(&cfg.AWS)
+ require.NoError(err, "Validation should pass when both configs are valid")
+ require.Equal("block_table", cfg.AWS.DynamoDB.BlockTable)
+ require.Equal("versioned_event_table", cfg.AWS.DynamoDB.VersionedEventTable)
+ require.Equal("versioned_event_table_block_index", cfg.AWS.DynamoDB.VersionedEventTableBlockIndex)
+ require.Equal("localhost", cfg.AWS.Postgres.Host)
+ require.Equal(5432, cfg.AWS.Postgres.Port)
+ require.Equal("chainstorage", cfg.AWS.Postgres.Database)
+ require.Equal("disable", cfg.AWS.Postgres.SSLMode)
+
+ // Test 5: Both configs nil - validation should FAIL
+ // because at least one storage backend is required (required_without validation)
+ cfg.AWS.DynamoDB = nil
+ cfg.AWS.Postgres = nil
+ err = validate.Struct(&cfg.AWS)
+ require.Error(err, "Validation should fail when both configs are nil - at least one storage backend is required")
+}
diff --git a/internal/gateway/rest_client.go b/internal/gateway/rest_client.go
index e2fafff..b33720b 100644
--- a/internal/gateway/rest_client.go
+++ b/internal/gateway/rest_client.go
@@ -219,6 +219,15 @@ func (c *restClient) GetVerifiedAccountState(ctx context.Context, in *api.GetVer
return &response, nil
}
+func (c *restClient) GetBlockByTimestamp(ctx context.Context, in *api.GetBlockByTimestampRequest, opts ...grpc.CallOption) (*api.GetBlockByTimestampResponse, error) {
+ var response api.GetBlockByTimestampResponse
+ if err := c.makeRequest(ctx, "GetBlockByTimestamp", in, &response); err != nil {
+ return nil, xerrors.Errorf("failed to make request: %w", err)
+ }
+
+ return &response, nil
+}
+
func (c *restClient) makeRequest(ctx context.Context, method string, request proto.Message, response proto.Message) error {
return c.retry.Retry(ctx, func(ctx context.Context) error {
marshaler := protojson.MarshalOptions{}
@@ -237,6 +246,7 @@ func (c *restClient) makeRequest(ctx context.Context, method string, request pro
httpRequest.Header.Set("Accept", "application/json")
if c.authHeader != "" && c.authToken != "" {
httpRequest.Header.Set(c.authHeader, c.authToken)
+ httpRequest.Header.Set("cb-nft-api-token", c.authToken)
}
c.logger.Debug(
diff --git a/internal/server/handler.go b/internal/server/handler.go
index d1142fa..ea49211 100644
--- a/internal/server/handler.go
+++ b/internal/server/handler.go
@@ -107,6 +107,38 @@ type (
contextKey string
)
+// GetBlockByTimestamp implements chainstorage.ChainStorageServer.
+func (s *Server) GetBlockByTimestamp(ctx context.Context, req *api.GetBlockByTimestampRequest) (*api.GetBlockByTimestampResponse, error) {
+ clientID := getClientID(ctx)
+
+ tag := s.config.GetEffectiveBlockTag(req.GetTag())
+ if err := s.validateTag(tag); err != nil {
+ return nil, xerrors.Errorf("failed to validate tag: %w", err)
+ }
+
+ timestamp := req.GetTimestamp()
+ if timestamp == 0 {
+ return nil, status.Error(codes.InvalidArgument, "timestamp is required")
+ }
+
+ // Get block from meta storage using timestamp
+ block, err := s.metaStorage.GetBlockByTimestamp(ctx, tag, timestamp)
+ if err != nil {
+ // Don't wrap the error to allow the interceptor to properly map it
+ return nil, err
+ }
+
+ s.emitBlocksMetric("timestamp", clientID, 1)
+
+ return &api.GetBlockByTimestampResponse{
+ Tag: block.Tag,
+ Hash: block.Hash,
+ ParentHash: block.ParentHash,
+ Height: block.Height,
+ Timestamp: uint64(block.Timestamp.GetSeconds()),
+ }, nil
+}
+
const (
// Custom interceptors
errorInterceptorID = "xerror"
@@ -190,6 +222,7 @@ var rcuByMethod = map[string]int{
"GetRosettaBlocksByRange": 50,
"GetNativeTransaction": 10,
"GetVerifiedAccountState": 10,
+ "GetBlockByTimestamp": 5,
}
func NewServer(params ServerParams) *Server {
diff --git a/internal/server/handler_test.go b/internal/server/handler_test.go
index 4b0c0b0..47c8b40 100644
--- a/internal/server/handler_test.go
+++ b/internal/server/handler_test.go
@@ -22,6 +22,7 @@ import (
"golang.org/x/xerrors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/known/timestamppb"
"github.com/coinbase/chainstorage/internal/blockchain/client"
clientmocks "github.com/coinbase/chainstorage/internal/blockchain/client/mocks"
@@ -3088,3 +3089,113 @@ func (s *handlerTestSuite) TestGetVerifiedAccountState_Erc20() {
require.Equal(result, resp.GetResponse())
}
+
+func (s *handlerTestSuite) TestGetBlockByTimestamp_Success() {
+ require := testutil.Require(s.T())
+
+ // Create a test block with a specific timestamp
+ testTimestamp := uint64(1640995200) // 2022-01-01 00:00:00 UTC
+ testTime := time.Unix(int64(testTimestamp), 0)
+ stableTag := s.app.Config().GetStableBlockTag()
+
+ expectedBlock := &api.BlockMetadata{
+ Tag: stableTag,
+ Hash: "test_hash_123",
+ ParentHash: "test_parent_hash_122",
+ Height: 123,
+ ParentHeight: 122,
+ ObjectKeyMain: "test_object_key",
+ Timestamp: timestamppb.New(testTime),
+ Skipped: false,
+ }
+
+ // Set up mock expectation
+ s.metaStorage.EXPECT().GetBlockByTimestamp(gomock.Any(), stableTag, testTimestamp).Times(1).DoAndReturn(
+ func(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error) {
+ require.Equal(stableTag, tag)
+ return expectedBlock, nil
+ },
+ )
+
+ // Make the request
+ req := &api.GetBlockByTimestampRequest{
+ Tag: 0, // This will be converted to stable tag
+ Timestamp: testTimestamp,
+ }
+
+ resp, err := s.server.GetBlockByTimestamp(context.Background(), req)
+
+ // Verify the response
+ require.NoError(err)
+ require.NotNil(resp)
+ require.Equal(expectedBlock.Tag, resp.Tag)
+ require.Equal(expectedBlock.Hash, resp.Hash)
+ require.Equal(expectedBlock.ParentHash, resp.ParentHash)
+ require.Equal(expectedBlock.Height, resp.Height)
+ require.Equal(uint64(expectedBlock.Timestamp.GetSeconds()), resp.Timestamp)
+}
+
+func (s *handlerTestSuite) TestGetBlockByTimestamp_MissingTimestamp() {
+ require := testutil.Require(s.T())
+
+ // Test with missing timestamp
+ req := &api.GetBlockByTimestampRequest{
+ Tag: 0,
+ // Timestamp: 0 (missing)
+ }
+
+ resp, err := s.server.GetBlockByTimestamp(context.Background(), req)
+
+ require.Nil(resp)
+ s.verifyStatusCode(codes.InvalidArgument, err)
+}
+
+func (s *handlerTestSuite) TestGetBlockByTimestamp_NotFound() {
+ require := testutil.Require(s.T())
+
+ testTimestamp := uint64(1640995200) // 2022-01-01 00:00:00 UTC
+ stableTag := s.app.Config().GetStableBlockTag()
+
+ // Set up mock expectation to return not found
+ s.metaStorage.EXPECT().GetBlockByTimestamp(gomock.Any(), stableTag, testTimestamp).Times(1).DoAndReturn(
+ func(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error) {
+ require.Equal(stableTag, tag)
+ return nil, storage.ErrItemNotFound
+ },
+ )
+
+ req := &api.GetBlockByTimestampRequest{
+ Tag: 0, // This will be converted to stable tag
+ Timestamp: testTimestamp,
+ }
+
+ resp, err := s.server.GetBlockByTimestamp(context.Background(), req)
+
+ require.Nil(resp)
+ s.verifyStatusCode(codes.NotFound, err)
+}
+
+func (s *handlerTestSuite) TestGetBlockByTimestamp_StorageError() {
+ require := testutil.Require(s.T())
+
+ testTimestamp := uint64(1640995200) // 2022-01-01 00:00:00 UTC
+ stableTag := s.app.Config().GetStableBlockTag()
+
+ // Set up mock expectation to return an error
+ s.metaStorage.EXPECT().GetBlockByTimestamp(gomock.Any(), stableTag, testTimestamp).Times(1).DoAndReturn(
+ func(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error) {
+ require.Equal(stableTag, tag)
+ return nil, fmt.Errorf("database connection failed")
+ },
+ )
+
+ req := &api.GetBlockByTimestampRequest{
+ Tag: 0, // This will be converted to stable tag
+ Timestamp: testTimestamp,
+ }
+
+ resp, err := s.server.GetBlockByTimestamp(context.Background(), req)
+
+ require.Nil(resp)
+ s.verifyStatusCode(codes.Internal, err)
+}
diff --git a/internal/storage/blobstorage/gcs/blob_storage_integration_test.go b/internal/storage/blobstorage/gcs/blob_storage_integration_test.go
index 3c7dae2..0a896e6 100644
--- a/internal/storage/blobstorage/gcs/blob_storage_integration_test.go
+++ b/internal/storage/blobstorage/gcs/blob_storage_integration_test.go
@@ -106,6 +106,7 @@ func (s *gcpBlobStorageTestSuite) TestIntegrationGcsBlobStorageIntegration_GzipF
}
func TestIntegrationGcsBlobStorageTestSuite(t *testing.T) {
+ t.Skip()
require := testutil.Require(t)
cfg, err := config.New()
require.NoError(err)
diff --git a/internal/storage/metastorage/dynamodb/block_storage.go b/internal/storage/metastorage/dynamodb/block_storage.go
index a298fc7..f660078 100644
--- a/internal/storage/metastorage/dynamodb/block_storage.go
+++ b/internal/storage/metastorage/dynamodb/block_storage.go
@@ -272,6 +272,11 @@ func (a *blockStorageImpl) validateHeight(height uint64) error {
return nil
}
+func (a *blockStorageImpl) GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error) {
+ // Placeholder implementation - return error indicating not implemented
+ return nil, xerrors.New("GetBlockByTimestamp not implemented for DynamoDB")
+}
+
func makeBlockMetaDataDDBEntry(block *api.BlockMetadata) *model.BlockMetaDataDDBEntry {
blockMetaDataDDBEntry := model.BlockMetaDataDDBEntry{
BlockPid: getBlockPidForHeight(block.Tag, block.Height),
diff --git a/internal/storage/metastorage/dynamodb/mocks/mocks.go b/internal/storage/metastorage/dynamodb/mocks/mocks.go
index a5a1ec8..49f9547 100644
--- a/internal/storage/metastorage/dynamodb/mocks/mocks.go
+++ b/internal/storage/metastorage/dynamodb/mocks/mocks.go
@@ -474,6 +474,56 @@ func (mr *MockDynamoAPIMockRecorder) DeleteItemWithContext(arg0, arg1 any, arg2
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteItemWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).DeleteItemWithContext), varargs...)
}
+// DeleteResourcePolicy mocks base method.
+func (m *MockDynamoAPI) DeleteResourcePolicy(arg0 *dynamodb.DeleteResourcePolicyInput) (*dynamodb.DeleteResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteResourcePolicy", arg0)
+ ret0, _ := ret[0].(*dynamodb.DeleteResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicy indicates an expected call of DeleteResourcePolicy.
+func (mr *MockDynamoAPIMockRecorder) DeleteResourcePolicy(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicy", reflect.TypeOf((*MockDynamoAPI)(nil).DeleteResourcePolicy), arg0)
+}
+
+// DeleteResourcePolicyRequest mocks base method.
+func (m *MockDynamoAPI) DeleteResourcePolicyRequest(arg0 *dynamodb.DeleteResourcePolicyInput) (*request.Request, *dynamodb.DeleteResourcePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteResourcePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*dynamodb.DeleteResourcePolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicyRequest indicates an expected call of DeleteResourcePolicyRequest.
+func (mr *MockDynamoAPIMockRecorder) DeleteResourcePolicyRequest(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyRequest", reflect.TypeOf((*MockDynamoAPI)(nil).DeleteResourcePolicyRequest), arg0)
+}
+
+// DeleteResourcePolicyWithContext mocks base method.
+func (m *MockDynamoAPI) DeleteResourcePolicyWithContext(arg0 context.Context, arg1 *dynamodb.DeleteResourcePolicyInput, arg2 ...request.Option) (*dynamodb.DeleteResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteResourcePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*dynamodb.DeleteResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicyWithContext indicates an expected call of DeleteResourcePolicyWithContext.
+func (mr *MockDynamoAPIMockRecorder) DeleteResourcePolicyWithContext(arg0, arg1 any, arg2 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).DeleteResourcePolicyWithContext), varargs...)
+}
+
// DeleteTable mocks base method.
func (m *MockDynamoAPI) DeleteTable(arg0 *dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) {
m.ctrl.T.Helper()
@@ -1474,6 +1524,56 @@ func (mr *MockDynamoAPIMockRecorder) GetItemWithContext(arg0, arg1 any, arg2 ...
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetItemWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).GetItemWithContext), varargs...)
}
+// GetResourcePolicy mocks base method.
+func (m *MockDynamoAPI) GetResourcePolicy(arg0 *dynamodb.GetResourcePolicyInput) (*dynamodb.GetResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetResourcePolicy", arg0)
+ ret0, _ := ret[0].(*dynamodb.GetResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetResourcePolicy indicates an expected call of GetResourcePolicy.
+func (mr *MockDynamoAPIMockRecorder) GetResourcePolicy(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicy", reflect.TypeOf((*MockDynamoAPI)(nil).GetResourcePolicy), arg0)
+}
+
+// GetResourcePolicyRequest mocks base method.
+func (m *MockDynamoAPI) GetResourcePolicyRequest(arg0 *dynamodb.GetResourcePolicyInput) (*request.Request, *dynamodb.GetResourcePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetResourcePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*dynamodb.GetResourcePolicyOutput)
+ return ret0, ret1
+}
+
+// GetResourcePolicyRequest indicates an expected call of GetResourcePolicyRequest.
+func (mr *MockDynamoAPIMockRecorder) GetResourcePolicyRequest(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicyRequest", reflect.TypeOf((*MockDynamoAPI)(nil).GetResourcePolicyRequest), arg0)
+}
+
+// GetResourcePolicyWithContext mocks base method.
+func (m *MockDynamoAPI) GetResourcePolicyWithContext(arg0 context.Context, arg1 *dynamodb.GetResourcePolicyInput, arg2 ...request.Option) (*dynamodb.GetResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetResourcePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*dynamodb.GetResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetResourcePolicyWithContext indicates an expected call of GetResourcePolicyWithContext.
+func (mr *MockDynamoAPIMockRecorder) GetResourcePolicyWithContext(arg0, arg1 any, arg2 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicyWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).GetResourcePolicyWithContext), varargs...)
+}
+
// ImportTable mocks base method.
func (m *MockDynamoAPI) ImportTable(arg0 *dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error) {
m.ctrl.T.Helper()
@@ -2056,6 +2156,56 @@ func (mr *MockDynamoAPIMockRecorder) PutItemWithContext(arg0, arg1 any, arg2 ...
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutItemWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).PutItemWithContext), varargs...)
}
+// PutResourcePolicy mocks base method.
+func (m *MockDynamoAPI) PutResourcePolicy(arg0 *dynamodb.PutResourcePolicyInput) (*dynamodb.PutResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutResourcePolicy", arg0)
+ ret0, _ := ret[0].(*dynamodb.PutResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutResourcePolicy indicates an expected call of PutResourcePolicy.
+func (mr *MockDynamoAPIMockRecorder) PutResourcePolicy(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicy", reflect.TypeOf((*MockDynamoAPI)(nil).PutResourcePolicy), arg0)
+}
+
+// PutResourcePolicyRequest mocks base method.
+func (m *MockDynamoAPI) PutResourcePolicyRequest(arg0 *dynamodb.PutResourcePolicyInput) (*request.Request, *dynamodb.PutResourcePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutResourcePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*dynamodb.PutResourcePolicyOutput)
+ return ret0, ret1
+}
+
+// PutResourcePolicyRequest indicates an expected call of PutResourcePolicyRequest.
+func (mr *MockDynamoAPIMockRecorder) PutResourcePolicyRequest(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicyRequest", reflect.TypeOf((*MockDynamoAPI)(nil).PutResourcePolicyRequest), arg0)
+}
+
+// PutResourcePolicyWithContext mocks base method.
+func (m *MockDynamoAPI) PutResourcePolicyWithContext(arg0 context.Context, arg1 *dynamodb.PutResourcePolicyInput, arg2 ...request.Option) (*dynamodb.PutResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutResourcePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*dynamodb.PutResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutResourcePolicyWithContext indicates an expected call of PutResourcePolicyWithContext.
+func (mr *MockDynamoAPIMockRecorder) PutResourcePolicyWithContext(arg0, arg1 any, arg2 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicyWithContext", reflect.TypeOf((*MockDynamoAPI)(nil).PutResourcePolicyWithContext), varargs...)
+}
+
// Query mocks base method.
func (m *MockDynamoAPI) Query(arg0 *dynamodb.QueryInput) (*dynamodb.QueryOutput, error) {
m.ctrl.T.Helper()
diff --git a/internal/storage/metastorage/firestore/block_storage.go b/internal/storage/metastorage/firestore/block_storage.go
index b36572a..3ba6a6c 100644
--- a/internal/storage/metastorage/firestore/block_storage.go
+++ b/internal/storage/metastorage/firestore/block_storage.go
@@ -245,6 +245,11 @@ func (b *blockStorageImpl) validateHeight(height uint64) error {
return nil
}
+func (b *blockStorageImpl) GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*chainstorage.BlockMetadata, error) {
+ // Placeholder implementation - return error indicating not implemented
+ return nil, xerrors.New("GetBlockByTimestamp not implemented for Firestore")
+}
+
func (b *blockStorageImpl) getLatestBlockDocRef(tag uint32) *firestore.DocumentRef {
return b.client.Doc(fmt.Sprintf("env/%s/blocks/%d-latest", b.env, tag))
}
diff --git a/internal/storage/metastorage/firestore/block_storage_integration_test.go b/internal/storage/metastorage/firestore/block_storage_integration_test.go
index 5caf68e..f3eb201 100644
--- a/internal/storage/metastorage/firestore/block_storage_integration_test.go
+++ b/internal/storage/metastorage/firestore/block_storage_integration_test.go
@@ -312,6 +312,7 @@ func (s *blockStorageTestSuite) equalProto(x, y any) {
}
func TestIntegrationBlockStorageTestSuite(t *testing.T) {
+ t.Skip()
// TODO: speed up the tests before re-enabling TestAllEnvs.
// testapp.TestAllEnvs(t, func(t *testing.T, cfg *config.Config) {
// suite.Run(t, &blockStorageTestSuite{config: cfg})
diff --git a/internal/storage/metastorage/firestore/event_storage_integration_test.go b/internal/storage/metastorage/firestore/event_storage_integration_test.go
index d7dcdd6..c84e198 100644
--- a/internal/storage/metastorage/firestore/event_storage_integration_test.go
+++ b/internal/storage/metastorage/firestore/event_storage_integration_test.go
@@ -423,6 +423,7 @@ func (s *eventStorageTestSuite) TestGetEventsByBlockHeight() {
}
func TestIntegrationEventStorageTestSuite(t *testing.T) {
+ t.Skip()
require := testutil.Require(t)
// Test with eth-mainnet for stream version
cfg, err := config.New()
diff --git a/internal/storage/metastorage/internal/meta_storage.go b/internal/storage/metastorage/internal/meta_storage.go
index 70a044b..0252fd5 100644
--- a/internal/storage/metastorage/internal/meta_storage.go
+++ b/internal/storage/metastorage/internal/meta_storage.go
@@ -22,6 +22,8 @@ type (
// GetBlocksByHeights gets blocks by heights. Results is an ordered array that matches the order in `heights` array
// i.e. if the heights is [100,2,3], it will return the metadata in order: [block 100, block 2, block 3]
GetBlocksByHeights(ctx context.Context, tag uint32, heights []uint64) ([]*api.BlockMetadata, error)
+ // GetBlockByTimestamp gets the latest block before or at the given timestamp
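+ // (i.e., the block with the greatest height whose timestamp is <= the given timestamp, within the given tag)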
+ GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error)
}
EventStorage interface {
@@ -72,6 +74,7 @@ type (
fxparams.Params
DynamoDB MetaStorageFactory `name:"metastorage/dynamodb"`
Firestore MetaStorageFactory `name:"metastorage/firestore"`
+ Postgres MetaStorageFactory `name:"metastorage/postgres"`
}
)
@@ -83,6 +86,8 @@ func WithMetaStorageFactory(params MetaStorageFactoryParams) (Result, error) {
factory = params.DynamoDB
case config.MetaStorageType_FIRESTORE:
factory = params.Firestore
+ case config.MetaStorageType_POSTGRES:
+ factory = params.Postgres
}
if factory == nil {
return Result{}, xerrors.Errorf("meta storage type is not implemented: %v", storageType)
diff --git a/internal/storage/metastorage/mocks/mocks.go b/internal/storage/metastorage/mocks/mocks.go
index 1edebbf..8e808ba 100644
--- a/internal/storage/metastorage/mocks/mocks.go
+++ b/internal/storage/metastorage/mocks/mocks.go
@@ -113,6 +113,21 @@ func (mr *MockMetaStorageMockRecorder) GetBlockByHeight(arg0, arg1, arg2 any) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHeight", reflect.TypeOf((*MockMetaStorage)(nil).GetBlockByHeight), arg0, arg1, arg2)
}
+// GetBlockByTimestamp mocks base method.
+func (m *MockMetaStorage) GetBlockByTimestamp(arg0 context.Context, arg1 uint32, arg2 uint64) (*chainstorage.BlockMetadata, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBlockByTimestamp", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*chainstorage.BlockMetadata)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBlockByTimestamp indicates an expected call of GetBlockByTimestamp.
+func (mr *MockMetaStorageMockRecorder) GetBlockByTimestamp(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByTimestamp", reflect.TypeOf((*MockMetaStorage)(nil).GetBlockByTimestamp), arg0, arg1, arg2)
+}
+
// GetBlocksByHeightRange mocks base method.
func (m *MockMetaStorage) GetBlocksByHeightRange(arg0 context.Context, arg1 uint32, arg2, arg3 uint64) ([]*chainstorage.BlockMetadata, error) {
m.ctrl.T.Helper()
@@ -499,6 +514,21 @@ func (mr *MockBlockStorageMockRecorder) GetBlockByHeight(arg0, arg1, arg2 any) *
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHeight", reflect.TypeOf((*MockBlockStorage)(nil).GetBlockByHeight), arg0, arg1, arg2)
}
+// GetBlockByTimestamp mocks base method.
+func (m *MockBlockStorage) GetBlockByTimestamp(arg0 context.Context, arg1 uint32, arg2 uint64) (*chainstorage.BlockMetadata, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBlockByTimestamp", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*chainstorage.BlockMetadata)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBlockByTimestamp indicates an expected call of GetBlockByTimestamp.
+func (mr *MockBlockStorageMockRecorder) GetBlockByTimestamp(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByTimestamp", reflect.TypeOf((*MockBlockStorage)(nil).GetBlockByTimestamp), arg0, arg1, arg2)
+}
+
// GetBlocksByHeightRange mocks base method.
func (m *MockBlockStorage) GetBlocksByHeightRange(arg0 context.Context, arg1 uint32, arg2, arg3 uint64) ([]*chainstorage.BlockMetadata, error) {
m.ctrl.T.Helper()
diff --git a/internal/storage/metastorage/module.go b/internal/storage/metastorage/module.go
index e8deed2..661471d 100644
--- a/internal/storage/metastorage/module.go
+++ b/internal/storage/metastorage/module.go
@@ -7,6 +7,7 @@ import (
"github.com/coinbase/chainstorage/internal/storage/metastorage/firestore"
"github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
"github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
)
type (
@@ -29,5 +30,6 @@ func NewEventsToChainAdaptor() *EventsToChainAdaptor {
var Module = fx.Options(
dynamodb.Module,
firestore.Module,
+ postgres.Module,
fx.Provide(internal.WithMetaStorageFactory),
)
diff --git a/internal/storage/metastorage/postgres/admin.go b/internal/storage/metastorage/postgres/admin.go
new file mode 100644
index 0000000..910f0b1
--- /dev/null
+++ b/internal/storage/metastorage/postgres/admin.go
@@ -0,0 +1,174 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/lib/pq"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+// SetupDatabase creates a database and roles for a new network in PostgreSQL
+// This function is intended to be called by administrators during setup
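+//
+// Example (hypothetical names and credentials):
+//
+//   err := SetupDatabase(ctx, masterCfg,
+//       "chainstorage_worker", workerPassword,
+//       "chainstorage_server", serverPassword,
+//       "chainstorage_eth_mainnet")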
+func SetupDatabase(ctx context.Context, masterCfg *config.PostgresConfig, workerUser string, workerPassword string, serverUser string, serverPassword string, dbName string) error {
+ logger := log.WithPackage(log.NewDevelopment())
+
+ logger.Info("Setting up PostgreSQL database",
+ zap.String("database", dbName),
+ zap.String("worker_user", workerUser),
+ zap.String("server_user", serverUser))
+
+ // Connect to the default 'postgres' database with master credentials
+ dsn := fmt.Sprintf(
+ "host=%s port=%d dbname=postgres user=%s password=%s sslmode=%s",
+ masterCfg.Host, masterCfg.Port, masterCfg.User, masterCfg.Password, masterCfg.SSLMode,
+ )
+ if masterCfg.ConnectTimeout > 0 {
+ dsn += fmt.Sprintf(" connect_timeout=%d", int(masterCfg.ConnectTimeout.Seconds()))
+ }
+
+ adminDB, err := sql.Open("postgres", dsn)
+ if err != nil {
+ return xerrors.Errorf("failed to connect to postgres db with master user: %w", err)
+ }
+ defer func() {
+ if closeErr := adminDB.Close(); closeErr != nil {
+ logger.Warn("Failed to close admin database connection", zap.Error(closeErr))
+ }
+ }()
+
+ if err := adminDB.PingContext(ctx); err != nil {
+ return xerrors.Errorf("failed to ping postgres db with master user: %w", err)
+ }
+
+ logger.Info("Successfully connected to PostgreSQL as master user")
+
+ // Create worker role with LOGIN capability and password
+ logger.Info("Creating worker role", zap.String("username", workerUser))
+ workerQuery := fmt.Sprintf("CREATE ROLE %s WITH LOGIN PASSWORD %s",
+ pq.QuoteIdentifier(workerUser), pq.QuoteLiteral(workerPassword))
+ if _, err := adminDB.ExecContext(ctx, workerQuery); err != nil {
+ if pgErr, ok := err.(*pq.Error); ok && pgErr.Code.Name() == "duplicate_object" {
+ logger.Info("Worker role already exists, updating password", zap.String("username", workerUser))
+ // Update password for existing role
+ alterQuery := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s",
+ pq.QuoteIdentifier(workerUser), pq.QuoteLiteral(workerPassword))
+ if _, err := adminDB.ExecContext(ctx, alterQuery); err != nil {
+ return xerrors.Errorf("failed to update password for worker role %s: %w", workerUser, err)
+ }
+ } else {
+ return xerrors.Errorf("failed to create worker role %s: %w", workerUser, err)
+ }
+ } else {
+ logger.Info("Successfully created worker role", zap.String("username", workerUser))
+ }
+
+ // Create server role with LOGIN capability and password
+ logger.Info("Creating server role", zap.String("username", serverUser))
+ serverQuery := fmt.Sprintf("CREATE ROLE %s WITH LOGIN PASSWORD %s",
+ pq.QuoteIdentifier(serverUser), pq.QuoteLiteral(serverPassword))
+ if _, err := adminDB.ExecContext(ctx, serverQuery); err != nil {
+ if pgErr, ok := err.(*pq.Error); ok && pgErr.Code.Name() == "duplicate_object" {
+ logger.Info("Server role already exists, updating password", zap.String("username", serverUser))
+ // Update password for existing role
+ alterQuery := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s",
+ pq.QuoteIdentifier(serverUser), pq.QuoteLiteral(serverPassword))
+ if _, err := adminDB.ExecContext(ctx, alterQuery); err != nil {
+ return xerrors.Errorf("failed to update password for server role %s: %w", serverUser, err)
+ }
+ } else {
+ return xerrors.Errorf("failed to create server role %s: %w", serverUser, err)
+ }
+ } else {
+ logger.Info("Successfully created server role", zap.String("username", serverUser))
+ }
+
+ // Create application database owned by the worker role
+ logger.Info("Creating database", zap.String("database", dbName))
+ ownerOpt := fmt.Sprintf("OWNER = %s", pq.QuoteIdentifier(workerUser))
+ if _, err := adminDB.ExecContext(ctx, fmt.Sprintf(`CREATE DATABASE %s WITH %s`, pq.QuoteIdentifier(dbName), ownerOpt)); err != nil {
+ if pgErr, ok := err.(*pq.Error); ok && pgErr.Code.Name() == "duplicate_database" {
+ logger.Info("Database already exists, skipping creation", zap.String("database", dbName))
+ } else {
+ return xerrors.Errorf("failed to create database %s: %w", dbName, err)
+ }
+ } else {
+ logger.Info("Successfully created database", zap.String("database", dbName))
+ }
+
+ // Connect to the application database to set up permissions
+ logger.Info("Setting up permissions for database", zap.String("database", dbName))
+ appDsn := fmt.Sprintf(
+ "host=%s port=%d dbname=%s user=%s password=%s sslmode=%s",
+ masterCfg.Host, masterCfg.Port, dbName, masterCfg.User, masterCfg.Password, masterCfg.SSLMode,
+ )
+ if masterCfg.ConnectTimeout > 0 {
+ appDsn += fmt.Sprintf(" connect_timeout=%d", int(masterCfg.ConnectTimeout.Seconds()))
+ }
+
+ appDB, err := sql.Open("postgres", appDsn)
+ if err != nil {
+ return xerrors.Errorf("failed to connect to app database %s: %w", dbName, err)
+ }
+ defer func() {
+ if closeErr := appDB.Close(); closeErr != nil {
+ logger.Warn("Failed to close app database connection", zap.Error(closeErr))
+ }
+ }()
+
+ if err := appDB.PingContext(ctx); err != nil {
+ return xerrors.Errorf("failed to ping app database %s: %w", dbName, err)
+ }
+
+ // Grant connect permissions to server role
+ logger.Info("Granting CONNECT permission on database to server role",
+ zap.String("database", dbName),
+ zap.String("server_user", serverUser))
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(serverUser))); err != nil {
+ return xerrors.Errorf("failed to grant connect on db %s to role %s: %w", dbName, serverUser, err)
+ }
+
+ // Grant usage on public schema
+ logger.Info("Granting USAGE on schema public to server role", zap.String("server_user", serverUser))
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", pq.QuoteIdentifier(serverUser))); err != nil {
+ return xerrors.Errorf("failed to grant usage on schema public to role %s: %w", serverUser, err)
+ }
+
+ // Grant SELECT on all current tables to server role
+ logger.Info("Granting SELECT on all existing tables to server role", zap.String("server_user", serverUser))
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("GRANT SELECT ON ALL TABLES IN SCHEMA public TO %s", pq.QuoteIdentifier(serverUser))); err != nil {
+		// GRANT ... ON ALL TABLES is a no-op when the schema has no tables; a failure
+		// here usually means the admin lacks privileges on some existing table, and is
+		// treated as non-fatal.
+		logger.Warn("Failed to grant select on existing tables (treated as non-fatal)", zap.Error(err))
+ }
+
+ // Grant SELECT on all future tables to server role
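+	// Caveat (PostgreSQL semantics): ALTER DEFAULT PRIVILEGES without FOR ROLE only
+	// covers objects created later by the role executing it (the admin here). If the
+	// worker role ends up creating the tables, an "ALTER DEFAULT PRIVILEGES FOR ROLE
+	// <worker> ..." variant would be needed, which requires membership in that role.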
+ logger.Info("Setting up default privileges for future tables for server role", zap.String("server_user", serverUser))
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO %s", pq.QuoteIdentifier(serverUser))); err != nil {
+ return xerrors.Errorf("failed to alter default privileges for role %s: %w", serverUser, err)
+ }
+
+ // Also need to ensure the worker role has the necessary permissions on the database
+ logger.Info("Ensuring worker role has full access to database",
+ zap.String("worker_user", workerUser),
+ zap.String("database", dbName))
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("GRANT ALL PRIVILEGES ON DATABASE %s TO %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(workerUser))); err != nil {
+ logger.Warn("Failed to grant all privileges on database to worker role", zap.Error(err))
+ }
+
+ if _, err := appDB.ExecContext(ctx, fmt.Sprintf("GRANT ALL PRIVILEGES ON SCHEMA public TO %s", pq.QuoteIdentifier(workerUser))); err != nil {
+ logger.Warn("Failed to grant all privileges on schema to worker role", zap.Error(err))
+ }
+
+ logger.Info("Successfully set up database with roles",
+ zap.String("database", dbName),
+ zap.String("worker_user", workerUser),
+ zap.String("server_user", serverUser))
+ logger.Info("Database ready for use with the provided credentials")
+
+ return nil
+}
diff --git a/internal/storage/metastorage/postgres/block_storage.go b/internal/storage/metastorage/postgres/block_storage.go
new file mode 100644
index 0000000..5b47740
--- /dev/null
+++ b/internal/storage/metastorage/postgres/block_storage.go
@@ -0,0 +1,510 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "math/rand"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/parser"
+ "github.com/coinbase/chainstorage/internal/storage/internal/errors"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres/model"
+ "github.com/coinbase/chainstorage/internal/utils/instrument"
+ "github.com/coinbase/chainstorage/internal/utils/utils"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+type (
+ blockStorageImpl struct {
+ db *sql.DB
+ blockStartHeight uint64
+ instrumentPersistBlockMetas instrument.Instrument
+ instrumentGetLatestBlock instrument.InstrumentWithResult[*api.BlockMetadata]
+ instrumentGetBlockByHash instrument.InstrumentWithResult[*api.BlockMetadata]
+ instrumentGetBlockByHeight instrument.InstrumentWithResult[*api.BlockMetadata]
+ instrumentGetBlocksByHeightRange instrument.InstrumentWithResult[[]*api.BlockMetadata]
+ instrumentGetBlocksByHeights instrument.InstrumentWithResult[[]*api.BlockMetadata]
+ instrumentGetBlockByTimestamp instrument.InstrumentWithResult[*api.BlockMetadata]
+ }
+)
+
+func newBlockStorage(db *sql.DB, params Params) (internal.BlockStorage, error) {
+ metrics := params.Metrics.SubScope("block_storage").Tagged(map[string]string{
+ "storage_type": "postgres",
+ })
+ accessor := blockStorageImpl{
+ db: db,
+ blockStartHeight: params.Config.Chain.BlockStartHeight,
+ instrumentPersistBlockMetas: instrument.New(metrics, "persist_block_metas"),
+ instrumentGetLatestBlock: instrument.NewWithResult[*api.BlockMetadata](metrics, "get_latest_block"),
+ instrumentGetBlockByHash: instrument.NewWithResult[*api.BlockMetadata](metrics, "get_block_by_hash"),
+ instrumentGetBlockByHeight: instrument.NewWithResult[*api.BlockMetadata](metrics, "get_block_by_height"),
+ instrumentGetBlocksByHeightRange: instrument.NewWithResult[[]*api.BlockMetadata](metrics, "get_blocks_by_height_range"),
+ instrumentGetBlocksByHeights: instrument.NewWithResult[[]*api.BlockMetadata](metrics, "get_blocks_by_heights"),
+ instrumentGetBlockByTimestamp: instrument.NewWithResult[*api.BlockMetadata](metrics, "get_block_by_timestamp"),
+ }
+ return &accessor, nil
+}
+
+func (b *blockStorageImpl) PersistBlockMetas(
+ ctx context.Context, updateWatermark bool, blocks []*api.BlockMetadata, lastBlock *api.BlockMetadata) error {
+ return b.instrumentPersistBlockMetas.Instrument(ctx, func(ctx context.Context) error {
+ if len(blocks) == 0 {
+ return nil
+ }
+
+		// Sort blocks by height for chain validation.
+		// IMPORTANT: Use a stable sort so that, when multiple blocks share the same height
+		// (e.g., during a reorg), their relative order from the caller is preserved. This
+		// implementation follows the "last block wins" principle - the last block supplied
+		// for a given height becomes the canonical block for that height. This behavior is
+		// consistent with the DynamoDB implementation, where the last block overwrites the
+		// canonical entry.
+		//
+		// The canonical_blocks table uses "ON CONFLICT (height, tag) DO UPDATE", which means:
+		// - If multiple blocks in the input have the same height, the last one processed
+		//   overwrites previous entries in canonical_blocks
+		// - All blocks are still stored in block_metadata (allowing retrieval by specific hash)
+		// - Only the last block for each height becomes the canonical one
+		//
+		// Callers should ensure that when multiple blocks exist for the same height, the
+		// desired canonical block is placed last in the blocks array for that height.
+		sort.SliceStable(blocks, func(i, j int) bool {
+			return blocks[i].Height < blocks[j].Height
+		})
+ if err := parser.ValidateChain(blocks, lastBlock); err != nil {
+ return xerrors.Errorf("failed to validate chain: %w", err)
+ }
+
+ // Create transaction with timeout context
+ // Use a reasonable timeout for block persistence operations
+ txCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+
+ tx, err := b.db.BeginTx(txCtx, nil)
+ if err != nil {
+ return xerrors.Errorf("failed to begin transaction: %w", err)
+ }
+		committed := false
+		defer func() {
+			if !committed {
+				// Rollback errors are intentionally ignored: the transaction is already on
+				// a failure path and the original error is the one worth surfacing.
+				_ = tx.Rollback()
+			}
+		}()
+
+ // Different queries for skipped vs non-skipped blocks due to different conflict resolution
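+		// Note: each ON CONFLICT target below must match one of the partial unique
+		// indexes defined in the init schema migration (unique_tag_height_skipped and
+		// unique_tag_hash_regular); PostgreSQL resolves a conflict target by finding a
+		// unique index whose columns and WHERE predicate match.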
+ blockMetadataSkippedQuery := `
+ INSERT INTO block_metadata (height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ ON CONFLICT (tag, height) WHERE skipped = true DO UPDATE SET
+ hash = EXCLUDED.hash,
+ parent_hash = EXCLUDED.parent_hash,
+ parent_height = EXCLUDED.parent_height,
+ object_key_main = EXCLUDED.object_key_main,
+ timestamp = EXCLUDED.timestamp,
+ skipped = EXCLUDED.skipped
+ RETURNING id`
+
+ blockMetadataRegularQuery := `
+ INSERT INTO block_metadata (height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ ON CONFLICT (tag, hash) WHERE hash IS NOT NULL AND NOT skipped DO UPDATE SET
+ parent_hash = EXCLUDED.parent_hash,
+ parent_height = EXCLUDED.parent_height,
+ object_key_main = EXCLUDED.object_key_main,
+ timestamp = EXCLUDED.timestamp,
+ skipped = EXCLUDED.skipped
+ RETURNING id`
+
+ // Simply insert or update canonical blocks like DynamoDB does
+ // The "last write wins" behavior matches DynamoDB's TransactWriteItems
+ // Chain validation happens in update_watermark activity, not here
+ canonicalQuery := `
+ INSERT INTO canonical_blocks (height, block_metadata_id, tag)
+ VALUES ($1, $2, $3)
+ ON CONFLICT (height, tag) DO UPDATE
+ SET block_metadata_id = EXCLUDED.block_metadata_id`
+
+ for _, block := range blocks {
+ tsProto := block.GetTimestamp()
+ var unixTimestamp int64
+ if tsProto == nil { // special case for genesis block
+ unixTimestamp = 0
+ } else {
+ unixTimestamp = tsProto.GetSeconds() // directly get seconds from protobuf timestamp
+ }
+
+ var parentHeight uint64
+ if block.Height == 0 {
+ // Genesis block has no parent, set parent height to 0
+ parentHeight = 0
+ } else {
+ parentHeight = block.ParentHeight
+ }
+
+ var blockId int64
+ var query string
+ if block.Skipped {
+ query = blockMetadataSkippedQuery
+ } else {
+ query = blockMetadataRegularQuery
+ }
+
+ err = tx.QueryRowContext(txCtx, query,
+ block.Height,
+ block.Tag,
+ block.Hash,
+ block.ParentHash,
+ parentHeight,
+ block.ObjectKeyMain,
+ unixTimestamp,
+ block.Skipped,
+ ).Scan(&blockId)
+ if err != nil {
+ return xerrors.Errorf("failed to insert block metadata for height %d: %w", block.Height, err)
+ }
+
+ // Insert into canonical_blocks
+ // Always insert/update canonical blocks like DynamoDB does
+ _, err = tx.ExecContext(txCtx, canonicalQuery,
+ block.Height,
+ blockId,
+ block.Tag,
+ )
+ if err != nil {
+ return xerrors.Errorf("failed to insert canonical block for height %d: %w", block.Height, err)
+ }
+ }
+
+ // Update watermark if requested
+ // Set is_watermark=TRUE for the highest block to mark it as validated
+ // This prevents canonical chain leakage to streamer before validation
+ if updateWatermark && len(blocks) > 0 {
+ highestBlock := blocks[len(blocks)-1]
+
+ // Probabilistically clear old watermarks (1 in 5000 chance)
+ // This prevents unbounded accumulation while keeping the operation rare enough
+ // to have negligible performance impact
+			if rand.Intn(5000) == 0 {
+				// Clear all old watermarks for this tag, keeping only the current one.
+				// This is safe because only GetLatestBlock uses the is_watermark filter,
+				// and it only needs the highest watermarked block.
+				clearQuery := `
+					UPDATE canonical_blocks
+					SET is_watermark = FALSE
+					WHERE tag = $1 AND is_watermark = TRUE`
+				if _, cleanupErr := tx.ExecContext(txCtx, clearQuery, highestBlock.Tag); cleanupErr != nil {
+					// Cleanup is best-effort; ignore the error rather than failing the batch.
+					_ = cleanupErr
+				}
+			}
+
+ // Set the new watermark
+ watermarkQuery := `
+ UPDATE canonical_blocks
+ SET is_watermark = TRUE
+ WHERE tag = $1 AND height = $2`
+ _, err = tx.ExecContext(txCtx, watermarkQuery, highestBlock.Tag, highestBlock.Height)
+ if err != nil {
+ return xerrors.Errorf("failed to update watermark for height %d: %w", highestBlock.Height, err)
+ }
+ }
+
+ // Commit transaction
+ err = tx.Commit()
+ if err != nil {
+ return xerrors.Errorf("failed to commit transaction: %w", err)
+ }
+ committed = true
+ return nil
+ })
+}
+
+func (b *blockStorageImpl) GetLatestBlock(ctx context.Context, tag uint32) (*api.BlockMetadata, error) {
+ return b.instrumentGetLatestBlock.Instrument(ctx, func(ctx context.Context) (*api.BlockMetadata, error) {
+ // Get the latest canonical block by highest height
+ // Only return blocks that have been validated (is_watermark=TRUE)
+ // This prevents canonical chain leakage to streamer before validation
+ query := `
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND cb.is_watermark = TRUE
+ ORDER BY cb.height DESC
+ LIMIT 1`
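+		// The partial index idx_canonical_watermark (tag, height DESC) WHERE is_watermark,
+		// added in the watermark migration, keeps this lookup cheap regardless of how
+		// large canonical_blocks grows.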
+ row := b.db.QueryRowContext(ctx, query, tag)
+ block, err := model.BlockMetadataFromCanonicalRow(b.db, row)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, xerrors.Errorf("no latest block found: %w", errors.ErrItemNotFound)
+ }
+ return nil, xerrors.Errorf("failed to get latest block: %w", err)
+ }
+ return block, nil
+ })
+}
+
+func (b *blockStorageImpl) GetBlockByHash(ctx context.Context, tag uint32, height uint64, blockHash string) (*api.BlockMetadata, error) {
+ return b.instrumentGetBlockByHash.Instrument(ctx, func(ctx context.Context) (*api.BlockMetadata, error) {
+ if err := b.validateHeight(height); err != nil {
+ return nil, err
+ }
+ var row *sql.Row
+ if blockHash == "" {
+ // Get the canonical block at this height (could be regular or skipped)
+ query := `
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND cb.height = $2
+ LIMIT 1`
+ row = b.db.QueryRowContext(ctx, query, tag, height)
+ } else {
+ // Query block_metadata directly for the specific hash
+ query := `
+ SELECT id, height, tag, hash, parent_hash, parent_height, object_key_main,
+ timestamp, skipped
+ FROM block_metadata
+ WHERE tag = $1 AND height = $2 AND hash = $3
+ LIMIT 1`
+ row = b.db.QueryRowContext(ctx, query, tag, height, blockHash)
+ }
+
+ block, err := model.BlockMetadataFromRow(b.db, row)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, xerrors.Errorf("block not found: %w", errors.ErrItemNotFound)
+ }
+ return nil, xerrors.Errorf("failed to get block by hash: %w", err)
+ }
+ return block, nil
+ })
+}
+
+func (b *blockStorageImpl) GetBlockByHeight(ctx context.Context, tag uint32, height uint64) (*api.BlockMetadata, error) {
+ return b.instrumentGetBlockByHeight.Instrument(ctx, func(ctx context.Context) (*api.BlockMetadata, error) {
+ if err := b.validateHeight(height); err != nil {
+ return nil, err
+ }
+ // Get block from canonical_blocks table (includes both regular and skipped blocks)
+ query := `
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND cb.height = $2
+ LIMIT 1`
+ row := b.db.QueryRowContext(ctx, query, tag, height)
+ block, err := model.BlockMetadataFromCanonicalRow(b.db, row)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, xerrors.Errorf("block at height %d not found: %w", height, errors.ErrItemNotFound)
+ }
+ return nil, xerrors.Errorf("failed to get block by height: %w", err)
+ }
+ return block, nil
+ })
+}
+
+func (b *blockStorageImpl) GetBlocksByHeightRange(ctx context.Context, tag uint32, startHeight uint64, endHeight uint64) ([]*api.BlockMetadata, error) {
+ return b.instrumentGetBlocksByHeightRange.Instrument(ctx, func(ctx context.Context) ([]*api.BlockMetadata, error) {
+ if startHeight >= endHeight {
+ return nil, errors.ErrOutOfRange
+ }
+ if err := b.validateHeight(startHeight); err != nil {
+ return nil, err
+ }
+
+ // Get all blocks (canonical and skipped) from canonical_blocks table
+ query := `
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND cb.height >= $2 AND cb.height < $3
+ ORDER BY cb.height ASC`
+ rows, err := b.db.QueryContext(ctx, query, tag, startHeight, endHeight)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to query blocks by height range: %w", err)
+ }
+		// Close errors cannot alter the already-computed return values of this closure
+		// (its results are not named), so close defensively; scan and iteration errors
+		// are surfaced by the row-mapping helper below.
+		defer func() {
+			_ = rows.Close()
+		}()
+
+ blocks, err := model.BlockMetadataFromCanonicalRows(b.db, rows)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to scan block rows: %w", err)
+ }
+
+ // Check if we have all blocks in the range (no gaps)
+ expectedCount := int(endHeight - startHeight)
+ if len(blocks) != expectedCount {
+ return nil, xerrors.Errorf("missing blocks in range [%d, %d): expected %d, got %d: %w",
+ startHeight, endHeight, expectedCount, len(blocks), errors.ErrItemNotFound)
+ }
+
+ // Verify no gaps in heights
+ for i, block := range blocks {
+ expectedHeight := startHeight + uint64(i)
+ if block.Height != expectedHeight {
+ return nil, xerrors.Errorf("gap in block heights: expected %d, got %d: %w",
+ expectedHeight, block.Height, errors.ErrItemNotFound)
+ }
+ }
+
+ // Validate chain continuity (parent hash matching) like DynamoDB does
+ // This is critical for detecting reorgs and triggering recovery logic
+ if err = parser.ValidateChain(blocks, nil); err != nil {
+ return nil, xerrors.Errorf("failed to validate chain: %w", err)
+ }
+
+ return blocks, nil
+ })
+}
+
+func (b *blockStorageImpl) GetBlocksByHeights(ctx context.Context, tag uint32, heights []uint64) ([]*api.BlockMetadata, error) {
+ return b.instrumentGetBlocksByHeights.Instrument(ctx, func(ctx context.Context) ([]*api.BlockMetadata, error) {
+ for _, height := range heights {
+ if err := b.validateHeight(height); err != nil {
+ return nil, err
+ }
+ }
+ if len(heights) == 0 {
+ return []*api.BlockMetadata{}, nil
+ }
+ // Build dynamic query with placeholders for IN clause
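+		// (An equivalent, placeholder-free formulation would pass the heights as an
+		// array with "cb.height = ANY($2)" via pq.Array; the explicit IN list behaves
+		// the same.)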
+ placeholders := make([]string, len(heights))
+ args := make([]interface{}, len(heights)+1)
+ args[0] = tag // First argument is tag
+ for i, height := range heights {
+ placeholders[i] = fmt.Sprintf("$%d", i+2) // Start from $2 since $1 is tag
+ args[i+1] = height
+ }
+ query := fmt.Sprintf(`
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND cb.height IN (%s)
+ ORDER BY cb.height ASC`,
+ strings.Join(placeholders, ", "))
+
+ rows, err := b.db.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to query blocks by heights: %w", err)
+ }
+		// As above, a close error cannot change the closure's return values, so close
+		// defensively and rely on the row-mapping helper for iteration errors.
+		defer func() {
+			_ = rows.Close()
+		}()
+
+ blocks, err := model.BlockMetadataFromCanonicalRows(b.db, rows)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to scan block rows: %w", err)
+ }
+
+ // Verify we got all requested blocks and return them in the same order as requested
+ blockMap := make(map[uint64]*api.BlockMetadata)
+ for _, block := range blocks {
+ blockMap[block.Height] = block
+ }
+
+ orderedBlocks := make([]*api.BlockMetadata, len(heights))
+ for i, height := range heights {
+ block, exists := blockMap[height]
+ if !exists {
+ return nil, xerrors.Errorf("block at height %d not found: %w", height, errors.ErrItemNotFound)
+ }
+ orderedBlocks[i] = block
+ }
+
+ return orderedBlocks, nil
+ })
+}
+
+func (b *blockStorageImpl) validateHeight(height uint64) error {
+ if height < b.blockStartHeight {
+ return xerrors.Errorf("height(%d) should be no less than blockStartHeight(%d): %w",
+ height, b.blockStartHeight, errors.ErrInvalidHeight)
+ }
+ return nil
+}
+
+// GetWatermarkCount returns the number of watermarked blocks for monitoring purposes
+// This metric helps track watermark accumulation and determine if cleanup is needed
+func (b *blockStorageImpl) GetWatermarkCount(ctx context.Context, tag uint32) (int64, error) {
+ var count int64
+ query := `SELECT COUNT(*) FROM canonical_blocks WHERE tag = $1 AND is_watermark = TRUE`
+ err := b.db.QueryRowContext(ctx, query, tag).Scan(&count)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to get watermark count: %w", err)
+ }
+ return count, nil
+}
+
+func (b *blockStorageImpl) GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.BlockMetadata, error) {
+ return b.instrumentGetBlockByTimestamp.Instrument(ctx, func(ctx context.Context) (*api.BlockMetadata, error) {
+ // Query to get the latest block before or at the given timestamp
+ query := `
+ SELECT bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main,
+ bm.timestamp, bm.skipped
+ FROM canonical_blocks cb
+ JOIN block_metadata bm ON cb.block_metadata_id = bm.id
+ WHERE cb.tag = $1 AND bm.timestamp <= $2
+ ORDER BY bm.timestamp DESC, bm.height DESC
+ LIMIT 1
+ `
+
+ var blockId int64
+ var height uint64
+ var blockTag uint32
+ var hash, parentHash, objectKeyMain sql.NullString
+ var parentHeight uint64
+ var blockTimestamp int64
+ var skipped bool
+
+		err := b.db.QueryRowContext(ctx, query, tag, timestamp).Scan(
+			&blockId, &height, &blockTag, &hash, &parentHash, &parentHeight, &objectKeyMain, &blockTimestamp, &skipped)
+		if err != nil {
+			if err == sql.ErrNoRows {
+				return nil, xerrors.Errorf("no block found before timestamp %d: %w", timestamp, errors.ErrItemNotFound)
+			}
+			return nil, xerrors.Errorf("failed to get block by timestamp: %w", err)
+		}
+
+		// Treat rows without a hash or with a zero timestamp (e.g. genesis or skipped
+		// blocks) as not found for timestamp lookups.
+		if !hash.Valid || blockTimestamp == 0 {
+			return nil, xerrors.Errorf("no block found before timestamp %d: %w", timestamp, errors.ErrItemNotFound)
+		}
+
+ return &api.BlockMetadata{
+ Tag: blockTag,
+ Hash: hash.String,
+ ParentHash: parentHash.String,
+ Height: height,
+ ParentHeight: parentHeight,
+ ObjectKeyMain: objectKeyMain.String,
+ Timestamp: utils.ToTimestamp(blockTimestamp),
+ Skipped: skipped,
+ }, nil
+ })
+}
diff --git a/internal/storage/metastorage/postgres/block_storage_integration_test.go b/internal/storage/metastorage/postgres/block_storage_integration_test.go
new file mode 100644
index 0000000..087f07f
--- /dev/null
+++ b/internal/storage/metastorage/postgres/block_storage_integration_test.go
@@ -0,0 +1,430 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "math/rand/v2"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/suite"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/testing/protocmp"
+
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/parser"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage/internal/errors"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+const (
+ tag = 1
+)
+
+type blockStorageTestSuite struct {
+ suite.Suite
+ accessor internal.MetaStorage
+ config *config.Config
+ db *sql.DB
+}
+
+func (s *blockStorageTestSuite) SetupTest() {
+ require := testutil.Require(s.T())
+
+ var accessor internal.MetaStorage
+ cfg, err := config.New()
+ require.NoError(err)
+
+ // Skip tests if Postgres is not configured
+ if cfg.AWS.Postgres == nil {
+ s.T().Skip("Postgres not configured, skipping test suite")
+ return
+ }
+
+ // Set the starting block height
+ cfg.Chain.BlockStartHeight = 10
+ s.config = cfg
+ // Create a new test application with Postgres configuration
+ app := testapp.New(
+ s.T(),
+ fx.Provide(NewMetaStorage),
+ testapp.WithIntegration(),
+ testapp.WithConfig(s.config),
+ fx.Populate(&accessor),
+ )
+ defer app.Close()
+ s.accessor = accessor
+
+ // Get database connection for cleanup
+ db, err := newDBConnection(context.Background(), cfg.AWS.Postgres)
+ require.NoError(err)
+ s.db = db
+}
+
+func (s *blockStorageTestSuite) TearDownTest() {
+ if s.db != nil {
+ ctx := context.Background()
+ s.T().Log("Clearing database tables after test")
+ // Clear all tables in reverse order due to foreign key constraints
+ tables := []string{"block_events", "canonical_blocks", "block_metadata"}
+ for _, table := range tables {
+ _, err := s.db.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s", table))
+ if err != nil {
+ s.T().Logf("Failed to clear table %s: %v", table, err)
+ }
+ }
+ }
+}
+
+func (s *blockStorageTestSuite) TearDownSuite() {
+ if s.db != nil {
+ s.db.Close()
+ }
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasByMaxWriteSize() {
+ tests := []struct {
+ totalBlocks int
+ }{
+ {totalBlocks: 2},
+ {totalBlocks: 4},
+ {totalBlocks: 8},
+ {totalBlocks: 64},
+ }
+ for _, test := range tests {
+ s.T().Run(fmt.Sprintf("test %d blocks", test.totalBlocks), func(t *testing.T) {
+ s.runTestPersistBlockMetas(test.totalBlocks)
+ })
+ }
+}
+
+func (s *blockStorageTestSuite) runTestPersistBlockMetas(totalBlocks int) {
+ require := testutil.Require(s.T())
+ startHeight := s.config.Chain.BlockStartHeight
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight, totalBlocks, tag)
+	ctx := context.TODO()
+
+	// Shuffle the blocks to verify persistence is order-independent. Seed the
+	// generator explicitly and log the seed so a failing order can be reproduced.
+	shuffleSeed := time.Now().UnixNano()
+	rng := rand.New(rand.NewPCG(uint64(shuffleSeed), 0))
+	rng.Shuffle(len(blocks), func(i, j int) { blocks[i], blocks[j] = blocks[j], blocks[i] })
+	logger := zaptest.NewLogger(s.T())
+	logger.Info("shuffled blocks", zap.Int64("seed", shuffleSeed))
+
+ fmt.Println("Persisting blocks")
+ err := s.accessor.PersistBlockMetas(ctx, true, blocks, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ expectedLatestBlock := proto.Clone(blocks[totalBlocks-1])
+
+ // fetch range with missing item
+ fmt.Println("Fetching range with missing item")
+ _, err = s.accessor.GetBlocksByHeightRange(ctx, tag, startHeight, startHeight+uint64(totalBlocks+100))
+ require.Error(err)
+ require.True(xerrors.Is(err, errors.ErrItemNotFound))
+
+ // fetch valid range
+ fmt.Println("Fetching valid range")
+ fetchedBlocks, err := s.accessor.GetBlocksByHeightRange(ctx, tag, startHeight, startHeight+uint64(totalBlocks))
+ if err != nil {
+ panic(err)
+ }
+ sort.Slice(fetchedBlocks, func(i, j int) bool {
+ return fetchedBlocks[i].Height < fetchedBlocks[j].Height
+ })
+	assert.Len(s.T(), fetchedBlocks, totalBlocks)
+
+ for i := 0; i < len(blocks); i++ {
+ //get block by height
+ // fetch block through three ways, should always return identical result
+ fetchedBlockMeta, err := s.accessor.GetBlockByHeight(ctx, tag, blocks[i].Height)
+ if err != nil {
+ panic(err)
+ }
+ s.equalProto(blocks[i], fetchedBlockMeta)
+
+ fetchedBlockMeta, err = s.accessor.GetBlockByHash(ctx, tag, blocks[i].Height, blocks[i].Hash)
+ if err != nil {
+ panic(err)
+ }
+ s.equalProto(blocks[i], fetchedBlockMeta)
+
+ fetchedBlockMeta, err = s.accessor.GetBlockByHash(ctx, tag, blocks[i].Height, "")
+ if err != nil {
+ panic(err)
+ }
+ s.equalProto(blocks[i], fetchedBlockMeta)
+
+ s.equalProto(blocks[i], fetchedBlocks[i])
+ }
+
+	fetchedBlocksMeta, err := s.accessor.GetBlocksByHeights(ctx, tag, []uint64{startHeight + 1, startHeight + uint64(totalBlocks/2), startHeight, startHeight + uint64(totalBlocks) - 1})
+	require.NoError(err)
+ assert.Len(s.T(), fetchedBlocksMeta, 4)
+ s.equalProto(blocks[1], fetchedBlocksMeta[0])
+ s.equalProto(blocks[totalBlocks/2], fetchedBlocksMeta[1])
+ s.equalProto(blocks[0], fetchedBlocksMeta[2])
+ s.equalProto(blocks[totalBlocks-1], fetchedBlocksMeta[3])
+
+	fetchedBlockMeta, err := s.accessor.GetLatestBlock(ctx, tag)
+	require.NoError(err)
+	s.equalProto(expectedLatestBlock, fetchedBlockMeta)
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasByInvalidChain() {
+ require := testutil.Require(s.T())
+ blocks := testutil.MakeBlockMetadatas(100, tag)
+ blocks[73].Hash = "0xdeadbeef"
+ err := s.accessor.PersistBlockMetas(context.Background(), true, blocks, nil)
+ require.Error(err)
+ require.True(xerrors.Is(err, parser.ErrInvalidChain))
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasByInvalidLastBlock() {
+ require := testutil.Require(s.T())
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(1_000_000, 100, tag)
+ lastBlock := testutil.MakeBlockMetadata(999_999, tag)
+ lastBlock.Hash = "0xdeadbeef"
+ err := s.accessor.PersistBlockMetas(context.Background(), true, blocks, lastBlock)
+ require.Error(err)
+ require.True(xerrors.Is(err, parser.ErrInvalidChain))
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasWithSkippedBlocks() {
+ require := testutil.Require(s.T())
+
+ ctx := context.Background()
+ startHeight := s.config.Chain.BlockStartHeight
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 100, tag)
+ // Mark 37th block as skipped and point the next block to the previous block.
+ blocks[37] = &api.BlockMetadata{
+ Tag: tag,
+ Height: startHeight + 37,
+ Skipped: true,
+ }
+ blocks[38].ParentHeight = blocks[36].Height
+ blocks[38].ParentHash = blocks[36].Hash
+ err := s.accessor.PersistBlockMetas(ctx, true, blocks, nil)
+ require.NoError(err)
+
+ fetchedBlocks, err := s.accessor.GetBlocksByHeightRange(ctx, tag, startHeight, startHeight+100)
+ require.NoError(err)
+	require.Equal(len(blocks), len(fetchedBlocks))
+	for i := range blocks {
+		s.equalProto(blocks[i], fetchedBlocks[i])
+	}
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetas() {
+ s.runTestPersistBlockMetas(10)
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasNotContinuous() {
+ blocks := testutil.MakeBlockMetadatas(10, tag)
+ blocks[2] = blocks[9]
+ err := s.accessor.PersistBlockMetas(context.TODO(), true, blocks[:9], nil)
+ assert.NotNil(s.T(), err)
+}
+
+func (s *blockStorageTestSuite) TestPersistBlockMetasDuplicatedHeights() {
+ blocks := testutil.MakeBlockMetadatas(10, tag)
+ blocks[9].Height = 2
+ err := s.accessor.PersistBlockMetas(context.TODO(), true, blocks, nil)
+ assert.NotNil(s.T(), err)
+}
+
+func (s *blockStorageTestSuite) TestGetBlocksNotExist() {
+ _, err := s.accessor.GetLatestBlock(context.TODO(), tag)
+ assert.True(s.T(), xerrors.Is(err, errors.ErrItemNotFound))
+}
+
+func (s *blockStorageTestSuite) TestGetBlockByHeightInvalidHeight() {
+ _, err := s.accessor.GetBlockByHeight(context.TODO(), tag, 0)
+ assert.True(s.T(), xerrors.Is(err, errors.ErrInvalidHeight))
+}
+
+func (s *blockStorageTestSuite) TestGetBlocksByHeightsInvalidHeight() {
+ _, err := s.accessor.GetBlocksByHeights(context.TODO(), tag, []uint64{0})
+ assert.True(s.T(), xerrors.Is(err, errors.ErrInvalidHeight))
+}
+
+func (s *blockStorageTestSuite) TestGetBlocksByHeightsBlockNotFound() {
+ _, err := s.accessor.GetBlocksByHeights(context.TODO(), tag, []uint64{15})
+ assert.True(s.T(), xerrors.Is(err, errors.ErrItemNotFound))
+}
+
+func (s *blockStorageTestSuite) TestGetBlockByHashInvalidHeight() {
+ _, err := s.accessor.GetBlockByHash(context.TODO(), tag, 0, "0x0")
+ assert.True(s.T(), xerrors.Is(err, errors.ErrInvalidHeight))
+}
+
+func (s *blockStorageTestSuite) TestGetBlocksByHeightRangeInvalidRange() {
+ _, err := s.accessor.GetBlocksByHeightRange(context.TODO(), tag, 100, 100)
+ assert.True(s.T(), xerrors.Is(err, errors.ErrOutOfRange))
+
+ _, err = s.accessor.GetBlocksByHeightRange(context.TODO(), tag, 0, s.config.Chain.BlockStartHeight)
+ assert.True(s.T(), xerrors.Is(err, errors.ErrInvalidHeight))
+}
+
+func (s *blockStorageTestSuite) equalProto(x, y any) {
+ if diff := cmp.Diff(x, y, protocmp.Transform()); diff != "" {
+ assert.FailNow(s.T(), diff)
+ }
+}
+
+func (s *blockStorageTestSuite) TestWatermarkVisibilityControl() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+ startHeight := s.config.Chain.BlockStartHeight
+
+ // Create blocks
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 10, tag)
+
+ // Persist blocks WITHOUT watermark (updateWatermark=false)
+ err := s.accessor.PersistBlockMetas(ctx, false, blocks, nil)
+ require.NoError(err)
+
+ // GetLatestBlock should return ErrItemNotFound because no blocks are watermarked
+ _, err = s.accessor.GetLatestBlock(ctx, tag)
+ require.Error(err)
+ require.True(xerrors.Is(err, errors.ErrItemNotFound), "Expected ErrItemNotFound when no watermark exists")
+
+ // Now persist the same blocks WITH watermark (updateWatermark=true)
+ err = s.accessor.PersistBlockMetas(ctx, true, blocks, nil)
+ require.NoError(err)
+
+ // GetLatestBlock should now return the highest block
+ latestBlock, err := s.accessor.GetLatestBlock(ctx, tag)
+ require.NoError(err)
+ require.NotNil(latestBlock)
+ require.Equal(blocks[9].Height, latestBlock.Height)
+ require.Equal(blocks[9].Hash, latestBlock.Hash)
+
+ // Add more blocks with watermark
+ moreBlocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight+10, 5, tag)
+ err = s.accessor.PersistBlockMetas(ctx, true, moreBlocks, nil)
+ require.NoError(err)
+
+ // GetLatestBlock should return the new highest watermarked block
+ latestBlock, err = s.accessor.GetLatestBlock(ctx, tag)
+ require.NoError(err)
+ require.Equal(moreBlocks[4].Height, latestBlock.Height)
+ require.Equal(moreBlocks[4].Hash, latestBlock.Hash)
+}
+
+func (s *blockStorageTestSuite) TestWatermarkWithReorg() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+ startHeight := s.config.Chain.BlockStartHeight
+
+ // Create initial chain
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 10, tag)
+ err := s.accessor.PersistBlockMetas(ctx, true, blocks, nil)
+ require.NoError(err)
+
+ // Verify latest block
+ latestBlock, err := s.accessor.GetLatestBlock(ctx, tag)
+ require.NoError(err)
+ require.Equal(blocks[9].Height, latestBlock.Height)
+
+ // Simulate reorg: create alternative chain from height startHeight+7
+ // This represents the reorg scenario where we need to replace blocks
+ reorgBlocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight+7, 3, tag)
+ // Change hashes to simulate different blocks
+ for i := range reorgBlocks {
+ reorgBlocks[i].Hash = fmt.Sprintf("0xreorg%d", i)
+ if i > 0 {
+ reorgBlocks[i].ParentHash = reorgBlocks[i-1].Hash
+ } else {
+ reorgBlocks[i].ParentHash = blocks[6].Hash // Link to pre-reorg chain
+ }
+ }
+
+ // Persist reorg blocks with watermark
+ err = s.accessor.PersistBlockMetas(ctx, true, reorgBlocks, blocks[6])
+ require.NoError(err)
+
+ // GetLatestBlock should return the new reorg tip
+ latestBlock, err = s.accessor.GetLatestBlock(ctx, tag)
+ require.NoError(err)
+ require.Equal(reorgBlocks[2].Height, latestBlock.Height)
+ require.Equal(reorgBlocks[2].Hash, latestBlock.Hash)
+}
+
+func (s *blockStorageTestSuite) TestWatermarkMultipleTags() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+ startHeight := s.config.Chain.BlockStartHeight
+
+ tag1 := uint32(1)
+ tag2 := uint32(2)
+
+ // Create blocks for tag1
+ blocks1 := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 5, tag1)
+ err := s.accessor.PersistBlockMetas(ctx, true, blocks1, nil)
+ require.NoError(err)
+
+ // Create blocks for tag2
+ blocks2 := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 10, tag2)
+ err = s.accessor.PersistBlockMetas(ctx, true, blocks2, nil)
+ require.NoError(err)
+
+ // Verify each tag has its own latest block
+ latestBlock1, err := s.accessor.GetLatestBlock(ctx, tag1)
+ require.NoError(err)
+ require.Equal(blocks1[4].Height, latestBlock1.Height)
+
+ latestBlock2, err := s.accessor.GetLatestBlock(ctx, tag2)
+ require.NoError(err)
+ require.Equal(blocks2[9].Height, latestBlock2.Height)
+}
+
+func (s *blockStorageTestSuite) TestGetBlocksByHeightRangeStillWorks() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+ startHeight := s.config.Chain.BlockStartHeight
+
+ // Create blocks without watermark
+ blocks := testutil.MakeBlockMetadatasFromStartHeight(startHeight, 10, tag)
+ err := s.accessor.PersistBlockMetas(ctx, false, blocks, nil)
+ require.NoError(err)
+
+ // GetBlocksByHeightRange should still work even without watermark
+ // This is important for defense-in-depth validation
+ fetchedBlocks, err := s.accessor.GetBlocksByHeightRange(ctx, tag, startHeight, startHeight+10)
+ require.NoError(err)
+ require.Len(fetchedBlocks, 10)
+
+ // Verify the blocks are correct
+ sort.Slice(fetchedBlocks, func(i, j int) bool {
+ return fetchedBlocks[i].Height < fetchedBlocks[j].Height
+ })
+ for i := 0; i < 10; i++ {
+ s.equalProto(blocks[i], fetchedBlocks[i])
+ }
+}
+
+func TestIntegrationBlockStorageTestSuite(t *testing.T) {
+ require := testutil.Require(t)
+ cfg, err := config.New()
+ require.NoError(err)
+ suite.Run(t, &blockStorageTestSuite{config: cfg})
+}
diff --git a/internal/storage/metastorage/postgres/connection.go b/internal/storage/metastorage/postgres/connection.go
new file mode 100644
index 0000000..fb304f4
--- /dev/null
+++ b/internal/storage/metastorage/postgres/connection.go
@@ -0,0 +1,72 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ _ "github.com/lib/pq"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+func newDBConnection(ctx context.Context, cfg *config.PostgresConfig) (*sql.DB, error) {
+ logger := log.WithPackage(log.NewDevelopment())
+
+ // Build PostgreSQL connection string with timeout
+ dsn := fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s sslmode=%s",
+ cfg.Host, cfg.Port, cfg.Database, cfg.User, cfg.Password, cfg.SSLMode)
+
+	// Add connect_timeout if specified
+	if cfg.ConnectTimeout > 0 {
+		dsn += fmt.Sprintf(" connect_timeout=%d", int(cfg.ConnectTimeout.Seconds()))
+	}
+
+	// Add statement_timeout if specified. lib/pq passes unrecognized connection-string
+	// keys to the server as run-time parameters, so setting it here applies to every
+	// pooled connection; a one-off "SET statement_timeout" after sql.Open would only
+	// affect the single connection it happens to run on.
+	if cfg.StatementTimeout > 0 {
+		dsn += fmt.Sprintf(" statement_timeout=%d", cfg.StatementTimeout.Milliseconds())
+	}
+
+ // Debug output for CI troubleshooting
+ logger.Debug("Connecting to PostgreSQL",
+ zap.String("host", cfg.Host),
+ zap.Int("port", cfg.Port),
+ zap.String("database", cfg.Database),
+ zap.String("ssl_mode", cfg.SSLMode))
+
+ // Open database connection
+ db, err := sql.Open("postgres", dsn)
+ if err != nil {
+ logger.Error("Failed to open connection", zap.Error(err))
+ return nil, err
+ }
+
+	if pingErr := db.PingContext(ctx); pingErr != nil {
+		logger.Error("Failed to ping database", zap.Error(pingErr))
+		_ = db.Close() // avoid leaking the connection pool on a failed ping
+		return nil, pingErr
+	}
+
+ logger.Debug("Successfully connected to PostgreSQL")
+
+ // Configure connection pool and timeouts
+ db.SetMaxOpenConns(cfg.MaxConnections)
+ db.SetMaxIdleConns(cfg.MinConnections)
+ db.SetConnMaxLifetime(cfg.MaxLifetime)
+ db.SetConnMaxIdleTime(cfg.MaxIdleTime)
+
+
+ // Always run migrations - goose will check which migrations have been applied
+ // and only run new ones. This ensures incremental migrations work properly.
+ logger.Debug("Running database migrations")
+ if err := runMigrations(ctx, db); err != nil {
+ return nil, xerrors.Errorf("failed to run migrations: %w", err)
+ }
+ logger.Debug("Migrations completed successfully")
+
+ return db, nil
+}
diff --git a/internal/storage/metastorage/postgres/connection_pool.go b/internal/storage/metastorage/postgres/connection_pool.go
new file mode 100644
index 0000000..af61b0e
--- /dev/null
+++ b/internal/storage/metastorage/postgres/connection_pool.go
@@ -0,0 +1,185 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/utils/log"
+)
+
+// ConnectionPool manages a shared database connection pool
+type ConnectionPool struct {
+ db *sql.DB
+ config *config.PostgresConfig
+ mu sync.RWMutex
+ closed bool
+ logger *zap.Logger
+}
+
+// poolManager is the process-wide singleton that tracks connection pools keyed by connection string
+var (
+ poolManager = &ConnectionPoolManager{
+ pools: make(map[string]*ConnectionPool),
+ }
+)
+
+// ConnectionPoolManager manages multiple connection pools
+type ConnectionPoolManager struct {
+ mu sync.RWMutex
+ pools map[string]*ConnectionPool
+}
+
+// GetConnectionPool returns a shared connection pool for the given config
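+// The pool is shared process-wide, so callers should not Close it individually;
+// call CloseAllConnectionPools once during application shutdown instead.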
+func GetConnectionPool(ctx context.Context, cfg *config.PostgresConfig) (*ConnectionPool, error) {
+ return poolManager.GetOrCreate(ctx, cfg)
+}
+
+// GetOrCreate returns an existing connection pool or creates a new one
+func (cpm *ConnectionPoolManager) GetOrCreate(ctx context.Context, cfg *config.PostgresConfig) (*ConnectionPool, error) {
+ // Create a unique key for this configuration
+ key := fmt.Sprintf("%s:%d/%s?user=%s", cfg.Host, cfg.Port, cfg.Database, cfg.User)
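+	// Note: the key excludes the password, so two configs that differ only by
+	// password for the same user will share whichever pool was created first.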
+
+	cpm.mu.RLock()
+	if pool, exists := cpm.pools[key]; exists && !pool.isClosed() {
+		cpm.mu.RUnlock()
+		return pool, nil
+	}
+	cpm.mu.RUnlock()
+
+ // Need to create new connection pool
+ cpm.mu.Lock()
+ defer cpm.mu.Unlock()
+
+	// Double-check pattern
+	if pool, exists := cpm.pools[key]; exists && !pool.isClosed() {
+		return pool, nil
+	}
+ }
+
+ // Create new connection pool
+ pool, err := NewConnectionPool(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ cpm.pools[key] = pool
+ return pool, nil
+}
+
+// CloseAll closes all connection pools
+func (cpm *ConnectionPoolManager) CloseAll() error {
+ cpm.mu.Lock()
+ defer cpm.mu.Unlock()
+
+ var errors []error
+ for key, pool := range cpm.pools {
+ if err := pool.Close(); err != nil {
+ errors = append(errors, xerrors.Errorf("failed to close pool %s: %w", key, err))
+ }
+ }
+
+ // Clear the pools map
+ cpm.pools = make(map[string]*ConnectionPool)
+
+ if len(errors) > 0 {
+ return xerrors.Errorf("errors closing connection pools: %v", errors)
+ }
+ return nil
+}
+
+// NewConnectionPool creates a new connection pool
+func NewConnectionPool(ctx context.Context, cfg *config.PostgresConfig) (*ConnectionPool, error) {
+ logger := log.WithPackage(log.NewDevelopment())
+
+ db, err := newDBConnection(ctx, cfg)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create database connection: %w", err)
+ }
+
+ pool := &ConnectionPool{
+ db: db,
+ config: cfg,
+ logger: logger,
+ }
+
+ logger.Debug("Created new connection pool",
+ zap.String("host", cfg.Host),
+ zap.Int("port", cfg.Port),
+ zap.String("database", cfg.Database),
+ zap.Int("max_connections", cfg.MaxConnections),
+ )
+
+ return pool, nil
+}
+
+// DB returns the underlying database connection
+func (cp *ConnectionPool) DB() *sql.DB {
+	cp.mu.RLock()
+	defer cp.mu.RUnlock()
+
+	if cp.closed {
+		return nil
+	}
+	return cp.db
+}
+
+// isClosed reports whether the pool has been closed. Reading the flag under the
+// pool's own lock avoids a data race with Close().
+func (cp *ConnectionPool) isClosed() bool {
+	cp.mu.RLock()
+	defer cp.mu.RUnlock()
+	return cp.closed
+}
+
+// Close closes the connection pool
+func (cp *ConnectionPool) Close() error {
+ cp.mu.Lock()
+ defer cp.mu.Unlock()
+
+ if cp.closed {
+ return nil
+ }
+
+ cp.closed = true
+
+ if cp.db != nil {
+ if err := cp.db.Close(); err != nil {
+ cp.logger.Error("Failed to close database connection", zap.Error(err))
+ return err
+ }
+ cp.logger.Debug("Connection pool closed successfully")
+ }
+
+ return nil
+}
+
+// Stats returns connection pool statistics
+func (cp *ConnectionPool) Stats() sql.DBStats {
+ cp.mu.RLock()
+ defer cp.mu.RUnlock()
+
+ if cp.closed || cp.db == nil {
+ return sql.DBStats{}
+ }
+
+ return cp.db.Stats()
+}
+
+// Health checks if the connection pool is healthy
+func (cp *ConnectionPool) Health(ctx context.Context) error {
+ cp.mu.RLock()
+ defer cp.mu.RUnlock()
+
+ if cp.closed {
+ return xerrors.New("connection pool is closed")
+ }
+
+ if cp.db == nil {
+ return xerrors.New("database connection is nil")
+ }
+
+ return cp.db.PingContext(ctx)
+}
+
+// CloseAllConnectionPools closes all managed connection pools
+// This should be called during application shutdown
+func CloseAllConnectionPools() error {
+ return poolManager.CloseAll()
+}
diff --git a/internal/storage/metastorage/postgres/db/migrations/20240101000002_init_schema.sql b/internal/storage/metastorage/postgres/db/migrations/20240101000002_init_schema.sql
new file mode 100644
index 0000000..125ac70
--- /dev/null
+++ b/internal/storage/metastorage/postgres/db/migrations/20240101000002_init_schema.sql
@@ -0,0 +1,72 @@
+-- +goose Up
+-- Create block_metadata table (append-only storage for every block ever observed)
+CREATE TABLE block_metadata (
+ id BIGSERIAL PRIMARY KEY, -- for canonical_blocks and event fk reference
+ height BIGINT NOT NULL,
+ tag INT NOT NULL,
+ hash VARCHAR(66), -- can hold a "0x"+64-hex string
+ parent_hash VARCHAR(66),
+ parent_height BIGINT NOT NULL DEFAULT 0,
+ object_key_main VARCHAR(255),
+ timestamp BIGINT NOT NULL, -- Unix timestamp in seconds
+ skipped BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+-- Enforce uniqueness rules based on block processing status:
+-- 1. For normal blocks (skipped = FALSE), (tag, hash) must be unique, even if hash is NULL.
+-- 2. For skipped blocks (skipped = TRUE), hash is always NULL, so enforce uniqueness on (tag, height) instead.
+-- These partial unique indexes ensure correct behavior for both cases without violating constraints on NULLs.
+CREATE UNIQUE INDEX unique_tag_hash_regular ON block_metadata(tag, hash)
+WHERE hash IS NOT NULL AND NOT skipped;
+CREATE UNIQUE INDEX unique_tag_height_skipped ON block_metadata(tag, height)
+WHERE skipped = true;
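+-- Note: these partial indexes also serve as the ON CONFLICT arbiter targets for
+-- the upserts in block_storage.go and event_storage.go.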
+
+-- Create canonical_blocks table (track the "winning" block at each height)
+CREATE TABLE canonical_blocks (
+ height BIGINT NOT NULL,
+ block_metadata_id BIGINT NOT NULL,
+ tag INT NOT NULL,
+ -- Constraints
+ PRIMARY KEY (height, tag),
+ UNIQUE (
+ height,
+ tag,
+ block_metadata_id
+ ), -- Prevent same block from being canonical multiple times
+ FOREIGN KEY (block_metadata_id) REFERENCES block_metadata (id) ON DELETE RESTRICT
+);
+
+-- Supports: JOINs between canonical_blocks and block_metadata tables
+CREATE INDEX idx_canonical_block_metadata_fk ON canonical_blocks (block_metadata_id);
+
+-- Create block_events table (append-only stream of all blockchain state changes)
+CREATE TYPE event_type_enum AS ENUM ('BLOCK_ADDED', 'BLOCK_REMOVED', 'UNKNOWN');
+
+CREATE TABLE block_events (
+ event_tag INT NOT NULL DEFAULT 0, -- version
+ event_sequence BIGINT NOT NULL, -- monotonically-increasing per tag
+ event_type event_type_enum NOT NULL,
+ block_metadata_id BIGINT NOT NULL, -- fk referencing block_metadata
+ height BIGINT NOT NULL,
+ hash VARCHAR(66),
+ -- Constraints
+ PRIMARY KEY (event_tag, event_sequence),
+ FOREIGN KEY (block_metadata_id) REFERENCES block_metadata (id) ON DELETE RESTRICT
+);
+
+-- Supports: GetEventsByBlockHeight(), GetFirstEventIdByBlockHeight()
+CREATE INDEX idx_events_height_tag ON block_events (height, event_tag);
+-- Supports: JOINs to get full block details from events
+CREATE INDEX idx_events_block_meta ON block_events (block_metadata_id);
+
+-- +goose Down
+-- Drop tables in reverse order due to foreign key constraints
+
+DROP TABLE IF EXISTS block_events;
+
+DROP TABLE IF EXISTS canonical_blocks;
+
+DROP TABLE IF EXISTS block_metadata;
+
+-- Drop custom types
+DROP TYPE IF EXISTS event_type_enum;
\ No newline at end of file
diff --git a/internal/storage/metastorage/postgres/db/migrations/20240101000003_add_timestamp_index.sql b/internal/storage/metastorage/postgres/db/migrations/20240101000003_add_timestamp_index.sql
new file mode 100644
index 0000000..08d3e3b
--- /dev/null
+++ b/internal/storage/metastorage/postgres/db/migrations/20240101000003_add_timestamp_index.sql
@@ -0,0 +1,7 @@
+-- +goose Up
+-- Add timestamp index to block_metadata table for efficient time-based queries
+CREATE INDEX idx_block_metadata_timestamp ON block_metadata(timestamp);
+
+-- +goose Down
+-- Remove the timestamp index
+DROP INDEX IF EXISTS idx_block_metadata_timestamp;
\ No newline at end of file
diff --git a/internal/storage/metastorage/postgres/db/migrations/20250129000001_add_watermark.sql b/internal/storage/metastorage/postgres/db/migrations/20250129000001_add_watermark.sql
new file mode 100644
index 0000000..bc3421a
--- /dev/null
+++ b/internal/storage/metastorage/postgres/db/migrations/20250129000001_add_watermark.sql
@@ -0,0 +1,30 @@
+-- +goose Up
+-- Add is_watermark column to canonical_blocks table
+-- This column controls visibility of blocks to the streamer
+-- Blocks are written with is_watermark=FALSE initially, then set to TRUE after validation
+ALTER TABLE canonical_blocks ADD COLUMN is_watermark BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Create partial index for efficient watermark queries
+-- This index only includes watermarked blocks, keeping it small
+CREATE INDEX idx_canonical_watermark ON canonical_blocks (tag, height DESC) WHERE is_watermark = TRUE;
+
+-- Set watermark on the current highest block for each tag to prevent GetLatestBlock failures
+-- This ensures continuity during migration by marking existing latest blocks as validated
+-- Use CTE for better performance on large tables (avoids correlated subquery)
+WITH max_heights AS (
+ SELECT tag, MAX(height) as max_height
+ FROM canonical_blocks
+ GROUP BY tag
+)
+UPDATE canonical_blocks
+SET is_watermark = TRUE
+FROM max_heights
+WHERE canonical_blocks.tag = max_heights.tag
+ AND canonical_blocks.height = max_heights.max_height;
+
+-- +goose Down
+-- Drop the partial index first
+DROP INDEX IF EXISTS idx_canonical_watermark;
+
+-- Drop the is_watermark column
+ALTER TABLE canonical_blocks DROP COLUMN IF EXISTS is_watermark;
\ No newline at end of file
diff --git a/internal/storage/metastorage/postgres/event_storage.go b/internal/storage/metastorage/postgres/event_storage.go
new file mode 100644
index 0000000..75af580
--- /dev/null
+++ b/internal/storage/metastorage/postgres/event_storage.go
@@ -0,0 +1,651 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/lib/pq"
+
+ "github.com/coinbase/chainstorage/internal/storage/internal/errors"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ pgmodel "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres/model"
+ "github.com/coinbase/chainstorage/internal/utils/instrument"
+)
+
+const (
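+	// addEventsSafePadding is how many events immediately preceding a new batch are
+	// re-read so ValidateEvents can check sequence continuity across the batch boundary.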
+ addEventsSafePadding = int64(20)
+)
+
+type (
+ eventStorageImpl struct {
+ db *sql.DB
+ instrumentAddEvents instrument.Instrument
+ instrumentGetEventByEventId instrument.InstrumentWithResult[*model.EventEntry]
+ instrumentGetEventsAfterEventId instrument.InstrumentWithResult[[]*model.EventEntry]
+ instrumentGetEventsByEventIdRange instrument.InstrumentWithResult[[]*model.EventEntry]
+ instrumentGetMaxEventId instrument.InstrumentWithResult[int64]
+ instrumentSetMaxEventId instrument.Instrument
+ instrumentGetFirstEventIdByBlockHeight instrument.InstrumentWithResult[int64]
+ instrumentGetEventsByBlockHeight instrument.InstrumentWithResult[[]*model.EventEntry]
+ }
+)
+
+func newEventStorage(db *sql.DB, params Params) (internal.EventStorage, error) {
+ metrics := params.Metrics.SubScope("event_storage").Tagged(map[string]string{
+ "storage_type": "postgres",
+ })
+ storage := &eventStorageImpl{
+ db: db,
+ instrumentAddEvents: instrument.New(metrics, "add_events"),
+ instrumentGetEventByEventId: instrument.NewWithResult[*model.EventEntry](metrics, "get_event_by_event_id"),
+ instrumentGetEventsAfterEventId: instrument.NewWithResult[[]*model.EventEntry](metrics, "get_events_after_event_id"),
+ instrumentGetEventsByEventIdRange: instrument.NewWithResult[[]*model.EventEntry](metrics, "get_events_by_event_id_range"),
+ instrumentGetMaxEventId: instrument.NewWithResult[int64](metrics, "get_max_event_id"),
+ instrumentSetMaxEventId: instrument.New(metrics, "set_max_event_id"),
+ instrumentGetFirstEventIdByBlockHeight: instrument.NewWithResult[int64](metrics, "get_first_event_id_by_block_height"),
+ instrumentGetEventsByBlockHeight: instrument.NewWithResult[[]*model.EventEntry](metrics, "get_events_by_block_height"),
+ }
+ return storage, nil
+}
+
+func (e *eventStorageImpl) AddEvents(ctx context.Context, eventTag uint32, events []*model.BlockEvent) error {
+ if len(events) == 0 {
+ return nil
+ }
+ return e.instrumentAddEvents.Instrument(ctx, func(ctx context.Context) error {
+ maxEventId, err := e.GetMaxEventId(ctx, eventTag)
+ var startEventId int64
+ if err != nil {
+ if !xerrors.Is(err, errors.ErrNoEventHistory) {
+ return xerrors.Errorf("failed to get max event id: %w", err)
+ }
+ startEventId = model.EventIdStartValue
+ } else {
+ startEventId = maxEventId + 1
+ }
+
+ eventEntries := model.ConvertBlockEventsToEventEntries(events, eventTag, startEventId)
+ return e.AddEventEntries(ctx, eventTag, eventEntries)
+ })
+}
+
+func (e *eventStorageImpl) AddEventEntries(ctx context.Context, eventTag uint32, eventEntries []*model.EventEntry) error {
+ if len(eventEntries) == 0 {
+ return nil
+ }
+ return e.instrumentAddEvents.Instrument(ctx, func(ctx context.Context) error {
+ startEventId := eventEntries[0].EventId
+ var eventsToValidate []*model.EventEntry
+ startFetchId := startEventId - addEventsSafePadding
+ if startFetchId < model.EventIdStartValue {
+ startFetchId = model.EventIdStartValue
+ }
+ if startFetchId < startEventId {
+ beforeEvents, err := e.GetEventsByEventIdRange(ctx, eventTag, startFetchId, startEventId)
+ if err != nil {
+ return xerrors.Errorf("failed to fetch events: %w", err)
+ }
+ eventsToValidate = append(beforeEvents, eventEntries...)
+ } else {
+ eventsToValidate = eventEntries
+ }
+
+ if err := internal.ValidateEvents(eventsToValidate); err != nil {
+ return xerrors.Errorf("events failed validation: %w", err)
+ }
+
+ // Create transaction with timeout context for event operations
+ txCtx, cancel := context.WithTimeout(ctx, 180*time.Second)
+ defer cancel()
+
+ tx, err := e.db.BeginTx(txCtx, nil)
+ if err != nil {
+ return xerrors.Errorf("failed to start transaction: %w", err)
+ }
+ committed := false
+ defer func() {
+ if !committed {
+ if rollbackErr := tx.Rollback(); rollbackErr != nil {
+ _ = rollbackErr
+ }
+ }
+ }()
+
+ // Stage incoming events (no per-row lookups) for set-based processing
+ if _, err := tx.ExecContext(ctx, `
+ CREATE TEMP TABLE temp_events (
+ event_tag INT NOT NULL,
+ event_sequence BIGINT NOT NULL,
+ event_type TEXT NOT NULL,
+ height BIGINT NOT NULL,
+ hash VARCHAR(66),
+ bm_tag INT NOT NULL,
+ skipped BOOLEAN NOT NULL
+ ) ON COMMIT DROP
+ `); err != nil {
+ return xerrors.Errorf("failed to create temp_events: %w", err)
+ }
+
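+		// pq.CopyIn uses the COPY FROM STDIN protocol: each stmt.Exec buffers one row
+		// client-side, and the final zero-argument Exec flushes the stream to the server.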
+ stmt, err := tx.Prepare(pq.CopyIn("temp_events",
+ "event_tag", "event_sequence", "event_type", "height", "hash", "bm_tag", "skipped"))
+ if err != nil {
+ return xerrors.Errorf("failed to prepare COPY for temp_events: %w", err)
+ }
+
+		for _, entry := range eventEntries {
+			if _, err := stmt.Exec(
+				eventTag,
+				entry.EventId,
+				pgmodel.EventTypeToString(entry.EventType),
+				entry.BlockHeight,
+				entry.BlockHash,
+				int(entry.Tag),
+				entry.BlockSkipped,
+			); err != nil {
+				_ = stmt.Close()
+				return xerrors.Errorf("failed to buffer temp_events row: %w", err)
+			}
+		}
+
+ if _, err := stmt.Exec(); err != nil {
+ _ = stmt.Close()
+ return xerrors.Errorf("failed to finalize COPY temp_events: %w", err)
+ }
+ if err := stmt.Close(); err != nil {
+ return xerrors.Errorf("failed to close COPY statement: %w", err)
+ }
+
+ // Ensure skipped block_metadata rows exist in bulk
+ if _, err := tx.ExecContext(ctx, `
+ INSERT INTO block_metadata (height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped)
+ SELECT DISTINCT e.height, e.bm_tag, NULL, NULL, 0, NULL, 0, true
+ FROM temp_events e
+ WHERE e.skipped = true
+ ON CONFLICT (tag, height) WHERE skipped = true DO NOTHING
+ `); err != nil {
+ return xerrors.Errorf("failed to upsert skipped block_metadata: %w", err)
+ }
+
+ // Insert non-skipped events by joining on (tag, hash) with fallback for DefaultBlockTag via UNION ALL
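+		// The UNION ALL branch treats block_metadata rows stored with tag 0 (unset) as
+		// belonging to the default block tag, mirroring the read-side fallback in
+		// GetEventByEventId.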
+ if _, err := tx.ExecContext(ctx, `
+ INSERT INTO block_events (event_tag, event_sequence, event_type, block_metadata_id, height, hash)
+ SELECT e.event_tag, e.event_sequence, e.event_type::event_type_enum, bm.id, e.height, e.hash
+ FROM temp_events e
+ JOIN block_metadata bm
+ ON bm.tag = e.bm_tag AND bm.hash = e.hash AND bm.skipped = false
+ WHERE e.skipped = false
+ UNION ALL
+ SELECT e.event_tag, e.event_sequence, e.event_type::event_type_enum, bm.id, e.height, e.hash
+ FROM temp_events e
+ JOIN block_metadata bm
+ ON e.bm_tag = $1 AND bm.tag = 0 AND bm.hash = e.hash AND bm.skipped = false
+ WHERE e.skipped = false
+ ON CONFLICT (event_tag, event_sequence) DO NOTHING
+ `, model.DefaultBlockTag); err != nil {
+ return xerrors.Errorf("failed to insert non-skipped events: %w", err)
+ }
+
+ // Insert skipped events by joining on (tag, height, skipped=true) with fallback for DefaultBlockTag via UNION ALL
+ if _, err := tx.ExecContext(txCtx, `
+ INSERT INTO block_events (event_tag, event_sequence, event_type, block_metadata_id, height, hash)
+ SELECT e.event_tag, e.event_sequence, e.event_type::event_type_enum, bm.id, e.height, e.hash
+ FROM temp_events e
+ JOIN block_metadata bm
+ ON bm.tag = e.bm_tag AND bm.height = e.height AND bm.skipped = true
+ WHERE e.skipped = true
+ UNION ALL
+ SELECT e.event_tag, e.event_sequence, e.event_type::event_type_enum, bm.id, e.height, e.hash
+ FROM temp_events e
+ JOIN block_metadata bm
+ ON e.bm_tag = $1 AND bm.tag = 0 AND bm.height = e.height AND bm.skipped = true
+ WHERE e.skipped = true
+ ON CONFLICT (event_tag, event_sequence) DO NOTHING
+ `, model.DefaultBlockTag); err != nil {
+ return xerrors.Errorf("failed to insert skipped events: %w", err)
+ }
+
+ if err := tx.Commit(); err != nil {
+ return xerrors.Errorf("failed to commit transaction: %w", err)
+ }
+ committed = true
+ return nil
+ })
+}
+
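+// GetEventByEventId returns the event stored under eventTag with the given
+// event id (sequence number), or errors.ErrItemNotFound if it does not exist.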
+func (e *eventStorageImpl) GetEventByEventId(ctx context.Context, eventTag uint32, eventId int64) (*model.EventEntry, error) {
+ return e.instrumentGetEventByEventId.Instrument(ctx, func(ctx context.Context) (*model.EventEntry, error) {
+ var eventEntry model.EventEntry
+ var eventTypeStr string
+ var blockHash sql.NullString
+ var tag sql.NullInt32
+ var parentHash sql.NullString
+ var skipped sql.NullBool
+ var timestamp sql.NullInt64
+
+ err := e.db.QueryRowContext(ctx, `
+ SELECT be.event_sequence, be.event_type, be.height, be.hash,
+ bm.tag, bm.parent_hash, bm.skipped, bm.timestamp, be.event_tag
+ FROM block_events be
+ LEFT JOIN block_metadata bm ON be.block_metadata_id = bm.id
+ WHERE be.event_tag = $1 AND be.event_sequence = $2
+ `, eventTag, eventId).Scan(
+ &eventEntry.EventId,
+ &eventTypeStr,
+ &eventEntry.BlockHeight,
+ &blockHash,
+ &tag,
+ &parentHash,
+ &skipped,
+ ×tamp,
+ &eventEntry.EventTag,
+ )
+
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, errors.ErrItemNotFound
+ }
+ return nil, xerrors.Errorf("failed to get event by event id: %w", err)
+ }
+
+ // Handle null values from LEFT JOIN
+ if blockHash.Valid {
+ eventEntry.BlockHash = blockHash.String
+ } else {
+ eventEntry.BlockHash = ""
+ }
+ if tag.Valid {
+ eventEntry.Tag = uint32(tag.Int32)
+ } else {
+ eventEntry.Tag = model.DefaultBlockTag
+ }
+ if parentHash.Valid {
+ eventEntry.ParentHash = parentHash.String
+ } else {
+ eventEntry.ParentHash = ""
+ }
+ if skipped.Valid {
+ eventEntry.BlockSkipped = skipped.Bool
+ } else {
+ eventEntry.BlockSkipped = false
+ }
+ if timestamp.Valid {
+ eventEntry.BlockTimestamp = timestamp.Int64
+ } else {
+ eventEntry.BlockTimestamp = 0
+ }
+
+ // fall back to DefaultBlockTag if the tag is not set
+ if eventEntry.Tag == 0 {
+ eventEntry.Tag = model.DefaultBlockTag
+ }
+
+ eventEntry.EventType = pgmodel.ParseEventType(eventTypeStr)
+ return &eventEntry, nil
+ })
+}
+
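+// GetEventsAfterEventId returns up to maxEvents events with ids strictly
+// greater than eventId, in ascending order.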
+func (e *eventStorageImpl) GetEventsAfterEventId(ctx context.Context, eventTag uint32, eventId int64, maxEvents uint64) ([]*model.EventEntry, error) {
+ return e.instrumentGetEventsAfterEventId.Instrument(ctx, func(ctx context.Context) ([]*model.EventEntry, error) {
+ rows, err := e.db.QueryContext(ctx, `
+ SELECT be.event_sequence, be.event_type, be.height, be.hash, bm.tag, bm.parent_hash,
+ bm.skipped, bm.timestamp, be.event_tag
+ FROM block_events be
+ LEFT JOIN block_metadata bm ON be.block_metadata_id = bm.id
+ WHERE be.event_tag = $1 AND be.event_sequence > $2
+ ORDER BY be.event_sequence ASC
+ LIMIT $3
+ `, eventTag, eventId, maxEvents)
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get events after event id: %w", err)
+ }
+
+ // Close errors are intentionally dropped; scanEventEntries surfaces
+ // iteration errors via rows.Err().
+ defer func() {
+ _ = rows.Close()
+ }()
+
+ return e.scanEventEntries(rows)
+ })
+}
+
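+// GetEventsByEventIdRange returns the events in [minEventId, maxEventId). The
+// range must be fully populated; a partial result yields errors.ErrItemNotFound.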
+func (e *eventStorageImpl) GetEventsByEventIdRange(ctx context.Context, eventTag uint32, minEventId int64, maxEventId int64) ([]*model.EventEntry, error) {
+ return e.instrumentGetEventsByEventIdRange.Instrument(ctx, func(ctx context.Context) ([]*model.EventEntry, error) {
+ rows, err := e.db.QueryContext(ctx, `
+ SELECT be.event_sequence, be.event_type, be.height, be.hash, bm.tag, bm.parent_hash,
+ bm.skipped, bm.timestamp, be.event_tag
+ FROM block_events be
+ LEFT JOIN block_metadata bm ON be.block_metadata_id = bm.id
+ WHERE be.event_tag = $1 AND be.event_sequence >= $2 AND be.event_sequence < $3
+ ORDER BY be.event_sequence ASC
+ `, eventTag, minEventId, maxEventId)
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get events by event id range: %w", err)
+ }
+
+ defer func() {
+ _ = rows.Close()
+ }()
+
+ events, err := e.scanEventEntries(rows)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate that we have all events in the range
+ expectedCount := maxEventId - minEventId
+ if int64(len(events)) != expectedCount {
+ return nil, errors.ErrItemNotFound
+ }
+
+ return events, nil
+ })
+}
+
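+// GetMaxEventId returns the highest event id (the watermark) recorded for
+// eventTag, or errors.ErrNoEventHistory if no events exist.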
+func (e *eventStorageImpl) GetMaxEventId(ctx context.Context, eventTag uint32) (int64, error) {
+ return e.instrumentGetMaxEventId.Instrument(ctx, func(ctx context.Context) (int64, error) {
+ var maxEventId sql.NullInt64
+ err := e.db.QueryRowContext(ctx, `
+ SELECT MAX(event_sequence) FROM block_events WHERE event_tag = $1
+ `, eventTag).Scan(&maxEventId) // the max event id is the stream watermark
+ if err != nil {
+ return 0, xerrors.Errorf("failed to get max event id: %w", err)
+ }
+ if !maxEventId.Valid {
+ return 0, errors.ErrNoEventHistory
+ }
+ return maxEventId.Int64, nil
+ })
+}
+
+// SetMaxEventId truncates the event history for eventTag. For example, given
+// events 1,2,3,4,5,6,7, SetMaxEventId(ctx, eventTag, 4) deletes all events
+// after event id 4. Passing model.EventIdDeleted deletes every event for the tag.
+func (e *eventStorageImpl) SetMaxEventId(ctx context.Context, eventTag uint32, maxEventId int64) error {
+ return e.instrumentSetMaxEventId.Instrument(ctx, func(ctx context.Context) error {
+ if maxEventId < model.EventIdStartValue && maxEventId != model.EventIdDeleted {
+ return xerrors.Errorf("invalid max event id: %d", maxEventId)
+ }
+
+ txCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+ defer cancel()
+
+ tx, err := e.db.BeginTx(txCtx, nil)
+ if err != nil {
+ return xerrors.Errorf("failed to start transaction: %w", err)
+ }
+ committed := false
+ defer func() {
+ if !committed {
+ if rollbackErr := tx.Rollback(); rollbackErr != nil {
+ // Ignore the rollback error so it doesn't mask the original error
+ _ = rollbackErr
+ }
+ }
+ }()
+
+ if maxEventId == model.EventIdDeleted {
+ // Delete all events for this tag
+ _, err = tx.ExecContext(txCtx, `
+ DELETE FROM block_events WHERE event_tag = $1
+ `, eventTag)
+ if err != nil {
+ return xerrors.Errorf("failed to delete events: %w", err)
+ }
+ } else {
+ // Validate the new max event ID exists
+ var exists bool
+ err = tx.QueryRowContext(txCtx, `
+ SELECT EXISTS(SELECT 1 FROM block_events WHERE event_tag = $1 AND event_sequence = $2)
+ `, eventTag, maxEventId).Scan(&exists)
+ if err != nil {
+ return xerrors.Errorf("failed to validate max event id: %w", err)
+ }
+ if !exists {
+ return xerrors.Errorf("event entry with max event id %d does not exist", maxEventId)
+ }
+ // Delete events beyond the max event ID
+ _, err = tx.ExecContext(txCtx, `
+ DELETE FROM block_events WHERE event_tag = $1 AND event_sequence > $2
+ `, eventTag, maxEventId)
+ if err != nil {
+ return xerrors.Errorf("failed to delete events beyond max event id: %w", err)
+ }
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return xerrors.Errorf("failed to commit transaction: %w", err)
+ }
+ committed = true
+ return nil
+ })
+}
+
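+// GetFirstEventIdByBlockHeight returns the smallest event id recorded at
+// blockHeight for eventTag, or errors.ErrItemNotFound if none exists.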
+func (e *eventStorageImpl) GetFirstEventIdByBlockHeight(ctx context.Context, eventTag uint32, blockHeight uint64) (int64, error) {
+ return e.instrumentGetFirstEventIdByBlockHeight.Instrument(ctx, func(ctx context.Context) (int64, error) {
+ // MIN over an empty set yields a single NULL row rather than
+ // sql.ErrNoRows, so scan into a nullable value.
+ var firstEventId sql.NullInt64
+
+ err := e.db.QueryRowContext(ctx, `
+ SELECT MIN(be.event_sequence)
+ FROM block_events be
+ WHERE be.event_tag = $1 AND be.height = $2
+ `, eventTag, blockHeight).Scan(&firstEventId)
+
+ if err != nil {
+ return 0, xerrors.Errorf("failed to get first event id by block height: %w", err)
+ }
+ if !firstEventId.Valid {
+ return 0, errors.ErrItemNotFound
+ }
+
+ return firstEventId.Int64, nil
+ })
+}
+
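+// GetEventsByBlockHeight returns all events recorded at blockHeight for
+// eventTag, ordered by event id; errors.ErrItemNotFound if none exist.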
+func (e *eventStorageImpl) GetEventsByBlockHeight(ctx context.Context, eventTag uint32, blockHeight uint64) ([]*model.EventEntry, error) {
+ return e.instrumentGetEventsByBlockHeight.Instrument(ctx, func(ctx context.Context) ([]*model.EventEntry, error) {
+ rows, err := e.db.QueryContext(ctx, `
+ SELECT be.event_sequence, be.event_type, be.height, be.hash, bm.tag, bm.parent_hash,
+ bm.skipped, bm.timestamp, be.event_tag
+ FROM block_events be
+ LEFT JOIN block_metadata bm ON be.block_metadata_id = bm.id
+ WHERE be.event_tag = $1 AND be.height = $2
+ ORDER BY be.event_sequence ASC
+ `, eventTag, blockHeight)
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get events by block height: %w", err)
+ }
+
+ defer func() {
+ _ = rows.Close()
+ }()
+
+ events, err := e.scanEventEntries(rows)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(events) == 0 {
+ return nil, errors.ErrItemNotFound
+ }
+
+ return events, nil
+ })
+}
+
+// Helper functions
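+// getOrCreateBlockMetadataId resolves the block_metadata row backing an event,
+// falling back from eventEntry.Tag to tag 0 for DefaultBlockTag and creating a
+// skipped placeholder row when needed.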
+func (e *eventStorageImpl) getOrCreateBlockMetadataId(ctx context.Context, tx *sql.Tx, eventEntry *model.EventEntry) (int64, error) {
+ // For skipped blocks, create or find block metadata with specific fields
+ if eventEntry.BlockSkipped {
+ // Try to find existing block metadata for this skipped event
+ var blockMetadataId int64
+ err := tx.QueryRowContext(ctx, `
+ SELECT id FROM block_metadata WHERE tag = $1 AND height = $2 AND skipped = true
+ `, eventEntry.Tag, eventEntry.BlockHeight).Scan(&blockMetadataId)
+ if err == nil {
+ return blockMetadataId, nil
+ }
+ // If not found and eventEntry.Tag is DefaultBlockTag, try with tag = 0
+ if err == sql.ErrNoRows && eventEntry.Tag == model.DefaultBlockTag {
+ err = tx.QueryRowContext(ctx, `
+ SELECT id FROM block_metadata WHERE tag = $1 AND height = $2 AND skipped = true
+ `, uint32(0), eventEntry.BlockHeight).Scan(&blockMetadataId)
+
+ if err == nil {
+ return blockMetadataId, nil
+ }
+ }
+
+ // If block metadata not found for skipped event, create it
+ if err == sql.ErrNoRows {
+ return e.createSkippedBlockMetadata(ctx, tx, eventEntry)
+ }
+ return 0, xerrors.Errorf("failed to query block metadata: %w", err)
+ }
+
+ // For non-skipped blocks, look up by tag and hash
+ // First try with the eventEntry.Tag
+ var blockMetadataId int64
+ err := tx.QueryRowContext(ctx, `
+ SELECT id FROM block_metadata WHERE tag = $1 AND hash = $2
+ `, eventEntry.Tag, eventEntry.BlockHash).Scan(&blockMetadataId)
+ if err == nil {
+ return blockMetadataId, nil
+ }
+ // If not found and eventEntry.Tag is DefaultBlockTag, try with tag = 0
+ if err == sql.ErrNoRows && eventEntry.Tag == model.DefaultBlockTag {
+ err = tx.QueryRowContext(ctx, `
+ SELECT id FROM block_metadata WHERE tag = $1 AND hash = $2
+ `, uint32(0), eventEntry.BlockHash).Scan(&blockMetadataId)
+
+ if err == nil {
+ return blockMetadataId, nil
+ }
+ }
+
+ // If we get here, the block metadata was not found
+ if err == sql.ErrNoRows {
+ return 0, xerrors.Errorf("block metadata not found for tag %d and hash %s", eventEntry.Tag, eventEntry.BlockHash)
+ }
+ return 0, xerrors.Errorf("failed to query block metadata: %w", err)
+}
+
+// createSkippedBlockMetadata creates a new block_metadata entry for a skipped block
+func (e *eventStorageImpl) createSkippedBlockMetadata(ctx context.Context, tx *sql.Tx, eventEntry *model.EventEntry) (int64, error) {
+ // Create block metadata for skipped block with NULL values as specified
+ var blockMetadataId int64
+ err := tx.QueryRowContext(ctx, `
+ INSERT INTO block_metadata (height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped)
+ VALUES ($1, $2, NULL, NULL, $3, NULL, $4, true)
+ ON CONFLICT (tag, height) WHERE skipped = true DO UPDATE SET
+ hash = EXCLUDED.hash,
+ parent_hash = EXCLUDED.parent_hash,
+ parent_height = EXCLUDED.parent_height,
+ object_key_main = EXCLUDED.object_key_main,
+ timestamp = EXCLUDED.timestamp,
+ skipped = EXCLUDED.skipped
+ RETURNING id
+ `, eventEntry.BlockHeight, eventEntry.Tag, 0, 0).Scan(&blockMetadataId)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to create block metadata for skipped block: %w", err)
+ }
+
+ return blockMetadataId, nil
+}
+
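+// scanEventEntries converts rows from the block_events/block_metadata join
+// into EventEntry values, normalizing the NULL metadata columns produced by
+// the LEFT JOIN.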
+func (e *eventStorageImpl) scanEventEntries(rows *sql.Rows) ([]*model.EventEntry, error) {
+ var events []*model.EventEntry
+
+ for rows.Next() {
+ var eventEntry model.EventEntry
+ var eventTypeStr string
+ var blockHash sql.NullString
+ var tag sql.NullInt32
+ var parentHash sql.NullString
+ var skipped sql.NullBool
+ var timestamp sql.NullInt64
+
+ err := rows.Scan(
+ &eventEntry.EventId,
+ &eventTypeStr,
+ &eventEntry.BlockHeight,
+ &blockHash,
+ &tag,
+ &parentHash,
+ &skipped,
+ ×tamp,
+ &eventEntry.EventTag,
+ )
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to scan event entry: %w", err)
+ }
+
+ // Handle null values from LEFT JOIN
+ if blockHash.Valid {
+ eventEntry.BlockHash = blockHash.String
+ } else {
+ eventEntry.BlockHash = ""
+ }
+ if tag.Valid {
+ eventEntry.Tag = uint32(tag.Int32)
+ } else {
+ eventEntry.Tag = model.DefaultBlockTag
+ }
+ if parentHash.Valid {
+ eventEntry.ParentHash = parentHash.String
+ } else {
+ eventEntry.ParentHash = ""
+ }
+ if skipped.Valid {
+ eventEntry.BlockSkipped = skipped.Bool
+ } else {
+ eventEntry.BlockSkipped = false
+ }
+ if timestamp.Valid {
+ eventEntry.BlockTimestamp = timestamp.Int64
+ } else {
+ eventEntry.BlockTimestamp = 0
+ }
+
+ // fall back to DefaultBlockTag if the tag is not set
+ if eventEntry.Tag == 0 {
+ eventEntry.Tag = model.DefaultBlockTag
+ }
+
+ eventEntry.EventType = pgmodel.ParseEventType(eventTypeStr)
+ events = append(events, &eventEntry)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, xerrors.Errorf("error iterating over rows: %w", err)
+ }
+
+ return events, nil
+}
diff --git a/internal/storage/metastorage/postgres/event_storage_integration_test.go b/internal/storage/metastorage/postgres/event_storage_integration_test.go
new file mode 100644
index 0000000..144ad15
--- /dev/null
+++ b/internal/storage/metastorage/postgres/event_storage_integration_test.go
@@ -0,0 +1,598 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "go.uber.org/fx"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage/internal/errors"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+type eventStorageTestSuite struct {
+ suite.Suite
+ accessor internal.MetaStorage
+ config *config.Config
+ tag uint32
+ eventTag uint32
+ db *sql.DB
+}
+
+func (s *eventStorageTestSuite) SetupTest() {
+ require := testutil.Require(s.T())
+ var accessor internal.MetaStorage
+ cfg, err := config.New()
+ require.NoError(err)
+
+ // Skip tests if Postgres is not configured
+ if cfg.AWS.Postgres == nil {
+ s.T().Skip("Postgres not configured, skipping test suite")
+ return
+ }
+
+ app := testapp.New(
+ s.T(),
+ fx.Provide(NewMetaStorage),
+ testapp.WithIntegration(),
+ testapp.WithConfig(s.config),
+ fx.Populate(&accessor),
+ )
+ defer app.Close()
+ s.accessor = accessor
+ s.tag = 1
+ s.eventTag = 0
+
+ // Get database connection for cleanup
+ db, err := newDBConnection(context.Background(), cfg.AWS.Postgres)
+ require.NoError(err)
+ s.db = db
+}
+
+func (s *eventStorageTestSuite) TearDownTest() {
+ if s.db != nil {
+ ctx := context.Background()
+ s.T().Log("Clearing database tables after test")
+ // Clear all tables in reverse order due to foreign key constraints
+ tables := []string{"block_events", "canonical_blocks", "block_metadata"}
+ for _, table := range tables {
+ _, err := s.db.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s", table))
+ if err != nil {
+ s.T().Logf("Failed to clear table %s: %v", table, err)
+ }
+ }
+ }
+}
+
+func (s *eventStorageTestSuite) TearDownSuite() {
+ if s.db != nil {
+ s.db.Close()
+ }
+}
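+
+// addEvents persists block metadata for heights [startHeight, startHeight+numEvents)
+// and then records the corresponding BLOCK_ADDED events under eventTag.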
+func (s *eventStorageTestSuite) addEvents(eventTag uint32, startHeight uint64, numEvents uint64, tag uint32) {
+ // First, add block metadata for the events
+ blockMetas := testutil.MakeBlockMetadatasFromStartHeight(startHeight, int(numEvents), tag)
+ ctx := context.TODO()
+ err := s.accessor.PersistBlockMetas(ctx, true, blockMetas, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Then add events
+ blockEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, startHeight, startHeight+numEvents, tag)
+ err = s.accessor.AddEvents(ctx, eventTag, blockEvents)
+ if err != nil {
+ panic(err)
+ }
+}
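+
+// verifyEvents checks the watermark, range fetches, and per-event fields for
+// the numEvents events written by addEvents.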
+func (s *eventStorageTestSuite) verifyEvents(eventTag uint32, numEvents uint64, tag uint32) {
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+
+ watermark, err := s.accessor.GetMaxEventId(ctx, eventTag)
+ if err != nil {
+ panic(err)
+ }
+ require.Equal(watermark-model.EventIdStartValue, int64(numEvents-1))
+
+ // fetch range with missing item
+ _, err = s.accessor.GetEventsByEventIdRange(ctx, eventTag, model.EventIdStartValue, model.EventIdStartValue+int64(numEvents+100))
+ require.Error(err)
+ require.True(xerrors.Is(err, errors.ErrItemNotFound))
+
+ // fetch valid range
+ fetchedEvents, err := s.accessor.GetEventsByEventIdRange(ctx, eventTag, model.EventIdStartValue, model.EventIdStartValue+int64(numEvents))
+ if err != nil {
+ panic(err)
+ }
+ require.NotNil(fetchedEvents)
+ require.Equal(uint64(len(fetchedEvents)), numEvents)
+
+ numFollowingEventsToFetch := uint64(10)
+ for i, event := range fetchedEvents {
+ require.Equal(int64(i)+model.EventIdStartValue, event.EventId)
+ require.Equal(uint64(i), event.BlockHeight)
+ require.Equal(api.BlockchainEvent_BLOCK_ADDED, event.EventType)
+ require.Equal(tag, event.Tag)
+ require.Equal(eventTag, event.EventTag)
+
+ expectedNumEvents := numFollowingEventsToFetch
+ if uint64(event.EventId)+numFollowingEventsToFetch >= numEvents {
+ expectedNumEvents = numEvents - 1 - uint64(event.EventId-model.EventIdStartValue)
+ }
+ followingEvents, err := s.accessor.GetEventsAfterEventId(ctx, eventTag, event.EventId, numFollowingEventsToFetch)
+ if err != nil {
+ panic(err)
+ }
+ require.Equal(uint64(len(followingEvents)), expectedNumEvents)
+ for j, followingEvent := range followingEvents {
+ require.Equal(int64(i+j+1)+model.EventIdStartValue, followingEvent.EventId)
+ require.Equal(uint64(i+j+1), followingEvent.BlockHeight)
+ require.Equal(api.BlockchainEvent_BLOCK_ADDED, followingEvent.EventType)
+ require.Equal(eventTag, followingEvent.EventTag)
+ }
+ }
+}
+
+func (s *eventStorageTestSuite) TestSetMaxEventId() {
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+ watermark, err := s.accessor.GetMaxEventId(ctx, s.eventTag)
+ require.NoError(err)
+ require.Equal(model.EventIdStartValue+int64(numEvents-1), watermark)
+
+ // reset it to a new value
+ newEventId := int64(5)
+ err = s.accessor.SetMaxEventId(ctx, s.eventTag, newEventId)
+ require.NoError(err)
+ watermark, err = s.accessor.GetMaxEventId(ctx, s.eventTag)
+ require.NoError(err)
+ require.Equal(watermark, newEventId)
+
+ // reset it to invalid value
+ invalidEventId := int64(-1)
+ err = s.accessor.SetMaxEventId(ctx, s.eventTag, invalidEventId)
+ require.Error(err)
+
+ // reset it to value bigger than current max
+ invalidEventId = newEventId + 10
+ err = s.accessor.SetMaxEventId(ctx, s.eventTag, invalidEventId)
+ require.Error(err)
+
+ // reset it to EventIdDeleted
+ err = s.accessor.SetMaxEventId(ctx, s.eventTag, model.EventIdDeleted)
+ require.NoError(err)
+ _, err = s.accessor.GetMaxEventId(ctx, s.eventTag)
+ require.Error(err)
+ require.Equal(errors.ErrNoEventHistory, err)
+}
+
+func (s *eventStorageTestSuite) TestSetMaxEventIdNonDefaultEventTag() {
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+ numEvents := uint64(100)
+ eventTag := uint32(1)
+ s.addEvents(eventTag, 0, numEvents, s.tag)
+ watermark, err := s.accessor.GetMaxEventId(ctx, eventTag)
+ require.NoError(err)
+ require.Equal(model.EventIdStartValue+int64(numEvents-1), watermark)
+
+ // reset it to a new value
+ newEventId := int64(5)
+ err = s.accessor.SetMaxEventId(ctx, eventTag, newEventId)
+ require.NoError(err)
+ watermark, err = s.accessor.GetMaxEventId(ctx, eventTag)
+ require.NoError(err)
+ require.Equal(watermark, newEventId)
+
+ // reset it to invalid value
+ invalidEventId := int64(-1)
+ err = s.accessor.SetMaxEventId(ctx, eventTag, invalidEventId)
+ require.Error(err)
+
+ // reset it to value bigger than current max
+ invalidEventId = newEventId + 10
+ err = s.accessor.SetMaxEventId(ctx, eventTag, invalidEventId)
+ require.Error(err)
+
+ // reset it to EventIdDeleted
+ err = s.accessor.SetMaxEventId(ctx, eventTag, model.EventIdDeleted)
+ require.NoError(err)
+ _, err = s.accessor.GetMaxEventId(ctx, eventTag)
+ require.Error(err)
+ require.Equal(errors.ErrNoEventHistory, err)
+}
+
+////////////////////////////////////////////////////////////
+
+func (s *eventStorageTestSuite) TestAddEvents() {
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+ s.verifyEvents(s.eventTag, numEvents, s.tag)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsNonDefaultEventTag() {
+ numEvents := uint64(100)
+ s.addEvents(uint32(1), 0, numEvents, s.tag)
+ s.verifyEvents(uint32(1), numEvents, s.tag)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsDefaultTag() {
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, 0)
+ s.verifyEvents(s.eventTag, numEvents, model.DefaultBlockTag)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsNonDefaultTag() {
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, 2)
+ s.verifyEvents(s.eventTag, numEvents, 2)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsMultipleTimes() {
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+ s.addEvents(s.eventTag, numEvents, numEvents, s.tag)
+ numEvents = numEvents * 2
+ s.verifyEvents(s.eventTag, numEvents, s.tag)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsMultipleTimesNonDefaultEventTag() {
+ numEvents := uint64(100)
+ eventTag := uint32(1)
+ s.addEvents(eventTag, 0, numEvents, s.tag)
+ s.addEvents(eventTag, numEvents, numEvents, s.tag)
+ numEvents = numEvents * 2
+ s.verifyEvents(eventTag, numEvents, s.tag)
+}
+
+////////////////////////////////////////////////////////////
+
+func (s *eventStorageTestSuite) TestAddEventsDiscontinuousChain_NotSkipped() {
+ require := testutil.Require(s.T())
+ numEvents := uint64(100)
+
+ // First, add block metadata for the initial events
+ blockMetas := testutil.MakeBlockMetadatasFromStartHeight(0, int(numEvents), s.tag)
+ ctx := context.TODO()
+ err := s.accessor.PersistBlockMetas(ctx, true, blockMetas, nil)
+ require.NoError(err)
+
+ blockEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, 0, numEvents, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ if err != nil {
+ panic(err)
+ }
+
+ // Add block metadata for the additional events that will be tested
+ additionalBlockMetas := testutil.MakeBlockMetadatasFromStartHeight(numEvents, 10, s.tag)
+ err = s.accessor.PersistBlockMetas(ctx, true, additionalBlockMetas, nil)
+ require.NoError(err)
+
+ // have add event for height numEvents-1 again, invalid
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents-1, numEvents+4, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.Error(err)
+
+ // missing event for height numEvents, invalid
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+2, numEvents+7, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.Error(err)
+
+ // hash mismatch, invalid
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+2, numEvents+7, s.tag, testutil.WithBlockHashFormat("HashMismatch0x%s"))
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.Error(err)
+
+ // continuous, should be able to add them
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents, numEvents+7, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+}
+
+func (s *eventStorageTestSuite) TestAddEventsDiscontinuousChain_Skipped() {
+ require := testutil.Require(s.T())
+ numEvents := uint64(100)
+ ctx := context.TODO()
+
+ // Use the helper method to create initial events
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+
+ // Create block metadata ONLY for non-skipped heights
+ // Heights 101, 102, 106, 107, 108, 109 will have block metadata (non-skipped)
+ // Heights 100, 103, 104, 105 will be skipped (automatically created as skipped)
+ nonSkippedBlockMetas := []*api.BlockMetadata{
+ testutil.MakeBlockMetadata(numEvents+1, s.tag), // height 101
+ testutil.MakeBlockMetadata(numEvents+2, s.tag), // height 102
+ testutil.MakeBlockMetadata(numEvents+6, s.tag), // height 106
+ testutil.MakeBlockMetadata(numEvents+7, s.tag), // height 107
+ testutil.MakeBlockMetadata(numEvents+8, s.tag), // height 108
+ testutil.MakeBlockMetadata(numEvents+9, s.tag), // height 109
+ }
+
+ // Set proper parent relationships for blocks that come after gaps
+ // Block 106 should point to block 102 (since 103, 104, 105 are skipped/missing)
+ nonSkippedBlockMetas[2].ParentHeight = nonSkippedBlockMetas[1].Height // 106 -> 102
+ nonSkippedBlockMetas[2].ParentHash = nonSkippedBlockMetas[1].Hash
+
+ // Block 107 should point to block 106
+ nonSkippedBlockMetas[3].ParentHeight = nonSkippedBlockMetas[2].Height // 107 -> 106
+ nonSkippedBlockMetas[3].ParentHash = nonSkippedBlockMetas[2].Hash
+
+ // Block 108 should point to block 107
+ nonSkippedBlockMetas[4].ParentHeight = nonSkippedBlockMetas[3].Height // 108 -> 107
+ nonSkippedBlockMetas[4].ParentHash = nonSkippedBlockMetas[3].Hash
+
+ // Block 109 should point to block 108
+ nonSkippedBlockMetas[5].ParentHeight = nonSkippedBlockMetas[4].Height // 109 -> 108
+ nonSkippedBlockMetas[5].ParentHash = nonSkippedBlockMetas[4].Hash
+
+ err := s.accessor.PersistBlockMetas(ctx, true, nonSkippedBlockMetas, nil)
+ require.NoError(err)
+
+ // Test case: chain normal growing case, [+0(skipped), +1]
+ // Height 100 is skipped, height 101 is normal
+ blockEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents, numEvents+1, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+1, numEvents+2, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Test case: chain normal growing case, +0(skipped), +1, [+2, +3(skipped)]
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+2, numEvents+3, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+3, numEvents+4, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Test case: chain normal growing case, +0(skipped), +1, +2, +3(skipped), [+4(skipped), +5(skipped)]
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+4, numEvents+5, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+5, numEvents+6, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Test case: rollback case, +6, +7, +8(skipped), [-8(skipped), -7]
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+6, numEvents+8, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+8, numEvents+9, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+8, numEvents+9, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+7, numEvents+8, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Test case: rollback case, +7(skipped), +8, [-8, -7(skipped)]
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+7, numEvents+8, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+8, numEvents+9, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+8, numEvents+9, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+7, numEvents+8, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Test case: rollback case, +7(skipped), +8(skipped), [-8(skipped), -7(skipped)]
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+7, numEvents+8, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_ADDED, numEvents+8, numEvents+9, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+8, numEvents+9, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ blockEvents = testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents+7, numEvents+8, s.tag, testutil.WithBlockSkipped())
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ // Verify that skipped blocks created block_metadata entries but not canonical_blocks entries
+ s.verifySkippedBlockHandling(ctx, numEvents, s.tag)
+}
+
+// verifySkippedBlockHandling verifies that skipped blocks have block_metadata entries but not canonical_blocks entries
+func (s *eventStorageTestSuite) verifySkippedBlockHandling(ctx context.Context, numEvents uint64, tag uint32) {
+ require := testutil.Require(s.T())
+
+ // Check that skipped heights have block_metadata entries with skipped=true
+ skippedHeights := []uint64{numEvents, numEvents + 3, numEvents + 4, numEvents + 5}
+ for _, height := range skippedHeights {
+ var count int
+ err := s.db.QueryRowContext(ctx, `
+ SELECT COUNT(*) FROM block_metadata
+ WHERE tag = $1 AND height = $2 AND skipped = true AND hash IS NULL
+ `, tag, height).Scan(&count)
+ require.NoError(err)
+ require.Greater(count, 0, "Expected skipped block metadata for height %d", height)
+
+ // Verify that skipped blocks do NOT have canonical_blocks entries
+ err = s.db.QueryRowContext(ctx, `
+ SELECT COUNT(*) FROM canonical_blocks
+ WHERE tag = $1 AND height = $2
+ `, tag, height).Scan(&count)
+ require.NoError(err)
+ require.Equal(0, count, "Skipped blocks should not have canonical entries for height %d", height)
+ }
+
+ // Check that non-skipped heights have both block_metadata and canonical_blocks entries
+ nonSkippedHeights := []uint64{numEvents + 1, numEvents + 2, numEvents + 6, numEvents + 7, numEvents + 8, numEvents + 9}
+ for _, height := range nonSkippedHeights {
+ var count int
+ // Should have block_metadata entry with skipped=false
+ err := s.db.QueryRowContext(ctx, `
+ SELECT COUNT(*) FROM block_metadata
+ WHERE tag = $1 AND height = $2 AND skipped = false AND hash IS NOT NULL
+ `, tag, height).Scan(&count)
+ require.NoError(err)
+ require.Greater(count, 0, "Expected non-skipped block metadata for height %d", height)
+
+ // Should have canonical_blocks entry
+ err = s.db.QueryRowContext(ctx, `
+ SELECT COUNT(*) FROM canonical_blocks
+ WHERE tag = $1 AND height = $2
+ `, tag, height).Scan(&count)
+ require.NoError(err)
+ require.Greater(count, 0, "Expected canonical entry for non-skipped height %d", height)
+ }
+}
+
+////////////////////////////////////////////////////////////
+
+func (s *eventStorageTestSuite) TestGetFirstEventIdByBlockHeight() {
+ require := testutil.Require(s.T())
+ numEvents := uint64(100)
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+
+ // add the remove events again so for each height, there should be two events
+ for i := int64(numEvents - 1); i >= 0; i-- {
+ // Add block metadata for the removal event
+ blockMetas := testutil.MakeBlockMetadatasFromStartHeight(uint64(i), 1, s.tag)
+ ctx := context.TODO()
+ err := s.accessor.PersistBlockMetas(ctx, true, blockMetas, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ removeEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, uint64(i), uint64(i+1), s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, removeEvents)
+ if err != nil {
+ panic(err)
+ }
+ eventId, err := s.accessor.GetFirstEventIdByBlockHeight(ctx, s.eventTag, uint64(i))
+ if err != nil {
+ panic(err)
+ }
+ require.Equal(i+model.EventIdStartValue, eventId)
+ }
+}
+
+func (s *eventStorageTestSuite) TestGetFirstEventIdByBlockHeightNonDefaultEventTag() {
+ require := testutil.Require(s.T())
+ numEvents := uint64(100)
+ eventTag := uint32(1)
+ ctx := context.TODO()
+ s.addEvents(eventTag, 0, numEvents, s.tag)
+
+ // fetch event for blockHeight=0
+ eventId, err := s.accessor.GetFirstEventIdByBlockHeight(ctx, eventTag, uint64(0))
+ require.NoError(err)
+ require.Equal(eventId, model.EventIdStartValue)
+
+ // add the remove events again so for each height, there should be two events
+ for i := int64(numEvents - 1); i >= 0; i-- {
+ // Add block metadata for the removal event
+ blockMetas := testutil.MakeBlockMetadatasFromStartHeight(uint64(i), 1, s.tag)
+ err := s.accessor.PersistBlockMetas(ctx, true, blockMetas, nil)
+ require.NoError(err)
+
+ removeEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, uint64(i), uint64(i+1), s.tag)
+ err = s.accessor.AddEvents(ctx, eventTag, removeEvents)
+ require.NoError(err)
+ eventId, err := s.accessor.GetFirstEventIdByBlockHeight(ctx, eventTag, uint64(i))
+ require.NoError(err)
+ require.Equal(i+model.EventIdStartValue, eventId)
+ }
+}
+
+func (s *eventStorageTestSuite) TestGetEventByEventId() {
+ const (
+ eventId = int64(10)
+ numEvents = uint64(20)
+ )
+
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+
+ event, err := s.accessor.GetEventByEventId(ctx, s.eventTag, eventId)
+ require.NoError(err)
+ require.Equal(event.EventId, eventId)
+ require.Equal(event.BlockHeight, uint64(eventId-1))
+}
+
+func (s *eventStorageTestSuite) TestGetEventByEventId_InvalidEventId() {
+ const (
+ eventId = int64(30)
+ numEvents = uint64(20)
+ )
+
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+
+ _, err := s.accessor.GetEventByEventId(ctx, s.eventTag, eventId)
+ require.Error(err)
+}
+
+func (s *eventStorageTestSuite) TestGetEventsByBlockHeight() {
+ const (
+ blockHeight = uint64(19)
+ numEvents = uint64(20)
+ )
+
+ require := testutil.Require(s.T())
+ ctx := context.TODO()
+
+ // +0, +1, ..., +19, -19,
+ s.addEvents(s.eventTag, 0, numEvents, s.tag)
+
+ // Add block metadata for the removal event
+ blockMetas := testutil.MakeBlockMetadatasFromStartHeight(numEvents-1, 1, s.tag)
+ err := s.accessor.PersistBlockMetas(ctx, true, blockMetas, nil)
+ require.NoError(err)
+
+ blockEvents := testutil.MakeBlockEvents(api.BlockchainEvent_BLOCK_REMOVED, numEvents-1, numEvents, s.tag)
+ err = s.accessor.AddEvents(ctx, s.eventTag, blockEvents)
+ require.NoError(err)
+
+ events, err := s.accessor.GetEventsByBlockHeight(ctx, s.eventTag, blockHeight)
+ require.NoError(err)
+ require.Equal(2, len(events))
+ for _, event := range events {
+ require.Equal(blockHeight, event.BlockHeight)
+ }
+}
+
+func TestIntegrationEventStorageTestSuite(t *testing.T) {
+ require := testutil.Require(t)
+ // Run the suite against the default config (eth-mainnet)
+ cfg, err := config.New()
+ require.NoError(err)
+ suite.Run(t, &eventStorageTestSuite{config: cfg})
+}
diff --git a/internal/storage/metastorage/postgres/event_storage_test.go b/internal/storage/metastorage/postgres/event_storage_test.go
new file mode 100644
index 0000000..defade1
--- /dev/null
+++ b/internal/storage/metastorage/postgres/event_storage_test.go
@@ -0,0 +1,103 @@
+package postgres
+
+import (
+ "testing"
+
+ pgmodel "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres/model"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+func TestEventTypeToString(t *testing.T) {
+ require := testutil.Require(t)
+
+ tests := []struct {
+ name string
+ eventType api.BlockchainEvent_Type
+ expected string
+ }{
+ {
+ name: "BLOCK_ADDED",
+ eventType: api.BlockchainEvent_BLOCK_ADDED,
+ expected: "BLOCK_ADDED",
+ },
+ {
+ name: "BLOCK_REMOVED",
+ eventType: api.BlockchainEvent_BLOCK_REMOVED,
+ expected: "BLOCK_REMOVED",
+ },
+ {
+ name: "UNKNOWN",
+ eventType: api.BlockchainEvent_UNKNOWN,
+ expected: "UNKNOWN",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ result := pgmodel.EventTypeToString(test.eventType)
+ require.Equal(test.expected, result)
+ })
+ }
+}
+
+func TestParseEventType(t *testing.T) {
+ require := testutil.Require(t)
+
+ tests := []struct {
+ name string
+ input string
+ expected api.BlockchainEvent_Type
+ }{
+ {
+ name: "BLOCK_ADDED",
+ input: "BLOCK_ADDED",
+ expected: api.BlockchainEvent_BLOCK_ADDED,
+ },
+ {
+ name: "BLOCK_REMOVED",
+ input: "BLOCK_REMOVED",
+ expected: api.BlockchainEvent_BLOCK_REMOVED,
+ },
+ {
+ name: "UNKNOWN",
+ input: "UNKNOWN",
+ expected: api.BlockchainEvent_UNKNOWN,
+ },
+ {
+ name: "Invalid string",
+ input: "INVALID_TYPE",
+ expected: api.BlockchainEvent_UNKNOWN,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ result := pgmodel.ParseEventType(test.input)
+ require.Equal(test.expected, result)
+ })
+ }
+}
+
+func TestEventTypeConversion(t *testing.T) {
+ require := testutil.Require(t)
+
+ eventTypes := []api.BlockchainEvent_Type{
+ api.BlockchainEvent_BLOCK_ADDED,
+ api.BlockchainEvent_BLOCK_REMOVED,
+ api.BlockchainEvent_UNKNOWN,
+ }
+
+ for _, eventType := range eventTypes {
+ t.Run(eventType.String(), func(t *testing.T) {
+ // Convert to string
+ eventTypeStr := pgmodel.EventTypeToString(eventType)
+
+ // Convert back to enum
+ convertedEventType := pgmodel.ParseEventType(eventTypeStr)
+
+ // Should be the same
+ require.Equal(eventType, convertedEventType)
+ })
+ }
+}
diff --git a/internal/storage/metastorage/postgres/meta_storage.go b/internal/storage/metastorage/postgres/meta_storage.go
new file mode 100644
index 0000000..4329aa6
--- /dev/null
+++ b/internal/storage/metastorage/postgres/meta_storage.go
@@ -0,0 +1,78 @@
+package postgres
+
+import (
+ "context"
+
+ "go.uber.org/fx"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+)
+
+type (
+ metaStorageImpl struct {
+ internal.BlockStorage
+ internal.EventStorage
+ internal.TransactionStorage
+ }
+
+ Params struct {
+ fx.In
+ fxparams.Params
+ }
+
+ metaStorageFactory struct {
+ params Params
+ }
+)
+
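+// NewMetaStorage wires the Postgres-backed block, event, and transaction
+// storages on top of a shared connection pool and returns them both
+// individually and combined as a MetaStorage.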
+func NewMetaStorage(params Params) (internal.Result, error) {
+ // Use shared connection pool instead of creating new connections
+ pool, err := GetConnectionPool(context.Background(), params.Config.AWS.Postgres)
+ if err != nil {
+ return internal.Result{}, err
+ }
+
+ db := pool.DB()
+ if db == nil {
+ return internal.Result{}, xerrors.New("connection pool returned nil database connection")
+ }
+ // Create storage implementations with database connection
+ blockStorage, err := newBlockStorage(db, params)
+ if err != nil {
+ return internal.Result{}, err
+ }
+
+ eventStorage, err := newEventStorage(db, params)
+ if err != nil {
+ return internal.Result{}, err
+ }
+
+ transactionStorage, err := newTransactionStorage(db, params)
+ if err != nil {
+ return internal.Result{}, err
+ }
+
+ // Combine into meta storage
+ metaStorage := &metaStorageImpl{
+ BlockStorage: blockStorage,
+ EventStorage: eventStorage,
+ TransactionStorage: transactionStorage,
+ }
+
+ return internal.Result{
+ BlockStorage: blockStorage,
+ EventStorage: eventStorage,
+ TransactionStorage: transactionStorage,
+ MetaStorage: metaStorage,
+ }, nil
+}
+
+func (f *metaStorageFactory) Create() (internal.Result, error) {
+ return NewMetaStorage(f.params)
+}
+
+func NewFactory(params Params) internal.MetaStorageFactory {
+ return &metaStorageFactory{params}
+}
diff --git a/internal/storage/metastorage/postgres/migrator.go b/internal/storage/metastorage/postgres/migrator.go
new file mode 100644
index 0000000..b96c15d
--- /dev/null
+++ b/internal/storage/metastorage/postgres/migrator.go
@@ -0,0 +1,33 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "embed"
+
+ "github.com/pressly/goose/v3"
+ "golang.org/x/xerrors"
+)
+
+//go:embed db/migrations/*.sql
+var embedMigrations embed.FS
+
+// GetEmbeddedMigrations returns the embedded migrations filesystem
+// This is used by the admin db-migrate command
+func GetEmbeddedMigrations() embed.FS {
+ return embedMigrations
+}
+
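+// runMigrations applies the embedded goose migrations in db/migrations to the
+// given database, bringing the schema up to the latest version.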
+func runMigrations(ctx context.Context, db *sql.DB) error {
+ goose.SetBaseFS(embedMigrations)
+
+ if err := goose.SetDialect("postgres"); err != nil {
+ return xerrors.Errorf("failed to set goose dialect: %w", err)
+ }
+
+ if err := goose.UpContext(ctx, db, "db/migrations"); err != nil {
+ return xerrors.Errorf("failed to run migrations: %w", err)
+ }
+
+ return nil
+}
diff --git a/internal/storage/metastorage/postgres/model/block_event.go b/internal/storage/metastorage/postgres/model/block_event.go
new file mode 100644
index 0000000..ca84543
--- /dev/null
+++ b/internal/storage/metastorage/postgres/model/block_event.go
@@ -0,0 +1,61 @@
+package model
+
+import (
+ "database/sql"
+
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+func BlockEventFromRow(row *sql.Row) (*api.BlockchainEvent, error) {
+ var event api.BlockchainEvent
+ var eventTypeStr string
+ var blockHash string
+ var blockHeight uint64
+ var eventSequence int64
+ var eventTag uint32
+
+ err := row.Scan(
+ &eventSequence,
+ &eventTypeStr,
+ &blockHeight,
+ &blockHash,
+ &eventTag,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ event.Type = ParseEventType(eventTypeStr)
+ event.SequenceNum = eventSequence
+ event.EventTag = eventTag
+ event.Block = &api.BlockIdentifier{
+ Hash: blockHash,
+ Height: blockHeight,
+ }
+
+ return &event, nil
+}
+
+// ParseEventType converts a string representation of event type to the protobuf enum
+func ParseEventType(eventTypeStr string) api.BlockchainEvent_Type {
+ switch eventTypeStr {
+ case "BLOCK_ADDED":
+ return api.BlockchainEvent_BLOCK_ADDED
+ case "BLOCK_REMOVED":
+ return api.BlockchainEvent_BLOCK_REMOVED
+ default:
+ return api.BlockchainEvent_UNKNOWN
+ }
+}
+
+// EventTypeToString converts the protobuf enum to string representation
+func EventTypeToString(eventType api.BlockchainEvent_Type) string {
+ switch eventType {
+ case api.BlockchainEvent_BLOCK_ADDED:
+ return "BLOCK_ADDED"
+ case api.BlockchainEvent_BLOCK_REMOVED:
+ return "BLOCK_REMOVED"
+ default:
+ return "UNKNOWN"
+ }
+}
diff --git a/internal/storage/metastorage/postgres/model/block_metadata.go b/internal/storage/metastorage/postgres/model/block_metadata.go
new file mode 100644
index 0000000..c43d6c1
--- /dev/null
+++ b/internal/storage/metastorage/postgres/model/block_metadata.go
@@ -0,0 +1,78 @@
+package model
+
+import (
+ "database/sql"
+
+ "github.com/coinbase/chainstorage/internal/utils/utils"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+// Scanner interface that both *sql.Row and *sql.Rows implement
+type Scanner interface {
+ Scan(dest ...interface{}) error
+}
+
+// scanBlockMetadata scans a single row into a BlockMetadata struct
+// Schema: id, height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped
+func scanBlockMetadata(scanner Scanner) (*api.BlockMetadata, error) {
+ var block api.BlockMetadata
+ var timestamp int64
+ var id int64 // We get this but don't need it in the result
+
+ err := scanner.Scan(
+ &id,
+ &block.Height,
+ &block.Tag,
+ &block.Hash,
+ &block.ParentHash,
+ &block.ParentHeight,
+ &block.ObjectKeyMain,
+ ×tamp,
+ &block.Skipped,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ block.Timestamp = utils.ToTimestamp(timestamp)
+ return &block, nil
+}
+
+// BlockMetadataFromRow converts a postgres row into a BlockMetadata proto
+// Used for direct block_metadata table queries
+// Schema: id, height, tag, hash, parent_hash, parent_height, object_key_main, timestamp, skipped
+func BlockMetadataFromRow(db *sql.DB, row *sql.Row) (*api.BlockMetadata, error) {
+ return scanBlockMetadata(row)
+}
+
+// BlockMetadataFromCanonicalRow converts a postgres row from canonical join into a BlockMetadata proto
+// Used for queries that join canonical_blocks with block_metadata
+// Schema: bm.id, bm.height, bm.tag, bm.hash, bm.parent_hash, bm.parent_height, bm.object_key_main, bm.timestamp, bm.skipped
+func BlockMetadataFromCanonicalRow(db *sql.DB, row *sql.Row) (*api.BlockMetadata, error) {
+ return scanBlockMetadata(row)
+}
+
+// scanBlockMetadataRows scans multiple rows into BlockMetadata structs
+func scanBlockMetadataRows(rows *sql.Rows) ([]*api.BlockMetadata, error) {
+ var blocks []*api.BlockMetadata
+ for rows.Next() {
+ block, err := scanBlockMetadata(rows)
+ if err != nil {
+ return nil, err
+ }
+ blocks = append(blocks, block)
+ }
+ return blocks, nil
+}
+
+// BlockMetadataFromRows converts multiple postgres rows into BlockMetadata protos
+// Used for direct block_metadata table queries
+func BlockMetadataFromRows(db *sql.DB, rows *sql.Rows) ([]*api.BlockMetadata, error) {
+ return scanBlockMetadataRows(rows)
+}
+
+// BlockMetadataFromCanonicalRows converts multiple postgres rows from canonical joins into BlockMetadata protos
+// Used for queries that join canonical_blocks with block_metadata
+func BlockMetadataFromCanonicalRows(db *sql.DB, rows *sql.Rows) ([]*api.BlockMetadata, error) {
+ return scanBlockMetadataRows(rows)
+}
diff --git a/internal/storage/metastorage/postgres/module.go b/internal/storage/metastorage/postgres/module.go
new file mode 100644
index 0000000..502ab6e
--- /dev/null
+++ b/internal/storage/metastorage/postgres/module.go
@@ -0,0 +1,30 @@
+package postgres
+
+import (
+ "context"
+
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+)
+
+var Module = fx.Options(
+ fx.Provide(fx.Annotated{
+ Name: "metastorage/postgres",
+ Target: NewFactory,
+ }),
+ fx.Invoke(registerClosePoolsHook),
+)
+
+func registerClosePoolsHook(lc fx.Lifecycle, logger *zap.Logger) {
+ lc.Append(fx.Hook{
+ OnStart: func(context.Context) error { return nil },
+ OnStop: func(ctx context.Context) error {
+ if err := CloseAllConnectionPools(); err != nil {
+ logger.Error("failed to close PostgreSQL connection pools", zap.Error(err))
+ return err
+ }
+ logger.Info("PostgreSQL connection pools closed successfully")
+ return nil
+ },
+ })
+}
diff --git a/internal/storage/metastorage/postgres/transaction_storage.go b/internal/storage/metastorage/postgres/transaction_storage.go
new file mode 100644
index 0000000..ea0a297
--- /dev/null
+++ b/internal/storage/metastorage/postgres/transaction_storage.go
@@ -0,0 +1,33 @@
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/internal"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+)
+
+type (
+ transactionStorageImpl struct {
+ db *sql.DB
+ }
+)
+
+func newTransactionStorage(db *sql.DB, params Params) (internal.TransactionStorage, error) {
+ accessor := &transactionStorageImpl{
+ db: db,
+ }
+ return accessor, nil
+}
+
+func (t *transactionStorageImpl) AddTransactions(ctx context.Context, transaction []*model.Transaction, parallelism int) error {
+ // TODO: Implement transaction insertion
+ return errors.New("not implemented")
+}
+
+func (t *transactionStorageImpl) GetTransaction(ctx context.Context, tag uint32, transactionHash string) ([]*model.Transaction, error) {
+ // TODO: Implement get transaction
+ return nil, errors.New("not implemented")
+}
diff --git a/internal/storage/utils/compress.go b/internal/storage/utils/compress.go
new file mode 100644
index 0000000..44ca1a1
--- /dev/null
+++ b/internal/storage/utils/compress.go
@@ -0,0 +1,115 @@
+package utils
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/klauspost/compress/zstd"
+ "golang.org/x/xerrors"
+
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+const (
+ GzipFileSuffix = ".gzip"
+ ZstdFileSuffix = ".zstd"
+)
+
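+// Compressor abstracts per-algorithm compression of raw payloads. A minimal
+// usage sketch (payload is an assumed []byte; error handling elided):
+//
+//	compressor, err := CompressorFactory(api.Compression_ZSTD)
+//	if err != nil {
+//		// unsupported compression type
+//	}
+//	compressed, _ := compressor.Compress(payload)
+//	restored, _ := compressor.Decompress(compressed)
+//	key := compressor.GetObjectKey("blocks/123") // "blocks/123.zstd"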
+type Compressor interface {
+ Compress(data []byte) ([]byte, error)
+ Decompress(data []byte) ([]byte, error)
+ GetObjectKey(key string) string
+}
+
+func GetCompressionType(fileURL string) api.Compression {
+ ext := filepath.Ext(fileURL)
+ switch ext {
+ case GzipFileSuffix:
+ return api.Compression_GZIP
+ case ZstdFileSuffix:
+ return api.Compression_ZSTD
+ }
+ return api.Compression_NONE
+}
+
+func CompressorFactory(compressionType api.Compression) (Compressor, error) {
+ switch compressionType {
+ case api.Compression_GZIP:
+ return &GzipCompressor{}, nil
+ case api.Compression_ZSTD:
+ return &ZstdCompressor{}, nil
+ default:
+ return nil, errors.New("unsupported compression type")
+ }
+}
+
+// ------ GZIP ------
+type GzipCompressor struct{}
+
+func (g *GzipCompressor) Compress(data []byte) ([]byte, error) {
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+
+ if _, err := writer.Write(data); err != nil {
+ return nil, xerrors.Errorf("failed to write compressed data with gzip: %w", err)
+ }
+ if err := writer.Close(); err != nil {
+ return nil, xerrors.Errorf("failed to close gzip writer: %w", err)
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (g *GzipCompressor) Decompress(data []byte) ([]byte, error) {
+ reader, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to initiate gzip reader: %w", err)
+ }
+ decoded, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to read data: %w", err)
+ }
+ if err := reader.Close(); err != nil {
+ return nil, xerrors.Errorf("failed to close gzip reader: %w", err)
+ }
+ return decoded, nil
+}
+
+func (g *GzipCompressor) GetObjectKey(key string) string {
+ return fmt.Sprintf("%s%s", key, GzipFileSuffix)
+}
+
+// ------ ZSTD ------
+type ZstdCompressor struct{}
+
+func (z *ZstdCompressor) Compress(data []byte) ([]byte, error) {
+ encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedDefault))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create zstd writer: %w", err)
+ }
+ // EncodeAll must run before Close; Close releases the encoder's resources.
+ compressed := encoder.EncodeAll(data, nil)
+ if err := encoder.Close(); err != nil {
+ return nil, xerrors.Errorf("failed to close zstd writer: %w", err)
+ }
+ return compressed, nil
+}
+
+func (z *ZstdCompressor) Decompress(data []byte) ([]byte, error) {
+ decoder, err := zstd.NewReader(nil)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to initiate zstd reader: %w", err)
+ }
+ defer decoder.Close()
+ decoded, err := decoder.DecodeAll(data, nil)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to read data with zstd: %w", err)
+ }
+ return decoded, nil
+}
+
+func (z *ZstdCompressor) GetObjectKey(key string) string {
+ return fmt.Sprintf("%s%s", key, ZstdFileSuffix)
+}
diff --git a/internal/storage/utils/utils.go b/internal/storage/utils/utils.go
index fd1a765..e14fe2b 100644
--- a/internal/storage/utils/utils.go
+++ b/internal/storage/utils/utils.go
@@ -1,81 +1,49 @@
package utils
import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "strings"
-
"golang.org/x/xerrors"
api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
)
-const (
- GzipFileSuffix = ".gzip"
-)
-
-func GetCompressionType(fileURL string) api.Compression {
- if strings.HasSuffix(fileURL, GzipFileSuffix) {
- return api.Compression_GZIP
- }
- return api.Compression_NONE
-}
-
func Compress(data []byte, compression api.Compression) ([]byte, error) {
if compression == api.Compression_NONE {
return data, nil
}
- if compression == api.Compression_GZIP {
- var buf bytes.Buffer
- zw := gzip.NewWriter(&buf)
- if _, err := zw.Write(data); err != nil {
- return nil, xerrors.Errorf("failed to write compressed data: %w", err)
- }
- if err := zw.Close(); err != nil {
- return nil, xerrors.Errorf("failed to close writer: %w", err)
- }
-
- return buf.Bytes(), nil
+ compressor, err := CompressorFactory(compression)
+ if err != nil {
+ return nil, err
}
-
- return nil, xerrors.Errorf("failed to compress with unsupported type %v", compression.String())
+ coded, err := compressor.Compress(data)
+ if err != nil {
+ return nil, err
+ }
+ return coded, nil
}
func Decompress(data []byte, compression api.Compression) ([]byte, error) {
if compression == api.Compression_NONE {
return data, nil
}
-
- if compression == api.Compression_GZIP {
- zr, err := gzip.NewReader(bytes.NewBuffer(data))
- if err != nil {
- return nil, xerrors.Errorf("failed to initiate reader: %w", err)
- }
- decoded, err := ioutil.ReadAll(zr)
- if err != nil {
- return nil, xerrors.Errorf("failed to read data: %w", err)
- }
- if err := zr.Close(); err != nil {
- return nil, xerrors.Errorf("failed to close reader: %w", err)
- }
- return decoded, nil
+ compressor, err := CompressorFactory(compression)
+ if err != nil {
+ return nil, err
}
-
- return nil, xerrors.Errorf("failed to decompress with unsupported type %v", compression.String())
+ decoded, err := compressor.Decompress(data)
+ if err != nil {
+ return nil, err
+ }
+ return decoded, nil
}
func GetObjectKey(key string, compression api.Compression) (string, error) {
if compression == api.Compression_NONE {
return key, nil
}
-
- if compression == api.Compression_GZIP {
- key = fmt.Sprintf("%s%s", key, GzipFileSuffix)
- return key, nil
+ compressor, err := CompressorFactory(compression)
+ if err != nil {
+ return "", xerrors.Errorf("failed to Get Object Key with: %w", err)
}
-
- return "", xerrors.Errorf("failed to get object key with unsupported type %v", compression.String())
+ return compressor.GetObjectKey(key), nil
}
diff --git a/internal/storage/utils/utils_test.go b/internal/storage/utils/utils_test.go
index e626d11..0ada79e 100644
--- a/internal/storage/utils/utils_test.go
+++ b/internal/storage/utils/utils_test.go
@@ -1,6 +1,7 @@
package utils
import (
+ "bytes"
"testing"
"github.com/coinbase/chainstorage/internal/utils/testutil"
@@ -24,6 +25,14 @@ func TestGetCompressionType(t *testing.T) {
fileURL: "a.gzip",
compression: api.Compression_GZIP,
},
+ {
+ fileURL: "bzstd",
+ compression: api.Compression_NONE,
+ },
+ {
+ fileURL: "b.zstd",
+ compression: api.Compression_ZSTD,
+ },
}
for _, test := range tests {
t.Run(test.fileURL, func(t *testing.T) {
@@ -54,6 +63,20 @@ func TestCompress(t *testing.T) {
}`),
api.Compression_GZIP,
},
+ {
+ "emptyData",
+ []byte{},
+ api.Compression_ZSTD,
+ },
+ {
+ "blockDataCompression",
+ []byte(`
+ {
+ "hash": "0xbaa42c",
+ "number": "0xacc290",
+ }`),
+ api.Compression_ZSTD,
+ },
{
"blockData",
[]byte(`
@@ -73,7 +96,7 @@ func TestCompress(t *testing.T) {
decompressed, err := Decompress(compressed, test.compression)
require.NoError(err)
- require.Equal(decompressed, test.data)
+ require.True(bytes.Equal(decompressed, test.data))
})
}
}
@@ -94,6 +117,11 @@ func TestGetObjectKey(t *testing.T) {
api.Compression_NONE,
"key2",
},
+ {
+ "key3",
+ api.Compression_ZSTD,
+ "key3.zstd",
+ },
}
for _, test := range tests {
t.Run(test.key, func(t *testing.T) {
diff --git a/internal/tally/prometheus_reporter.go b/internal/tally/prometheus_reporter.go
new file mode 100644
index 0000000..4cb78a4
--- /dev/null
+++ b/internal/tally/prometheus_reporter.go
@@ -0,0 +1,470 @@
+package tally
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "math"
+ "net/http"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/collectors"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/uber-go/tally/v4"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+
+ "github.com/coinbase/chainstorage/internal/config"
+)
+
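+// defaultBuckets is the fallback histogram bucket set. Timers observe in
+// milliseconds (see Timing below), so these buckets cover roughly 100µs
+// through 10s.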
+var defaultBuckets = []float64{
+ 0.1, 0.2, 0.3, 0.5, 0.7, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 12, 15, 20, 25, 30, 40, 50, 75, 100, 200, 300, 500, 750,
+ 1000, 2000, 3000, 5000, 7000, 10000,
+}
+
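+// prometheusReporter implements tally.StatsReporter on top of a
+// process-local Prometheus registry, exposed over HTTP by the
+// lifecycle-managed metrics server.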
+type prometheusReporter struct {
+ stats *prometheusStats
+}
+
+func newPrometheusReporter(
+ cfg *config.PrometheusConfig,
+ lifecycle fx.Lifecycle,
+ logger *zap.Logger,
+) tally.StatsReporter {
+ opts := []prometheusStatsOption{}
+ if cfg.Namespace != "" {
+ opts = append(opts, withPrometheusNamespace(cfg.Namespace))
+ }
+ if len(cfg.GlobalLabels) > 0 {
+ opts = append(opts, withPrometheusLabels(cfg.GlobalLabels))
+ }
+ if len(cfg.DefaultHistogramBuckets) > 0 {
+ opts = append(opts, withDefaultPrometheusHistogramBuckets(cfg.DefaultHistogramBuckets))
+ }
+ if len(cfg.HistogramBuckets) > 0 {
+ opts = append(opts, withPrometheusHistogramBuckets(cfg.HistogramBuckets))
+ }
+
+ s := newPrometheusStats(logger, opts...)
+
+ mux := http.NewServeMux()
+
+ metricsPath := "/metrics"
+ if cfg.MetricsPath != "" {
+ metricsPath = cfg.MetricsPath
+ }
+ mux.Handle(metricsPath, s.MetricsHandler())
+
+ addr := fmt.Sprintf(":%d", cfg.Port)
+ srv := &http.Server{
+ Addr: addr,
+ Handler: mux,
+ }
+
+ lifecycle.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ logger.Info("prometheus metrics server starting", zap.String("address", addr))
+
+ go func() {
+ if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ logger.Error("prometheus metrics server failed to start", zap.Error(err))
+ }
+ }()
+
+ return nil
+ },
+ OnStop: func(ctx context.Context) error {
+ logger.Info("prometheus metrics server stopping", zap.String("address", addr))
+ return srv.Shutdown(ctx)
+ },
+ })
+
+ return &prometheusReporter{
+ stats: s,
+ }
+}
+
+func (p *prometheusReporter) labels() prometheus.Labels {
+ return p.stats.Labels()
+}
+
+func (p *prometheusReporter) Capabilities() tally.Capabilities {
+ return p
+}
+
+func (p *prometheusReporter) Reporting() bool {
+ return true
+}
+
+func (p *prometheusReporter) Tagging() bool {
+ return true
+}
+
+func (p *prometheusReporter) Flush() {
+ // no-op
+}
+
+func (p *prometheusReporter) ReportCounter(name string, tags map[string]string, value int64) {
+ p.stats.Count(name, value, p.tags(tags))
+}
+
+func (p *prometheusReporter) ReportGauge(name string, tags map[string]string, value float64) {
+ p.stats.Gauge(name, value, p.tags(tags))
+}
+
+func (p *prometheusReporter) ReportHistogramDurationSamples(
+ name string,
+ tags map[string]string,
+ buckets tally.Buckets,
+ bucketLowerBound time.Duration,
+ bucketUpperBound time.Duration,
+ samples int64,
+) {
+ panic("unimplemented")
+}
+
+func (p *prometheusReporter) ReportHistogramValueSamples(
+ name string,
+ tags map[string]string,
+ buckets tally.Buckets,
+ bucketLowerBound float64,
+ bucketUpperBound float64,
+ samples int64,
+) {
+ panic("unimplemented")
+}
+
+func (p *prometheusReporter) ReportTimer(
+ name string,
+ tags map[string]string,
+ interval time.Duration,
+) {
+ p.stats.Timing(name, interval, p.tags(tags))
+}
+
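+// tags merges the reporter's global labels with per-metric tags;
+// per-metric tags win on key conflicts.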
+func (p *prometheusReporter) tags(tags map[string]string) map[string]string {
+ if len(tags) == 0 {
+ return p.labels()
+ }
+
+ m := make(map[string]string)
+ maps.Copy(m, p.labels())
+ maps.Copy(m, tags)
+
+ return m
+}
+
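+// prometheusStats lazily creates and caches metric vectors, keyed by
+// metric name plus its sorted label names.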
+type prometheusStats struct {
+ mux sync.RWMutex
+ logger *zap.Logger
+
+ counters map[string]*prometheus.CounterVec
+ gauges map[string]*prometheus.GaugeVec
+ histograms map[string]*prometheus.HistogramVec
+ histogramBuckets map[string][]float64
+ defaultBuckets []float64
+ reg *prometheus.Registry
+
+ globalLabels prometheus.Labels
+ namespace string
+}
+
+type prometheusStatsOption func(*prometheusStats)
+
+func withPrometheusNamespace(namespace string) prometheusStatsOption {
+ return func(s *prometheusStats) {
+ s.namespace = namespace
+ }
+}
+
+func withPrometheusLabels(labels map[string]string) prometheusStatsOption {
+ return func(s *prometheusStats) {
+ for k, v := range labels {
+ s.globalLabels[k] = v
+ }
+ }
+}
+
+func withPrometheusHistogramBuckets(buckets map[string][]float64) prometheusStatsOption {
+ return func(s *prometheusStats) {
+ for k, v := range buckets {
+ s.histogramBuckets[k] = v
+ }
+ }
+}
+
+func withDefaultPrometheusHistogramBuckets(buckets []float64) prometheusStatsOption {
+ return func(s *prometheusStats) {
+ s.defaultBuckets = buckets
+ }
+}
+
+func newPrometheusStats(logger *zap.Logger, opts ...prometheusStatsOption) *prometheusStats {
+ s := &prometheusStats{
+ logger: logger,
+ counters: make(map[string]*prometheus.CounterVec),
+ gauges: make(map[string]*prometheus.GaugeVec),
+ histograms: make(map[string]*prometheus.HistogramVec),
+ histogramBuckets: make(map[string][]float64),
+ defaultBuckets: defaultBuckets,
+ globalLabels: make(prometheus.Labels),
+ namespace: "",
+ reg: prometheus.NewRegistry(),
+ }
+
+ // Add go runtime metrics and process collectors.
+ s.reg.MustRegister(
+ collectors.NewGoCollector(),
+ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+ )
+
+ for _, opt := range opts {
+ opt(s)
+ }
+
+ return s
+}
+
+func (c *prometheusStats) Labels() prometheus.Labels {
+ return c.globalLabels
+}
+
+func (c *prometheusStats) MetricsHandler() http.Handler {
+ return promhttp.HandlerFor(c.reg, promhttp.HandlerOpts{Registry: c.reg})
+}
+
+func (c *prometheusStats) Count(key string, n interface{}, tags map[string]string) {
+ v, err := toFloat64(n)
+ if err != nil {
+ return
+ }
+
+ op, err := c.loadCount(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.count.error", zap.Error(err))
+ return
+ }
+ op.With(labels(tags)).Add(v)
+}
+
+func (c *prometheusStats) Inc(key string, tags map[string]string) {
+ op, err := c.loadGauge(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.inc.error", zap.Error(err))
+ return
+ }
+ op.With(labels(tags)).Inc()
+}
+
+func (c *prometheusStats) Dec(key string, tags map[string]string) {
+ op, err := c.loadGauge(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.dec.error", zap.Error(err))
+ return
+ }
+ op.With(labels(tags)).Dec()
+}
+
+func (c *prometheusStats) Gauge(key string, n interface{}, tags map[string]string) {
+ v, err := toFloat64(n)
+ if err != nil {
+ return
+ }
+
+ op, err := c.loadGauge(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.gauge.error", zap.Error(err))
+ return
+ }
+ op.With(labels(tags)).Set(v)
+}
+
+func (c *prometheusStats) Histogram(key string, n interface{}, tags map[string]string) {
+ v, err := toFloat64(n)
+ if err != nil {
+ return
+ }
+
+ op, err := c.loadHistogram(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.histogram.error", zap.Error(err))
+ return
+ }
+ op.With(labels(tags)).Observe(v)
+}
+
+func (c *prometheusStats) Timing(key string, t time.Duration, tags map[string]string) {
+ op, err := c.loadHistogram(key, tags)
+ if err != nil {
+ c.logger.Warn("prometheus.timing.error", zap.Error(err))
+ return
+ }
+
+ op.With(labels(tags)).Observe(float64(t) / float64(time.Millisecond))
+}
+
+func (c *prometheusStats) loadGauge(key string, tags map[string]string) (*prometheus.GaugeVec, error) {
+ key = c.key(key)
+ id, labelNames := labelKey(key, tags)
+
+ c.mux.RLock()
+ gauge, ok := c.gauges[id]
+ c.mux.RUnlock()
+ if ok {
+ return gauge, nil
+ }
+
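+ // Another goroutine may have registered this metric between the RUnlock
+ // and the Lock; registerMetric returns the existing collector in that case.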
+ c.mux.Lock()
+ gauge, err := registerMetric(c.reg, prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: c.namespace,
+ Name: key,
+ ConstLabels: c.globalLabels,
+ }, labelNames))
+ if err != nil {
+ c.mux.Unlock()
+ return nil, err
+ }
+ c.gauges[id] = gauge
+ c.mux.Unlock()
+
+ return gauge, nil
+}
+
+func (c *prometheusStats) loadCount(key string, tags map[string]string) (*prometheus.CounterVec, error) {
+ key = c.key(key)
+ id, labelNames := labelKey(key, tags)
+
+ c.mux.RLock()
+ counter, ok := c.counters[id]
+ c.mux.RUnlock()
+ if ok {
+ return counter, nil
+ }
+
+ c.mux.Lock()
+ counter, err := registerMetric(c.reg, prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: c.namespace,
+ Name: key,
+ ConstLabels: c.globalLabels,
+ }, labelNames))
+ if err != nil {
+ c.mux.Unlock()
+ return nil, err
+ }
+ c.counters[id] = counter
+ c.mux.Unlock()
+
+ return counter, nil
+}
+
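+// labelKey derives a cache id from the metric name and its sorted label
+// names, since a Prometheus metric vector is bound to a fixed label set.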
+func labelKey(key string, tags map[string]string) (id string, labelNames []string) {
+ for k := range labels(tags) {
+ labelNames = append(labelNames, k)
+ }
+
+ sort.Strings(labelNames)
+ newKey := strings.Join(append([]string{key}, labelNames...), ".")
+
+ return newKey, labelNames
+}
+
+func (c *prometheusStats) loadHistogram(key string, tags map[string]string) (*prometheus.HistogramVec, error) {
+ key = c.key(key)
+ id, labelNames := labelKey(key, tags)
+
+ c.mux.RLock()
+ histogram, registered := c.histograms[id]
+ histogramBuckets, hasBuckets := c.histogramBuckets[key]
+ c.mux.RUnlock()
+
+ if registered {
+ return histogram, nil
+ }
+
+ if !hasBuckets {
+ histogramBuckets = c.defaultBuckets
+ }
+
+ c.mux.Lock()
+ histogram, err := registerMetric(c.reg, prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: c.namespace,
+ Name: key,
+ ConstLabels: c.globalLabels,
+ Buckets: histogramBuckets,
+ }, labelNames))
+ if err != nil {
+ c.mux.Unlock()
+ return nil, err
+ }
+ c.histograms[id] = histogram
+ c.mux.Unlock()
+
+ return histogram, nil
+}
+
+func (c *prometheusStats) key(key string) string {
+ return strings.ReplaceAll(key, ".", "_")
+}
+
+func labels(tags map[string]string) prometheus.Labels {
+ if len(tags) > 0 {
+ return prometheus.Labels(tags)
+ }
+ return prometheus.Labels{}
+}
+
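+// registerMetric registers the collector with the given registry (or the
+// default registry when reg is nil), returning the already-registered
+// collector of the same type when one exists.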
+func registerMetric[T prometheus.Collector](
+ reg prometheus.Registerer,
+ metric T,
+) (T, error) {
+ var err error
+ if reg != nil {
+ err = reg.Register(metric)
+ } else {
+ err = prometheus.Register(metric)
+ }
+ if err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ existing, ok := are.ExistingCollector.(T)
+ if !ok {
+ return metric, fmt.Errorf("metric with different type already exists")
+ }
+
+ return existing, nil
+ }
+ }
+
+ return metric, err
+}
+
+func toFloat64(n interface{}) (float64, error) {
+ var v float64
+ switch n := n.(type) {
+ case float64:
+ v = n
+ case float32:
+ v = float64(n)
+ case int:
+ v = float64(n)
+ case int8:
+ v = float64(n)
+ case int16:
+ v = float64(n)
+ case int32:
+ v = float64(n)
+ case int64:
+ v = float64(n)
+ default:
+ // unsupported type: report NaN together with an error
+ return math.NaN(), errors.New("failed to convert value to float64")
+ }
+ return v, nil
+}
diff --git a/internal/tally/stats_reporter.go b/internal/tally/stats_reporter.go
index 734d2a2..73bc3f2 100644
--- a/internal/tally/stats_reporter.go
+++ b/internal/tally/stats_reporter.go
@@ -1,10 +1,6 @@
package tally
import (
- "context"
- "time"
-
- smirastatsd "github.com/smira/go-statsd"
"github.com/uber-go/tally/v4"
"go.uber.org/fx"
"go.uber.org/zap"
@@ -19,97 +15,15 @@ type (
Logger *zap.Logger
Config *config.Config
}
-
- reporter struct {
- client *smirastatsd.Client
- }
-)
-
-const (
- reportingInterval = time.Second
-)
-
-var (
- // hardcoding this to be datadog format
- // we need think about whats the best way to set it up in config such that
- // when we switch reporter impl, config will still be backward compatible
- tagFormat = smirastatsd.TagFormatDatadog
)
func NewStatsReporter(params StatsReporterParams) tally.StatsReporter {
- if params.Config.StatsD == nil {
+ switch {
+ case params.Config.StatsD != nil:
+ return newStatsDReporter(params.Config.StatsD, params.Lifecycle, params.Logger)
+ case params.Config.Prometheus != nil:
+ return newPrometheusReporter(params.Config.Prometheus, params.Lifecycle, params.Logger)
+ default:
return tally.NullStatsReporter
}
- cfg := params.Config.StatsD
- client := smirastatsd.NewClient(
- cfg.Address,
- smirastatsd.MetricPrefix(cfg.Prefix),
- smirastatsd.TagStyle(tagFormat),
- smirastatsd.ReportInterval(reportingInterval),
- )
- params.Logger.Info("initialized statsd client")
- params.Lifecycle.Append(fx.Hook{
- OnStop: func(ctx context.Context) error {
- return client.Close()
- },
- })
- return &reporter{
- client: client,
- }
-}
-
-func convertTags(tagsMap map[string]string) []smirastatsd.Tag {
- tags := make([]smirastatsd.Tag, 0, len(tagsMap))
- for key, value := range tagsMap {
- tags = append(tags, smirastatsd.StringTag(key, value))
- }
- return tags
-}
-
-func (r *reporter) ReportCounter(name string, tags map[string]string, value int64) {
- r.client.Incr(name, value, convertTags(tags)...)
-}
-
-func (r *reporter) ReportGauge(name string, tags map[string]string, value float64) {
- r.client.FGauge(name, value, convertTags(tags)...)
-}
-
-func (r *reporter) ReportTimer(name string, tags map[string]string, value time.Duration) {
- r.client.PrecisionTiming(name, value, convertTags(tags)...)
-}
-
-func (r *reporter) ReportHistogramValueSamples(
- name string,
- tags map[string]string,
- buckets tally.Buckets,
- bucketLowerBound,
- bucketUpperBound float64,
- samples int64) {
- panic("no implemented")
-}
-
-func (r *reporter) ReportHistogramDurationSamples(
- name string,
- tags map[string]string,
- buckets tally.Buckets,
- bucketLowerBound,
- bucketUpperBound time.Duration,
- samples int64) {
- panic("no implemented")
-}
-
-func (r *reporter) Capabilities() tally.Capabilities {
- return r
-}
-
-func (r *reporter) Reporting() bool {
- return true
-}
-
-func (r *reporter) Tagging() bool {
- return true
-}
-
-func (r *reporter) Flush() {
- // no-op
}
diff --git a/internal/tally/stats_reporter_test.go b/internal/tally/stats_reporter_test.go
index 834a1cf..cd26320 100644
--- a/internal/tally/stats_reporter_test.go
+++ b/internal/tally/stats_reporter_test.go
@@ -47,3 +47,28 @@ func TestNewReporterDefaultWithStatsD(t *testing.T) {
require.Equal(true, reporter.Capabilities().Tagging())
})
}
+
+func TestNewReporterDefaultWithPrometheus(t *testing.T) {
+ testapp.TestAllConfigs(t, func(t *testing.T, cfg *config.Config) {
+ require := testutil.Require(t)
+ cfg.Prometheus = &config.PrometheusConfig{
+ // use any available port
+ Port: 0,
+ }
+
+ var reporter tally.StatsReporter
+ app := testapp.New(
+ t,
+ testapp.WithConfig(cfg),
+ fx.Provide(NewStatsReporter),
+ fx.Populate(&reporter),
+ )
+
+ // close app after the test so that the port is released
+ t.Cleanup(app.Close)
+
+ require.NotEqual(tally.NullStatsReporter, reporter)
+ require.Equal(true, reporter.Capabilities().Reporting())
+ require.Equal(true, reporter.Capabilities().Tagging())
+ })
+}
diff --git a/internal/tally/statsd_reporter.go b/internal/tally/statsd_reporter.go
new file mode 100644
index 0000000..787d90d
--- /dev/null
+++ b/internal/tally/statsd_reporter.go
@@ -0,0 +1,103 @@
+package tally
+
+import (
+ "context"
+ "time"
+
+ smirastatsd "github.com/smira/go-statsd"
+ "github.com/uber-go/tally/v4"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+
+ "github.com/coinbase/chainstorage/internal/config"
+)
+
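+// statsDReporter implements tally.StatsReporter by forwarding metrics to a
+// statsd daemon via the smira/go-statsd client.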
+type statsDReporter struct {
+ client *smirastatsd.Client
+}
+
+func newStatsDReporter(
+ cfg *config.StatsDConfig,
+ lifecycle fx.Lifecycle,
+ logger *zap.Logger,
+) tally.StatsReporter {
+ // Hardcoded to the Datadog tag format. We need to think about the best way
+ // to express this in config so that it stays backward compatible when we
+ // switch reporter implementations.
+ tagFormat := smirastatsd.TagFormatDatadog
+
+ client := smirastatsd.NewClient(
+ cfg.Address,
+ smirastatsd.MetricPrefix(cfg.Prefix),
+ smirastatsd.TagStyle(tagFormat),
+ smirastatsd.ReportInterval(reportingInterval),
+ )
+ logger.Info("initialized statsd client")
+ lifecycle.Append(fx.Hook{
+ OnStop: func(ctx context.Context) error {
+ return client.Close()
+ },
+ })
+
+ return &statsDReporter{
+ client: client,
+ }
+}
+
+func (r *statsDReporter) convertTags(tagsMap map[string]string) []smirastatsd.Tag {
+ tags := make([]smirastatsd.Tag, 0, len(tagsMap))
+ for key, value := range tagsMap {
+ tags = append(tags, smirastatsd.StringTag(key, value))
+ }
+ return tags
+}
+
+func (r *statsDReporter) ReportCounter(name string, tags map[string]string, value int64) {
+ r.client.Incr(name, value, r.convertTags(tags)...)
+}
+
+func (r *statsDReporter) ReportGauge(name string, tags map[string]string, value float64) {
+ r.client.FGauge(name, value, r.convertTags(tags)...)
+}
+
+func (r *statsDReporter) ReportTimer(name string, tags map[string]string, value time.Duration) {
+ r.client.PrecisionTiming(name, value, r.convertTags(tags)...)
+}
+
+func (r *statsDReporter) ReportHistogramValueSamples(
+ name string,
+ tags map[string]string,
+ buckets tally.Buckets,
+ bucketLowerBound,
+ bucketUpperBound float64,
+ samples int64,
+) {
+ panic("no implemented")
+}
+
+func (r *statsDReporter) ReportHistogramDurationSamples(
+ name string,
+ tags map[string]string,
+ buckets tally.Buckets,
+ bucketLowerBound,
+ bucketUpperBound time.Duration,
+ samples int64,
+) {
+ panic("no implemented")
+}
+
+func (r *statsDReporter) Capabilities() tally.Capabilities {
+ return r
+}
+
+func (r *statsDReporter) Reporting() bool {
+ return true
+}
+
+func (r *statsDReporter) Tagging() bool {
+ return true
+}
+
+func (r *statsDReporter) Flush() {
+ // no-op
+}
diff --git a/internal/tally/tally.go b/internal/tally/tally.go
index 9d065cb..44090b4 100644
--- a/internal/tally/tally.go
+++ b/internal/tally/tally.go
@@ -2,6 +2,7 @@ package tally
import (
"context"
+ "time"
"github.com/uber-go/tally/v4"
"go.uber.org/fx"
@@ -10,6 +11,10 @@ import (
"github.com/coinbase/chainstorage/internal/utils/consts"
)
+const (
+ reportingInterval = time.Second
+)
+
type (
MetricParams struct {
fx.In
@@ -25,8 +30,8 @@ func NewRootScope(params MetricParams) tally.Scope {
Reporter: params.Reporter,
Tags: params.Config.GetCommonTags(),
}
- //report interval will be set on reporter
- scope, closer := tally.NewRootScope(opts, 0)
+ // report interval will be set on reporter
+ scope, closer := tally.NewRootScope(opts, reportingInterval)
params.Lifecycle.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return closer.Close()
diff --git a/internal/utils/fixtures/parser/tron/raw_block_header.json b/internal/utils/fixtures/parser/tron/raw_block_header.json
new file mode 100644
index 0000000..e3451e4
--- /dev/null
+++ b/internal/utils/fixtures/parser/tron/raw_block_header.json
@@ -0,0 +1,58 @@
+{
+ "baseFeePerGas": "0x0",
+ "difficulty": "0x0",
+ "extraData": "0x",
+ "gasLimit": "0x2b3b43dc6",
+ "gasUsed": "0xb1006d",
+ "hash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x8b0359acac03bac62cbf89c4b787cb10b3c3f513",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x4034f5c",
+ "parentHash": "0x0000000004034f5b43c5934257b3d1f1a313bba4af0a4dd2f778fda9e641b615",
+ "receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "sha3Uncles": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "size": "0x1a366",
+ "stateRoot": "0x",
+ "timestamp": "0x6745cb8a",
+ "totalDifficulty": "0x0",
+ "transactions": [
+ {
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "from": "0x25a51e3e65287539b8d4eb559cbca4488a08bb00",
+ "gas": "0x1fced",
+ "gasPrice": "0xd2",
+ "hash": "0xd581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ "input": "0xa9059cbb0000000000000000000000009dc5da2b3c502661c8448ba88bacf7f0b22272ad0000000000000000000000000000000000000000000000000000000000027165",
+ "nonce": "0x0000000000000000",
+ "r": "0x8178c20b4100cdab4eadd22cefb4944504b51272d6693a4e5b4a00ae8b237313",
+ "s": "0x36acd444b8e94dc157824da1aba4325df38e2c8e806826f4c71b06148e88dd91",
+ "to": "0xa614f803b6fd780986a42c78ec9c7f77e6ded13c",
+ "transactionIndex": "0x0",
+ "type": "0x0",
+ "v": "0x1c",
+ "value": "0x0"
+ },
+ {
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "from": "0x89ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "gas": "0x12197",
+ "gasPrice": "0xd2",
+ "hash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "input": "0xaf6f48960000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6000000000000000000000000000000000000000000000000000000bf9e4899ba0000000000000000000000000000000000000000000000000000000000000001",
+ "nonce": "0x0000000000000000",
+ "r": "0xe30301c81bcbdf7e69116543964366b84bd34606115cc5cae96927fb5214a6ea",
+ "s": "0x219db63879a044df44b855f6e481398942c9d5ab774a2a1fae16d3646f418e1f",
+ "to": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "transactionIndex": "0x45",
+ "type": "0x0",
+ "v": "0x1b",
+ "value": "0x0"
+ }
+ ],
+ "transactionsRoot": "0xd270690faa58558c2b03ae600334f71f9d5a0ad42d7313852fb3742e8576eec9",
+ "uncles": []
+}
\ No newline at end of file
diff --git a/internal/utils/fixtures/parser/tron/raw_block_trace_tx_info.json b/internal/utils/fixtures/parser/tron/raw_block_trace_tx_info.json
new file mode 100644
index 0000000..99d0dff
--- /dev/null
+++ b/internal/utils/fixtures/parser/tron/raw_block_trace_tx_info.json
@@ -0,0 +1,193 @@
+[
+ {
+ "type": "TransferContract",
+ "log": [
+ {
+ "address": "a614f803b6fd780986a42c78ec9c7f77e6ded13c",
+ "data": "0000000000000000000000000000000000000000000000000000000000027165",
+ "topics": [
+ "ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "00000000000000000000000025a51e3e65287539b8d4eb559cbca4488a08bb00",
+ "0000000000000000000000009dc5da2b3c502661c8448ba88bacf7f0b22272ad"
+ ]
+ }
+ ],
+ "blockNumber": 67325788,
+ "contractResult": [
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ ],
+ "blockTimeStamp": 1732627338000,
+ "receipt": {
+ "result": "SUCCESS",
+ "energy_penalty_total": 100635,
+ "energy_usage": 130285,
+ "energy_usage_total": 130285,
+ "net_usage": 345
+ },
+ "id": "d581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ "contract_address": "41a614f803b6fd780986a42c78ec9c7f77e6ded13c"
+ },
+ {
+ "type": "TransferAssetContract",
+ "log": [
+ {
+ "address": "c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "data": "00000000000000000000000000000000000000000000000000000001f9873bc7000000000000000000000000000000000000000000000000093732ae413feb69000000000000000000000000000000000000000000000000093732b42dd59ebe0000000000000000000000000000000000000000000000000000801f33d9f651000000000000000000000000000000000000000000000000000000000036b158",
+ "topics": [
+ "da6e3523d5765dedff9534b488c7e508318178571c144293451989755e9379e7",
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ ]
+ },
+ {
+ "address": "c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "data": "000000000000000000000000000000000000000000000000093732a856669e8f000000000000000000000000000000000000000000000000093732b42dd59ebe000000000000000000000000000000000000000000000000000000bf9e4899ba000000000000000000000000000000000000000000000000000000000000a3810000000000000000000000000000000000000000000000000000000000000000",
+ "topics": [
+ "74fed619850adf4ba83cfb92b9566b424e3de6de4d9a7adc3b1909ea58421a55",
+ "00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ ]
+ },
+ {
+ "address": "c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "data": "000000000000000000000000000000000000000000000000000000bf9e4899ba",
+ "topics": [
+ "f2def54ec5eba61fd8f18d019c7beaf6a47df317fb798b3263ad69ec227c9261",
+ "00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ ]
+ },
+ {
+ "address": "c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "data": "000000000000000000000000000000000000000000000000000000bf9e4899ba0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000c032ffd0000000000000000000000000000000000000000000000000000000054e4691a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000093732b42dd59ebe",
+ "topics": [
+ "f7e21d5bf17851f93ab7bda7e390841620f59dfbe9d86add32824f33bd40d3f5",
+ "00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6"
+ ]
+ }
+ ],
+ "blockNumber": 67325788,
+ "contractResult": [
+ "0000000000000000000000000000000000000000000000000000000054e4691a"
+ ],
+ "blockTimeStamp": 1732627338000,
+ "fee": 379,
+ "receipt": {
+ "result": "SUCCESS",
+ "energy_usage": 68976,
+ "energy_usage_total": 74135,
+ "origin_energy_usage": 5159,
+ "net_fee": 379
+ },
+ "id": "e14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "contract_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "internal_transactions": [
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "callValueInfo": [
+ {
+ "callValue": 100
+ },
+ {
+ "callValue": 100
+ }
+ ],
+ "hash": "499bdbdfaae021dd510c70b433bc48d88d8ca6e0b7aee13ce6d726114e365aaf"
+ },
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "41e8667633c747066c70672c58207cc745a9860527",
+ "callValueInfo": [
+ {
+ "tokenId": "1004777",
+ "callValue": 1000000000000000
+ },
+ {
+ "callValue": 1000
+ }
+ ],
+ "hash": "997225b56440a9bd172f05f44a663830b72093a12502551cda99b0bc7c60cbc1"
+ },
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "41e8667633c747066c70672c58207cc745a9860527",
+ "callValueInfo": [
+ {
+ "tokenId": "1004777",
+ "callValue": 1000
+ },
+ {
+ "tokenId": "1004777",
+ "callValue": 100
+ }
+ ],
+ "hash": "7ac8dd16dede5c512330f5033c8fd6f5390d742aa51b805f805098109eb54fe9"
+ },
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "41c64e69acde1c7b16c2a3efcdbbdaa96c3644c2b3",
+ "callValueInfo": [
+ {
+ "tokenId": "1004777",
+ "callValue": 100
+ },
+ {
+ "callValue": 100000
+ }
+ ],
+ "hash": "cf6f699d9bdae8aa25fae310a06bb60a29a7812548cf3c1d83c737fd1a22c0ee"
+ },
+ {
+ "caller_address": "41c64e69acde1c7b16c2a3efcdbbdaa96c3644c2b3",
+ "note": "63616c6c",
+ "transferTo_address": "41c64e69acde1c7b16c2a3efcdbbdaa96c3644c2b3",
+ "callValueInfo": [
+ {}
+ ],
+ "hash": "95787b9a6558c7b6b624d0c1bece9723a7f4c3d414010b6ac105ae5f5aebffbc"
+ },
+ {
+ "caller_address": "41c64e69acde1c7b16c2a3efcdbbdaa96c3644c2b3",
+ "note": "756e44656c65676174655265736f757263654f66456e65726779",
+ "transferTo_address": "414d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "callValueInfo": [
+ {
+ "callValue": 822994311610
+ },
+ {
+ "callValue": 2000000
+ }
+
+ ],
+ "hash": "14526162e31d969ef0dca9b902d51ecc0ffab87dc936dce62022f368119043af"
+ },
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "41e8667633c747066c70672c58207cc745a9860527",
+ "callValueInfo": [
+ {}
+ ],
+ "hash": "8e088220a26ca8d794786e78096e71259cf8744cccdc4f07a8129aa8ee29bb98"
+ },
+ {
+ "caller_address": "41c60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "note": "63616c6c",
+ "transferTo_address": "4189ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "callValueInfo": [
+ {
+ "callValue": 1424255258
+ }
+ ],
+ "hash": "83b1d41ba953aab4da6e474147f647599ea53bb3213306897127b57e85ddd1ca"
+ }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/internal/utils/fixtures/parser/tron/raw_block_tx_receipt.json b/internal/utils/fixtures/parser/tron/raw_block_tx_receipt.json
new file mode 100644
index 0000000..e5f9a5b
--- /dev/null
+++ b/internal/utils/fixtures/parser/tron/raw_block_tx_receipt.json
@@ -0,0 +1,112 @@
+[
+ {
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x1fced",
+ "effectiveGasPrice": "0xd2",
+ "from": "0x25a51e3e65287539b8d4eb559cbca4488a08bb00",
+ "gasUsed": "0x1fced",
+ "logs": [
+ {
+ "address": "0xa614f803b6fd780986a42c78ec9c7f77e6ded13c",
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "data": "0x0000000000000000000000000000000000000000000000000000000000027165",
+ "logIndex": "0x0",
+ "removed": false,
+ "topics": [
+ "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "0x00000000000000000000000025a51e3e65287539b8d4eb559cbca4488a08bb00",
+ "0x0000000000000000000000009dc5da2b3c502661c8448ba88bacf7f0b22272ad"
+ ],
+ "transactionHash": "0xd581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": "0xa614f803b6fd780986a42c78ec9c7f77e6ded13c",
+ "transactionHash": "0xd581afa9158fbed69fb10d6a2245ad45d912a3da03ff24d59f3d2f6df6fd9529",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ },
+ {
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x15dc77",
+ "effectiveGasPrice": "0xd2",
+ "from": "0x89ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "gasUsed": "0x12197",
+ "logs": [
+ {
+ "address": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "data": "0x00000000000000000000000000000000000000000000000000000001f9873bc7000000000000000000000000000000000000000000000000093732ae413feb69000000000000000000000000000000000000000000000000093732b42dd59ebe0000000000000000000000000000000000000000000000000000801f33d9f651000000000000000000000000000000000000000000000000000000000036b158",
+ "logIndex": "0x10",
+ "removed": false,
+ "topics": [
+ "0xda6e3523d5765dedff9534b488c7e508318178571c144293451989755e9379e7",
+ "0x0000000000000000000000000000000000000000000000000000000000000001"
+ ],
+ "transactionHash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "transactionIndex": "0x45"
+ },
+ {
+ "address": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "data": "0x000000000000000000000000000000000000000000000000093732a856669e8f000000000000000000000000000000000000000000000000093732b42dd59ebe000000000000000000000000000000000000000000000000000000bf9e4899ba000000000000000000000000000000000000000000000000000000000000a3810000000000000000000000000000000000000000000000000000000000000000",
+ "logIndex": "0x11",
+ "removed": false,
+ "topics": [
+ "0x74fed619850adf4ba83cfb92b9566b424e3de6de4d9a7adc3b1909ea58421a55",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0x0000000000000000000000000000000000000000000000000000000000000001"
+ ],
+ "transactionHash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "transactionIndex": "0x45"
+ },
+ {
+ "address": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "data": "0x000000000000000000000000000000000000000000000000000000bf9e4899ba",
+ "logIndex": "0x12",
+ "removed": false,
+ "topics": [
+ "0xf2def54ec5eba61fd8f18d019c7beaf6a47df317fb798b3263ad69ec227c9261",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6",
+ "0x0000000000000000000000000000000000000000000000000000000000000001"
+ ],
+ "transactionHash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "transactionIndex": "0x45"
+ },
+ {
+ "address": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "blockHash": "0x0000000004034f5cd8946001c721db6457608ad887b3734c825d55826c3c3c87",
+ "blockNumber": "0x4034f5c",
+ "data": "0x000000000000000000000000000000000000000000000000000000bf9e4899ba0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000c032ffd0000000000000000000000000000000000000000000000000000000054e4691a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000093732b42dd59ebe",
+ "logIndex": "0x13",
+ "removed": false,
+ "topics": [
+ "0xf7e21d5bf17851f93ab7bda7e390841620f59dfbe9d86add32824f33bd40d3f5",
+ "0x00000000000000000000000089ae01b878dffc8088222adf1fb08ebadfeea53a",
+ "0x0000000000000000000000004d12f87c18a914dddbc2b27f378ad126a79b76b6"
+ ],
+ "transactionHash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "transactionIndex": "0x45"
+ }
+ ],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": "0xc60a6f5c81431c97ed01b61698b6853557f3afd4",
+ "transactionHash": "0xe14935e6144007163609bb49292897ba81bf7ee93bf28ba4cc5ebd0d6b95f4b9",
+ "transactionIndex": "0x45",
+ "type": "0x0"
+ }
+]
\ No newline at end of file
diff --git a/internal/utils/instrument/instrument.go b/internal/utils/instrument/instrument.go
index ada8e15..cd4961d 100644
--- a/internal/utils/instrument/instrument.go
+++ b/internal/utils/instrument/instrument.go
@@ -244,7 +244,7 @@ func (i *instrumentWithResult[T]) onSuccess(logger *zap.Logger, span tracer.Span
func (i *instrumentWithResult[T]) onSuccessWithFilter(logger *zap.Logger, span tracer.Span, finishTime time.Time, err error) {
i.successWithFilter.Inc(1)
- logger.Info(i.loggerMsg, zap.Error(err))
+ logger.Debug(i.loggerMsg, zap.Error(err))
span.Finish(tracer.FinishTime(finishTime), tracer.WithError(err))
}
diff --git a/internal/workflow/activity/activity.go b/internal/workflow/activity/activity.go
index 62545d3..323ebe3 100644
--- a/internal/workflow/activity/activity.go
+++ b/internal/workflow/activity/activity.go
@@ -28,6 +28,7 @@ const (
ActivityEventLoader = "activity.event_loader"
ActivityReplicator = "activity.replicator"
ActivityUpdateWatermark = "activity.update_watermark"
+ ActivityLatestBlock = "activity.latest_block"
loggerMsg = "activity.request"
diff --git a/internal/workflow/activity/latest_block.go b/internal/workflow/activity/latest_block.go
new file mode 100644
index 0000000..99d0256
--- /dev/null
+++ b/internal/workflow/activity/latest_block.go
@@ -0,0 +1,78 @@
+package activity
+
+import (
+ "context"
+
+ "go.temporal.io/sdk/workflow"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/gateway"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
+type (
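+ // LatestBlock is an activity that fetches the latest block from the
+ // ChainStorage gateway client.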
+ LatestBlock struct {
+ baseActivity
+ config *config.Config
+ logger *zap.Logger
+ client gateway.Client
+ }
+
+ LatestBlockParams struct {
+ fx.In
+ fxparams.Params
+ Runtime cadence.Runtime
+ Client gateway.Client
+ }
+
+ LatestBlockRequest struct {
+ }
+
+ LatestBlockResponse struct {
+ Height uint64
+ }
+)
+
+func NewLatestBlock(params LatestBlockParams) *LatestBlock {
+ r := &LatestBlock{
+ baseActivity: newBaseActivity(ActivityLatestBlock, params.Runtime),
+ config: params.Config,
+ logger: params.Logger,
+ client: params.Client,
+ }
+ r.register(r.execute)
+ return r
+}
+
+func (r *LatestBlock) Execute(ctx workflow.Context, request *LatestBlockRequest) (*LatestBlockResponse, error) {
+ var response LatestBlockResponse
+ err := r.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (r *LatestBlock) execute(ctx context.Context, request *LatestBlockRequest) (*LatestBlockResponse, error) {
+ if err := r.validateRequest(request); err != nil {
+ return nil, err
+ }
+
+ logger := r.getLogger(ctx).With(zap.Reflect("request", request))
+
+ latestBlock, err := r.client.GetLatestBlock(ctx, &api.GetLatestBlockRequest{})
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get chainstorage latest block: %w", err)
+ }
+
+ logger.Debug("GetLatestBlock",
+ zap.Uint64("height", latestBlock.GetHeight()),
+ zap.String("hash", latestBlock.GetHash()),
+ )
+
+ return &LatestBlockResponse{
+ Height: latestBlock.GetHeight(),
+ }, nil
+}
diff --git a/internal/workflow/activity/migrator.go b/internal/workflow/activity/migrator.go
new file mode 100644
index 0000000..aa54801
--- /dev/null
+++ b/internal/workflow/activity/migrator.go
@@ -0,0 +1,913 @@
+package activity
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/uber-go/tally/v4"
+ "go.temporal.io/sdk/activity"
+ "go.temporal.io/sdk/workflow"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage"
+ dynamodb_storage "github.com/coinbase/chainstorage/internal/storage/metastorage/dynamodb"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ postgres_storage "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
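+// Migrator and its sibling activities copy block metadata and events from
+// DynamoDB (source) to PostgreSQL (destination), driven by event sequence
+// ranges rather than block heights.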
+type (
+ Migrator struct {
+ baseActivity
+ config *config.Config
+ session *session.Session
+ dynamoClient *dynamodb.DynamoDB
+ blockTable string
+ metrics tally.Scope
+ }
+
+ BlockToMigrate struct {
+ Height uint64
+ Hash string
+ ParentHash string
+ EventSeq int64 // Event sequence for ordering
+ Skipped bool // Whether this is a skipped block
+ }
+
+ GetLatestBlockHeightActivity struct {
+ baseActivity
+ config *config.Config
+ session *session.Session
+ dynamoClient *dynamodb.DynamoDB
+ blockTable string
+ metrics tally.Scope
+ }
+
+ GetLatestBlockFromPostgresActivity struct {
+ baseActivity
+ config *config.Config
+ metrics tally.Scope
+ }
+
+ GetLatestEventFromPostgresActivity struct {
+ baseActivity
+ config *config.Config
+ metrics tally.Scope
+ }
+
+ GetMaxEventIdActivity struct {
+ baseActivity
+ config *config.Config
+ session *session.Session
+ metrics tally.Scope
+ }
+
+ MigratorParams struct {
+ fx.In
+ fxparams.Params
+ Runtime cadence.Runtime
+ Session *session.Session
+ }
+
+ MigratorRequest struct {
+ StartEventSequence int64 // Start event sequence (inclusive); replaces the old StartHeight
+ EndEventSequence int64 // End event sequence (exclusive)
+ EventTag uint32
+ Tag uint32
+ Parallelism int // Number of concurrent workers
+ }
+
+ MigratorResponse struct {
+ BlocksMigrated int
+ EventsMigrated int
+ Success bool
+ Message string
+ }
+
+ MigrationData struct {
+ SourceStorage metastorage.MetaStorage
+ DestStorage metastorage.MetaStorage
+ Config *config.Config
+ DynamoClient *dynamodb.DynamoDB
+ BlockTable string
+ }
+)
+
+func NewMigrator(params MigratorParams) *Migrator {
+ a := &Migrator{
+ baseActivity: newBaseActivity(ActivityMigrator, params.Runtime),
+ config: params.Config,
+ session: params.Session,
+ dynamoClient: dynamodb.New(params.Session),
+ blockTable: params.Config.AWS.DynamoDB.BlockTable,
+ metrics: params.Metrics,
+ }
+ a.register(a.execute)
+ return a
+}
+
+func NewGetLatestBlockHeightActivity(params MigratorParams) *GetLatestBlockHeightActivity {
+ a := &GetLatestBlockHeightActivity{
+ baseActivity: newBaseActivity(ActivityGetLatestBlockHeight, params.Runtime),
+ config: params.Config,
+ session: params.Session,
+ dynamoClient: dynamodb.New(params.Session),
+ blockTable: params.Config.AWS.DynamoDB.BlockTable,
+ metrics: params.Metrics,
+ }
+ a.register(a.execute)
+ return a
+}
+
+func NewGetLatestBlockFromPostgresActivity(params MigratorParams) *GetLatestBlockFromPostgresActivity {
+ a := &GetLatestBlockFromPostgresActivity{
+ baseActivity: newBaseActivity(ActivityGetLatestBlockFromPostgres, params.Runtime),
+ config: params.Config,
+ metrics: params.Metrics,
+ }
+ a.register(a.execute)
+ return a
+}
+
+func NewGetLatestEventFromPostgresActivity(params MigratorParams) *GetLatestEventFromPostgresActivity {
+ a := &GetLatestEventFromPostgresActivity{
+ baseActivity: newBaseActivity(ActivityGetLatestEventFromPostgres, params.Runtime),
+ config: params.Config,
+ metrics: params.Metrics,
+ }
+ a.register(a.execute)
+ return a
+}
+
+func NewGetMaxEventIdActivity(params MigratorParams) *GetMaxEventIdActivity {
+ a := &GetMaxEventIdActivity{
+ baseActivity: newBaseActivity(ActivityGetMaxEventId, params.Runtime),
+ config: params.Config,
+ session: params.Session,
+ metrics: params.Metrics,
+ }
+ a.register(a.execute)
+ return a
+}
+
+func (a *Migrator) Execute(ctx workflow.Context, request *MigratorRequest) (*MigratorResponse, error) {
+ var response MigratorResponse
+ err := a.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (a *Migrator) execute(ctx context.Context, request *MigratorRequest) (*MigratorResponse, error) {
+ startTime := time.Now()
+ logger := a.getLogger(ctx).With(
+ zap.Int64("startEventSequence", request.StartEventSequence),
+ zap.Int64("endEventSequence", request.EndEventSequence),
+ zap.Uint32("eventTag", request.EventTag),
+ zap.Int("parallelism", request.Parallelism))
+
+ logger.Info("Starting event-driven migration")
+
+ // Validate event sequence range
+ if request.EndEventSequence < request.StartEventSequence {
+ return nil, xerrors.Errorf("invalid request: EndEventSequence (%d) cannot be less than StartEventSequence (%d)",
+ request.EndEventSequence, request.StartEventSequence)
+ }
+
+ // Add heartbeat mechanism - send heartbeat every 10 seconds to avoid timeout
+ heartbeatTicker := time.NewTicker(10 * time.Second)
+ defer heartbeatTicker.Stop()
+
+ go func() {
+ // Select on both channels so the goroutine exits when the context is
+ // canceled; ranging over a stopped ticker channel would block forever.
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-heartbeatTicker.C:
+ activity.RecordHeartbeat(ctx, fmt.Sprintf("Processing events [%d, %d), elapsed: %v",
+ request.StartEventSequence, request.EndEventSequence, time.Since(startTime)))
+ }
+ }
+ }()
+
+ // Create storage instances
+ migrationData, err := a.createStorageInstances(ctx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create storage instances: %w", err)
+ }
+
+ // Step 1: Fetch events by sequence (with parallelism)
+ events, err := a.fetchEventsBySequence(ctx, logger, migrationData, request)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to fetch events: %w", err)
+ }
+
+ if len(events) == 0 {
+ logger.Info("No events found in sequence range")
+ return &MigratorResponse{
+ Success: true,
+ Message: "No events to migrate",
+ }, nil
+ }
+
+ // Step 2: Split events into segments and migrate blocks
+ blocksMigrated, err := a.migrateExtractedBlocks(ctx, logger, migrationData, request, events)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to migrate blocks: %w", err)
+ }
+
+ // Step 3: Migrate events
+ eventsMigrated := 0
+ if len(events) > 0 {
+ eventsMigrated, err = a.persistEvents(ctx, logger, migrationData, request, events)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to migrate events: %w", err)
+ }
+ }
+
+ duration := time.Since(startTime)
+ logger.Info("Event-driven migration completed",
+ zap.Int("blocksMigrated", blocksMigrated),
+ zap.Int("eventsMigrated", eventsMigrated),
+ zap.Duration("duration", duration))
+
+ return &MigratorResponse{
+ BlocksMigrated: blocksMigrated,
+ EventsMigrated: eventsMigrated,
+ Success: true,
+ Message: fmt.Sprintf("Migrated %d blocks and %d events in %v",
+ blocksMigrated, eventsMigrated, duration),
+ }, nil
+}
+
+func (a *Migrator) createStorageInstances(ctx context.Context) (*MigrationData, error) {
+ logger := a.getLogger(ctx)
+
+ // Create DynamoDB storage directly
+ dynamoDBParams := dynamodb_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ Session: a.session,
+ }
+ sourceResult, err := dynamodb_storage.NewMetaStorage(dynamoDBParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create DynamoDB storage: %w", err)
+ }
+
+ // Create PostgreSQL storage using shared connection pool
+ postgresParams := postgres_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ }
+ destResult, err := postgres_storage.NewMetaStorage(postgresParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create PostgreSQL storage: %w", err)
+ }
+
+ return &MigrationData{
+ SourceStorage: sourceResult.MetaStorage,
+ DestStorage: destResult.MetaStorage,
+ Config: a.config,
+ DynamoClient: a.dynamoClient,
+ BlockTable: a.blockTable,
+ }, nil
+}
+
+func (a *Migrator) fetchEventsBySequence(ctx context.Context, logger *zap.Logger,
+ data *MigrationData, request *MigratorRequest) ([]*model.EventEntry, error) {
+
+ startSeq := request.StartEventSequence
+ endSeq := request.EndEventSequence
+ totalSequences := endSeq - startSeq
+
+ logger.Info("Fetching events by sequence range",
+ zap.Int64("startSeq", startSeq),
+ zap.Int64("endSeq", endSeq),
+ zap.Int64("totalSequences", totalSequences))
+
+ // Record heartbeat before starting the fetch
+ activity.RecordHeartbeat(ctx, fmt.Sprintf("Starting to fetch events [%d, %d)", startSeq, endSeq))
+
+ // GetEventsByEventIdRange already handles the fetching efficiently internally
+ // No need for additional batching/parallelism as DynamoDB Query operation
+ // is already optimized for sequential range scans
+ events, err := data.SourceStorage.GetEventsByEventIdRange(
+ ctx, request.EventTag, startSeq, endSeq)
+
+ if err != nil {
+ // Events must be continuous - any missing events indicate data integrity issues
+ return nil, xerrors.Errorf("failed to fetch events [%d,%d): %w",
+ startSeq, endSeq, err)
+ }
+
+ // Record heartbeat after fetch completes
+ activity.RecordHeartbeat(ctx, fmt.Sprintf("Fetched %d events from range [%d, %d)",
+ len(events), startSeq, endSeq))
+
+ // Validate we got the expected number of events
+ expectedCount := int(totalSequences)
+ if len(events) != expectedCount {
+ return nil, xerrors.Errorf("missing events: expected %d events but got %d for range [%d,%d)",
+ expectedCount, len(events), startSeq, endSeq)
+ }
+
+ // Sort events by EventId to ensure proper ordering for gap validation
+ // DynamoDB Query should return sorted results, but we sort to be defensive
+ sort.Slice(events, func(i, j int) bool {
+ return events[i].EventId < events[j].EventId
+ })
+
+ // Validate event IDs are continuous (no gaps)
+ for i, event := range events {
+ expectedEventId := startSeq + int64(i)
+ if event.EventId != expectedEventId {
+ return nil, xerrors.Errorf("gap in event sequence: expected event ID %d but got %d at index %d",
+ expectedEventId, event.EventId, i)
+ }
+ }
+
+ logger.Info("Fetched events successfully",
+ zap.Int("totalEvents", len(events)))
+
+ return events, nil
+}
+
+func (a *Migrator) migrateExtractedBlocks(ctx context.Context, logger *zap.Logger,
+ data *MigrationData, request *MigratorRequest,
+ events []*model.EventEntry) (int, error) {
+
+ // Build segments directly from events
+ segments := a.buildSegmentsFromEvents(logger, events)
+
+ // If no segments were created (edge case), return
+ if len(segments) == 0 {
+ logger.Warn("No segments created from blocks")
+ return 0, nil
+ }
+
+ totalBlocksPersistedCount := 0
+
+ // Process each segment
+ for segmentIdx, segment := range segments {
+ if len(segment.Blocks) == 0 {
+ continue
+ }
+
+ logger.Info("Processing block segment",
+ zap.Int("segmentIndex", segmentIdx),
+ zap.Int("blockCount", len(segment.Blocks)))
+
+ // Fetch actual block data from DynamoDB for this segment (with order preservation)
+ segmentBlocksWithInfo, err := a.fetchBlockData(ctx, logger, data, request, segment.Blocks)
+ if err != nil {
+ return totalBlocksPersistedCount, xerrors.Errorf("failed to fetch block data for segment %d: %w", segmentIdx, err)
+ }
+
+ // Convert to BlockMetadata slice
+ segmentBlocks := make([]*api.BlockMetadata, len(segmentBlocksWithInfo))
+ for i, blockWithInfo := range segmentBlocksWithInfo {
+ segmentBlocks[i] = blockWithInfo.BlockMetadata
+ }
+
+ // Get parent block for validation if not genesis
+ var lastBlock *api.BlockMetadata
+ if len(segmentBlocks) > 0 && segmentBlocks[0].Height > 0 {
+ parentHeight := segmentBlocks[0].Height - 1
+ parentBlock, err := data.DestStorage.GetBlockByHeight(ctx, request.Tag, parentHeight)
+ if err == nil {
+ lastBlock = parentBlock
+ } else if !errors.Is(err, storage.ErrItemNotFound) {
+ logger.Warn("Could not fetch parent block",
+ zap.Uint64("parentHeight", parentHeight),
+ zap.Error(err))
+ }
+ }
+
+ // Bulk persist the segment with validation
+ err = data.DestStorage.PersistBlockMetas(ctx, false, segmentBlocks, lastBlock)
+ if err != nil {
+ logger.Error("Failed to persist segment",
+ zap.Int("segmentIndex", segmentIdx),
+ zap.Int("blockCount", len(segmentBlocks)),
+ zap.Error(err))
+ return totalBlocksPersistedCount, xerrors.Errorf("failed to persist segment %d: %w", segmentIdx, err)
+ }
+
+ totalBlocksPersistedCount += len(segmentBlocks)
+
+ // Record heartbeat if in activity context
+ if activity.IsActivity(ctx) {
+ activity.RecordHeartbeat(ctx, fmt.Sprintf("Migrated segment %d/%d: %d blocks (total: %d)",
+ segmentIdx+1, len(segments), len(segmentBlocks), totalBlocksPersistedCount))
+ }
+ }
+
+ logger.Info("All segments migrated successfully",
+ zap.Int("totalSegments", len(segments)),
+ zap.Int("totalBlocksPersisted", totalBlocksPersistedCount))
+
+ return totalBlocksPersistedCount, nil
+}
+
+// BlockSegment represents a continuous segment of blocks to persist together
+type BlockSegment struct {
+ Blocks []BlockToMigrate
+}
+
+// buildSegmentsFromEvents creates block segments from events, starting new segments after BLOCK_REMOVED events
+func (a *Migrator) buildSegmentsFromEvents(logger *zap.Logger,
+ events []*model.EventEntry) []*BlockSegment {
+
+ var segments []*BlockSegment
+ var currentSegment *BlockSegment = nil
+
+ // Process events in order (already sorted by EventSeq)
+ for _, event := range events {
+ switch event.EventType {
+ case api.BlockchainEvent_BLOCK_REMOVED:
+ // A reorg removed this block: end the current segment so that later
+ // BLOCK_ADDED events start a fresh, contiguous segment.
+ currentSegment = nil
+
+ case api.BlockchainEvent_BLOCK_ADDED:
+ if currentSegment == nil {
+ // Create new segment
+ currentSegment = &BlockSegment{
+ Blocks: []BlockToMigrate{},
+ }
+ segments = append(segments, currentSegment)
+ }
+ // Create block directly from event and append to current segment
+ currentSegment.Blocks = append(currentSegment.Blocks, BlockToMigrate{
+ Height: event.BlockHeight,
+ Hash: event.BlockHash,
+ ParentHash: event.ParentHash,
+ EventSeq: event.EventId,
+ Skipped: event.BlockSkipped,
+ })
+ }
+ }
+
+ // Log segments for debugging
+ for i, segment := range segments {
+ if len(segment.Blocks) > 0 {
+ logger.Debug("Block segment",
+ zap.Int("index", i),
+ zap.Int("blocks", len(segment.Blocks)),
+ zap.Uint64("firstHeight", segment.Blocks[0].Height),
+ zap.Uint64("lastHeight", segment.Blocks[len(segment.Blocks)-1].Height))
+ }
+ }
+
+ return segments
+}
+
+type BlockWithInfo struct {
+ *api.BlockMetadata
+ Height uint64
+ Hash string
+ EventSeq int64
+}
+
+// WorkItem for parallel fetching with index preservation
+type WorkItem struct {
+ Block BlockToMigrate
+ Index int
+}
+
+func (a *Migrator) fetchBlockData(ctx context.Context, logger *zap.Logger,
+ data *MigrationData, request *MigratorRequest,
+ blocksToMigrate []BlockToMigrate) ([]*BlockWithInfo, error) {
+
+ if len(blocksToMigrate) == 0 {
+ return nil, nil
+ }
+
+ // Determine parallelism
+ parallelism := request.Parallelism
+ if parallelism <= 0 {
+ parallelism = 1
+ }
+ if parallelism > len(blocksToMigrate) {
+ parallelism = 1
+ }
+
+ logger.Debug("Fetching block data in parallel",
+ zap.Int("totalBlocks", len(blocksToMigrate)),
+ zap.Int("parallelism", parallelism))
+
+ // Pre-allocate result array
+ results := make([]*BlockWithInfo, len(blocksToMigrate))
+
+ // Channel for work items (index, block)
+ workChan := make(chan WorkItem, len(blocksToMigrate))
+
+ // Send work items with their index
+ for i, block := range blocksToMigrate {
+ workChan <- WorkItem{Block: block, Index: i}
+ }
+ close(workChan)
+
+ // Worker function - fetches block and writes directly to results[index]
+ var wg sync.WaitGroup
+ for i := 0; i < parallelism; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for item := range workChan {
+ var blockMeta *api.BlockMetadata
+
+ // For skipped blocks, create metadata directly
+ if item.Block.Skipped {
+ blockMeta = &api.BlockMetadata{
+ Tag: request.Tag,
+ Height: item.Block.Height,
+ Skipped: true,
+ Hash: "",
+ ParentHash: "",
+ ParentHeight: 0,
+ Timestamp: nil,
+ }
+ } else {
+ // For regular blocks, fetch from source storage
+ var err error
+ blockMeta, err = data.SourceStorage.GetBlockByHash(ctx, request.Tag, item.Block.Height, item.Block.Hash)
+ if err != nil {
+ logger.Warn("Failed to fetch block by hash",
+ zap.Uint64("height", item.Block.Height),
+ zap.String("hash", item.Block.Hash),
+ zap.Error(err))
+ continue // Skip this block
+ }
+ }
+
+ // Write directly to the correct index
+ results[item.Index] = &BlockWithInfo{
+ BlockMetadata: blockMeta,
+ Height: item.Block.Height,
+ Hash: item.Block.Hash,
+ EventSeq: item.Block.EventSeq,
+ }
+ }
+ }()
+ }
+
+ // Wait for all workers to complete
+ wg.Wait()
+
+ // Filter out nils (from failed fetches) and count skipped
+ var validResults []*BlockWithInfo
+ skippedCount := 0
+ for _, blockInfo := range results {
+ if blockInfo != nil {
+ validResults = append(validResults, blockInfo)
+ if blockInfo.BlockMetadata != nil && blockInfo.BlockMetadata.Skipped {
+ skippedCount++
+ }
+ }
+ }
+
+ logger.Debug("Completed parallel block data fetch",
+ zap.Int("requested", len(blocksToMigrate)),
+ zap.Int("fetched", len(validResults)),
+ zap.Int("skippedBlocks", skippedCount))
+
+ return validResults, nil
+}
+
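+// persistEvents writes the batch of event entries to the destination storage
+// in a single call, logging any gaps in the event ID sequence for triage.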
+func (a *Migrator) persistEvents(ctx context.Context, logger *zap.Logger,
+ data *MigrationData, request *MigratorRequest,
+ events []*model.EventEntry) (int, error) {
+
+ if len(events) == 0 {
+ return 0, nil
+ }
+
+ // Events are already sorted by EventId from fetchEventsBySequence
+ firstId := events[0].EventId
+ lastId := events[len(events)-1].EventId
+
+ logger.Info("Persisting events batch",
+ zap.Int("totalEvents", len(events)),
+ zap.Int64("firstEventId", firstId),
+ zap.Int64("lastEventId", lastId))
+
+ // Check for gaps in event sequences (for debugging)
+ var gaps []string
+ for i := 1; i < len(events); i++ {
+ if events[i].EventId != events[i-1].EventId+1 {
+ gap := fmt.Sprintf("position=%d: %d->%d (gap=%d)",
+ i, events[i-1].EventId, events[i].EventId,
+ events[i].EventId-events[i-1].EventId-1)
+ gaps = append(gaps, gap)
+ }
+ }
+ if len(gaps) > 0 {
+ logger.Warn("Found gaps in event sequences",
+ zap.Strings("gaps", gaps))
+ }
+
+ // Persist all events in a single call
+ persistStart := time.Now()
+ if err := data.DestStorage.AddEventEntries(ctx, request.EventTag, events); err != nil {
+ return 0, xerrors.Errorf("failed to persist events: %w", err)
+ }
+
+ persistDuration := time.Since(persistStart)
+ logger.Info("Events persisted successfully",
+ zap.Int("eventCount", len(events)),
+ zap.Duration("persistDuration", persistDuration),
+ zap.Float64("eventsPerSecond", float64(len(events))/persistDuration.Seconds()))
+
+ return len(events), nil
+}
+
+type GetLatestBlockHeightRequest struct {
+ Tag uint32
+}
+
+type GetLatestBlockHeightResponse struct {
+ Height uint64
+}
+
+type GetLatestBlockFromPostgresRequest struct {
+ Tag uint32
+}
+
+type GetLatestBlockFromPostgresResponse struct {
+ Height uint64
+ Found bool // true if a block was found, false if no blocks exist yet
+}
+
+type GetLatestEventFromPostgresRequest struct {
+ EventTag uint32
+}
+
+type GetLatestEventFromPostgresResponse struct {
+ Sequence int64 // Event sequence number
+ Height uint64 // Block height (for backward compatibility)
+ Found bool // true if events were found, false if no events exist yet
+}
+
+type GetMaxEventIdRequest struct {
+ EventTag uint32
+}
+
+type GetMaxEventIdResponse struct {
+ MaxEventId int64 // Maximum event ID in DynamoDB
+ Found bool // true if events were found
+}
+
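+// GetLatestBlockHeight returns the height of the most recent block recorded
+// in the source DynamoDB storage for the given tag.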
+func (a *Migrator) GetLatestBlockHeight(ctx context.Context, req *GetLatestBlockHeightRequest) (*GetLatestBlockHeightResponse, error) {
+ migrationData, err := a.createStorageInstances(ctx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create storage instances: %w", err)
+ }
+ latestBlock, err := migrationData.SourceStorage.GetLatestBlock(ctx, req.Tag)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get latest block from DynamoDB: %w", err)
+ }
+ return &GetLatestBlockHeightResponse{Height: latestBlock.Height}, nil
+}
+
+func (a *GetLatestBlockHeightActivity) Execute(ctx workflow.Context, request *GetLatestBlockHeightRequest) (*GetLatestBlockHeightResponse, error) {
+ var response GetLatestBlockHeightResponse
+ err := a.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (a *GetLatestBlockHeightActivity) execute(ctx context.Context, request *GetLatestBlockHeightRequest) (*GetLatestBlockHeightResponse, error) {
+ migrationData, err := a.createStorageInstances(ctx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create storage instances: %w", err)
+ }
+ latestBlock, err := migrationData.SourceStorage.GetLatestBlock(ctx, request.Tag)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get latest block from DynamoDB: %w", err)
+ }
+ return &GetLatestBlockHeightResponse{Height: latestBlock.Height}, nil
+}
+
+func (a *GetLatestBlockFromPostgresActivity) Execute(ctx workflow.Context, request *GetLatestBlockFromPostgresRequest) (*GetLatestBlockFromPostgresResponse, error) {
+ var response GetLatestBlockFromPostgresResponse
+ err := a.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (a *GetLatestBlockFromPostgresActivity) execute(ctx context.Context, request *GetLatestBlockFromPostgresRequest) (*GetLatestBlockFromPostgresResponse, error) {
+ if err := a.validateRequest(request); err != nil {
+ return nil, err
+ }
+
+ logger := a.getLogger(ctx).With(zap.Reflect("request", request))
+
+ // Create PostgreSQL storage using shared connection pool to query destination
+ postgresParams := postgres_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ }
+ destResult, err := postgres_storage.NewMetaStorage(postgresParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create PostgreSQL storage: %w", err)
+ }
+
+ latestBlock, err := destResult.MetaStorage.GetLatestBlock(ctx, request.Tag)
+ if err != nil {
+ // Check if it's a "not found" error, which means no blocks migrated yet
+ errStr := strings.ToLower(err.Error())
+ if strings.Contains(errStr, "not found") || strings.Contains(errStr, "no rows") {
+ logger.Info("No blocks found in PostgreSQL destination - starting from beginning")
+ return &GetLatestBlockFromPostgresResponse{
+ Height: 0,
+ Found: false,
+ }, nil
+ }
+ return nil, xerrors.Errorf("failed to get latest block from PostgreSQL: %w", err)
+ }
+
+ logger.Info("Found latest block in PostgreSQL destination", zap.Uint64("height", latestBlock.Height))
+ return &GetLatestBlockFromPostgresResponse{
+ Height: latestBlock.Height,
+ Found: true,
+ }, nil
+}
+
+func (a *GetLatestEventFromPostgresActivity) Execute(ctx workflow.Context, request *GetLatestEventFromPostgresRequest) (*GetLatestEventFromPostgresResponse, error) {
+ var response GetLatestEventFromPostgresResponse
+ err := a.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (a *GetLatestEventFromPostgresActivity) execute(ctx context.Context, request *GetLatestEventFromPostgresRequest) (*GetLatestEventFromPostgresResponse, error) {
+ if err := a.validateRequest(request); err != nil {
+ return nil, err
+ }
+
+ logger := a.getLogger(ctx).With(zap.Reflect("request", request))
+
+ // Create PostgreSQL storage using shared connection pool to query destination
+ postgresParams := postgres_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ }
+
+ destResult, err := postgres_storage.NewMetaStorage(postgresParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create PostgreSQL storage: %w", err)
+ }
+
+ // Get the latest event sequence from PostgreSQL
+ maxEventId, err := destResult.MetaStorage.GetMaxEventId(ctx, request.EventTag)
+ if err != nil {
+ // Check if it's a "no event history" error, which means no events migrated yet
+ errStr := strings.ToLower(err.Error())
+ if strings.Contains(errStr, "no event history") || strings.Contains(errStr, "not found") || strings.Contains(errStr, "no rows") {
+ logger.Info("No events found in PostgreSQL destination - starting from beginning")
+ return &GetLatestEventFromPostgresResponse{
+ Sequence: 0,
+ Height: 0,
+ Found: false,
+ }, nil
+ }
+ return nil, xerrors.Errorf("failed to get latest event sequence from PostgreSQL: %w", err)
+ }
+
+ // Get the event entry to also return the height for backward compatibility
+ eventEntry, err := destResult.MetaStorage.GetEventByEventId(ctx, request.EventTag, maxEventId)
+ if err != nil {
+ // If we can't get the event entry, just return the sequence
+ logger.Warn("Failed to get event entry for max sequence, returning sequence only",
+ zap.Int64("maxEventId", maxEventId), zap.Error(err))
+ return &GetLatestEventFromPostgresResponse{
+ Sequence: maxEventId,
+ Height: 0,
+ Found: true,
+ }, nil
+ }
+
+ logger.Info("Found latest event in PostgreSQL destination",
+ zap.Int64("sequence", maxEventId),
+ zap.Uint64("height", eventEntry.BlockHeight))
+ return &GetLatestEventFromPostgresResponse{
+ Sequence: maxEventId,
+ Height: eventEntry.BlockHeight,
+ Found: true,
+ }, nil
+}
+
+func (a *GetMaxEventIdActivity) Execute(ctx workflow.Context, request *GetMaxEventIdRequest) (*GetMaxEventIdResponse, error) {
+ var response GetMaxEventIdResponse
+ err := a.executeActivity(ctx, request, &response)
+ return &response, err
+}
+
+func (a *GetMaxEventIdActivity) execute(ctx context.Context, request *GetMaxEventIdRequest) (*GetMaxEventIdResponse, error) {
+ logger := a.getLogger(ctx).With(
+ zap.Uint32("eventTag", request.EventTag),
+ )
+ logger.Info("getting max event ID from DynamoDB")
+
+ // Create DynamoDB storage directly
+ dynamoDBParams := dynamodb_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ Session: a.session,
+ }
+ sourceResult, err := dynamodb_storage.NewMetaStorage(dynamoDBParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create DynamoDB storage: %w", err)
+ }
+
+ // Get max event ID from DynamoDB
+ maxEventId, err := sourceResult.MetaStorage.GetMaxEventId(ctx, request.EventTag)
+ if err != nil {
+ if errors.Is(err, storage.ErrItemNotFound) {
+ logger.Warn("no events found in DynamoDB")
+ return &GetMaxEventIdResponse{
+ MaxEventId: 0,
+ Found: false,
+ }, nil
+ }
+ return nil, xerrors.Errorf("failed to get max event ID: %w", err)
+ }
+
+ logger.Info("found max event ID in DynamoDB",
+ zap.Int64("maxEventId", maxEventId))
+
+ return &GetMaxEventIdResponse{
+ MaxEventId: maxEventId,
+ Found: true,
+ }, nil
+}
+
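+// createStorageInstances wires up the source (DynamoDB) and destination
+// (PostgreSQL) meta storages used by this activity.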
+func (a *GetLatestBlockHeightActivity) createStorageInstances(ctx context.Context) (*MigrationData, error) {
+ logger := a.getLogger(ctx)
+
+ // Create DynamoDB storage directly
+ dynamoDBParams := dynamodb_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ Session: a.session,
+ }
+ sourceResult, err := dynamodb_storage.NewMetaStorage(dynamoDBParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create DynamoDB storage: %w", err)
+ }
+
+ // Create PostgreSQL storage using shared connection pool
+ postgresParams := postgres_storage.Params{
+ Params: fxparams.Params{
+ Config: a.config,
+ Logger: logger,
+ Metrics: a.metrics,
+ },
+ }
+ destResult, err := postgres_storage.NewMetaStorage(postgresParams)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create PostgreSQL storage: %w", err)
+ }
+
+ return &MigrationData{
+ SourceStorage: sourceResult.MetaStorage,
+ DestStorage: destResult.MetaStorage,
+ Config: a.config,
+ DynamoClient: a.dynamoClient,
+ BlockTable: a.blockTable,
+ }, nil
+}
+
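+// Names under which the migrator activities are registered with the Temporal worker.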
+const (
+ ActivityMigrator = "activity.migrator"
+ ActivityGetLatestBlockHeight = "activity.migrator.GetLatestBlockHeight"
+ ActivityGetLatestBlockFromPostgres = "activity.migrator.GetLatestBlockFromPostgres"
+ ActivityGetLatestEventFromPostgres = "activity.migrator.GetLatestEventFromPostgres"
+ ActivityGetMaxEventId = "activity.migrator.GetMaxEventId"
+)
diff --git a/internal/workflow/activity/migrator_integration_test.go b/internal/workflow/activity/migrator_integration_test.go
new file mode 100644
index 0000000..468de8d
--- /dev/null
+++ b/internal/workflow/activity/migrator_integration_test.go
@@ -0,0 +1,464 @@
+package activity
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "go.uber.org/mock/gomock"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ metastoragemocks "github.com/coinbase/chainstorage/internal/storage/metastorage/mocks"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
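+// migratorIntegrationTestSuite exercises migrateExtractedBlocks against mocked
+// source and destination meta storages, covering segmentation, parent
+// validation, and persistence ordering.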
+type migratorIntegrationTestSuite struct {
+ suite.Suite
+ ctrl *gomock.Controller
+ sourceStorage *metastoragemocks.MockMetaStorage
+ destStorage *metastoragemocks.MockMetaStorage
+ logger *zap.Logger
+}
+
+func TestMigratorIntegrationTestSuite(t *testing.T) {
+ suite.Run(t, new(migratorIntegrationTestSuite))
+}
+
+func (s *migratorIntegrationTestSuite) SetupTest() {
+ s.ctrl = gomock.NewController(s.T())
+ s.sourceStorage = metastoragemocks.NewMockMetaStorage(s.ctrl)
+ s.destStorage = metastoragemocks.NewMockMetaStorage(s.ctrl)
+ s.logger, _ = zap.NewDevelopment()
+}
+
+func (s *migratorIntegrationTestSuite) TearDownTest() {
+ s.ctrl.Finish()
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_NoReorg_ValidationPasses() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Setup test data - no reorgs
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102"},
+ }
+
+ // Events contain all the block information needed
+
+ // Mock block data fetching from source
+ block100 := &api.BlockMetadata{
+ Tag: 1, Height: 100, Hash: "0x100", ParentHash: "0x99", ParentHeight: 99,
+ }
+ block101 := &api.BlockMetadata{
+ Tag: 1, Height: 101, Hash: "0x101", ParentHash: "0x100", ParentHeight: 100,
+ }
+ block102 := &api.BlockMetadata{
+ Tag: 1, Height: 102, Hash: "0x102", ParentHash: "0x101", ParentHeight: 101,
+ }
+
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(100), "0x100").
+ Return(block100, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(101), "0x101").
+ Return(block101, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(102), "0x102").
+ Return(block102, nil)
+
+ // For validation, fetch parent block (99) from destination
+ parentBlock := &api.BlockMetadata{
+ Tag: 1, Height: 99, Hash: "0x99", ParentHash: "0x98",
+ }
+ s.destStorage.EXPECT().
+ GetBlockByHeight(ctx, uint32(1), uint64(99)).
+ Return(parentBlock, nil)
+
+ // Expect bulk persist with validation - THIS IS THE KEY TEST
+ // The blocks should be persisted with the parent block for validation
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ []*api.BlockMetadata{block100, block101, block102},
+ parentBlock).
+ Return(nil)
+
+ // Create migrator with mocked storage
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify
+ require.NoError(err)
+ require.Equal(3, count)
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_SingleReorg_CorrectParentValidation() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Setup test data - reorg at height 102
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102a"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0x102a"},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102b"},
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 103, BlockHash: "0x103"},
+ }
+
+ // Events contain all the block information needed
+
+ // Mock block data
+ block100 := &api.BlockMetadata{
+ Tag: 1, Height: 100, Hash: "0x100", ParentHash: "0x99", ParentHeight: 99,
+ }
+ block101 := &api.BlockMetadata{
+ Tag: 1, Height: 101, Hash: "0x101", ParentHash: "0x100", ParentHeight: 100,
+ }
+ block102a := &api.BlockMetadata{
+ Tag: 1, Height: 102, Hash: "0x102a", ParentHash: "0x101", ParentHeight: 101,
+ }
+ block102b := &api.BlockMetadata{
+ Tag: 1, Height: 102, Hash: "0x102b", ParentHash: "0x101", ParentHeight: 101,
+ }
+ block103 := &api.BlockMetadata{
+ Tag: 1, Height: 103, Hash: "0x103", ParentHash: "0x102b", ParentHeight: 102,
+ }
+
+ // Mock fetching blocks from source
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(100), "0x100").
+ Return(block100, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(101), "0x101").
+ Return(block101, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(102), "0x102a").
+ Return(block102a, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(102), "0x102b").
+ Return(block102b, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(103), "0x103").
+ Return(block103, nil)
+
+ // CRITICAL TEST: Two segments with different parent validations
+
+ // Segment 1: blocks 100, 101, 102a - validate against parent height 99
+ parentBlock99 := &api.BlockMetadata{
+ Tag: 1, Height: 99, Hash: "0x99", ParentHash: "0x98",
+ }
+ s.destStorage.EXPECT().
+ GetBlockByHeight(ctx, uint32(1), uint64(99)).
+ Return(parentBlock99, nil)
+
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ []*api.BlockMetadata{block100, block101, block102a},
+ parentBlock99).
+ Return(nil)
+
+ // Segment 2: blocks 102b, 103 - validate against parent height 101 (reorg point)
+ s.destStorage.EXPECT().
+ GetBlockByHeight(ctx, uint32(1), uint64(101)).
+ Return(block101, nil)
+
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ []*api.BlockMetadata{block102b, block103},
+ block101).
+ Return(nil)
+
+ // Create migrator
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify
+ require.NoError(err)
+ require.Equal(5, count) // Total blocks persisted
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_ValidationFailure() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Setup test data - blocks with broken chain
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101"},
+ }
+
+ // Events contain all the block information needed
+
+ block100 := &api.BlockMetadata{
+ Tag: 1, Height: 100, Hash: "0x100", ParentHash: "0x99", ParentHeight: 99,
+ }
+ block101 := &api.BlockMetadata{
+ Tag: 1, Height: 101, Hash: "0x101", ParentHash: "0xWRONG", ParentHeight: 100, // Wrong parent
+ }
+
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(100), "0x100").
+ Return(block100, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(101), "0x101").
+ Return(block101, nil)
+
+ // Get parent for validation
+ parentBlock := &api.BlockMetadata{
+ Tag: 1, Height: 99, Hash: "0x99", ParentHash: "0x98",
+ }
+ s.destStorage.EXPECT().
+ GetBlockByHeight(ctx, uint32(1), uint64(99)).
+ Return(parentBlock, nil)
+
+ // Expect PersistBlockMetas to fail due to validation error
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ []*api.BlockMetadata{block100, block101},
+ parentBlock).
+ Return(xerrors.Errorf("chain is not continuous"))
+
+ // Create migrator
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute - should fail
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify failure
+ require.Error(err)
+ require.Contains(err.Error(), "chain is not continuous")
+ require.Equal(0, count)
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_ComplexReorg_MultipleSegments() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Complex scenario: multiple reorgs
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101a", ParentHash: "0x100"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0x101a"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101b", ParentHash: "0x100"},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102a", ParentHash: "0x101b"},
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0x102a"},
+ {EventId: 7, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0x101b"},
+ {EventId: 8, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101c", ParentHash: "0x100"},
+ {EventId: 9, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102b", ParentHash: "0x101c"},
+ }
+
+ // Events contain all the block information needed
+
+ // Mock blocks
+ block100 := &api.BlockMetadata{Tag: 1, Height: 100, Hash: "0x100", ParentHash: "0x99", ParentHeight: 99}
+ block101a := &api.BlockMetadata{Tag: 1, Height: 101, Hash: "0x101a", ParentHash: "0x100", ParentHeight: 100}
+ block101b := &api.BlockMetadata{Tag: 1, Height: 101, Hash: "0x101b", ParentHash: "0x100", ParentHeight: 100}
+ block102a := &api.BlockMetadata{Tag: 1, Height: 102, Hash: "0x102a", ParentHash: "0x101b", ParentHeight: 101}
+ block101c := &api.BlockMetadata{Tag: 1, Height: 101, Hash: "0x101c", ParentHash: "0x100", ParentHeight: 100}
+ block102b := &api.BlockMetadata{Tag: 1, Height: 102, Hash: "0x102b", ParentHash: "0x101c", ParentHeight: 101}
+
+ // Mock fetching
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(100), "0x100").Return(block100, nil)
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(101), "0x101a").Return(block101a, nil)
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(101), "0x101b").Return(block101b, nil)
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(102), "0x102a").Return(block102a, nil)
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(101), "0x101c").Return(block101c, nil)
+ s.sourceStorage.EXPECT().GetBlockByHash(ctx, uint32(1), uint64(102), "0x102b").Return(block102b, nil)
+
+ // Three segments expected:
+ // Segment 1: [100, 101a] - parent 99
+ parentBlock99 := &api.BlockMetadata{Tag: 1, Height: 99, Hash: "0x99"}
+ s.destStorage.EXPECT().GetBlockByHeight(ctx, uint32(1), uint64(99)).Return(parentBlock99, nil)
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false, []*api.BlockMetadata{block100, block101a}, parentBlock99).
+ Return(nil)
+
+ // Segment 2: [101b, 102a] - parent 100 (reorg at 101)
+ s.destStorage.EXPECT().GetBlockByHeight(ctx, uint32(1), uint64(100)).Return(block100, nil)
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false, []*api.BlockMetadata{block101b, block102a}, block100).
+ Return(nil)
+
+ // Segment 3: [101c, 102b] - parent 100 (second reorg at 101)
+ s.destStorage.EXPECT().GetBlockByHeight(ctx, uint32(1), uint64(100)).Return(block100, nil).Times(1)
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false, []*api.BlockMetadata{block101c, block102b}, block100).
+ Return(nil)
+
+ // Create migrator
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify
+ require.NoError(err)
+ require.Equal(6, count)
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_SkippedBlocks() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Test with skipped blocks
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100", ParentHash: "0x99", BlockSkipped: false},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "", ParentHash: "", BlockSkipped: true}, // Skipped
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102", ParentHash: "0x100", BlockSkipped: false},
+ }
+
+ // Events contain all the block information needed
+
+ // Regular blocks
+ block100 := &api.BlockMetadata{
+ Tag: 1, Height: 100, Hash: "0x100", ParentHash: "0x99", ParentHeight: 99,
+ }
+ block102 := &api.BlockMetadata{
+ Tag: 1, Height: 102, Hash: "0x102", ParentHash: "0x100", ParentHeight: 100,
+ }
+
+ // Skipped blocks shouldn't be fetched from source
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(100), "0x100").
+ Return(block100, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(102), "0x102").
+ Return(block102, nil)
+ // NO expectation for block 101 since it's skipped
+
+ // Get parent for validation
+ parentBlock := &api.BlockMetadata{
+ Tag: 1, Height: 99, Hash: "0x99",
+ }
+ s.destStorage.EXPECT().
+ GetBlockByHeight(ctx, uint32(1), uint64(99)).
+ Return(parentBlock, nil)
+
+ // Expect persistence including the skipped block (created inline)
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ gomock.Any(), // validated in the DoAndReturn callback below
+ parentBlock).
+ DoAndReturn(func(ctx context.Context, updateWatermark bool, blocks []*api.BlockMetadata, lastBlock *api.BlockMetadata) error {
+ // Verify the skipped block was created correctly
+ require.Len(blocks, 3)
+ require.Equal(uint64(101), blocks[1].Height)
+ require.True(blocks[1].Skipped)
+ require.Empty(blocks[1].Hash)
+ require.Empty(blocks[1].ParentHash)
+ return nil
+ })
+
+ // Create migrator
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify
+ require.NoError(err)
+ require.Equal(3, count)
+}
+
+func (s *migratorIntegrationTestSuite) TestMigrateExtractedBlocks_ParentNotFound_ContinuesWithoutValidation() {
+ require := testutil.Require(s.T())
+ ctx := context.Background()
+
+ // Genesis or missing parent scenario
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 0, BlockHash: "0x0"}, // Genesis
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 1, BlockHash: "0x1"},
+ }
+
+ // Events contain all the block information needed
+
+ block0 := &api.BlockMetadata{
+ Tag: 1, Height: 0, Hash: "0x0", ParentHash: "", ParentHeight: 0,
+ }
+ block1 := &api.BlockMetadata{
+ Tag: 1, Height: 1, Hash: "0x1", ParentHash: "0x0", ParentHeight: 0,
+ }
+
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(0), "0x0").
+ Return(block0, nil)
+ s.sourceStorage.EXPECT().
+ GetBlockByHash(ctx, uint32(1), uint64(1), "0x1").
+ Return(block1, nil)
+
+ // ParentHeight is 0, so no parent fetch attempted
+ // Persist without validation (lastBlock = nil)
+ s.destStorage.EXPECT().
+ PersistBlockMetas(ctx, false,
+ []*api.BlockMetadata{block0, block1},
+ nil). // No parent validation for genesis
+ Return(nil)
+
+ // Create migrator
+ migrator := &Migrator{}
+ data := &MigrationData{
+ SourceStorage: s.sourceStorage,
+ DestStorage: s.destStorage,
+ }
+ request := &MigratorRequest{
+ Tag: 1,
+ Parallelism: 1,
+ }
+
+ // Execute
+ count, err := migrator.migrateExtractedBlocks(ctx, s.logger, data, request, events)
+
+ // Verify
+ require.NoError(err)
+ require.Equal(2, count)
+}
diff --git a/internal/workflow/activity/migrator_test.go b/internal/workflow/activity/migrator_test.go
new file mode 100644
index 0000000..f199bf5
--- /dev/null
+++ b/internal/workflow/activity/migrator_test.go
@@ -0,0 +1,418 @@
+package activity
+
+import (
+ "testing"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/suite"
+ "go.temporal.io/sdk/testsuite"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ "github.com/coinbase/chainstorage/protos/coinbase/c3/common"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
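+// migratorActivityTestSuite unit-tests the segmentation logic in
+// buildSegmentsFromEvents along with the parallel-fetch plumbing.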
+type migratorActivityTestSuite struct {
+ suite.Suite
+ testsuite.WorkflowTestSuite
+ ctrl *gomock.Controller
+ app testapp.TestApp
+ logger *zap.Logger
+ migrator *Migrator
+ getLatestHeight *GetLatestBlockHeightActivity
+ getLatestFromPostgres *GetLatestBlockFromPostgresActivity
+ getLatestEvent *GetLatestEventFromPostgresActivity
+ getMaxEventId *GetMaxEventIdActivity
+ env *cadence.TestEnv
+ cfg *config.Config
+}
+
+func TestMigratorActivityTestSuite(t *testing.T) {
+ suite.Run(t, new(migratorActivityTestSuite))
+}
+
+func (s *migratorActivityTestSuite) SetupTest() {
+ require := testutil.Require(s.T())
+
+ s.env = cadence.NewTestActivityEnv(s)
+ s.ctrl = gomock.NewController(s.T())
+
+ cfg, err := config.New(
+ config.WithBlockchain(common.Blockchain_BLOCKCHAIN_ETHEREUM),
+ config.WithNetwork(common.Network_NETWORK_ETHEREUM_MAINNET),
+ config.WithEnvironment(config.EnvLocal),
+ )
+ require.NoError(err)
+ s.cfg = cfg
+
+ var deps struct {
+ fx.In
+ Migrator *Migrator
+ GetLatestHeight *GetLatestBlockHeightActivity
+ GetLatestFromPostgres *GetLatestBlockFromPostgresActivity
+ GetLatestEventFromPostgres *GetLatestEventFromPostgresActivity
+ GetMaxEventId *GetMaxEventIdActivity
+ }
+
+ s.app = testapp.New(
+ s.T(),
+ Module,
+ cadence.WithTestEnv(s.env),
+ testapp.WithConfig(cfg),
+ fx.Populate(&deps),
+ )
+
+ s.migrator = deps.Migrator
+ s.getLatestHeight = deps.GetLatestHeight
+ s.getLatestFromPostgres = deps.GetLatestFromPostgres
+ s.getLatestEvent = deps.GetLatestEventFromPostgres
+ s.getMaxEventId = deps.GetMaxEventId
+ s.logger = s.app.Logger()
+}
+
+func (s *migratorActivityTestSuite) TearDownTest() {
+ s.app.Close()
+ s.ctrl.Finish()
+ s.env.AssertExpectations(s.T())
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_NoReorgs() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test case: No reorgs - single segment
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb", ParentHash: "0xa"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc", ParentHash: "0xb"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 1, "Should have 1 segment when no reorgs")
+ require.Len(segments[0].Blocks, 3)
+
+ // Verify blocks are in correct order
+ require.Equal(uint64(100), segments[0].Blocks[0].Height)
+ require.Equal("0xa", segments[0].Blocks[0].Hash)
+ require.Equal(uint64(101), segments[0].Blocks[1].Height)
+ require.Equal("0xb", segments[0].Blocks[1].Hash)
+ require.Equal(uint64(102), segments[0].Blocks[2].Height)
+ require.Equal("0xc", segments[0].Blocks[2].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_SingleReorg() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test case: Single reorg in middle
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb", ParentHash: "0xa"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc1", ParentHash: "0xb"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0xc1"},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc2", ParentHash: "0xb"},
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 103, BlockHash: "0xd", ParentHash: "0xc2"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 2, "Should have 2 segments with 1 reorg")
+
+ // First segment: blocks before reorg
+ require.Len(segments[0].Blocks, 3)
+ require.Equal(uint64(100), segments[0].Blocks[0].Height)
+ require.Equal(uint64(101), segments[0].Blocks[1].Height)
+ require.Equal(uint64(102), segments[0].Blocks[2].Height)
+ require.Equal("0xc1", segments[0].Blocks[2].Hash)
+
+ // Second segment: blocks after reorg
+ require.Len(segments[1].Blocks, 2)
+ require.Equal(uint64(102), segments[1].Blocks[0].Height)
+ require.Equal("0xc2", segments[1].Blocks[0].Hash)
+ require.Equal(uint64(103), segments[1].Blocks[1].Height)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_MultipleReorgs() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test case: Multiple reorgs
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb1", ParentHash: "0xa"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0xb1"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb2", ParentHash: "0xa"},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc1", ParentHash: "0xb2"},
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0xc1"},
+ {EventId: 7, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0xb2"},
+ {EventId: 8, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb3", ParentHash: "0xa"},
+ {EventId: 9, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc2", ParentHash: "0xb3"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 3, "Should have 3 segments with 2 reorgs")
+
+ // First segment
+ require.Len(segments[0].Blocks, 2)
+ require.Equal("0xa", segments[0].Blocks[0].Hash)
+ require.Equal("0xb1", segments[0].Blocks[1].Hash)
+
+ // Second segment
+ require.Len(segments[1].Blocks, 2)
+ require.Equal("0xb2", segments[1].Blocks[0].Hash)
+ require.Equal("0xc1", segments[1].Blocks[1].Hash)
+
+ // Third segment
+ require.Len(segments[2].Blocks, 2)
+ require.Equal("0xb3", segments[2].Blocks[0].Hash)
+ require.Equal("0xc2", segments[2].Blocks[1].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_StartingWithRemoved() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test case: Starting with BLOCK_REMOVED
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0xold"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0xold"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb", ParentHash: "0xa"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc", ParentHash: "0xb"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 1, "Should have 1 segment when starting with REMOVED")
+ require.Len(segments[0].Blocks, 2)
+ require.Equal("0xb", segments[0].Blocks[0].Hash)
+ require.Equal("0xc", segments[0].Blocks[1].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_EmptyEvents() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, []*model.EventEntry{})
+ require.Nil(segments, "Should return nil for empty events")
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_ComplexReorgPattern() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Complex pattern: build up chain, then deep reorg
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0x100", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101a", ParentHash: "0x100"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102a", ParentHash: "0x101a"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 103, BlockHash: "0x103a", ParentHash: "0x102a"},
+ // Deep reorg back to block 100
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 103, BlockHash: "0x103a"},
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102, BlockHash: "0x102a"},
+ {EventId: 7, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101, BlockHash: "0x101a"},
+ // Build new chain
+ {EventId: 8, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0x101b", ParentHash: "0x100"},
+ {EventId: 9, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0x102b", ParentHash: "0x101b"},
+ {EventId: 10, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 103, BlockHash: "0x103b", ParentHash: "0x102b"},
+ {EventId: 11, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 104, BlockHash: "0x104", ParentHash: "0x103b"},
+ {EventId: 12, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 105, BlockHash: "0x105", ParentHash: "0x104"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 2, "Should have 2 segments")
+
+ // First segment: original chain
+ require.Len(segments[0].Blocks, 4)
+ require.Equal("0x100", segments[0].Blocks[0].Hash)
+ require.Equal("0x103a", segments[0].Blocks[3].Hash)
+
+ // Second segment: new chain after reorg
+ require.Len(segments[1].Blocks, 5)
+ require.Equal("0x101b", segments[1].Blocks[0].Hash)
+ require.Equal("0x105", segments[1].Blocks[4].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_SkippedBlocks() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test with skipped blocks
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99", BlockSkipped: false},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "", ParentHash: "", BlockSkipped: true},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc", ParentHash: "0xa", BlockSkipped: false},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 1)
+ require.Len(segments[0].Blocks, 3)
+
+ // Verify skipped block is included
+ require.Equal(uint64(100), segments[0].Blocks[0].Height)
+ require.False(segments[0].Blocks[0].Skipped)
+ require.Equal(uint64(101), segments[0].Blocks[1].Height)
+ require.True(segments[0].Blocks[1].Skipped)
+ require.Equal(uint64(102), segments[0].Blocks[2].Height)
+ require.False(segments[0].Blocks[2].Skipped)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_OnlyRemovedEvents() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Only BLOCK_REMOVED events - no blocks to migrate
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 100},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Nil(segments, "Should return nil when only REMOVED events")
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_AlternatingAddRemove() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Alternating ADD and REMOVE pattern
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 100},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xb", ParentHash: "0x99"},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 100},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xc", ParentHash: "0x99"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 3, "Should have 3 segments")
+
+ // Each segment should have one block
+ for i, segment := range segments {
+ require.Len(segment.Blocks, 1, "Segment %d should have 1 block", i)
+ require.Equal(uint64(100), segment.Blocks[0].Height)
+ }
+ require.Equal("0xa", segments[0].Blocks[0].Hash)
+ require.Equal("0xb", segments[1].Blocks[0].Hash)
+ require.Equal("0xc", segments[2].Blocks[0].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_ConsecutiveRemovals() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Multiple consecutive BLOCK_REMOVED events
+ events := []*model.EventEntry{
+ {EventId: 1, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 2, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb", ParentHash: "0xa"},
+ {EventId: 3, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 101},
+ {EventId: 4, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 100},
+ {EventId: 5, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 99}, // Removing even further back
+ {EventId: 6, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 99, BlockHash: "0xnew99", ParentHash: "0x98"},
+ {EventId: 7, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xnew100", ParentHash: "0xnew99"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 2)
+
+ // First segment: original blocks
+ require.Len(segments[0].Blocks, 2)
+ require.Equal("0xa", segments[0].Blocks[0].Hash)
+ require.Equal("0xb", segments[0].Blocks[1].Hash)
+
+ // Second segment: new blocks after deep reorg
+ require.Len(segments[1].Blocks, 2)
+ require.Equal("0xnew99", segments[1].Blocks[0].Hash)
+ require.Equal("0xnew100", segments[1].Blocks[1].Hash)
+}
+
+func (s *migratorActivityTestSuite) TestFetchBlockData_OrderPreservation() {
+ require := testutil.Require(s.T())
+
+ // Create test blocks in specific order
+ blocksToMigrate := []BlockToMigrate{
+ {Height: 102, Hash: "0xc", EventSeq: 3},
+ {Height: 100, Hash: "0xa", EventSeq: 1},
+ {Height: 101, Hash: "0xb", EventSeq: 2},
+ }
+
+ // Test that WorkItem structure exists and has correct fields
+ workItem := WorkItem{
+ Block: blocksToMigrate[0],
+ Index: 0,
+ }
+ require.Equal(uint64(102), workItem.Block.Height)
+ require.Equal(0, workItem.Index)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_EventSequenceOrdering() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test that blocks maintain event sequence ordering within segments
+ events := []*model.EventEntry{
+ {EventId: 10, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 100, BlockHash: "0xa", ParentHash: "0x99"},
+ {EventId: 20, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 101, BlockHash: "0xb", ParentHash: "0xa"},
+ {EventId: 30, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xc", ParentHash: "0xb"},
+ {EventId: 40, EventType: api.BlockchainEvent_BLOCK_REMOVED, BlockHeight: 102},
+ {EventId: 50, EventType: api.BlockchainEvent_BLOCK_ADDED, BlockHeight: 102, BlockHash: "0xd", ParentHash: "0xb"},
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 2)
+
+ // Verify EventSeq is preserved
+ require.Equal(int64(10), segments[0].Blocks[0].EventSeq)
+ require.Equal(int64(20), segments[0].Blocks[1].EventSeq)
+ require.Equal(int64(30), segments[0].Blocks[2].EventSeq)
+ require.Equal(int64(50), segments[1].Blocks[0].EventSeq)
+}
+
+func (s *migratorActivityTestSuite) TestBuildSegmentsFromEvents_LargeReorgChain() {
+ require := testutil.Require(s.T())
+ logger := s.app.Logger()
+
+ // Test with a large number of blocks and reorgs
+ events := make([]*model.EventEntry, 0, 1000)
+
+ // Add 100 blocks
+ for i := 0; i < 100; i++ {
+ events = append(events, &model.EventEntry{
+ EventId: int64(i + 1),
+ EventType: api.BlockchainEvent_BLOCK_ADDED,
+ BlockHeight: uint64(100 + i),
+ BlockHash: string(rune('a'+i%26)) + "orig",
+ ParentHash:  string(rune('a'+(i+25)%26)) + "orig", // (i+25)%26 gives the previous letter; (i-1)%26 is negative in Go at i == 0
+ })
+ }
+
+ // Remove last 50 blocks
+ for i := 99; i >= 50; i-- {
+ events = append(events, &model.EventEntry{
+ EventId: int64(101 + (99 - i)),
+ EventType: api.BlockchainEvent_BLOCK_REMOVED,
+ BlockHeight: uint64(100 + i),
+ })
+ }
+
+ // Add 60 new blocks
+ for i := 0; i < 60; i++ {
+ events = append(events, &model.EventEntry{
+ EventId: int64(151 + i),
+ EventType: api.BlockchainEvent_BLOCK_ADDED,
+ BlockHeight: uint64(150 + i),
+ BlockHash: string(rune('a'+i%26)) + "new",
+ ParentHash:  string(rune('a'+(i+25)%26)) + "new", // same wraparound trick to avoid a negative modulo at i == 0
+ })
+ }
+
+ segments := s.migrator.buildSegmentsFromEvents(logger, events)
+ require.Len(segments, 2)
+ require.Len(segments[0].Blocks, 100)
+ require.Len(segments[1].Blocks, 60)
+}
diff --git a/internal/workflow/activity/module.go b/internal/workflow/activity/module.go
index c739188..53b75d9 100644
--- a/internal/workflow/activity/module.go
+++ b/internal/workflow/activity/module.go
@@ -18,5 +18,11 @@ var Module = fx.Options(
fx.Provide(NewEventReconciler),
fx.Provide(NewEventLoader),
fx.Provide(NewReplicator),
+ fx.Provide(NewLatestBlock),
fx.Provide(NewUpdateWatermark),
+ fx.Provide(NewMigrator),
+ fx.Provide(NewGetLatestBlockHeightActivity),
+ fx.Provide(NewGetLatestBlockFromPostgresActivity),
+ fx.Provide(NewGetLatestEventFromPostgresActivity),
+ fx.Provide(NewGetMaxEventIdActivity),
)
diff --git a/internal/workflow/activity/replicator.go b/internal/workflow/activity/replicator.go
index aea3aa6..b34998c 100644
--- a/internal/workflow/activity/replicator.go
+++ b/internal/workflow/activity/replicator.go
@@ -6,6 +6,8 @@ import (
"net/http"
"time"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
"go.temporal.io/sdk/workflow"
"go.uber.org/fx"
"go.uber.org/zap"
@@ -67,8 +69,10 @@ type (
}
ReplicatorResponse struct {
- StartHeight uint64
- EndHeight uint64
+ StartHeight uint64
+ EndHeight uint64
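+ // Height and timestamp of the last block in the replicated range.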
+ LatestBlockHeight uint64
+ LatestBlockTimestamp *timestamppb.Timestamp
}
)
@@ -109,7 +113,7 @@ func (a *Replicator) downloadBlockData(ctx context.Context, url string) ([]byte,
httpResp, err := a.httpClient.Do(req)
if err != nil {
- return nil, retry.Retryable(xerrors.Errorf("failed to download block file: %w", err))
+ return nil, retry.Retryable(xerrors.Errorf("failed to download block file: %w, url: %s", err, url))
}
finalizer := finalizer.WithCloser(httpResp.Body)
@@ -139,26 +143,25 @@ func (a *Replicator) prepareRawBlockData(ctx context.Context, blockFile *api.Blo
}
var rawBytes []byte
var compressedBytes []byte
- switch blockFile.Compression {
- case api.Compression_NONE:
- rawBytes = bodyBytes
- if compression == api.Compression_GZIP {
- compressedBytes, err = storage_utils.Compress(rawBytes, compression)
- if err != nil {
- return nil, xerrors.Errorf("failed to compress block data with type %v: %w", compression.String(), err)
- }
- }
- case api.Compression_GZIP:
+
+ sourceCompressionType := blockFile.Compression
+ if sourceCompressionType == compression {
compressedBytes = bodyBytes
- if compression == api.Compression_NONE {
- rawBytes, err = storage_utils.Decompress(rawBytes, blockFile.Compression)
- if err != nil {
- return nil, xerrors.Errorf("failed to decompress block data with type %v: %w", blockFile.Compression.String(), err)
- }
+ if sourceCompressionType == api.Compression_NONE {
+ rawBytes = bodyBytes
+ }
+ } else {
+ rawBytes, err = storage_utils.Decompress(bodyBytes, sourceCompressionType)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decompress block data with type %v: %w", sourceCompressionType.String(), err)
+ }
+ compressedBytes, err = storage_utils.Compress(rawBytes, compression)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to compress block data with type %v: %w", compression.String(), err)
}
- default:
- return nil, xerrors.Errorf("unknown block file compression type %v", blockFile.Compression.String())
}
+
metadata := &api.BlockMetadata{
Tag: blockFile.Tag,
Hash: blockFile.Hash,
@@ -173,7 +176,6 @@ func (a *Replicator) prepareRawBlockData(ctx context.Context, blockFile *api.Blo
// TODO remove this after the api upgrade
if metadata.Timestamp == nil || (metadata.Timestamp.Nanos == 0 && metadata.Timestamp.Seconds == 0) {
block := new(api.Block)
- rawBytes := rawBytes
if len(rawBytes) == 0 {
rawBytes, err = storage_utils.Decompress(bodyBytes, blockFile.Compression)
if err != nil {
@@ -197,7 +199,7 @@ func (a *Replicator) prepareRawBlockData(ctx context.Context, blockFile *api.Blo
case api.Compression_NONE:
rawBlockData.BlockData = rawBytes
return rawBlockData, nil
- case api.Compression_GZIP:
+ case api.Compression_GZIP, api.Compression_ZSTD:
rawBlockData.BlockData = compressedBytes
return rawBlockData, nil
default:
@@ -229,6 +231,35 @@ func (a *Replicator) execute(ctx context.Context, request *ReplicatorRequest) (*
i := i
group.Go(func() error {
blockFile := blocks.Files[i]
+ if blockFile.GetSkipped() || blockFile.GetFileUrl() == "" {
+ if blockFile.GetSkipped() {
+ logger.Debug(
+ "block file skipped; skip download",
+ zap.Uint32("tag", blockFile.Tag),
+ zap.Uint64("height", blockFile.Height),
+ zap.Bool("skipped", blockFile.Skipped),
+ )
+ } else {
+ logger.Warn(
+ "block file url missing; skip download",
+ zap.Uint32("tag", blockFile.Tag),
+ zap.Uint64("height", blockFile.Height),
+ zap.Bool("skipped", blockFile.Skipped),
+ )
+ }
+
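+ // Record metadata from the block file itself so downstream persistence
+ // still sees an entry for this height despite skipping the download.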
+ blockMetas[i] = &api.BlockMetadata{
+ Tag: blockFile.Tag,
+ Hash: blockFile.Hash,
+ ParentHash: blockFile.ParentHash,
+ Height: blockFile.Height,
+ ParentHeight: blockFile.ParentHeight,
+ Skipped: blockFile.Skipped,
+ Timestamp: blockFile.BlockTimestamp,
+ }
+ return nil
+ }
+
logger.Debug(
"downloading block",
zap.Uint32("tag", blockFile.Tag),
@@ -258,7 +289,9 @@ func (a *Replicator) execute(ctx context.Context, request *ReplicatorRequest) (*
}
return &ReplicatorResponse{
- StartHeight: request.StartHeight,
- EndHeight: request.EndHeight,
+ StartHeight: request.StartHeight,
+ EndHeight: request.EndHeight,
+ LatestBlockHeight: blockMetas[len(blockMetas)-1].Height,
+ LatestBlockTimestamp: blockMetas[len(blockMetas)-1].Timestamp,
}, nil
}
diff --git a/internal/workflow/activity/syncer.go b/internal/workflow/activity/syncer.go
index 512880f..b7be339 100644
--- a/internal/workflow/activity/syncer.go
+++ b/internal/workflow/activity/syncer.go
@@ -218,10 +218,14 @@ func (a *Syncer) execute(ctx context.Context, request *SyncerRequest) (*SyncerRe
})
}
} else {
+ // Record heartbeat before potentially slow blockchain call
+ a.heartbeater.RecordHeartbeat(ctx)
inMetadatas, err = a.masterBlockchainClient.BatchGetBlockMetadata(ctx, request.Tag, start, end)
if err != nil {
return nil, xerrors.Errorf("failed to get metadata for blocks from %d to %d: %w", start, end-1, err)
}
+ // Record heartbeat after blockchain call
+ a.heartbeater.RecordHeartbeat(ctx)
// Check if the first block to be synced is a valid descendant of the local fork block.
// If the condition is not met, it is likely that master node experienced a block chain reorg right after
@@ -610,6 +614,9 @@ func (a *Syncer) getBlocksInParallel(
for i := 0; i < parallelism; i++ {
g.Go(func() error {
for metadata := range inputChannel {
+ // Record heartbeat before processing each block to prevent timeout
+ a.heartbeater.RecordHeartbeat(ctx)
+
block, err := a.safeGetBlock(ctx, logger, metadata, withBestEffort, dataCompression, fastSync, transactionIndexingParallelism)
if err != nil {
logger.Warn("failed to get block",
@@ -622,6 +629,9 @@ func (a *Syncer) getBlocksInParallel(
reprocessChannel <- nil
outChannel <- block
}
+
+ // Record heartbeat after processing each block
+ a.heartbeater.RecordHeartbeat(ctx)
}
return nil
})
diff --git a/internal/workflow/backfiller.go b/internal/workflow/backfiller.go
index 2d23906..eb9740e 100644
--- a/internal/workflow/backfiller.go
+++ b/internal/workflow/backfiller.go
@@ -78,7 +78,11 @@ func NewBackfiller(params BackfillerParams) *Backfiller {
}
func (w *Backfiller) Execute(ctx context.Context, request *BackfillerRequest) (client.WorkflowRun, error) {
- return w.startWorkflow(ctx, w.name, request)
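+ // Allow callers to override the workflow ID via the context; fall back to
+ // the registered workflow name when no override is provided.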
+ workflowId := w.name
+ if v, ok := ctx.Value("workflowId").(string); ok && v != "" {
+ workflowId = v
+ }
+ return w.startWorkflow(ctx, workflowId, request)
}
func (w *Backfiller) execute(ctx workflow.Context, request *BackfillerRequest) error {
diff --git a/internal/workflow/integration_test/migrator_integration_test.go b/internal/workflow/integration_test/migrator_integration_test.go
new file mode 100644
index 0000000..5e0a764
--- /dev/null
+++ b/internal/workflow/integration_test/migrator_integration_test.go
@@ -0,0 +1,409 @@
+package integration
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+ "go.temporal.io/sdk/testsuite"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+
+ "github.com/coinbase/chainstorage/internal/blockchain/client"
+ "github.com/coinbase/chainstorage/internal/blockchain/jsonrpc"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser"
+ "github.com/coinbase/chainstorage/internal/blockchain/restapi"
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/dlq"
+ "github.com/coinbase/chainstorage/internal/s3"
+ "github.com/coinbase/chainstorage/internal/storage"
+ "github.com/coinbase/chainstorage/internal/storage/blobstorage"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/model"
+ "github.com/coinbase/chainstorage/internal/storage/metastorage/postgres"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ "github.com/coinbase/chainstorage/internal/workflow"
+ "github.com/coinbase/chainstorage/protos/coinbase/c3/common"
+ api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
+)
+
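+// MigratorIntegrationTestSuite drives the migrator workflow end to end
+// against the storage backends provisioned for functional tests.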
+type MigratorIntegrationTestSuite struct {
+ suite.Suite
+ testsuite.WorkflowTestSuite
+}
+
+type testDependencies struct {
+ fx.In
+ Migrator *workflow.Migrator
+ BlobStorage blobstorage.BlobStorage
+ MetaStorage metastorage.MetaStorage // DynamoDB
+ MetaStoragePG metastorage.MetaStorage `name:"pg"` // PostgreSQL
+ Parser parser.Parser
+ Client client.Client `name:"slave"`
+}
+
+func TestIntegrationMigratorTestSuite(t *testing.T) {
+ suite.Run(t, new(MigratorIntegrationTestSuite))
+}
+
+// Helper function to create test app with common dependencies
+func (s *MigratorIntegrationTestSuite) createTestApp(env *cadence.TestEnv, timeout time.Duration) (testapp.TestApp, *testDependencies) {
+ var deps testDependencies
+ env.SetTestTimeout(timeout)
+
+ app := testapp.New(
+ s.T(),
+ testapp.WithFunctional(),
+ testapp.WithBlockchainNetwork(common.Blockchain_BLOCKCHAIN_ETHEREUM, common.Network_NETWORK_ETHEREUM_MAINNET),
+ cadence.WithTestEnv(env),
+ workflow.Module,
+ client.Module,
+ jsonrpc.Module,
+ restapi.Module,
+ s3.Module,
+ storage.Module,
+ parser.Module,
+ dlq.Module,
+ // Provide PostgreSQL MetaStorage with name "pg" for migration tests
+ fx.Provide(fx.Annotated{
+ Name: "pg",
+ Target: func(params postgres.Params) (metastorage.MetaStorage, error) {
+ result, err := postgres.NewMetaStorage(params)
+ if err != nil {
+ return nil, err
+ }
+ return result.MetaStorage, nil
+ },
+ }),
+ fx.Populate(&deps),
+ )
+
+ return app, &deps
+}
+
+// Helper function to create test events with blocks
+func (s *MigratorIntegrationTestSuite) createTestEvents(
+ ctx context.Context,
+ deps *testDependencies,
+ tag uint32,
+ eventTag uint32,
+ startSequence, endSequence int64,
+ includeReorg bool,
+) error {
+ require := testutil.Require(s.T())
+
+ events := make([]*model.EventEntry, 0, endSequence-startSequence)
+
+ // Create events with some BLOCK_ADDED and BLOCK_REMOVED events
+ for seq := startSequence; seq < endSequence; seq++ {
+ var eventType api.BlockchainEvent_Type
+ var blockHeight uint64
+ var blockHash string
+ var parentHash string
+
+ // Every 10th event is a BLOCK_ADDED
+ if seq%10 == 0 {
+ eventType = api.BlockchainEvent_BLOCK_ADDED
+ blockHeight = uint64(17035140 + seq/10)
+ blockHash = generateHash(blockHeight, 0)
+ if blockHeight > 17035140 {
+ parentHash = generateHash(blockHeight-1, 0)
+ }
+ } else if includeReorg && seq%100 == 5 {
+ // Add some BLOCK_REMOVED events for reorg simulation
+ eventType = api.BlockchainEvent_BLOCK_REMOVED
+ blockHeight = uint64(17035140 + seq/10)
+ blockHash = generateHash(blockHeight, 1) // Different hash for removed block
+ parentHash = generateHash(blockHeight-1, 0)
+ } else {
+ // Most events are other types (not block-related)
+ eventType = api.BlockchainEvent_UNKNOWN
+ blockHeight = uint64(17035140 + seq/10)
+ blockHash = ""
+ parentHash = ""
+ }
+
+ event := &model.EventEntry{
+ EventId: seq,
+ EventType: eventType,
+ BlockHeight: blockHeight,
+ BlockHash: blockHash,
+ ParentHash: parentHash,
+ Tag: tag,
+ EventTag: eventTag,
+ }
+ events = append(events, event)
+
+ // Add reorg at specific height
+ if includeReorg && seq == startSequence+50 {
+ // Add another BLOCK_ADDED at same height (reorg)
+ reorgEvent := &model.EventEntry{
+ EventId: seq + 1,
+ EventType: api.BlockchainEvent_BLOCK_ADDED,
+ BlockHeight: blockHeight,
+ BlockHash: generateHash(blockHeight, 2), // Different hash for reorg
+ ParentHash: parentHash,
+ Tag: tag,
+ EventTag: eventTag,
+ }
+ events = append(events, reorgEvent)
+ seq++ // Skip next sequence since we used it
+ }
+ }
+
+ // Store events in DynamoDB
+ err := deps.MetaStorage.AddEventEntries(ctx, eventTag, events)
+ require.NoError(err, "Failed to store events in DynamoDB")
+
+ // For each BLOCK_ADDED event, fetch and store the actual block
+ for _, event := range events {
+ if event.EventType == api.BlockchainEvent_BLOCK_ADDED {
+ // Fetch block from blockchain
+ block, err := deps.Client.GetBlockByHeight(ctx, tag, event.BlockHeight)
+ if err != nil {
+ // Fall back to a mock block if it can't be fetched from the chain
+ block = &api.Block{
+ Metadata: &api.BlockMetadata{
+ Tag: tag,
+ Height: event.BlockHeight,
+ Hash: event.BlockHash,
+ ParentHash: event.ParentHash,
+ ParentHeight: event.BlockHeight - 1,
+ },
+ }
+ }
+
+ // Upload to blob storage
+ objectKey, err := deps.BlobStorage.Upload(ctx, block, api.Compression_GZIP)
+ require.NoError(err, "Failed to upload block at height %d", event.BlockHeight)
+
+ // Update metadata with object key
+ block.Metadata.ObjectKeyMain = objectKey
+
+ // Store in DynamoDB metadata storage
+ err = deps.MetaStorage.PersistBlockMetas(ctx, true, []*api.BlockMetadata{block.Metadata}, nil)
+ require.NoError(err, "Failed to store block in DynamoDB at height %d", event.BlockHeight)
+ }
+ }
+
+ return nil
+}
+
+// Helper to generate deterministic hash for testing
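+// For example, generateHash(100, 0) returns "0x186a0" (100*1000 = 100000 = 0x186a0).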
+func generateHash(height uint64, variant int) string {
+ return fmt.Sprintf("0x%x", height*1000+uint64(variant))
+}
+
+// Test event-driven migration
+func (s *MigratorIntegrationTestSuite) TestMigratorIntegration_EventDriven() {
+ const (
+ tag = uint32(1)
+ eventTag = uint32(3)
+ startSequence = int64(1000)
+ endSequence = int64(1500)
+ batchSize = 100
+ )
+
+ require := testutil.Require(s.T())
+
+ // Setup
+ env := cadence.NewTestEnv(s)
+ app, deps := s.createTestApp(env, 30*time.Minute)
+ defer app.Close()
+
+ ctx := context.Background()
+
+ // Create test events with blocks
+ err := s.createTestEvents(ctx, deps, tag, eventTag, startSequence, endSequence, false)
+ require.NoError(err)
+
+ // Execute event-driven migrator workflow
+ migratorRequest := &workflow.MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: uint64(batchSize),
+ Parallelism: 4,
+ }
+
+ migratorRun, err := deps.Migrator.Execute(ctx, migratorRequest)
+ require.NoError(err)
+
+ err = migratorRun.Get(ctx, nil)
+ require.NoError(err)
+
+ // Verify migration
+ // Check that events were migrated to PostgreSQL
+ maxEventId, err := deps.MetaStoragePG.GetMaxEventId(ctx, eventTag)
+ require.NoError(err)
+ require.GreaterOrEqual(maxEventId, endSequence-1)
+
+ app.Logger().Info("Event-driven migration test passed",
+ zap.Int64("startSequence", startSequence),
+ zap.Int64("endSequence", endSequence),
+ zap.Int64("maxEventId", maxEventId))
+}
+
+// Test event-driven migration with reorgs
+func (s *MigratorIntegrationTestSuite) TestMigratorIntegration_WithReorgs() {
+ const (
+ tag = uint32(1)
+ eventTag = uint32(3)
+ startSequence = int64(2000)
+ endSequence = int64(2200)
+ batchSize = 50
+ )
+
+ require := testutil.Require(s.T())
+
+ // Setup
+ env := cadence.NewTestEnv(s)
+ app, deps := s.createTestApp(env, 20*time.Minute)
+ defer app.Close()
+
+ ctx := context.Background()
+
+ // Create test events with reorgs
+ err := s.createTestEvents(ctx, deps, tag, eventTag, startSequence, endSequence, true)
+ require.NoError(err)
+
+ // Execute event-driven migrator workflow
+ migratorRequest := &workflow.MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: uint64(batchSize),
+ Parallelism: 2, // Lower parallelism for reorg handling
+ }
+
+ migratorRun, err := deps.Migrator.Execute(ctx, migratorRequest)
+ require.NoError(err)
+
+ err = migratorRun.Get(ctx, nil)
+ require.NoError(err)
+
+ app.Logger().Info("Migration with reorgs test passed",
+ zap.Int64("startSequence", startSequence),
+ zap.Int64("endSequence", endSequence))
+}
+
+// Test auto-resume functionality
+func (s *MigratorIntegrationTestSuite) TestMigratorIntegration_AutoResume() {
+ const (
+ tag = uint32(1)
+ eventTag = uint32(3)
+ initialStart = int64(3000)
+ midPoint = int64(3100)
+ endSequence = int64(3200)
+ batchSize = 50
+ )
+
+ require := testutil.Require(s.T())
+
+ // Setup
+ env := cadence.NewTestEnv(s)
+ app, deps := s.createTestApp(env, 20*time.Minute)
+ defer app.Close()
+
+ ctx := context.Background()
+
+ // Create all test events
+ err := s.createTestEvents(ctx, deps, tag, eventTag, initialStart, endSequence, false)
+ require.NoError(err)
+
+ // First migration - partial
+ firstRequest := &workflow.MigratorRequest{
+ StartEventSequence: initialStart,
+ EndEventSequence: midPoint,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: uint64(batchSize),
+ Parallelism: 4,
+ }
+
+ migratorRun, err := deps.Migrator.Execute(ctx, firstRequest)
+ require.NoError(err)
+
+ err = migratorRun.Get(ctx, nil)
+ require.NoError(err)
+
+ // Second migration - auto-resume
+ resumeRequest := &workflow.MigratorRequest{
+ StartEventSequence: 0, // Will be auto-detected
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: uint64(batchSize),
+ Parallelism: 4,
+ AutoResume: true,
+ }
+
+ migratorRun, err = deps.Migrator.Execute(ctx, resumeRequest)
+ require.NoError(err)
+
+ err = migratorRun.Get(ctx, nil)
+ require.NoError(err)
+
+ // Verify all events migrated
+ maxEventId, err := deps.MetaStoragePG.GetMaxEventId(ctx, eventTag)
+ require.NoError(err)
+ require.GreaterOrEqual(maxEventId, endSequence-1)
+
+ app.Logger().Info("Auto-resume migration test passed",
+ zap.Int64("finalMaxEventId", maxEventId))
+}
+
+// Test large batch migration with checkpointing
+func (s *MigratorIntegrationTestSuite) TestMigratorIntegration_LargeBatch() {
+ const (
+ tag = uint32(1)
+ eventTag = uint32(3)
+ startSequence = int64(10000)
+ endSequence = int64(15000) // 5000 events
+ batchSize = 500
+ checkpointSize = 2000
+ )
+
+ require := testutil.Require(s.T())
+
+ // Setup
+ env := cadence.NewTestEnv(s)
+ app, deps := s.createTestApp(env, 30*time.Minute)
+ defer app.Close()
+
+ ctx := context.Background()
+
+ // Create test events
+ err := s.createTestEvents(ctx, deps, tag, eventTag, startSequence, endSequence, false)
+ require.NoError(err)
+
+ // Execute with checkpoint configuration
+ migratorRequest := &workflow.MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: uint64(batchSize),
+ CheckpointSize: uint64(checkpointSize),
+ Parallelism: 8,
+ }
+
+ migratorRun, err := deps.Migrator.Execute(ctx, migratorRequest)
+ require.NoError(err)
+
+ // This should trigger checkpoints during execution
+ err = migratorRun.Get(ctx, nil)
+ // May get continue-as-new error due to checkpointing
+ if err != nil {
+ require.Contains(err.Error(), "continue as new")
+ }
+
+ app.Logger().Info("Large batch migration test completed",
+ zap.Int64("startSequence", startSequence),
+ zap.Int64("endSequence", endSequence))
+}
diff --git a/internal/workflow/migrator.go b/internal/workflow/migrator.go
new file mode 100644
index 0000000..9e93066
--- /dev/null
+++ b/internal/workflow/migrator.go
@@ -0,0 +1,389 @@
+package workflow
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "go.temporal.io/sdk/client"
+ "go.temporal.io/sdk/workflow"
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/utils/fxparams"
+ "github.com/coinbase/chainstorage/internal/workflow/activity"
+)
+
+type (
+ Migrator struct {
+ baseWorkflow
+ migrator *activity.Migrator
+ getLatestBlockHeight *activity.GetLatestBlockHeightActivity
+ getLatestBlockFromPostgres *activity.GetLatestBlockFromPostgresActivity
+ getLatestEventFromPostgres *activity.GetLatestEventFromPostgresActivity
+ getMaxEventId *activity.GetMaxEventIdActivity
+ }
+
+ MigratorParams struct {
+ fx.In
+ fxparams.Params
+ Runtime cadence.Runtime
+ Migrator *activity.Migrator
+ GetLatestBlockHeight *activity.GetLatestBlockHeightActivity
+ GetLatestBlockFromPostgres *activity.GetLatestBlockFromPostgresActivity
+ GetLatestEventFromPostgres *activity.GetLatestEventFromPostgresActivity
+ GetMaxEventId *activity.GetMaxEventIdActivity
+ }
+
+ MigratorRequest struct {
+ StartEventSequence int64 // Start event sequence
+ EndEventSequence int64 // End event sequence (0 = auto-detect)
+ EventTag uint32
+ Tag uint32
+ BatchSize uint64 // Optional. If not specified, it is read from the workflow config.
+ CheckpointSize uint64 // Optional. If not specified, it is read from the workflow config.
+ Parallelism int // Optional. If not specified, it is read from the workflow config.
+ BackoffInterval string // Optional. If not specified, it is read from the workflow config.
+ ContinuousSync bool // Optional. Whether to continuously sync data in infinite loop mode
+ SyncInterval string // Optional. Interval for continuous sync (e.g., "1m", "30s"). Defaults to 1 minute if not specified or invalid.
+ AutoResume bool // Optional. Automatically determine StartEventSequence from latest event in PostgreSQL destination
+ }
+)
+
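+// A typical request (illustrative values only): auto-resume from the PostgreSQL
+// watermark and auto-detect the end of the range from DynamoDB.
+//
+//	req := &MigratorRequest{
+//		Tag:        1,
+//		EventTag:   3,
+//		BatchSize:  5000,
+//		AutoResume: true, // StartEventSequence left as 0 for auto-detection
+//	}
+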
+var (
+ _ InstrumentedRequest = (*MigratorRequest)(nil)
+)
+
+const (
+ // Migrator metrics; names must be prefixed with `workflow.migrator`.
+ migratorHeightGauge = "workflow.migrator.height"
+ migratorBlocksCounter = "workflow.migrator.blocks_migrated"
+ migratorEventsCounter = "workflow.migrator.events_migrated"
+ migratorProgressGauge = "workflow.migrator.progress"
+)
+
+func NewMigrator(params MigratorParams) *Migrator {
+ w := &Migrator{
+ baseWorkflow: newBaseWorkflow(&params.Config.Workflows.Migrator, params.Runtime),
+ migrator: params.Migrator,
+ getLatestBlockHeight: params.GetLatestBlockHeight,
+ getLatestBlockFromPostgres: params.GetLatestBlockFromPostgres,
+ getLatestEventFromPostgres: params.GetLatestEventFromPostgres,
+ getMaxEventId: params.GetMaxEventId,
+ }
+ w.registerWorkflow(w.execute)
+ return w
+}
+
+func (w *Migrator) Execute(ctx context.Context, request *MigratorRequest) (client.WorkflowRun, error) {
+ workflowID := w.name
+ if request.Tag != 0 {
+ workflowID = fmt.Sprintf("%s/block_tag=%d", w.name, request.Tag)
+ }
+ return w.startMigratorWorkflow(ctx, workflowID, request)
+}
+
+// startMigratorWorkflow starts a migrator workflow with a custom reuse policy
+// that allows restarting failed workflows but prevents concurrent execution
+func (w *Migrator) startMigratorWorkflow(ctx context.Context, workflowID string, request *MigratorRequest) (client.WorkflowRun, error) {
+ if err := w.validateRequestCtx(ctx, request); err != nil {
+ return nil, err
+ }
+ cfg := w.config.Base()
+ workflowOptions := client.StartWorkflowOptions{
+ ID: workflowID,
+ TaskQueue: cfg.TaskList,
+ WorkflowRunTimeout: cfg.WorkflowRunTimeout,
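+ // With Temporal's default WorkflowIDReusePolicy (allow duplicate), a failed or
+ // completed run can be restarted, while the flag below errors out if a run with
+ // the same workflow ID is still in flight.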
+ WorkflowExecutionErrorWhenAlreadyStarted: true,
+ RetryPolicy: w.getRetryPolicy(cfg.WorkflowRetry),
+ }
+ execution, err := w.runtime.ExecuteWorkflow(ctx, workflowOptions, w.name, request)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to execute workflow: %w", err)
+ }
+ return execution, nil
+}
+
+func (w *Migrator) execute(ctx workflow.Context, request *MigratorRequest) error {
+ return w.executeWorkflow(ctx, request, func() error {
+ if err := w.validateRequest(request); err != nil {
+ return err
+ }
+
+ var cfg config.MigratorWorkflowConfig
+ if err := w.readConfig(ctx, &cfg); err != nil {
+ return xerrors.Errorf("failed to read config: %w", err)
+ }
+
+ // Event-driven migration always processes both blocks and events
+
+ batchSize := cfg.BatchSize
+ if request.BatchSize > 0 {
+ batchSize = request.BatchSize
+ }
+
+ checkpointSize := cfg.CheckpointSize
+ if request.CheckpointSize > 0 {
+ checkpointSize = request.CheckpointSize
+ }
+
+ parallelism := cfg.Parallelism
+ if parallelism <= 0 {
+ parallelism = 1 // Fallback default if config not set
+ }
+ if request.Parallelism > 0 {
+ parallelism = request.Parallelism
+ }
+
+ backoffInterval := cfg.BackoffInterval
+ if request.BackoffInterval != "" {
+ parsedInterval, err := time.ParseDuration(request.BackoffInterval)
+ if err != nil {
+ return xerrors.Errorf("failed to parse backoff interval: %w", err)
+ }
+ backoffInterval = parsedInterval
+ }
+
+ // Continuous sync is enabled if either the workflow config or the request sets it
+ continuousSync := cfg.ContinuousSync || request.ContinuousSync
+
+ syncInterval := defaultSyncInterval
+ if cfg.SyncInterval > 0 {
+ syncInterval = cfg.SyncInterval
+ }
+ if request.SyncInterval != "" {
+ interval, err := time.ParseDuration(request.SyncInterval)
+ if err == nil {
+ syncInterval = interval
+ }
+ }
+
+ tag := cfg.GetEffectiveBlockTag(request.Tag)
+ eventTag := cfg.GetEffectiveEventTag(request.EventTag)
+ metrics := w.getMetricsHandler(ctx).WithTags(map[string]string{
+ tagBlockTag: strconv.Itoa(int(tag)),
+ })
+ logger := w.getLogger(ctx).With(
+ zap.Reflect("request", request),
+ zap.Reflect("config", cfg),
+ zap.Uint32("effectiveEventTag", eventTag),
+ )
+
+ // Set up activity options early so we can use activities
+ ctx = w.withActivityOptions(ctx)
+
+ // Handle auto-resume functionality - use latest event sequence
+ if request.AutoResume && request.StartEventSequence == 0 {
+ logger.Info("AutoResume enabled, querying PostgreSQL destination for latest migrated event")
+ postgresEventResp, err := w.getLatestEventFromPostgres.Execute(ctx, &activity.GetLatestEventFromPostgresRequest{EventTag: eventTag})
+ if err != nil {
+ return xerrors.Errorf("failed to get latest event from PostgreSQL: %w", err)
+ }
+
+ if postgresEventResp.Found {
+ // Resume from the next event sequence
+ request.StartEventSequence = postgresEventResp.Sequence + 1
+ logger.Info("Auto-resume: found latest event in PostgreSQL destination",
+ zap.Int64("latestEventSequence", postgresEventResp.Sequence),
+ zap.Int64("resumeFromSequence", request.StartEventSequence))
+ } else {
+ // No events found in destination, start from the beginning
+ request.StartEventSequence = 1 // Events start at 1
+ logger.Info("Auto-resume: no events found in PostgreSQL destination, starting from beginning")
+ }
+ }
+
+ // Handle end event sequence auto-detection if not provided
+ if request.EndEventSequence == 0 {
+ logger.Info("No end event sequence provided, fetching max event ID from DynamoDB...")
+
+ // Query DynamoDB for the actual max event ID
+ maxEventResp, err := w.getMaxEventId.Execute(ctx, &activity.GetMaxEventIdRequest{
+ EventTag: eventTag,
+ })
+ if err != nil {
+ return xerrors.Errorf("failed to get max event ID from DynamoDB: %w", err)
+ }
+
+ if !maxEventResp.Found {
+ logger.Warn("No events found in DynamoDB")
+ if continuousSync {
+ // In continuous sync, if no events exist yet, wait and retry
+ logger.Info("No events in DynamoDB, waiting for sync interval before retry",
+ zap.Duration("syncInterval", syncInterval))
+ err := workflow.Sleep(ctx, syncInterval)
+ if err != nil {
+ return xerrors.Errorf("workflow sleep failed while waiting for events: %w", err)
+ }
+ // Continue as new to retry
+ newRequest := *request
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
+ }
+ return xerrors.New("No events found in DynamoDB to migrate")
+ }
+
+ request.EndEventSequence = maxEventResp.MaxEventId
+ logger.Info("Found max event ID in DynamoDB",
+ zap.Int64("maxEventId", maxEventResp.MaxEventId),
+ zap.Int64("startEventSequence", request.StartEventSequence))
+ }
+
+ // Validate end sequence after auto-detection and auto-resume
+ if !continuousSync && request.StartEventSequence >= request.EndEventSequence {
+ return xerrors.Errorf("startEventSequence (%d) must be less than endEventSequence (%d)",
+ request.StartEventSequence, request.EndEventSequence)
+ }
+
+ // Additional handling for continuous sync:
+ // If EndEventSequence <= StartEventSequence, we are caught up.
+ if continuousSync && request.EndEventSequence != 0 && request.EndEventSequence <= request.StartEventSequence {
+ logger.Info("Continuous sync: caught up (no new events).",
+ zap.Int64("startEventSequence", request.StartEventSequence),
+ zap.Int64("endEventSequence", request.EndEventSequence))
+
+ // No special event catch-up needed - normal migration handles this
+
+ // Prepare for next cycle
+ newRequest := *request
+ newRequest.StartEventSequence = request.EndEventSequence
+ newRequest.EndEventSequence = 0 // re-detect on next cycle
+
+ // Wait for syncInterval before starting a new continuous sync workflow
+ logger.Info("waiting for sync interval before next catch-up cycle",
+ zap.Duration("syncInterval", syncInterval))
+ err := workflow.Sleep(ctx, syncInterval)
+ if err != nil {
+ return xerrors.Errorf("workflow sleep failed during caught-up continuous sync: %w", err)
+ }
+
+ logger.Info("starting next continuous sync cycle after catch-up",
+ zap.Int64("nextStartEventSequence", newRequest.StartEventSequence))
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
+ }
+
+ // Special case: if auto-resume found we're already caught up
+ if request.AutoResume && request.StartEventSequence >= request.EndEventSequence {
+ logger.Info("Auto-resume detected: already caught up, no migration needed",
+ zap.Int64("startEventSequence", request.StartEventSequence),
+ zap.Int64("endEventSequence", request.EndEventSequence))
+ return nil // Successfully completed with no work to do
+ }
+
+ logger.Info("migrator workflow started")
+
+ totalEventRange := request.EndEventSequence - request.StartEventSequence
+ processedEvents := int64(0)
+
+ for batchStart := request.StartEventSequence; batchStart < request.EndEventSequence; batchStart += int64(batchSize) {
+ // Check for checkpoint - only check after processing at least one batch
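+ // Continue-as-new truncates the Temporal event history, keeping long-running
+ // migrations within the server's history size limits.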
+ processedSoFar := batchStart - request.StartEventSequence
+ if processedSoFar > 0 && processedSoFar >= int64(checkpointSize) {
+ newRequest := *request
+ newRequest.StartEventSequence = batchStart
+ logger.Info("checkpoint reached", zap.Reflect("newRequest", newRequest))
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
+ }
+
+ batchEnd := batchStart + int64(batchSize)
+ if batchEnd > request.EndEventSequence {
+ batchEnd = request.EndEventSequence
+ }
+
+ logger.Info("migrating event batch",
+ zap.Int64("batchStart", batchStart),
+ zap.Int64("batchEnd", batchEnd))
+
+ // Execute a single migrator activity for the entire batch.
+ migratorRequest := &activity.MigratorRequest{
+ StartEventSequence: batchStart,
+ EndEventSequence: batchEnd,
+ EventTag: eventTag,
+ Tag: tag,
+ Parallelism: parallelism,
+ }
+
+ response, err := w.migrator.Execute(ctx, migratorRequest)
+ if err != nil {
+ logger.Error(
+ "failed to migrate batch",
+ zap.Int64("batchStart", batchStart),
+ zap.Int64("batchEnd", batchEnd),
+ zap.Error(err),
+ )
+ return xerrors.Errorf("failed to migrate batch [%v, %v): %w", batchStart, batchEnd, err)
+ }
+ if !response.Success {
+ logger.Error(
+ "migration batch failed",
+ zap.Int64("batchStart", batchStart),
+ zap.Int64("batchEnd", batchEnd),
+ zap.String("message", response.Message),
+ )
+ return xerrors.Errorf("migration batch failed [%v, %v): %s", batchStart, batchEnd, response.Message)
+ }
+
+ // Update metrics for the whole batch after all shards complete
+ processedEvents += batchEnd - batchStart
+ progress := float64(processedEvents) / float64(totalEventRange) * 100
+
+ metrics.Gauge(migratorHeightGauge).Update(float64(batchEnd - 1))
+ metrics.Counter(migratorBlocksCounter).Inc(int64(response.BlocksMigrated))
+ metrics.Counter(migratorEventsCounter).Inc(int64(response.EventsMigrated))
+ metrics.Gauge(migratorProgressGauge).Update(progress)
+
+ logger.Info(
+ "migrated batch successfully",
+ zap.Int64("batchStart", batchStart),
+ zap.Int64("batchEnd", batchEnd),
+ zap.Int("blocksMigrated", response.BlocksMigrated),
+ zap.Int("eventsMigrated", response.EventsMigrated),
+ zap.Float64("progress", progress),
+ )
+
+ // Add backoff if configured
+ if backoffInterval > 0 {
+ _ = workflow.Sleep(ctx, backoffInterval)
+ }
+ }
+
+ if continuousSync {
+ logger.Info("continuous sync enabled, preparing for next sync cycle")
+ newRequest := *request
+ newRequest.StartEventSequence = request.EndEventSequence
+ newRequest.EndEventSequence = 0 // Will be auto-detected on next cycle
+ newRequest.AutoResume = false // AutoResume should only happen on first workflow run
+
+ // Wait for syncInterval before starting a new continuous sync workflow
+ logger.Info("waiting for sync interval before next cycle",
+ zap.Duration("syncInterval", syncInterval))
+ err := workflow.Sleep(ctx, syncInterval)
+ if err != nil {
+ return xerrors.Errorf("workflow sleep failed during continuous sync: %w", err)
+ }
+
+ logger.Info("starting new continuous sync workflow",
+ zap.Int64("nextStartEventSequence", newRequest.StartEventSequence),
+ zap.Reflect("newRequest", newRequest))
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
+ }
+
+ logger.Info("migrator workflow finished",
+ zap.Int64("totalEvents", totalEventRange),
+ zap.Int64("processedEvents", processedEvents))
+
+ return nil
+ })
+}
+
+func (r *MigratorRequest) GetTags() map[string]string {
+ return map[string]string{
+ tagBlockTag: strconv.Itoa(int(r.Tag)),
+ }
+}
diff --git a/internal/workflow/migrator_test.go b/internal/workflow/migrator_test.go
new file mode 100644
index 0000000..d59f51c
--- /dev/null
+++ b/internal/workflow/migrator_test.go
@@ -0,0 +1,325 @@
+package workflow
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+ "go.temporal.io/sdk/testsuite"
+ "go.uber.org/fx"
+
+ "github.com/coinbase/chainstorage/internal/cadence"
+ "github.com/coinbase/chainstorage/internal/config"
+ "github.com/coinbase/chainstorage/internal/utils/testapp"
+ "github.com/coinbase/chainstorage/internal/utils/testutil"
+ "github.com/coinbase/chainstorage/internal/workflow/activity"
+)
+
+const (
+ migratorCheckpointSize = 50000
+ migratorBatchSize = 5000
+)
+
+type migratorTestSuite struct {
+ suite.Suite
+ testsuite.WorkflowTestSuite
+ env *cadence.TestEnv
+ migrator *Migrator
+ app testapp.TestApp
+ cfg *config.Config
+}
+
+func TestMigratorTestSuite(t *testing.T) {
+ suite.Run(t, new(migratorTestSuite))
+}
+
+func (s *migratorTestSuite) SetupTest() {
+ require := testutil.Require(s.T())
+
+ // Override config to speed up the test
+ cfg, err := config.New()
+ require.NoError(err)
+ cfg.Workflows.Migrator.BatchSize = migratorBatchSize
+ cfg.Workflows.Migrator.CheckpointSize = migratorCheckpointSize
+ cfg.Workflows.Migrator.BackoffInterval = time.Second
+ cfg.Workflows.Migrator.Parallelism = 8
+ s.cfg = cfg
+
+ s.env = cadence.NewTestEnv(s)
+ s.app = testapp.New(
+ s.T(),
+ Module,
+ testapp.WithConfig(s.cfg),
+ cadence.WithTestEnv(s.env),
+ fx.Populate(&s.migrator),
+ )
+}
+
+func (s *migratorTestSuite) TearDownTest() {
+ s.app.Close()
+ s.env.AssertExpectations(s.T())
+}
+
+func (s *migratorTestSuite) TestMigrator_EventDriven_Success() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000)
+ endSequence := int64(6000) // 5000 events, fits in one batch
+ tag := uint32(1)
+ eventTag := uint32(0)
+
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ require.Equal(tag, request.Tag)
+ expectedEventTag := s.cfg.Workflows.Migrator.GetEffectiveEventTag(eventTag)
+ require.Equal(expectedEventTag, request.EventTag)
+
+ // Event-driven migration processes both blocks and events
+ eventCount := request.EndEventSequence - request.StartEventSequence
+ blockCount := eventCount / 10 // Assume roughly 10% are BLOCK_ADDED events
+
+ return &activity.MigratorResponse{
+ BlocksMigrated: int(blockCount),
+ EventsMigrated: int(eventCount),
+ Success: true,
+ Message: "Event-driven migration completed successfully",
+ }, nil
+ })
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ })
+ require.NoError(err)
+}
+
+func (s *migratorTestSuite) TestMigrator_WithCheckpoint() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000)
+ endSequence := startSequence + int64(migratorCheckpointSize) + 10000 // Exceed checkpoint
+ tag := uint32(1)
+ eventTag := uint32(0)
+
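+ // 60000 events at the 5000-event default batch size crosses the 50000-event
+ // checkpoint, so the workflow should exit via continue-as-new before completion.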
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(&activity.MigratorResponse{
+ BlocksMigrated: 500,
+ EventsMigrated: 5000,
+ Success: true,
+ Message: "Migration batch completed",
+ }, nil)
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ })
+ require.Error(err)
+ require.True(IsContinueAsNewError(err))
+}
+
+func (s *migratorTestSuite) TestMigrator_CustomBatchSize() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000)
+ endSequence := int64(3000)
+ tag := uint32(1)
+ eventTag := uint32(0)
+ customBatchSize := uint64(500)
+
+ callCount := 0
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ callCount++
+ // Each batch should be customBatchSize
+ batchSize := request.EndEventSequence - request.StartEventSequence
+ require.LessOrEqual(batchSize, int64(customBatchSize))
+
+ return &activity.MigratorResponse{
+ BlocksMigrated: 50,
+ EventsMigrated: int(batchSize),
+ Success: true,
+ Message: "Batch completed",
+ }, nil
+ })
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: customBatchSize,
+ })
+ require.NoError(err)
+ // Should have been called 4 times (2000 events / 500 per batch)
+ require.Equal(4, callCount)
+}
+
+func (s *migratorTestSuite) TestMigrator_AutoResume() {
+ require := testutil.Require(s.T())
+
+ tag := uint32(1)
+ eventTag := uint32(3)
+
+ // Mock GetLatestEventFromPostgres to return a sequence
+ s.env.OnActivity(activity.ActivityGetLatestEventFromPostgres, mock.Anything, mock.Anything).
+ Return(&activity.GetLatestEventFromPostgresResponse{
+ Sequence: int64(5000),
+ Height: uint64(1000),
+ Found: true,
+ }, nil).Once()
+
+ // The workflow should resume from sequence 5001
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ // Should start from next sequence after latest
+ require.Equal(int64(5001), request.StartEventSequence)
+
+ return &activity.MigratorResponse{
+ BlocksMigrated: 100,
+ EventsMigrated: 1000,
+ Success: true,
+ Message: "Resumed migration",
+ }, nil
+ })
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: 0, // Will be auto-detected
+ EndEventSequence: 10000,
+ Tag: tag,
+ EventTag: eventTag,
+ AutoResume: true,
+ })
+ require.NoError(err)
+}
+
+func (s *migratorTestSuite) TestMigrator_ContinuousSync() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000)
+ tag := uint32(1)
+ eventTag := uint32(0)
+
+ // Mock GetMaxEventId activity since EndEventSequence is 0
+ s.env.OnActivity(activity.ActivityGetMaxEventId, mock.Anything, mock.Anything).
+ Return(&activity.GetMaxEventIdResponse{
+ MaxEventId: int64(6000),
+ Found: true,
+ }, nil).Once()
+
+ callCount := 0
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ callCount++
+ if callCount > 2 {
+ // Stop after 2 iterations to prevent infinite loop in test
+ s.env.CancelWorkflow()
+ }
+
+ return &activity.MigratorResponse{
+ BlocksMigrated: 100,
+ EventsMigrated: 1000,
+ Success: true,
+ Message: "Batch migrated",
+ }, nil
+ })
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: 0, // Will be auto-detected for continuous sync
+ Tag: tag,
+ EventTag: eventTag,
+ ContinuousSync: true,
+ SyncInterval: "1s",
+ })
+
+ // Should get a continue-as-new error for continuous sync
+ if err != nil {
+ require.True(IsContinueAsNewError(err) || s.env.IsWorkflowCompleted())
+ }
+}
+
+func (s *migratorTestSuite) TestMigrator_Parallelism() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000)
+ endSequence := int64(6000)
+ tag := uint32(1)
+ eventTag := uint32(0)
+ parallelism := 4
+
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ // Verify parallelism is passed through
+ require.Equal(parallelism, request.Parallelism)
+
+ eventCount := request.EndEventSequence - request.StartEventSequence
+ return &activity.MigratorResponse{
+ BlocksMigrated: int(eventCount / 10),
+ EventsMigrated: int(eventCount),
+ Success: true,
+ Message: "Parallel migration completed",
+ }, nil
+ })
+
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ Parallelism: parallelism,
+ })
+ require.NoError(err)
+}
+
+func (s *migratorTestSuite) TestMigrator_LargeMigration() {
+ require := testutil.Require(s.T())
+
+ startSequence := int64(1000000)
+ endSequence := int64(2000000) // 1 million events
+ tag := uint32(1)
+ eventTag := uint32(0)
+
+ batchCount := 0
+ s.env.OnActivity(activity.ActivityMigrator, mock.Anything, mock.Anything).
+ Return(func(ctx context.Context, request *activity.MigratorRequest) (*activity.MigratorResponse, error) {
+ batchCount++
+
+ // Each batch should be the configured batch size
+ batchSize := request.EndEventSequence - request.StartEventSequence
+ require.LessOrEqual(batchSize, int64(migratorBatchSize))
+
+ // Simulate processing
+ blockCount := batchSize / 10
+
+ return &activity.MigratorResponse{
+ BlocksMigrated: int(blockCount),
+ EventsMigrated: int(batchSize),
+ Success: true,
+ Message: "Large batch processed",
+ }, nil
+ })
+
+ // This should trigger checkpoints
+ _, err := s.migrator.Execute(context.Background(), &MigratorRequest{
+ StartEventSequence: startSequence,
+ EndEventSequence: endSequence,
+ Tag: tag,
+ EventTag: eventTag,
+ BatchSize: migratorBatchSize,
+ CheckpointSize: migratorCheckpointSize,
+ })
+
+ // Should hit checkpoint and continue-as-new
+ require.Error(err)
+ require.True(IsContinueAsNewError(err))
+
+ // Should have processed checkpoint size worth of events
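+ // 50000-event checkpoint / 5000-event batches = 10 batches before continue-as-new.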
+ expectedBatches := int(migratorCheckpointSize / migratorBatchSize)
+ require.Equal(expectedBatches, batchCount)
+}
diff --git a/internal/workflow/module.go b/internal/workflow/module.go
index c966fb6..c756454 100644
--- a/internal/workflow/module.go
+++ b/internal/workflow/module.go
@@ -17,6 +17,7 @@ var Module = fx.Options(
fx.Provide(NewCrossValidator),
fx.Provide(NewEventBackfiller),
fx.Provide(NewReplicator),
+ fx.Provide(NewMigrator),
)
const (
diff --git a/internal/workflow/poller.go b/internal/workflow/poller.go
index 68832d3..9e938d1 100644
--- a/internal/workflow/poller.go
+++ b/internal/workflow/poller.go
@@ -204,6 +204,19 @@ func (w *Poller) execute(ctx workflow.Context, request *PollerRequest) error {
}
sessionCtx, err = workflow.CreateSession(ctx, so)
if err != nil {
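+ // Session creation can fail transiently when no session worker is available
+ // or the schedule-to-start timer fires; treat these as retryable and restart
+ // via continue-as-new after a backoff instead of failing the workflow.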
+ if IsErrSessionFailed(ctx, err) || IsScheduleToStartTimeout(err) {
+ request.RetryableErrorCount++
+ errMetricName := w.getRetryableErrorMetricName(ctx, err)
+ metrics.Counter(errMetricName).Inc(1)
+
+ if request.RetryableErrorCount <= RetryableErrorLimit {
+ if err := workflow.Sleep(ctx, backoffInterval); err != nil {
+ return err
+ }
+ return w.continueAsNew(ctx, request)
+ }
+ return xerrors.Errorf("retryable errors on session creation exceeded threshold: %w", err)
+ }
return xerrors.Errorf("failed to create workflow session: %w", err)
}
defer workflow.CompleteSession(sessionCtx)
diff --git a/internal/workflow/replicator.go b/internal/workflow/replicator.go
index d4f9ea9..9eede0f 100644
--- a/internal/workflow/replicator.go
+++ b/internal/workflow/replicator.go
@@ -3,6 +3,8 @@ package workflow
import (
"context"
"strconv"
+ "strings"
+ "time"
"go.temporal.io/sdk/client"
"go.temporal.io/sdk/workflow"
@@ -10,6 +12,7 @@ import (
"go.uber.org/zap"
"golang.org/x/xerrors"
+ "github.com/coinbase/chainstorage/internal/blockchain/parser"
"github.com/coinbase/chainstorage/internal/cadence"
"github.com/coinbase/chainstorage/internal/config"
"github.com/coinbase/chainstorage/internal/utils/fxparams"
@@ -22,6 +25,7 @@ type (
Replicator struct {
baseWorkflow
replicator *activity.Replicator
+ latestBlock *activity.LatestBlock
updateWatermark *activity.UpdateWatermark
}
@@ -30,22 +34,34 @@ type (
fxparams.Params
Runtime cadence.Runtime
Replicator *activity.Replicator
+ LatestBlock *activity.LatestBlock
UpdateWatermark *activity.UpdateWatermark
}
ReplicatorRequest struct {
Tag uint32
StartHeight uint64
- EndHeight uint64 `validate:"gt=0,gtfield=StartHeight"`
+ EndHeight uint64 `validate:"eq=0|gtfield=StartHeight"`
UpdateWatermark bool
DataCompression string // Optional. If not specified, it is read from the workflow config.
BatchSize uint64 // Optional. If not specified, it is read from the workflow config.
MiniBatchSize uint64 // Optional. If not specified, it is read from the workflow config.
CheckpointSize uint64 // Optional. If not specified, it is read from the workflow config.
Parallelism int // Optional. If not specified, it is read from the workflow config.
+ ContinuousSync bool // Optional. Whether to continuously sync data
+ SyncInterval string // Optional. Interval for continuous sync
}
)
+const defaultSyncInterval = 1 * time.Minute
+
+const (
+ // Replicator metrics; names must be prefixed with `workflow.replicator`.
+ replicatorHeightGauge = "workflow.replicator.height"
+ replicatorGapGauge = "workflow.replicator.gap"
+ replicatorTimeSinceLastBlockGauge = "workflow.replicator.time_since_last_block"
+)
+
// GetTags implements InstrumentedRequest.
func (r *ReplicatorRequest) GetTags() map[string]string {
return map[string]string{
@@ -61,6 +77,7 @@ func NewReplicator(params ReplicatorParams) *Replicator {
w := &Replicator{
+ baseWorkflow: newBaseWorkflow(&params.Config.Workflows.Replicator, params.Runtime),
replicator: params.Replicator,
+ latestBlock: params.LatestBlock,
updateWatermark: params.UpdateWatermark,
}
w.registerWorkflow(w.execute)
@@ -121,6 +138,27 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
logger.Info("workflow started", zap.Uint64("batchSize", batchSize))
ctx = w.withActivityOptions(ctx)
+ metrics := w.runtime.GetMetricsHandler(ctx).WithTags(map[string]string{
+ tagBlockTag: strconv.Itoa(int(request.Tag)),
+ })
+
+ syncInterval := defaultSyncInterval
+ if request.SyncInterval != "" {
+ interval, err := time.ParseDuration(request.SyncInterval)
+ if err == nil {
+ syncInterval = interval
+ }
+ }
+
+ if request.ContinuousSync && request.EndHeight == 0 {
+ latestBlockResponse, err := w.latestBlock.Execute(ctx, &activity.LatestBlockRequest{})
+ if err != nil {
+ return xerrors.Errorf("failed to get latest block through activity: %w", err)
+ }
+ // Stop short of the tip by the configured irreversible distance so blocks
+ // that may still be reorged are not replicated.
+ request.EndHeight = latestBlockResponse.Height - cfg.IrreversibleDistance
+ }
+
for startHeight := request.StartHeight; startHeight < request.EndHeight; startHeight = startHeight + batchSize {
if startHeight >= request.StartHeight+checkpointSize {
newRequest := *request
@@ -148,6 +186,9 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
reprocessChannel := workflow.NewNamedBufferedChannel(ctx, "replicator.reprocess", miniBatchCount)
defer reprocessChannel.Close()
+ responsesChannel := workflow.NewNamedBufferedChannel(ctx, "replicator.mini-batches.response", parallelism+miniBatchCount)
+ defer responsesChannel.Close()
+
// Phase 1: running mini batches in parallel.
for i := 0; i < parallelism; i++ {
workflow.Go(ctx, func(ctx workflow.Context) {
@@ -161,7 +202,7 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
if batchEnd > endHeight {
batchEnd = endHeight
}
- _, err := w.replicator.Execute(ctx, &activity.ReplicatorRequest{
+ replicatorResponse, err := w.replicator.Execute(ctx, &activity.ReplicatorRequest{
Tag: tag,
StartHeight: batchStart,
EndHeight: batchEnd,
@@ -176,6 +217,7 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
zap.Error(err),
)
}
+ if err == nil {
+ responsesChannel.Send(ctx, *replicatorResponse)
+ }
}
})
}
@@ -192,7 +234,7 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
if batchEnd > endHeight {
batchEnd = endHeight
}
- _, err := w.replicator.Execute(ctx, &activity.ReplicatorRequest{
+ retryResponse, err := w.replicator.Execute(ctx, &activity.ReplicatorRequest{
Tag: tag,
StartHeight: batchStart,
EndHeight: batchEnd,
@@ -202,19 +244,89 @@ func (w *Replicator) execute(ctx workflow.Context, request *ReplicatorRequest) e
if err != nil {
return xerrors.Errorf("failed to replicate block from %d to %d: %w", batchStart, batchEnd, err)
}
+ responsesChannel.Send(ctx, *retryResponse)
}
// Phase 3: update watermark
if request.UpdateWatermark {
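+ // Guard against uint64 underflow when validating from genesis (startHeight == 0).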
+ var validateStart uint64
+ if startHeight == 0 {
+ validateStart = startHeight
+ } else {
+ validateStart = startHeight - 1
+ }
_, err := w.updateWatermark.Execute(ctx, &activity.UpdateWatermarkRequest{
Tag: request.Tag,
- ValidateStart: startHeight - 1,
+ ValidateStart: validateStart,
BlockHeight: endHeight - 1,
})
if err != nil {
+ // Check if the error is due to chain discontinuity (reorg)
+ // Use string matching because Temporal error serialization breaks xerrors.Is
+ if strings.Contains(err.Error(), parser.ErrInvalidChain.Error()) {
+ // Reorg detected - restart from a safe point
+ logger.Warn("Chain discontinuity detected, likely due to reorg. Restarting from earlier height",
+ zap.Uint64("currentStartHeight", startHeight),
+ zap.Uint64("irreversibleDistance", cfg.IrreversibleDistance),
+ zap.Error(err))
+
+ // Calculate the new start height by going back by irreversible_distance
+ // Ensure we go back at least 1 block, even if IrreversibleDistance is 0
+ var newStartHeight uint64
+ reorgDistance := cfg.IrreversibleDistance
+ if reorgDistance == 0 {
+ reorgDistance = 1 // Go back at least 1 block
+ }
+
+ if startHeight > reorgDistance {
+ newStartHeight = startHeight - reorgDistance
+ } else {
+ newStartHeight = 0
+ }
+
+ // Create a new request starting from the safe point
+ newRequest := *request
+ newRequest.StartHeight = newStartHeight
+ logger.Info("Restarting replicator workflow after reorg",
+ zap.Uint64("newStartHeight", newStartHeight),
+ zap.Uint64("endHeight", request.EndHeight))
+
+ // Continue as new workflow to handle the reorg
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
+ }
return xerrors.Errorf("failed to update watermark: %w", err)
}
}
+
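+ // Drain the mini-batch responses received so far and keep the one with the
+ // highest block height for metrics reporting.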
+ var latestResp activity.ReplicatorResponse
+ for {
+ var resp activity.ReplicatorResponse
+ if ok := responsesChannel.ReceiveAsync(&resp); !ok {
+ break
+ }
+ if resp.LatestBlockHeight > latestResp.LatestBlockHeight {
+ latestResp = resp
+ }
+ }
+ if latestResp != (activity.ReplicatorResponse{}) {
+ metrics.Gauge(replicatorHeightGauge).Update(float64(latestResp.LatestBlockHeight))
+ metrics.Gauge(replicatorGapGauge).Update(float64(request.EndHeight - latestResp.LatestBlockHeight + 1))
+ metrics.Gauge(replicatorTimeSinceLastBlockGauge).Update(utils.SinceTimestamp(latestResp.LatestBlockTimestamp).Seconds())
+ }
+ }
+
+ if request.ContinuousSync {
+ logger.Info("new continuous sync workflow")
+ newRequest := *request
+ newRequest.StartHeight = request.EndHeight
+ newRequest.EndHeight = 0
+ // Wait for syncInterval before starting a new continuous sync workflow.
+ err := workflow.Sleep(ctx, syncInterval)
+ if err != nil {
+ return xerrors.Errorf("workflow await failed: %w", err)
+ }
+ logger.Info("start new continuous sync workflow")
+ return workflow.NewContinueAsNewError(ctx, w.name, &newRequest)
}
logger.Info("workflow finished")
diff --git a/internal/workflow/workflow.go b/internal/workflow/workflow.go
index d321ecb..366ebaa 100644
--- a/internal/workflow/workflow.go
+++ b/internal/workflow/workflow.go
@@ -38,6 +38,7 @@ type (
crossValidator *CrossValidator
eventBackfiller *EventBackfiller
replicator *Replicator
+ migrator *Migrator
}
ManagerParams struct {
@@ -53,6 +54,7 @@ type (
CrossValidator *CrossValidator
EventBackfiller *EventBackfiller
Replicator *Replicator
+ Migrator *Migrator
}
InstrumentedRequest interface {
@@ -90,6 +92,7 @@ func NewManager(params ManagerParams) *Manager {
crossValidator: params.CrossValidator,
eventBackfiller: params.EventBackfiller,
replicator: params.Replicator,
+ migrator: params.Migrator,
}
params.Lifecycle.Append(fx.Hook{
diff --git a/internal/workflow/workflow_identity.go b/internal/workflow/workflow_identity.go
index 320434d..22ef3f3 100644
--- a/internal/workflow/workflow_identity.go
+++ b/internal/workflow/workflow_identity.go
@@ -19,6 +19,7 @@ const (
CrossValidatorIdentity
EventBackfillerIdentity
ReplicatorIdentity
+ MigratorIdentity
)
var workflowIdentityToString = map[WorkflowIdentity]string{
@@ -30,6 +31,7 @@ var workflowIdentityToString = map[WorkflowIdentity]string{
CrossValidatorIdentity: "workflow.cross_validator",
EventBackfillerIdentity: "workflow.event_backfiller",
ReplicatorIdentity: "workflow.replicator",
+ MigratorIdentity: "workflow.migrator",
}
var workflowIdentities = map[string]WorkflowIdentity{
@@ -41,6 +43,7 @@ var workflowIdentities = map[string]WorkflowIdentity{
"cross_validator": CrossValidatorIdentity,
"event_backfiller": EventBackfillerIdentity,
"replicator": ReplicatorIdentity,
+ "migrator": MigratorIdentity,
}
func GetWorkflowIdentify(name string) WorkflowIdentity {
@@ -105,6 +108,11 @@ func (w WorkflowIdentity) UnmarshalJsonStringToRequest(str string) (any, error)
if err = decoder.Decode(&req); err == nil {
return req, nil
}
+ case MigratorIdentity:
+ var req MigratorRequest
+ if err = decoder.Decode(&req); err == nil {
+ return req, nil
+ }
default:
err = xerrors.Errorf("unsupported workflow identity: %v", w)
}
diff --git a/protos/coinbase/c3/common/common.pb.go b/protos/coinbase/c3/common/common.pb.go
index 3b82ead..2a90a80 100644
--- a/protos/coinbase/c3/common/common.pb.go
+++ b/protos/coinbase/c3/common/common.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/c3/common/common.proto
package common
@@ -25,19 +25,29 @@ const (
type Blockchain int32
const (
- Blockchain_BLOCKCHAIN_UNKNOWN Blockchain = 0
- Blockchain_BLOCKCHAIN_SOLANA Blockchain = 11
- Blockchain_BLOCKCHAIN_BITCOIN Blockchain = 16
- Blockchain_BLOCKCHAIN_ETHEREUM Blockchain = 17
- Blockchain_BLOCKCHAIN_DOGECOIN Blockchain = 26
- Blockchain_BLOCKCHAIN_BSC Blockchain = 31
- Blockchain_BLOCKCHAIN_AVACCHAIN Blockchain = 32
- Blockchain_BLOCKCHAIN_POLYGON Blockchain = 35
- Blockchain_BLOCKCHAIN_OPTIMISM Blockchain = 39
- Blockchain_BLOCKCHAIN_ARBITRUM Blockchain = 41
- Blockchain_BLOCKCHAIN_APTOS Blockchain = 47 // L1 network using the Move language (originally created for Libra/Diem)
- Blockchain_BLOCKCHAIN_FANTOM Blockchain = 51
- Blockchain_BLOCKCHAIN_BASE Blockchain = 56 // Coinbase L2
+ Blockchain_BLOCKCHAIN_UNKNOWN Blockchain = 0
+ Blockchain_BLOCKCHAIN_SOLANA Blockchain = 11
+ Blockchain_BLOCKCHAIN_BITCOIN Blockchain = 16
+ Blockchain_BLOCKCHAIN_ETHEREUM Blockchain = 17
+ Blockchain_BLOCKCHAIN_BITCOINCASH Blockchain = 18
+ Blockchain_BLOCKCHAIN_LITECOIN Blockchain = 19
+ Blockchain_BLOCKCHAIN_DOGECOIN Blockchain = 26
+ Blockchain_BLOCKCHAIN_TRON Blockchain = 30
+ Blockchain_BLOCKCHAIN_BSC Blockchain = 31
+ Blockchain_BLOCKCHAIN_AVACCHAIN Blockchain = 32
+ Blockchain_BLOCKCHAIN_POLYGON Blockchain = 35
+ Blockchain_BLOCKCHAIN_OPTIMISM Blockchain = 39
+ Blockchain_BLOCKCHAIN_ARBITRUM Blockchain = 41
+ Blockchain_BLOCKCHAIN_APTOS Blockchain = 47 // L1 network using the Move language (originally created for Libra/Diem)
+ Blockchain_BLOCKCHAIN_FANTOM Blockchain = 51
+ Blockchain_BLOCKCHAIN_BASE Blockchain = 56 // Coinbase L2
+ Blockchain_BLOCKCHAIN_STORY Blockchain = 60
+ Blockchain_BLOCKCHAIN_ETHEREUMCLASSIC Blockchain = 61 // Ethereum Classic
+ Blockchain_BLOCKCHAIN_PLASMA Blockchain = 62 // Plasma
+ Blockchain_BLOCKCHAIN_MONAD Blockchain = 63 // Monad
+ Blockchain_BLOCKCHAIN_ABSTRACT Blockchain = 64 // Abstract
+ Blockchain_BLOCKCHAIN_MEGAETH Blockchain = 65 // MegaETH
+ Blockchain_BLOCKCHAIN_SEISMIC Blockchain = 66 // Seismic
)
// Enum value maps for Blockchain.
@@ -47,7 +57,10 @@ var (
11: "BLOCKCHAIN_SOLANA",
16: "BLOCKCHAIN_BITCOIN",
17: "BLOCKCHAIN_ETHEREUM",
+ 18: "BLOCKCHAIN_BITCOINCASH",
+ 19: "BLOCKCHAIN_LITECOIN",
26: "BLOCKCHAIN_DOGECOIN",
+ 30: "BLOCKCHAIN_TRON",
31: "BLOCKCHAIN_BSC",
32: "BLOCKCHAIN_AVACCHAIN",
35: "BLOCKCHAIN_POLYGON",
@@ -56,21 +69,38 @@ var (
47: "BLOCKCHAIN_APTOS",
51: "BLOCKCHAIN_FANTOM",
56: "BLOCKCHAIN_BASE",
+ 60: "BLOCKCHAIN_STORY",
+ 61: "BLOCKCHAIN_ETHEREUMCLASSIC",
+ 62: "BLOCKCHAIN_PLASMA",
+ 63: "BLOCKCHAIN_MONAD",
+ 64: "BLOCKCHAIN_ABSTRACT",
+ 65: "BLOCKCHAIN_MEGAETH",
+ 66: "BLOCKCHAIN_SEISMIC",
}
Blockchain_value = map[string]int32{
- "BLOCKCHAIN_UNKNOWN": 0,
- "BLOCKCHAIN_SOLANA": 11,
- "BLOCKCHAIN_BITCOIN": 16,
- "BLOCKCHAIN_ETHEREUM": 17,
- "BLOCKCHAIN_DOGECOIN": 26,
- "BLOCKCHAIN_BSC": 31,
- "BLOCKCHAIN_AVACCHAIN": 32,
- "BLOCKCHAIN_POLYGON": 35,
- "BLOCKCHAIN_OPTIMISM": 39,
- "BLOCKCHAIN_ARBITRUM": 41,
- "BLOCKCHAIN_APTOS": 47,
- "BLOCKCHAIN_FANTOM": 51,
- "BLOCKCHAIN_BASE": 56,
+ "BLOCKCHAIN_UNKNOWN": 0,
+ "BLOCKCHAIN_SOLANA": 11,
+ "BLOCKCHAIN_BITCOIN": 16,
+ "BLOCKCHAIN_ETHEREUM": 17,
+ "BLOCKCHAIN_BITCOINCASH": 18,
+ "BLOCKCHAIN_LITECOIN": 19,
+ "BLOCKCHAIN_DOGECOIN": 26,
+ "BLOCKCHAIN_TRON": 30,
+ "BLOCKCHAIN_BSC": 31,
+ "BLOCKCHAIN_AVACCHAIN": 32,
+ "BLOCKCHAIN_POLYGON": 35,
+ "BLOCKCHAIN_OPTIMISM": 39,
+ "BLOCKCHAIN_ARBITRUM": 41,
+ "BLOCKCHAIN_APTOS": 47,
+ "BLOCKCHAIN_FANTOM": 51,
+ "BLOCKCHAIN_BASE": 56,
+ "BLOCKCHAIN_STORY": 60,
+ "BLOCKCHAIN_ETHEREUMCLASSIC": 61,
+ "BLOCKCHAIN_PLASMA": 62,
+ "BLOCKCHAIN_MONAD": 63,
+ "BLOCKCHAIN_ABSTRACT": 64,
+ "BLOCKCHAIN_MEGAETH": 65,
+ "BLOCKCHAIN_SEISMIC": 66,
}
)
@@ -106,33 +136,47 @@ func (Blockchain) EnumDescriptor() ([]byte, []int) {
type Network int32
const (
- Network_NETWORK_UNKNOWN Network = 0
- Network_NETWORK_SOLANA_MAINNET Network = 22
- Network_NETWORK_SOLANA_TESTNET Network = 23
- Network_NETWORK_BITCOIN_MAINNET Network = 33
- Network_NETWORK_BITCOIN_TESTNET Network = 34
- Network_NETWORK_ETHEREUM_MAINNET Network = 35
- Network_NETWORK_ETHEREUM_TESTNET Network = 36
- Network_NETWORK_ETHEREUM_GOERLI Network = 66
- Network_NETWORK_DOGECOIN_MAINNET Network = 56
- Network_NETWORK_DOGECOIN_TESTNET Network = 57
- Network_NETWORK_BSC_MAINNET Network = 70
- Network_NETWORK_BSC_TESTNET Network = 71
- Network_NETWORK_AVACCHAIN_MAINNET Network = 72
- Network_NETWORK_AVACCHAIN_TESTNET Network = 73
- Network_NETWORK_POLYGON_MAINNET Network = 78
- Network_NETWORK_POLYGON_TESTNET Network = 79
- Network_NETWORK_OPTIMISM_MAINNET Network = 86
- Network_NETWORK_OPTIMISM_TESTNET Network = 87
- Network_NETWORK_ARBITRUM_MAINNET Network = 91
- Network_NETWORK_ARBITRUM_TESTNET Network = 92
- Network_NETWORK_APTOS_MAINNET Network = 103
- Network_NETWORK_APTOS_TESTNET Network = 104
- Network_NETWORK_FANTOM_MAINNET Network = 111
- Network_NETWORK_FANTOM_TESTNET Network = 112
- Network_NETWORK_BASE_MAINNET Network = 123 // Coinbase L2 running on Ethereum mainnet
- Network_NETWORK_BASE_GOERLI Network = 125 // Coinbase L2 running on Ethereum Goerli
- Network_NETWORK_ETHEREUM_HOLESKY Network = 136
+ Network_NETWORK_UNKNOWN Network = 0
+ Network_NETWORK_SOLANA_MAINNET Network = 22
+ Network_NETWORK_SOLANA_TESTNET Network = 23
+ Network_NETWORK_BITCOIN_MAINNET Network = 33
+ Network_NETWORK_BITCOIN_TESTNET Network = 34
+ Network_NETWORK_ETHEREUM_MAINNET Network = 35
+ Network_NETWORK_ETHEREUM_TESTNET Network = 36
+ Network_NETWORK_BITCOINCASH_MAINNET Network = 37
+ Network_NETWORK_BITCOINCASH_TESTNET Network = 38
+ Network_NETWORK_LITECOIN_MAINNET Network = 39
+ Network_NETWORK_LITECOIN_TESTNET Network = 40
+ Network_NETWORK_TRON_MAINNET Network = 64
+ Network_NETWORK_TRON_TESTNET Network = 65
+ Network_NETWORK_ETHEREUM_GOERLI Network = 66
+ Network_NETWORK_DOGECOIN_MAINNET Network = 56
+ Network_NETWORK_DOGECOIN_TESTNET Network = 57
+ Network_NETWORK_BSC_MAINNET Network = 70
+ Network_NETWORK_BSC_TESTNET Network = 71
+ Network_NETWORK_AVACCHAIN_MAINNET Network = 72
+ Network_NETWORK_AVACCHAIN_TESTNET Network = 73
+ Network_NETWORK_POLYGON_MAINNET Network = 78
+ Network_NETWORK_POLYGON_TESTNET Network = 79
+ Network_NETWORK_OPTIMISM_MAINNET Network = 86
+ Network_NETWORK_OPTIMISM_TESTNET Network = 87
+ Network_NETWORK_ARBITRUM_MAINNET Network = 91
+ Network_NETWORK_ARBITRUM_TESTNET Network = 92
+ Network_NETWORK_APTOS_MAINNET Network = 103
+ Network_NETWORK_APTOS_TESTNET Network = 104
+ Network_NETWORK_FANTOM_MAINNET Network = 111
+ Network_NETWORK_FANTOM_TESTNET Network = 112
+ Network_NETWORK_BASE_MAINNET Network = 123 // Coinbase L2 running on Ethereum mainnet
+ Network_NETWORK_BASE_GOERLI Network = 125 // Coinbase L2 running on Ethereum Goerli
+ Network_NETWORK_ETHEREUM_HOLESKY Network = 136
+ Network_NETWORK_STORY_MAINNET Network = 140
+ Network_NETWORK_ETHEREUMCLASSIC_MAINNET Network = 141
+ Network_NETWORK_PLASMA_MAINNET Network = 142
+ Network_NETWORK_MONAD_MAINNET Network = 143
+ Network_NETWORK_ABSTRACT_MAINNET Network = 144
+ Network_NETWORK_MEGAETH_MAINNET Network = 145
+ Network_NETWORK_SEISMIC_TESTNET Network = 146
+ Network_NETWORK_SEISMIC_MAINNET Network = 147
)
// Enum value maps for Network.
@@ -145,6 +189,12 @@ var (
34: "NETWORK_BITCOIN_TESTNET",
35: "NETWORK_ETHEREUM_MAINNET",
36: "NETWORK_ETHEREUM_TESTNET",
+ 37: "NETWORK_BITCOINCASH_MAINNET",
+ 38: "NETWORK_BITCOINCASH_TESTNET",
+ 39: "NETWORK_LITECOIN_MAINNET",
+ 40: "NETWORK_LITECOIN_TESTNET",
+ 64: "NETWORK_TRON_MAINNET",
+ 65: "NETWORK_TRON_TESTNET",
66: "NETWORK_ETHEREUM_GOERLI",
56: "NETWORK_DOGECOIN_MAINNET",
57: "NETWORK_DOGECOIN_TESTNET",
@@ -165,35 +215,57 @@ var (
123: "NETWORK_BASE_MAINNET",
125: "NETWORK_BASE_GOERLI",
136: "NETWORK_ETHEREUM_HOLESKY",
+ 140: "NETWORK_STORY_MAINNET",
+ 141: "NETWORK_ETHEREUMCLASSIC_MAINNET",
+ 142: "NETWORK_PLASMA_MAINNET",
+ 143: "NETWORK_MONAD_MAINNET",
+ 144: "NETWORK_ABSTRACT_MAINNET",
+ 145: "NETWORK_MEGAETH_MAINNET",
+ 146: "NETWORK_SEISMIC_TESTNET",
+ 147: "NETWORK_SEISMIC_MAINNET",
}
Network_value = map[string]int32{
- "NETWORK_UNKNOWN": 0,
- "NETWORK_SOLANA_MAINNET": 22,
- "NETWORK_SOLANA_TESTNET": 23,
- "NETWORK_BITCOIN_MAINNET": 33,
- "NETWORK_BITCOIN_TESTNET": 34,
- "NETWORK_ETHEREUM_MAINNET": 35,
- "NETWORK_ETHEREUM_TESTNET": 36,
- "NETWORK_ETHEREUM_GOERLI": 66,
- "NETWORK_DOGECOIN_MAINNET": 56,
- "NETWORK_DOGECOIN_TESTNET": 57,
- "NETWORK_BSC_MAINNET": 70,
- "NETWORK_BSC_TESTNET": 71,
- "NETWORK_AVACCHAIN_MAINNET": 72,
- "NETWORK_AVACCHAIN_TESTNET": 73,
- "NETWORK_POLYGON_MAINNET": 78,
- "NETWORK_POLYGON_TESTNET": 79,
- "NETWORK_OPTIMISM_MAINNET": 86,
- "NETWORK_OPTIMISM_TESTNET": 87,
- "NETWORK_ARBITRUM_MAINNET": 91,
- "NETWORK_ARBITRUM_TESTNET": 92,
- "NETWORK_APTOS_MAINNET": 103,
- "NETWORK_APTOS_TESTNET": 104,
- "NETWORK_FANTOM_MAINNET": 111,
- "NETWORK_FANTOM_TESTNET": 112,
- "NETWORK_BASE_MAINNET": 123,
- "NETWORK_BASE_GOERLI": 125,
- "NETWORK_ETHEREUM_HOLESKY": 136,
+ "NETWORK_UNKNOWN": 0,
+ "NETWORK_SOLANA_MAINNET": 22,
+ "NETWORK_SOLANA_TESTNET": 23,
+ "NETWORK_BITCOIN_MAINNET": 33,
+ "NETWORK_BITCOIN_TESTNET": 34,
+ "NETWORK_ETHEREUM_MAINNET": 35,
+ "NETWORK_ETHEREUM_TESTNET": 36,
+ "NETWORK_BITCOINCASH_MAINNET": 37,
+ "NETWORK_BITCOINCASH_TESTNET": 38,
+ "NETWORK_LITECOIN_MAINNET": 39,
+ "NETWORK_LITECOIN_TESTNET": 40,
+ "NETWORK_TRON_MAINNET": 64,
+ "NETWORK_TRON_TESTNET": 65,
+ "NETWORK_ETHEREUM_GOERLI": 66,
+ "NETWORK_DOGECOIN_MAINNET": 56,
+ "NETWORK_DOGECOIN_TESTNET": 57,
+ "NETWORK_BSC_MAINNET": 70,
+ "NETWORK_BSC_TESTNET": 71,
+ "NETWORK_AVACCHAIN_MAINNET": 72,
+ "NETWORK_AVACCHAIN_TESTNET": 73,
+ "NETWORK_POLYGON_MAINNET": 78,
+ "NETWORK_POLYGON_TESTNET": 79,
+ "NETWORK_OPTIMISM_MAINNET": 86,
+ "NETWORK_OPTIMISM_TESTNET": 87,
+ "NETWORK_ARBITRUM_MAINNET": 91,
+ "NETWORK_ARBITRUM_TESTNET": 92,
+ "NETWORK_APTOS_MAINNET": 103,
+ "NETWORK_APTOS_TESTNET": 104,
+ "NETWORK_FANTOM_MAINNET": 111,
+ "NETWORK_FANTOM_TESTNET": 112,
+ "NETWORK_BASE_MAINNET": 123,
+ "NETWORK_BASE_GOERLI": 125,
+ "NETWORK_ETHEREUM_HOLESKY": 136,
+ "NETWORK_STORY_MAINNET": 140,
+ "NETWORK_ETHEREUMCLASSIC_MAINNET": 141,
+ "NETWORK_PLASMA_MAINNET": 142,
+ "NETWORK_MONAD_MAINNET": 143,
+ "NETWORK_ABSTRACT_MAINNET": 144,
+ "NETWORK_MEGAETH_MAINNET": 145,
+ "NETWORK_SEISMIC_TESTNET": 146,
+ "NETWORK_SEISMIC_MAINNET": 147,
}
)
@@ -230,80 +302,122 @@ var file_coinbase_c3_common_common_proto_rawDesc = []byte{
0x0a, 0x1f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x33, 0x2f, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x12, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x33, 0x2e, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2a, 0xbf, 0x02, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2a, 0xb5, 0x04, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x63,
0x68, 0x61, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41,
0x49, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11,
0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x53, 0x4f, 0x4c, 0x41, 0x4e,
0x41, 0x10, 0x0b, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49,
0x4e, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e, 0x10, 0x10, 0x12, 0x17, 0x0a, 0x13, 0x42,
0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45,
- 0x55, 0x4d, 0x10, 0x11, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41,
- 0x49, 0x4e, 0x5f, 0x44, 0x4f, 0x47, 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x10, 0x1a, 0x12, 0x12, 0x0a,
- 0x0e, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x53, 0x43, 0x10,
- 0x1f, 0x12, 0x18, 0x0a, 0x14, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f,
- 0x41, 0x56, 0x41, 0x43, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x20, 0x12, 0x16, 0x0a, 0x12, 0x42,
- 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f,
- 0x4e, 0x10, 0x23, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49,
- 0x4e, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x53, 0x4d, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13,
- 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54,
- 0x52, 0x55, 0x4d, 0x10, 0x29, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48,
- 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x50, 0x54, 0x4f, 0x53, 0x10, 0x2f, 0x12, 0x15, 0x0a, 0x11, 0x42,
- 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x46, 0x41, 0x4e, 0x54, 0x4f, 0x4d,
- 0x10, 0x33, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e,
- 0x5f, 0x42, 0x41, 0x53, 0x45, 0x10, 0x38, 0x2a, 0x87, 0x06, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77,
- 0x6f, 0x72, 0x6b, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57,
- 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x4f, 0x4c, 0x41, 0x4e, 0x41, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e,
- 0x45, 0x54, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f,
- 0x53, 0x4f, 0x4c, 0x41, 0x4e, 0x41, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x17,
- 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x49, 0x54, 0x43,
- 0x4f, 0x49, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x21, 0x12, 0x1b, 0x0a,
- 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e,
- 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x22, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45,
- 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x4d,
- 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x23, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57,
- 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x54, 0x45, 0x53,
- 0x54, 0x4e, 0x45, 0x54, 0x10, 0x24, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52,
- 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x47, 0x4f, 0x45, 0x52, 0x4c,
- 0x49, 0x10, 0x42, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x44,
- 0x4f, 0x47, 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10,
- 0x38, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x44, 0x4f, 0x47,
- 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x39, 0x12,
- 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x53, 0x43, 0x5f, 0x4d,
- 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x46, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54, 0x57,
- 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x53, 0x43, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10,
- 0x47, 0x12, 0x1d, 0x0a, 0x19, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x56, 0x41,
- 0x43, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x48,
- 0x12, 0x1d, 0x0a, 0x19, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x56, 0x41, 0x43,
- 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x49, 0x12,
- 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47,
- 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x4e, 0x12, 0x1b, 0x0a, 0x17,
- 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x5f,
- 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x4f, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54,
- 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x53, 0x4d, 0x5f, 0x4d, 0x41,
- 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x56, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f,
- 0x52, 0x4b, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x53, 0x4d, 0x5f, 0x54, 0x45, 0x53, 0x54,
- 0x4e, 0x45, 0x54, 0x10, 0x57, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b,
- 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45,
- 0x54, 0x10, 0x5b, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41,
- 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10,
- 0x5c, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x50, 0x54,
- 0x4f, 0x53, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x67, 0x12, 0x19, 0x0a, 0x15,
- 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x50, 0x54, 0x4f, 0x53, 0x5f, 0x54, 0x45,
- 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x68, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57, 0x4f,
- 0x52, 0x4b, 0x5f, 0x46, 0x41, 0x4e, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45,
- 0x54, 0x10, 0x6f, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x46,
- 0x41, 0x4e, 0x54, 0x4f, 0x4d, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x70, 0x12,
- 0x18, 0x0a, 0x14, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x41, 0x53, 0x45, 0x5f,
- 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x7b, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54,
- 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x47, 0x4f, 0x45, 0x52, 0x4c, 0x49,
- 0x10, 0x7d, 0x12, 0x1d, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54,
- 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x48, 0x4f, 0x4c, 0x45, 0x53, 0x4b, 0x59, 0x10, 0x88,
- 0x01, 0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x69,
- 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x55, 0x4d, 0x10, 0x11, 0x12, 0x1a, 0x0a, 0x16, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41,
+ 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e, 0x43, 0x41, 0x53, 0x48, 0x10, 0x12,
+ 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4c,
+ 0x49, 0x54, 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f,
+ 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x44, 0x4f, 0x47, 0x45, 0x43, 0x4f, 0x49, 0x4e,
+ 0x10, 0x1a, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e,
+ 0x5f, 0x54, 0x52, 0x4f, 0x4e, 0x10, 0x1e, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
+ 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x53, 0x43, 0x10, 0x1f, 0x12, 0x18, 0x0a, 0x14, 0x42,
+ 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x43, 0x43, 0x48,
+ 0x41, 0x49, 0x4e, 0x10, 0x20, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48,
+ 0x41, 0x49, 0x4e, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x10, 0x23, 0x12, 0x17, 0x0a,
+ 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x50, 0x54, 0x49,
+ 0x4d, 0x49, 0x53, 0x4d, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43,
+ 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x10, 0x29, 0x12,
+ 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x50,
+ 0x54, 0x4f, 0x53, 0x10, 0x2f, 0x12, 0x15, 0x0a, 0x11, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48,
+ 0x41, 0x49, 0x4e, 0x5f, 0x46, 0x41, 0x4e, 0x54, 0x4f, 0x4d, 0x10, 0x33, 0x12, 0x13, 0x0a, 0x0f,
+ 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x41, 0x53, 0x45, 0x10,
+ 0x38, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f,
+ 0x53, 0x54, 0x4f, 0x52, 0x59, 0x10, 0x3c, 0x12, 0x1e, 0x0a, 0x1a, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
+ 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x43, 0x4c,
+ 0x41, 0x53, 0x53, 0x49, 0x43, 0x10, 0x3d, 0x12, 0x15, 0x0a, 0x11, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
+ 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x50, 0x4c, 0x41, 0x53, 0x4d, 0x41, 0x10, 0x3e, 0x12, 0x14,
+ 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x4f, 0x4e,
+ 0x41, 0x44, 0x10, 0x3f, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41,
+ 0x49, 0x4e, 0x5f, 0x41, 0x42, 0x53, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x40, 0x12, 0x16, 0x0a,
+ 0x12, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x47, 0x41,
+ 0x45, 0x54, 0x48, 0x10, 0x41, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x43, 0x48,
+ 0x41, 0x49, 0x4e, 0x5f, 0x53, 0x45, 0x49, 0x53, 0x4d, 0x49, 0x43, 0x10, 0x42, 0x2a, 0xad, 0x09,
+ 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x45, 0x54,
+ 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a,
+ 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x4f, 0x4c, 0x41, 0x4e, 0x41,
+ 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x45,
+ 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x4f, 0x4c, 0x41, 0x4e, 0x41, 0x5f, 0x54, 0x45, 0x53,
+ 0x54, 0x4e, 0x45, 0x54, 0x10, 0x17, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52,
+ 0x4b, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45,
+ 0x54, 0x10, 0x21, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42,
+ 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x22,
+ 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45,
+ 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x23, 0x12, 0x1c,
+ 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45,
+ 0x55, 0x4d, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x24, 0x12, 0x1f, 0x0a, 0x1b,
+ 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e, 0x43,
+ 0x41, 0x53, 0x48, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x25, 0x12, 0x1f, 0x0a,
+ 0x1b, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x49, 0x54, 0x43, 0x4f, 0x49, 0x4e,
+ 0x43, 0x41, 0x53, 0x48, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x26, 0x12, 0x1c,
+ 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4c, 0x49, 0x54, 0x45, 0x43, 0x4f,
+ 0x49, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x27, 0x12, 0x1c, 0x0a, 0x18,
+ 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4c, 0x49, 0x54, 0x45, 0x43, 0x4f, 0x49, 0x4e,
+ 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x28, 0x12, 0x18, 0x0a, 0x14, 0x4e, 0x45,
+ 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x54, 0x52, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e,
+ 0x45, 0x54, 0x10, 0x40, 0x12, 0x18, 0x0a, 0x14, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f,
+ 0x54, 0x52, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x41, 0x12, 0x1b,
+ 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45,
+ 0x55, 0x4d, 0x5f, 0x47, 0x4f, 0x45, 0x52, 0x4c, 0x49, 0x10, 0x42, 0x12, 0x1c, 0x0a, 0x18, 0x4e,
+ 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x44, 0x4f, 0x47, 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x5f,
+ 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x38, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54,
+ 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x44, 0x4f, 0x47, 0x45, 0x43, 0x4f, 0x49, 0x4e, 0x5f, 0x54, 0x45,
+ 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x39, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54, 0x57, 0x4f,
+ 0x52, 0x4b, 0x5f, 0x42, 0x53, 0x43, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x46,
+ 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x53, 0x43, 0x5f,
+ 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x47, 0x12, 0x1d, 0x0a, 0x19, 0x4e, 0x45, 0x54,
+ 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x56, 0x41, 0x43, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x4d,
+ 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x48, 0x12, 0x1d, 0x0a, 0x19, 0x4e, 0x45, 0x54, 0x57,
+ 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x56, 0x41, 0x43, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x54, 0x45,
+ 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x49, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f,
+ 0x52, 0x4b, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e,
+ 0x45, 0x54, 0x10, 0x4e, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f,
+ 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10,
+ 0x4f, 0x12, 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4f, 0x50, 0x54,
+ 0x49, 0x4d, 0x49, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x56, 0x12,
+ 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d,
+ 0x49, 0x53, 0x4d, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x57, 0x12, 0x1c, 0x0a,
+ 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55,
+ 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x5b, 0x12, 0x1c, 0x0a, 0x18, 0x4e,
+ 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f,
+ 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x5c, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x45, 0x54,
+ 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x41, 0x50, 0x54, 0x4f, 0x53, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e,
+ 0x45, 0x54, 0x10, 0x67, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f,
+ 0x41, 0x50, 0x54, 0x4f, 0x53, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x68, 0x12,
+ 0x1a, 0x0a, 0x16, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x46, 0x41, 0x4e, 0x54, 0x4f,
+ 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x6f, 0x12, 0x1a, 0x0a, 0x16, 0x4e,
+ 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x46, 0x41, 0x4e, 0x54, 0x4f, 0x4d, 0x5f, 0x54, 0x45,
+ 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x70, 0x12, 0x18, 0x0a, 0x14, 0x4e, 0x45, 0x54, 0x57, 0x4f,
+ 0x52, 0x4b, 0x5f, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10,
+ 0x7b, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x42, 0x41, 0x53,
+ 0x45, 0x5f, 0x47, 0x4f, 0x45, 0x52, 0x4c, 0x49, 0x10, 0x7d, 0x12, 0x1d, 0x0a, 0x18, 0x4e, 0x45,
+ 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x5f, 0x48,
+ 0x4f, 0x4c, 0x45, 0x53, 0x4b, 0x59, 0x10, 0x88, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x45, 0x54,
+ 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e,
+ 0x45, 0x54, 0x10, 0x8c, 0x01, 0x12, 0x24, 0x0a, 0x1f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b,
+ 0x5f, 0x45, 0x54, 0x48, 0x45, 0x52, 0x45, 0x55, 0x4d, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x49, 0x43,
+ 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x4e,
+ 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x4c, 0x41, 0x53, 0x4d, 0x41, 0x5f, 0x4d, 0x41,
+ 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x8e, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x45, 0x54, 0x57,
+ 0x4f, 0x52, 0x4b, 0x5f, 0x4d, 0x4f, 0x4e, 0x41, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45,
+ 0x54, 0x10, 0x8f, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f,
+ 0x41, 0x42, 0x53, 0x54, 0x52, 0x41, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54,
+ 0x10, 0x90, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x4d,
+ 0x45, 0x47, 0x41, 0x45, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x91,
+ 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x45, 0x49,
+ 0x53, 0x4d, 0x49, 0x43, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x4e, 0x45, 0x54, 0x10, 0x92, 0x01, 0x12,
+ 0x1c, 0x0a, 0x17, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x53, 0x45, 0x49, 0x53, 0x4d,
+ 0x49, 0x43, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x4e, 0x45, 0x54, 0x10, 0x93, 0x01, 0x42, 0x3c, 0x5a,
+ 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x69, 0x6e,
+ 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
+ 0x65, 0x2f, 0x63, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
diff --git a/protos/coinbase/c3/common/common.proto b/protos/coinbase/c3/common/common.proto
index 9bf93d0..07d641f 100644
--- a/protos/coinbase/c3/common/common.proto
+++ b/protos/coinbase/c3/common/common.proto
@@ -11,7 +11,10 @@ enum Blockchain {
BLOCKCHAIN_SOLANA = 11;
BLOCKCHAIN_BITCOIN = 16;
BLOCKCHAIN_ETHEREUM = 17;
+ BLOCKCHAIN_BITCOINCASH = 18;
+ BLOCKCHAIN_LITECOIN = 19;
BLOCKCHAIN_DOGECOIN = 26;
+ BLOCKCHAIN_TRON = 30;
BLOCKCHAIN_BSC = 31;
BLOCKCHAIN_AVACCHAIN = 32;
BLOCKCHAIN_POLYGON = 35;
@@ -20,6 +23,13 @@ enum Blockchain {
BLOCKCHAIN_APTOS = 47; // L1 network using the Move language (originally created for Libra/Diem)
BLOCKCHAIN_FANTOM = 51;
BLOCKCHAIN_BASE = 56; // Coinbase L2
+ BLOCKCHAIN_STORY = 60;
+ BLOCKCHAIN_ETHEREUMCLASSIC = 61;
+ BLOCKCHAIN_PLASMA = 62;
+ BLOCKCHAIN_MONAD = 63;
+ BLOCKCHAIN_ABSTRACT = 64;
+ BLOCKCHAIN_MEGAETH = 65;
+ BLOCKCHAIN_SEISMIC = 66;
}
// Network defines an enumeration of supported networks.
@@ -35,6 +45,16 @@ enum Network {
NETWORK_ETHEREUM_MAINNET = 35;
NETWORK_ETHEREUM_TESTNET = 36;
+
+ NETWORK_BITCOINCASH_MAINNET = 37;
+ NETWORK_BITCOINCASH_TESTNET = 38;
+
+ NETWORK_LITECOIN_MAINNET = 39;
+ NETWORK_LITECOIN_TESTNET = 40;
+
+ NETWORK_TRON_MAINNET = 64;
+ NETWORK_TRON_TESTNET = 65;
+
NETWORK_ETHEREUM_GOERLI = 66;
NETWORK_DOGECOIN_MAINNET = 56;
@@ -65,4 +85,19 @@ enum Network {
NETWORK_BASE_GOERLI = 125; // Coinbase L2 running on Ethereum Goerli
NETWORK_ETHEREUM_HOLESKY = 136;
+
+ NETWORK_STORY_MAINNET = 140;
+
+ NETWORK_ETHEREUMCLASSIC_MAINNET = 141;
+
+ NETWORK_PLASMA_MAINNET = 142;
+
+ NETWORK_MONAD_MAINNET = 143;
+
+ NETWORK_ABSTRACT_MAINNET = 144;
+
+ NETWORK_MEGAETH_MAINNET = 145;
+
+ NETWORK_SEISMIC_TESTNET = 146;
+ NETWORK_SEISMIC_MAINNET = 147;
}
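Note: the enum additions above surface in the generated Go package as typed constants plus the `Blockchain_value`/`Network_value` maps regenerated earlier in this diff. A minimal sketch of referencing them, assuming the `go_package` shown in the descriptor (`github.com/coinbase/chainstorage/protos/coinbase/c3/common`) and the standard protoc-gen-go naming scheme:

```go
package main

import (
	"fmt"

	// Generated from protos/coinbase/c3/common/common.proto.
	c3common "github.com/coinbase/chainstorage/protos/coinbase/c3/common"
)

func main() {
	// New values are addressable as typed constants...
	n := c3common.Network_NETWORK_TRON_MAINNET

	// ...and round-trip through the generated name/value maps.
	fmt.Println(n)                                  // NETWORK_TRON_MAINNET
	fmt.Println(c3common.Network_value[n.String()]) // 64
}
```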
diff --git a/protos/coinbase/chainstorage/api.pb.go b/protos/coinbase/chainstorage/api.pb.go
index f18d1ef..ebc4227 100644
--- a/protos/coinbase/chainstorage/api.pb.go
+++ b/protos/coinbase/chainstorage/api.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/api.proto
package chainstorage
@@ -28,6 +28,7 @@ const (
Compression_NONE Compression = 0
// Compressed using gzip.
Compression_GZIP Compression = 1
+ Compression_ZSTD Compression = 2
)
// Enum value maps for Compression.
@@ -35,10 +36,12 @@ var (
Compression_name = map[int32]string{
0: "NONE",
1: "GZIP",
+ 2: "ZSTD",
}
Compression_value = map[string]int32{
"NONE": 0,
"GZIP": 1,
+ "ZSTD": 2,
}
)
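With the new `ZSTD` variant, readers of `BlockFile` payloads need one more decompression branch. A minimal sketch of that dispatch; the zstd codec used here (`github.com/klauspost/compress/zstd`) is an assumed choice, not one mandated by this diff:

```go
package blockfile

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd" // assumed zstd implementation

	api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
)

// decompress inflates raw block-file bytes according to the Compression enum.
func decompress(data []byte, c api.Compression) ([]byte, error) {
	switch c {
	case api.Compression_NONE:
		return data, nil
	case api.Compression_GZIP:
		r, err := gzip.NewReader(bytes.NewReader(data))
		if err != nil {
			return nil, err
		}
		defer r.Close()
		return io.ReadAll(r)
	case api.Compression_ZSTD:
		d, err := zstd.NewReader(bytes.NewReader(data))
		if err != nil {
			return nil, err
		}
		defer d.Close()
		return io.ReadAll(d)
	default:
		return nil, fmt.Errorf("unsupported compression: %v", c)
	}
}
```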
@@ -2194,6 +2197,140 @@ func (x *GetVerifiedAccountStateResponse) GetResponse() *ValidateAccountStateRes
return nil
}
+type GetBlockByTimestampRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Tag uint32 `protobuf:"varint,1,opt,name=tag,proto3" json:"tag,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Unix timestamp in seconds
+}
+
+func (x *GetBlockByTimestampRequest) Reset() {
+ *x = GetBlockByTimestampRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_coinbase_chainstorage_api_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlockByTimestampRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlockByTimestampRequest) ProtoMessage() {}
+
+func (x *GetBlockByTimestampRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_coinbase_chainstorage_api_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlockByTimestampRequest.ProtoReflect.Descriptor instead.
+func (*GetBlockByTimestampRequest) Descriptor() ([]byte, []int) {
+ return file_coinbase_chainstorage_api_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *GetBlockByTimestampRequest) GetTag() uint32 {
+ if x != nil {
+ return x.Tag
+ }
+ return 0
+}
+
+func (x *GetBlockByTimestampRequest) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+type GetBlockByTimestampResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Tag uint32 `protobuf:"varint,1,opt,name=tag,proto3" json:"tag,omitempty"`
+ Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
+ ParentHash string `protobuf:"bytes,3,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"`
+ Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"`
+ Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Unix timestamp in seconds
+}
+
+func (x *GetBlockByTimestampResponse) Reset() {
+ *x = GetBlockByTimestampResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_coinbase_chainstorage_api_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlockByTimestampResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlockByTimestampResponse) ProtoMessage() {}
+
+func (x *GetBlockByTimestampResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_coinbase_chainstorage_api_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlockByTimestampResponse.ProtoReflect.Descriptor instead.
+func (*GetBlockByTimestampResponse) Descriptor() ([]byte, []int) {
+ return file_coinbase_chainstorage_api_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *GetBlockByTimestampResponse) GetTag() uint32 {
+ if x != nil {
+ return x.Tag
+ }
+ return 0
+}
+
+func (x *GetBlockByTimestampResponse) GetHash() string {
+ if x != nil {
+ return x.Hash
+ }
+ return ""
+}
+
+func (x *GetBlockByTimestampResponse) GetParentHash() string {
+ if x != nil {
+ return x.ParentHash
+ }
+ return ""
+}
+
+func (x *GetBlockByTimestampResponse) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+
+func (x *GetBlockByTimestampResponse) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
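The two messages above back the new `GetBlockByTimestamp` RPC registered on the `ChainStorage` service later in this diff (see the `input_type`/`output_type` entries added to `depIdxs`). A minimal client-side sketch, assuming the standard protoc-gen-go-grpc client constructor (`NewChainStorageClient`) and an illustrative endpoint address:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	api "github.com/coinbase/chainstorage/protos/coinbase/chainstorage"
)

func main() {
	// Illustrative address; substitute a real ChainStorage endpoint.
	conn, err := grpc.NewClient("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := api.NewChainStorageClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Timestamp is a Unix time in seconds, per the field comment above.
	resp, err := client.GetBlockByTimestamp(ctx, &api.GetBlockByTimestampRequest{
		Tag:       1, // hypothetical tag value
		Timestamp: uint64(time.Now().Add(-time.Hour).Unix()),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("height=%d hash=%s parent=%s",
		resp.GetHeight(), resp.GetHash(), resp.GetParentHash())
}
```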
var File_coinbase_chainstorage_api_proto protoreflect.FileDescriptor
var file_coinbase_chainstorage_api_proto_rawDesc = []byte{
@@ -2467,139 +2604,162 @@ var file_coinbase_chainstorage_api_proto_rawDesc = []byte{
0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x56, 0x61,
0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61,
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x21, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a,
- 0x04, 0x47, 0x5a, 0x49, 0x50, 0x10, 0x01, 0x2a, 0x2b, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69,
- 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41,
- 0x52, 0x4c, 0x49, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x54, 0x45,
- 0x53, 0x54, 0x10, 0x01, 0x32, 0xaa, 0x0f, 0x0a, 0x0c, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65,
- 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
- 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
- 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
- 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63,
- 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a,
- 0x14, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42, 0x79,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
- 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e,
- 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x42, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x03, 0x74, 0x61, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0x9a, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42,
+ 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x03, 0x74, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69,
+ 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68,
+ 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2a,
+ 0x2b, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x08,
+ 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x5a, 0x49, 0x50,
+ 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x5a, 0x53, 0x54, 0x44, 0x10, 0x02, 0x2a, 0x2b, 0x0a, 0x0f,
+ 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a,
+ 0x06, 0x4c, 0x41, 0x54, 0x45, 0x53, 0x54, 0x10, 0x01, 0x32, 0xa8, 0x10, 0x0a, 0x0c, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65,
+ 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, 0x2e, 0x63,
+ 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69,
+ 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x0c, 0x47, 0x65, 0x74,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42,
- 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64,
- 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x29, 0x2e,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
+ 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65,
+ 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69,
+ 0x6c, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x69,
+ 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x73,
+ 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33,
+ 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46,
+ 0x69, 0x6c, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68,
+ 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61,
+ 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e,
0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63,
- 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f,
- 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
- 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32,
- 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f,
- 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61,
- 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x85, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x2e, 0x63,
- 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61,
+ 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x13, 0x47, 0x65, 0x74,
+ 0x52, 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63,
+ 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52,
+ 0x61, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4e, 0x61,
+ 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x61,
0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x0f, 0x47, 0x65, 0x74,
- 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2d, 0x2e, 0x63,
+ 0x65, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61,
+ 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74,
+ 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42,
+ 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70,
+ 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61,
+ 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73,
+ 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65,
+ 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x88, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x2e, 0x63,
0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x63, 0x6f,
- 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x17,
- 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
- 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
- 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36,
- 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x73, 0x65, 0x74, 0x74,
- 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63,
+ 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52,
+ 0x6f, 0x73, 0x65, 0x74, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f,
0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63,
- 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74,
+ 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x63, 0x6f,
+ 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x82, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x79, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x69,
- 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x79, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x34, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43,
+ 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x2e, 0x63,
+ 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63,
+ 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01,
+ 0x0a, 0x16, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, 0x68, 0x61,
+ 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35,
+ 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x42, 0x79, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x33, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
- 0x42, 0x79, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e,
+ 0x42, 0x79, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x79, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x14, 0x47, 0x65,
+ 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68,
+ 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61,
+ 0x74, 0x69, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47,
+ 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x17,
+ 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36,
+ 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x42, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x31, 0x2e,
0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61,
- 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x74,
- 0x69, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x69, 0x6e,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x79,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x42, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69,
+ 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f,
0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x69,
- 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2615,7 +2775,7 @@ func file_coinbase_chainstorage_api_proto_rawDescGZIP() []byte {
}
var file_coinbase_chainstorage_api_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_coinbase_chainstorage_api_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
+var file_coinbase_chainstorage_api_proto_msgTypes = make([]protoimpl.MessageInfo, 36)
var file_coinbase_chainstorage_api_proto_goTypes = []interface{}{
(Compression)(0), // 0: coinbase.chainstorage.Compression
(InitialPosition)(0), // 1: coinbase.chainstorage.InitialPosition
@@ -2654,36 +2814,38 @@ var file_coinbase_chainstorage_api_proto_goTypes = []interface{}{
(*GetNativeTransactionResponse)(nil), // 34: coinbase.chainstorage.GetNativeTransactionResponse
(*GetVerifiedAccountStateRequest)(nil), // 35: coinbase.chainstorage.GetVerifiedAccountStateRequest
(*GetVerifiedAccountStateResponse)(nil), // 36: coinbase.chainstorage.GetVerifiedAccountStateResponse
- (*timestamppb.Timestamp)(nil), // 37: google.protobuf.Timestamp
- (*BlockIdentifier)(nil), // 38: coinbase.chainstorage.BlockIdentifier
- (*Block)(nil), // 39: coinbase.chainstorage.Block
- (*NativeBlock)(nil), // 40: coinbase.chainstorage.NativeBlock
- (*RosettaBlock)(nil), // 41: coinbase.chainstorage.RosettaBlock
- (*NativeTransaction)(nil), // 42: coinbase.chainstorage.NativeTransaction
- (*InternalGetVerifiedAccountStateRequest)(nil), // 43: coinbase.chainstorage.InternalGetVerifiedAccountStateRequest
- (*ValidateAccountStateResponse)(nil), // 44: coinbase.chainstorage.ValidateAccountStateResponse
+ (*GetBlockByTimestampRequest)(nil), // 37: coinbase.chainstorage.GetBlockByTimestampRequest
+ (*GetBlockByTimestampResponse)(nil), // 38: coinbase.chainstorage.GetBlockByTimestampResponse
+ (*timestamppb.Timestamp)(nil), // 39: google.protobuf.Timestamp
+ (*BlockIdentifier)(nil), // 40: coinbase.chainstorage.BlockIdentifier
+ (*Block)(nil), // 41: coinbase.chainstorage.Block
+ (*NativeBlock)(nil), // 42: coinbase.chainstorage.NativeBlock
+ (*RosettaBlock)(nil), // 43: coinbase.chainstorage.RosettaBlock
+ (*NativeTransaction)(nil), // 44: coinbase.chainstorage.NativeTransaction
+ (*InternalGetVerifiedAccountStateRequest)(nil), // 45: coinbase.chainstorage.InternalGetVerifiedAccountStateRequest
+ (*ValidateAccountStateResponse)(nil), // 46: coinbase.chainstorage.ValidateAccountStateResponse
}
var file_coinbase_chainstorage_api_proto_depIdxs = []int32{
0, // 0: coinbase.chainstorage.BlockFile.compression:type_name -> coinbase.chainstorage.Compression
- 37, // 1: coinbase.chainstorage.BlockFile.block_timestamp:type_name -> google.protobuf.Timestamp
+ 39, // 1: coinbase.chainstorage.BlockFile.block_timestamp:type_name -> google.protobuf.Timestamp
2, // 2: coinbase.chainstorage.BlockchainEvent.type:type_name -> coinbase.chainstorage.BlockchainEvent.Type
- 38, // 3: coinbase.chainstorage.BlockchainEvent.block:type_name -> coinbase.chainstorage.BlockIdentifier
- 37, // 4: coinbase.chainstorage.GetLatestBlockResponse.timestamp:type_name -> google.protobuf.Timestamp
+ 40, // 3: coinbase.chainstorage.BlockchainEvent.block:type_name -> coinbase.chainstorage.BlockIdentifier
+ 39, // 4: coinbase.chainstorage.GetLatestBlockResponse.timestamp:type_name -> google.protobuf.Timestamp
3, // 5: coinbase.chainstorage.GetBlockFileResponse.file:type_name -> coinbase.chainstorage.BlockFile
3, // 6: coinbase.chainstorage.GetBlockFilesByRangeResponse.files:type_name -> coinbase.chainstorage.BlockFile
- 39, // 7: coinbase.chainstorage.GetRawBlockResponse.block:type_name -> coinbase.chainstorage.Block
- 39, // 8: coinbase.chainstorage.GetRawBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.Block
- 40, // 9: coinbase.chainstorage.GetNativeBlockResponse.block:type_name -> coinbase.chainstorage.NativeBlock
- 40, // 10: coinbase.chainstorage.GetNativeBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.NativeBlock
- 41, // 11: coinbase.chainstorage.GetRosettaBlockResponse.block:type_name -> coinbase.chainstorage.RosettaBlock
- 41, // 12: coinbase.chainstorage.GetRosettaBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.RosettaBlock
+ 41, // 7: coinbase.chainstorage.GetRawBlockResponse.block:type_name -> coinbase.chainstorage.Block
+ 41, // 8: coinbase.chainstorage.GetRawBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.Block
+ 42, // 9: coinbase.chainstorage.GetNativeBlockResponse.block:type_name -> coinbase.chainstorage.NativeBlock
+ 42, // 10: coinbase.chainstorage.GetNativeBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.NativeBlock
+ 43, // 11: coinbase.chainstorage.GetRosettaBlockResponse.block:type_name -> coinbase.chainstorage.RosettaBlock
+ 43, // 12: coinbase.chainstorage.GetRosettaBlocksByRangeResponse.blocks:type_name -> coinbase.chainstorage.RosettaBlock
4, // 13: coinbase.chainstorage.ChainEventsResponse.event:type_name -> coinbase.chainstorage.BlockchainEvent
4, // 14: coinbase.chainstorage.GetChainEventsResponse.events:type_name -> coinbase.chainstorage.BlockchainEvent
4, // 15: coinbase.chainstorage.GetVersionedChainEventResponse.event:type_name -> coinbase.chainstorage.BlockchainEvent
- 38, // 16: coinbase.chainstorage.GetBlockByTransactionResponse.blocks:type_name -> coinbase.chainstorage.BlockIdentifier
- 42, // 17: coinbase.chainstorage.GetNativeTransactionResponse.transactions:type_name -> coinbase.chainstorage.NativeTransaction
- 43, // 18: coinbase.chainstorage.GetVerifiedAccountStateRequest.req:type_name -> coinbase.chainstorage.InternalGetVerifiedAccountStateRequest
- 44, // 19: coinbase.chainstorage.GetVerifiedAccountStateResponse.response:type_name -> coinbase.chainstorage.ValidateAccountStateResponse
+ 40, // 16: coinbase.chainstorage.GetBlockByTransactionResponse.blocks:type_name -> coinbase.chainstorage.BlockIdentifier
+ 44, // 17: coinbase.chainstorage.GetNativeTransactionResponse.transactions:type_name -> coinbase.chainstorage.NativeTransaction
+ 45, // 18: coinbase.chainstorage.GetVerifiedAccountStateRequest.req:type_name -> coinbase.chainstorage.InternalGetVerifiedAccountStateRequest
+ 46, // 19: coinbase.chainstorage.GetVerifiedAccountStateResponse.response:type_name -> coinbase.chainstorage.ValidateAccountStateResponse
5, // 20: coinbase.chainstorage.ChainStorage.GetLatestBlock:input_type -> coinbase.chainstorage.GetLatestBlockRequest
7, // 21: coinbase.chainstorage.ChainStorage.GetBlockFile:input_type -> coinbase.chainstorage.GetBlockFileRequest
9, // 22: coinbase.chainstorage.ChainStorage.GetBlockFilesByRange:input_type -> coinbase.chainstorage.GetBlockFilesByRangeRequest
@@ -2700,24 +2862,26 @@ var file_coinbase_chainstorage_api_proto_depIdxs = []int32{
31, // 33: coinbase.chainstorage.ChainStorage.GetBlockByTransaction:input_type -> coinbase.chainstorage.GetBlockByTransactionRequest
33, // 34: coinbase.chainstorage.ChainStorage.GetNativeTransaction:input_type -> coinbase.chainstorage.GetNativeTransactionRequest
35, // 35: coinbase.chainstorage.ChainStorage.GetVerifiedAccountState:input_type -> coinbase.chainstorage.GetVerifiedAccountStateRequest
- 6, // 36: coinbase.chainstorage.ChainStorage.GetLatestBlock:output_type -> coinbase.chainstorage.GetLatestBlockResponse
- 8, // 37: coinbase.chainstorage.ChainStorage.GetBlockFile:output_type -> coinbase.chainstorage.GetBlockFileResponse
- 10, // 38: coinbase.chainstorage.ChainStorage.GetBlockFilesByRange:output_type -> coinbase.chainstorage.GetBlockFilesByRangeResponse
- 12, // 39: coinbase.chainstorage.ChainStorage.GetRawBlock:output_type -> coinbase.chainstorage.GetRawBlockResponse
- 14, // 40: coinbase.chainstorage.ChainStorage.GetRawBlocksByRange:output_type -> coinbase.chainstorage.GetRawBlocksByRangeResponse
- 16, // 41: coinbase.chainstorage.ChainStorage.GetNativeBlock:output_type -> coinbase.chainstorage.GetNativeBlockResponse
- 18, // 42: coinbase.chainstorage.ChainStorage.GetNativeBlocksByRange:output_type -> coinbase.chainstorage.GetNativeBlocksByRangeResponse
- 20, // 43: coinbase.chainstorage.ChainStorage.GetRosettaBlock:output_type -> coinbase.chainstorage.GetRosettaBlockResponse
- 22, // 44: coinbase.chainstorage.ChainStorage.GetRosettaBlocksByRange:output_type -> coinbase.chainstorage.GetRosettaBlocksByRangeResponse
- 24, // 45: coinbase.chainstorage.ChainStorage.StreamChainEvents:output_type -> coinbase.chainstorage.ChainEventsResponse
- 26, // 46: coinbase.chainstorage.ChainStorage.GetChainEvents:output_type -> coinbase.chainstorage.GetChainEventsResponse
- 28, // 47: coinbase.chainstorage.ChainStorage.GetChainMetadata:output_type -> coinbase.chainstorage.GetChainMetadataResponse
- 30, // 48: coinbase.chainstorage.ChainStorage.GetVersionedChainEvent:output_type -> coinbase.chainstorage.GetVersionedChainEventResponse
- 32, // 49: coinbase.chainstorage.ChainStorage.GetBlockByTransaction:output_type -> coinbase.chainstorage.GetBlockByTransactionResponse
- 34, // 50: coinbase.chainstorage.ChainStorage.GetNativeTransaction:output_type -> coinbase.chainstorage.GetNativeTransactionResponse
- 36, // 51: coinbase.chainstorage.ChainStorage.GetVerifiedAccountState:output_type -> coinbase.chainstorage.GetVerifiedAccountStateResponse
- 36, // [36:52] is the sub-list for method output_type
- 20, // [20:36] is the sub-list for method input_type
+ 37, // 36: coinbase.chainstorage.ChainStorage.GetBlockByTimestamp:input_type -> coinbase.chainstorage.GetBlockByTimestampRequest
+ 6, // 37: coinbase.chainstorage.ChainStorage.GetLatestBlock:output_type -> coinbase.chainstorage.GetLatestBlockResponse
+ 8, // 38: coinbase.chainstorage.ChainStorage.GetBlockFile:output_type -> coinbase.chainstorage.GetBlockFileResponse
+ 10, // 39: coinbase.chainstorage.ChainStorage.GetBlockFilesByRange:output_type -> coinbase.chainstorage.GetBlockFilesByRangeResponse
+ 12, // 40: coinbase.chainstorage.ChainStorage.GetRawBlock:output_type -> coinbase.chainstorage.GetRawBlockResponse
+ 14, // 41: coinbase.chainstorage.ChainStorage.GetRawBlocksByRange:output_type -> coinbase.chainstorage.GetRawBlocksByRangeResponse
+ 16, // 42: coinbase.chainstorage.ChainStorage.GetNativeBlock:output_type -> coinbase.chainstorage.GetNativeBlockResponse
+ 18, // 43: coinbase.chainstorage.ChainStorage.GetNativeBlocksByRange:output_type -> coinbase.chainstorage.GetNativeBlocksByRangeResponse
+ 20, // 44: coinbase.chainstorage.ChainStorage.GetRosettaBlock:output_type -> coinbase.chainstorage.GetRosettaBlockResponse
+ 22, // 45: coinbase.chainstorage.ChainStorage.GetRosettaBlocksByRange:output_type -> coinbase.chainstorage.GetRosettaBlocksByRangeResponse
+ 24, // 46: coinbase.chainstorage.ChainStorage.StreamChainEvents:output_type -> coinbase.chainstorage.ChainEventsResponse
+ 26, // 47: coinbase.chainstorage.ChainStorage.GetChainEvents:output_type -> coinbase.chainstorage.GetChainEventsResponse
+ 28, // 48: coinbase.chainstorage.ChainStorage.GetChainMetadata:output_type -> coinbase.chainstorage.GetChainMetadataResponse
+ 30, // 49: coinbase.chainstorage.ChainStorage.GetVersionedChainEvent:output_type -> coinbase.chainstorage.GetVersionedChainEventResponse
+ 32, // 50: coinbase.chainstorage.ChainStorage.GetBlockByTransaction:output_type -> coinbase.chainstorage.GetBlockByTransactionResponse
+ 34, // 51: coinbase.chainstorage.ChainStorage.GetNativeTransaction:output_type -> coinbase.chainstorage.GetNativeTransactionResponse
+ 36, // 52: coinbase.chainstorage.ChainStorage.GetVerifiedAccountState:output_type -> coinbase.chainstorage.GetVerifiedAccountStateResponse
+ 38, // 53: coinbase.chainstorage.ChainStorage.GetBlockByTimestamp:output_type -> coinbase.chainstorage.GetBlockByTimestampResponse
+ 37, // [37:54] is the sub-list for method output_type
+ 20, // [20:37] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
@@ -3138,6 +3302,30 @@ func file_coinbase_chainstorage_api_proto_init() {
return nil
}
}
+ file_coinbase_chainstorage_api_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBlockByTimestampRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_coinbase_chainstorage_api_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBlockByTimestampResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -3145,7 +3333,7 @@ func file_coinbase_chainstorage_api_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_coinbase_chainstorage_api_proto_rawDesc,
NumEnums: 3,
- NumMessages: 34,
+ NumMessages: 36,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/protos/coinbase/chainstorage/api.proto b/protos/coinbase/chainstorage/api.proto
index 9ead69d..7b2883a 100644
--- a/protos/coinbase/chainstorage/api.proto
+++ b/protos/coinbase/chainstorage/api.proto
@@ -12,6 +12,7 @@ enum Compression {
NONE = 0;
// Compressed using gzip.
GZIP = 1;
+ ZSTD = 2; // Compressed using zstd.
}
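The new ZSTD value only declares the compression scheme; the codec wiring itself is not part of this hunk. A minimal round-trip sketch, assuming github.com/klauspost/compress/zstd as the underlying library (an assumption, since the diff does not show which zstd implementation the project uses):

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd" // assumed dependency
)

// zstdRoundTrip compresses and decompresses a payload with zstd, the scheme
// the new Compression.ZSTD value advertises. How ChainStorage selects the
// codec per block file is outside this hunk.
func zstdRoundTrip(raw []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		return nil, err
	}
	compressed := enc.EncodeAll(raw, nil)

	dec, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		return nil, err
	}
	if !bytes.Equal(out, raw) {
		return nil, fmt.Errorf("round-trip mismatch")
	}
	return compressed, nil
}
```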
enum InitialPosition {
@@ -249,6 +250,19 @@ message GetVerifiedAccountStateResponse {
ValidateAccountStateResponse response = 1;
}
+message GetBlockByTimestampRequest {
+ uint32 tag = 1;
+ uint64 timestamp = 2; // Unix timestamp in seconds
+}
+
+message GetBlockByTimestampResponse {
+ uint32 tag = 1;
+ string hash = 2;
+ string parent_hash = 3;
+ uint64 height = 4;
+ uint64 timestamp = 5; // Unix timestamp in seconds
+}
+
service ChainStorage {
rpc GetLatestBlock (GetLatestBlockRequest) returns (GetLatestBlockResponse);
rpc GetBlockFile(GetBlockFileRequest) returns (GetBlockFileResponse);
@@ -266,4 +280,5 @@ service ChainStorage {
rpc GetBlockByTransaction(GetBlockByTransactionRequest) returns (GetBlockByTransactionResponse);
rpc GetNativeTransaction (GetNativeTransactionRequest) returns (GetNativeTransactionResponse);
rpc GetVerifiedAccountState (GetVerifiedAccountStateRequest) returns (GetVerifiedAccountStateResponse);
+ rpc GetBlockByTimestamp (GetBlockByTimestampRequest) returns (GetBlockByTimestampResponse);
}
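For context, a minimal client sketch exercising the new RPC. The endpoint address, tag value, and import path are illustrative assumptions, not taken from the diff:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

func main() {
	// Hypothetical local endpoint; substitute the real server address.
	conn, err := grpc.NewClient("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := chainstorage.NewChainStorageClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Resolve a block by Unix timestamp (seconds), one hour in the past here.
	resp, err := client.GetBlockByTimestamp(ctx, &chainstorage.GetBlockByTimestampRequest{
		Tag:       1, // hypothetical tag
		Timestamp: uint64(time.Now().Add(-time.Hour).Unix()),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("height=%d hash=%s time=%d\n", resp.GetHeight(), resp.GetHash(), resp.GetTimestamp())
}
```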
diff --git a/protos/coinbase/chainstorage/api_grpc.pb.go b/protos/coinbase/chainstorage/api_grpc.pb.go
index d386c98..141c909 100644
--- a/protos/coinbase/chainstorage/api_grpc.pb.go
+++ b/protos/coinbase/chainstorage/api_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.29.4
// source: coinbase/chainstorage/api.proto
package chainstorage
@@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
ChainStorage_GetLatestBlock_FullMethodName = "/coinbase.chainstorage.ChainStorage/GetLatestBlock"
@@ -35,6 +35,7 @@ const (
ChainStorage_GetBlockByTransaction_FullMethodName = "/coinbase.chainstorage.ChainStorage/GetBlockByTransaction"
ChainStorage_GetNativeTransaction_FullMethodName = "/coinbase.chainstorage.ChainStorage/GetNativeTransaction"
ChainStorage_GetVerifiedAccountState_FullMethodName = "/coinbase.chainstorage.ChainStorage/GetVerifiedAccountState"
+ ChainStorage_GetBlockByTimestamp_FullMethodName = "/coinbase.chainstorage.ChainStorage/GetBlockByTimestamp"
)
// ChainStorageClient is the client API for ChainStorage service.
@@ -50,13 +51,14 @@ type ChainStorageClient interface {
GetNativeBlocksByRange(ctx context.Context, in *GetNativeBlocksByRangeRequest, opts ...grpc.CallOption) (*GetNativeBlocksByRangeResponse, error)
GetRosettaBlock(ctx context.Context, in *GetRosettaBlockRequest, opts ...grpc.CallOption) (*GetRosettaBlockResponse, error)
GetRosettaBlocksByRange(ctx context.Context, in *GetRosettaBlocksByRangeRequest, opts ...grpc.CallOption) (*GetRosettaBlocksByRangeResponse, error)
- StreamChainEvents(ctx context.Context, in *ChainEventsRequest, opts ...grpc.CallOption) (ChainStorage_StreamChainEventsClient, error)
+ StreamChainEvents(ctx context.Context, in *ChainEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ChainEventsResponse], error)
GetChainEvents(ctx context.Context, in *GetChainEventsRequest, opts ...grpc.CallOption) (*GetChainEventsResponse, error)
GetChainMetadata(ctx context.Context, in *GetChainMetadataRequest, opts ...grpc.CallOption) (*GetChainMetadataResponse, error)
GetVersionedChainEvent(ctx context.Context, in *GetVersionedChainEventRequest, opts ...grpc.CallOption) (*GetVersionedChainEventResponse, error)
GetBlockByTransaction(ctx context.Context, in *GetBlockByTransactionRequest, opts ...grpc.CallOption) (*GetBlockByTransactionResponse, error)
GetNativeTransaction(ctx context.Context, in *GetNativeTransactionRequest, opts ...grpc.CallOption) (*GetNativeTransactionResponse, error)
GetVerifiedAccountState(ctx context.Context, in *GetVerifiedAccountStateRequest, opts ...grpc.CallOption) (*GetVerifiedAccountStateResponse, error)
+ GetBlockByTimestamp(ctx context.Context, in *GetBlockByTimestampRequest, opts ...grpc.CallOption) (*GetBlockByTimestampResponse, error)
}
type chainStorageClient struct {
@@ -68,8 +70,9 @@ func NewChainStorageClient(cc grpc.ClientConnInterface) ChainStorageClient {
}
func (c *chainStorageClient) GetLatestBlock(ctx context.Context, in *GetLatestBlockRequest, opts ...grpc.CallOption) (*GetLatestBlockResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetLatestBlockResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetLatestBlock_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetLatestBlock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -77,8 +80,9 @@ func (c *chainStorageClient) GetLatestBlock(ctx context.Context, in *GetLatestBl
}
func (c *chainStorageClient) GetBlockFile(ctx context.Context, in *GetBlockFileRequest, opts ...grpc.CallOption) (*GetBlockFileResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetBlockFileResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetBlockFile_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetBlockFile_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -86,8 +90,9 @@ func (c *chainStorageClient) GetBlockFile(ctx context.Context, in *GetBlockFileR
}
func (c *chainStorageClient) GetBlockFilesByRange(ctx context.Context, in *GetBlockFilesByRangeRequest, opts ...grpc.CallOption) (*GetBlockFilesByRangeResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetBlockFilesByRangeResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetBlockFilesByRange_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetBlockFilesByRange_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -95,8 +100,9 @@ func (c *chainStorageClient) GetBlockFilesByRange(ctx context.Context, in *GetBl
}
func (c *chainStorageClient) GetRawBlock(ctx context.Context, in *GetRawBlockRequest, opts ...grpc.CallOption) (*GetRawBlockResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetRawBlockResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetRawBlock_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetRawBlock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -104,8 +110,9 @@ func (c *chainStorageClient) GetRawBlock(ctx context.Context, in *GetRawBlockReq
}
func (c *chainStorageClient) GetRawBlocksByRange(ctx context.Context, in *GetRawBlocksByRangeRequest, opts ...grpc.CallOption) (*GetRawBlocksByRangeResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetRawBlocksByRangeResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetRawBlocksByRange_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetRawBlocksByRange_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -113,8 +120,9 @@ func (c *chainStorageClient) GetRawBlocksByRange(ctx context.Context, in *GetRaw
}
func (c *chainStorageClient) GetNativeBlock(ctx context.Context, in *GetNativeBlockRequest, opts ...grpc.CallOption) (*GetNativeBlockResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNativeBlockResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetNativeBlock_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetNativeBlock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -122,8 +130,9 @@ func (c *chainStorageClient) GetNativeBlock(ctx context.Context, in *GetNativeBl
}
func (c *chainStorageClient) GetNativeBlocksByRange(ctx context.Context, in *GetNativeBlocksByRangeRequest, opts ...grpc.CallOption) (*GetNativeBlocksByRangeResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNativeBlocksByRangeResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetNativeBlocksByRange_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetNativeBlocksByRange_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -131,8 +140,9 @@ func (c *chainStorageClient) GetNativeBlocksByRange(ctx context.Context, in *Get
}
func (c *chainStorageClient) GetRosettaBlock(ctx context.Context, in *GetRosettaBlockRequest, opts ...grpc.CallOption) (*GetRosettaBlockResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetRosettaBlockResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetRosettaBlock_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetRosettaBlock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -140,20 +150,22 @@ func (c *chainStorageClient) GetRosettaBlock(ctx context.Context, in *GetRosetta
}
func (c *chainStorageClient) GetRosettaBlocksByRange(ctx context.Context, in *GetRosettaBlocksByRangeRequest, opts ...grpc.CallOption) (*GetRosettaBlocksByRangeResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetRosettaBlocksByRangeResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetRosettaBlocksByRange_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetRosettaBlocksByRange_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *chainStorageClient) StreamChainEvents(ctx context.Context, in *ChainEventsRequest, opts ...grpc.CallOption) (ChainStorage_StreamChainEventsClient, error) {
- stream, err := c.cc.NewStream(ctx, &ChainStorage_ServiceDesc.Streams[0], ChainStorage_StreamChainEvents_FullMethodName, opts...)
+func (c *chainStorageClient) StreamChainEvents(ctx context.Context, in *ChainEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ChainEventsResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &ChainStorage_ServiceDesc.Streams[0], ChainStorage_StreamChainEvents_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &chainStorageStreamChainEventsClient{stream}
+ x := &grpc.GenericClientStream[ChainEventsRequest, ChainEventsResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -163,26 +175,13 @@ func (c *chainStorageClient) StreamChainEvents(ctx context.Context, in *ChainEve
return x, nil
}
-type ChainStorage_StreamChainEventsClient interface {
- Recv() (*ChainEventsResponse, error)
- grpc.ClientStream
-}
-
-type chainStorageStreamChainEventsClient struct {
- grpc.ClientStream
-}
-
-func (x *chainStorageStreamChainEventsClient) Recv() (*ChainEventsResponse, error) {
- m := new(ChainEventsResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ChainStorage_StreamChainEventsClient = grpc.ServerStreamingClient[ChainEventsResponse]
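The alias keeps pre-generics call sites compiling after the move to grpc.ServerStreamingClient. A small consumer sketch (import path assumed as above):

```go
package example

import (
	"context"
	"errors"
	"io"
	"log"

	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

// drainEvents shows that code written against the old named stream type still
// compiles: the alias resolves it to grpc.ServerStreamingClient[ChainEventsResponse].
func drainEvents(ctx context.Context, client chainstorage.ChainStorageClient) error {
	stream, err := client.StreamChainEvents(ctx, &chainstorage.ChainEventsRequest{})
	if err != nil {
		return err
	}
	var s chainstorage.ChainStorage_StreamChainEventsClient = stream // alias in action
	for {
		resp, err := s.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("event: %v", resp.GetEvent())
	}
}
```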
func (c *chainStorageClient) GetChainEvents(ctx context.Context, in *GetChainEventsRequest, opts ...grpc.CallOption) (*GetChainEventsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetChainEventsResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetChainEvents_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetChainEvents_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -190,8 +189,9 @@ func (c *chainStorageClient) GetChainEvents(ctx context.Context, in *GetChainEve
}
func (c *chainStorageClient) GetChainMetadata(ctx context.Context, in *GetChainMetadataRequest, opts ...grpc.CallOption) (*GetChainMetadataResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetChainMetadataResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetChainMetadata_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetChainMetadata_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -199,8 +199,9 @@ func (c *chainStorageClient) GetChainMetadata(ctx context.Context, in *GetChainM
}
func (c *chainStorageClient) GetVersionedChainEvent(ctx context.Context, in *GetVersionedChainEventRequest, opts ...grpc.CallOption) (*GetVersionedChainEventResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetVersionedChainEventResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetVersionedChainEvent_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetVersionedChainEvent_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -208,8 +209,9 @@ func (c *chainStorageClient) GetVersionedChainEvent(ctx context.Context, in *Get
}
func (c *chainStorageClient) GetBlockByTransaction(ctx context.Context, in *GetBlockByTransactionRequest, opts ...grpc.CallOption) (*GetBlockByTransactionResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetBlockByTransactionResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetBlockByTransaction_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetBlockByTransaction_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -217,8 +219,9 @@ func (c *chainStorageClient) GetBlockByTransaction(ctx context.Context, in *GetB
}
func (c *chainStorageClient) GetNativeTransaction(ctx context.Context, in *GetNativeTransactionRequest, opts ...grpc.CallOption) (*GetNativeTransactionResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNativeTransactionResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetNativeTransaction_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetNativeTransaction_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -226,8 +229,19 @@ func (c *chainStorageClient) GetNativeTransaction(ctx context.Context, in *GetNa
}
func (c *chainStorageClient) GetVerifiedAccountState(ctx context.Context, in *GetVerifiedAccountStateRequest, opts ...grpc.CallOption) (*GetVerifiedAccountStateResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetVerifiedAccountStateResponse)
- err := c.cc.Invoke(ctx, ChainStorage_GetVerifiedAccountState_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, ChainStorage_GetVerifiedAccountState_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *chainStorageClient) GetBlockByTimestamp(ctx context.Context, in *GetBlockByTimestampRequest, opts ...grpc.CallOption) (*GetBlockByTimestampResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(GetBlockByTimestampResponse)
+ err := c.cc.Invoke(ctx, ChainStorage_GetBlockByTimestamp_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -236,7 +250,7 @@ func (c *chainStorageClient) GetVerifiedAccountState(ctx context.Context, in *Ge
// ChainStorageServer is the server API for ChainStorage service.
// All implementations should embed UnimplementedChainStorageServer
-// for forward compatibility
+// for forward compatibility.
type ChainStorageServer interface {
GetLatestBlock(context.Context, *GetLatestBlockRequest) (*GetLatestBlockResponse, error)
GetBlockFile(context.Context, *GetBlockFileRequest) (*GetBlockFileResponse, error)
@@ -247,18 +261,22 @@ type ChainStorageServer interface {
GetNativeBlocksByRange(context.Context, *GetNativeBlocksByRangeRequest) (*GetNativeBlocksByRangeResponse, error)
GetRosettaBlock(context.Context, *GetRosettaBlockRequest) (*GetRosettaBlockResponse, error)
GetRosettaBlocksByRange(context.Context, *GetRosettaBlocksByRangeRequest) (*GetRosettaBlocksByRangeResponse, error)
- StreamChainEvents(*ChainEventsRequest, ChainStorage_StreamChainEventsServer) error
+ StreamChainEvents(*ChainEventsRequest, grpc.ServerStreamingServer[ChainEventsResponse]) error
GetChainEvents(context.Context, *GetChainEventsRequest) (*GetChainEventsResponse, error)
GetChainMetadata(context.Context, *GetChainMetadataRequest) (*GetChainMetadataResponse, error)
GetVersionedChainEvent(context.Context, *GetVersionedChainEventRequest) (*GetVersionedChainEventResponse, error)
GetBlockByTransaction(context.Context, *GetBlockByTransactionRequest) (*GetBlockByTransactionResponse, error)
GetNativeTransaction(context.Context, *GetNativeTransactionRequest) (*GetNativeTransactionResponse, error)
GetVerifiedAccountState(context.Context, *GetVerifiedAccountStateRequest) (*GetVerifiedAccountStateResponse, error)
+ GetBlockByTimestamp(context.Context, *GetBlockByTimestampRequest) (*GetBlockByTimestampResponse, error)
}
-// UnimplementedChainStorageServer should be embedded to have forward compatible implementations.
-type UnimplementedChainStorageServer struct {
-}
+// UnimplementedChainStorageServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedChainStorageServer struct{}
func (UnimplementedChainStorageServer) GetLatestBlock(context.Context, *GetLatestBlockRequest) (*GetLatestBlockResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetLatestBlock not implemented")
@@ -287,7 +305,7 @@ func (UnimplementedChainStorageServer) GetRosettaBlock(context.Context, *GetRose
func (UnimplementedChainStorageServer) GetRosettaBlocksByRange(context.Context, *GetRosettaBlocksByRangeRequest) (*GetRosettaBlocksByRangeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRosettaBlocksByRange not implemented")
}
-func (UnimplementedChainStorageServer) StreamChainEvents(*ChainEventsRequest, ChainStorage_StreamChainEventsServer) error {
+func (UnimplementedChainStorageServer) StreamChainEvents(*ChainEventsRequest, grpc.ServerStreamingServer[ChainEventsResponse]) error {
return status.Errorf(codes.Unimplemented, "method StreamChainEvents not implemented")
}
func (UnimplementedChainStorageServer) GetChainEvents(context.Context, *GetChainEventsRequest) (*GetChainEventsResponse, error) {
@@ -308,6 +326,10 @@ func (UnimplementedChainStorageServer) GetNativeTransaction(context.Context, *Ge
func (UnimplementedChainStorageServer) GetVerifiedAccountState(context.Context, *GetVerifiedAccountStateRequest) (*GetVerifiedAccountStateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVerifiedAccountState not implemented")
}
+func (UnimplementedChainStorageServer) GetBlockByTimestamp(context.Context, *GetBlockByTimestampRequest) (*GetBlockByTimestampResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBlockByTimestamp not implemented")
+}
+func (UnimplementedChainStorageServer) testEmbeddedByValue() {}
// UnsafeChainStorageServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ChainStorageServer will
@@ -317,6 +339,13 @@ type UnsafeChainStorageServer interface {
}
func RegisterChainStorageServer(s grpc.ServiceRegistrar, srv ChainStorageServer) {
+ // If the following call panics, it indicates UnimplementedChainStorageServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later, when the first request arrives.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&ChainStorage_ServiceDesc, srv)
}
@@ -487,21 +516,11 @@ func _ChainStorage_StreamChainEvents_Handler(srv interface{}, stream grpc.Server
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(ChainStorageServer).StreamChainEvents(m, &chainStorageStreamChainEventsServer{stream})
+ return srv.(ChainStorageServer).StreamChainEvents(m, &grpc.GenericServerStream[ChainEventsRequest, ChainEventsResponse]{ServerStream: stream})
}
-type ChainStorage_StreamChainEventsServer interface {
- Send(*ChainEventsResponse) error
- grpc.ServerStream
-}
-
-type chainStorageStreamChainEventsServer struct {
- grpc.ServerStream
-}
-
-func (x *chainStorageStreamChainEventsServer) Send(m *ChainEventsResponse) error {
- return x.ServerStream.SendMsg(m)
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ChainStorage_StreamChainEventsServer = grpc.ServerStreamingServer[ChainEventsResponse]
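The same pattern holds on the server side: a handler written against the old ChainStorage_StreamChainEventsServer name still satisfies the new generic signature. A sketch, with pendingEvents as a hypothetical helper:

```go
package example

import (
	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

type eventServer struct {
	chainstorage.UnimplementedChainStorageServer
}

// StreamChainEvents is declared against the old alias name and still satisfies
// the new grpc.ServerStreamingServer[ChainEventsResponse] signature.
func (s *eventServer) StreamChainEvents(req *chainstorage.ChainEventsRequest, stream chainstorage.ChainStorage_StreamChainEventsServer) error {
	for _, ev := range pendingEvents(req) {
		if err := stream.Send(&chainstorage.ChainEventsResponse{Event: ev}); err != nil {
			return err
		}
	}
	return nil
}

// pendingEvents is a hypothetical helper resolving events for the request.
func pendingEvents(req *chainstorage.ChainEventsRequest) []*chainstorage.BlockchainEvent {
	return nil // placeholder
}
```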
func _ChainStorage_GetChainEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetChainEventsRequest)
@@ -611,6 +630,24 @@ func _ChainStorage_GetVerifiedAccountState_Handler(srv interface{}, ctx context.
return interceptor(ctx, in, info, handler)
}
+func _ChainStorage_GetBlockByTimestamp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBlockByTimestampRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChainStorageServer).GetBlockByTimestamp(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChainStorage_GetBlockByTimestamp_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChainStorageServer).GetBlockByTimestamp(ctx, req.(*GetBlockByTimestampRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ChainStorage_ServiceDesc is the grpc.ServiceDesc for ChainStorage service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -678,6 +715,10 @@ var ChainStorage_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetVerifiedAccountState",
Handler: _ChainStorage_GetVerifiedAccountState_Handler,
},
+ {
+ MethodName: "GetBlockByTimestamp",
+ Handler: _ChainStorage_GetBlockByTimestamp_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
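Putting the pieces together, a server sketch that embeds UnimplementedChainStorageServer by value (as the generated NOTE requires) and implements the new method; the listen address and stub lookup are placeholders:

```go
package example

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

// server embeds UnimplementedChainStorageServer by value. RegisterChainStorageServer
// verifies this at startup via testEmbeddedByValue, so a nil pointer embed fails
// fast instead of panicking on the first unimplemented call.
type server struct {
	chainstorage.UnimplementedChainStorageServer
}

func (s *server) GetBlockByTimestamp(ctx context.Context, req *chainstorage.GetBlockByTimestampRequest) (*chainstorage.GetBlockByTimestampResponse, error) {
	// Placeholder: a real implementation would resolve the block matching
	// req.GetTimestamp() from storage.
	return &chainstorage.GetBlockByTimestampResponse{
		Tag:       req.GetTag(),
		Timestamp: req.GetTimestamp(),
	}, nil
}

func serve() error {
	lis, err := net.Listen("tcp", ":9090") // hypothetical address
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	chainstorage.RegisterChainStorageServer(s, &server{})
	log.Println("serving ChainStorage on :9090")
	return s.Serve(lis)
}
```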
diff --git a/protos/coinbase/chainstorage/blockchain.pb.go b/protos/coinbase/chainstorage/blockchain.pb.go
index 8c52307..237e48a 100644
--- a/protos/coinbase/chainstorage/blockchain.pb.go
+++ b/protos/coinbase/chainstorage/blockchain.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/blockchain_aptos.pb.go b/protos/coinbase/chainstorage/blockchain_aptos.pb.go
index 51500a5..62a2e1b 100644
--- a/protos/coinbase/chainstorage/blockchain_aptos.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_aptos.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_aptos.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/blockchain_bitcoin.pb.go b/protos/coinbase/chainstorage/blockchain_bitcoin.pb.go
index 2eb5e08..0915902 100644
--- a/protos/coinbase/chainstorage/blockchain_bitcoin.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_bitcoin.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_bitcoin.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/blockchain_ethereum.pb.go b/protos/coinbase/chainstorage/blockchain_ethereum.pb.go
index a7215ec..d24f399 100644
--- a/protos/coinbase/chainstorage/blockchain_ethereum.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_ethereum.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_ethereum.proto
package chainstorage
@@ -1203,6 +1203,38 @@ type EthereumTransactionReceipt struct {
//
// *EthereumTransactionReceipt_BlobGasUsed
OptionalBlobGasUsed isEthereumTransactionReceipt_OptionalBlobGasUsed `protobuf_oneof:"optional_blob_gas_used"`
+ // Types that are assignable to OptionalFee:
+ //
+ // *EthereumTransactionReceipt_Fee
+ OptionalFee isEthereumTransactionReceipt_OptionalFee `protobuf_oneof:"optional_fee"`
+ // Types that are assignable to OptionalNetFee:
+ //
+ // *EthereumTransactionReceipt_NetFee
+ OptionalNetFee isEthereumTransactionReceipt_OptionalNetFee `protobuf_oneof:"optional_net_fee"`
+ // Types that are assignable to OptionalNetUsage:
+ //
+ // *EthereumTransactionReceipt_NetUsage
+ OptionalNetUsage isEthereumTransactionReceipt_OptionalNetUsage `protobuf_oneof:"optional_net_usage"`
+ // Types that are assignable to OptionalEnergyUsage:
+ //
+ // *EthereumTransactionReceipt_EnergyUsage
+ OptionalEnergyUsage isEthereumTransactionReceipt_OptionalEnergyUsage `protobuf_oneof:"optional_energy_usage"`
+ // Types that are assignable to OptionalEnergyFee:
+ //
+ // *EthereumTransactionReceipt_EnergyFee
+ OptionalEnergyFee isEthereumTransactionReceipt_OptionalEnergyFee `protobuf_oneof:"optional_energy_fee"`
+ // Types that are assignable to OptionalOriginEnergyUsage:
+ //
+ // *EthereumTransactionReceipt_OriginEnergyUsage
+ OptionalOriginEnergyUsage isEthereumTransactionReceipt_OptionalOriginEnergyUsage `protobuf_oneof:"optional_origin_energy_usage"`
+ // Types that are assignable to OptionalEnergyUsageTotal:
+ //
+ // *EthereumTransactionReceipt_EnergyUsageTotal
+ OptionalEnergyUsageTotal isEthereumTransactionReceipt_OptionalEnergyUsageTotal `protobuf_oneof:"optional_energy_usage_total"`
+ // Types that are assignable to OptionalEnergyPenaltyTotal:
+ //
+ // *EthereumTransactionReceipt_EnergyPenaltyTotal
+ OptionalEnergyPenaltyTotal isEthereumTransactionReceipt_OptionalEnergyPenaltyTotal `protobuf_oneof:"optional_energy_penalty_total"`
}
func (x *EthereumTransactionReceipt) Reset() {
@@ -1419,6 +1451,118 @@ func (x *EthereumTransactionReceipt) GetBlobGasUsed() uint64 {
return 0
}
+func (m *EthereumTransactionReceipt) GetOptionalFee() isEthereumTransactionReceipt_OptionalFee {
+ if m != nil {
+ return m.OptionalFee
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetFee() uint64 {
+ if x, ok := x.GetOptionalFee().(*EthereumTransactionReceipt_Fee); ok {
+ return x.Fee
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalNetFee() isEthereumTransactionReceipt_OptionalNetFee {
+ if m != nil {
+ return m.OptionalNetFee
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetNetFee() uint64 {
+ if x, ok := x.GetOptionalNetFee().(*EthereumTransactionReceipt_NetFee); ok {
+ return x.NetFee
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalNetUsage() isEthereumTransactionReceipt_OptionalNetUsage {
+ if m != nil {
+ return m.OptionalNetUsage
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetNetUsage() uint64 {
+ if x, ok := x.GetOptionalNetUsage().(*EthereumTransactionReceipt_NetUsage); ok {
+ return x.NetUsage
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalEnergyUsage() isEthereumTransactionReceipt_OptionalEnergyUsage {
+ if m != nil {
+ return m.OptionalEnergyUsage
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetEnergyUsage() uint64 {
+ if x, ok := x.GetOptionalEnergyUsage().(*EthereumTransactionReceipt_EnergyUsage); ok {
+ return x.EnergyUsage
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalEnergyFee() isEthereumTransactionReceipt_OptionalEnergyFee {
+ if m != nil {
+ return m.OptionalEnergyFee
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetEnergyFee() uint64 {
+ if x, ok := x.GetOptionalEnergyFee().(*EthereumTransactionReceipt_EnergyFee); ok {
+ return x.EnergyFee
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalOriginEnergyUsage() isEthereumTransactionReceipt_OptionalOriginEnergyUsage {
+ if m != nil {
+ return m.OptionalOriginEnergyUsage
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetOriginEnergyUsage() uint64 {
+ if x, ok := x.GetOptionalOriginEnergyUsage().(*EthereumTransactionReceipt_OriginEnergyUsage); ok {
+ return x.OriginEnergyUsage
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalEnergyUsageTotal() isEthereumTransactionReceipt_OptionalEnergyUsageTotal {
+ if m != nil {
+ return m.OptionalEnergyUsageTotal
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetEnergyUsageTotal() uint64 {
+ if x, ok := x.GetOptionalEnergyUsageTotal().(*EthereumTransactionReceipt_EnergyUsageTotal); ok {
+ return x.EnergyUsageTotal
+ }
+ return 0
+}
+
+func (m *EthereumTransactionReceipt) GetOptionalEnergyPenaltyTotal() isEthereumTransactionReceipt_OptionalEnergyPenaltyTotal {
+ if m != nil {
+ return m.OptionalEnergyPenaltyTotal
+ }
+ return nil
+}
+
+func (x *EthereumTransactionReceipt) GetEnergyPenaltyTotal() uint64 {
+ if x, ok := x.GetOptionalEnergyPenaltyTotal().(*EthereumTransactionReceipt_EnergyPenaltyTotal); ok {
+ return x.EnergyPenaltyTotal
+ }
+ return 0
+}
+
type isEthereumTransactionReceipt_OptionalStatus interface {
isEthereumTransactionReceipt_OptionalStatus()
}
@@ -1480,6 +1624,89 @@ type EthereumTransactionReceipt_BlobGasUsed struct {
func (*EthereumTransactionReceipt_BlobGasUsed) isEthereumTransactionReceipt_OptionalBlobGasUsed() {}
+type isEthereumTransactionReceipt_OptionalFee interface {
+ isEthereumTransactionReceipt_OptionalFee()
+}
+
+type EthereumTransactionReceipt_Fee struct {
+ Fee uint64 `protobuf:"varint,22,opt,name=fee,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_Fee) isEthereumTransactionReceipt_OptionalFee() {}
+
+type isEthereumTransactionReceipt_OptionalNetFee interface {
+ isEthereumTransactionReceipt_OptionalNetFee()
+}
+
+type EthereumTransactionReceipt_NetFee struct {
+ NetFee uint64 `protobuf:"varint,23,opt,name=net_fee,json=netFee,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_NetFee) isEthereumTransactionReceipt_OptionalNetFee() {}
+
+type isEthereumTransactionReceipt_OptionalNetUsage interface {
+ isEthereumTransactionReceipt_OptionalNetUsage()
+}
+
+type EthereumTransactionReceipt_NetUsage struct {
+ NetUsage uint64 `protobuf:"varint,24,opt,name=net_usage,json=netUsage,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_NetUsage) isEthereumTransactionReceipt_OptionalNetUsage() {}
+
+type isEthereumTransactionReceipt_OptionalEnergyUsage interface {
+ isEthereumTransactionReceipt_OptionalEnergyUsage()
+}
+
+type EthereumTransactionReceipt_EnergyUsage struct {
+ EnergyUsage uint64 `protobuf:"varint,25,opt,name=energy_usage,json=energyUsage,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_EnergyUsage) isEthereumTransactionReceipt_OptionalEnergyUsage() {}
+
+type isEthereumTransactionReceipt_OptionalEnergyFee interface {
+ isEthereumTransactionReceipt_OptionalEnergyFee()
+}
+
+type EthereumTransactionReceipt_EnergyFee struct {
+ EnergyFee uint64 `protobuf:"varint,26,opt,name=energy_fee,json=energyFee,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_EnergyFee) isEthereumTransactionReceipt_OptionalEnergyFee() {}
+
+type isEthereumTransactionReceipt_OptionalOriginEnergyUsage interface {
+ isEthereumTransactionReceipt_OptionalOriginEnergyUsage()
+}
+
+type EthereumTransactionReceipt_OriginEnergyUsage struct {
+ OriginEnergyUsage uint64 `protobuf:"varint,27,opt,name=origin_energy_usage,json=originEnergyUsage,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_OriginEnergyUsage) isEthereumTransactionReceipt_OptionalOriginEnergyUsage() {
+}
+
+type isEthereumTransactionReceipt_OptionalEnergyUsageTotal interface {
+ isEthereumTransactionReceipt_OptionalEnergyUsageTotal()
+}
+
+type EthereumTransactionReceipt_EnergyUsageTotal struct {
+ EnergyUsageTotal uint64 `protobuf:"varint,28,opt,name=energy_usage_total,json=energyUsageTotal,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_EnergyUsageTotal) isEthereumTransactionReceipt_OptionalEnergyUsageTotal() {
+}
+
+type isEthereumTransactionReceipt_OptionalEnergyPenaltyTotal interface {
+ isEthereumTransactionReceipt_OptionalEnergyPenaltyTotal()
+}
+
+type EthereumTransactionReceipt_EnergyPenaltyTotal struct {
+ EnergyPenaltyTotal uint64 `protobuf:"varint,29,opt,name=energy_penalty_total,json=energyPenaltyTotal,proto3,oneof"`
+}
+
+func (*EthereumTransactionReceipt_EnergyPenaltyTotal) isEthereumTransactionReceipt_OptionalEnergyPenaltyTotal() {
+}
+
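All of the new receipt fields are oneof-wrapped scalars, so presence is explicit rather than inferred from a zero value. A short sketch of setting and probing them:

```go
package example

import (
	"fmt"

	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

// oneofDemo sets two of the new receipt fields and shows that the generated
// getters return 0 for fields left unset.
func oneofDemo() {
	receipt := &chainstorage.EthereumTransactionReceipt{
		OptionalFee:    &chainstorage.EthereumTransactionReceipt_Fee{Fee: 345_000},
		OptionalNetFee: &chainstorage.EthereumTransactionReceipt_NetFee{NetFee: 345_000},
	}

	fmt.Println(receipt.GetFee())         // 345000
	fmt.Println(receipt.GetEnergyUsage()) // 0 (unset)

	// Distinguish "unset" from "set to zero" by inspecting the wrapper.
	if _, ok := receipt.GetOptionalEnergyUsage().(*chainstorage.EthereumTransactionReceipt_EnergyUsage); !ok {
		fmt.Println("energy_usage not set")
	}
}
```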
type EthereumEventLog struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1715,25 +1942,26 @@ type EthereumTransactionFlattenedTrace struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- From string `protobuf:"bytes,3,opt,name=from,proto3" json:"from,omitempty"`
- To string `protobuf:"bytes,4,opt,name=to,proto3" json:"to,omitempty"`
- Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
- Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"`
- GasUsed uint64 `protobuf:"varint,7,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
- Input string `protobuf:"bytes,8,opt,name=input,proto3" json:"input,omitempty"`
- Output string `protobuf:"bytes,9,opt,name=output,proto3" json:"output,omitempty"`
- Subtraces uint64 `protobuf:"varint,10,opt,name=subtraces,proto3" json:"subtraces,omitempty"`
- TraceAddress []uint64 `protobuf:"varint,11,rep,packed,name=trace_address,json=traceAddress,proto3" json:"trace_address,omitempty"`
- TraceType string `protobuf:"bytes,12,opt,name=trace_type,json=traceType,proto3" json:"trace_type,omitempty"`
- CallType string `protobuf:"bytes,13,opt,name=call_type,json=callType,proto3" json:"call_type,omitempty"`
- TraceId string `protobuf:"bytes,14,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- Status uint64 `protobuf:"varint,15,opt,name=status,proto3" json:"status,omitempty"`
- BlockHash string `protobuf:"bytes,16,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
- BlockNumber uint64 `protobuf:"varint,17,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
- TransactionHash string `protobuf:"bytes,18,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"`
- TransactionIndex uint64 `protobuf:"varint,19,opt,name=transaction_index,json=transactionIndex,proto3" json:"transaction_index,omitempty"`
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ From string `protobuf:"bytes,3,opt,name=from,proto3" json:"from,omitempty"`
+ To string `protobuf:"bytes,4,opt,name=to,proto3" json:"to,omitempty"`
+ Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+ Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"`
+ GasUsed uint64 `protobuf:"varint,7,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
+ Input string `protobuf:"bytes,8,opt,name=input,proto3" json:"input,omitempty"`
+ Output string `protobuf:"bytes,9,opt,name=output,proto3" json:"output,omitempty"`
+ Subtraces uint64 `protobuf:"varint,10,opt,name=subtraces,proto3" json:"subtraces,omitempty"`
+ TraceAddress []uint64 `protobuf:"varint,11,rep,packed,name=trace_address,json=traceAddress,proto3" json:"trace_address,omitempty"`
+ TraceType string `protobuf:"bytes,12,opt,name=trace_type,json=traceType,proto3" json:"trace_type,omitempty"`
+ CallType string `protobuf:"bytes,13,opt,name=call_type,json=callType,proto3" json:"call_type,omitempty"`
+ TraceId string `protobuf:"bytes,14,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ Status uint64 `protobuf:"varint,15,opt,name=status,proto3" json:"status,omitempty"`
+ BlockHash string `protobuf:"bytes,16,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,17,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ TransactionHash string `protobuf:"bytes,18,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"`
+ TransactionIndex uint64 `protobuf:"varint,19,opt,name=transaction_index,json=transactionIndex,proto3" json:"transaction_index,omitempty"`
+ CallValueInfo []*CallValueInfo `protobuf:"bytes,20,rep,name=call_value_info,json=callValueInfo,proto3" json:"call_value_info,omitempty"`
}
func (x *EthereumTransactionFlattenedTrace) Reset() {
@@ -1901,6 +2129,13 @@ func (x *EthereumTransactionFlattenedTrace) GetTransactionIndex() uint64 {
return 0
}
+func (x *EthereumTransactionFlattenedTrace) GetCallValueInfo() []*CallValueInfo {
+ if x != nil {
+ return x.CallValueInfo
+ }
+ return nil
+}
+
type EthereumTokenTransfer struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2337,6 +2572,61 @@ func (x *EthereumAccountStateResponse) GetCodeHash() string {
return ""
}
+type CallValueInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TokenId string `protobuf:"bytes,1,opt,name=token_id,json=tokenId,proto3" json:"token_id,omitempty"`
+ CallValue int64 `protobuf:"varint,2,opt,name=call_value,json=callValue,proto3" json:"call_value,omitempty"`
+}
+
+func (x *CallValueInfo) Reset() {
+ *x = CallValueInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CallValueInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CallValueInfo) ProtoMessage() {}
+
+func (x *CallValueInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CallValueInfo.ProtoReflect.Descriptor instead.
+func (*CallValueInfo) Descriptor() ([]byte, []int) {
+ return file_coinbase_chainstorage_blockchain_ethereum_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *CallValueInfo) GetTokenId() string {
+ if x != nil {
+ return x.TokenId
+ }
+ return ""
+}
+
+func (x *CallValueInfo) GetCallValue() int64 {
+ if x != nil {
+ return x.CallValue
+ }
+ return 0
+}
+
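CallValueInfo hangs off each flattened trace as a repeated field; judging by the token_id/call_value naming alongside the energy and net fields above, these appear aimed at TVM-style (Tron) chains, though the diff itself does not say so. A usage sketch with hypothetical token ids:

```go
package example

import (
	"fmt"

	chainstorage "github.com/coinbase/chainstorage/protos/coinbase/chainstorage" // assumed import path
)

// callValueDemo attaches per-token transfer values to a flattened trace via
// the new repeated call_value_info field, then reads them back.
func callValueDemo() {
	trace := &chainstorage.EthereumTransactionFlattenedTrace{
		TraceType: "call",
		CallValueInfo: []*chainstorage.CallValueInfo{
			{TokenId: "_", CallValue: 1_000_000}, // hypothetical native-token entry
			{TokenId: "1002000", CallValue: 25},  // hypothetical TRC-10 token id
		},
	}
	for _, cv := range trace.GetCallValueInfo() {
		fmt.Printf("token=%s value=%d\n", cv.GetTokenId(), cv.GetCallValue())
	}
}
```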
type EthereumTransactionReceipt_L1FeeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2351,7 +2641,7 @@ type EthereumTransactionReceipt_L1FeeInfo struct {
func (x *EthereumTransactionReceipt_L1FeeInfo) Reset() {
*x = EthereumTransactionReceipt_L1FeeInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[18]
+ mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2364,7 +2654,7 @@ func (x *EthereumTransactionReceipt_L1FeeInfo) String() string {
func (*EthereumTransactionReceipt_L1FeeInfo) ProtoMessage() {}
func (x *EthereumTransactionReceipt_L1FeeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[18]
+ mi := &file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2626,7 +2916,7 @@ var file_coinbase_chainstorage_blockchain_ethereum_proto_rawDesc = []byte{
0x5f, 0x6d, 0x69, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61,
0x6c, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x1f, 0x0a, 0x1d, 0x6f, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70,
- 0x65, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x22, 0xdb, 0x08, 0x0a, 0x1a,
+ 0x65, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x22, 0xcf, 0x0c, 0x0a, 0x1a,
0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72,
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01,
@@ -2677,46 +2967,96 @@ var file_coinbase_chainstorage_blockchain_ethereum_proto_rawDesc = []byte{
0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x62, 0x47, 0x61, 0x73, 0x50,
0x72, 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73,
0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x48, 0x05, 0x52, 0x0b, 0x62,
- 0x6c, 0x6f, 0x62, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x1a, 0x88, 0x01, 0x0a, 0x09, 0x4c,
- 0x31, 0x46, 0x65, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x31, 0x5f, 0x67,
- 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c,
- 0x31, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x6c, 0x31, 0x5f, 0x67,
- 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a,
- 0x6c, 0x31, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x31,
- 0x5f, 0x66, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x31, 0x46, 0x65,
- 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6c, 0x31, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6c,
- 0x61, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x31, 0x46, 0x65, 0x65, 0x53,
- 0x63, 0x61, 0x6c, 0x61, 0x72, 0x42, 0x11, 0x0a, 0x0f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61,
- 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x31, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
- 0x42, 0x18, 0x0a, 0x16, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x22, 0x0a, 0x20, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x72,
- 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x19,
- 0x0a, 0x17, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f,
- 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x42, 0x18, 0x0a, 0x16, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75,
- 0x73, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x22, 0xa9, 0x02, 0x0a, 0x10, 0x45, 0x74,
- 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f,
- 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x67,
- 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68,
- 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a,
- 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c,
- 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
- 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a,
- 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74,
- 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0xa0, 0x02, 0x0a, 0x18, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
- 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61,
+ 0x6c, 0x6f, 0x62, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x03, 0x66, 0x65,
+ 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x48, 0x06, 0x52, 0x03, 0x66, 0x65, 0x65, 0x12, 0x19,
+ 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x48,
+ 0x07, 0x52, 0x06, 0x6e, 0x65, 0x74, 0x46, 0x65, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x6e, 0x65, 0x74,
+ 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x48, 0x08, 0x52, 0x08,
+ 0x6e, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x65, 0x6e, 0x65, 0x72,
+ 0x67, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x48, 0x09,
+ 0x52, 0x0b, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0a, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28,
+ 0x04, 0x48, 0x0a, 0x52, 0x09, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x46, 0x65, 0x65, 0x12, 0x30,
+ 0x0a, 0x13, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f,
+ 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0b, 0x52, 0x11, 0x6f,
+ 0x72, 0x69, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0c, 0x52, 0x10,
+ 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x74, 0x61, 0x6c,
+ 0x12, 0x32, 0x0a, 0x14, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c,
+ 0x74, 0x79, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0d,
+ 0x52, 0x12, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x54,
+ 0x6f, 0x74, 0x61, 0x6c, 0x1a, 0x88, 0x01, 0x0a, 0x09, 0x4c, 0x31, 0x46, 0x65, 0x65, 0x49, 0x6e,
+ 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x31, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c, 0x31, 0x47, 0x61, 0x73, 0x55, 0x73,
+ 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x6c, 0x31, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x6c, 0x31, 0x47, 0x61, 0x73, 0x50,
+ 0x72, 0x69, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x31, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x31, 0x46, 0x65, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6c,
+ 0x31, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x31, 0x46, 0x65, 0x65, 0x53, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x42,
+ 0x11, 0x0a, 0x0f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6c,
+ 0x31, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x18, 0x0a, 0x16, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x6e,
+ 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x22, 0x0a, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x5f, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x19, 0x0a, 0x17, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72,
+ 0x69, 0x63, 0x65, 0x42, 0x18, 0x0a, 0x16, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x42, 0x0e, 0x0a,
+ 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x65, 0x42, 0x12, 0x0a,
+ 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x65, 0x74, 0x5f, 0x66, 0x65,
+ 0x65, 0x42, 0x14, 0x0a, 0x12, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x65,
+ 0x74, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x42, 0x17, 0x0a, 0x15, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x42, 0x15, 0x0a, 0x13, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x65,
+ 0x72, 0x67, 0x79, 0x5f, 0x66, 0x65, 0x65, 0x42, 0x1e, 0x0a, 0x1c, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x65, 0x72, 0x67,
+ 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x1f, 0x0a, 0x1d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x65, 0x72, 0x67, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74,
+ 0x79, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x22, 0xa9, 0x02,
+ 0x0a, 0x10, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c,
+ 0x6f, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65,
+ 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d,
+ 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0xa0, 0x02, 0x0a, 0x18, 0x45, 0x74,
+ 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x02, 0x74, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x19, 0x0a, 0x08,
+ 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
+ 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a,
+ 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f,
+ 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x0a,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x74, 0x68,
+ 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xfc, 0x04, 0x0a,
+ 0x21, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x61, 0x74, 0x74, 0x65, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61,
0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
@@ -2729,111 +3069,101 @@ var file_coinbase_chainstorage_blockchain_ethereum_proto_rawDesc = []byte{
0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74,
0x70, 0x75, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75,
- 0x74, 0x12, 0x45, 0x0a, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69,
- 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
- 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63,
- 0x65, 0x52, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xae, 0x04, 0x0a, 0x21, 0x45, 0x74, 0x68,
- 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x46, 0x6c, 0x61, 0x74, 0x74, 0x65, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e, 0x0a, 0x02,
- 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x74, 0x6f, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x03, 0x67, 0x61, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12,
- 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1c, 0x0a,
- 0x09, 0x73, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x09, 0x73, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74,
- 0x72, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0b, 0x20, 0x03,
- 0x28, 0x04, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
- 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x72, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x1b, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08,
- 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
- 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x10, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21,
- 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x11,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65,
- 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x11,
- 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65,
- 0x78, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xe6, 0x03, 0x0a, 0x15, 0x45, 0x74,
- 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x66, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64,
- 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f, 0x6d,
- 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x66, 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74,
- 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x12,
+ 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x0b, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x72, 0x61, 0x63, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73,
+ 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61,
+ 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68,
0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x29, 0x0a,
- 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73,
- 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f,
- 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x67,
- 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68,
- 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
- 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63,
- 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x65, 0x72, 0x63, 0x32, 0x30,
- 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x45,
- 0x52, 0x43, 0x32, 0x30, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65,
- 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x63, 0x32, 0x30, 0x12, 0x44, 0x0a, 0x06, 0x65, 0x72,
- 0x63, 0x37, 0x32, 0x31, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x69,
- 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x45, 0x52, 0x43, 0x37, 0x32, 0x31, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x65, 0x72, 0x63, 0x37, 0x32, 0x31,
- 0x42, 0x10, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66,
- 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x12, 0x45, 0x52, 0x43, 0x32, 0x30, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f, 0x6d,
- 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x66, 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74,
- 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x22, 0x72, 0x0a, 0x13, 0x45, 0x52, 0x43, 0x37, 0x32, 0x31, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x5f,
- 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66,
- 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f,
- 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
- 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x19, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
- 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x6f,
- 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f,
- 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x3b, 0x0a, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
- 0x75, 0x6d, 0x45, 0x78, 0x74, 0x72, 0x61, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x0e,
- 0x65, 0x72, 0x63, 0x32, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x72, 0x63, 0x32, 0x30, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x61, 0x63, 0x74, 0x22, 0x74, 0x0a, 0x1c, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09,
- 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x63, 0x6f, 0x64, 0x65, 0x48, 0x61, 0x73, 0x68, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74,
- 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
- 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
+ 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x4c, 0x0a,
+ 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x63, 0x61,
+ 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xe6, 0x03, 0x0a, 0x15,
+ 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72,
+ 0x6f, 0x6d, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
+ 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68,
+ 0x61, 0x73, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f,
+ 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c,
+ 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
+ 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x65, 0x72, 0x63,
+ 0x32, 0x30, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x69, 0x6e, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x45, 0x52, 0x43, 0x32, 0x30, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73,
+ 0x66, 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x63, 0x32, 0x30, 0x12, 0x44, 0x0a, 0x06,
+ 0x65, 0x72, 0x63, 0x37, 0x32, 0x31, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63,
+ 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x52, 0x43, 0x37, 0x32, 0x31, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x65, 0x72, 0x63, 0x37,
+ 0x32, 0x31, 0x42, 0x10, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x66, 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x12, 0x45, 0x52, 0x43, 0x32, 0x30, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72,
+ 0x6f, 0x6d, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x72, 0x0a, 0x13, 0x45, 0x52, 0x43, 0x37, 0x32, 0x31, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f,
+ 0x6d, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x19, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
+ 0x75, 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72,
+ 0x6f, 0x6f, 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70,
+ 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x3b, 0x0a, 0x12, 0x45, 0x74, 0x68, 0x65,
+ 0x72, 0x65, 0x75, 0x6d, 0x45, 0x78, 0x74, 0x72, 0x61, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x25,
+ 0x0a, 0x0e, 0x65, 0x72, 0x63, 0x32, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x72, 0x63, 0x32, 0x30, 0x43, 0x6f, 0x6e,
+ 0x74, 0x72, 0x61, 0x63, 0x74, 0x22, 0x74, 0x0a, 0x1c, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
+ 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b,
+ 0x0a, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x49, 0x0a, 0x0d, 0x43,
+ 0x61, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x61, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68,
+ 0x61, 0x69, 0x6e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x73, 0x2f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2848,7 +3178,7 @@ func file_coinbase_chainstorage_blockchain_ethereum_proto_rawDescGZIP() []byte {
return file_coinbase_chainstorage_blockchain_ethereum_proto_rawDescData
}
-var file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
var file_coinbase_chainstorage_blockchain_ethereum_proto_goTypes = []interface{}{
(*EthereumBlobdata)(nil), // 0: coinbase.chainstorage.EthereumBlobdata
(*PolygonExtraData)(nil), // 1: coinbase.chainstorage.PolygonExtraData
@@ -2868,32 +3198,34 @@ var file_coinbase_chainstorage_blockchain_ethereum_proto_goTypes = []interface{}
(*EthereumAccountStateProof)(nil), // 15: coinbase.chainstorage.EthereumAccountStateProof
(*EthereumExtraInput)(nil), // 16: coinbase.chainstorage.EthereumExtraInput
(*EthereumAccountStateResponse)(nil), // 17: coinbase.chainstorage.EthereumAccountStateResponse
- (*EthereumTransactionReceipt_L1FeeInfo)(nil), // 18: coinbase.chainstorage.EthereumTransactionReceipt.L1FeeInfo
- (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp
+ (*CallValueInfo)(nil), // 18: coinbase.chainstorage.CallValueInfo
+ (*EthereumTransactionReceipt_L1FeeInfo)(nil), // 19: coinbase.chainstorage.EthereumTransactionReceipt.L1FeeInfo
+ (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp
}
var file_coinbase_chainstorage_blockchain_ethereum_proto_depIdxs = []int32{
1, // 0: coinbase.chainstorage.EthereumBlobdata.polygon:type_name -> coinbase.chainstorage.PolygonExtraData
4, // 1: coinbase.chainstorage.EthereumBlock.header:type_name -> coinbase.chainstorage.EthereumHeader
7, // 2: coinbase.chainstorage.EthereumBlock.transactions:type_name -> coinbase.chainstorage.EthereumTransaction
4, // 3: coinbase.chainstorage.EthereumBlock.uncles:type_name -> coinbase.chainstorage.EthereumHeader
- 19, // 4: coinbase.chainstorage.EthereumHeader.timestamp:type_name -> google.protobuf.Timestamp
+ 20, // 4: coinbase.chainstorage.EthereumHeader.timestamp:type_name -> google.protobuf.Timestamp
3, // 5: coinbase.chainstorage.EthereumHeader.withdrawals:type_name -> coinbase.chainstorage.EthereumWithdrawal
5, // 6: coinbase.chainstorage.EthereumTransactionAccessList.access_list:type_name -> coinbase.chainstorage.EthereumTransactionAccess
8, // 7: coinbase.chainstorage.EthereumTransaction.receipt:type_name -> coinbase.chainstorage.EthereumTransactionReceipt
12, // 8: coinbase.chainstorage.EthereumTransaction.token_transfers:type_name -> coinbase.chainstorage.EthereumTokenTransfer
6, // 9: coinbase.chainstorage.EthereumTransaction.transaction_access_list:type_name -> coinbase.chainstorage.EthereumTransactionAccessList
11, // 10: coinbase.chainstorage.EthereumTransaction.flattened_traces:type_name -> coinbase.chainstorage.EthereumTransactionFlattenedTrace
- 19, // 11: coinbase.chainstorage.EthereumTransaction.block_timestamp:type_name -> google.protobuf.Timestamp
+ 20, // 11: coinbase.chainstorage.EthereumTransaction.block_timestamp:type_name -> google.protobuf.Timestamp
9, // 12: coinbase.chainstorage.EthereumTransactionReceipt.logs:type_name -> coinbase.chainstorage.EthereumEventLog
- 18, // 13: coinbase.chainstorage.EthereumTransactionReceipt.l1_fee_info:type_name -> coinbase.chainstorage.EthereumTransactionReceipt.L1FeeInfo
+ 19, // 13: coinbase.chainstorage.EthereumTransactionReceipt.l1_fee_info:type_name -> coinbase.chainstorage.EthereumTransactionReceipt.L1FeeInfo
10, // 14: coinbase.chainstorage.EthereumTransactionTrace.calls:type_name -> coinbase.chainstorage.EthereumTransactionTrace
- 13, // 15: coinbase.chainstorage.EthereumTokenTransfer.erc20:type_name -> coinbase.chainstorage.ERC20TokenTransfer
- 14, // 16: coinbase.chainstorage.EthereumTokenTransfer.erc721:type_name -> coinbase.chainstorage.ERC721TokenTransfer
- 17, // [17:17] is the sub-list for method output_type
- 17, // [17:17] is the sub-list for method input_type
- 17, // [17:17] is the sub-list for extension type_name
- 17, // [17:17] is the sub-list for extension extendee
- 0, // [0:17] is the sub-list for field type_name
+ 18, // 15: coinbase.chainstorage.EthereumTransactionFlattenedTrace.call_value_info:type_name -> coinbase.chainstorage.CallValueInfo
+ 13, // 16: coinbase.chainstorage.EthereumTokenTransfer.erc20:type_name -> coinbase.chainstorage.ERC20TokenTransfer
+ 14, // 17: coinbase.chainstorage.EthereumTokenTransfer.erc721:type_name -> coinbase.chainstorage.ERC721TokenTransfer
+ 18, // [18:18] is the sub-list for method output_type
+ 18, // [18:18] is the sub-list for method input_type
+ 18, // [18:18] is the sub-list for extension type_name
+ 18, // [18:18] is the sub-list for extension extendee
+ 0, // [0:18] is the sub-list for field type_name
}
func init() { file_coinbase_chainstorage_blockchain_ethereum_proto_init() }
@@ -3119,6 +3451,18 @@ func file_coinbase_chainstorage_blockchain_ethereum_proto_init() {
}
}
file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CallValueInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumTransactionReceipt_L1FeeInfo); i {
case 0:
return &v.state
@@ -3156,6 +3500,14 @@ func file_coinbase_chainstorage_blockchain_ethereum_proto_init() {
(*EthereumTransactionReceipt_DepositReceiptVersion)(nil),
(*EthereumTransactionReceipt_BlobGasPrice)(nil),
(*EthereumTransactionReceipt_BlobGasUsed)(nil),
+ (*EthereumTransactionReceipt_Fee)(nil),
+ (*EthereumTransactionReceipt_NetFee)(nil),
+ (*EthereumTransactionReceipt_NetUsage)(nil),
+ (*EthereumTransactionReceipt_EnergyUsage)(nil),
+ (*EthereumTransactionReceipt_EnergyFee)(nil),
+ (*EthereumTransactionReceipt_OriginEnergyUsage)(nil),
+ (*EthereumTransactionReceipt_EnergyUsageTotal)(nil),
+ (*EthereumTransactionReceipt_EnergyPenaltyTotal)(nil),
}
file_coinbase_chainstorage_blockchain_ethereum_proto_msgTypes[12].OneofWrappers = []interface{}{
(*EthereumTokenTransfer_Erc20)(nil),
@@ -3167,7 +3519,7 @@ func file_coinbase_chainstorage_blockchain_ethereum_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_coinbase_chainstorage_blockchain_ethereum_proto_rawDesc,
NumEnums: 0,
- NumMessages: 19,
+ NumMessages: 20,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/protos/coinbase/chainstorage/blockchain_ethereum.proto b/protos/coinbase/chainstorage/blockchain_ethereum.proto
index b5b9d02..0046d49 100644
--- a/protos/coinbase/chainstorage/blockchain_ethereum.proto
+++ b/protos/coinbase/chainstorage/blockchain_ethereum.proto
@@ -168,8 +168,33 @@ message EthereumTransactionReceipt {
oneof optional_blob_gas_used {
uint64 blob_gas_used = 21;
}
+ oneof optional_fee {
+ uint64 fee = 22;
+ }
+ oneof optional_net_fee {
+ uint64 net_fee = 23;
+ }
+ oneof optional_net_usage {
+ uint64 net_usage = 24;
+ }
+ oneof optional_energy_usage {
+ uint64 energy_usage = 25;
+ }
+ oneof optional_energy_fee {
+ uint64 energy_fee = 26;
+ }
+ oneof optional_origin_energy_usage {
+ uint64 origin_energy_usage = 27;
+ }
+ oneof optional_energy_usage_total {
+ uint64 energy_usage_total = 28;
+ }
+ oneof optional_energy_penalty_total {
+ uint64 energy_penalty_total = 29;
+ }
}
+
message EthereumEventLog {
bool removed = 1;
uint64 log_index = 2;
@@ -215,6 +240,7 @@ message EthereumTransactionFlattenedTrace {
uint64 block_number = 17;
string transaction_hash = 18;
uint64 transaction_index = 19;
+ repeated CallValueInfo call_value_info = 20;
}
message EthereumTokenTransfer {
@@ -258,3 +284,8 @@ message EthereumAccountStateResponse {
string storage_hash = 2;
string code_hash = 3;
}
+
+message CallValueInfo {
+ string token_id = 1;
+ int64 call_value = 2;
+}
diff --git a/protos/coinbase/chainstorage/blockchain_ethereum_beacon.pb.go b/protos/coinbase/chainstorage/blockchain_ethereum_beacon.pb.go
index 0ad51ed..eacee15 100644
--- a/protos/coinbase/chainstorage/blockchain_ethereum_beacon.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_ethereum_beacon.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_ethereum_beacon.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/blockchain_rosetta.pb.go b/protos/coinbase/chainstorage/blockchain_rosetta.pb.go
index 3143ac9..5255ef1 100644
--- a/protos/coinbase/chainstorage/blockchain_rosetta.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_rosetta.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_rosetta.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/blockchain_solana.pb.go b/protos/coinbase/chainstorage/blockchain_solana.pb.go
index 7663715..d4b9cfd 100644
--- a/protos/coinbase/chainstorage/blockchain_solana.pb.go
+++ b/protos/coinbase/chainstorage/blockchain_solana.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/chainstorage/blockchain_solana.proto
package chainstorage
diff --git a/protos/coinbase/chainstorage/mocks/mocks.go b/protos/coinbase/chainstorage/mocks/mocks.go
index 142bdab..1a71c96 100644
--- a/protos/coinbase/chainstorage/mocks/mocks.go
+++ b/protos/coinbase/chainstorage/mocks/mocks.go
@@ -42,6 +42,26 @@ func (m *MockChainStorageClient) EXPECT() *MockChainStorageClientMockRecorder {
return m.recorder
}
+// GetBlockByTimestamp mocks base method.
+func (m *MockChainStorageClient) GetBlockByTimestamp(arg0 context.Context, arg1 *chainstorage.GetBlockByTimestampRequest, arg2 ...grpc.CallOption) (*chainstorage.GetBlockByTimestampResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetBlockByTimestamp", varargs...)
+ ret0, _ := ret[0].(*chainstorage.GetBlockByTimestampResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBlockByTimestamp indicates an expected call of GetBlockByTimestamp.
+func (mr *MockChainStorageClientMockRecorder) GetBlockByTimestamp(arg0, arg1 any, arg2 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByTimestamp", reflect.TypeOf((*MockChainStorageClient)(nil).GetBlockByTimestamp), varargs...)
+}
+
// GetBlockByTransaction mocks base method.
func (m *MockChainStorageClient) GetBlockByTransaction(arg0 context.Context, arg1 *chainstorage.GetBlockByTransactionRequest, arg2 ...grpc.CallOption) (*chainstorage.GetBlockByTransactionResponse, error) {
m.ctrl.T.Helper()
diff --git a/protos/coinbase/crypto/rosetta/types/account_identifer.pb.go b/protos/coinbase/crypto/rosetta/types/account_identifer.pb.go
index e3b5971..96b629d 100644
--- a/protos/coinbase/crypto/rosetta/types/account_identifer.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/account_identifer.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/account_identifer.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/amount.pb.go b/protos/coinbase/crypto/rosetta/types/amount.pb.go
index c51382e..7b0a0e3 100644
--- a/protos/coinbase/crypto/rosetta/types/amount.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/amount.pb.go
@@ -6,7 +6,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/amount.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/block.pb.go b/protos/coinbase/crypto/rosetta/types/block.pb.go
index 9a2f9e6..d85a17b 100644
--- a/protos/coinbase/crypto/rosetta/types/block.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/block.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/block.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/coin_change.pb.go b/protos/coinbase/crypto/rosetta/types/coin_change.pb.go
index 2de5d86..575682b 100644
--- a/protos/coinbase/crypto/rosetta/types/coin_change.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/coin_change.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/coin_change.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/network_identifier.pb.go b/protos/coinbase/crypto/rosetta/types/network_identifier.pb.go
index ba8dfb3..3584eec 100644
--- a/protos/coinbase/crypto/rosetta/types/network_identifier.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/network_identifier.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/network_identifier.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/operation.pb.go b/protos/coinbase/crypto/rosetta/types/operation.pb.go
index f34553a..664f5f3 100644
--- a/protos/coinbase/crypto/rosetta/types/operation.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/operation.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/operation.proto
// The stable release for rosetta types
diff --git a/protos/coinbase/crypto/rosetta/types/transaction.pb.go b/protos/coinbase/crypto/rosetta/types/transaction.pb.go
index 1f902d0..330e6e1 100644
--- a/protos/coinbase/crypto/rosetta/types/transaction.pb.go
+++ b/protos/coinbase/crypto/rosetta/types/transaction.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc v5.29.4
// source: coinbase/crypto/rosetta/types/transaction.proto
// The stable release for rosetta types
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
index 7072e54..8bd95dc 100755
--- a/scripts/bootstrap.sh
+++ b/scripts/bootstrap.sh
@@ -8,7 +8,7 @@ go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.32.0
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
go install go.uber.org/mock/mockgen@v0.4.0
go install github.com/gordonklaus/ineffassign@v0.0.0-20230610083614-0e73809eb601
-go install github.com/kisielk/errcheck@v1.8.0
+go install github.com/kisielk/errcheck@latest
go install golang.org/x/tools/cmd/goimports@v0.17.0
go mod download
go mod tidy
diff --git a/scripts/init-local-postgres.sh b/scripts/init-local-postgres.sh
new file mode 100755
index 0000000..83004b2
--- /dev/null
+++ b/scripts/init-local-postgres.sh
@@ -0,0 +1,112 @@
+#!/bin/sh
+set -e
+
+echo "🚀 Initializing PostgreSQL for local Chainstorage development..."
+
+# --- Configuration ---
+# Master credentials (from Docker environment)
+MASTER_USER="${POSTGRES_USER:-postgres}"
+MASTER_PASSWORD="${POSTGRES_PASSWORD:-postgres}"
+
+# Shared passwords for all network-specific roles (from Docker environment)
+WORKER_PASSWORD="${CHAINSTORAGE_WORKER_PASSWORD:-worker_password}"
+SERVER_PASSWORD="${CHAINSTORAGE_SERVER_PASSWORD:-server_password}"
+
+# List of all networks to create. Format: <blockchain>_<network>
+NETWORKS="
+ethereum_mainnet
+ethereum_goerli
+ethereum_holesky
+bitcoin_mainnet
+base_mainnet
+base_goerli
+arbitrum_mainnet
+polygon_mainnet
+polygon_testnet
+solana_mainnet
+aptos_mainnet
+avacchain_mainnet
+bsc_mainnet
+fantom_mainnet
+optimism_mainnet
+tron_mainnet
+story_mainnet
+dogecoin_mainnet
+litecoin_mainnet
+bitcoincash_mainnet
+"
+
+# --- Helper Functions ---
+# Function to execute a SQL command against the master 'postgres' database.
+# It uses the default Unix socket connection, which is the most reliable option for init scripts.
+psql_master() {
+ PGPASSWORD="$MASTER_PASSWORD" psql -v ON_ERROR_STOP=1 -U "$MASTER_USER" -d "postgres" --no-password -c "$1"
+}
+
+# Function to execute a SQL command against a specific network database.
+psql_network() {
+ local db_name=$1
+ local sql_command=$2
+ PGPASSWORD="$MASTER_PASSWORD" psql -v ON_ERROR_STOP=1 -U "$MASTER_USER" -d "$db_name" --no-password -c "$sql_command"
+}
+
+# --- Main Logic ---
+# The official postgres entrypoint script executes files in /docker-entrypoint-initdb.d/
+# after the server is initialized but before it's opened for general connections.
+# This means we don't need a separate wait loop; the server is ready for us.
+echo "✅ PostgreSQL server is ready for initialization."
+echo ""
+
+# Loop through all networks to create databases and roles
+for network in $NETWORKS; do
+ if [ -n "$network" ]; then
+ db_name="chainstorage_$network"
+ worker_user="cs_${network}_worker"
+ server_user="cs_${network}_server"
+
+ echo "📦 Setting up: $db_name"
+
+ # --- Create Roles (if they don't exist) ---
+ echo " - Creating role: $worker_user"
+ psql_master "DO \$\$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$worker_user') THEN CREATE ROLE \"$worker_user\" WITH LOGIN PASSWORD '$WORKER_PASSWORD'; ELSE ALTER ROLE \"$worker_user\" WITH PASSWORD '$WORKER_PASSWORD'; END IF; END \$\$;"
+
+ echo " - Creating role: $server_user"
+ psql_master "DO \$\$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$server_user') THEN CREATE ROLE \"$server_user\" WITH LOGIN PASSWORD '$SERVER_PASSWORD'; ELSE ALTER ROLE \"$server_user\" WITH PASSWORD '$SERVER_PASSWORD'; END IF; END \$\$;"
+
+ # --- Create Database (if it doesn't exist) ---
+ echo " - Creating database: $db_name"
+ # Check pg_database first, since PostgreSQL has no "CREATE DATABASE IF NOT EXISTS"
+ if ! psql_master "SELECT 1 FROM pg_database WHERE datname = '$db_name'" | grep -q 1; then
+ psql_master "CREATE DATABASE \"$db_name\" OWNER \"$worker_user\";"
+ else
+ echo " Database $db_name already exists, skipping creation."
+ fi
+
+ # --- Grant Permissions ---
+ echo " - Granting permissions..."
+ # Grant connect to the server user on the new database
+ psql_master "GRANT CONNECT ON DATABASE \"$db_name\" TO \"$server_user\";"
+
+ # Connect to the new database to set schema permissions
+ # Grant server read-only access
+ psql_network "$db_name" "GRANT USAGE ON SCHEMA public TO \"$server_user\";"
+ psql_network "$db_name" "GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"$server_user\";"
+ psql_network "$db_name" "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO \"$server_user\";"
+
+ # Grant worker full access
+ psql_network "$db_name" "GRANT ALL PRIVILEGES ON SCHEMA public TO \"$worker_user\";"
+ psql_network "$db_name" "GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \"$worker_user\";"
+ psql_network "$db_name" "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON TABLES TO \"$worker_user\";"
+
+ echo " - ✅ Setup complete for $db_name"
+ echo ""
+ fi
+done
+
+echo "🎉 All network databases initialized successfully for local development!"
+echo ""
+echo "📋 Summary of Shared Credentials:"
+echo " - All worker roles (e.g., cs_ethereum_mainnet_worker) use password: '$WORKER_PASSWORD'"
+echo " - All server roles (e.g., cs_ethereum_mainnet_server) use password: '$SERVER_PASSWORD'"
+echo ""
+echo "🚀 Ready to start Chainstorage!"
\ No newline at end of file
diff --git a/scripts/init-temporal-postgres.sh b/scripts/init-temporal-postgres.sh
new file mode 100755
index 0000000..be5779a
--- /dev/null
+++ b/scripts/init-temporal-postgres.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -e
+
+echo "Setting up PostgreSQL for Temporal..."
+
+# Since temporal is already the main PostgreSQL user, we only need to create the visibility database and grant privileges
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-'EOSQL'
+ -- Grant CREATEDB privilege to temporal so it can create additional databases
+ ALTER ROLE temporal CREATEDB;
+
+ -- Create temporal_visibility database (temporal database already exists as default)
+ CREATE DATABASE temporal_visibility OWNER temporal;
+
+ -- Grant all privileges to temporal role on both databases
+ GRANT ALL PRIVILEGES ON DATABASE temporal TO temporal;
+ GRANT ALL PRIVILEGES ON DATABASE temporal_visibility TO temporal;
+EOSQL
+
+echo "✅ Temporal PostgreSQL setup complete!"
+echo "Databases: temporal, temporal_visibility"
+echo "Roles: temporal (CREATEDB + full access)"
\ No newline at end of file
diff --git a/scripts/protogen-py.sh b/scripts/protogen-py.sh
new file mode 100755
index 0000000..b4e7984
--- /dev/null
+++ b/scripts/protogen-py.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+
+python -m grpc_tools.protoc \
+ --python_out=gen/src/python \
+ --grpc_python_out=gen/src/python \
+ --proto_path=protos \
+ protos/coinbase/chainstorage/*.proto \
+ protos/coinbase/c3/common/*.proto \
+ protos/coinbase/crypto/rosetta/types/*.proto
diff --git a/sdk/client.go b/sdk/client.go
index 082d591..a95c95f 100644
--- a/sdk/client.go
+++ b/sdk/client.go
@@ -80,6 +80,11 @@ type (
// Note that this API is still experimental and may change at any time.
GetBlockByTransaction(ctx context.Context, tag uint32, transactionHash string) ([]*api.Block, error)
+ // GetBlockByTimestamp returns the latest block before or at the given timestamp.
+ // The timestamp should be a Unix timestamp (seconds since January 1, 1970 UTC).
+ // If no block is found at or before the timestamp, it returns an error.
+ GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.Block, error)
+
// StreamChainEvents streams raw blocks from ChainStorage.
// The caller is responsible for keeping track of the sequence or sequence_num in BlockchainEvent.
StreamChainEvents(ctx context.Context, cfg StreamingConfiguration) (<-chan *ChainEventResult, error)
@@ -483,6 +488,24 @@ func (c *clientImpl) GetBlockByTransaction(ctx context.Context, tag uint32, tran
return blocks, nil
}
+func (c *clientImpl) GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.Block, error) {
+ resp, err := c.client.GetBlockByTimestamp(ctx, &api.GetBlockByTimestampRequest{
+ Tag: tag,
+ Timestamp: timestamp,
+ })
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get block by timestamp (tag=%v, timestamp=%v): %w", tag, timestamp, err)
+ }
+
+ // Download the block data using the metadata from the response
+ block, err := c.downloadBlock(ctx, resp.Tag, resp.Height, resp.Hash)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to download block data: %w", err)
+ }
+
+ return block, nil
+}
+
func (c *clientImpl) validateBlock(ctx context.Context, rawBlock *api.Block) error {
hash := rawBlock.GetMetadata().GetHash()
height := rawBlock.GetMetadata().GetHeight()
diff --git a/sdk/client_interceptor.go b/sdk/client_interceptor.go
index 0f6c0ea..960aa89 100644
--- a/sdk/client_interceptor.go
+++ b/sdk/client_interceptor.go
@@ -134,6 +134,15 @@ func (c *timeoutableClient) GetBlockByTransaction(ctx context.Context, tag uint3
})
}
+func (c *timeoutableClient) GetBlockByTimestamp(ctx context.Context, tag uint32, timestamp uint64) (*api.Block, error) {
+ return intercept(ctx, c.logger, func(ctx context.Context) (*api.Block, error) {
+ ctx, cancel := context.WithTimeout(ctx, c.mediumTimeout)
+ defer cancel()
+
+ return c.client.GetBlockByTimestamp(ctx, tag, timestamp)
+ })
+}
+
func (c *timeoutableClient) StreamChainEvents(ctx context.Context, cfg StreamingConfiguration) (<-chan *ChainEventResult, error) {
// No timeout is implemented.
return c.client.StreamChainEvents(ctx, cfg)
diff --git a/sdk/mocks/mocks.go b/sdk/mocks/mocks.go
index a9da86b..807fa1e 100644
--- a/sdk/mocks/mocks.go
+++ b/sdk/mocks/mocks.go
@@ -56,6 +56,21 @@ func (mr *MockClientMockRecorder) GetBlock(arg0, arg1, arg2 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockClient)(nil).GetBlock), arg0, arg1, arg2)
}
+// GetBlockByTimestamp mocks base method.
+func (m *MockClient) GetBlockByTimestamp(arg0 context.Context, arg1 uint32, arg2 uint64) (*chainstorage.Block, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBlockByTimestamp", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*chainstorage.Block)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBlockByTimestamp indicates an expected call of GetBlockByTimestamp.
+func (mr *MockClientMockRecorder) GetBlockByTimestamp(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByTimestamp", reflect.TypeOf((*MockClient)(nil).GetBlockByTimestamp), arg0, arg1, arg2)
+}
+
// GetBlockByTransaction mocks base method.
func (m *MockClient) GetBlockByTransaction(arg0 context.Context, arg1 uint32, arg2 string) ([]*chainstorage.Block, error) {
m.ctrl.T.Helper()