From 6200988325b25da225c6103d9030ca83a659a773 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 25 Nov 2025 13:22:22 +0100 Subject: [PATCH 01/75] feat: unsafe sync wip --- common/block_range.go | 11 + common/block_range_test.go | 9 + common/time_tracker.go | 2 +- .../block_notifier/block_notifier_polling.go | 4 +- multidownloader/config.go | 11 +- multidownloader/config_test.go | 10 +- multidownloader/evm_multidownloader.go | 293 +++++++++++++++++- multidownloader/evm_multidownloader_test.go | 5 +- multidownloader/state.go | 22 ++ multidownloader/storage/migrations/0002.sql | 36 +++ .../storage/migrations/migrations.go | 7 + multidownloader/storage/storage_block.go | 54 +++- multidownloader/storage/storage_test.go | 22 +- multidownloader/types/log_query.go | 38 ++- multidownloader/types/log_query_test.go | 19 ++ multidownloader/types/mocks/mock_storager.go | 174 +++++++++++ multidownloader/types/set_sync_segment.go | 66 +++- .../types/set_sync_segment_test.go | 56 +++- multidownloader/types/storager.go | 12 + multidownloader/types/sync_segment.go | 21 +- 20 files changed, 820 insertions(+), 52 deletions(-) create mode 100644 multidownloader/state.go create mode 100644 multidownloader/storage/migrations/0002.sql diff --git a/common/block_range.go b/common/block_range.go index 9c486df88..b4929ffc1 100644 --- a/common/block_range.go +++ b/common/block_range.go @@ -191,3 +191,14 @@ func ChunkedRangeQuery[T any]( return all, nil } + +func (b BlockRange) ListBlockNumbers() []uint64 { + if b.IsEmpty() { + return []uint64{} + } + blockNumbers := make([]uint64, 0, b.CountBlocks()) + for i := b.FromBlock; i <= b.ToBlock; i++ { + blockNumbers = append(blockNumbers, i) + } + return blockNumbers +} diff --git a/common/block_range_test.go b/common/block_range_test.go index be22e95d4..86aa4d940 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -471,3 +471,12 @@ func TestChunkedRangeQuery_EmptyRange(t *testing.T) { require.NoError(t, err) require.Equal(t, empty, result) } + +func TestBlockRange_ListBlockNumbers(t *testing.T) { + bn1 := NewBlockRange(1, 1) + require.Equal(t, []uint64{1}, bn1.ListBlockNumbers()) + bn2 := NewBlockRange(3, 5) + require.Equal(t, []uint64{3, 4, 5}, bn2.ListBlockNumbers()) + bn3 := NewBlockRange(0, 0) + require.Equal(t, []uint64{}, bn3.ListBlockNumbers()) +} diff --git a/common/time_tracker.go b/common/time_tracker.go index 4775d3ced..286be69dd 100644 --- a/common/time_tracker.go +++ b/common/time_tracker.go @@ -18,7 +18,7 @@ type TimeTracker struct { func (t *TimeTracker) String() string { return "TimeTracker{times=" + strconv.Itoa(int(t.times)) + - "lastDuration=" + t.lastDuration.String() + + ", lastDuration=" + t.lastDuration.String() + ", accumulated=" + t.accumulated.String() + "}" } diff --git a/etherman/block_notifier/block_notifier_polling.go b/etherman/block_notifier/block_notifier_polling.go index 51ed45dd8..7401b026a 100644 --- a/etherman/block_notifier/block_notifier_polling.go +++ b/etherman/block_notifier/block_notifier_polling.go @@ -161,7 +161,7 @@ func (b *BlockNotifierPolling) step(ctx context.Context, BlockFinalityType: b.config.BlockFinalityType, } if previousState.lastBlockSeen > currentBlock { - b.logger.Warnf("Block number decreased [finality:%s]: %d -> %d", + b.logger.Infof("Block number decreased [finality:%s]: %d -> %d", b.config.BlockFinalityType.String(), previousState.lastBlockSeen, currentBlock) // It start from scratch because something fails in calculation of block period newState := previousState.initialBlock(currentBlock) @@ -170,7 +170,7 @@ func (b *BlockNotifierPolling) step(ctx context.Context, if currentBlock-previousState.lastBlockSeen != 1 { if !b.config.BlockFinalityType.IsSafe() && !b.config.BlockFinalityType.IsFinalized() { - b.logger.Warnf("Missed block(s) [finality:%s]: %d -> %d", + b.logger.Infof("Missed block(s) [finality:%s]: %d -> %d", b.config.BlockFinalityType.String(), previousState.lastBlockSeen, currentBlock) } diff --git a/multidownloader/config.go b/multidownloader/config.go index 8e9a13847..442b57b5e 100644 --- a/multidownloader/config.go +++ b/multidownloader/config.go @@ -34,12 +34,16 @@ type Config struct { BlockFinality aggkittypes.BlockNumberFinality // WaitPeriodToCheckCatchUp is the duration to wait before checking again if logs are not yet available WaitPeriodToCheckCatchUp types.Duration + // PeriodToCheckReorgs is the duration to wait before checking for reorgs + // If is 0 reorgs are checked only when a new block appears + PeriodToCheckReorgs types.Duration } const ( defaultBlockChunkSize = 10000 defaultMaxParallelBlockHeaderRetrieval = 30 defaultWaitPeriodToCheckCatchUp = time.Second * 10 + defaultPeriodToCheckReorgs = time.Second * 5 ) func NewConfigDefault(name string, basePathDB string) Config { @@ -54,6 +58,7 @@ func NewConfigDefault(name string, basePathDB string) Config { MaxParallelBlockHeaderRetrieval: defaultMaxParallelBlockHeaderRetrieval, BlockFinality: aggkittypes.FinalizedBlock, WaitPeriodToCheckCatchUp: types.NewDuration(defaultWaitPeriodToCheckCatchUp), + PeriodToCheckReorgs: types.NewDuration(defaultPeriodToCheckReorgs), } } @@ -75,10 +80,12 @@ func (cfg *Config) Validate() error { func (cfg *Config) String() string { return fmt.Sprintf("MultidownloaderConfig{Enabled:%t, BlockChunkSize:%d, "+ - "MaxParallelBlockHeaderRetrieval:%d, BlockFinality:%s, WaitPeriodToCheckCatchUp:%s}", + "MaxParallelBlockHeaderRetrieval:%d, BlockFinality:%s, WaitPeriodToCheckCatchUp:%s, "+ + "PeriodToCheckReorgs:%s}", cfg.Enabled, cfg.BlockChunkSize, cfg.MaxParallelBlockHeaderRetrieval, cfg.BlockFinality.String(), - cfg.WaitPeriodToCheckCatchUp.String()) + cfg.WaitPeriodToCheckCatchUp.String(), + cfg.PeriodToCheckReorgs.String()) } diff --git a/multidownloader/config_test.go b/multidownloader/config_test.go index 2a628857a..a501a73b8 100644 --- a/multidownloader/config_test.go +++ b/multidownloader/config_test.go @@ -2,7 +2,6 @@ package multidownloader import ( "testing" - "time" "github.com/agglayer/aggkit/config/types" aggkittypes "github.com/agglayer/aggkit/types" @@ -13,10 +12,12 @@ func TestNewConfigDefault(t *testing.T) { cfg := NewConfigDefault("l1", "/tmp/aggkit/") require.Equal(t, false, cfg.Enabled) require.Equal(t, "/tmp/aggkit/l1_multidownloader.sqlite", cfg.StoragePath) - require.Equal(t, uint32(10000), cfg.BlockChunkSize, "BlockChunkSize should be 10000") - require.Equal(t, 30, cfg.MaxParallelBlockHeaderRetrieval, "MaxParallelBlockHeaderRetrieval should be 30") + require.Equal(t, uint32(defaultBlockChunkSize), cfg.BlockChunkSize, "BlockChunkSize should be 10000") + require.Equal(t, defaultMaxParallelBlockHeaderRetrieval, cfg.MaxParallelBlockHeaderRetrieval, "MaxParallelBlockHeaderRetrieval should be 30") require.Equal(t, aggkittypes.FinalizedBlock, cfg.BlockFinality, "BlockFinality should be FinalizedBlock") - require.Equal(t, types.NewDuration(time.Second*10), cfg.WaitPeriodToCheckCatchUp, "WaitPeriodToCheckCatchUp should be 10 seconds") + require.Equal(t, types.NewDuration(defaultWaitPeriodToCheckCatchUp), cfg.WaitPeriodToCheckCatchUp, "WaitPeriodToCheckCatchUp should be 10 seconds") + require.Equal(t, types.NewDuration(defaultPeriodToCheckReorgs), cfg.PeriodToCheckReorgs, "PeriodToCheckReorgs should be 5 seconds") + require.False(t, cfg.Enabled, "Enabled should be false by default") } @@ -102,5 +103,6 @@ func TestConfig_String(t *testing.T) { require.Contains(t, str, "MaxParallelBlockHeaderRetrieval", "String() should contain MaxParallelBlockHeaderRetrieval") require.Contains(t, str, "BlockFinality", "String() should contain BlockFinality") require.Contains(t, str, "WaitPeriodToCheckCatchUp", "String() should contain WaitPeriodToCheckCatchUp") + require.Contains(t, str, "PeriodToCheckReorgs", "String() should contain PeriodToCheckReorgs") require.Contains(t, str, "Enabled", "String() should contain Enabled") } diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 653cbffac..2e5160fec 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" "sync" + "time" jRPC "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" @@ -19,12 +20,14 @@ import ( "github.com/agglayer/aggkit/multidownloader/storage" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ethrpc "github.com/ethereum/go-ethereum/rpc" ) const ( - safeMode = true + safeMode = mdrtypes.Finalized + unsafeMode = mdrtypes.NotFinalized chunkSizeReductionFactor = 10 minChunkSize = 1 ) @@ -107,17 +110,88 @@ func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) erro return nil } -func (dh *EVMMultidownloader) Start(ctx context.Context) error { - err := dh.Initialize(ctx) +func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + + finalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) if err != nil { - return err + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get finalized block number: %w", err) } - err = dh.sync(ctx, dh.StepSafe, "safe") + committed := false + tx, err := dh.storage.NewTx(ctx) if err != nil { - return err + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot create new tx: %w", err) + } + defer func() { + if !committed { + dh.log.Debugf("MoveUnsafeToSafeIfPossible: rolling back tx") + if err := tx.Rollback(); err != nil { + dh.log.Errorf("MoveUnsafeToSafeIfPossible: error rolling back tx: %v", err) + } + } + }() + + blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, finalizedBlockNumber) + if err != nil { + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err) + } + dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) + err = dh.detectReorgs(ctx, blocks) + if err != nil { + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot detect reorgs: %w", err) + } + blockNumbers := make([]uint64, 0, len(blocks)) + for _, block := range blocks { + blockNumbers = append(blockNumbers, block.Number) } + err = dh.storage.UpdateBlockToFinalized(tx, blockNumbers) + if err != nil { + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err) + } + committed = true + if err := tx.Commit(); err != nil { + return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot commit tx: %w", err) + } + return nil +} + +func listBlockHeadersToMap(blocks []*aggkittypes.BlockHeader) map[uint64]*aggkittypes.BlockHeader { + result := make(map[uint64]*aggkittypes.BlockHeader, len(blocks)) + for _, block := range blocks { + result[block.Number] = block + } + return result +} + +func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, + blocks []*aggkittypes.BlockHeader) error { + // TODO: optimize this to don't check all blocks + // TODO: Find the first block to reorg + blocksNumber := make([]uint64, 0, len(blocks)) + for _, block := range blocks { + blocksNumber = append(blocksNumber, block.Number) + } + currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) + if err != nil { + return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err) + } + // check blocks vs currentBlockHeaders. Must match by number and hash + sotrageBlocks := listBlockHeadersToMap(blocks) + rpcBlocks := listBlockHeadersToMap(currentBlockHeaders) + for number, storageBlock := range sotrageBlocks { + rpcBlock, exists := rpcBlocks[number] + if !exists { + return fmt.Errorf("detectReorgs: block number %d not found in RPC", number) + } + if storageBlock.Hash != rpcBlock.Hash { + return fmt.Errorf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", + number, storageBlock.Hash.String(), rpcBlock.Hash.String()) + } + } return nil } @@ -196,6 +270,91 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { return nil } +func (dh *EVMMultidownloader) Start(ctx context.Context) error { + err := dh.Initialize(ctx) + if err != nil { + return err + } + + dh.log.Infof("checking unsafe blocks on DB...") + + if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { + return err + } + if err = dh.sync(ctx, dh.StepSafe, "safe"); err != nil { + return err + } + for { + dh.log.Infof("Unsafe sync iteration starting...") + if err = dh.sync(ctx, dh.StepUnsafe, "unsafe"); err != nil { + return err + } + dh.log.Infof("waiting new block...") + if err = dh.checkReorgUntilNewBlock(ctx); err != nil { + return err + } + } +} + +func (dh *EVMMultidownloader) StartV2(ctx context.Context) error { + if err := dh.Initialize(ctx); err != nil { + return fmt.Errorf("Start: cannot initialize multidownloader: %w", err) + } + finalizedBlock, err := dh.getFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("Start: cannot get finalized block number: %w", err) + } + dh.log.Infof("Starting multidownloader %s at finalized block %d", dh.name, finalizedBlock) + return nil +} + +// This function check the tip of the chain to prevent any reorg, meanwhile +// wait for a new block to arrive +func (dh *EVMMultidownloader) checkReorgUntilNewBlock(ctx context.Context) error { + initialFinalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) + } + lowestBlock, highestBlock, err := dh.storage.GetRangeBlockHeader(nil, mdrtypes.NotFinalized) + if err != nil { + return fmt.Errorf("checkReorgUntilNewBlock: cannot get highest unsafe block: %w", err) + } + if lowestBlock == nil || highestBlock == nil { + dh.log.Infof("checkReorgUntilNewBlock: no unsafe blocks to check for reorgs") + return nil + } + + for { + select { + case <-time.After(dh.cfg.PeriodToCheckReorgs.Duration): + if err := dh.detectReorgs(ctx, []*aggkittypes.BlockHeader{highestBlock}); err != nil { + return fmt.Errorf("checkReorgUntilNewBlock: cannot check reorg on tip block %d: %w", + highestBlock.Number, err) + } + if err := dh.pendingSync.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager); err != nil { + return fmt.Errorf("checkReorgUntilNewBlock: cannot update TargetToBlock in pendingSync: %w", err) + } + highestBlockPendingToSync := dh.pendingSync.GetHighestBlockNumber() + if highestBlockPendingToSync > highestBlock.Number { + dh.log.Infof("checkReorgUntilNewBlock: new block to sync (old: %d, new: %d), ", + highestBlock.Number, dh.pendingSync.GetHighestBlockNumber()) + return nil + } + finalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) + } + if finalizedBlockNumber != initialFinalizedBlockNumber { + dh.log.Infof("checkReorgUntilNewBlock: finalized block advanced from %d to %d, re-checking reorgs", + initialFinalizedBlockNumber, finalizedBlockNumber) + return nil + } + case <-ctx.Done(): + return fmt.Errorf("checkReorgUntilNewBlock: context done: %w", ctx.Err()) + } + } +} + // sync is an internal function that executes the given stepFunc until it returns done=true or error func (dh *EVMMultidownloader) sync(ctx context.Context, stepFunc func(ctx context.Context) (bool, error), name string) error { @@ -219,7 +378,7 @@ func (dh *EVMMultidownloader) sync(ctx context.Context, } dh.log.Infof("πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰ sync %s completed after %d iterations.", name, iteration) dh.statistics.FinishSyncing() - dh.ShowStatistics(iteration) + //dh.ShowStatistics(iteration) return nil } @@ -242,6 +401,105 @@ func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { return dh.syncedSegments.IsAvailable(query) } +// getTotalPendingBlockRange returns the full pending block range without taking in +// consideration addrs +func (dh *EVMMultidownloader) getTotalPendingBlockRange(ctx context.Context) *aggkitcommon.BlockRange { + dh.mutex.Lock() + defer dh.mutex.Unlock() + br := dh.pendingSync.GetTotalPendingBlockRange() + return br +} + +func (dh *EVMMultidownloader) getUnsafeLogQueries(ctx context.Context, blockHeaders []*aggkittypes.BlockHeader) []mdrtypes.LogQuery { + dh.mutex.Lock() + defer dh.mutex.Unlock() + logQueries := make([]mdrtypes.LogQuery, 0, len(blockHeaders)) + for _, bh := range blockHeaders { + logQueries = append(logQueries, mdrtypes.NewLogQueryBlockHash( + bh.Number, + bh.Hash, + dh.pendingSync.GetAddressesForBlock(bh.Number), + )) + } + return logQueries +} + +func (dh *EVMMultidownloader) newState(queries []mdrtypes.LogQuery) (*State, error) { + dh.mutex.Lock() + state := NewState(dh.syncedSegments.Clone(), dh.pendingSync.Clone()) + dh.mutex.Unlock() + for _, logQueryData := range queries { + err := state.SyncedSegments.AddLogQuery(&logQueryData) + if err != nil { + return nil, fmt.Errorf("Safe/Step: cannot extend synced segments: %w", err) + } + err = state.PendingSync.SubtractLogQuery(&logQueryData) + if err != nil { + return nil, fmt.Errorf("Safe/Step: cannot subtract log query from pending segments: %w", err) + } + } + return state, nil +} +func getContracts(logQueries []mdrtypes.LogQuery) []common.Address { + addressMap := make(map[common.Address]struct{}) + for _, lq := range logQueries { + for _, addr := range lq.Addrs { + addressMap[addr] = struct{}{} + } + } + addresses := make([]common.Address, 0, len(addressMap)) + for addr := range addressMap { + addresses = append(addresses, addr) + } + return addresses +} +func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { + if err := ctx.Err(); err != nil { + return false, err + } + pendingBlockRange := dh.getTotalPendingBlockRange(ctx) + blocks := pendingBlockRange.ListBlockNumbers() + // TODO: Check that the blocks are all inside unsafe range + blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blocks, dh.cfg.MaxParallelBlockHeaderRetrieval) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve %s block headers: %w", pendingBlockRange.String(), err) + } + dh.log.Debugf("Unsafe/Step: querying logs for %s", pendingBlockRange.String()) + logQueries := dh.getUnsafeLogQueries(ctx, blockHeaders) + logs, err := dh.requestMultiplesLogs(ctx, logQueries) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve logs for %s: %w", pendingBlockRange.String(), err) + } + newState, err := dh.newState(logQueries) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to create new state: %w", err) + } + updatedSegments := newState.SyncedSegments.SegmentsByContract(getContracts(logQueries)) + // Store data in storage + dh.log.Debugf("Unsafe/Step: storing data for %s", pendingBlockRange.String()) + err = dh.storeData(ctx, logs, blockHeaders, + updatedSegments, unsafeMode) + if err != nil { + return false, fmt.Errorf("Safe/Step: cannot store data: %w", err) + } + + dh.mutex.Lock() + defer dh.mutex.Unlock() + dh.log.Debugf("Unsafe/Step: updating state in memory %s", pendingBlockRange.String()) + dh.syncedSegments = newState.SyncedSegments + dh.pendingSync = &newState.PendingSync + finished := dh.pendingSync.Finished() + dh.log.Infof("Unsafe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", + dh.statistics.ElapsedSyncing().String(), + pendingBlockRange.String(), + len(logs), + len(blockHeaders), + dh.pendingSync.TotalBlocks(), + dh.statistics.ETA(dh.pendingSync.TotalBlocks())) + return finished, nil +} + // StepSafe performs a safe step syncing logs and block headers from historical data // Returns true when syncing is complete, false if more work remains func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { @@ -429,6 +687,27 @@ func (dh *EVMMultidownloader) getNextQuery(ctx context.Context, chunk uint32, sa return logQueryData, nil } +func (dh *EVMMultidownloader) requestMultiplesLogs( + ctx context.Context, + queries []mdrtypes.LogQuery) ([]types.Log, error) { + var allLogs []types.Log + for _, query := range queries { + dh.log.Debugf("request: querying logs for blockHash=%s", query.String()) + if err := ctx.Err(); err != nil { + return nil, fmt.Errorf("requestMultiplesLogs: context error: %w", err) + } + logs, err := dh.requestLogsSingleTry(ctx, &query) + if err != nil { + return nil, fmt.Errorf("requestMultiplesLogs: ethClient.FilterLogs(%v) failed: %w", + query.String(), err) + } + dh.log.Debugf("request: successfully queried logs for blockHash=%s: returned %d logs", + query.String(), len(logs)) + allLogs = append(allLogs, logs...) + } + return allLogs, nil +} + func (dh *EVMMultidownloader) requestLogs( ctx context.Context) ([]types.Log, *mdrtypes.LogQuery, error) { currentSyncBlockChunkSize := dh.cfg.BlockChunkSize diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index e3849a163..b78570a24 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -33,7 +33,7 @@ const runL1InfoTree = true const l1InfoTreeUseMultidownloader = true func TestEVMMultidownloader(t *testing.T) { - t.Skip("code to test/debug not real unittest") + //t.Skip("code to test/debug not real unittest") cfgLog := log.Config{ Environment: "development", Level: "info", @@ -62,6 +62,7 @@ func TestEVMMultidownloader(t *testing.T) { MaxParallelBlockHeaderRetrieval: 50, BlockFinality: aggkittypes.FinalizedBlock, WaitPeriodToCheckCatchUp: types.NewDuration(time.Second), + PeriodToCheckReorgs: types.NewDuration(time.Second * 10), } mdr, err := NewEVMMultidownloader(logger, cfg, "l1", ethClient, ethRPCClient, @@ -112,7 +113,7 @@ func TestEVMMultidownloader(t *testing.T) { }, multidownloader, reorgDetector, - l1infotreesync.FlagStopOnFinalizedBlockReached, + l1infotreesync.FlagNone, ) require.NoError(t, err) } diff --git a/multidownloader/state.go b/multidownloader/state.go new file mode 100644 index 000000000..81e38fc0b --- /dev/null +++ b/multidownloader/state.go @@ -0,0 +1,22 @@ +package multidownloader + +import mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + +type State struct { + SyncedSegments mdrtypes.SetSyncSegment + PendingSync mdrtypes.SetSyncSegment +} + +func NewState(syncedSegments *mdrtypes.SetSyncSegment, pendingSync *mdrtypes.SetSyncSegment) *State { + return &State{ + SyncedSegments: *syncedSegments, + PendingSync: *pendingSync, + } +} + +func (s *State) Clone() *State { + return &State{ + SyncedSegments: s.SyncedSegments, + PendingSync: s.PendingSync, + } +} diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql new file mode 100644 index 000000000..c5ed39674 --- /dev/null +++ b/multidownloader/storage/migrations/0002.sql @@ -0,0 +1,36 @@ +-- +migrate Down +DROP TABLE IF EXISTS logs_reorged; +-- +migrate Up + +CREATE TABLE logs_reorged ( + chain_id INTEGER NOT NULL, + block_number BIGINT NOT NULL, + address TEXT NOT NULL, -- + topics TEXT NOT NULL, -- list of hashes in JSON + data BLOB, -- + tx_hash TEXT NOT NULL, + tx_index INTEGER NOT NULL, + log_index INTEGER NOT NULL, -- β€œindex” is a reserved keyword + PRIMARY KEY (address, chain_id,block_number, log_index), + FOREIGN KEY (chain_id, block_number) REFERENCES block_reorged(chain_id, block_number) +); + +CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); + +CREATE TABLE block_reorged ( + chain_id INTEGER NOT NULL, + block_number BIGINT NOT NULL, + block_hash TEXT NOT NULL, + block_timestamp INTEGER NOT NULL, + block_parent_hash TEXT, + PRIMARY KEY (chain_id, block_number) +); + +CREATE TABLE reorgs ( + chain_id INTEGER NOT NULL, + detected_at_block BIGINT NOT NULL, + reorged_from_block BIGINT NOT NULL, + reorged_to_block BIGINT NOT NULL, + detected_timestamp INTEGER NOT NULL, + PRIMARY KEY (chain_id, detected_at_block) +); \ No newline at end of file diff --git a/multidownloader/storage/migrations/migrations.go b/multidownloader/storage/migrations/migrations.go index 679c8ffde..dab2e080d 100644 --- a/multidownloader/storage/migrations/migrations.go +++ b/multidownloader/storage/migrations/migrations.go @@ -12,11 +12,18 @@ import ( //go:embed 0001.sql var mig001 string +//go:embed 0002.sql +var mig002 string + var Migrations = []types.Migration{ { ID: "0001", SQL: mig001, }, + { + ID: "0002", + SQL: mig002, + }, } func RunMigrations(logger aggkitcommon.Logger, database *sql.DB) error { diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index 352ce504a..153b6417c 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -6,6 +6,7 @@ import ( "fmt" dbtypes "github.com/agglayer/aggkit/db/types" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/jmoiron/sqlx" "github.com/russross/meddler" @@ -51,6 +52,9 @@ func (b *Blocks) ListHeaders() []*aggkittypes.BlockHeader { func (b *Blocks) IsEmpty() bool { return len(b.Headers) == 0 } +func (b *Blocks) Len() int { + return len(b.Headers) +} func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, header *aggkittypes.BlockHeader, isFinal bool) error { @@ -62,7 +66,10 @@ func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, return a.saveBlocksNoMutex(tx, blockRows) } -func (a *MultidownloaderStorage) updateIsFinal(tx dbtypes.Querier, blockNumbers []uint64) error { +func (a *MultidownloaderStorage) UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error { + if len(blockNumbers) == 0 { + return nil + } if tx == nil { tx = a.db } @@ -81,11 +88,36 @@ func (a *MultidownloaderStorage) updateIsFinal(tx dbtypes.Querier, blockNumbers } return nil } + +// GetRangeBlockHeader retrieves the highest block header stored in the database +// return lowest and highest block headers +func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, isFinal mdtypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + highestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) + if err != nil { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) + } + if highestBlock.IsEmpty() { + return nil, nil, nil + } + if highestBlock.Len() > 1 { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: more than one block returned (%d)", highestBlock.Len()) + } + + lowestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) + if err != nil { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) + } + if lowestBlock.IsEmpty() { + return nil, nil, nil + } + if lowestBlock.Len() > 1 { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:lowest: more than one block returned (%d)", lowestBlock.Len()) + } + return highestBlock.ListHeaders()[0], lowestBlock.ListHeaders()[0], nil +} + func (a *MultidownloaderStorage) GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) { - if tx == nil { - tx = a.db - } a.mutex.RLock() defer a.mutex.RUnlock() blocks, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE block_number = ?", blockNumber) @@ -128,3 +160,17 @@ func (a *MultidownloaderStorage) getBlockHeadersNoMutex(tx dbtypes.Querier, } return result, nil } + +// GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock +func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + blocks, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0 AND block_number <= ?", maxBlock) + if err != nil { + return nil, err + } + return blocks.ListHeaders(), nil +} diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go index 08256c9e9..9385aa975 100644 --- a/multidownloader/storage/storage_test.go +++ b/multidownloader/storage/storage_test.go @@ -311,7 +311,10 @@ func TestStorage_UpdateIsFinal(t *testing.T) { require.Equal(t, block, readBlock, "BlockHeader mismatch") require.False(t, isFinal, "expected block to not be final") - err = storage.updateIsFinal(nil, []uint64{block.Number}) + err = storage.UpdateBlockToFinalized(nil, []uint64{}) + require.NoError(t, err, "if no blocks provided, should be no-op") + + err = storage.UpdateBlockToFinalized(nil, []uint64{block.Number}) require.NoError(t, err, "cannot update IsFinal") readBlock, isFinal, err = storage.GetBlockHeaderByNumber(nil, block.Number) @@ -321,6 +324,23 @@ func TestStorage_UpdateIsFinal(t *testing.T) { require.True(t, isFinal, "expected block to be final") } +func TestStorage_GetRangeBlockHeader(t *testing.T) { + storage := newStorageForTest(t, nil) + block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) + err := storage.saveAggkitBlock(nil, block, false) + require.NoError(t, err, "cannot insert BlockHeader") + + lowest, highest, err := storage.GetRangeBlockHeader(nil, false) + require.NoError(t, err, "cannot get range BlockHeader") + require.Equal(t, block, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block, highest, "highest BlockHeader mismatch") + + lowest, highest, err = storage.GetRangeBlockHeader(nil, true) + require.NoError(t, err, "cannot get range BlockHeader") + require.Equal(t, nil, lowest, "lowest BlockHeader mismatch") + require.Equal(t, nil, highest, "highest BlockHeader mismatch") +} + func TestStorage_logRow_String(t *testing.T) { row := logRow{ Address: exampleAddr1, diff --git a/multidownloader/types/log_query.go b/multidownloader/types/log_query.go index ea3e442eb..2ddc35ccc 100644 --- a/multidownloader/types/log_query.go +++ b/multidownloader/types/log_query.go @@ -13,6 +13,8 @@ import ( type LogQuery struct { Addrs []common.Address BlockRange aggkitcommon.BlockRange + // If BlockHash is set BlockRange contains the corresponding blockNumber + BlockHash *common.Hash } // NewLogQuery creates a new LogQuery @@ -23,12 +25,29 @@ func NewLogQuery(fromBlock uint64, toBlock uint64, addrs []common.Address) LogQu } } +func NewLogQueryBlockHash(blockNumber uint64, blockHash common.Hash, addrs []common.Address) LogQuery { + blockRange := aggkitcommon.BlockRangeZero + if blockNumber != 0 { + blockRange = aggkitcommon.NewBlockRange(blockNumber, blockNumber) + } + return LogQuery{ + Addrs: addrs, + BlockRange: blockRange, + BlockHash: &blockHash, + } +} + // NewLogQueryFromEthereumFilter creates a new LogQuery from an Ethereum FilterQuery func NewLogQueryFromEthereumFilter(query ethereum.FilterQuery) LogQuery { - return LogQuery{ - Addrs: query.Addresses, - BlockRange: aggkitcommon.NewBlockRange(query.FromBlock.Uint64(), query.ToBlock.Uint64()), + if query.BlockHash != nil { + blockNumber := uint64(0) + if query.FromBlock != nil { + blockNumber = query.FromBlock.Uint64() + } + return NewLogQueryBlockHash(blockNumber, *query.BlockHash, query.Addresses) + } + return NewLogQuery(query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses) } // String returns a string representation of the LogQuery @@ -36,11 +55,24 @@ func (l *LogQuery) String() string { if l == nil { return "LogQuery: " } + if l.BlockHash != nil { + bn := " (?)" + if !l.BlockRange.IsEmpty() { + bn = fmt.Sprintf(" (%d)", l.BlockRange.FromBlock) + } + return fmt.Sprintf("LogQuery: addrs=%v, blockHash=%s%s", l.Addrs, l.BlockHash.String(), bn) + } return fmt.Sprintf("LogQuery: addrs=%v, blockRange=%s", l.Addrs, l.BlockRange.String()) } // ToRPCFilterQuery converts the LogQuery to an Ethereum FilterQuery func (l *LogQuery) ToRPCFilterQuery() ethereum.FilterQuery { + if l.BlockHash != nil { + return ethereum.FilterQuery{ + Addresses: l.Addrs, + BlockHash: l.BlockHash, + } + } return ethereum.FilterQuery{ Addresses: l.Addrs, FromBlock: new(big.Int).SetUint64(l.BlockRange.FromBlock), diff --git a/multidownloader/types/log_query_test.go b/multidownloader/types/log_query_test.go index 99a98923d..56bedf50e 100644 --- a/multidownloader/types/log_query_test.go +++ b/multidownloader/types/log_query_test.go @@ -64,3 +64,22 @@ func TestLogQuery_ToRPCFilterQuery(t *testing.T) { require.Equal(t, big.NewInt(1), filter.FromBlock) require.Equal(t, big.NewInt(10), filter.ToBlock) } + +func TestLogQuery_BlockHash(t *testing.T) { + lq := NewLogQueryBlockHash(1234, common.HexToHash("0xabc"), []common.Address{common.HexToAddress("0x123")}) + require.Equal(t, common.HexToHash("0xabc"), *lq.BlockHash) + require.Equal(t, []common.Address{common.HexToAddress("0x123")}, lq.Addrs) + blockHash := common.HexToHash("0xabc") + lq2 := NewLogQueryFromEthereumFilter(ethereum.FilterQuery{ + Addresses: []common.Address{common.HexToAddress("0x123")}, + BlockHash: &blockHash, + }) + require.Equal(t, "LogQuery: addrs=[0x0000000000000000000000000000000000000123], blockHash=0x0000000000000000000000000000000000000000000000000000000000000abc (?)", + lq2.String()) + + rpcFilter := lq.ToRPCFilterQuery() + require.Equal(t, common.HexToHash("0xabc"), *rpcFilter.BlockHash) + require.Equal(t, []common.Address{common.HexToAddress("0x123")}, rpcFilter.Addresses) + require.Equal(t, "LogQuery: addrs=[0x0000000000000000000000000000000000000123], blockHash=0x0000000000000000000000000000000000000000000000000000000000000abc (1234)", + lq.String()) +} diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index 2319d32c0..e79cec81d 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -95,6 +95,65 @@ func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Quer return _c } +// GetBlockHeadersNotFinalized provides a mock function with given fields: tx, maxBlock +func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) { + ret := _m.Called(tx, maxBlock) + + if len(ret) == 0 { + panic("no return value specified for GetBlockHeadersNotFinalized") + } + + var r0 []*aggkittypes.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) ([]*aggkittypes.BlockHeader, error)); ok { + return rf(tx, maxBlock) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64) []*aggkittypes.BlockHeader); ok { + r0 = rf(tx, maxBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64) error); ok { + r1 = rf(tx, maxBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetBlockHeadersNotFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeadersNotFinalized' +type Storager_GetBlockHeadersNotFinalized_Call struct { + *mock.Call +} + +// GetBlockHeadersNotFinalized is a helper method to define mock.On call +// - tx types.Querier +// - maxBlock uint64 +func (_e *Storager_Expecter) GetBlockHeadersNotFinalized(tx interface{}, maxBlock interface{}) *Storager_GetBlockHeadersNotFinalized_Call { + return &Storager_GetBlockHeadersNotFinalized_Call{Call: _e.mock.On("GetBlockHeadersNotFinalized", tx, maxBlock)} +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Run(run func(tx types.Querier, maxBlock uint64)) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Return(_a0 []*aggkittypes.BlockHeader, _a1 error) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, uint64) ([]*aggkittypes.BlockHeader, error)) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Return(run) + return _c +} + // GetEthLogs provides a mock function with given fields: tx, query func (_m *Storager) GetEthLogs(tx types.Querier, query multidownloadertypes.LogQuery) ([]coretypes.Log, error) { ret := _m.Called(tx, query) @@ -154,6 +213,74 @@ func (_c *Storager_GetEthLogs_Call) RunAndReturn(run func(types.Querier, multido return _c } +// GetRangeBlockHeader provides a mock function with given fields: tx, isFinal +func (_m *Storager) GetRangeBlockHeader(tx types.Querier, isFinal multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + ret := _m.Called(tx, isFinal) + + if len(ret) == 0 { + panic("no return value specified for GetRangeBlockHeader") + } + + var r0 *aggkittypes.BlockHeader + var r1 *aggkittypes.BlockHeader + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error)); ok { + return rf(tx, isFinal) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.FinalizedType) *aggkittypes.BlockHeader); ok { + r0 = rf(tx, isFinal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.FinalizedType) *aggkittypes.BlockHeader); ok { + r1 = rf(tx, isFinal) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(2).(func(types.Querier, multidownloadertypes.FinalizedType) error); ok { + r2 = rf(tx, isFinal) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Storager_GetRangeBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRangeBlockHeader' +type Storager_GetRangeBlockHeader_Call struct { + *mock.Call +} + +// GetRangeBlockHeader is a helper method to define mock.On call +// - tx types.Querier +// - isFinal multidownloadertypes.FinalizedType +func (_e *Storager_Expecter) GetRangeBlockHeader(tx interface{}, isFinal interface{}) *Storager_GetRangeBlockHeader_Call { + return &Storager_GetRangeBlockHeader_Call{Call: _e.mock.On("GetRangeBlockHeader", tx, isFinal)} +} + +func (_c *Storager_GetRangeBlockHeader_Call) Run(run func(tx types.Querier, isFinal multidownloadertypes.FinalizedType)) *Storager_GetRangeBlockHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.FinalizedType)) + }) + return _c +} + +func (_c *Storager_GetRangeBlockHeader_Call) Return(lowest *aggkittypes.BlockHeader, highest *aggkittypes.BlockHeader, err error) *Storager_GetRangeBlockHeader_Call { + _c.Call.Return(lowest, highest, err) + return _c +} + +func (_c *Storager_GetRangeBlockHeader_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error)) *Storager_GetRangeBlockHeader_Call { + _c.Call.Return(run) + return _c +} + // GetSyncedBlockRangePerContract provides a mock function with given fields: tx func (_m *Storager) GetSyncedBlockRangePerContract(tx types.Querier) (multidownloadertypes.SetSyncSegment, error) { ret := _m.Called(tx) @@ -424,6 +551,53 @@ func (_c *Storager_SaveEthLogsWithHeaders_Call) RunAndReturn(run func(types.Quer return _c } +// UpdateBlockToFinalized provides a mock function with given fields: tx, blockNumbers +func (_m *Storager) UpdateBlockToFinalized(tx types.Querier, blockNumbers []uint64) error { + ret := _m.Called(tx, blockNumbers) + + if len(ret) == 0 { + panic("no return value specified for UpdateBlockToFinalized") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, []uint64) error); ok { + r0 = rf(tx, blockNumbers) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Storager_UpdateBlockToFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateBlockToFinalized' +type Storager_UpdateBlockToFinalized_Call struct { + *mock.Call +} + +// UpdateBlockToFinalized is a helper method to define mock.On call +// - tx types.Querier +// - blockNumbers []uint64 +func (_e *Storager_Expecter) UpdateBlockToFinalized(tx interface{}, blockNumbers interface{}) *Storager_UpdateBlockToFinalized_Call { + return &Storager_UpdateBlockToFinalized_Call{Call: _e.mock.On("UpdateBlockToFinalized", tx, blockNumbers)} +} + +func (_c *Storager_UpdateBlockToFinalized_Call) Run(run func(tx types.Querier, blockNumbers []uint64)) *Storager_UpdateBlockToFinalized_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].([]uint64)) + }) + return _c +} + +func (_c *Storager_UpdateBlockToFinalized_Call) Return(_a0 error) *Storager_UpdateBlockToFinalized_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Storager_UpdateBlockToFinalized_Call) RunAndReturn(run func(types.Querier, []uint64) error) *Storager_UpdateBlockToFinalized_Call { + _c.Call.Return(run) + return _c +} + // UpdateSyncedStatus provides a mock function with given fields: tx, segments func (_m *Storager) UpdateSyncedStatus(tx types.Querier, segments []multidownloadertypes.SyncSegment) error { ret := _m.Called(tx, segments) diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 5ce083580..40e1c3152 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -48,15 +48,6 @@ func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) SetSyncSegment { return set } -// Segments returns all SyncSegments in the SetSyncSegment -func (s *SetSyncSegment) Segments() []SyncSegment { - result := make([]SyncSegment, 0, len(s.segments)) - for _, segment := range s.segments { - result = append(result, *segment) - } - return result -} - // Add adds a new SyncSegment to the SetSyncSegment, merging block ranges // if the contract address already exists func (s *SetSyncSegment) Add(segment SyncSegment) { @@ -91,13 +82,13 @@ func (f *SetSyncSegment) SubtractSegments(segments *SetSyncSegment) error { return nil } newSegments := f.Clone() - for _, segment := range segments.Segments() { + for _, segment := range segments.segments { previousSegment, exists := newSegments.GetByContract(segment.ContractAddr) if exists { brs := previousSegment.BlockRange.Subtract(segment.BlockRange) switch len(brs) { case 0: - newSegments.Remove(&previousSegment) + newSegments.Empty(&previousSegment) case 1: newSegments.UpdateBlockRange(&previousSegment, brs[0]) default: @@ -215,6 +206,35 @@ func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uin BlockRange: br, }, nil } +func (f *SetSyncSegment) GetHighestBlockNumber() uint64 { + if f == nil || len(f.segments) == 0 { + return 0 + } + highest := uint64(0) + for _, segment := range f.segments { + if segment.BlockRange.ToBlock > highest { + highest = segment.BlockRange.ToBlock + } + } + return highest +} + +func (f *SetSyncSegment) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { + if f == nil || len(f.segments) == 0 { + return nil + } + var totalRange *aggkitcommon.BlockRange + for _, segment := range f.segments { + if totalRange == nil { + br := segment.BlockRange + totalRange = &br + } else { + extended := totalRange.Extend(segment.BlockRange) + totalRange = &extended + } + } + return totalRange +} func (f *SetSyncSegment) GetLowestFromBlockSegment() *SyncSegment { if f == nil || len(f.segments) == 0 { @@ -239,8 +259,21 @@ func (f *SetSyncSegment) GetAddressesForBlockRange(blockRange aggkitcommon.Block return addresses } +func (f *SetSyncSegment) GetAddressesForBlock(blockNumber uint64) []common.Address { + blockRange := aggkitcommon.NewBlockRange(blockNumber, blockNumber) + return f.GetAddressesForBlockRange(blockRange) +} + func (f *SetSyncSegment) Finished() bool { - return f == nil || len(f.segments) == 0 + if f == nil || len(f.segments) == 0 { + return true + } + for _, segment := range f.segments { + if !segment.IsEmpty() { + return false + } + } + return true } func (f *SetSyncSegment) Clone() *SetSyncSegment { @@ -254,6 +287,15 @@ func (f *SetSyncSegment) Clone() *SetSyncSegment { return &newSet } +func (f *SetSyncSegment) Empty(segment *SyncSegment) { + for _, s := range f.segments { + if s.Equal(*segment) { + s.Empty() + return + } + } +} + func (f *SetSyncSegment) Remove(segmentToRemove *SyncSegment) { if f == nil || segmentToRemove == nil { return diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 27a5fff6f..b01a1a924 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -5,6 +5,7 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/etherman/types/mocks" + "github.com/agglayer/aggkit/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" @@ -30,19 +31,6 @@ func TestSetSyncSegment_String(t *testing.T) { require.Contains(t, result, "SyncSegment[0]=") } -func TestSetSyncSegment_Segments(t *testing.T) { - set := NewSetSyncSegment() - segment := SyncSegment{ - ContractAddr: common.HexToAddress("0x123"), - BlockRange: aggkitcommon.NewBlockRange(1, 10), - } - set.segments = []*SyncSegment{&segment} - - result := set.Segments() - require.Len(t, result, 1) - require.Equal(t, segment, result[0]) -} - func TestSetSyncSegment_Add(t *testing.T) { t.Run("add new segment", func(t *testing.T) { set := NewSetSyncSegment() @@ -270,6 +258,16 @@ func TestSetSyncSegment_Finished(t *testing.T) { set.segments = []*SyncSegment{segment} require.False(t, set.Finished()) }) + t.Run("empty segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := &SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + segment.Empty() + set.segments = []*SyncSegment{segment} + require.True(t, set.Finished()) + }) } func TestSetSyncSegment_Clone(t *testing.T) { @@ -405,3 +403,35 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Error(t, err) }) } +func TestSetSyncSegment_AfterFullySync(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123124543423") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: types.LatestBlock, + } + set.Add(segment) + + logQuery := &LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + // The segment is empty so is not returned by GetByContract + segment, exists := set.GetByContract(addr) + require.True(t, exists) + require.True(t, segment.IsEmpty()) + require.True(t, set.Finished()) + require.Equal(t, uint64(0), set.TotalBlocks()) + + mockBlockManager := mocks.NewBlockNotifierManager(t) + mockBlockManager.EXPECT().GetCurrentBlockNumber(mock.Anything, types.LatestBlock).Return(uint64(150), nil).Once() + set.UpdateTargetBlockToNumber(t.Context(), mockBlockManager) + require.Equal(t, uint64(50), set.TotalBlocks()) + segment, exists = set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, "From: 101, To: 150 (50)", segment.BlockRange.String()) +} diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go index 6e2db6c9c..5508120bd 100644 --- a/multidownloader/types/storager.go +++ b/multidownloader/types/storager.go @@ -8,6 +8,13 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +type FinalizedType = bool + +const ( + NotFinalized FinalizedType = false + Finalized FinalizedType = true +) + type Storager interface { dbtypes.KeyValueStorager // GetSyncedBlockRangePerContract It returns the synced block range stored in DB @@ -19,4 +26,9 @@ type Storager interface { UpsertSyncerConfigs(tx dbtypes.Querier, configs []ContractConfig) error GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) NewTx(ctx context.Context) (dbtypes.Txer, error) + + GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) + UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error + GetRangeBlockHeader(tx dbtypes.Querier, isFinal FinalizedType) (lowest *aggkittypes.BlockHeader, + highest *aggkittypes.BlockHeader, err error) } diff --git a/multidownloader/types/sync_segment.go b/multidownloader/types/sync_segment.go index a1d4594d5..af99ff1a8 100644 --- a/multidownloader/types/sync_segment.go +++ b/multidownloader/types/sync_segment.go @@ -11,7 +11,8 @@ import ( // SyncSegment represents a segment of blocks, it is used for synced segments but also // for representing segments to be synced type SyncSegment struct { - ContractAddr common.Address + ContractAddr common.Address + // If FromBlock is 0 means that is empty BlockRange aggkitcommon.BlockRange TargetToBlock aggkittypes.BlockNumberFinality } @@ -60,6 +61,24 @@ func (s *SyncSegment) UpdateToBlock(newToBlock uint64) { s.BlockRange.ToBlock = newToBlock } +func (s *SyncSegment) Empty() { + if s == nil { + return + } + // Set FromBlock greater than ToBlock to indicate empty segment + s.BlockRange = aggkitcommon.NewBlockRange( + s.BlockRange.ToBlock+1, + 0, + ) +} + +func (s *SyncSegment) IsEmpty() bool { + if s == nil { + return true + } + return s.BlockRange.FromBlock > s.BlockRange.ToBlock +} + // Equal checks if two SyncSegments are equal func (s SyncSegment) Equal(other SyncSegment) bool { return s.ContractAddr == other.ContractAddr && From 0619aaa09c435963fec21fa13f2ef9e4273e83f2 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 1 Dec 2025 18:43:28 +0100 Subject: [PATCH 02/75] feat: moved state functions to state object --- cmd/run.go | 1 + etherman/batch_requests.go | 18 +- etherman/batch_requests_test.go | 2 +- multidownloader/evm_multidownloader.go | 228 ++++++++++++------- multidownloader/evm_multidownloader_rpc.go | 26 ++- multidownloader/evm_multidownloader_test.go | 42 +++- multidownloader/state.go | 86 ++++++- multidownloader/storage/migrations/0002.sql | 21 +- multidownloader/storage/storage.go | 4 +- multidownloader/storage/storage_block.go | 8 +- multidownloader/storage/storage_test.go | 78 +++---- multidownloader/types/mocks/mock_storager.go | 83 +++++-- multidownloader/types/storager.go | 10 +- 13 files changed, 419 insertions(+), 188 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index dffa2d6fe..6c008d890 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -628,6 +628,7 @@ func runL1MultiDownloaderIfNeeded( l1Client, // rpcClient nil, // storage nil, // blockNotifierManager + nil, // reorgProcessor ) if err != nil { return nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err) diff --git a/etherman/batch_requests.go b/etherman/batch_requests.go index d46033475..c6e5e9142 100644 --- a/etherman/batch_requests.go +++ b/etherman/batch_requests.go @@ -56,7 +56,7 @@ func RetrieveBlockHeaders(ctx context.Context, ethClient aggkittypes.BaseEthereumClienter, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, - maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { if rpcClient != nil { return RetrieveBlockHeadersBatch(ctx, log, rpcClient, blockNumbers, maxConcurrency) } @@ -69,11 +69,11 @@ func RetrieveBlockHeadersBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, - maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { + func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { return retrieveBlockHeadersInBatch(ctx, log, rpcClient, blocks) }, blockNumbers, batchRequestLimitHTTP, maxConcurrency) } @@ -88,8 +88,8 @@ func RetrieveBlockHeadersLegacy(ctx context.Context, return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { - result := make([]*aggkittypes.BlockHeader, len(blocks)) + func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { + result := aggkittypes.NewListBlockHeadersEmpty(len(blocks)) for i, blockNumber := range blocks { header, err := ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) if err != nil { @@ -107,9 +107,9 @@ func retrieveBlockHeadersInBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, -) ([]*aggkittypes.BlockHeader, error) { +) (aggkittypes.ListBlockHeaders, error) { if len(blockNumbers) == 0 { - return make([]*aggkittypes.BlockHeader, 0), nil + return aggkittypes.NewListBlockHeadersEmpty(0), nil } headers := make([]*blockRawEth, len(blockNumbers)) timeTracker := aggkitcommon.NewTimeTracker() @@ -146,9 +146,9 @@ func retrieveBlockHeadersInBatch(ctx context.Context, func retrieveBlockHeadersInBatchParallel( ctx context.Context, logger aggkitcommon.Logger, - funcRetrieval func(context.Context, []uint64) ([]*aggkittypes.BlockHeader, error), + funcRetrieval func(context.Context, []uint64) (aggkittypes.ListBlockHeaders, error), blockNumbers []uint64, - chunckSize, maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + chunckSize, maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { var mu sync.Mutex g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrency) diff --git a/etherman/batch_requests_test.go b/etherman/batch_requests_test.go index d7b37179c..0f6b9d5a6 100644 --- a/etherman/batch_requests_test.go +++ b/etherman/batch_requests_test.go @@ -247,7 +247,7 @@ func TestRetrieveBlockHeadersInBatchParallel(t *testing.T) { result, err := retrieveBlockHeadersInBatchParallel( ctx, logger, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { + func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { t.Logf("Retrieving blocks in batch: %v", blocks) headers := make([]*aggkittypes.BlockHeader, len(blocks)) for i, bn := range blocks { diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 2e5160fec..a59c03589 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -41,15 +41,13 @@ type EVMMultidownloader struct { blockNotifierManager ethermantypes.BlockNotifierManager name string syncersConfig mdrtypes.SetSyncerConfig + reorgProcessor mdrtypes.ReorgProcessor mutex sync.Mutex isInitialized bool - // These are the segments that we need to sync - pendingSync *mdrtypes.SetSyncSegment - // These are the segments that we have already synced - // when a syncer does a `FilterLogs`, it is used to check what is already synced - syncedSegments mdrtypes.SetSyncSegment - statistics *Statistics + state *State // current state of synced and pending segments + + statistics *Statistics } var _ aggkittypes.MultiDownloader = (*EVMMultidownloader)(nil) @@ -62,6 +60,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, storageDB mdrtypes.Storager, blockNotifierManager ethermantypes.BlockNotifierManager, + reorgProcessor mdrtypes.ReorgProcessor, ) (*EVMMultidownloader, error) { if blockNotifierManager == nil { blockNotifierManager = ethermanblocknotifier.NewBlockNotifierManager(log, @@ -86,6 +85,11 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, } } + if reorgProcessor == nil { + log.Infof("NewEVMMultidownloader: creating default ReorgProcessor for multidownloader (%s)", name) + reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB) + } + return &EVMMultidownloader{ log: log, ethClient: ethClient, @@ -96,6 +100,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, syncersConfig: mdrtypes.NewSetSyncerConfig(), statistics: NewStatistics(), name: name, + reorgProcessor: reorgProcessor, }, nil } @@ -114,7 +119,7 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er dh.mutex.Lock() defer dh.mutex.Unlock() - finalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) if err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get finalized block number: %w", err) } @@ -140,14 +145,9 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) err = dh.detectReorgs(ctx, blocks) if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot detect reorgs: %w", err) - } - blockNumbers := make([]uint64, 0, len(blocks)) - for _, block := range blocks { - blockNumbers = append(blockNumbers, block.Number) + return fmt.Errorf("MoveUnsafeToSafeIfPossible: error detecting reorgs: %w", err) } - - err = dh.storage.UpdateBlockToFinalized(tx, blockNumbers) + err = dh.storage.UpdateBlockToFinalized(tx, blocks.BlockNumbers()) if err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err) } @@ -155,41 +155,36 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er if err := tx.Commit(); err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot commit tx: %w", err) } - return nil -} -func listBlockHeadersToMap(blocks []*aggkittypes.BlockHeader) map[uint64]*aggkittypes.BlockHeader { - result := make(map[uint64]*aggkittypes.BlockHeader, len(blocks)) - for _, block := range blocks { - result[block.Number] = block - } - return result + return nil } func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, - blocks []*aggkittypes.BlockHeader) error { + blocks aggkittypes.ListBlockHeaders) error { // TODO: optimize this to don't check all blocks // TODO: Find the first block to reorg - blocksNumber := make([]uint64, 0, len(blocks)) - for _, block := range blocks { - blocksNumber = append(blocksNumber, block.Number) - } + blocksNumber := blocks.BlockNumbers() currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) if err != nil { return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err) } // check blocks vs currentBlockHeaders. Must match by number and hash - sotrageBlocks := listBlockHeadersToMap(blocks) - rpcBlocks := listBlockHeadersToMap(currentBlockHeaders) - for number, storageBlock := range sotrageBlocks { + storageBlocks := blocks.ToMap() + rpcBlocks := currentBlockHeaders.ToMap() + for _, number := range blocksNumber { rpcBlock, exists := rpcBlocks[number] if !exists { return fmt.Errorf("detectReorgs: block number %d not found in RPC", number) } + storageBlock, exists := storageBlocks[number] + if !exists { + return fmt.Errorf("detectReorgs: block number %d not found in storage", number) + } if storageBlock.Hash != rpcBlock.Hash { - return fmt.Errorf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", - number, storageBlock.Hash.String(), rpcBlock.Hash.String()) + return mdrtypes.NewReorgError(storageBlock.Number, storageBlock.Hash, rpcBlock.Hash, + fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", + number, storageBlock.Hash.String(), rpcBlock.Hash.String())) } } return nil @@ -233,11 +228,13 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { if dh.isInitialized { return fmt.Errorf("initialize: already initialized") } + dh.log.Infof("Initializing multidownloader...") // Check DB compatibility err := dh.CheckDatabase(ctx) if err != nil { return err } + dh.log.Infof("Saving syncer configs to storage...") // Save syncer configs to storage; it overrides previous ones but keeps // the synced segments err = dh.storage.UpsertSyncerConfigs(nil, dh.syncersConfig.ContractConfigs()) @@ -259,25 +256,44 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { if err != nil { return err } - // What is pending to download? - dh.pendingSync = syncSegments.Clone() - err = dh.pendingSync.SubtractSegments(&storageSyncSegments) + newState, err := NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) if err != nil { - return fmt.Errorf("Initialize: cannot calculate pendingSync: %w", err) + return err } - dh.syncedSegments = storageSyncSegments + // What is pending to download? + dh.state = newState dh.isInitialized = true + dh.log.Infof("Initialization completed. state: %s", + dh.state.String()) return nil } - func (dh *EVMMultidownloader) Start(ctx context.Context) error { err := dh.Initialize(ctx) if err != nil { return err } + for { + err = dh.StartStep(ctx) + if err != nil { + reorgErr := mdrtypes.CastReorgError(err) + if reorgErr == nil { + panic("Error running multidownloader: " + err.Error()) + } + dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) + err = dh.reorgProcessor.ProcessReorg(ctx, reorgErr.OffendingBlockNumber) + if err != nil { + panic("Error running multidownloader: " + err.Error()) + } + } + // Breathing, just in case + dh.log.Infof("relauncing sync loop... (waiting 1 second)") + time.Sleep(1 * time.Second) + } +} +func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { dh.log.Infof("checking unsafe blocks on DB...") - + var err error if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { return err } @@ -296,22 +312,10 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { } } -func (dh *EVMMultidownloader) StartV2(ctx context.Context) error { - if err := dh.Initialize(ctx); err != nil { - return fmt.Errorf("Start: cannot initialize multidownloader: %w", err) - } - finalizedBlock, err := dh.getFinalizedBlockNumber(ctx) - if err != nil { - return fmt.Errorf("Start: cannot get finalized block number: %w", err) - } - dh.log.Infof("Starting multidownloader %s at finalized block %d", dh.name, finalizedBlock) - return nil -} - // This function check the tip of the chain to prevent any reorg, meanwhile // wait for a new block to arrive func (dh *EVMMultidownloader) checkReorgUntilNewBlock(ctx context.Context) error { - initialFinalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) + initialFinalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) if err != nil { return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) } @@ -331,16 +335,16 @@ func (dh *EVMMultidownloader) checkReorgUntilNewBlock(ctx context.Context) error return fmt.Errorf("checkReorgUntilNewBlock: cannot check reorg on tip block %d: %w", highestBlock.Number, err) } - if err := dh.pendingSync.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager); err != nil { + if err := dh.state.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager); err != nil { return fmt.Errorf("checkReorgUntilNewBlock: cannot update TargetToBlock in pendingSync: %w", err) } - highestBlockPendingToSync := dh.pendingSync.GetHighestBlockNumber() + highestBlockPendingToSync := dh.state.GetHighestBlockNumberPendingToSync() if highestBlockPendingToSync > highestBlock.Number { dh.log.Infof("checkReorgUntilNewBlock: new block to sync (old: %d, new: %d), ", - highestBlock.Number, dh.pendingSync.GetHighestBlockNumber()) + highestBlock.Number, highestBlockPendingToSync) return nil } - finalizedBlockNumber, err := dh.getFinalizedBlockNumber(ctx) + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) if err != nil { return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) } @@ -398,7 +402,7 @@ func getBlockNumbers(logs []types.Log) []uint64 { func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { dh.mutex.Lock() defer dh.mutex.Unlock() - return dh.syncedSegments.IsAvailable(query) + return dh.state.IsAvailable(query) } // getTotalPendingBlockRange returns the full pending block range without taking in @@ -406,7 +410,7 @@ func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { func (dh *EVMMultidownloader) getTotalPendingBlockRange(ctx context.Context) *aggkitcommon.BlockRange { dh.mutex.Lock() defer dh.mutex.Unlock() - br := dh.pendingSync.GetTotalPendingBlockRange() + br := dh.state.GetTotalPendingBlockRange() return br } @@ -418,7 +422,7 @@ func (dh *EVMMultidownloader) getUnsafeLogQueries(ctx context.Context, blockHead logQueries = append(logQueries, mdrtypes.NewLogQueryBlockHash( bh.Number, bh.Hash, - dh.pendingSync.GetAddressesForBlock(bh.Number), + dh.state.GetAddressesToSyncForBlockNumber(bh.Number), )) } return logQueries @@ -426,14 +430,14 @@ func (dh *EVMMultidownloader) getUnsafeLogQueries(ctx context.Context, blockHead func (dh *EVMMultidownloader) newState(queries []mdrtypes.LogQuery) (*State, error) { dh.mutex.Lock() - state := NewState(dh.syncedSegments.Clone(), dh.pendingSync.Clone()) + state := dh.state.Clone() dh.mutex.Unlock() for _, logQueryData := range queries { - err := state.SyncedSegments.AddLogQuery(&logQueryData) + err := state.Synced.AddLogQuery(&logQueryData) if err != nil { return nil, fmt.Errorf("Safe/Step: cannot extend synced segments: %w", err) } - err = state.PendingSync.SubtractLogQuery(&logQueryData) + err = state.Pending.SubtractLogQuery(&logQueryData) if err != nil { return nil, fmt.Errorf("Safe/Step: cannot subtract log query from pending segments: %w", err) } @@ -453,6 +457,48 @@ func getContracts(logQueries []mdrtypes.LogQuery) []common.Address { } return addresses } + +func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log, + blockHeaders aggkittypes.ListBlockHeaders) error { + blockMap := blockHeaders.ToMap() + for _, lg := range logs { + bh, exists := blockMap[lg.BlockNumber] + if !exists { + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: block header for log block number %d not found", lg.BlockNumber) + } + if bh.Hash != lg.BlockHash { + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: log block hash %s does not match block header hash %s for block number %d", + lg.BlockHash.String(), bh.Hash.String(), lg.BlockNumber) + } + } + return nil +} + +func (dh *EVMMultidownloader) checkParent(ctx context.Context, blockHeader *aggkittypes.BlockHeader) error { + if blockHeader.Number == 0 { + return nil + } + parentHeader, isFinalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockHeader.Number-1) + if err != nil { + return fmt.Errorf("checkParent: cannot get parent block header for block number %d: %w", blockHeader.Number, err) + } + if parentHeader == nil { + return fmt.Errorf("checkParent: parent block header for block number %d not found in storage", blockHeader.Number-1) + } + // Parenthash (from DB) doesn't match parent Hash of first blockHeader, but parent is finalized + // so the discrepancy is the new block that is discarded without reorg (still not in DB) + if isFinalized && blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { + return fmt.Errorf("checkParent: parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", + blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String()) + } + if blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { + // Parenthash mismatch, reorg detected + return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", + blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) + } + return nil +} + func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err := ctx.Err(); err != nil { return false, err @@ -471,11 +517,14 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err != nil { return false, fmt.Errorf("Unsafe/Step: failed to retrieve logs for %s: %w", pendingBlockRange.String(), err) } + if err = dh.checkIntegrityNewLogsBlockHeaders(logs, blockHeaders); err != nil { + return false, err + } newState, err := dh.newState(logQueries) if err != nil { return false, fmt.Errorf("Unsafe/Step: failed to create new state: %w", err) } - updatedSegments := newState.SyncedSegments.SegmentsByContract(getContracts(logQueries)) + updatedSegments := newState.Synced.SegmentsByContract(getContracts(logQueries)) // Store data in storage dh.log.Debugf("Unsafe/Step: storing data for %s", pendingBlockRange.String()) err = dh.storeData(ctx, logs, blockHeaders, @@ -487,16 +536,16 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { dh.mutex.Lock() defer dh.mutex.Unlock() dh.log.Debugf("Unsafe/Step: updating state in memory %s", pendingBlockRange.String()) - dh.syncedSegments = newState.SyncedSegments - dh.pendingSync = &newState.PendingSync - finished := dh.pendingSync.Finished() + dh.state = newState + finished := dh.state.IsSyncFinished() + totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() dh.log.Infof("Unsafe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", dh.statistics.ElapsedSyncing().String(), pendingBlockRange.String(), len(logs), len(blockHeaders), - dh.pendingSync.TotalBlocks(), - dh.statistics.ETA(dh.pendingSync.TotalBlocks())) + totalBlocksPendingToSync, + dh.statistics.ETA(totalBlocksPendingToSync)) return finished, nil } @@ -527,43 +576,39 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { // Calculate new state (not set in memory until commit is successful) dh.mutex.Lock() - newSyncedSegments := dh.syncedSegments.Clone() - newPendingSegments := dh.pendingSync.Clone() + newState := dh.state.Clone() dh.mutex.Unlock() // Update synced segments - err = newSyncedSegments.AddLogQuery(logQueryData) + err = newState.OnNewSyncedLogQuery(logQueryData) if err != nil { - return false, fmt.Errorf("Safe/Step: cannot extend synced segments: %w", err) - } - // from pending blocks remove current query - err = newPendingSegments.SubtractLogQuery(logQueryData) - if err != nil { - return false, fmt.Errorf("Safe/Step: cannot subtract log query from pending segments: %w", err) + return false, fmt.Errorf("Safe/Step: fails OnNewSyncedLogQuery(%s): %w", + logQueryData.String(), err) } + // Update ToBlock in pending segments to be able to calculate if finished - err = newPendingSegments.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) + err = newState.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) if err != nil { return false, fmt.Errorf("Safe/Step: cannot update ToBlock in pendingSync: %w", err) } // Store data in storage err = dh.storeData(ctx, logs, blockHeaders, - newSyncedSegments.SegmentsByContract(logQueryData.Addrs), true) + newState.SyncedSegmentsByContract(logQueryData.Addrs), true) if err != nil { return false, fmt.Errorf("Safe/Step: cannot store data: %w", err) } // Update in-memory synced segments (after valid commit) dh.mutex.Lock() defer dh.mutex.Unlock() - dh.syncedSegments = *newSyncedSegments - dh.pendingSync = newPendingSegments - finished := dh.pendingSync.Finished() + dh.state = newState + finished := dh.state.IsSyncFinished() + totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() dh.log.Infof("Safe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", dh.statistics.ElapsedSyncing().String(), logQueryData.BlockRange.String(), len(logs), len(blockHeaders), - dh.pendingSync.TotalBlocks(), - dh.statistics.ETA(dh.pendingSync.TotalBlocks())) + totalBlocksPendingToSync, + dh.statistics.ETA(totalBlocksPendingToSync)) return finished, nil } func (dh *EVMMultidownloader) storeData( @@ -658,7 +703,16 @@ func extractSuggestedBlockRangeFromErrorMsg(msg string) *aggkitcommon.BlockRange return nil } -func (dh *EVMMultidownloader) getFinalizedBlockNumber(ctx context.Context) (uint64, error) { +func (dh *EVMMultidownloader) GetLatestBlockNumber(ctx context.Context) (uint64, error) { + bn, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock) + if err != nil { + return 0, fmt.Errorf("GetLatestBlockNumber: cannot get latest block (%s): %w", + aggkittypes.LatestBlock.String(), err) + } + return bn, nil +} + +func (dh *EVMMultidownloader) GetFinalizedBlockNumber(ctx context.Context) (uint64, error) { bn, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, dh.cfg.BlockFinality) if err != nil { return 0, fmt.Errorf("Safe/Step: cannot get finalized block (%s): %w", @@ -673,14 +727,14 @@ func (dh *EVMMultidownloader) getNextQuery(ctx context.Context, chunk uint32, sa var err error var maxBlock uint64 if safe { - maxBlock, err = dh.getFinalizedBlockNumber(ctx) + maxBlock, err = dh.GetFinalizedBlockNumber(ctx) if err != nil { return nil, fmt.Errorf("getNextQuery: cannot get finalized block number: %w", err) } } else { maxBlock = 0 } - logQueryData, err := dh.pendingSync.NextQuery(chunk, maxBlock) + logQueryData, err := dh.state.NextQueryToSync(chunk, maxBlock) if err != nil { return nil, fmt.Errorf("getNextQuery: cannot get NextQuery: %w", err) } diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index 5753bd23f..78443303c 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -1,6 +1,8 @@ package multidownloader import ( + "context" + "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" ) @@ -22,12 +24,30 @@ func NewEVMMultidownloaderRPC( // Status returns the status of the L1InfoTreeSync component // curl -X POST http://localhost:5576/ "Content-Type: application/json" \ -// -d '{"method":"l1infotreesync_status", "params":[], "id":1}' +// -d '{"method":"multidownloader-l1_status", "params":[], "id":1}' func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) { + finalizedBlockNumber, err := b.downloader.GetFinalizedBlockNumber(context.Background()) + if err != nil { + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, "EVMMultidownloaderRPC.Status: getting finalized block number: %v", err) + } + latestBlockNumber, err := b.downloader.GetLatestBlockNumber(context.Background()) + if err != nil { + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, "EVMMultidownloaderRPC.Status: getting latest block number: %v", err) + } + b.downloader.mutex.Lock() + defer b.downloader.mutex.Unlock() + info := struct { - Status string `json:"status"` + Status string `json:"status"` + State string `json:"state,omitempty"` + Pending string `json:"pending,omitempty"` + FinalizedBlockNumber uint64 `json:"finalizedBlockNumber,omitempty"` + LatestBlockNumber uint64 `json:"latestBlockNumber,omitempty"` }{ - Status: "running", + Status: "running", + State: b.downloader.state.String(), + FinalizedBlockNumber: finalizedBlockNumber, + LatestBlockNumber: latestBlockNumber, } return info, nil } diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index b78570a24..43f819771 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + jRPC "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" "github.com/agglayer/aggkit/etherman" @@ -29,8 +30,9 @@ import ( "github.com/stretchr/testify/require" ) -const runL1InfoTree = true +const runL1InfoTree = false const l1InfoTreeUseMultidownloader = true +const storagePath = "../tmp/ut/" func TestEVMMultidownloader(t *testing.T) { //t.Skip("code to test/debug not real unittest") @@ -54,7 +56,7 @@ func TestEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "test") db, err := storage.NewMultidownloaderStorage(logger, storage.MultidownloaderStorageConfig{ - DBPath: "/tmp/mdr_test.sqlite", + DBPath: storagePath + "mdr_test.sqlite", }) require.NoError(t, err) cfg := Config{ @@ -64,9 +66,10 @@ func TestEVMMultidownloader(t *testing.T) { WaitPeriodToCheckCatchUp: types.NewDuration(time.Second), PeriodToCheckReorgs: types.NewDuration(time.Second * 10), } + var rpcServices []jRPC.Service mdr, err := NewEVMMultidownloader(logger, cfg, "l1", ethClient, ethRPCClient, - db, nil) + db, nil, nil) require.NoError(t, err) require.NotNil(t, mdr) err = mdr.RegisterSyncer(aggkittypes.SyncerConfig{ @@ -79,6 +82,7 @@ func TestEVMMultidownloader(t *testing.T) { ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) + rpcServices = append(rpcServices, mdr.GetRPCServices()...) ctx := context.TODO() var l1infotree *l1infotreesync.L1InfoTreeSync if runL1InfoTree == true { @@ -86,13 +90,13 @@ func TestEVMMultidownloader(t *testing.T) { var dbPath string if l1InfoTreeUseMultidownloader { multidownloader = mdr - dbPath = "/tmp/l1infotree_md.sqlite" + dbPath = storagePath + "l1infotree_md.sqlite" } else { multidownloader = aggkitsync.NewAdapterEthClientToMultidownloader(ethClient) - dbPath = "/tmp/l1infotree_eth.sqlite" + dbPath = storagePath + "l1infotree_eth.sqlite" } reorgDetector, err := reorgdetector.New(ethClient, reorgdetector.Config{ - DBPath: "/tmp/l1_reorgdetector.sqlite", + DBPath: storagePath + "l1_reorgdetector.sqlite", CheckReorgsInterval: types.NewDuration(time.Second * 10), FinalizedBlock: aggkittypes.FinalizedBlock, }, reorgdetector.L1) @@ -113,9 +117,29 @@ func TestEVMMultidownloader(t *testing.T) { }, multidownloader, reorgDetector, + //l1infotreesync.FlagStopOnFinalizedBlockReached, l1infotreesync.FlagNone, ) require.NoError(t, err) + rpcServices = append(rpcServices, l1infotree.GetRPCServices()...) + } + if len(rpcServices) > 0 { + log.Infof("Registering %d RPC services", len(rpcServices)) + logger := log.WithFields("module", "RPC") + jRPCServer := jRPC.NewServer( + jRPC.Config{ + Host: "127.0.0.1", + Port: 5576, + MaxRequestsPerIPAndSecond: 10000.0, + }, + rpcServices, + jRPC.WithLogger(logger.GetSugaredLogger()), + ) + go func() { + if err := jRPCServer.Start(); err != nil { + log.Fatal(err) + } + }() } var wg sync.WaitGroup @@ -233,7 +257,7 @@ func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes. func TestEVMMultidownloader_NewEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "evm_multidownloader_test") cfg := NewConfigDefault("test.sqlite", t.TempDir()) - sut, err := NewEVMMultidownloader(logger, cfg, "test", nil, nil, nil, nil) + sut, err := NewEVMMultidownloader(logger, cfg, "test", nil, nil, nil, nil, nil) require.NoError(t, err) require.NotNil(t, sut) require.NotNil(t, sut.blockNotifierManager) @@ -307,7 +331,7 @@ func TestEVMMultidownloader_GetRPCServices(t *testing.T) { require.NoError(t, err) customName := "custom-name" - mdr, err := NewEVMMultidownloader(logger, cfg, customName, ethClient, nil, db, nil) + mdr, err := NewEVMMultidownloader(logger, cfg, customName, ethClient, nil, db, nil, nil) require.NoError(t, err) services := mdr.GetRPCServices() @@ -455,7 +479,7 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM useDB = realDB } // TODO: Add mock for ethRPCClient if needed - mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, useDB, mockBlockNotifierManager) + mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, useDB, mockBlockNotifierManager, nil) require.NoError(t, err) return &testDataEVMMultidownloader{ mockEthClient: ethClient, diff --git a/multidownloader/state.go b/multidownloader/state.go index 81e38fc0b..c55b7f97f 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -1,22 +1,92 @@ package multidownloader -import mdrtypes "github.com/agglayer/aggkit/multidownloader/types" +import ( + "context" + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/etherman/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/ethereum/go-ethereum/common" +) type State struct { - SyncedSegments mdrtypes.SetSyncSegment - PendingSync mdrtypes.SetSyncSegment + // These are the segments that we have already synced + // when a syncer does a `FilterLogs`, it is used to check what is already synced + Synced mdrtypes.SetSyncSegment + // These are the segments that we need to sync + Pending mdrtypes.SetSyncSegment } -func NewState(syncedSegments *mdrtypes.SetSyncSegment, pendingSync *mdrtypes.SetSyncSegment) *State { +func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) *State { return &State{ - SyncedSegments: *syncedSegments, - PendingSync: *pendingSync, + Synced: *synced, + Pending: *pending, + } +} + +func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment, totalToSync mdrtypes.SetSyncSegment) (*State, error) { + err := totalToSync.SubtractSegments(&storageSynced) + if err != nil { + return nil, fmt.Errorf("Initialize: cannot calculate pendingSync: %w", err) } + return NewState(&storageSynced, &totalToSync), nil } func (s *State) Clone() *State { return &State{ - SyncedSegments: s.SyncedSegments, - PendingSync: s.PendingSync, + Synced: s.Synced, + Pending: s.Pending, + } +} +func (s *State) String() string { + return "State{Synced: " + s.Synced.String() + + ", Pending: " + s.Pending.String() + "}" +} + +func (s *State) UpdateTargetBlockToNumber(ctx context.Context, blockNotifier types.BlockNotifierManager) error { + return s.Pending.UpdateTargetBlockToNumber(ctx, blockNotifier) +} + +func (s *State) GetHighestBlockNumberPendingToSync() uint64 { + return s.Pending.GetHighestBlockNumber() +} + +func (s *State) IsAvailable(query mdrtypes.LogQuery) bool { + return s.Synced.IsAvailable(query) +} + +func (s *State) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { + return s.Pending.GetTotalPendingBlockRange() +} + +func (s *State) GetAddressesToSyncForBlockNumber(blockNumber uint64) []common.Address { + return s.Pending.GetAddressesForBlock(blockNumber) +} +func (s *State) IsSyncFinished() bool { + return s.Pending.Finished() +} + +func (s *State) TotalBlocksPendingToSync() uint64 { + return s.Pending.TotalBlocks() +} + +func (s *State) OnNewSyncedLogQuery(logQuery *mdrtypes.LogQuery) error { + err := s.Synced.AddLogQuery(logQuery) + if err != nil { + return fmt.Errorf("OnNewSyncedLogQuery: addding syned segment: %w", err) + } + err = s.Pending.SubtractLogQuery(logQuery) + if err != nil { + return fmt.Errorf("OnNewSyncedLogQuery: subtracting pending segment: %w", err) } + return nil +} + +func (s *State) SyncedSegmentsByContract(addrs []common.Address) []mdrtypes.SyncSegment { + return s.Synced.SegmentsByContract(addrs) +} + +func (s *State) NextQueryToSync(syncBlockChunkSize uint32, maxBlockNumber uint64) (*mdrtypes.LogQuery, error) { + return s.Pending.NextQuery(syncBlockChunkSize, maxBlockNumber) } diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql index c5ed39674..db16b81c0 100644 --- a/multidownloader/storage/migrations/0002.sql +++ b/multidownloader/storage/migrations/0002.sql @@ -3,8 +3,8 @@ DROP TABLE IF EXISTS logs_reorged; -- +migrate Up CREATE TABLE logs_reorged ( - chain_id INTEGER NOT NULL, - block_number BIGINT NOT NULL, + chain_id BIGINT NOT NULL , + block_number BIGINT NOT NULL , address TEXT NOT NULL, -- topics TEXT NOT NULL, -- list of hashes in JSON data BLOB, -- @@ -12,25 +12,28 @@ CREATE TABLE logs_reorged ( tx_index INTEGER NOT NULL, log_index INTEGER NOT NULL, -- β€œindex” is a reserved keyword PRIMARY KEY (address, chain_id,block_number, log_index), - FOREIGN KEY (chain_id, block_number) REFERENCES block_reorged(chain_id, block_number) + FOREIGN KEY (chain_id, block_number) REFERENCES blocks_reorged(chain_id, block_number) ); CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); -CREATE TABLE block_reorged ( - chain_id INTEGER NOT NULL, +CREATE TABLE blocks_reorged ( + chain_id BIGINT NOT NULL REFERENCES reorgs(chain_id), block_number BIGINT NOT NULL, block_hash TEXT NOT NULL, block_timestamp INTEGER NOT NULL, - block_parent_hash TEXT, + block_parent_hash TEXT NOT NULL, PRIMARY KEY (chain_id, block_number) ); CREATE TABLE reorgs ( - chain_id INTEGER NOT NULL, + chain_id BIGINT PRIMARY KEY, detected_at_block BIGINT NOT NULL, reorged_from_block BIGINT NOT NULL, reorged_to_block BIGINT NOT NULL, - detected_timestamp INTEGER NOT NULL, - PRIMARY KEY (chain_id, detected_at_block) + detected_timestamp INTEGER NOT NULL, + network_latest_block INTEGER NOT NULL, -- which was the latest block in the detection moment + network_finalized_block INTEGER NOT NULL, -- which was the finalized block in the detection moment + network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) + description TEXT -- extran information, can be null ); \ No newline at end of file diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index b38fc78e8..e3fae16cd 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -142,7 +142,7 @@ func NewBlockRowsFromLogs(logs []types.Log, isFinal bool) map[uint64]*blockRow { return blockMap } -func NewBlockRowsFromAggkitBlock(blockHeaders []*aggkittypes.BlockHeader, isFinal bool) map[uint64]*blockRow { +func NewBlockRowsFromAggkitBlock(blockHeaders aggkittypes.ListBlockHeaders, isFinal bool) map[uint64]*blockRow { blockMap := make(map[uint64]*blockRow) for _, header := range blockHeaders { blockMap[header.Number] = newBlockRowFromAggkitBlock(header, isFinal) @@ -247,7 +247,7 @@ func (a *MultidownloaderStorage) SaveEthLogs(tx dbtypes.Querier, logs []types.Lo } func (a *MultidownloaderStorage) SaveEthLogsWithHeaders(tx dbtypes.Querier, - blockHeaders []*aggkittypes.BlockHeader, logs []types.Log, isFinal bool) error { + blockHeaders aggkittypes.ListBlockHeaders, logs []types.Log, isFinal bool) error { return a.saveLogsAndBlocks(tx, NewBlockRowsFromAggkitBlock(blockHeaders, isFinal), NewLogRowsFromEthLogs(logs)) } diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index 153b6417c..f49e0dea4 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -41,8 +41,8 @@ func (b *Blocks) Get(number uint64) (*aggkittypes.BlockHeader, bool, error) { return header, isFinal, nil } -func (b *Blocks) ListHeaders() []*aggkittypes.BlockHeader { - headers := make([]*aggkittypes.BlockHeader, 0, len(b.Headers)) +func (b *Blocks) ListHeaders() aggkittypes.ListBlockHeaders { + headers := aggkittypes.NewListBlockHeadersEmpty(len(b.Headers)) for _, header := range b.Headers { headers = append(headers, header) } @@ -117,7 +117,7 @@ func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, isFinal } func (a *MultidownloaderStorage) GetBlockHeaderByNumber(tx dbtypes.Querier, - blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) { + blockNumber uint64) (*aggkittypes.BlockHeader, mdtypes.FinalizedType, error) { a.mutex.RLock() defer a.mutex.RUnlock() blocks, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE block_number = ?", blockNumber) @@ -162,7 +162,7 @@ func (a *MultidownloaderStorage) getBlockHeadersNoMutex(tx dbtypes.Querier, } // GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock -func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) { +func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { if tx == nil { tx = a.db } diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go index 9385aa975..d9707bb06 100644 --- a/multidownloader/storage/storage_test.go +++ b/multidownloader/storage/storage_test.go @@ -48,30 +48,6 @@ func TestStorage_Exploratory(t *testing.T) { log.Infof("Retrieved block: %+v", block) } -func TestStorage_GetBlock(t *testing.T) { - storage := newStorageForTest(t, nil) - // BlockBase not present - blockHeader, _, err := storage.GetBlockHeaderByNumber(nil, 1234) - require.NoError(t, err, "cannot get BlockHeader") - require.Nil(t, blockHeader, "expected nil BlockHeader") - block := aggkittypes.NewBlockHeader(1234, exampleTestHash[0], 5678, &exampleTestHash[1]) - err = storage.saveAggkitBlock(nil, block, true) - require.NoError(t, err, "cannot insert BlockHeader") - // Get and verify block - readBlock, isFinal, err := storage.GetBlockHeaderByNumber(nil, 1234) - require.NoError(t, err, "cannot get BlockHeader") - require.NotNil(t, readBlock, "expected non-nil BlockHeader") - require.Equal(t, block, readBlock, "BlockHeader mismatch") - require.True(t, isFinal, "expected block to be final") - - blockNilParentHash := aggkittypes.NewBlockHeader(1235, exampleTestHash[0], 5678, nil) - err = storage.saveAggkitBlock(nil, blockNilParentHash, true) - require.NoError(t, err, "cannot get BlockHeader") - readBlock, _, err = storage.GetBlockHeaderByNumber(nil, blockNilParentHash.Number) - require.NoError(t, err, "cannot get BlockHeader") - require.Equal(t, blockNilParentHash, readBlock, "BlockHeader mismatch") -} - func TestStorage_GetLogs(t *testing.T) { storage := newStorageForTest(t, nil) // Logs not present @@ -324,23 +300,6 @@ func TestStorage_UpdateIsFinal(t *testing.T) { require.True(t, isFinal, "expected block to be final") } -func TestStorage_GetRangeBlockHeader(t *testing.T) { - storage := newStorageForTest(t, nil) - block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) - err := storage.saveAggkitBlock(nil, block, false) - require.NoError(t, err, "cannot insert BlockHeader") - - lowest, highest, err := storage.GetRangeBlockHeader(nil, false) - require.NoError(t, err, "cannot get range BlockHeader") - require.Equal(t, block, lowest, "lowest BlockHeader mismatch") - require.Equal(t, block, highest, "highest BlockHeader mismatch") - - lowest, highest, err = storage.GetRangeBlockHeader(nil, true) - require.NoError(t, err, "cannot get range BlockHeader") - require.Equal(t, nil, lowest, "lowest BlockHeader mismatch") - require.Equal(t, nil, highest, "highest BlockHeader mismatch") -} - func TestStorage_logRow_String(t *testing.T) { row := logRow{ Address: exampleAddr1, @@ -392,3 +351,40 @@ func newStorageForTest(t *testing.T, dbFileFullPath *string) *MultidownloaderSto require.NoError(t, err, "cannot create storage") return storage } + +func populateLogsAndBlocksForTest(t *testing.T, storage *MultidownloaderStorage, + startingBlock uint64, numBlocks int, logsPerBlock int) { + t.Helper() + var blocks []*aggkittypes.BlockHeader + var logs []types.Log + for i := 0; i < numBlocks; i++ { + blockNumber := startingBlock + uint64(i) + blockHash := exampleTestHash[i%len(exampleTestHash)] + var parentHash *common.Hash + if i > 0 { + parentHash = &exampleTestHash[(i-1)%len(exampleTestHash)] + } + block := aggkittypes.NewBlockHeader(blockNumber, blockHash, 1630000000+uint64(i*60), parentHash) + blocks = append(blocks, block) + + for j := 0; j < logsPerBlock; j++ { + logEntry := types.Log{ + Address: exampleAddr1, + BlockNumber: blockNumber, + BlockHash: blockHash, + BlockTimestamp: 1630000000 + uint64(i*60), + Topics: []common.Hash{ + exampleTestHash[j%len(exampleTestHash)], + }, + Data: []byte{0x01, 0x02, byte(j)}, + TxHash: exampleTestHash[(i+j)%len(exampleTestHash)], + TxIndex: uint(100 + j), + Index: uint(10 + j), + } + logs = append(logs, logEntry) + } + } + + err := storage.SaveEthLogsWithHeaders(nil, blocks, logs, true) + require.NoError(t, err, "cannot populate logs and blocks") +} diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index e79cec81d..4ed827dfa 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -96,23 +96,23 @@ func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Quer } // GetBlockHeadersNotFinalized provides a mock function with given fields: tx, maxBlock -func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) { +func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { ret := _m.Called(tx, maxBlock) if len(ret) == 0 { panic("no return value specified for GetBlockHeadersNotFinalized") } - var r0 []*aggkittypes.BlockHeader + var r0 aggkittypes.ListBlockHeaders var r1 error - if rf, ok := ret.Get(0).(func(types.Querier, uint64) ([]*aggkittypes.BlockHeader, error)); ok { + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (aggkittypes.ListBlockHeaders, error)); ok { return rf(tx, maxBlock) } - if rf, ok := ret.Get(0).(func(types.Querier, uint64) []*aggkittypes.BlockHeader); ok { + if rf, ok := ret.Get(0).(func(types.Querier, uint64) aggkittypes.ListBlockHeaders); ok { r0 = rf(tx, maxBlock) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*aggkittypes.BlockHeader) + r0 = ret.Get(0).(aggkittypes.ListBlockHeaders) } } @@ -144,12 +144,12 @@ func (_c *Storager_GetBlockHeadersNotFinalized_Call) Run(run func(tx types.Queri return _c } -func (_c *Storager_GetBlockHeadersNotFinalized_Call) Return(_a0 []*aggkittypes.BlockHeader, _a1 error) *Storager_GetBlockHeadersNotFinalized_Call { +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Return(_a0 aggkittypes.ListBlockHeaders, _a1 error) *Storager_GetBlockHeadersNotFinalized_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, uint64) ([]*aggkittypes.BlockHeader, error)) *Storager_GetBlockHeadersNotFinalized_Call { +func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, uint64) (aggkittypes.ListBlockHeaders, error)) *Storager_GetBlockHeadersNotFinalized_Call { _c.Call.Return(run) return _c } @@ -395,6 +395,63 @@ func (_c *Storager_GetValue_Call) RunAndReturn(run func(types.Querier, string, s return _c } +// InsertReorgAndMoveReorgedBlocksAndLogs provides a mock function with given fields: tx, reorgData +func (_m *Storager) InsertReorgAndMoveReorgedBlocksAndLogs(tx types.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for InsertReorgAndMoveReorgedBlocksAndLogs") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertReorgAndMoveReorgedBlocksAndLogs' +type Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call struct { + *mock.Call +} + +// InsertReorgAndMoveReorgedBlocksAndLogs is a helper method to define mock.On call +// - tx types.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *Storager_Expecter) InsertReorgAndMoveReorgedBlocksAndLogs(tx interface{}, reorgData interface{}) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + return &Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call{Call: _e.mock.On("InsertReorgAndMoveReorgedBlocksAndLogs", tx, reorgData)} +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Run(run func(tx types.Querier, reorgData multidownloadertypes.ReorgData)) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Return(_a0 uint64, _a1 error) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(run) + return _c +} + // InsertValue provides a mock function with given fields: tx, owner, key, value func (_m *Storager) InsertValue(tx types.Querier, owner string, key string, value string) error { ret := _m.Called(tx, owner, key, value) @@ -503,7 +560,7 @@ func (_c *Storager_NewTx_Call) RunAndReturn(run func(context.Context) (types.Txe } // SaveEthLogsWithHeaders provides a mock function with given fields: tx, blockHeaders, logs, isFinal -func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders []*aggkittypes.BlockHeader, logs []coretypes.Log, isFinal bool) error { +func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []coretypes.Log, isFinal bool) error { ret := _m.Called(tx, blockHeaders, logs, isFinal) if len(ret) == 0 { @@ -511,7 +568,7 @@ func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders []*agg } var r0 error - if rf, ok := ret.Get(0).(func(types.Querier, []*aggkittypes.BlockHeader, []coretypes.Log, bool) error); ok { + if rf, ok := ret.Get(0).(func(types.Querier, aggkittypes.ListBlockHeaders, []coretypes.Log, bool) error); ok { r0 = rf(tx, blockHeaders, logs, isFinal) } else { r0 = ret.Error(0) @@ -527,16 +584,16 @@ type Storager_SaveEthLogsWithHeaders_Call struct { // SaveEthLogsWithHeaders is a helper method to define mock.On call // - tx types.Querier -// - blockHeaders []*aggkittypes.BlockHeader +// - blockHeaders aggkittypes.ListBlockHeaders // - logs []coretypes.Log // - isFinal bool func (_e *Storager_Expecter) SaveEthLogsWithHeaders(tx interface{}, blockHeaders interface{}, logs interface{}, isFinal interface{}) *Storager_SaveEthLogsWithHeaders_Call { return &Storager_SaveEthLogsWithHeaders_Call{Call: _e.mock.On("SaveEthLogsWithHeaders", tx, blockHeaders, logs, isFinal)} } -func (_c *Storager_SaveEthLogsWithHeaders_Call) Run(run func(tx types.Querier, blockHeaders []*aggkittypes.BlockHeader, logs []coretypes.Log, isFinal bool)) *Storager_SaveEthLogsWithHeaders_Call { +func (_c *Storager_SaveEthLogsWithHeaders_Call) Run(run func(tx types.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []coretypes.Log, isFinal bool)) *Storager_SaveEthLogsWithHeaders_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(types.Querier), args[1].([]*aggkittypes.BlockHeader), args[2].([]coretypes.Log), args[3].(bool)) + run(args[0].(types.Querier), args[1].(aggkittypes.ListBlockHeaders), args[2].([]coretypes.Log), args[3].(bool)) }) return _c } @@ -546,7 +603,7 @@ func (_c *Storager_SaveEthLogsWithHeaders_Call) Return(_a0 error) *Storager_Save return _c } -func (_c *Storager_SaveEthLogsWithHeaders_Call) RunAndReturn(run func(types.Querier, []*aggkittypes.BlockHeader, []coretypes.Log, bool) error) *Storager_SaveEthLogsWithHeaders_Call { +func (_c *Storager_SaveEthLogsWithHeaders_Call) RunAndReturn(run func(types.Querier, aggkittypes.ListBlockHeaders, []coretypes.Log, bool) error) *Storager_SaveEthLogsWithHeaders_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go index 5508120bd..4f0d8d330 100644 --- a/multidownloader/types/storager.go +++ b/multidownloader/types/storager.go @@ -16,10 +16,11 @@ const ( ) type Storager interface { + StoragerForReorg dbtypes.KeyValueStorager // GetSyncedBlockRangePerContract It returns the synced block range stored in DB GetSyncedBlockRangePerContract(tx dbtypes.Querier) (SetSyncSegment, error) - SaveEthLogsWithHeaders(tx dbtypes.Querier, blockHeaders []*aggkittypes.BlockHeader, + SaveEthLogsWithHeaders(tx dbtypes.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []types.Log, isFinal bool) error GetEthLogs(tx dbtypes.Querier, query LogQuery) ([]types.Log, error) UpdateSyncedStatus(tx dbtypes.Querier, segments []SyncSegment) error @@ -27,8 +28,13 @@ type Storager interface { GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) NewTx(ctx context.Context) (dbtypes.Txer, error) - GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) ([]*aggkittypes.BlockHeader, error) + GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error GetRangeBlockHeader(tx dbtypes.Querier, isFinal FinalizedType) (lowest *aggkittypes.BlockHeader, highest *aggkittypes.BlockHeader, err error) } + +type StoragerForReorg interface { + GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, FinalizedType, error) + InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, reorgData ReorgData) (uint64, error) +} From dcb8dab63b9d866594cdd541cb10330347a91024 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 1 Dec 2025 19:13:15 +0100 Subject: [PATCH 03/75] feat: fix a lot of lint issues --- l1infotreesync/e2e_test.go | 11 +++++--- multidownloader/evm_multidownloader.go | 28 +++++++++++-------- multidownloader/evm_multidownloader_rpc.go | 6 ++-- .../evm_multidownloader_syncers_test.go | 2 +- multidownloader/evm_multidownloader_test.go | 4 +-- multidownloader/state.go | 3 +- multidownloader/storage/storage_block.go | 14 ++++++---- multidownloader/types/log_query.go | 1 - .../types/set_sync_segment_test.go | 8 +++--- test/helpers/e2e.go | 5 ++-- 10 files changed, 48 insertions(+), 34 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index d9621346e..fa901d25c 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -93,6 +93,7 @@ func TestE2E(t *testing.T) { nil, // rpcClient nil, nil, + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { @@ -175,8 +176,9 @@ func TestWithReorgs(t *testing.T) { "testMD", etherman.NewDefaultEthClient(client.Client(), nil, nil), nil, // rpcClient - nil, - nil, + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { @@ -328,8 +330,9 @@ func TestStressAndReorgs(t *testing.T) { "testMD", etherman.NewDefaultEthClient(client.Client(), nil, nil), nil, // rpcClient - nil, - nil, + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index a59c03589..422905205 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -142,7 +142,8 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er if err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err) } - dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) + dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+ + "unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) err = dh.detectReorgs(ctx, blocks) if err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: error detecting reorgs: %w", err) @@ -382,7 +383,6 @@ func (dh *EVMMultidownloader) sync(ctx context.Context, } dh.log.Infof("πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰ sync %s completed after %d iterations.", name, iteration) dh.statistics.FinishSyncing() - //dh.ShowStatistics(iteration) return nil } @@ -407,14 +407,14 @@ func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { // getTotalPendingBlockRange returns the full pending block range without taking in // consideration addrs -func (dh *EVMMultidownloader) getTotalPendingBlockRange(ctx context.Context) *aggkitcommon.BlockRange { +func (dh *EVMMultidownloader) getTotalPendingBlockRange() *aggkitcommon.BlockRange { dh.mutex.Lock() defer dh.mutex.Unlock() br := dh.state.GetTotalPendingBlockRange() return br } -func (dh *EVMMultidownloader) getUnsafeLogQueries(ctx context.Context, blockHeaders []*aggkittypes.BlockHeader) []mdrtypes.LogQuery { +func (dh *EVMMultidownloader) getUnsafeLogQueries(blockHeaders []*aggkittypes.BlockHeader) []mdrtypes.LogQuery { dh.mutex.Lock() defer dh.mutex.Unlock() logQueries := make([]mdrtypes.LogQuery, 0, len(blockHeaders)) @@ -464,17 +464,19 @@ func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log for _, lg := range logs { bh, exists := blockMap[lg.BlockNumber] if !exists { - return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: block header for log block number %d not found", lg.BlockNumber) + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: "+ + "block header for log block number %d not found", lg.BlockNumber) } if bh.Hash != lg.BlockHash { - return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: log block hash %s does not match block header hash %s for block number %d", + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: "+ + "log block hash %s does not match block header hash %s for block number %d", lg.BlockHash.String(), bh.Hash.String(), lg.BlockNumber) } } return nil } -func (dh *EVMMultidownloader) checkParent(ctx context.Context, blockHeader *aggkittypes.BlockHeader) error { +func (dh *EVMMultidownloader) checkParent(blockHeader *aggkittypes.BlockHeader) error { if blockHeader.Number == 0 { return nil } @@ -488,13 +490,15 @@ func (dh *EVMMultidownloader) checkParent(ctx context.Context, blockHeader *aggk // Parenthash (from DB) doesn't match parent Hash of first blockHeader, but parent is finalized // so the discrepancy is the new block that is discarded without reorg (still not in DB) if isFinalized && blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { - return fmt.Errorf("checkParent: parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", + return fmt.Errorf("checkParent: "+ + "parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String()) } if blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { // Parenthash mismatch, reorg detected - return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", - blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) + return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, + *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", + blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) } return nil } @@ -503,7 +507,7 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err := ctx.Err(); err != nil { return false, err } - pendingBlockRange := dh.getTotalPendingBlockRange(ctx) + pendingBlockRange := dh.getTotalPendingBlockRange() blocks := pendingBlockRange.ListBlockNumbers() // TODO: Check that the blocks are all inside unsafe range blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, @@ -512,7 +516,7 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { return false, fmt.Errorf("Unsafe/Step: failed to retrieve %s block headers: %w", pendingBlockRange.String(), err) } dh.log.Debugf("Unsafe/Step: querying logs for %s", pendingBlockRange.String()) - logQueries := dh.getUnsafeLogQueries(ctx, blockHeaders) + logQueries := dh.getUnsafeLogQueries(blockHeaders) logs, err := dh.requestMultiplesLogs(ctx, logQueries) if err != nil { return false, fmt.Errorf("Unsafe/Step: failed to retrieve logs for %s: %w", pendingBlockRange.String(), err) diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index 78443303c..4d8492688 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -28,11 +28,13 @@ func NewEVMMultidownloaderRPC( func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) { finalizedBlockNumber, err := b.downloader.GetFinalizedBlockNumber(context.Background()) if err != nil { - return nil, rpc.NewRPCError(rpc.DefaultErrorCode, "EVMMultidownloaderRPC.Status: getting finalized block number: %v", err) + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, + "EVMMultidownloaderRPC.Status: getting finalized block number: %v", err) } latestBlockNumber, err := b.downloader.GetLatestBlockNumber(context.Background()) if err != nil { - return nil, rpc.NewRPCError(rpc.DefaultErrorCode, "EVMMultidownloaderRPC.Status: getting latest block number: %v", err) + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, + "EVMMultidownloaderRPC.Status: getting latest block number: %v", err) } b.downloader.mutex.Lock() defer b.downloader.mutex.Unlock() diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index e8b5a453b..8b2f8e8cd 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -203,7 +203,7 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { } mdQuery := mdrtypes.NewLogQueryFromEthereumFilter(query) // It updated the syncedSegments with the new one to be available - err = testData.mdr.syncedSegments.AddLogQuery(&mdQuery) + err = testData.mdr.state.OnNewSyncedLogQuery(&mdQuery) require.NoError(t, err) testData.mockStorage.EXPECT().GetEthLogs(mock.Anything, mock.Anything). Return(nil, errStorageExample) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 43f819771..4636bdd15 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -35,7 +35,7 @@ const l1InfoTreeUseMultidownloader = true const storagePath = "../tmp/ut/" func TestEVMMultidownloader(t *testing.T) { - //t.Skip("code to test/debug not real unittest") + // t.Skip("code to test/debug not real unittest") cfgLog := log.Config{ Environment: "development", Level: "info", @@ -117,7 +117,7 @@ func TestEVMMultidownloader(t *testing.T) { }, multidownloader, reorgDetector, - //l1infotreesync.FlagStopOnFinalizedBlockReached, + // l1infotreesync.FlagStopOnFinalizedBlockReached, l1infotreesync.FlagNone, ) require.NoError(t, err) diff --git a/multidownloader/state.go b/multidownloader/state.go index c55b7f97f..567183021 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -25,7 +25,8 @@ func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) } } -func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment, totalToSync mdrtypes.SetSyncSegment) (*State, error) { +func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment, + totalToSync mdrtypes.SetSyncSegment) (*State, error) { err := totalToSync.SubtractSegments(&storageSynced) if err != nil { return nil, fmt.Errorf("Initialize: cannot calculate pendingSync: %w", err) diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index f49e0dea4..cf6293872 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -56,7 +56,7 @@ func (b *Blocks) Len() int { return len(b.Headers) } -func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, +func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, //nolint:unparam header *aggkittypes.BlockHeader, isFinal bool) error { blockRows := map[uint64]*blockRow{ header.Number: newBlockRowFromAggkitBlock(header, isFinal), @@ -91,8 +91,10 @@ func (a *MultidownloaderStorage) UpdateBlockToFinalized(tx dbtypes.Querier, bloc // GetRangeBlockHeader retrieves the highest block header stored in the database // return lowest and highest block headers -func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, isFinal mdtypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { - highestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) +func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, + isFinal mdtypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + highestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks "+ + "WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) if err != nil { return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) } @@ -103,7 +105,8 @@ func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, isFinal return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: more than one block returned (%d)", highestBlock.Len()) } - lowestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) + lowestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? "+ + "order by block_number DESC LIMIT 1", isFinal) if err != nil { return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) } @@ -162,7 +165,8 @@ func (a *MultidownloaderStorage) getBlockHeadersNoMutex(tx dbtypes.Querier, } // GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock -func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { +func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, + maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { if tx == nil { tx = a.db } diff --git a/multidownloader/types/log_query.go b/multidownloader/types/log_query.go index 2ddc35ccc..af1009e97 100644 --- a/multidownloader/types/log_query.go +++ b/multidownloader/types/log_query.go @@ -45,7 +45,6 @@ func NewLogQueryFromEthereumFilter(query ethereum.FilterQuery) LogQuery { blockNumber = query.FromBlock.Uint64() } return NewLogQueryBlockHash(blockNumber, *query.BlockHash, query.Addresses) - } return NewLogQuery(query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses) } diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index b01a1a924..751cfffcf 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -5,7 +5,6 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/etherman/types/mocks" - "github.com/agglayer/aggkit/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" @@ -409,7 +408,7 @@ func TestSetSyncSegment_AfterFullySync(t *testing.T) { segment := SyncSegment{ ContractAddr: addr, BlockRange: aggkitcommon.NewBlockRange(1, 100), - TargetToBlock: types.LatestBlock, + TargetToBlock: aggkittypes.LatestBlock, } set.Add(segment) @@ -428,8 +427,9 @@ func TestSetSyncSegment_AfterFullySync(t *testing.T) { require.Equal(t, uint64(0), set.TotalBlocks()) mockBlockManager := mocks.NewBlockNotifierManager(t) - mockBlockManager.EXPECT().GetCurrentBlockNumber(mock.Anything, types.LatestBlock).Return(uint64(150), nil).Once() - set.UpdateTargetBlockToNumber(t.Context(), mockBlockManager) + mockBlockManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock).Return(uint64(150), nil).Once() + err = set.UpdateTargetBlockToNumber(t.Context(), mockBlockManager) + require.NoError(t, err) require.Equal(t, uint64(50), set.TotalBlocks()) segment, exists = set.GetByContract(addr) require.True(t, exists) diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 3fa6c9c03..8d2f0f662 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -167,8 +167,9 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { "testMD", l1EthClient, nil, // RPC client is not simulated - nil, - nil, + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { From 1b9931dab7a6d59427b976ca381cc50f40465a97 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 19 Jan 2026 15:54:35 +0100 Subject: [PATCH 04/75] feat: wip --- .../block_notifier_polling_test.go | 5 +- multidownloader/evm_multidownloader.go | 22 ++- .../evm_multidownloader_rpc_test.go | 15 +- .../evm_multidownloader_syncers.go | 3 + .../evm_multidownloader_syncers_test.go | 5 +- multidownloader/evm_multidownloader_test.go | 25 ++- multidownloader/reorg_processor.go | 150 ++++++++++++++++ multidownloader/reorg_processor_port.go | 68 ++++++++ multidownloader/state.go | 7 + multidownloader/state_test.go | 46 +++++ multidownloader/storage/storage.go | 9 - multidownloader/storage/storage_block_test.go | 50 ++++++ multidownloader/storage/storage_reorg.go | 96 +++++++++++ multidownloader/storage/storage_reorg_test.go | 67 ++++++++ multidownloader/storage/storage_sync.go | 122 +++++++++++++ multidownloader/storage/storage_sync_test.go | 113 ++++++++++++ multidownloader/storage/storage_test.go | 103 ----------- .../types/mocks/mock_reorg_processor.go | 83 +++++++++ .../types/mocks/mock_storager_for_reorg.go | 162 ++++++++++++++++++ multidownloader/types/reorg_data.go | 30 ++++ multidownloader/types/reorg_error.go | 62 +++++++ multidownloader/types/reorg_processor.go | 10 ++ multidownloader/types/set_sync_segment.go | 1 + .../types/set_sync_segment_test.go | 7 +- types/block_header.go | 3 + types/list_block_header.go | 31 ++++ types/map_block_header.go | 7 + 27 files changed, 1169 insertions(+), 133 deletions(-) create mode 100644 multidownloader/reorg_processor.go create mode 100644 multidownloader/reorg_processor_port.go create mode 100644 multidownloader/state_test.go create mode 100644 multidownloader/storage/storage_block_test.go create mode 100644 multidownloader/storage/storage_reorg.go create mode 100644 multidownloader/storage/storage_reorg_test.go create mode 100644 multidownloader/storage/storage_sync.go create mode 100644 multidownloader/storage/storage_sync_test.go create mode 100644 multidownloader/types/mocks/mock_reorg_processor.go create mode 100644 multidownloader/types/mocks/mock_storager_for_reorg.go create mode 100644 multidownloader/types/reorg_data.go create mode 100644 multidownloader/types/reorg_error.go create mode 100644 multidownloader/types/reorg_processor.go create mode 100644 types/list_block_header.go create mode 100644 types/map_block_header.go diff --git a/etherman/block_notifier/block_notifier_polling_test.go b/etherman/block_notifier/block_notifier_polling_test.go index 343f82f7b..71fe7be42 100644 --- a/etherman/block_notifier/block_notifier_polling_test.go +++ b/etherman/block_notifier/block_notifier_polling_test.go @@ -21,7 +21,7 @@ import ( ) func TestExploratoryBlockNotifierPolling(t *testing.T) { - t.Skip() + t.Skip("is an exploratory test that requires an external RPC") urlRPCL1 := os.Getenv("L1URL") fmt.Println("URL=", urlRPCL1) cfg := ðermanconfig.RPCClientConfig{ @@ -117,7 +117,8 @@ func TestBlockNotifierPollingStep(t *testing.T) { }, mockLoggerFn: func() aggkitcommon.Logger { mockLogger := commonmocks.NewLogger(t) - mockLogger.EXPECT().Warnf("Missed block(s) [finality:%s]: %d -> %d", aggkittypes.LatestBlock.String(), uint64(100), uint64(105)).Once() + mockLogger.EXPECT().Infof("Missed block(s) [finality:%s]: %d -> %d", aggkittypes.LatestBlock.String(), uint64(100), uint64(105)).Once() + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything).Maybe() return mockLogger }, headerByNumberError: false, diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 422905205..990d9e43f 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -43,9 +43,8 @@ type EVMMultidownloader struct { syncersConfig mdrtypes.SetSyncerConfig reorgProcessor mdrtypes.ReorgProcessor - mutex sync.Mutex - isInitialized bool - state *State // current state of synced and pending segments + mutex sync.Mutex + state *State // current state of synced and pending segments if nil not initialized statistics *Statistics } @@ -108,9 +107,10 @@ func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) erro dh.mutex.Lock() defer dh.mutex.Unlock() - if dh.isInitialized { + if dh.isInitializedNoMutex() { return fmt.Errorf("registerSyncer: cannot add new syncer config after initialization") } + dh.syncersConfig.Add(data) return nil } @@ -226,7 +226,7 @@ func (dh *EVMMultidownloader) CheckDatabase(ctx context.Context) error { func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { dh.mutex.Lock() defer dh.mutex.Unlock() - if dh.isInitialized { + if dh.isInitializedNoMutex() { return fmt.Errorf("initialize: already initialized") } dh.log.Infof("Initializing multidownloader...") @@ -263,7 +263,6 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { } // What is pending to download? dh.state = newState - dh.isInitialized = true dh.log.Infof("Initialization completed. state: %s", dh.state.String()) return nil @@ -278,6 +277,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { if err != nil { reorgErr := mdrtypes.CastReorgError(err) if reorgErr == nil { + // TODO: Remove this panic and handle properly panic("Error running multidownloader: " + err.Error()) } dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) @@ -398,6 +398,15 @@ func getBlockNumbers(logs []types.Log) []uint64 { } return result } +func (dh *EVMMultidownloader) IsInitialized() bool { + dh.mutex.Lock() + defer dh.mutex.Unlock() + return dh.state != nil +} + +func (dh *EVMMultidownloader) isInitializedNoMutex() bool { + return dh.state != nil +} func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { dh.mutex.Lock() @@ -476,6 +485,7 @@ func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log return nil } +// TODO: ??? why I did this function?? func (dh *EVMMultidownloader) checkParent(blockHeader *aggkittypes.BlockHeader) error { if blockHeader.Number == 0 { return nil diff --git a/multidownloader/evm_multidownloader_rpc_test.go b/multidownloader/evm_multidownloader_rpc_test.go index a84983731..66baae50b 100644 --- a/multidownloader/evm_multidownloader_rpc_test.go +++ b/multidownloader/evm_multidownloader_rpc_test.go @@ -1,9 +1,11 @@ package multidownloader import ( + "fmt" "testing" "github.com/agglayer/aggkit/log" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -20,17 +22,16 @@ func TestNewEVMMultidownloaderRPC(t *testing.T) { func TestEVMMultidownloaderRPC_Status(t *testing.T) { logger := log.WithFields("module", "test") - downloader := &EVMMultidownloader{} - rpcService := NewEVMMultidownloaderRPC(logger, downloader) + testData := newEVMMultidownloaderTestData(t, false) + testData.mdr.state = NewEmptyState() + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, + mock.Anything).Return(uint64(100), nil) + rpcService := NewEVMMultidownloaderRPC(logger, testData.mdr) result, err := rpcService.Status() require.Nil(t, err) require.NotNil(t, result) - statusInfo, ok := result.(struct { - Status string `json:"status"` - }) - require.True(t, ok) - require.Equal(t, "running", statusInfo.Status) + require.Contains(t, fmt.Sprintf("%+v", result), "Status") } diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index 76349cf49..a2c671296 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -45,6 +45,9 @@ func (dh *EVMMultidownloader) BlockHeader(ctx context.Context, // FilterLogs filters the logs. It gets them from storage or waits until they are available func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { + if !dh.IsInitialized() { + return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: multidownloader not initialized") + } dh.log.Debugf("EVMMultidownloader.FilterLogs: received query: %+v", query) defer dh.log.Debugf("EVMMultidownloader.FilterLogs: finished query: %+v", query) logQuery := mdrtypes.NewLogQueryFromEthereumFilter(query) diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index 8b2f8e8cd..143eabdb7 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -167,7 +167,7 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { t.Run("FilterLogs context canceled waiting to catch up", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) - + testData.FakeInitialized(t) query := ethereum.FilterQuery{ Addresses: []common.Address{addr1}, FromBlock: big.NewInt(100), @@ -195,6 +195,9 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) + testData.MockInitialize(t, 1) + err = testData.mdr.Initialize(t.Context()) + require.NoError(t, err) query := ethereum.FilterQuery{ Addresses: []common.Address{addr1}, diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 4636bdd15..d2bf48d74 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -3,6 +3,7 @@ package multidownloader import ( "context" "fmt" + "math/big" "os" "sync" "testing" @@ -11,6 +12,7 @@ import ( jRPC "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" + "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/etherman" mockethermantypes "github.com/agglayer/aggkit/etherman/types/mocks" "github.com/agglayer/aggkit/l1infotreesync" @@ -30,7 +32,7 @@ import ( "github.com/stretchr/testify/require" ) -const runL1InfoTree = false +const runL1InfoTree = true const l1InfoTreeUseMultidownloader = true const storagePath = "../tmp/ut/" @@ -453,6 +455,26 @@ type testDataEVMMultidownloader struct { mockBlockNotifierManager *mockethermantypes.BlockNotifierManager } +func (td *testDataEVMMultidownloader) FakeInitialized(t *testing.T) { + t.Helper() + td.mdr.state = NewEmptyState() +} + +func (td *testDataEVMMultidownloader) MockInitialize(t *testing.T, chainID uint64) { + t.Helper() + chainIDBig := big.NewInt(0).SetUint64(chainID) + td.mockEthClient.EXPECT().ChainID(mock.Anything).Return(chainIDBig, nil).Maybe() + if td.mockStorage != nil { + td.mockStorage.EXPECT().GetValue(mock.Anything, mock.Anything, mock.Anything).Return("", db.ErrNotFound).Maybe() + td.mockStorage.EXPECT().InsertValue(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + td.mockStorage.EXPECT().UpsertSyncerConfigs(mock.Anything, mock.Anything).Return(nil).Maybe() + td.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything).Return(mdrtypes.NewSetSyncSegment(), nil).Maybe() + } + if td.mockBlockNotifierManager != nil { + td.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything).Return(uint64(200), nil).Maybe() + } +} + func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMMultidownloader { t.Helper() logger := log.WithFields("test", "evm_multidownloader_test") @@ -478,7 +500,6 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM require.NoError(t, err) useDB = realDB } - // TODO: Add mock for ethRPCClient if needed mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, useDB, mockBlockNotifierManager, nil) require.NoError(t, err) return &testDataEVMMultidownloader{ diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go new file mode 100644 index 000000000..4464ebd19 --- /dev/null +++ b/multidownloader/reorg_processor.go @@ -0,0 +1,150 @@ +package multidownloader + +import ( + "context" + "fmt" + "time" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgPorter interface { + NewTx(ctx context.Context) (dbtypes.Txer, error) + GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*compareBlockHeaders, error) + GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) + // Return ChainID of the inserted reorg + MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) + // Return latest block number in RPC + GetLatestBlockNumberInRPC(ctx context.Context) (uint64, error) +} + +type ReorgProcessor struct { + log aggkitcommon.Logger + port ReorgPorter + funcNow func() uint64 +} + +func NewReorgProcessor(log aggkitcommon.Logger, + ethClient aggkittypes.BaseEthereumClienter, + rpcClient aggkittypes.RPCClienter, + storage mdtypes.Storager) *ReorgProcessor { + return &ReorgProcessor{ + log: log, + port: &ReorgPort{ + ethClient: ethClient, + rpcClient: rpcClient, + storage: storage, + }, + funcNow: func() uint64 { + return uint64(time.Now().Unix()) + }, + } +} + +// After detecting a reorg at offendingBlockNumber, +// - find affected blocks +// - store the reorg info in storage +func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, + offendingBlockNumber uint64) error { + // We known that offendingBlockNumber is affected, so we go backwards until we find + // the first unaffected block + currentBlockNumber := offendingBlockNumber + tx, err := rm.port.NewTx(ctx) + if err != nil { + return fmt.Errorf("ProcessReorg: error starting new tx: %w", err) + } + committed := false + defer func() { + if !committed { + rm.log.Debugf("ProcessReorg: rolling back tx") + if err := tx.Rollback(); err != nil { + rm.log.Errorf("ProcessReorg: error rolling back tx: %v", err) + } + } + }() + + firstUnaffectedBlock, err := rm.findFirstUnaffectedBlock(ctx, tx, currentBlockNumber-1) + if err != nil { + return fmt.Errorf("ProcessReorg: error finding first unaffected block: %w", err) + } + lastBlockNumberInStorage, err := rm.port.GetLastBlockNumberInStorage(tx) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting last block number in storage: %w", err) + } + latestBlockNumberInRPC, err := rm.port.GetLatestBlockNumberInRPC(ctx) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting latest block number in RPC: %w", err) + } + rm.log.Infof("ProcessReorg: reorg detected from block %d to block %d", + currentBlockNumber+1, lastBlockNumberInStorage) + + reorgData := mdtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(firstUnaffectedBlock+1, lastBlockNumberInStorage), + DetectedAtBlock: lastBlockNumberInStorage, + DetectedTimestamp: rm.funcNow(), + NetworkLatestBlock: latestBlockNumberInRPC, + NetworkFinalizedBlock: firstUnaffectedBlock, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + chainID, err := rm.port.MoveReorgedBlocks(tx, reorgData) + if err != nil { + return fmt.Errorf("ProcessReorg: error moving reorged blocks: %w", err) + } + reorgData.ChainID = chainID + committed = true + if err := tx.Commit(); err != nil { + return fmt.Errorf("ProcessReorg: cannot commit tx: %w", err) + } + rm.log.Warnf("ProcessReorg: finalized reorgProcess: %s", reorgData.String()) + return nil +} + +func (rm *ReorgProcessor) findFirstUnaffectedBlock(ctx context.Context, + tx dbtypes.Querier, + startBlockNumber uint64) (uint64, error) { + currentBlockNumber := startBlockNumber + for { + if currentBlockNumber == 0 { + // Genesis block reached, stop here + return 0, fmt.Errorf("findFirstUnaffectedBlock: genesis block reached while checking reorgs, "+ + "cannot find unaffected block. First block checked: %d", startBlockNumber) + } + data, err := rm.port.GetBlockStorageAndRPC(ctx, tx, currentBlockNumber) + if err != nil { + return 0, err + } + match, err := rm.checkBlocks(data) + if err != nil { + return 0, err + } + if match { + // Found the first unaffected block + return currentBlockNumber, nil + } + currentBlockNumber-- + } +} + +// checkBlocks compares storage and rpc block headers and returns true if they match +func (rm *ReorgProcessor) checkBlocks(blocks *compareBlockHeaders) (bool, error) { + if blocks == nil || blocks.StorageHeader == nil || blocks.RpcHeader == nil { + // Block not in storage, so it is a reorg + return false, fmt.Errorf("checkBlocks bad input data (nil)") + } + if blocks.StorageHeader.Number != blocks.RpcHeader.Number { + return false, fmt.Errorf("checkBlocks block numbers do not match: storage=%d rpc=%d", + blocks.StorageHeader.Number, blocks.RpcHeader.Number) + } + // This is a sanity check, never have to happen because we trust in finalized blocks! + if blocks.StorageHeader.Hash != blocks.RpcHeader.Hash { + if blocks.IsFinalized == mdtypes.Finalized { + rm.log.Warnf("checkBlocks: block %d is finalized and mismatch hash %s!=%s", blocks.StorageHeader.Number, + blocks.StorageHeader.Hash.Hex(), blocks.RpcHeader.Hash.Hex()) + } + return false, nil + } + return true, nil +} diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go new file mode 100644 index 000000000..8d8726a64 --- /dev/null +++ b/multidownloader/reorg_processor_port.go @@ -0,0 +1,68 @@ +package multidownloader + +import ( + "context" + "fmt" + "math/big" + + dbtypes "github.com/agglayer/aggkit/db/types" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type compareBlockHeaders struct { + StorageHeader *aggkittypes.BlockHeader + IsFinalized mdtypes.FinalizedType + RpcHeader *aggkittypes.BlockHeader +} + +type ReorgPort struct { + ethClient aggkittypes.BaseEthereumClienter + rpcClient aggkittypes.RPCClienter + storage mdtypes.Storager +} + +func (r *ReorgPort) NewTx(ctx context.Context) (dbtypes.Txer, error) { + return r.storage.NewTx(ctx) +} + +func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, + blockNumber uint64) (*compareBlockHeaders, error) { + currentStorageBlock, finalized, err := r.storage.GetBlockHeaderByNumber(tx, blockNumber) + if err != nil { + return nil, err + } + number := big.NewInt(0).SetUint64(blockNumber) + rpcBlock, err := r.ethClient.BlockByNumber(ctx, number) + if err != nil { + return nil, err + } + return &compareBlockHeaders{ + StorageHeader: currentStorageBlock, + IsFinalized: finalized, + RpcHeader: aggkittypes.NewBlockHeaderFromEthHeader(rpcBlock.Header()), + }, nil +} + +func (r *ReorgPort) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) { + highestBlock, _, err := r.storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + if err != nil { + return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block from storage: %w", err) + } + if highestBlock == nil { + return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block (=nil) from storage") + } + return highestBlock.Number, nil +} + +func (r *ReorgPort) MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) { + return r.storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) +} + +func (r *ReorgPort) GetLatestBlockNumberInRPC(ctx context.Context) (uint64, error) { + latestBlockNumber, err := r.ethClient.BlockNumber(ctx) + if err != nil { + return 0, fmt.Errorf("GetLatestBlockNumber: error getting latest block number from RPC: %w", err) + } + return latestBlockNumber, nil +} diff --git a/multidownloader/state.go b/multidownloader/state.go index 567183021..eaa45f8c8 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -18,6 +18,13 @@ type State struct { Pending mdrtypes.SetSyncSegment } +func NewEmptyState() *State { + return &State{ + Synced: mdrtypes.NewSetSyncSegment(), + Pending: mdrtypes.NewSetSyncSegment(), + } +} + func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) *State { return &State{ Synced: *synced, diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go new file mode 100644 index 000000000..4d3f3cc6d --- /dev/null +++ b/multidownloader/state_test.go @@ -0,0 +1,46 @@ +package multidownloader + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestStateInitial(t *testing.T) { + addr1 := common.HexToAddress("0x10") + addr2 := common.HexToAddress("0x20") + storageData := mdtypes.NewSetSyncSegment() + storageData.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.BlockRangeZero, aggkittypes.FinalizedBlock, + false)) + storageData.Add(mdtypes.NewSyncSegment(addr2, + aggkitcommon.BlockRangeZero, aggkittypes.LatestBlock, + false)) + configData := mdtypes.NewSetSyncSegment() + segment1 := mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(0, 1000), aggkittypes.FinalizedBlock, + false) + segment2 := mdtypes.NewSyncSegment(addr2, + aggkitcommon.NewBlockRange(0, 2000), aggkittypes.LatestBlock, + false) + configData.Add(segment1) + configData.Add(segment2) + + state, err := NewStateFromStorageSyncedBlocks(storageData, configData) + require.NoError(t, err) + require.NotNil(t, state) + logQuery := mdtypes.NewLogQuery( + 123, 456, []common.Address{addr1}) + + err = state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + pendingSegments := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(pendingSegments)) + require.Equal(t, addr1, pendingSegments[0].ContractAddr) + require.Equal(t, aggkitcommon.NewBlockRange(0, 1000), pendingSegments[0].BlockRange) + require.Equal(t, aggkittypes.FinalizedBlock, pendingSegments[0].TargetToBlock) +} diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index e3fae16cd..ab390ea09 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -60,15 +60,6 @@ func NewLogRowsFromEthLogs(logs []types.Log) []*logRow { return rows } -type syncStatusRow struct { - Address common.Address `meddler:"contract_address,address"` - TargetFromBlock uint64 `meddler:"target_from_block"` - TargetToBlock string `meddler:"target_to_block"` - SyncedFromBlock uint64 `meddler:"synced_from_block"` - SyncedToBlock uint64 `meddler:"synced_to_block"` - SyncersIDs string `meddler:"syncers_id"` -} - func NewLogRowFromEthLog(log types.Log) *logRow { topicsJSON, err := json.Marshal(log.Topics) if err != nil { diff --git a/multidownloader/storage/storage_block_test.go b/multidownloader/storage/storage_block_test.go new file mode 100644 index 000000000..7083f66ff --- /dev/null +++ b/multidownloader/storage/storage_block_test.go @@ -0,0 +1,50 @@ +package storage + +import ( + "testing" + + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_GetBlock(t *testing.T) { + storage := newStorageForTest(t, nil) + // BlockBase not present + blockHeader, _, err := storage.GetBlockHeaderByNumber(nil, 1234) + require.NoError(t, err, "cannot get BlockHeader") + require.Nil(t, blockHeader, "expected nil BlockHeader") + block := aggkittypes.NewBlockHeader(1234, exampleTestHash[0], 5678, &exampleTestHash[1]) + err = storage.saveAggkitBlock(nil, block, true) + require.NoError(t, err, "cannot insert BlockHeader") + // Get and verify block + readBlock, isFinal, err := storage.GetBlockHeaderByNumber(nil, 1234) + require.NoError(t, err, "cannot get BlockHeader") + require.NotNil(t, readBlock, "expected non-nil BlockHeader") + require.Equal(t, block, readBlock, "BlockHeader mismatch") + require.True(t, isFinal, "expected block to be final") + + blockNilParentHash := aggkittypes.NewBlockHeader(1235, exampleTestHash[0], 5678, nil) + err = storage.saveAggkitBlock(nil, blockNilParentHash, true) + require.NoError(t, err, "cannot get BlockHeader") + readBlock, _, err = storage.GetBlockHeaderByNumber(nil, blockNilParentHash.Number) + require.NoError(t, err, "cannot get BlockHeader") + require.Equal(t, blockNilParentHash, readBlock, "BlockHeader mismatch") +} + +func TestStorage_GetRangeBlockHeader(t *testing.T) { + storage := newStorageForTest(t, nil) + block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) + err := storage.saveAggkitBlock(nil, block, mdtypes.NotFinalized) + require.NoError(t, err, "cannot insert BlockHeader") + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.Equal(t, block, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block, highest, "highest BlockHeader mismatch") + + lowest, highest, err = storage.GetRangeBlockHeader(nil, mdtypes.Finalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.True(t, lowest.Empty(), "lowest BlockHeader mismatch") + require.True(t, highest.Empty(), "highest BlockHeader mismatch") +} diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go new file mode 100644 index 000000000..522803f78 --- /dev/null +++ b/multidownloader/storage/storage_reorg.go @@ -0,0 +1,96 @@ +package storage + +import ( + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/russross/meddler" +) + +type reorgRow struct { + ChainID uint64 `meddler:"chain_id"` + DetectedAtBlock uint64 `meddler:"detected_at_block"` + ReorgedFromBlock uint64 `meddler:"reorged_from_block"` + ReorgedToBlock uint64 `meddler:"reorged_to_block"` + DetectedTimestamp uint64 `meddler:"detected_timestamp"` + NetworkLatestBlock uint64 `meddler:"network_latest_block"` + NetworkFinalizedBlock uint64 `meddler:"network_finalized_block"` + NetworkFinalizedBlockName string `meddler:"network_finalized_block_name"` +} + +func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { + return &reorgRow{ + ChainID: reorgData.ChainID, + DetectedAtBlock: reorgData.DetectedAtBlock, + ReorgedFromBlock: reorgData.BlockRangeAffected.FromBlock, + ReorgedToBlock: reorgData.BlockRangeAffected.ToBlock, + DetectedTimestamp: reorgData.DetectedTimestamp, + NetworkLatestBlock: reorgData.NetworkLatestBlock, + NetworkFinalizedBlock: reorgData.NetworkFinalizedBlock, + NetworkFinalizedBlockName: reorgData.NetworkFinalizedBlockName.String(), + } +} + +// returns ChainID of the inserted reorg +func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, + reorgData mdrtypes.ReorgData) (uint64, error) { + if tx == nil { + return 0, fmt.Errorf("InsertNewReorg: require a tx because it done multiples operations") + } + reorgRow := newReorgRowFromReorgData(reorgData) + a.mutex.Lock() + defer a.mutex.Unlock() + // Get Next ChainID from storage using rowid + lastChainID := struct { + ChainID *uint64 `meddler:"chain_id"` + }{} + err := meddler.QueryRow(tx, &lastChainID, "SELECT MAX(chain_id) as chain_id FROM reorgs") + if err != nil { + return 0, fmt.Errorf("InsertNewReorg: error getting last chain_id: %w", err) + } + if lastChainID.ChainID == nil { + reorgRow.ChainID = 1 + } else { + reorgRow.ChainID = *lastChainID.ChainID + 1 + } + + if err := meddler.Insert(tx, "reorgs", reorgRow); err != nil { + return 0, fmt.Errorf("InsertNewReorg: error inserting reorgs (%s): %w", reorgData.String(), err) + } + if err := a.moveReorgedBlocksAndLogsNoMutex(tx, reorgRow.ChainID, + reorgData.BlockRangeAffected); err != nil { + return 0, fmt.Errorf("InsertNewReorg: error moving reorged blocks to block_reorged: %w", err) + } + return reorgRow.ChainID, nil +} + +func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Querier, chainID uint64, + blockRangeAffected aggkitcommon.BlockRange) error { + a.logger.Debugf("storage: moving blocks to blocks_reorged - chain_id: %d, range: %s", + chainID, blockRangeAffected.String()) + query := `INSERT INTO blocks_reorged (chain_id, block_number, block_hash,block_parent_hash, block_timestamp) + SELECT ?, block_number, block_hash, block_parent_hash, block_timestamp + FROM blocks + WHERE block_number >= ? AND block_number <= ?; + INSERT INTO logs_reorged (chain_id, block_number, address,topics, data, tx_hash, tx_index, log_index) + SELECT ?, block_number, address, topics, data, tx_hash, tx_index, log_index + FROM logs + WHERE block_number >= ? AND block_number <= ?; + DELETE FROM logs + WHERE block_number >= ? AND block_number <= ?; + DELETE FROM blocks + WHERE block_number >= ? AND block_number <= ?;` + _, err := tx.Exec(query, + chainID, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + chainID, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock) + if err != nil { + return fmt.Errorf("moveReorgedBlocks: error moving reorged blocks to block_reorged: %w", err) + } + return nil +} diff --git a/multidownloader/storage/storage_reorg_test.go b/multidownloader/storage/storage_reorg_test.go new file mode 100644 index 000000000..e2510855a --- /dev/null +++ b/multidownloader/storage/storage_reorg_test.go @@ -0,0 +1,67 @@ +package storage + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_InsertNewReorg(t *testing.T) { + storage := newStorageForTest(t, nil) + reorgData := mdrtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(5000, 5010), + DetectedAtBlock: 5020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 6000, + NetworkFinalizedBlock: 5990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(1), chainID, "first chain ID must be 1") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") + + tx, err = storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + chainID, err = storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(2), chainID, "second chain ID must be 2") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") +} + +func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { + storage := newStorageForTest(t, nil) + populateLogsAndBlocksForTest(t, storage, + 5000, 20, 5) + + reorgData := mdrtypes.ReorgData{ + ChainID: 0, // will be set by InsertNewReorg + BlockRangeAffected: aggkitcommon.NewBlockRange(5005, 5015), + DetectedAtBlock: 5020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 6000, + NetworkFinalizedBlock: 5990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(1), chainID, "first chain ID must be 1") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") + // Now check that blocks from 5005 to 5015 are in block_reorged + for i := uint64(5005); i <= 5015; i++ { + hdr, _, err := storage.GetBlockHeaderByNumber(nil, i) + require.NoError(t, err) + require.Nil(t, hdr, "block header should not be in blocks table anymore") + } +} diff --git a/multidownloader/storage/storage_sync.go b/multidownloader/storage/storage_sync.go new file mode 100644 index 000000000..a8beeafe9 --- /dev/null +++ b/multidownloader/storage/storage_sync.go @@ -0,0 +1,122 @@ +package storage + +import ( + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +type syncStatusRow struct { + Address common.Address `meddler:"contract_address,address"` + TargetFromBlock uint64 `meddler:"target_from_block"` + TargetToBlock string `meddler:"target_to_block"` + SyncedFromBlock uint64 `meddler:"synced_from_block"` + SyncedToBlock uint64 `meddler:"synced_to_block"` + SyncersIDs string `meddler:"syncers_id"` +} + +func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { + targetToBlock, err := aggkittypes.NewBlockNumberFinality(r.TargetToBlock) + if err != nil { + return mdrtypes.SyncSegment{}, fmt.Errorf("ToSyncSegment: error parsing target to block finality (%s): %w", + r.TargetToBlock, err) + } + return mdrtypes.SyncSegment{ + ContractAddr: r.Address, + TargetToBlock: targetToBlock, + BlockRange: aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock), + }, nil +} + +func (a *MultidownloaderStorage) GetSyncedBlockRangePerContract(tx dbtypes.Querier) (mdrtypes.SetSyncSegment, error) { + a.mutex.RLock() + defer a.mutex.RUnlock() + result := make([]*syncStatusRow, 0) + if tx == nil { + tx = a.db + } + err := meddler.QueryAll(tx, &result, "SELECT * FROM sync_status") + if err != nil { + return mdrtypes.SetSyncSegment{}, fmt.Errorf("error querying sync status: %w", err) + } + setSegments := mdrtypes.NewSetSyncSegment() + for _, row := range result { + segment, err := row.ToSyncSegment() + if err != nil { + return mdrtypes.SetSyncSegment{}, + fmt.Errorf("GetSyncedBlockRangePerContract: error converting row to sync segment: %w", err) + } + setSegments.Add(segment) + } + return setSegments, nil +} + +func (a *MultidownloaderStorage) UpdateSyncedStatus(tx dbtypes.Querier, + segments []mdrtypes.SyncSegment) error { + if tx == nil { + tx = a.db + } + query := ` + UPDATE sync_status SET + synced_from_block = ?, + synced_to_block = ? + WHERE contract_address = ?; + ` + a.mutex.Lock() + defer a.mutex.Unlock() + for _, segment := range segments { + result, err := tx.Exec(query, segment.BlockRange.FromBlock, + segment.BlockRange.ToBlock, segment.ContractAddr.Hex()) + if err != nil { + return fmt.Errorf("error updating %s sync status: %w", segment.String(), err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("error getting rows affected for contract %s: %w", + segment.ContractAddr.Hex(), err) + } + if rowsAffected == 0 { + return fmt.Errorf("no rows updated for contract %s", segment.ContractAddr.Hex()) + } + } + return nil +} + +func (a *MultidownloaderStorage) UpsertSyncerConfigs(tx dbtypes.Querier, configs []mdrtypes.ContractConfig) error { + if tx == nil { + tx = a.db + } + a.mutex.Lock() + defer a.mutex.Unlock() + for _, config := range configs { + row := syncStatusRow{ + Address: config.Address, + TargetFromBlock: config.FromBlock, + TargetToBlock: config.ToBlock.String(), + SyncedFromBlock: 0, + SyncedToBlock: 0, + SyncersIDs: fmt.Sprintf("%v", config.Syncers), + } + // Upsert logic + query := ` + INSERT INTO sync_status (contract_address, target_from_block, + target_to_block, synced_from_block, synced_to_block, syncers_id) + VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT(contract_address) DO UPDATE SET + target_from_block = excluded.target_from_block, + target_to_block = excluded.target_to_block, + syncers_id = excluded.syncers_id + ` + _, err := tx.Exec(query, row.Address.Hex(), row.TargetFromBlock, row.TargetToBlock, + row.SyncedFromBlock, row.SyncedToBlock, row.SyncersIDs) + if err != nil { + return fmt.Errorf("error updating sync status: %w", err) + } + } + return nil +} diff --git a/multidownloader/storage/storage_sync_test.go b/multidownloader/storage/storage_sync_test.go new file mode 100644 index 000000000..3d9334cac --- /dev/null +++ b/multidownloader/storage/storage_sync_test.go @@ -0,0 +1,113 @@ +package storage + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_GetSyncedBlockRangePerContract(t *testing.T) { + storage := newStorageForTest(t, nil) + data, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, "SetSyncSegment: ", data.String()) +} + +func TestStorage_UpsertSyncerConfigs(t *testing.T) { + storage := newStorageForTest(t, nil) + configs := []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1000, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 2000, + ToBlock: aggkittypes.LatestBlock, + }, + } + err := storage.UpsertSyncerConfigs(nil, configs) + require.NoError(t, err) + + // Upsert again with different start block + configsUpdated := []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1300, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 1600, + ToBlock: aggkittypes.FinalizedBlock, + }, + } + err = storage.UpsertSyncerConfigs(nil, configsUpdated) + require.NoError(t, err) + + syncSegments, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, 2, len(syncSegments.GetAddressesForBlockRange( + aggkitcommon.NewBlockRange(0, 10000), + ))) + seg1, exists := syncSegments.GetByContract(exampleAddr1) + require.True(t, exists) + require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) + require.Equal(t, aggkitcommon.BlockRangeZero, seg1.BlockRange) + + seg2, exists := syncSegments.GetByContract(exampleAddr2) + require.True(t, exists) + require.Equal(t, aggkittypes.FinalizedBlock, seg2.TargetToBlock) +} + +func TestStorage_UpdateSyncedStatus(t *testing.T) { + storage := newStorageForTest(t, nil) + segments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + exampleAddr1, + aggkitcommon.NewBlockRange(1000, 2000), + aggkittypes.FinalizedBlock, + true, + ), + mdrtypes.NewSyncSegment( + exampleAddr2, + aggkitcommon.NewBlockRange(1500, 2500), + aggkittypes.LatestBlock, + false, + ), + } + err := storage.UpsertSyncerConfigs(nil, []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1000, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 1500, + ToBlock: aggkittypes.LatestBlock, + }, + }) + require.NoError(t, err) + err = storage.UpdateSyncedStatus(nil, segments) + require.NoError(t, err) + + syncedSegments, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, 2, len(syncedSegments.GetAddressesForBlockRange( + aggkitcommon.NewBlockRange(0, 3000), + ))) + seg1, exists := syncedSegments.GetByContract(exampleAddr1) + require.True(t, exists) + require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), seg1.BlockRange) + require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) + + seg2, exists := syncedSegments.GetByContract(exampleAddr2) + require.True(t, exists) + require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange) + require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock) +} diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go index d9707bb06..7fae3f2ca 100644 --- a/multidownloader/storage/storage_test.go +++ b/multidownloader/storage/storage_test.go @@ -4,7 +4,6 @@ import ( "path" "testing" - aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/log" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" @@ -173,108 +172,6 @@ func TestStorage_SaveEthLogsWithHeaders(t *testing.T) { require.Equal(t, logs[1], readLogs[1]) } -func TestStorage_GetSyncedBlockRangePerContract(t *testing.T) { - storage := newStorageForTest(t, nil) - data, err := storage.GetSyncedBlockRangePerContract(nil) - require.NoError(t, err) - require.Equal(t, "SetSyncSegment: ", data.String()) -} - -func TestStorage_UpsertSyncerConfigs(t *testing.T) { - storage := newStorageForTest(t, nil) - configs := []mdrtypes.ContractConfig{ - { - Address: exampleAddr1, - FromBlock: 1000, - ToBlock: aggkittypes.FinalizedBlock, - }, - { - Address: exampleAddr2, - FromBlock: 2000, - ToBlock: aggkittypes.LatestBlock, - }, - } - err := storage.UpsertSyncerConfigs(nil, configs) - require.NoError(t, err) - - // Upsert again with different start block - configsUpdated := []mdrtypes.ContractConfig{ - { - Address: exampleAddr1, - FromBlock: 1300, - ToBlock: aggkittypes.FinalizedBlock, - }, - { - Address: exampleAddr2, - FromBlock: 1600, - ToBlock: aggkittypes.FinalizedBlock, - }, - } - err = storage.UpsertSyncerConfigs(nil, configsUpdated) - require.NoError(t, err) - - syncSegments, err := storage.GetSyncedBlockRangePerContract(nil) - require.NoError(t, err) - require.Equal(t, 2, len(syncSegments.GetAddressesForBlockRange( - aggkitcommon.NewBlockRange(0, 10000), - ))) - seg1, exists := syncSegments.GetByContract(exampleAddr1) - require.True(t, exists) - require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) - - seg2, exists := syncSegments.GetByContract(exampleAddr2) - require.True(t, exists) - require.Equal(t, aggkittypes.FinalizedBlock, seg2.TargetToBlock) -} - -func TestStorage_UpdateSyncedStatus(t *testing.T) { - storage := newStorageForTest(t, nil) - segments := []mdrtypes.SyncSegment{ - mdrtypes.NewSyncSegment( - exampleAddr1, - aggkitcommon.NewBlockRange(1000, 2000), - aggkittypes.FinalizedBlock, - true, - ), - mdrtypes.NewSyncSegment( - exampleAddr2, - aggkitcommon.NewBlockRange(1500, 2500), - aggkittypes.LatestBlock, - false, - ), - } - err := storage.UpsertSyncerConfigs(nil, []mdrtypes.ContractConfig{ - { - Address: exampleAddr1, - FromBlock: 1000, - ToBlock: aggkittypes.FinalizedBlock, - }, - { - Address: exampleAddr2, - FromBlock: 1500, - ToBlock: aggkittypes.LatestBlock, - }, - }) - require.NoError(t, err) - err = storage.UpdateSyncedStatus(nil, segments) - require.NoError(t, err) - - syncedSegments, err := storage.GetSyncedBlockRangePerContract(nil) - require.NoError(t, err) - require.Equal(t, 2, len(syncedSegments.GetAddressesForBlockRange( - aggkitcommon.NewBlockRange(0, 3000), - ))) - seg1, exists := syncedSegments.GetByContract(exampleAddr1) - require.True(t, exists) - require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), seg1.BlockRange) - require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) - - seg2, exists := syncedSegments.GetByContract(exampleAddr2) - require.True(t, exists) - require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange) - require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock) -} - func TestStorage_UpdateIsFinal(t *testing.T) { storage := newStorageForTest(t, nil) block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go new file mode 100644 index 000000000..e9b869d3e --- /dev/null +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -0,0 +1,83 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// ReorgProcessor is an autogenerated mock type for the ReorgProcessor type +type ReorgProcessor struct { + mock.Mock +} + +type ReorgProcessor_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgProcessor) EXPECT() *ReorgProcessor_Expecter { + return &ReorgProcessor_Expecter{mock: &_m.Mock} +} + +// ProcessReorg provides a mock function with given fields: ctx, offendingBlockNumber +func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, offendingBlockNumber uint64) error { + ret := _m.Called(ctx, offendingBlockNumber) + + if len(ret) == 0 { + panic("no return value specified for ProcessReorg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, offendingBlockNumber) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReorgProcessor_ProcessReorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessReorg' +type ReorgProcessor_ProcessReorg_Call struct { + *mock.Call +} + +// ProcessReorg is a helper method to define mock.On call +// - ctx context.Context +// - offendingBlockNumber uint64 +func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, offendingBlockNumber interface{}) *ReorgProcessor_ProcessReorg_Call { + return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, offendingBlockNumber)} +} + +func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, offendingBlockNumber uint64)) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ReorgProcessor_ProcessReorg_Call) Return(_a0 error) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, uint64) error) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgProcessor creates a new instance of ReorgProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReorgProcessor(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgProcessor { + mock := &ReorgProcessor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/mocks/mock_storager_for_reorg.go b/multidownloader/types/mocks/mock_storager_for_reorg.go new file mode 100644 index 000000000..74bf29868 --- /dev/null +++ b/multidownloader/types/mocks/mock_storager_for_reorg.go @@ -0,0 +1,162 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + aggkittypes "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" + + types "github.com/agglayer/aggkit/db/types" +) + +// StoragerForReorg is an autogenerated mock type for the StoragerForReorg type +type StoragerForReorg struct { + mock.Mock +} + +type StoragerForReorg_Expecter struct { + mock *mock.Mock +} + +func (_m *StoragerForReorg) EXPECT() *StoragerForReorg_Expecter { + return &StoragerForReorg_Expecter{mock: &_m.Mock} +} + +// GetBlockHeaderByNumber provides a mock function with given fields: tx, blockNumber +func (_m *StoragerForReorg) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { + ret := _m.Called(tx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 multidownloadertypes.FinalizedType + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { + return rf(tx, blockNumber) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64) *aggkittypes.BlockHeader); ok { + r0 = rf(tx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64) multidownloadertypes.FinalizedType); ok { + r1 = rf(tx, blockNumber) + } else { + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) + } + + if rf, ok := ret.Get(2).(func(types.Querier, uint64) error); ok { + r2 = rf(tx, blockNumber) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// StoragerForReorg_GetBlockHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeaderByNumber' +type StoragerForReorg_GetBlockHeaderByNumber_Call struct { + *mock.Call +} + +// GetBlockHeaderByNumber is a helper method to define mock.On call +// - tx types.Querier +// - blockNumber uint64 +func (_e *StoragerForReorg_Expecter) GetBlockHeaderByNumber(tx interface{}, blockNumber interface{}) *StoragerForReorg_GetBlockHeaderByNumber_Call { + return &StoragerForReorg_GetBlockHeaderByNumber_Call{Call: _e.mock.On("GetBlockHeaderByNumber", tx, blockNumber)} +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) Run(run func(tx types.Querier, blockNumber uint64)) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// InsertReorgAndMoveReorgedBlocksAndLogs provides a mock function with given fields: tx, reorgData +func (_m *StoragerForReorg) InsertReorgAndMoveReorgedBlocksAndLogs(tx types.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for InsertReorgAndMoveReorgedBlocksAndLogs") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertReorgAndMoveReorgedBlocksAndLogs' +type StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call struct { + *mock.Call +} + +// InsertReorgAndMoveReorgedBlocksAndLogs is a helper method to define mock.On call +// - tx types.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *StoragerForReorg_Expecter) InsertReorgAndMoveReorgedBlocksAndLogs(tx interface{}, reorgData interface{}) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + return &StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call{Call: _e.mock.On("InsertReorgAndMoveReorgedBlocksAndLogs", tx, reorgData)} +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Run(run func(tx types.Querier, reorgData multidownloadertypes.ReorgData)) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Return(_a0 uint64, _a1 error) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(run) + return _c +} + +// NewStoragerForReorg creates a new instance of StoragerForReorg. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStoragerForReorg(t interface { + mock.TestingT + Cleanup(func()) +}) *StoragerForReorg { + mock := &StoragerForReorg{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/reorg_data.go b/multidownloader/types/reorg_data.go new file mode 100644 index 000000000..dbfea44b7 --- /dev/null +++ b/multidownloader/types/reorg_data.go @@ -0,0 +1,30 @@ +package types + +import ( + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgData struct { + ChainID uint64 + BlockRangeAffected aggkitcommon.BlockRange + DetectedAtBlock uint64 + DetectedTimestamp uint64 + NetworkLatestBlock uint64 + NetworkFinalizedBlock uint64 + NetworkFinalizedBlockName aggkittypes.BlockNumberFinality +} + +func (r *ReorgData) String() string { + return fmt.Sprintf("ReorgData{ChainID: %d, BlockRangeAffected: %s, DetectedAtBlock: %d, DetectedTimestamp: %d, "+ + "NetworkLatestBlock: %d, NetworkFinalizedBlock: %d (%s)}", + r.ChainID, + r.BlockRangeAffected.String(), + r.DetectedAtBlock, + r.DetectedTimestamp, + r.NetworkLatestBlock, + r.NetworkFinalizedBlock, + r.NetworkFinalizedBlockName.String()) +} diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go new file mode 100644 index 000000000..01ad209d1 --- /dev/null +++ b/multidownloader/types/reorg_error.go @@ -0,0 +1,62 @@ +package types + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +// ReorgError is an error that is raised when a reorg is detected +type ReorgError struct { + OffendingBlockNumber uint64 + OldHash common.Hash + NewHash common.Hash + Message string +} + +// IsReorgError checks if an error is a ReorgError +func IsReorgError(err error) bool { + c := CastReorgError(err) + return c != nil +} + +// NewReorgError creates a new ReorgError +func NewReorgError(offendingBlockNumber uint64, + oldHash, newHash common.Hash, msg string) *ReorgError { + return &ReorgError{ + OffendingBlockNumber: offendingBlockNumber, + OldHash: oldHash, + NewHash: newHash, + Message: msg, + } +} + +func (e *ReorgError) Error() string { + return fmt.Sprintf("reorgError: block number %d: old hash %s != new hash %s: %s", + e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) +} + +func CastReorgError(err error) *ReorgError { + var reorgErr *ReorgError + if errors.As(err, &reorgErr) { + return reorgErr + } + return nil +} + +// // GetReorgErrorBlockNumber returns the block number that caused the reorg +// func GetReorgErrorBlockNumber(err error) uint64 { +// if reorgErr, ok := err.(*ReorgError); ok { +// return reorgErr.BlockNumber +// } +// return 0 +// } + +// // GetReorgErrorWrappedError returns the wrapped error that caused the reorg +// func GetReorgErrorWrappedError(err error) error { +// if reorgErr, ok := err.(*ReorgError); ok { +// return reorgErr.Err +// } +// return nil +// } diff --git a/multidownloader/types/reorg_processor.go b/multidownloader/types/reorg_processor.go new file mode 100644 index 000000000..537815a72 --- /dev/null +++ b/multidownloader/types/reorg_processor.go @@ -0,0 +1,10 @@ +package types + +import "context" + +type ReorgProcessor interface { + // ProcessReorg processes a detected reorg starting from the offending block number. + // It identifies the range of blocks affected by the reorg and takes necessary actions + // to handle the reorganization. + ProcessReorg(ctx context.Context, offendingBlockNumber uint64) error +} diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 40e1c3152..6d0416827 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -63,6 +63,7 @@ func (s *SetSyncSegment) Add(segment SyncSegment) { } // GetByContract returns the SyncSegment for the given contract address + func (s *SetSyncSegment) GetByContract(addr common.Address) (SyncSegment, bool) { if s == nil { return SyncSegment{}, false diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 751cfffcf..bf29775c4 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -364,7 +364,7 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Equal(t, uint64(100), res.BlockRange.ToBlock) }) - t.Run("remove totally a segment", func(t *testing.T) { + t.Run("fulfill totally a segment,set it as empty ", func(t *testing.T) { set := NewSetSyncSegment() addr := common.HexToAddress("0x123") segment := SyncSegment{ @@ -380,8 +380,9 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { err := set.SubtractLogQuery(logQuery) require.NoError(t, err) - _, exists := set.GetByContract(addr) - require.False(t, exists) + segment, exists := set.GetByContract(addr) + require.True(t, segment.IsEmpty(), "segment is empty") + require.True(t, exists, "is empty but exists") }) t.Run("bad removed segment (middle segment)", func(t *testing.T) { diff --git a/types/block_header.go b/types/block_header.go index 184107dcc..3dc08a53f 100644 --- a/types/block_header.go +++ b/types/block_header.go @@ -34,6 +34,9 @@ func NewBlockHeaderFromEthHeader(ethHeader *types.Header) *BlockHeader { ethHeader.Time, ðHeader.ParentHash) } +func (gb *BlockHeader) Empty() bool { + return gb == nil || gb.Number == 0 +} func (gb *BlockHeader) String() string { if gb == nil { diff --git a/types/list_block_header.go b/types/list_block_header.go new file mode 100644 index 000000000..081d3ec16 --- /dev/null +++ b/types/list_block_header.go @@ -0,0 +1,31 @@ +package types + +import "sort" + +type ListBlockHeaders []*BlockHeader + +func NewListBlockHeadersEmpty(preAllocatedSize int) ListBlockHeaders { + return ListBlockHeaders(make([]*BlockHeader, 0, preAllocatedSize)) +} +func (lbs ListBlockHeaders) Len() int { + return len(lbs) +} + +func (lbs ListBlockHeaders) ToMap() MapBlockHeaders { + result := NewMapBlockHeadersEmpty(lbs.Len()) + for _, header := range lbs { + result[header.Number] = header + } + return result +} + +func (lbs ListBlockHeaders) BlockNumbers() []uint64 { + result := make([]uint64, 0, len(lbs)) + for _, header := range lbs { + result = append(result, header.Number) + } + sort.Slice(result, func(i, j int) bool { + return result[i] < result[j] + }) + return result +} diff --git a/types/map_block_header.go b/types/map_block_header.go new file mode 100644 index 000000000..76d9529eb --- /dev/null +++ b/types/map_block_header.go @@ -0,0 +1,7 @@ +package types + +type MapBlockHeaders map[uint64]*BlockHeader + +func NewMapBlockHeadersEmpty(preAllocatedSize int) MapBlockHeaders { + return MapBlockHeaders(make(map[uint64]*BlockHeader, preAllocatedSize)) +} From d2e3ea429fa5e3ac4ab59736ca7d10dbac880043 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 19 Jan 2026 17:30:52 +0100 Subject: [PATCH 05/75] fix: lint errors --- multidownloader/evm_multidownloader.go | 54 +++++++------ multidownloader/reorg_processor_port.go | 6 +- multidownloader/storage/storage.go | 101 ------------------------ multidownloader/storage/storage_sync.go | 2 +- 4 files changed, 31 insertions(+), 132 deletions(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 990d9e43f..44b20a687 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -486,32 +486,34 @@ func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log } // TODO: ??? why I did this function?? -func (dh *EVMMultidownloader) checkParent(blockHeader *aggkittypes.BlockHeader) error { - if blockHeader.Number == 0 { - return nil - } - parentHeader, isFinalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockHeader.Number-1) - if err != nil { - return fmt.Errorf("checkParent: cannot get parent block header for block number %d: %w", blockHeader.Number, err) - } - if parentHeader == nil { - return fmt.Errorf("checkParent: parent block header for block number %d not found in storage", blockHeader.Number-1) - } - // Parenthash (from DB) doesn't match parent Hash of first blockHeader, but parent is finalized - // so the discrepancy is the new block that is discarded without reorg (still not in DB) - if isFinalized && blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { - return fmt.Errorf("checkParent: "+ - "parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", - blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String()) - } - if blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { - // Parenthash mismatch, reorg detected - return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, - *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", - blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) - } - return nil -} +// TODO: remove +// func (dh *EVMMultidownloader) checkParent(blockHeader *aggkittypes.BlockHeader) error { +// if blockHeader.Number == 0 { +// return nil +// } +// parentHeader, isFinalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockHeader.Number-1) +// if err != nil { +// return fmt.Errorf("checkParent: cannot get parent block header for block number %d: %w", blockHeader.Number, err) +// } +// if parentHeader == nil { +// return fmt.Errorf("checkParent: parent block header for block number %d not found in storage", +// blockHeader.Number-1) +// } +// // Parenthash (from DB) doesn't match parent Hash of first blockHeader, but parent is finalized +// // so the discrepancy is the new block that is discarded without reorg (still not in DB) +// if isFinalized && blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { +// return fmt.Errorf("checkParent: "+ +// "parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", +// blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String()) +// } +// if blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { +// // Parenthash mismatch, reorg detected +// return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, +// *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", +// blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) +// } +// return nil +// } func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err := ctx.Err(); err != nil { diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index 8d8726a64..accaeb6bb 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -3,7 +3,6 @@ package multidownloader import ( "context" "fmt" - "math/big" dbtypes "github.com/agglayer/aggkit/db/types" mdtypes "github.com/agglayer/aggkit/multidownloader/types" @@ -32,15 +31,14 @@ func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querie if err != nil { return nil, err } - number := big.NewInt(0).SetUint64(blockNumber) - rpcBlock, err := r.ethClient.BlockByNumber(ctx, number) + rpcBlock, err := r.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) if err != nil { return nil, err } return &compareBlockHeaders{ StorageHeader: currentStorageBlock, IsFinalized: finalized, - RpcHeader: aggkittypes.NewBlockHeaderFromEthHeader(rpcBlock.Header()), + RpcHeader: rpcBlock, }, nil } diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index ab390ea09..e650cb4c6 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -284,104 +284,3 @@ func (a *MultidownloaderStorage) saveLogsNoMutex(tx dbtypes.Querier, logRows []* } return nil } - -func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { - targetToBlock, err := aggkittypes.NewBlockNumberFinality(r.TargetToBlock) - if err != nil { - return mdrtypes.SyncSegment{}, fmt.Errorf("ToSyncSegment: error parsing target to block finality (%s): %w", - r.TargetToBlock, err) - } - return mdrtypes.SyncSegment{ - ContractAddr: r.Address, - TargetToBlock: *targetToBlock, - BlockRange: aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock), - }, nil -} - -func (a *MultidownloaderStorage) GetSyncedBlockRangePerContract(tx dbtypes.Querier) (mdrtypes.SetSyncSegment, error) { - a.mutex.RLock() - defer a.mutex.RUnlock() - result := make([]*syncStatusRow, 0) - if tx == nil { - tx = a.db - } - err := meddler.QueryAll(tx, &result, "SELECT * FROM sync_status") - if err != nil { - return mdrtypes.SetSyncSegment{}, fmt.Errorf("error querying sync status: %w", err) - } - setSegments := mdrtypes.NewSetSyncSegment() - for _, row := range result { - segment, err := row.ToSyncSegment() - if err != nil { - return mdrtypes.SetSyncSegment{}, - fmt.Errorf("GetSyncedBlockRangePerContract: error converting row to sync segment: %w", err) - } - setSegments.Add(segment) - } - return setSegments, nil -} - -func (a *MultidownloaderStorage) UpdateSyncedStatus(tx dbtypes.Querier, - segments []mdrtypes.SyncSegment) error { - if tx == nil { - tx = a.db - } - query := ` - UPDATE sync_status SET - synced_from_block = ?, - synced_to_block = ? - WHERE contract_address = ?; - ` - a.mutex.Lock() - defer a.mutex.Unlock() - for _, segment := range segments { - result, err := tx.Exec(query, segment.BlockRange.FromBlock, - segment.BlockRange.ToBlock, segment.ContractAddr.Hex()) - if err != nil { - return fmt.Errorf("error updating %s sync status: %w", segment.String(), err) - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected for contract %s: %w", - segment.ContractAddr.Hex(), err) - } - if rowsAffected == 0 { - return fmt.Errorf("no rows updated for contract %s", segment.ContractAddr.Hex()) - } - } - return nil -} - -func (a *MultidownloaderStorage) UpsertSyncerConfigs(tx dbtypes.Querier, configs []mdrtypes.ContractConfig) error { - if tx == nil { - tx = a.db - } - a.mutex.Lock() - defer a.mutex.Unlock() - for _, config := range configs { - row := syncStatusRow{ - Address: config.Address, - TargetFromBlock: config.FromBlock, - TargetToBlock: config.ToBlock.String(), - SyncedFromBlock: 0, - SyncedToBlock: 0, - SyncersIDs: fmt.Sprintf("%v", config.Syncers), - } - // Upsert logic - query := ` - INSERT INTO sync_status (contract_address, target_from_block, - target_to_block, synced_from_block, synced_to_block, syncers_id) - VALUES (?, ?, ?, ?, ?, ?) - ON CONFLICT(contract_address) DO UPDATE SET - target_from_block = excluded.target_from_block, - target_to_block = excluded.target_to_block, - syncers_id = excluded.syncers_id - ` - _, err := tx.Exec(query, row.Address.Hex(), row.TargetFromBlock, row.TargetToBlock, - row.SyncedFromBlock, row.SyncedToBlock, row.SyncersIDs) - if err != nil { - return fmt.Errorf("error updating sync status: %w", err) - } - } - return nil -} diff --git a/multidownloader/storage/storage_sync.go b/multidownloader/storage/storage_sync.go index a8beeafe9..29fb45e1f 100644 --- a/multidownloader/storage/storage_sync.go +++ b/multidownloader/storage/storage_sync.go @@ -28,7 +28,7 @@ func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { } return mdrtypes.SyncSegment{ ContractAddr: r.Address, - TargetToBlock: targetToBlock, + TargetToBlock: *targetToBlock, BlockRange: aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock), }, nil } From 0269675a6d04678af6a93e624cbe5a3f372fc904 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 19 Jan 2026 18:24:26 +0100 Subject: [PATCH 06/75] fix: UT --- config/default.go | 2 ++ multidownloader/state_test.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/config/default.go b/config/default.go index b30268731..9c617e2a6 100644 --- a/config/default.go +++ b/config/default.go @@ -343,6 +343,7 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}" MaxParallelBlockHeaderRetrieval = 30 BlockFinality = "FinalizedBlock" WaitPeriodToCheckCatchUp = "10s" + PeriodToCheckReorgs = "5s" [L2Multidownloader] Enabled = false @@ -351,4 +352,5 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}" MaxParallelBlockHeaderRetrieval = 30 BlockFinality = "LatestBlock" WaitPeriodToCheckCatchUp = "10s" + PeriodToCheckReorgs = "5s" ` diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go index 4d3f3cc6d..10a54f179 100644 --- a/multidownloader/state_test.go +++ b/multidownloader/state_test.go @@ -34,13 +34,13 @@ func TestStateInitial(t *testing.T) { require.NoError(t, err) require.NotNil(t, state) logQuery := mdtypes.NewLogQuery( - 123, 456, []common.Address{addr1}) + 1, 456, []common.Address{addr1}) err = state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) pendingSegments := state.SyncedSegmentsByContract([]common.Address{addr1}) require.Equal(t, 1, len(pendingSegments)) require.Equal(t, addr1, pendingSegments[0].ContractAddr) - require.Equal(t, aggkitcommon.NewBlockRange(0, 1000), pendingSegments[0].BlockRange) + require.Equal(t, aggkitcommon.NewBlockRange(0, 456), pendingSegments[0].BlockRange) require.Equal(t, aggkittypes.FinalizedBlock, pendingSegments[0].TargetToBlock) } From 23f0f928674d370141af53c3b7d8a2233dd2c38b Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 21 Jan 2026 19:44:20 +0100 Subject: [PATCH 07/75] Breaking Changes: - Rename SyncerConfig.ContractsAddr to ContractAddresses for better naming consistency - Updated all references across multidownloader, l1infotreesync, and types packages - Updated all unit tests to use new field name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improvements: - Add auto-initialization in EVMMultidownloader.Start() when not already initialized - Adds safety check to call Initialize() if IsInitialized() returns false - Logs initialization attempt for better observability - Fix typo in log_query.go panic message: "unsuppoerted" β†’ "unsupported" - Add nil check for query.ToBlock in NewLogQueryFromEthereumFilter to prevent nil pointer panics - Fix typo in evm_multidownloader.go log message: "relauncing" β†’ "relaunching" Database: - Fix SQL migration 0002.sql: - Add missing DROP statements for blocks_reorged and reorgs tables in Down migration - Fix typo in comment: "extran" β†’ "extra" Types: - Update BlockHeader.Empty() to only check for nil (removed Number == 0 check) - Update NewListBlockHeadersEmpty to pre-allocate slice with nil elements - Add nil checks in ListBlockHeaders.ToMap() and BlockNumbers() methods - Add clarifying comment to ReorgError about OffendingBlockNumber behavior Testing: - Add unit test for Start() auto-initialization error handling - Re-enable TestEVMMultidownloader_Start with simplified test case - Update test contracts compilation scripts (logemitter) Co-Authored-By: Claude Sonnet 4.5 --- l1infotreesync/l1infotreesync.go | 8 +- multidownloader/evm_multidownloader.go | 13 ++- .../evm_multidownloader_syncers_test.go | 8 +- multidownloader/evm_multidownloader_test.go | 46 ++++++--- multidownloader/storage/migrations/0002.sql | 4 +- multidownloader/types/log_query.go | 3 + multidownloader/types/reorg_error.go | 3 +- multidownloader/types/syncer_config.go | 6 +- multidownloader/types/syncer_config_test.go | 96 +++++++++---------- test/contracts/bind.sh | 3 +- test/contracts/compile.sh | 7 +- types/block_header.go | 2 +- types/list_block_header.go | 11 ++- types/multidownloader.go | 4 +- 14 files changed, 125 insertions(+), 89 deletions(-) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index cc905cbd5..de979a1a8 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -100,10 +100,10 @@ func New( addressesToQuery := []common.Address{cfg.GlobalExitRootAddr, cfg.RollupManagerAddr} err = l1Client.RegisterSyncer( aggkittypes.SyncerConfig{ - SyncerID: "l1infotreesync", - ContractsAddr: addressesToQuery, - FromBlock: cfg.InitialBlock, - ToBlock: cfg.BlockFinality, + SyncerID: "l1infotreesync", + ContractAddresses: addressesToQuery, + FromBlock: cfg.InitialBlock, + ToBlock: cfg.BlockFinality, }, ) if err != nil { diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 44b20a687..a4982d0b9 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -268,12 +268,15 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { return nil } func (dh *EVMMultidownloader) Start(ctx context.Context) error { - err := dh.Initialize(ctx) - if err != nil { - return err + if !dh.IsInitialized() { + dh.log.Infof("EVMMultidownloader.Start: multidownloader not initialized, initializing...") + err := dh.Initialize(ctx) + if err != nil { + return err + } } for { - err = dh.StartStep(ctx) + err := dh.StartStep(ctx) if err != nil { reorgErr := mdrtypes.CastReorgError(err) if reorgErr == nil { @@ -287,7 +290,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { } } // Breathing, just in case - dh.log.Infof("relauncing sync loop... (waiting 1 second)") + dh.log.Infof("relaunching sync loop... (waiting 1 second)") time.Sleep(1 * time.Second) } } diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index 143eabdb7..3099847f9 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -189,10 +189,10 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ - SyncerID: "test_syncer", - ContractsAddr: []common.Address{addr1}, - FromBlock: 100, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "test_syncer", + ContractAddresses: []common.Address{addr1}, + FromBlock: 100, + ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) testData.MockInitialize(t, 1) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index d2bf48d74..a40118322 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -37,7 +37,7 @@ const l1InfoTreeUseMultidownloader = true const storagePath = "../tmp/ut/" func TestEVMMultidownloader(t *testing.T) { - // t.Skip("code to test/debug not real unittest") + t.Skip("code to test/debug not real unittest") cfgLog := log.Config{ Environment: "development", Level: "info", @@ -76,7 +76,7 @@ func TestEVMMultidownloader(t *testing.T) { require.NotNil(t, mdr) err = mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "test_syncer", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), // GERManager common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), // RollupManager }, @@ -278,7 +278,7 @@ func TestEVMMultidownloader_RegisterSyncer(t *testing.T) { testData := newEVMMultidownloaderTestData(t, false) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "syncer1", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x1"), }, FromBlock: 100, @@ -376,7 +376,7 @@ func TestEVMMultidownloader_StepSafe(t *testing.T) { testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "syncer1", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x1"), }, FromBlock: 100, @@ -431,20 +431,36 @@ func TestEVMMultidownloader_sync(t *testing.T) { }) } -/* func TestEVMMultidownloader_Start(t *testing.T) { - testData := newEVMMultidownloaderTestData(t) - testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil).Maybe() - err := testData.mdr.Initialize(t.Context()) - require.NoError(t, err) + t.Run("initialization error is returned", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) - start := time.Now() - err = testData.mdr.Start(t.Context()) - duration := time.Since(start) - log.Infof("Multidownloader Start took %s", duration.String()) - require.NoError(t, err) + // Verify not initialized + require.False(t, testData.mdr.IsInitialized()) + + // Mock ChainID to fail + expectedErr := fmt.Errorf("chain ID error") + testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(nil, expectedErr).Once() + + ctx := context.Background() + + // Start should try to initialize and return the error + err := testData.mdr.Start(ctx) + + // Should return the initialization error + require.Error(t, err) + require.Contains(t, err.Error(), "chain ID error") + + // Verify it was not initialized + require.False(t, testData.mdr.IsInitialized()) + }) + + // Note: Testing the full Start() loop with auto-initialization is complex because Start() + // has an infinite loop and requires extensive mocking. The key behavior is tested above: + // - If not initialized, Start() calls Initialize() + // - If Initialize() fails, Start() returns the error + // For integration testing of the full Start() flow, see e2e_test.go } -*/ type testDataEVMMultidownloader struct { mockEthClient *mocktypes.BaseEthereumClienter diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql index db16b81c0..7cfb0f2b0 100644 --- a/multidownloader/storage/migrations/0002.sql +++ b/multidownloader/storage/migrations/0002.sql @@ -1,5 +1,7 @@ -- +migrate Down DROP TABLE IF EXISTS logs_reorged; +DROP TABLE IF EXISTS blocks_reorged; +DROP TABLE IF EXISTS reorgs; -- +migrate Up CREATE TABLE logs_reorged ( @@ -35,5 +37,5 @@ CREATE TABLE reorgs ( network_latest_block INTEGER NOT NULL, -- which was the latest block in the detection moment network_finalized_block INTEGER NOT NULL, -- which was the finalized block in the detection moment network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) - description TEXT -- extran information, can be null + description TEXT -- extra information, can be null ); \ No newline at end of file diff --git a/multidownloader/types/log_query.go b/multidownloader/types/log_query.go index af1009e97..d2cc6bc4b 100644 --- a/multidownloader/types/log_query.go +++ b/multidownloader/types/log_query.go @@ -46,6 +46,9 @@ func NewLogQueryFromEthereumFilter(query ethereum.FilterQuery) LogQuery { } return NewLogQueryBlockHash(blockNumber, *query.BlockHash, query.Addresses) } + if query.ToBlock == nil { + panic("NewLogQueryFromEthereumFilter: unsupported nil ToBlock") + } return NewLogQuery(query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses) } diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go index 01ad209d1..a087a92e7 100644 --- a/multidownloader/types/reorg_error.go +++ b/multidownloader/types/reorg_error.go @@ -8,8 +8,9 @@ import ( ) // ReorgError is an error that is raised when a reorg is detected +// The block is one of the blocks that were reorged, but not necessarily the first one type ReorgError struct { - OffendingBlockNumber uint64 + OffendingBlockNumber uint64 // Important: is not the first reorged block, but one of them OldHash common.Hash NewHash common.Hash Message string diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index b394b4c39..78647d205 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -79,7 +79,7 @@ func (f *SetSyncerConfig) Addresses(blockRange aggkitcommon.BlockRange) []common for _, filter := range f.filters { if filter.FromBlock >= blockRange.FromBlock { - for _, addr := range filter.ContractsAddr { + for _, addr := range filter.ContractAddresses { if _, exists := dups[addr]; !exists { addresses = append(addresses, addr) dups[addr] = struct{}{} @@ -106,7 +106,7 @@ func (f *SetSyncerConfig) ContractConfigs() []ContractConfig { } contractMap := make(map[common.Address]*ContractConfig) for _, filter := range f.filters { - for _, addr := range filter.ContractsAddr { + for _, addr := range filter.ContractAddresses { cc, exists := contractMap[addr] if !exists { contractMap[addr] = NewContractConfigFromSyncerConfig(addr, filter) @@ -135,7 +135,7 @@ func (f *SetSyncerConfig) SyncSegments() (*SetSyncSegment, error) { // contract address and block range for _, filter := range f.filters { // TODO: instead of calling RPC use block_notifier_values - for _, addr := range filter.ContractsAddr { + for _, addr := range filter.ContractAddresses { segment := SyncSegment{ ContractAddr: addr, // Initially set ToBlock as 0; it will be updated later diff --git a/multidownloader/types/syncer_config_test.go b/multidownloader/types/syncer_config_test.go index 0207d16c9..314c26cfc 100644 --- a/multidownloader/types/syncer_config_test.go +++ b/multidownloader/types/syncer_config_test.go @@ -22,10 +22,10 @@ func TestContractConfigs_SingleSyncerSingleContract(t *testing.T) { addr := common.HexToAddress("0x1") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr}, - FromBlock: 10, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 10, + ToBlock: aggkittypes.FinalizedBlock, }) configs := set.ContractConfigs() @@ -41,16 +41,16 @@ func TestContractConfigs_MultipleSyncersSameContract(t *testing.T) { addr := common.HexToAddress("0x2") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{addr}, - FromBlock: 5, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{addr}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, }) configs := set.ContractConfigs() @@ -69,16 +69,16 @@ func TestContractConfigs_MultipleSyncersMultipleContracts(t *testing.T) { addr2 := common.HexToAddress("0x4") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr1, addr2}, - FromBlock: 1, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr1, addr2}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, }) set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{addr2}, - FromBlock: 2, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{addr2}, + FromBlock: 2, + ToBlock: aggkittypes.LatestBlock, }) configs := set.ContractConfigs() @@ -111,10 +111,10 @@ func TestContractConfig_Update_FromBlock(t *testing.T) { // Update with lower FromBlock cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 5, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, uint64(5), cc.FromBlock) @@ -122,10 +122,10 @@ func TestContractConfig_Update_FromBlock(t *testing.T) { // Update with higher FromBlock (should not change) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer3", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer3", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, uint64(5), cc.FromBlock) @@ -142,10 +142,10 @@ func TestContractConfig_Update_ToBlock(t *testing.T) { // Update with less final ToBlock (LatestBlock < FinalizedBlock) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.LatestBlock, }) require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) @@ -153,10 +153,10 @@ func TestContractConfig_Update_ToBlock(t *testing.T) { // Update with more final ToBlock (should not change) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer3", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 20, - ToBlock: aggkittypes.SafeBlock, + SyncerID: "syncer3", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 20, + ToBlock: aggkittypes.SafeBlock, }) require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) @@ -173,20 +173,20 @@ func TestContractConfig_Update_Syncers(t *testing.T) { // Add new syncer cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, []SyncerID{"syncer1", "syncer2", "syncer3"}, cc.Syncers) // Add existing syncer (should not duplicate) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 20, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 20, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, []SyncerID{"syncer1", "syncer2", "syncer3"}, cc.Syncers) @@ -202,10 +202,10 @@ func TestContractConfig_Update_Combined(t *testing.T) { // Update all fields at once cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 5, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, }) require.Equal(t, uint64(5), cc.FromBlock) diff --git a/test/contracts/bind.sh b/test/contracts/bind.sh index 25ddd7820..5a4d44d60 100755 --- a/test/contracts/bind.sh +++ b/test/contracts/bind.sh @@ -11,4 +11,5 @@ gen() { gen verifybatchesmock gen claimmock gen claimmockcaller -gen claimmocktest \ No newline at end of file +gen claimmocktest +gen logemitter \ No newline at end of file diff --git a/test/contracts/compile.sh b/test/contracts/compile.sh index 7dd357a9e..ae3d1cd3a 100755 --- a/test/contracts/compile.sh +++ b/test/contracts/compile.sh @@ -18,7 +18,12 @@ docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/cl mv -f ClaimMockTest.abi abi/claimmocktest.abi mv -f ClaimMockTest.bin bin/claimmocktest.bin +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/logemitter/LogEmitter.sol -o /contracts --abi --bin --overwrite --optimize --via-ir +mv -f LogEmitter.abi abi/logemitter.abi +mv -f LogEmitter.bin bin/logemitter.bin + + rm -f IClaimMock.abi rm -f IClaimMock.bin rm -f IClaimMockCaller.abi -rm -f IClaimMockCaller.bin \ No newline at end of file +rm -f IClaimMockCaller.bin diff --git a/types/block_header.go b/types/block_header.go index 3dc08a53f..43a891f74 100644 --- a/types/block_header.go +++ b/types/block_header.go @@ -35,7 +35,7 @@ func NewBlockHeaderFromEthHeader(ethHeader *types.Header) *BlockHeader { ðHeader.ParentHash) } func (gb *BlockHeader) Empty() bool { - return gb == nil || gb.Number == 0 + return gb == nil } func (gb *BlockHeader) String() string { diff --git a/types/list_block_header.go b/types/list_block_header.go index 081d3ec16..9b0e24be7 100644 --- a/types/list_block_header.go +++ b/types/list_block_header.go @@ -4,8 +4,9 @@ import "sort" type ListBlockHeaders []*BlockHeader +// NewListBlockHeadersEmpty creates a new ListBlockHeaders with pre-allocated items set to nil func NewListBlockHeadersEmpty(preAllocatedSize int) ListBlockHeaders { - return ListBlockHeaders(make([]*BlockHeader, 0, preAllocatedSize)) + return ListBlockHeaders(make([]*BlockHeader, preAllocatedSize, preAllocatedSize)) } func (lbs ListBlockHeaders) Len() int { return len(lbs) @@ -14,7 +15,9 @@ func (lbs ListBlockHeaders) Len() int { func (lbs ListBlockHeaders) ToMap() MapBlockHeaders { result := NewMapBlockHeadersEmpty(lbs.Len()) for _, header := range lbs { - result[header.Number] = header + if header != nil { + result[header.Number] = header + } } return result } @@ -22,7 +25,9 @@ func (lbs ListBlockHeaders) ToMap() MapBlockHeaders { func (lbs ListBlockHeaders) BlockNumbers() []uint64 { result := make([]uint64, 0, len(lbs)) for _, header := range lbs { - result = append(result, header.Number) + if header != nil { + result = append(result, header.Number) + } } sort.Slice(result, func(i, j int) bool { return result[i] < result[j] diff --git a/types/multidownloader.go b/types/multidownloader.go index 4c4dc279c..665950e62 100644 --- a/types/multidownloader.go +++ b/types/multidownloader.go @@ -11,8 +11,8 @@ import ( type SyncerConfig struct { // SyncerID is the unique identifier for the syncer SyncerID string - // ContractAddr is list of contract addresses to sync - ContractsAddr []common.Address + // ContractAddresses is list of contract addresses to sync + ContractAddresses []common.Address // Starting block FromBlock uint64 // Target for final block (e.g. LatestBlock, SafeBlock, FinalizedBlock) From 1f6a8c6b963645c2db4a62f09700373b571d02b1 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 26 Jan 2026 18:13:28 +0100 Subject: [PATCH 08/75] feat: improve multidownloader lifecycle control and reorg handling Enhancements to multidownloader: - Add graceful Stop() method with context cancellation and WaitGroup - Improve Start() with better error handling and continuous reorg processing - Replace panic with warnings and automatic retry on errors - Add updateTargetBlockNumber() and checkReorgsUnsafeZone() methods - Refactor StartStep() with improved sync logic Reorg processor improvements: - Change GetLatestBlockNumberInRPC to GetBlockNumberInRPC with finality parameter - Fetch both latest and finalized block numbers from RPC during reorg processing - Improve reorg detection logging and metadata Storage enhancements: - Add GetBlockReorgedChainID() method to query reorged blocks - Update GetBlockHeadersNotFinalized() to accept nullable finalized block number - Improve block and reorg storage operations Common utilities: - Add BlockRange.SplitByBlockNumber() method with comprehensive tests - Add warning log when L1 MultiDownloader is disabled Test coverage: - Add extensive unit tests for state, storage, and set_sync_segment - Update etherman client tests - Add tests for new BlockRange split functionality Co-Authored-By: Claude Sonnet 4.5 --- cmd/run.go | 1 + common/block_range.go | 27 ++ common/block_range_test.go | 150 ++++++++ etherman/batch_requests.go | 2 +- .../block_notifier/block_notifier_polling.go | 3 +- etherman/default_eth_client.go | 18 +- etherman/default_eth_client_test.go | 233 +++++++++--- multidownloader/evm_multidownloader.go | 268 +++++++++++--- .../evm_multidownloader_syncers.go | 66 +++- multidownloader/evm_multidownloader_test.go | 113 ++++++ multidownloader/reorg_processor.go | 16 +- multidownloader/reorg_processor_port.go | 24 +- multidownloader/state.go | 42 ++- multidownloader/state_test.go | 335 ++++++++++++++++++ multidownloader/storage/storage.go | 1 + multidownloader/storage/storage_block.go | 31 +- multidownloader/storage/storage_reorg.go | 23 ++ multidownloader/types/mocks/mock_storager.go | 142 +++++++- multidownloader/types/set_sync_segment.go | 4 + .../types/set_sync_segment_test.go | 74 ++++ multidownloader/types/storager.go | 12 +- types/list_block_header.go | 7 +- 22 files changed, 1431 insertions(+), 161 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 6c008d890..eaddc8f6c 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -616,6 +616,7 @@ func runL1MultiDownloaderIfNeeded( } // If it's disable It creates a direct eth client if !cfg.Enabled { + log.Warnf("L1 MultiDownloader is disabled, using legacy EthClient") return aggkitsync.NewAdapterEthClientToMultidownloader(l1Client), nil, nil } logger := log.WithFields("module", "L1MultiDownloader") diff --git a/common/block_range.go b/common/block_range.go index b4929ffc1..c30023111 100644 --- a/common/block_range.go +++ b/common/block_range.go @@ -202,3 +202,30 @@ func (b BlockRange) ListBlockNumbers() []uint64 { } return blockNumbers } + +// SplitByBlockNumber splits a BlockRange into two parts at the given block number +// The first range includes blocks from FromBlock to blockNumber (inclusive) +// The second range includes blocks from blockNumber+1 to ToBlock (inclusive) +// If blockNumber is outside the range, one of the returned ranges will be empty +func (b BlockRange) SplitByBlockNumber(blockNumber uint64) (BlockRange, BlockRange) { + // If the original range is empty, return two empty ranges + if b.IsEmpty() { + return BlockRangeZero, BlockRangeZero + } + + // If blockNumber is before FromBlock, first range is empty + if blockNumber < b.FromBlock { + return BlockRangeZero, b + } + + // If blockNumber is at or after ToBlock, second range is empty + if blockNumber >= b.ToBlock { + return b, BlockRangeZero + } + + // Split in the middle + first := NewBlockRange(b.FromBlock, blockNumber) + second := NewBlockRange(blockNumber+1, b.ToBlock) + + return first, second +} diff --git a/common/block_range_test.go b/common/block_range_test.go index 86aa4d940..94a65cc20 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -480,3 +480,153 @@ func TestBlockRange_ListBlockNumbers(t *testing.T) { bn3 := NewBlockRange(0, 0) require.Equal(t, []uint64{}, bn3.ListBlockNumbers()) } + +func TestBlockRange_SplitByBlockNumber(t *testing.T) { + tests := []struct { + name string + blockRange BlockRange + splitBlock uint64 + expectedFirst BlockRange + expectedSecond BlockRange + descriptionFirst string + descriptionSecond string + }{ + { + name: "split in the middle", + blockRange: NewBlockRange(100, 200), + splitBlock: 150, + expectedFirst: NewBlockRange(100, 150), + expectedSecond: NewBlockRange(151, 200), + descriptionFirst: "first half includes split block", + descriptionSecond: "second half starts after split block", + }, + { + name: "split at FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: NewBlockRange(101, 200), + descriptionFirst: "first range is single block", + descriptionSecond: "second range is rest of blocks", + }, + { + name: "split at ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 200, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", + descriptionSecond: "second range is empty", + }, + { + name: "split before FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 50, + expectedFirst: BlockRangeZero, + expectedSecond: NewBlockRange(100, 200), + descriptionFirst: "first range is empty", + descriptionSecond: "second range is entire original range", + }, + { + name: "split after ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 250, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", + descriptionSecond: "second range is empty", + }, + { + name: "split single block range at that block", + blockRange: NewBlockRange(100, 100), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is the single block", + descriptionSecond: "second range is empty", + }, + { + name: "split single block range before", + blockRange: NewBlockRange(100, 100), + splitBlock: 50, + expectedFirst: BlockRangeZero, + expectedSecond: NewBlockRange(100, 100), + descriptionFirst: "first range is empty", + descriptionSecond: "second range is the single block", + }, + { + name: "split single block range after", + blockRange: NewBlockRange(100, 100), + splitBlock: 150, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is the single block", + descriptionSecond: "second range is empty", + }, + { + name: "split empty range", + blockRange: BlockRangeZero, + splitBlock: 100, + expectedFirst: BlockRangeZero, + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is empty", + descriptionSecond: "second range is empty", + }, + { + name: "split two block range at first", + blockRange: NewBlockRange(100, 101), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: NewBlockRange(101, 101), + descriptionFirst: "first range is first block", + descriptionSecond: "second range is second block", + }, + { + name: "split two block range at second", + blockRange: NewBlockRange(100, 101), + splitBlock: 101, + expectedFirst: NewBlockRange(100, 101), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is both blocks", + descriptionSecond: "second range is empty", + }, + { + name: "split at ToBlock minus 1", + blockRange: NewBlockRange(100, 200), + splitBlock: 199, + expectedFirst: NewBlockRange(100, 199), + expectedSecond: NewBlockRange(200, 200), + descriptionFirst: "first range is all but last block", + descriptionSecond: "second range is last block only", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + first, second := tt.blockRange.SplitByBlockNumber(tt.splitBlock) + + require.Equal(t, tt.expectedFirst, first, + "SplitByBlockNumber() first range for %s: expected %v, got %v (%s)", + tt.name, tt.expectedFirst, first, tt.descriptionFirst) + + require.Equal(t, tt.expectedSecond, second, + "SplitByBlockNumber() second range for %s: expected %v, got %v (%s)", + tt.name, tt.expectedSecond, second, tt.descriptionSecond) + + // Verify that the split is valid + if !first.IsEmpty() && !second.IsEmpty() { + // Verify there's no gap between ranges + require.Equal(t, first.ToBlock+1, second.FromBlock, + "There should be no gap between first and second ranges") + } + + // Verify that combined ranges equal original + if !first.IsEmpty() && !second.IsEmpty() { + require.Equal(t, tt.blockRange.FromBlock, first.FromBlock, + "First range should start at original FromBlock") + require.Equal(t, tt.blockRange.ToBlock, second.ToBlock, + "Second range should end at original ToBlock") + } + }) + } +} diff --git a/etherman/batch_requests.go b/etherman/batch_requests.go index c6e5e9142..497feccf4 100644 --- a/etherman/batch_requests.go +++ b/etherman/batch_requests.go @@ -89,7 +89,7 @@ func RetrieveBlockHeadersLegacy(ctx context.Context, ctx, log, func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { - result := aggkittypes.NewListBlockHeadersEmpty(len(blocks)) + result := aggkittypes.NewListBlockHeaders(len(blocks)) for i, blockNumber := range blocks { header, err := ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) if err != nil { diff --git a/etherman/block_notifier/block_notifier_polling.go b/etherman/block_notifier/block_notifier_polling.go index 7401b026a..b31798a06 100644 --- a/etherman/block_notifier/block_notifier_polling.go +++ b/etherman/block_notifier/block_notifier_polling.go @@ -141,12 +141,13 @@ func (b *BlockNotifierPolling) getGlobalStatus() *blockNotifierPollingInternalSt func (b *BlockNotifierPolling) step(ctx context.Context, previousState *blockNotifierPollingInternalStatus) (time.Duration, *blockNotifierPollingInternalStatus, *ethmantypes.EventNewBlock) { - currentBlock, err := b.blockFinality.BlockNumber(ctx, b.ethClient) + hdr, err := b.ethClient.CustomHeaderByNumber(ctx, &b.blockFinality) if err != nil { b.logger.Errorf("Failed to get block number %s: %v", b.blockFinality.String(), err) newState := previousState.clear() return b.nextBlockRequestDelay(nil, err), newState, nil } + currentBlock := hdr.Number if previousState == nil { newState := previousState.initialBlock(currentBlock) return b.nextBlockRequestDelay(previousState, nil), newState, nil diff --git a/etherman/default_eth_client.go b/etherman/default_eth_client.go index 78820f313..ea250ce1b 100644 --- a/etherman/default_eth_client.go +++ b/etherman/default_eth_client.go @@ -101,7 +101,19 @@ func (c *DefaultEthClient) CustomHeaderByNumber(ctx context.Context, if err != nil { return nil, err } + result, err := c.internalHeaderByNumber(ctx, numberBigInt) + if err != nil { + return nil, err + } + + result.RequestedBlock = number + return result, nil +} + +func (c *DefaultEthClient) internalHeaderByNumber(ctx context.Context, + numberBigInt *big.Int) (*aggkittypes.BlockHeader, error) { var result *aggkittypes.BlockHeader + var err error if c.HashFromJSON { result, err = c.rpcGetBlockByNumber(ctx, numberBigInt) if err != nil { @@ -114,8 +126,6 @@ func (c *DefaultEthClient) CustomHeaderByNumber(ctx context.Context, } result = aggkittypes.NewBlockHeaderFromEthHeader(ethHeader) } - - result.RequestedBlock = number return result, nil } @@ -126,7 +136,7 @@ func (c *DefaultEthClient) resolveBlockNumber(ctx context.Context, return number.ToBigInt(), nil } // Resolve the base block number - hdr, err := c.rpcGetBlockByNumber(ctx, number.ToBigInt()) + hdr, err := c.internalHeaderByNumber(ctx, number.ToBigInt()) if err != nil { return nil, err } @@ -144,7 +154,7 @@ func (c *DefaultEthClient) rpcGetBlockByNumber(ctx context.Context, number *big. var rawEthHeader *blockRawEth err := c.CallContext(ctx, &rawEthHeader, "eth_getBlockByNumber", blockArg, false) if err != nil { - return nil, fmt.Errorf("rpcGetBlockByNumber: %w", err) + return nil, fmt.Errorf("rpcGetBlockByNumber: CallContext error: %w", err) } return rawEthHeader.ToBlockHeader() } diff --git a/etherman/default_eth_client_test.go b/etherman/default_eth_client_test.go index 04b991b4b..5543c576f 100644 --- a/etherman/default_eth_client_test.go +++ b/etherman/default_eth_client_test.go @@ -37,67 +37,86 @@ func TestDefaultEthClientExploratory(t *testing.T) { fmt.Printf("header: %+v\n", header) } -func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { +// testBlockWithOffsetHelper is a helper function for testing block tag resolution with offsets +func testBlockWithOffsetHelper( + t *testing.T, + ctx context.Context, + blockTag string, + blockNumFinality string, + firstBlockNum uint64, + firstBlockHash string, + secondBlockNum uint64, + secondBlockHash string, +) { + t.Helper() mockEthClient := mocks.NewEthereumClienter(t) mockRPCClient := mocks.NewRPCClienter(t) - client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) - bnFinalized5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/5") + client.HashFromJSON = true + + bn, err := aggkittypes.NewBlockNumberFinality(blockNumFinality) require.NoError(t, err) - ctx := t.Context() - blockRaw95 := &blockRawEth{ - Number: "0x5f", // 95 in hex - Hash: "0xabc123", + + firstBlock := &blockRawEth{ + Number: fmt.Sprintf("0x%x", firstBlockNum), + Hash: firstBlockHash, Timestamp: "1234", } - blockRaw100 := &blockRawEth{ - Number: "0x64", // 100 in hex - Hash: "0xabc123", - Timestamp: "1234", + secondBlock := &blockRawEth{ + Number: fmt.Sprintf("0x%x", secondBlockNum), + Hash: secondBlockHash, + Timestamp: "1235", } - t.Run("FinalizedBlock with offset", func(t *testing.T) { - client.HashFromJSON = true - // Setup mock for rpcGetBlockByNumber - // Call to resolve finalized block - mockRPCClient. - EXPECT(). - CallContext( - ctx, - mock.Anything, - "eth_getBlockByNumber", - "finalized", - false, - ). - Return(nil). - Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { - rawEth, ok := result.(**blockRawEth) - require.True(t, ok) - *rawEth = blockRaw95 - }).Once() + // First call to resolve block tag + mockRPCClient. + EXPECT(). + CallContext(ctx, mock.Anything, "eth_getBlockByNumber", blockTag, false). + Return(nil). + Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { + rawEth, ok := result.(**blockRawEth) + require.True(t, ok) + *rawEth = firstBlock + }).Once() - mockRPCClient. - EXPECT(). - CallContext(ctx, mock.Anything, "eth_getBlockByNumber", "0x64", false). - Return(nil). - Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { - rawEth, ok := result.(**blockRawEth) - require.True(t, ok) - *rawEth = blockRaw100 - }).Once() - // Call CustomHeaderByNumber - header, err := client.CustomHeaderByNumber(ctx, bnFinalized5) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, uint64(100), header.Number) - require.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000abc123", header.Hash.Hex()) - require.Equal(t, bnFinalized5, header.RequestedBlock) + // Second call to get the final block after offset + mockRPCClient. + EXPECT(). + CallContext(ctx, mock.Anything, "eth_getBlockByNumber", fmt.Sprintf("0x%x", secondBlockNum), false). + Return(nil). + Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { + rawEth, ok := result.(**blockRawEth) + require.True(t, ok) + *rawEth = secondBlock + }).Once() + + header, err := client.CustomHeaderByNumber(ctx, bn) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, secondBlockNum, header.Number) + require.Equal(t, fmt.Sprintf("0x%064s", secondBlockHash[2:]), header.Hash.Hex()) + require.Equal(t, bn, header.RequestedBlock) +} + +func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { + ctx := context.Background() + + t.Run("FinalizedBlock with offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "finalized", "FinalizedBlock/5", 95, "0xabc123", 100, "0xabc123") }) t.Run("Latest block", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) client.HashFromJSON = true - ctx := t.Context() + + blockRaw95 := &blockRawEth{ + Number: "0x5f", // 95 in hex + Hash: "0xabc123", + Timestamp: "1234", + } mockRPCClient. EXPECT(). @@ -115,15 +134,27 @@ func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { }) t.Run("failed to find blockNumber for tag block", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = true + + bnFinalized5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/5") + require.NoError(t, err) + mockRPCClient. EXPECT().CallContext(ctx, mock.Anything, "eth_getBlockByNumber", "finalized", false). Return(fmt.Errorf("rpc error")) - _, err := client.CustomHeaderByNumber(ctx, bnFinalized5) + _, err = client.CustomHeaderByNumber(ctx, bnFinalized5) require.Error(t, err) }) t.Run("use HashFromJSON=false (geth call)", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) client.HashFromJSON = false + mockEthClient.EXPECT(). HeaderByNumber(ctx, (*big.Int)(nil)). Return(&types.Header{ @@ -133,4 +164,110 @@ func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { require.NoError(t, err) require.NotNil(t, header) }) + + t.Run("LatestBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "latest", "LatestBlock/-10", 100, "0xdef456", 90, "0xabc789") + }) + + t.Run("FinalizedBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "finalized", "FinalizedBlock/-5", 100, "0xfed123", 95, "0xabc456") + }) + + t.Run("SafeBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "safe", "SafeBlock/-3", 50, "0x123abc", 47, "0x456def") + }) + + t.Run("PendingBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "pending", "PendingBlock/-2", 101, "0x789abc", 99, "0xdef123") + }) + + t.Run("LatestBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = false + + bnLatestMinus10, err := aggkittypes.NewBlockNumberFinality("LatestBlock/-10") + require.NoError(t, err) + + // First call to resolve latest block (returns 100) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, (*big.Int)(nil)). + Return(&types.Header{ + Number: big.NewInt(100), + }, nil).Once() + + // Second call to get block 90 (100 - 10) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(90)). + Return(&types.Header{ + Number: big.NewInt(90), + }, nil).Once() + + header, err := client.CustomHeaderByNumber(ctx, bnLatestMinus10) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, uint64(90), header.Number) + require.Equal(t, bnLatestMinus10, header.RequestedBlock) + }) + + t.Run("FinalizedBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = false + + bnFinalizedMinus5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/-5") + require.NoError(t, err) + + // First call to resolve finalized block (returns 100) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(-3)). + Return(&types.Header{ + Number: big.NewInt(100), + }, nil).Once() + + // Second call to get block 95 (100 - 5) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(95)). + Return(&types.Header{ + Number: big.NewInt(95), + }, nil).Once() + + header, err := client.CustomHeaderByNumber(ctx, bnFinalizedMinus5) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, uint64(95), header.Number) + require.Equal(t, bnFinalizedMinus5, header.RequestedBlock) + }) + + t.Run("SafeBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = false + + bnSafeMinus3, err := aggkittypes.NewBlockNumberFinality("SafeBlock/-3") + require.NoError(t, err) + + // First call to resolve safe block (returns 50) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(-4)). + Return(&types.Header{ + Number: big.NewInt(50), + }, nil).Once() + + // Second call to get block 47 (50 - 3) + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(47)). + Return(&types.Header{ + Number: big.NewInt(47), + }, nil).Once() + + header, err := client.CustomHeaderByNumber(ctx, bnSafeMinus3) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, uint64(47), header.Number) + require.Equal(t, bnSafeMinus3, header.RequestedBlock) + }) } diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index a4982d0b9..0cf78000c 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -43,10 +43,15 @@ type EVMMultidownloader struct { syncersConfig mdrtypes.SetSyncerConfig reorgProcessor mdrtypes.ReorgProcessor - mutex sync.Mutex - state *State // current state of synced and pending segments if nil not initialized - + mutex sync.Mutex + state *State // current state of synced and pending segments if nil not initialized statistics *Statistics + + // Control fields for Start/Stop + stopRequested bool + isRunning bool + wg sync.WaitGroup + cancel context.CancelFunc } var _ aggkittypes.MultiDownloader = (*EVMMultidownloader)(nil) @@ -138,10 +143,14 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er } }() - blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, finalizedBlockNumber) + blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, &finalizedBlockNumber) if err != nil { return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err) } + if blocks.Len() == 0 { + dh.log.Debugf("MoveUnsafeToSafeIfPossible: no unsafe blocks to move to safe") + return nil + } dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+ "unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) err = dh.detectReorgs(ctx, blocks) @@ -164,6 +173,10 @@ func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, blocks aggkittypes.ListBlockHeaders) error { // TODO: optimize this to don't check all blocks // TODO: Find the first block to reorg + if blocks.Len() == 0 { + dh.log.Debugf("detectReorgs: no blocks to check for reorgs") + return nil + } blocksNumber := blocks.BlockNumbers() currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) @@ -268,34 +281,167 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { return nil } func (dh *EVMMultidownloader) Start(ctx context.Context) error { + dh.mutex.Lock() + if dh.isRunning { + dh.mutex.Unlock() + return fmt.Errorf("Start: multidownloader is already running") + } + // Create a cancelable context for this run + runCtx, cancel := context.WithCancel(ctx) + dh.cancel = cancel + dh.isRunning = true + dh.stopRequested = false + dh.wg.Add(1) + dh.mutex.Unlock() + + defer func() { + dh.mutex.Lock() + dh.isRunning = false + dh.stopRequested = false + dh.cancel = nil + dh.mutex.Unlock() + dh.wg.Done() + }() + if !dh.IsInitialized() { dh.log.Infof("EVMMultidownloader.Start: multidownloader not initialized, initializing...") - err := dh.Initialize(ctx) + err := dh.Initialize(runCtx) if err != nil { return err } } + dh.statistics.StartSyncing() for { - err := dh.StartStep(ctx) + // check if context is done + if runCtx.Err() != nil { + dh.log.Infof("EVMMultidownloader.Start: context done, exiting...") + return runCtx.Err() + } + + err := dh.StartStep(runCtx) if err != nil { reorgErr := mdrtypes.CastReorgError(err) if reorgErr == nil { - // TODO: Remove this panic and handle properly - panic("Error running multidownloader: " + err.Error()) + dh.log.Warnf("Error running multidownloader: %s ", err.Error()) + time.Sleep(100 * time.Millisecond) // Brief pause before retry + continue } dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) - err = dh.reorgProcessor.ProcessReorg(ctx, reorgErr.OffendingBlockNumber) - if err != nil { - panic("Error running multidownloader: " + err.Error()) + for { + // check if context is done during reorg processing + if runCtx.Err() != nil { + dh.log.Infof("EVMMultidownloader.Start: context done during reorg processing, exiting...") + return runCtx.Err() + } + + dh.log.Infof("Processing reorg at block number %d...", reorgErr.OffendingBlockNumber) + err = dh.reorgProcessor.ProcessReorg(runCtx, reorgErr.OffendingBlockNumber) + if err != nil { + dh.log.Warnf("Error running reorg multidownloader: %s", err.Error()) + time.Sleep(1 * time.Second) + continue + } + break } } - // Breathing, just in case - dh.log.Infof("relaunching sync loop... (waiting 1 second)") - time.Sleep(1 * time.Second) } } +// Stop gracefully stops the multidownloader if it's running +func (dh *EVMMultidownloader) Stop(ctx context.Context) error { + dh.mutex.Lock() + if !dh.isRunning { + dh.mutex.Unlock() + return fmt.Errorf("Stop: multidownloader is not running") + } + cancel := dh.cancel + dh.mutex.Unlock() + + dh.log.Infof("Stop: stopping multidownloader...") + + // Cancel the running context + if cancel != nil { + cancel() + } + + // Wait for the goroutine to finish with context timeout + done := make(chan struct{}) + go func() { + dh.wg.Wait() + close(done) + }() + + select { + case <-done: + dh.log.Infof("Stop: multidownloader stopped successfully") + return nil + case <-ctx.Done(): + return fmt.Errorf("Stop: timeout waiting for multidownloader to stop: %w", ctx.Err()) + } +} +func (dh *EVMMultidownloader) updateTargetBlockNumber(ctx context.Context) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + return dh.state.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) +} + +func (dh *EVMMultidownloader) checkReorgsUnsafeZone(ctx context.Context) error { + blockInUnsafeZone, err := dh.storage.GetBlockHeadersNotFinalized(nil, nil) + if err != nil { + return fmt.Errorf("checkReorgsUnsafeZone: cannot get unsafe blocks: %w", err) + } + return dh.detectReorgs(ctx, blockInUnsafeZone) + +} + func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { + var err error + // Update ToBlock in pending segments to be able to calculate if finished + err = dh.updateTargetBlockNumber(ctx) + if err != nil { + return fmt.Errorf("cannot update ToBlock: %w", err) + } + + // There are unsafe blocks that can be moved to safe and checked? + if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { + return err + } + // Check possible reorgs in unsafe zone + if err = dh.checkReorgsUnsafeZone(ctx); err != nil { + return err + } + + // Get the pending blocks to sync + pendingBlockRange := dh.getTotalPendingBlockRange() + if pendingBlockRange != nil { + dh.log.Debugf("StartStep: pendingBlockRange=%s", pendingBlockRange.String()) + // Split into safe and unsafe + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("StartStep: cannot get finalized block number: %w", err) + } + safePendingBlockRange, unsafePendingBlockRange := pendingBlockRange.SplitByBlockNumber(finalizedBlockNumber) + if !safePendingBlockRange.IsEmpty() { + dh.log.Infof("πŸ›‘οΈ StartStep: Safe sync for pending range %s", safePendingBlockRange.String()) + _, err = dh.StepSafe(ctx) + return err + } + if !unsafePendingBlockRange.IsEmpty() { + dh.log.Infof("😈 StartStep: Unsafe sync for pending range %s", unsafePendingBlockRange.String()) + _, err = dh.StepUnsafe(ctx) + return err + } + } else { + dh.log.Debugf("StartStep: no pending blocks to sync") + } + dh.log.Infof("⏳StartStep: waiting new block...") + if err = dh.WaitForNewLatestBlocks(ctx); err != nil { + return err + } + return nil +} + +func (dh *EVMMultidownloader) StartStepOld(ctx context.Context) error { dh.log.Infof("checking unsafe blocks on DB...") var err error if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { @@ -310,12 +456,52 @@ func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { return err } dh.log.Infof("waiting new block...") + if err = dh.WaitForNewLatestBlocks(ctx); err != nil { + return err + } + + dh.log.Infof("waiting new checkReorgUntilNewBlock...") if err = dh.checkReorgUntilNewBlock(ctx); err != nil { return err } } } +func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error { + latestSyncedBlock := dh.state.GetHighestBlockNumberPendingToSync() + _, err := dh.waitForNewBlocks(ctx, aggkittypes.LatestBlock, latestSyncedBlock) + return err +} + +func (dh *EVMMultidownloader) waitForNewBlocks(ctx context.Context, + blockTag aggkittypes.BlockNumberFinality, + latestSyncedBlock uint64) (uint64, error) { + // TODO: This var dh.cfg.PeriodToCheckReorgs.Duration is the best choice? + ticker := time.NewTicker(dh.cfg.PeriodToCheckReorgs.Duration) + defer ticker.Stop() + dh.log.Debugf("waitForNewBlocks: waiting for new blocks %s after %d. Check each %s...", + blockTag.String(), + latestSyncedBlock, + dh.cfg.PeriodToCheckReorgs.Duration.String()) + for { + select { + case <-ctx.Done(): + dh.log.Info("context cancelled") + return latestSyncedBlock, ctx.Err() + case <-ticker.C: + currentBlock, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, blockTag) + if err != nil { + return latestSyncedBlock, fmt.Errorf("WaitForNewBlocks: cannot get current block number: %w", err) + } + if currentBlock > latestSyncedBlock { + dh.log.Debugf("waitForNewBlocks: Find new block %d > latestSyncedBlock %d", + currentBlock, latestSyncedBlock) + return currentBlock, nil + } + } + } +} + // This function check the tip of the chain to prevent any reorg, meanwhile // wait for a new block to arrive func (dh *EVMMultidownloader) checkReorgUntilNewBlock(ctx context.Context) error { @@ -488,41 +674,15 @@ func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log return nil } -// TODO: ??? why I did this function?? -// TODO: remove -// func (dh *EVMMultidownloader) checkParent(blockHeader *aggkittypes.BlockHeader) error { -// if blockHeader.Number == 0 { -// return nil -// } -// parentHeader, isFinalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockHeader.Number-1) -// if err != nil { -// return fmt.Errorf("checkParent: cannot get parent block header for block number %d: %w", blockHeader.Number, err) -// } -// if parentHeader == nil { -// return fmt.Errorf("checkParent: parent block header for block number %d not found in storage", -// blockHeader.Number-1) -// } -// // Parenthash (from DB) doesn't match parent Hash of first blockHeader, but parent is finalized -// // so the discrepancy is the new block that is discarded without reorg (still not in DB) -// if isFinalized && blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { -// return fmt.Errorf("checkParent: "+ -// "parent hash mismatch for block number %d: expected %s, got %s (but parent is finalized)", -// blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String()) -// } -// if blockHeader.ParentHash != nil && parentHeader.Hash != *blockHeader.ParentHash { -// // Parenthash mismatch, reorg detected -// return mdrtypes.NewReorgError(parentHeader.Number, parentHeader.Hash, -// *blockHeader.ParentHash, fmt.Sprintf("checkParent: parent hash mismatch for block number %d: expected %s, got %s", -// blockHeader.Number, blockHeader.ParentHash.String(), parentHeader.Hash.String())) -// } -// return nil -// } - func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err := ctx.Err(); err != nil { return false, err } pendingBlockRange := dh.getTotalPendingBlockRange() + if pendingBlockRange == nil { + dh.log.Debugf("StepUnsafe: no pending blocks to sync") + return false, nil + } blocks := pendingBlockRange.ListBlockNumbers() // TODO: Check that the blocks are all inside unsafe range blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, @@ -604,11 +764,6 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { logQueryData.String(), err) } - // Update ToBlock in pending segments to be able to calculate if finished - err = newState.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) - if err != nil { - return false, fmt.Errorf("Safe/Step: cannot update ToBlock in pendingSync: %w", err) - } // Store data in storage err = dh.storeData(ctx, logs, blockHeaders, newState.SyncedSegmentsByContract(logQueryData.Addrs), true) @@ -632,7 +787,8 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { } func (dh *EVMMultidownloader) storeData( ctx context.Context, - logs []types.Log, blocks []*aggkittypes.BlockHeader, + logs []types.Log, + blocks []*aggkittypes.BlockHeader, updatedSegments []mdrtypes.SyncSegment, isFinal bool) error { var err error @@ -643,31 +799,31 @@ func (dh *EVMMultidownloader) storeData( }() tx, err := dh.storage.NewTx(ctx) if err != nil { - return fmt.Errorf("Safe/Step: cannot create new tx: %w", err) + return fmt.Errorf("storeData: cannot create new tx: %w", err) } defer func() { if !committed { - dh.log.Debugf("Safe/Step: rolling back tx") + dh.log.Debugf("storeData: rolling back tx") if err := tx.Rollback(); err != nil { - dh.log.Errorf("Safe/Step: error rolling back tx: %v", err) + dh.log.Errorf("storeData: error rolling back tx: %v", err) } } }() // Save logs and block headers err = dh.storage.SaveEthLogsWithHeaders(tx, blocks, logs, isFinal) if err != nil { - return fmt.Errorf("Safe/Step: cannot save eth logs: %w", err) + return fmt.Errorf("storeData: cannot save eth logs: %w", err) } // Update synced segments in storage err = dh.storage.UpdateSyncedStatus(tx, updatedSegments) if err != nil { - return fmt.Errorf("Safe/Step: cannot update synced segments +%v in storage: %w", + return fmt.Errorf("storeData: cannot update synced segments +%v in storage: %w", updatedSegments, err) } committed = true if err = tx.Commit(); err != nil { - return fmt.Errorf("Safe/Step: cannot commit tx: %w", err) + return fmt.Errorf("storeData: cannot commit tx: %w", err) } return nil } diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index a2c671296..4ecc1a227 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -9,9 +9,12 @@ import ( mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) +const debugSyncerInterface = false + // ChainID gets the chain ID directly from ethClient func (dh *EVMMultidownloader) ChainID(ctx context.Context) (uint64, error) { chainID, err := dh.ethClient.ChainID(ctx) @@ -48,12 +51,16 @@ func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.Fil if !dh.IsInitialized() { return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: multidownloader not initialized") } - dh.log.Debugf("EVMMultidownloader.FilterLogs: received query: %+v", query) - defer dh.log.Debugf("EVMMultidownloader.FilterLogs: finished query: %+v", query) + if debugSyncerInterface { + dh.log.Debugf("EVMMultidownloader.FilterLogs: received query: %+v", query) + defer dh.log.Debugf("EVMMultidownloader.FilterLogs: finished query: %+v", query) + } logQuery := mdrtypes.NewLogQueryFromEthereumFilter(query) for !dh.IsAvailable(logQuery) { - dh.log.Infof("EVMMultidownloader.FilterLogs: waiting %s for logs to be available: %s", - dh.cfg.WaitPeriodToCheckCatchUp.String(), logQuery.String()) + if debugSyncerInterface { + dh.log.Debugf("EVMMultidownloader.FilterLogs: waiting %s for logs to be available: %s", + dh.cfg.WaitPeriodToCheckCatchUp.String(), logQuery.String()) + } select { case <-time.After(dh.cfg.WaitPeriodToCheckCatchUp.Duration): case <-ctx.Done(): @@ -66,16 +73,19 @@ func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.Fil if err != nil { return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: cannot get logs: %w", err) } - dh.log.Debugf("EVMMultidownloader.FilterLogs(%d - %d): len(logs)= %d", query.FromBlock, query.ToBlock, len(logs)) - + if debugSyncerInterface { + dh.log.Debugf("EVMMultidownloader.FilterLogs(%d - %d): len(logs)= %d", query.FromBlock, query.ToBlock, len(logs)) + } return logs, nil } // HeaderByNumber gets the block header for the given block number from storage or ethClient func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { - dh.log.Debugf("EVMMultidownloader.HeaderByNumber: received number: %s", number.String()) - defer dh.log.Debugf("EVMMultidownloader.HeaderByNumber: finished number: %s", number.String()) + if debugSyncerInterface { + dh.log.Debugf("EVMMultidownloader.HeaderByNumber: received number: %s", number.String()) + defer dh.log.Debugf("EVMMultidownloader.HeaderByNumber: finished number: %s", number.String()) + } if !number.IsConstant() { return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: only numeric blockNumbers are supported (got=%s)", number.String()) @@ -89,9 +99,10 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, if block != nil { return block, nil } - // This is a fallback mechanism in case the block is not found in storage (it must be in storage!) - dh.log.Debugf("EVMMultidownloader.HeaderByNumber: block number=%s not found in storage, fetching from ethClient", - number.String()) + if debugSyncerInterface { + dh.log.Debugf("EVMMultidownloader.HeaderByNumber: block number=%s not found in storage, fetching from ethClient", + number.String()) + } blockHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, number) if err != nil { return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: ethClient.HeaderByNumber(%s) failed. Err: %w", @@ -104,3 +115,36 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, func (dh *EVMMultidownloader) EthClient() aggkittypes.BaseEthereumClienter { return dh.ethClient } + +// CheckValidBlock checks if the given blockNumber and blockHash are still valid +// returns: isValid bool, reorgChainID uint64, err error +func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber uint64, + blockHash common.Hash) (bool, uint64, error) { + // Check if is stored as valid block + storedBlock, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber) + if err != nil { + return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot get BlockHeader number=%d: %w", + blockNumber, err) + } + if storedBlock != nil { + // Is valid? + if storedBlock.Hash == blockHash { + return true, 0, nil + } + } + // From this point is invalid or unknown + // Check in blocks_reorged + chainID, found, err := dh.storage.GetBlockReorgedChainID(nil, blockNumber, blockHash) + if err != nil { + return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot check blocks_reorged for blockNumber=%d: %w", + blockNumber, err) + } + if found { + dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (chainID=%d)", + blockNumber, blockHash.Hex(), chainID) + return false, chainID, nil + } + // Not found anywhere, consider invalid + return false, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged", + blockNumber, blockHash.Hex()) +} diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index a40118322..c3509429a 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -527,3 +527,116 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM mockBlockNotifierManager: mockBlockNotifierManager, } } + +func TestEVMMultidownloader_StartStop(t *testing.T) { + t.Run("Stop without Start returns error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + err := data.mdr.Stop(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "not running") + }) + + t.Run("Start and Stop successfully", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks for Start loop + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("stop test")).Maybe() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe() + + // Start in background + ctx := context.Background() + var startErr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + startErr = data.mdr.Start(ctx) + }() + + // Give it time to start and run a few iterations + time.Sleep(50 * time.Millisecond) + + // Stop should succeed + stopCtx := context.Background() + err := data.mdr.Stop(stopCtx) + require.NoError(t, err) + + // Wait for Start to finish + wg.Wait() + // Start should return context.Canceled (clean shutdown via context cancellation) + require.ErrorIs(t, startErr, context.Canceled) + }) + + t.Run("Start twice returns error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("stop test")).Maybe() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe() + + // Start first time + ctx := context.Background() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + _ = data.mdr.Start(ctx) + }() + + // Give it time to start + time.Sleep(50 * time.Millisecond) + + // Try to start again - should fail + err := data.mdr.Start(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "already running") + + // Cleanup + _ = data.mdr.Stop(ctx) + wg.Wait() + }) + + t.Run("Stop waits for Start to complete", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("mock error")).Maybe() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe() + + // Start in background + ctx := context.Background() + startCompleted := false + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + _ = data.mdr.Start(ctx) + startCompleted = true + }() + + // Give it time to start + time.Sleep(50 * time.Millisecond) + + // Stop and verify it waits + stopStartTime := time.Now() + stopCtx := context.Background() + err := data.mdr.Stop(stopCtx) + stopDuration := time.Since(stopStartTime) + + require.NoError(t, err) + require.True(t, startCompleted, "Start should have completed before Stop returns") + require.Greater(t, stopDuration, time.Duration(0), "Stop should take some time waiting for Start") + + wg.Wait() + }) +} diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 4464ebd19..77d4264aa 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -17,8 +17,7 @@ type ReorgPorter interface { GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) // Return ChainID of the inserted reorg MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) - // Return latest block number in RPC - GetLatestBlockNumberInRPC(ctx context.Context) (uint64, error) + GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) } type ReorgProcessor struct { @@ -74,19 +73,24 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, if err != nil { return fmt.Errorf("ProcessReorg: error getting last block number in storage: %w", err) } - latestBlockNumberInRPC, err := rm.port.GetLatestBlockNumberInRPC(ctx) + latestBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock) if err != nil { return fmt.Errorf("ProcessReorg: error getting latest block number in RPC: %w", err) } - rm.log.Infof("ProcessReorg: reorg detected from block %d to block %d", - currentBlockNumber+1, lastBlockNumberInStorage) + finalizedBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting finalized block number in RPC: %w", err) + } + rm.log.Infof("ProcessReorg: reorg detected from block %d to block %d", + firstUnaffectedBlock+1, lastBlockNumberInStorage) + // TODO: Add hash to blockNumbers reorgData := mdtypes.ReorgData{ BlockRangeAffected: aggkitcommon.NewBlockRange(firstUnaffectedBlock+1, lastBlockNumberInStorage), DetectedAtBlock: lastBlockNumberInStorage, DetectedTimestamp: rm.funcNow(), NetworkLatestBlock: latestBlockNumberInRPC, - NetworkFinalizedBlock: firstUnaffectedBlock, + NetworkFinalizedBlock: finalizedBlockNumberInRPC, NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, } chainID, err := rm.port.MoveReorgedBlocks(tx, reorgData) diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index accaeb6bb..7569da052 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -16,9 +16,10 @@ type compareBlockHeaders struct { } type ReorgPort struct { - ethClient aggkittypes.BaseEthereumClienter - rpcClient aggkittypes.RPCClienter - storage mdtypes.Storager + ethClient aggkittypes.BaseEthereumClienter + rpcClient aggkittypes.RPCClienter + storage mdtypes.Storager + finalizedBlockTag aggkittypes.BlockNumberFinality } func (r *ReorgPort) NewTx(ctx context.Context) (dbtypes.Txer, error) { @@ -43,24 +44,21 @@ func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querie } func (r *ReorgPort) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) { - highestBlock, _, err := r.storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + highestBlock, err := r.storage.GetHighestBlockNumber(nil) if err != nil { return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block from storage: %w", err) } - if highestBlock == nil { - return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block (=nil) from storage") - } - return highestBlock.Number, nil + return highestBlock, nil } - func (r *ReorgPort) MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) { return r.storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) } -func (r *ReorgPort) GetLatestBlockNumberInRPC(ctx context.Context) (uint64, error) { - latestBlockNumber, err := r.ethClient.BlockNumber(ctx) +func (r *ReorgPort) GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) { + blockNumber, err := r.ethClient.CustomHeaderByNumber(ctx, &blockFinality) if err != nil { - return 0, fmt.Errorf("GetLatestBlockNumber: error getting latest block number from RPC: %w", err) + return 0, fmt.Errorf("GetBlockNumberInRPC: error getting block number for %s from RPC: %w", + blockFinality.String(), err) } - return latestBlockNumber, nil + return blockNumber.Number, nil } diff --git a/multidownloader/state.go b/multidownloader/state.go index eaa45f8c8..2c4abd3e2 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -41,10 +41,20 @@ func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment, return NewState(&storageSynced, &totalToSync), nil } +// Clone creates a deep copy of the State +// This ensures that modifications to the cloned state don't affect the original func (s *State) Clone() *State { + if s == nil { + return nil + } + + // Use Clone() from SetSyncSegment which does deep copy + clonedSynced := s.Synced.Clone() + clonedPending := s.Pending.Clone() + return &State{ - Synced: s.Synced, - Pending: s.Pending, + Synced: *clonedSynced, + Pending: *clonedPending, } } func (s *State) String() string { @@ -79,15 +89,37 @@ func (s *State) TotalBlocksPendingToSync() uint64 { return s.Pending.TotalBlocks() } +// OnNewSyncedLogQuery updates the state to mark a LogQuery as synced +// This function is transactional - if either operation fails, the state remains unchanged func (s *State) OnNewSyncedLogQuery(logQuery *mdrtypes.LogQuery) error { - err := s.Synced.AddLogQuery(logQuery) + if s == nil { + return fmt.Errorf("OnNewSyncedLogQuery: state is nil") + } + if logQuery == nil { + return fmt.Errorf("OnNewSyncedLogQuery: logQuery is nil") + } + + // Clone both sets to ensure atomicity + // If either operation fails, the original state remains unchanged + clonedSynced := s.Synced.Clone() + clonedPending := s.Pending.Clone() + + // Try to add to synced + err := clonedSynced.AddLogQuery(logQuery) if err != nil { - return fmt.Errorf("OnNewSyncedLogQuery: addding syned segment: %w", err) + return fmt.Errorf("OnNewSyncedLogQuery: adding synced segment: %w", err) } - err = s.Pending.SubtractLogQuery(logQuery) + + // Try to subtract from pending + err = clonedPending.SubtractLogQuery(logQuery) if err != nil { return fmt.Errorf("OnNewSyncedLogQuery: subtracting pending segment: %w", err) } + + // Both operations succeeded, commit the changes + s.Synced = *clonedSynced + s.Pending = *clonedPending + return nil } diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go index 10a54f179..fd1edbfeb 100644 --- a/multidownloader/state_test.go +++ b/multidownloader/state_test.go @@ -44,3 +44,338 @@ func TestStateInitial(t *testing.T) { require.Equal(t, aggkitcommon.NewBlockRange(0, 456), pendingSegments[0].BlockRange) require.Equal(t, aggkittypes.FinalizedBlock, pendingSegments[0].TargetToBlock) } + +func TestState_OnNewSyncedLogQuery(t *testing.T) { + t.Run("nil state", func(t *testing.T) { + var state *State + logQuery := mdtypes.NewLogQuery(1, 10, []common.Address{common.HexToAddress("0x1")}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.Error(t, err) + require.Contains(t, err.Error(), "state is nil") + }) + + t.Run("nil logQuery", func(t *testing.T) { + state := NewEmptyState() + err := state.OnNewSyncedLogQuery(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "logQuery is nil") + }) + + t.Run("successful sync", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + syncedSet := mdtypes.NewSetSyncSegment() + syncedSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 100), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(101, 200), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Get counts before + syncedBefore := state.SyncedSegmentsByContract([]common.Address{addr1}) + pendingBefore := state.TotalBlocksPendingToSync() + + require.Equal(t, 1, len(syncedBefore)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedBefore[0].BlockRange) + require.Equal(t, uint64(100), pendingBefore) + + // Sync blocks 101-150 + logQuery := mdtypes.NewLogQuery(101, 150, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify synced was extended + syncedAfter := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(syncedAfter)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 150), syncedAfter[0].BlockRange) + + // Verify pending was reduced + pendingAfter := state.TotalBlocksPendingToSync() + require.Equal(t, uint64(50), pendingAfter) // 151-200 = 50 blocks + }) + + t.Run("transactional behavior - state unchanged on error", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + syncedSet := mdtypes.NewSetSyncSegment() + syncedSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 100), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(101, 1000), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Get state before + syncedBefore := state.SyncedSegmentsByContract([]common.Address{addr1}) + pendingBefore := state.TotalBlocksPendingToSync() + syncedCountBefore := len(syncedBefore) + + // Try to sync a range in the middle (500-600) which would split the pending segment + // This should fail with "cannot split segment" error + logQuery := mdtypes.NewLogQuery(500, 600, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + + // Should fail because it would split the segment into two parts + require.Error(t, err) + require.Contains(t, err.Error(), "cannot split segment") + + // Verify state is unchanged + syncedAfter := state.SyncedSegmentsByContract([]common.Address{addr1}) + pendingAfter := state.TotalBlocksPendingToSync() + + require.Equal(t, syncedCountBefore, len(syncedAfter), "synced segments count should be unchanged") + require.Equal(t, syncedBefore[0].BlockRange, syncedAfter[0].BlockRange, "synced range should be unchanged") + require.Equal(t, pendingBefore, pendingAfter, "pending blocks should be unchanged") + }) + + t.Run("multiple consecutive syncs", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + syncedSet := mdtypes.NewSetSyncSegment() + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 1000), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Sync in chunks + chunks := []struct { + from uint64 + to uint64 + }{ + {1, 100}, + {101, 200}, + {201, 300}, + } + + for i, chunk := range chunks { + logQuery := mdtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err, "chunk %d should succeed", i) + + // Verify synced range + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, uint64(1), synced[0].BlockRange.FromBlock) + require.Equal(t, chunk.to, synced[0].BlockRange.ToBlock) + } + + // Verify final state + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, aggkitcommon.NewBlockRange(1, 300), synced[0].BlockRange) + require.Equal(t, uint64(700), state.TotalBlocksPendingToSync()) // 301-1000 + }) + + t.Run("sync everything until finished", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + // Start with empty synced and full pending + syncedSet := mdtypes.NewSetSyncSegment() + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 300), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Verify initial state + require.False(t, state.IsSyncFinished(), "should not be finished initially") + require.Equal(t, uint64(300), state.TotalBlocksPendingToSync()) + + // Sync all blocks in chunks + chunks := []struct { + from uint64 + to uint64 + }{ + {1, 100}, + {101, 200}, + {201, 300}, + } + + for i, chunk := range chunks { + logQuery := mdtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err, "chunk %d should succeed", i) + + if i < len(chunks)-1 { + // Not finished yet + require.False(t, state.IsSyncFinished(), "should not be finished after chunk %d", i) + require.Greater(t, state.TotalBlocksPendingToSync(), uint64(0), + "should have pending blocks after chunk %d", i) + } + } + + // Verify everything is synced + require.True(t, state.IsSyncFinished(), "should be finished after syncing all blocks") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 pending blocks") + + // Verify synced range covers everything + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 300), synced[0].BlockRange) + + // Verify total pending block range is nil or empty + totalPending := state.GetTotalPendingBlockRange() + if totalPending != nil { + require.True(t, totalPending.IsEmpty(), "total pending range should be empty") + } + }) + + t.Run("sync everything with single query", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + // Start with some already synced + syncedSet := mdtypes.NewSetSyncSegment() + syncedSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 50), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(51, 100), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Verify initial state + require.False(t, state.IsSyncFinished()) + require.Equal(t, uint64(50), state.TotalBlocksPendingToSync()) + + // Sync remaining blocks in one go + logQuery := mdtypes.NewLogQuery(51, 100, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify finished + require.True(t, state.IsSyncFinished(), "should be finished") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 pending blocks") + require.Nil(t, state.GetTotalPendingBlockRange(), "total pending range should be nil") + // Verify complete synced range + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), synced[0].BlockRange) + }) +} + +func TestState_Clone(t *testing.T) { + t.Run("nil state", func(t *testing.T) { + var state *State + cloned := state.Clone() + require.Nil(t, cloned, "cloning a nil state should return nil") + }) + + t.Run("deep copy verification", func(t *testing.T) { + // Create original state with synced and pending segments + addr1 := common.HexToAddress("0x100") + + syncedSet := mdtypes.NewSetSyncSegment() + syncedSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 100), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(101, 200), + aggkittypes.LatestBlock, + false)) + + original := NewState(&syncedSet, &pendingSet) + + // Clone the state + cloned := original.Clone() + + // Verify cloned state has same values initially + require.NotNil(t, cloned, "cloned state should not be nil") + + // Get synced segments before modification + originalSyncedBefore := original.SyncedSegmentsByContract([]common.Address{addr1}) + clonedSyncedBefore := cloned.SyncedSegmentsByContract([]common.Address{addr1}) + + require.Equal(t, len(originalSyncedBefore), len(clonedSyncedBefore)) + require.Equal(t, originalSyncedBefore[0].BlockRange, clonedSyncedBefore[0].BlockRange) + + // Modify the original by syncing more blocks + logQuery := mdtypes.NewLogQuery(101, 150, []common.Address{addr1}) + err := original.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Get synced segments after modification + originalSyncedAfter := original.SyncedSegmentsByContract([]common.Address{addr1}) + clonedSyncedAfter := cloned.SyncedSegmentsByContract([]common.Address{addr1}) + + // Original should have extended synced range (1-150) + require.Equal(t, 1, len(originalSyncedAfter)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 150), originalSyncedAfter[0].BlockRange, + "original should have extended range after sync") + + // Cloned should still have the original range (1-100) + require.Equal(t, 1, len(clonedSyncedAfter)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), clonedSyncedAfter[0].BlockRange, + "cloned state should not be affected by modifications to original") + }) + + t.Run("empty state", func(t *testing.T) { + original := NewEmptyState() + cloned := original.Clone() + + require.NotNil(t, cloned, "cloned empty state should not be nil") + require.True(t, cloned.IsSyncFinished(), "cloned empty state should be finished") + require.Equal(t, uint64(0), cloned.TotalBlocksPendingToSync(), "cloned empty state should have 0 pending blocks") + }) + + t.Run("complex state with multiple segments", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + addr3 := common.HexToAddress("0x3") + + syncedSet := mdtypes.NewSetSyncSegment() + syncedSet.Add(mdtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(0, 100), aggkittypes.FinalizedBlock, false)) + syncedSet.Add(mdtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(0, 200), aggkittypes.FinalizedBlock, false)) + + pendingSet := mdtypes.NewSetSyncSegment() + pendingSet.Add(mdtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 500), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(201, 600), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdtypes.NewSyncSegment(addr3, aggkitcommon.NewBlockRange(0, 1000), aggkittypes.LatestBlock, false)) + + original := NewState(&syncedSet, &pendingSet) + cloned := original.Clone() + + // Verify counts before modification + originalPendingBefore := original.TotalBlocksPendingToSync() + clonedPendingBefore := cloned.TotalBlocksPendingToSync() + require.Equal(t, originalPendingBefore, clonedPendingBefore) + + // Modify original - sync blocks at the end of addr3 range to avoid splitting + logQuery := mdtypes.NewLogQuery(901, 1000, []common.Address{addr3}) + err := original.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify original changed + originalPendingAfter := original.TotalBlocksPendingToSync() + require.Less(t, originalPendingAfter, originalPendingBefore, "original pending should decrease") + + // Verify cloned is independent + clonedPendingAfter := cloned.TotalBlocksPendingToSync() + require.Equal(t, clonedPendingBefore, clonedPendingAfter, + "cloned state should be independent from original after modification") + }) +} diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index e650cb4c6..af29c8092 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -266,6 +266,7 @@ func (a *MultidownloaderStorage) saveBlocksNoMutex(tx dbtypes.Querier, blockRows tx = a.db } for _, blockRow := range blockRows { + a.logger.Debugf("Inserting block header row: %d %s final=%v", blockRow.BlockNumber, blockRow.BlockHash.Hex(), blockRow.IsFinal) if err := meddler.Insert(tx, "blocks", blockRow); err != nil { return fmt.Errorf("saveBlocksNoMutex: error inserting block header row (%s): %w", blockRow.String(), err) } diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index cf6293872..520eac20b 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -42,7 +42,7 @@ func (b *Blocks) Get(number uint64) (*aggkittypes.BlockHeader, bool, error) { } func (b *Blocks) ListHeaders() aggkittypes.ListBlockHeaders { - headers := aggkittypes.NewListBlockHeadersEmpty(len(b.Headers)) + headers := aggkittypes.NewListBlockHeaders(len(b.Headers)) for _, header := range b.Headers { headers = append(headers, header) } @@ -89,6 +89,22 @@ func (a *MultidownloaderStorage) UpdateBlockToFinalized(tx dbtypes.Querier, bloc return nil } +func (a *MultidownloaderStorage) GetHighestBlockNumber(tx dbtypes.Querier) (uint64, error) { + query := "SELECT MAX(block_number) as max_block_number FROM blocks" + if tx == nil { + tx = a.db + } + var maxBlockNumber sql.NullInt64 + err := tx.QueryRow(query).Scan(&maxBlockNumber) + if err != nil { + return 0, fmt.Errorf("GetHighestBlockNumber: error querying highest block number: %w", err) + } + if maxBlockNumber.Valid { + return uint64(maxBlockNumber.Int64), nil + } + return 0, nil +} + // GetRangeBlockHeader retrieves the highest block header stored in the database // return lowest and highest block headers func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, @@ -165,14 +181,23 @@ func (a *MultidownloaderStorage) getBlockHeadersNoMutex(tx dbtypes.Querier, } // GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock +// if maxBlock is 0, retrieves all not finalized blocks func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, - maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { + maxBlock *uint64) (aggkittypes.ListBlockHeaders, error) { if tx == nil { tx = a.db } + var blocks Blocks + var err error a.mutex.RLock() defer a.mutex.RUnlock() - blocks, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0 AND block_number <= ?", maxBlock) + + if maxBlock != nil { + blocks, err = a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0 AND block_number <= ?", *maxBlock) + } else { + blocks, err = a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0") + } + if err != nil { return nil, err } diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 522803f78..2c7fac56f 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -6,6 +6,7 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" dbtypes "github.com/agglayer/aggkit/db/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/ethereum/go-ethereum/common" "github.com/russross/meddler" ) @@ -94,3 +95,25 @@ func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Quer } return nil } + +func (a *MultidownloaderStorage) GetBlockReorgedChainID(tx dbtypes.Querier, + blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + var chainIDRow struct { + ChainID *uint64 `meddler:"chain_id"` + } + query := `SELECT chain_id FROM blocks_reorged + WHERE block_number = ? AND block_hash = ? LIMIT 1;` + err := tx.QueryRow(query, blockNumber, blockHash.Hex()).Scan(&chainIDRow.ChainID) + if err != nil { + return 0, false, fmt.Errorf("GetBlockReorgedChainID: error querying blocks_reorged: %w", err) + } + if chainIDRow.ChainID == nil { + return 0, false, nil + } + return *chainIDRow.ChainID, true, nil +} diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index 4ed827dfa..5335fff63 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -3,9 +3,10 @@ package mocks import ( - context "context" - aggkittypes "github.com/agglayer/aggkit/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" coretypes "github.com/ethereum/go-ethereum/core/types" @@ -96,7 +97,7 @@ func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Quer } // GetBlockHeadersNotFinalized provides a mock function with given fields: tx, maxBlock -func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) { +func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock *uint64) (aggkittypes.ListBlockHeaders, error) { ret := _m.Called(tx, maxBlock) if len(ret) == 0 { @@ -105,10 +106,10 @@ func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint6 var r0 aggkittypes.ListBlockHeaders var r1 error - if rf, ok := ret.Get(0).(func(types.Querier, uint64) (aggkittypes.ListBlockHeaders, error)); ok { + if rf, ok := ret.Get(0).(func(types.Querier, *uint64) (aggkittypes.ListBlockHeaders, error)); ok { return rf(tx, maxBlock) } - if rf, ok := ret.Get(0).(func(types.Querier, uint64) aggkittypes.ListBlockHeaders); ok { + if rf, ok := ret.Get(0).(func(types.Querier, *uint64) aggkittypes.ListBlockHeaders); ok { r0 = rf(tx, maxBlock) } else { if ret.Get(0) != nil { @@ -116,7 +117,7 @@ func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock uint6 } } - if rf, ok := ret.Get(1).(func(types.Querier, uint64) error); ok { + if rf, ok := ret.Get(1).(func(types.Querier, *uint64) error); ok { r1 = rf(tx, maxBlock) } else { r1 = ret.Error(1) @@ -132,14 +133,14 @@ type Storager_GetBlockHeadersNotFinalized_Call struct { // GetBlockHeadersNotFinalized is a helper method to define mock.On call // - tx types.Querier -// - maxBlock uint64 +// - maxBlock *uint64 func (_e *Storager_Expecter) GetBlockHeadersNotFinalized(tx interface{}, maxBlock interface{}) *Storager_GetBlockHeadersNotFinalized_Call { return &Storager_GetBlockHeadersNotFinalized_Call{Call: _e.mock.On("GetBlockHeadersNotFinalized", tx, maxBlock)} } -func (_c *Storager_GetBlockHeadersNotFinalized_Call) Run(run func(tx types.Querier, maxBlock uint64)) *Storager_GetBlockHeadersNotFinalized_Call { +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Run(run func(tx types.Querier, maxBlock *uint64)) *Storager_GetBlockHeadersNotFinalized_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(types.Querier), args[1].(uint64)) + run(args[0].(types.Querier), args[1].(*uint64)) }) return _c } @@ -149,7 +150,72 @@ func (_c *Storager_GetBlockHeadersNotFinalized_Call) Return(_a0 aggkittypes.List return _c } -func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, uint64) (aggkittypes.ListBlockHeaders, error)) *Storager_GetBlockHeadersNotFinalized_Call { +func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, *uint64) (aggkittypes.ListBlockHeaders, error)) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockReorgedChainID provides a mock function with given fields: tx, blockNumber, blockHash +func (_m *Storager) GetBlockReorgedChainID(tx types.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { + ret := _m.Called(tx, blockNumber, blockHash) + + if len(ret) == 0 { + panic("no return value specified for GetBlockReorgedChainID") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64, common.Hash) (uint64, bool, error)); ok { + return rf(tx, blockNumber, blockHash) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64, common.Hash) uint64); ok { + r0 = rf(tx, blockNumber, blockHash) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64, common.Hash) bool); ok { + r1 = rf(tx, blockNumber, blockHash) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(types.Querier, uint64, common.Hash) error); ok { + r2 = rf(tx, blockNumber, blockHash) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Storager_GetBlockReorgedChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockReorgedChainID' +type Storager_GetBlockReorgedChainID_Call struct { + *mock.Call +} + +// GetBlockReorgedChainID is a helper method to define mock.On call +// - tx types.Querier +// - blockNumber uint64 +// - blockHash common.Hash +func (_e *Storager_Expecter) GetBlockReorgedChainID(tx interface{}, blockNumber interface{}, blockHash interface{}) *Storager_GetBlockReorgedChainID_Call { + return &Storager_GetBlockReorgedChainID_Call{Call: _e.mock.On("GetBlockReorgedChainID", tx, blockNumber, blockHash)} +} + +func (_c *Storager_GetBlockReorgedChainID_Call) Run(run func(tx types.Querier, blockNumber uint64, blockHash common.Hash)) *Storager_GetBlockReorgedChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64), args[2].(common.Hash)) + }) + return _c +} + +func (_c *Storager_GetBlockReorgedChainID_Call) Return(_a0 uint64, _a1 bool, _a2 error) *Storager_GetBlockReorgedChainID_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Storager_GetBlockReorgedChainID_Call) RunAndReturn(run func(types.Querier, uint64, common.Hash) (uint64, bool, error)) *Storager_GetBlockReorgedChainID_Call { _c.Call.Return(run) return _c } @@ -213,6 +279,62 @@ func (_c *Storager_GetEthLogs_Call) RunAndReturn(run func(types.Querier, multido return _c } +// GetHighestBlockNumber provides a mock function with given fields: tx +func (_m *Storager) GetHighestBlockNumber(tx types.Querier) (uint64, error) { + ret := _m.Called(tx) + + if len(ret) == 0 { + panic("no return value specified for GetHighestBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier) (uint64, error)); ok { + return rf(tx) + } + if rf, ok := ret.Get(0).(func(types.Querier) uint64); ok { + r0 = rf(tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier) error); ok { + r1 = rf(tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetHighestBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHighestBlockNumber' +type Storager_GetHighestBlockNumber_Call struct { + *mock.Call +} + +// GetHighestBlockNumber is a helper method to define mock.On call +// - tx types.Querier +func (_e *Storager_Expecter) GetHighestBlockNumber(tx interface{}) *Storager_GetHighestBlockNumber_Call { + return &Storager_GetHighestBlockNumber_Call{Call: _e.mock.On("GetHighestBlockNumber", tx)} +} + +func (_c *Storager_GetHighestBlockNumber_Call) Run(run func(tx types.Querier)) *Storager_GetHighestBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier)) + }) + return _c +} + +func (_c *Storager_GetHighestBlockNumber_Call) Return(_a0 uint64, _a1 error) *Storager_GetHighestBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetHighestBlockNumber_Call) RunAndReturn(run func(types.Querier) (uint64, error)) *Storager_GetHighestBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // GetRangeBlockHeader provides a mock function with given fields: tx, isFinal func (_m *Storager) GetRangeBlockHeader(tx types.Querier, isFinal multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { ret := _m.Called(tx, isFinal) diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 6d0416827..f00de014c 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -226,6 +226,10 @@ func (f *SetSyncSegment) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { } var totalRange *aggkitcommon.BlockRange for _, segment := range f.segments { + // Skip empty segments to avoid creating invalid BlockRanges + if segment.IsEmpty() { + continue + } if totalRange == nil { br := segment.BlockRange totalRange = &br diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index bf29775c4..00aca7e55 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -436,3 +436,77 @@ func TestSetSyncSegment_AfterFullySync(t *testing.T) { require.True(t, exists) require.Equal(t, "From: 101, To: 150 (50)", segment.BlockRange.String()) } + +func TestSetSyncSegment_GetTotalPendingBlockRange_WithEmptySegments(t *testing.T) { + t.Run("single empty segment returns nil", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment) + + // Sync everything + logQuery := &LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + + // Verify segment is empty + segment, exists := set.GetByContract(addr) + require.True(t, exists) + require.True(t, segment.IsEmpty()) + + // GetTotalPendingBlockRange should return nil, not an invalid range + totalRange := set.GetTotalPendingBlockRange() + require.Nil(t, totalRange, "should return nil when all segments are empty") + }) + + t.Run("multiple segments with some empty", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + // Add two segments + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + + // Sync first segment completely + logQuery := &LogQuery{ + Addrs: []common.Address{addr1}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + + // First segment should be empty + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.True(t, seg1.IsEmpty()) + + // Second segment should not be empty + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.False(t, seg2.IsEmpty()) + + // GetTotalPendingBlockRange should return only the non-empty segment range + totalRange := set.GetTotalPendingBlockRange() + require.NotNil(t, totalRange) + require.Equal(t, uint64(50), totalRange.FromBlock) + require.Equal(t, uint64(150), totalRange.ToBlock) + }) +} diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go index 4f0d8d330..6ecdbd207 100644 --- a/multidownloader/types/storager.go +++ b/multidownloader/types/storager.go @@ -5,6 +5,7 @@ import ( dbtypes "github.com/agglayer/aggkit/db/types" aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -27,11 +28,18 @@ type Storager interface { UpsertSyncerConfigs(tx dbtypes.Querier, configs []ContractConfig) error GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) NewTx(ctx context.Context) (dbtypes.Txer, error) - - GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock uint64) (aggkittypes.ListBlockHeaders, error) + // GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock + // if maxBlock is nil, retrieves all not finalized blocks + GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock *uint64) (aggkittypes.ListBlockHeaders, error) UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error GetRangeBlockHeader(tx dbtypes.Querier, isFinal FinalizedType) (lowest *aggkittypes.BlockHeader, highest *aggkittypes.BlockHeader, err error) + // GetHighestBlockNumber returns the highest block number stored in db + GetHighestBlockNumber(tx dbtypes.Querier) (uint64, error) + // GetReorgedChainID returns the chainID of the reorged block if exists + // second return value indicates if the block is reorged + GetBlockReorgedChainID(tx dbtypes.Querier, + blockNumber uint64, blockHash common.Hash) (uint64, bool, error) } type StoragerForReorg interface { diff --git a/types/list_block_header.go b/types/list_block_header.go index 9b0e24be7..1070b00dc 100644 --- a/types/list_block_header.go +++ b/types/list_block_header.go @@ -6,7 +6,12 @@ type ListBlockHeaders []*BlockHeader // NewListBlockHeadersEmpty creates a new ListBlockHeaders with pre-allocated items set to nil func NewListBlockHeadersEmpty(preAllocatedSize int) ListBlockHeaders { - return ListBlockHeaders(make([]*BlockHeader, preAllocatedSize, preAllocatedSize)) + return ListBlockHeaders(make([]*BlockHeader, 0, preAllocatedSize)) +} + +// NewListBlockHeaders creates a new ListBlockHeaders with the given size to zero element +func NewListBlockHeaders(size int) ListBlockHeaders { + return ListBlockHeaders(make([]*BlockHeader, size)) } func (lbs ListBlockHeaders) Len() int { return len(lbs) From 90e375bc96aebc71f032c3dd5ad92e584ff98ebb Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 26 Jan 2026 21:08:15 +0100 Subject: [PATCH 09/75] fix: lint --- common/block_range_test.go | 156 ++++++------ etherman/default_eth_client_test.go | 121 +++------ multidownloader/e2e_test.go | 233 ++++++++++++++++++ multidownloader/evm_multidownloader.go | 5 +- .../evm_multidownloader_syncers.go | 3 +- multidownloader/evm_multidownloader_test.go | 7 +- multidownloader/reorg_processor_port.go | 11 +- multidownloader/storage/storage.go | 3 +- multidownloader/storage/storage_block.go | 2 +- 9 files changed, 368 insertions(+), 173 deletions(-) create mode 100644 multidownloader/e2e_test.go diff --git a/common/block_range_test.go b/common/block_range_test.go index 94a65cc20..6652de124 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -483,120 +483,120 @@ func TestBlockRange_ListBlockNumbers(t *testing.T) { func TestBlockRange_SplitByBlockNumber(t *testing.T) { tests := []struct { - name string - blockRange BlockRange - splitBlock uint64 - expectedFirst BlockRange - expectedSecond BlockRange - descriptionFirst string + name string + blockRange BlockRange + splitBlock uint64 + expectedFirst BlockRange + expectedSecond BlockRange + descriptionFirst string descriptionSecond string }{ { - name: "split in the middle", - blockRange: NewBlockRange(100, 200), - splitBlock: 150, - expectedFirst: NewBlockRange(100, 150), - expectedSecond: NewBlockRange(151, 200), - descriptionFirst: "first half includes split block", + name: "split in the middle", + blockRange: NewBlockRange(100, 200), + splitBlock: 150, + expectedFirst: NewBlockRange(100, 150), + expectedSecond: NewBlockRange(151, 200), + descriptionFirst: "first half includes split block", descriptionSecond: "second half starts after split block", }, { - name: "split at FromBlock", - blockRange: NewBlockRange(100, 200), - splitBlock: 100, - expectedFirst: NewBlockRange(100, 100), - expectedSecond: NewBlockRange(101, 200), - descriptionFirst: "first range is single block", + name: "split at FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: NewBlockRange(101, 200), + descriptionFirst: "first range is single block", descriptionSecond: "second range is rest of blocks", }, { - name: "split at ToBlock", - blockRange: NewBlockRange(100, 200), - splitBlock: 200, - expectedFirst: NewBlockRange(100, 200), - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is entire range", + name: "split at ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 200, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", descriptionSecond: "second range is empty", }, { - name: "split before FromBlock", - blockRange: NewBlockRange(100, 200), - splitBlock: 50, - expectedFirst: BlockRangeZero, - expectedSecond: NewBlockRange(100, 200), - descriptionFirst: "first range is empty", + name: "split before FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 50, + expectedFirst: BlockRangeZero, + expectedSecond: NewBlockRange(100, 200), + descriptionFirst: "first range is empty", descriptionSecond: "second range is entire original range", }, { - name: "split after ToBlock", - blockRange: NewBlockRange(100, 200), - splitBlock: 250, - expectedFirst: NewBlockRange(100, 200), - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is entire range", + name: "split after ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 250, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", descriptionSecond: "second range is empty", }, { - name: "split single block range at that block", - blockRange: NewBlockRange(100, 100), - splitBlock: 100, - expectedFirst: NewBlockRange(100, 100), - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is the single block", + name: "split single block range at that block", + blockRange: NewBlockRange(100, 100), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is the single block", descriptionSecond: "second range is empty", }, { - name: "split single block range before", - blockRange: NewBlockRange(100, 100), - splitBlock: 50, - expectedFirst: BlockRangeZero, - expectedSecond: NewBlockRange(100, 100), - descriptionFirst: "first range is empty", + name: "split single block range before", + blockRange: NewBlockRange(100, 100), + splitBlock: 50, + expectedFirst: BlockRangeZero, + expectedSecond: NewBlockRange(100, 100), + descriptionFirst: "first range is empty", descriptionSecond: "second range is the single block", }, { - name: "split single block range after", - blockRange: NewBlockRange(100, 100), - splitBlock: 150, - expectedFirst: NewBlockRange(100, 100), - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is the single block", + name: "split single block range after", + blockRange: NewBlockRange(100, 100), + splitBlock: 150, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is the single block", descriptionSecond: "second range is empty", }, { - name: "split empty range", - blockRange: BlockRangeZero, - splitBlock: 100, - expectedFirst: BlockRangeZero, - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is empty", + name: "split empty range", + blockRange: BlockRangeZero, + splitBlock: 100, + expectedFirst: BlockRangeZero, + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is empty", descriptionSecond: "second range is empty", }, { - name: "split two block range at first", - blockRange: NewBlockRange(100, 101), - splitBlock: 100, - expectedFirst: NewBlockRange(100, 100), - expectedSecond: NewBlockRange(101, 101), - descriptionFirst: "first range is first block", + name: "split two block range at first", + blockRange: NewBlockRange(100, 101), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: NewBlockRange(101, 101), + descriptionFirst: "first range is first block", descriptionSecond: "second range is second block", }, { - name: "split two block range at second", - blockRange: NewBlockRange(100, 101), - splitBlock: 101, - expectedFirst: NewBlockRange(100, 101), - expectedSecond: BlockRangeZero, - descriptionFirst: "first range is both blocks", + name: "split two block range at second", + blockRange: NewBlockRange(100, 101), + splitBlock: 101, + expectedFirst: NewBlockRange(100, 101), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is both blocks", descriptionSecond: "second range is empty", }, { - name: "split at ToBlock minus 1", - blockRange: NewBlockRange(100, 200), - splitBlock: 199, - expectedFirst: NewBlockRange(100, 199), - expectedSecond: NewBlockRange(200, 200), - descriptionFirst: "first range is all but last block", + name: "split at ToBlock minus 1", + blockRange: NewBlockRange(100, 200), + splitBlock: 199, + expectedFirst: NewBlockRange(100, 199), + expectedSecond: NewBlockRange(200, 200), + descriptionFirst: "first range is all but last block", descriptionSecond: "second range is last block only", }, } diff --git a/etherman/default_eth_client_test.go b/etherman/default_eth_client_test.go index 5543c576f..3ced7164f 100644 --- a/etherman/default_eth_client_test.go +++ b/etherman/default_eth_client_test.go @@ -99,6 +99,43 @@ func testBlockWithOffsetHelper( require.Equal(t, bn, header.RequestedBlock) } +// testBlockWithOffsetHelperGeth is a helper function for testing block tag resolution with offsets using geth client +func testBlockWithOffsetHelperGeth( + t *testing.T, + ctx context.Context, + blockNumFinality string, + firstCallArg *big.Int, + firstBlockNum uint64, + secondBlockNum uint64, +) { + t.Helper() + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = false + + bn, err := aggkittypes.NewBlockNumberFinality(blockNumFinality) + require.NoError(t, err) + + mockEthClient.EXPECT(). + HeaderByNumber(ctx, firstCallArg). + Return(&types.Header{ + Number: big.NewInt(int64(firstBlockNum)), + }, nil).Once() + + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(int64(secondBlockNum))). + Return(&types.Header{ + Number: big.NewInt(int64(secondBlockNum)), + }, nil).Once() + + header, err := client.CustomHeaderByNumber(ctx, bn) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, secondBlockNum, header.Number) + require.Equal(t, bn, header.RequestedBlock) +} + func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { ctx := context.Background() @@ -182,92 +219,14 @@ func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { }) t.Run("LatestBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { - mockEthClient := mocks.NewEthereumClienter(t) - mockRPCClient := mocks.NewRPCClienter(t) - client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) - client.HashFromJSON = false - - bnLatestMinus10, err := aggkittypes.NewBlockNumberFinality("LatestBlock/-10") - require.NoError(t, err) - - // First call to resolve latest block (returns 100) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, (*big.Int)(nil)). - Return(&types.Header{ - Number: big.NewInt(100), - }, nil).Once() - - // Second call to get block 90 (100 - 10) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, big.NewInt(90)). - Return(&types.Header{ - Number: big.NewInt(90), - }, nil).Once() - - header, err := client.CustomHeaderByNumber(ctx, bnLatestMinus10) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, uint64(90), header.Number) - require.Equal(t, bnLatestMinus10, header.RequestedBlock) + testBlockWithOffsetHelperGeth(t, ctx, "LatestBlock/-10", nil, 100, 90) }) t.Run("FinalizedBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { - mockEthClient := mocks.NewEthereumClienter(t) - mockRPCClient := mocks.NewRPCClienter(t) - client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) - client.HashFromJSON = false - - bnFinalizedMinus5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/-5") - require.NoError(t, err) - - // First call to resolve finalized block (returns 100) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, big.NewInt(-3)). - Return(&types.Header{ - Number: big.NewInt(100), - }, nil).Once() - - // Second call to get block 95 (100 - 5) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, big.NewInt(95)). - Return(&types.Header{ - Number: big.NewInt(95), - }, nil).Once() - - header, err := client.CustomHeaderByNumber(ctx, bnFinalizedMinus5) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, uint64(95), header.Number) - require.Equal(t, bnFinalizedMinus5, header.RequestedBlock) + testBlockWithOffsetHelperGeth(t, ctx, "FinalizedBlock/-5", big.NewInt(-3), 100, 95) }) t.Run("SafeBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { - mockEthClient := mocks.NewEthereumClienter(t) - mockRPCClient := mocks.NewRPCClienter(t) - client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) - client.HashFromJSON = false - - bnSafeMinus3, err := aggkittypes.NewBlockNumberFinality("SafeBlock/-3") - require.NoError(t, err) - - // First call to resolve safe block (returns 50) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, big.NewInt(-4)). - Return(&types.Header{ - Number: big.NewInt(50), - }, nil).Once() - - // Second call to get block 47 (50 - 3) - mockEthClient.EXPECT(). - HeaderByNumber(ctx, big.NewInt(47)). - Return(&types.Header{ - Number: big.NewInt(47), - }, nil).Once() - - header, err := client.CustomHeaderByNumber(ctx, bnSafeMinus3) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, uint64(47), header.Number) - require.Equal(t, bnSafeMinus3, header.RequestedBlock) + testBlockWithOffsetHelperGeth(t, ctx, "SafeBlock/-3", big.NewInt(-4), 50, 47) }) } diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go new file mode 100644 index 000000000..ea2d0096d --- /dev/null +++ b/multidownloader/e2e_test.go @@ -0,0 +1,233 @@ +package multidownloader + +import ( + "context" + "errors" + "math/big" + "testing" + "time" + + configtypes "github.com/agglayer/aggkit/config/types" + "github.com/agglayer/aggkit/etherman" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader/storage" + "github.com/agglayer/aggkit/test/contracts/logemitter" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +var ( + pingSignature = crypto.Keccak256Hash([]byte("Ping(address,uint256,string)")) +) + +type mdrE2ESimulatedEnv struct { + SimulatedL1 *simulated.Backend + LogEmitterAddr common.Address + LogEmitterContract *logemitter.Logemitter + ethClient *etherman.DefaultEthClient + auth *bind.TransactOpts +} + +func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { + t.Helper() + // Generate key + address + key, err := crypto.GenerateKey() + require.NoError(t, err) + from := crypto.PubkeyToAddress(key.PublicKey) + // Genesis + alloc := types.GenesisAlloc{ + from: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}, // 100 ETH + } + envL1 := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(10000000)) + chainID := big.NewInt(1337) + auth, err := bind.NewKeyedTransactorWithChainID(key, chainID) + require.NoError(t, err) + logEmitterAddr, _, logEmitterContract, err := logemitter.DeployLogemitter(auth, envL1.Client(), "msg") + require.NoError(t, err) + require.NotEqual(t, logEmitterAddr, nil) + require.NotNil(t, logEmitterContract) + + envL1.Commit() + return &mdrE2ESimulatedEnv{ + SimulatedL1: envL1, + LogEmitterAddr: logEmitterAddr, + LogEmitterContract: logEmitterContract, + ethClient: etherman.NewDefaultEthClient(envL1.Client(), nil, nil), + auth: auth, + } +} + +func TestE2E(t *testing.T) { + // Simulated L1 + testData := buildL1Simulated(t) + + logger := log.WithFields("module", "mdr_e2e") + cfg := NewConfigDefault("e2e_test", t.TempDir()) + store, err := storage.NewMultidownloaderStorage(logger, + storage.MultidownloaderStorageConfig{ + DBPath: cfg.StoragePath, + }) + require.NoError(t, err) + simulatedFinalized, err := aggkittypes.NewBlockNumberFinality("LatestBlock/-5") + require.NoError(t, err) + _, err = testData.ethClient.CustomHeaderByNumber(t.Context(), simulatedFinalized) + require.NoError(t, err) + + cfg.BlockFinality = *simulatedFinalized + cfg.WaitPeriodToCheckCatchUp = configtypes.Duration{Duration: 1 * time.Millisecond} + cfg.PeriodToCheckReorgs = configtypes.Duration{Duration: 1 * time.Millisecond} + require.NoError(t, err) + + mdr, err := NewEVMMultidownloader( + logger, + cfg, + "mdr_e2e", + testData.ethClient, + nil, // rpcClient + store, + nil, + nil, + ) + require.NoError(t, err) + require.NotNil(t, mdr) + // Generate some logs + _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "hello world") + require.NoError(t, err) + testData.SimulatedL1.Commit() + + err = mdr.RegisterSyncer(aggkittypes.SyncerConfig{ + SyncerID: "log_emitter_e2e_test", + ContractAddresses: []common.Address{ + testData.LogEmitterAddr, + }, + FromBlock: 0, + ToBlock: aggkittypes.LatestBlock, + }) + require.NoError(t, err) + ctx := t.Context() + err = mdr.Initialize(ctx) + require.NoError(t, err) + + go func() { + err := mdr.Start(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } + }() + latestBlock, err := mdr.BlockNumber(ctx, aggkittypes.LatestBlock) + require.NoError(t, err) + logs, err := mdr.FilterLogs(ctx, ethereum.FilterQuery{ + Addresses: []common.Address{testData.LogEmitterAddr}, + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(int64(latestBlock)), + }) + require.NoError(t, err) + emitterLogs := processEvents(t, testData.LogEmitterContract, logs) + require.Equal(t, 2, len(logs)) + require.Equal(t, testData.LogEmitterAddr, logs[0].Address) + require.Equal(t, logEmitterEvent{ + From: testData.auth.From, + Id: big.NewInt(123), + Message: "hello world", + }, emitterLogs[1]) + timeStart := time.Now() + testData.SimulatedL1.Commit() // Block 3 + _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "block 4") + require.NoError(t, err) + testData.SimulatedL1.Commit() // Block 4 + _, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ + Addresses: []common.Address{testData.LogEmitterAddr}, + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(int64(latestBlock + 2)), + }) + require.NoError(t, err) + require.Equal(t, 3, len(logs)) + elapsed := time.Since(timeStart) + logger.Infof("E2E test completed in %s", elapsed.String()) + showChainStatus(t, ctx, logger, testData.SimulatedL1) + blk4, err := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(4)) + require.NoError(t, err) + + // Forking at block 3 -> so block 4 will be reorged + forkAt(t, ctx, logger, testData.SimulatedL1, 3) + + // Now se have to create a longer chain to force reorg + testData.SimulatedL1.Commit() // reorg chain: Block 4 + testData.SimulatedL1.Commit() // reorg chain: Block 5 + showChainStatus(t, ctx, logger, testData.SimulatedL1) + _, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ + Addresses: []common.Address{testData.LogEmitterAddr}, + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(int64(5)), + }) + require.NoError(t, err) + blkReorged4, err := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(4)) + require.NoError(t, err) + logger.Infof("Block 4 hash after reorg: %s", blkReorged4.Hash.Hex()) + require.NotEqual(t, blk4.Hash, blkReorged4.Hash, "block 4 hash should be different after reorg") + time.Sleep(1 * time.Second) + err = mdr.Stop(ctx) + require.NoError(t, err) + isValid, reorgChainID, err := mdr.CheckValidBlock(ctx, blk4.Number, blk4.Hash) + require.NoError(t, err) + require.False(t, isValid, "block 4 should not be valid after reorg") + require.Equal(t, uint64(1), reorgChainID, "reorgChainID should be 1") +} + +func forkAt(t *testing.T, ctx context.Context, logger *log.Logger, sim *simulated.Backend, blockNumber uint64) { + t.Helper() + blk, err := sim.Client().HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) + require.NoError(t, err) + require.NoError(t, err) + logger.Infof("Forking L1 at block %d (%s)... This will generate new block for reorg >%d", blockNumber, blk.Hash().Hex(), blockNumber) + + err = sim.Fork(blk.Hash()) + require.NoError(t, err) +} + +func showChainStatus(t *testing.T, ctx context.Context, logger *log.Logger, sim *simulated.Backend) { + t.Helper() + latestBlock, err := sim.Client().BlockNumber(ctx) + + require.NoError(t, err) + logger.Infof("Current chain latest block: %d", latestBlock) + for i := uint64(0); i <= latestBlock; i++ { + blk, err := sim.Client().HeaderByNumber(ctx, big.NewInt(int64(i))) + require.NoError(t, err) + logger.Infof(" Block %d: %s", i, blk.Hash().Hex()) + } +} + +type logEmitterEvent struct { + From common.Address + Id *big.Int + Message string +} + +func processEvents(t *testing.T, contract *logemitter.Logemitter, logs []types.Log) []logEmitterEvent { + t.Helper() + result := make([]logEmitterEvent, 0) + for _, lg := range logs { + if lg.Topics[0] == pingSignature { + event, err := contract.ParsePing(lg) + require.NoError(t, err) + log.Infof("Processed Ping event: From=%s, Id=%s, Message=%s", + event.From, event.Id, event.Message) + result = append(result, logEmitterEvent{ + From: event.From, + Id: event.Id, + Message: event.Message, + }) + } else { + t.Fatalf("Unknown event signature: %s", lg.Topics[0].Hex()) + } + } + return result +} diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 0cf78000c..d2802c2f3 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -323,7 +323,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { reorgErr := mdrtypes.CastReorgError(err) if reorgErr == nil { dh.log.Warnf("Error running multidownloader: %s ", err.Error()) - time.Sleep(100 * time.Millisecond) // Brief pause before retry + time.Sleep(time.Millisecond) // Brief pause before retry continue } dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) @@ -391,7 +391,6 @@ func (dh *EVMMultidownloader) checkReorgsUnsafeZone(ctx context.Context) error { return fmt.Errorf("checkReorgsUnsafeZone: cannot get unsafe blocks: %w", err) } return dh.detectReorgs(ctx, blockInUnsafeZone) - } func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { @@ -482,7 +481,7 @@ func (dh *EVMMultidownloader) waitForNewBlocks(ctx context.Context, dh.log.Debugf("waitForNewBlocks: waiting for new blocks %s after %d. Check each %s...", blockTag.String(), latestSyncedBlock, - dh.cfg.PeriodToCheckReorgs.Duration.String()) + dh.cfg.PeriodToCheckReorgs.String()) for { select { case <-ctx.Done(): diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index 4ecc1a227..9ef159c2f 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -145,6 +145,7 @@ func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber u return false, chainID, nil } // Not found anywhere, consider invalid - return false, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged", + return false, 0, fmt.Errorf( + "EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged", blockNumber, blockHash.Hex()) } diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index c3509429a..b20f67831 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -6,6 +6,7 @@ import ( "math/big" "os" "sync" + "sync/atomic" "testing" "time" @@ -615,13 +616,13 @@ func TestEVMMultidownloader_StartStop(t *testing.T) { // Start in background ctx := context.Background() - startCompleted := false + var startCompleted atomic.Bool var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() _ = data.mdr.Start(ctx) - startCompleted = true + startCompleted.Store(true) }() // Give it time to start @@ -634,7 +635,7 @@ func TestEVMMultidownloader_StartStop(t *testing.T) { stopDuration := time.Since(stopStartTime) require.NoError(t, err) - require.True(t, startCompleted, "Start should have completed before Stop returns") + require.True(t, startCompleted.Load(), "Start should have completed before Stop returns") require.Greater(t, stopDuration, time.Duration(0), "Stop should take some time waiting for Start") wg.Wait() diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index 7569da052..c14e2cdb5 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -16,10 +16,9 @@ type compareBlockHeaders struct { } type ReorgPort struct { - ethClient aggkittypes.BaseEthereumClienter - rpcClient aggkittypes.RPCClienter - storage mdtypes.Storager - finalizedBlockTag aggkittypes.BlockNumberFinality + ethClient aggkittypes.BaseEthereumClienter + rpcClient aggkittypes.RPCClienter + storage mdtypes.Storager } func (r *ReorgPort) NewTx(ctx context.Context) (dbtypes.Txer, error) { @@ -54,7 +53,9 @@ func (r *ReorgPort) MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.Reor return r.storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) } -func (r *ReorgPort) GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) { +func (r *ReorgPort) GetBlockNumberInRPC( + ctx context.Context, blockFinality aggkittypes.BlockNumberFinality, +) (uint64, error) { blockNumber, err := r.ethClient.CustomHeaderByNumber(ctx, &blockFinality) if err != nil { return 0, fmt.Errorf("GetBlockNumberInRPC: error getting block number for %s from RPC: %w", diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index af29c8092..69d0e6e81 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -266,7 +266,8 @@ func (a *MultidownloaderStorage) saveBlocksNoMutex(tx dbtypes.Querier, blockRows tx = a.db } for _, blockRow := range blockRows { - a.logger.Debugf("Inserting block header row: %d %s final=%v", blockRow.BlockNumber, blockRow.BlockHash.Hex(), blockRow.IsFinal) + a.logger.Debugf("Inserting block header row: %d %s final=%v", blockRow.BlockNumber, + blockRow.BlockHash.Hex(), blockRow.IsFinal) if err := meddler.Insert(tx, "blocks", blockRow); err != nil { return fmt.Errorf("saveBlocksNoMutex: error inserting block header row (%s): %w", blockRow.String(), err) } diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index 520eac20b..b4acf26ac 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -42,7 +42,7 @@ func (b *Blocks) Get(number uint64) (*aggkittypes.BlockHeader, bool, error) { } func (b *Blocks) ListHeaders() aggkittypes.ListBlockHeaders { - headers := aggkittypes.NewListBlockHeaders(len(b.Headers)) + headers := aggkittypes.NewListBlockHeadersEmpty(len(b.Headers)) for _, header := range b.Headers { headers = append(headers, header) } From 48723617a69e84528cda96e3fb844183f9e03827 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 27 Jan 2026 09:24:14 +0100 Subject: [PATCH 10/75] feat: add test contract log emitter --- test/contracts/abi/logemitter.abi | 1 + test/contracts/logemitter/LogEmitter.sol | 29 ++ test/contracts/logemitter/logemitter.go | 584 +++++++++++++++++++++++ 3 files changed, 614 insertions(+) create mode 100644 test/contracts/abi/logemitter.abi create mode 100644 test/contracts/logemitter/LogEmitter.sol create mode 100644 test/contracts/logemitter/logemitter.go diff --git a/test/contracts/abi/logemitter.abi b/test/contracts/abi/logemitter.abi new file mode 100644 index 000000000..d129511f4 --- /dev/null +++ b/test/contracts/abi/logemitter.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"string","name":"bootMessage","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"bytes32","name":"topic","type":"bytes32"},{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Data","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"string","name":"message","type":"string"}],"name":"Ping","type":"event"},{"inputs":[],"name":"counter","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"topic","type":"bytes32"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"emitData","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"string","name":"message","type":"string"}],"name":"emitPing","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/logemitter/LogEmitter.sol b/test/contracts/logemitter/LogEmitter.sol new file mode 100644 index 000000000..419e2e9f5 --- /dev/null +++ b/test/contracts/logemitter/LogEmitter.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity 0.8.18; + +contract LogEmitter { + // Simple event + event Ping(address indexed from, uint256 indexed id, string message); + + // Event with arbitrary data + event Data(address indexed from, bytes32 indexed topic, bytes data); + + uint256 public counter; + + constructor(string memory bootMessage) { + // Emits a log on deployment + emit Ping(msg.sender, 0, bootMessage); + } + + // Emits an event and increments a counter + function emitPing(uint256 id, string calldata message) external { + counter += 1; + emit Ping(msg.sender, id, message); + } + + // Emits an event with arbitrary bytes (useful for tests) + function emitData(bytes32 topic, bytes calldata data) external { + emit Data(msg.sender, topic, data); + } +} \ No newline at end of file diff --git a/test/contracts/logemitter/logemitter.go b/test/contracts/logemitter/logemitter.go new file mode 100644 index 000000000..9f6f29d4e --- /dev/null +++ b/test/contracts/logemitter/logemitter.go @@ -0,0 +1,584 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package logemitter + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// LogemitterMetaData contains all meta data concerning the Logemitter contract. +var LogemitterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"bootMessage\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"topic\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Data\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"Ping\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"topic\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"emitData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"emitPing\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6080604052346100e05761033b80380380610019816100fb565b9283398101602080838303126100e05782516001600160401b03938482116100e057019082601f830112156100e05781519384116100e557601f1993610065601f8201861683016100fb565b818152828101948383860101116100e0576000956100a960409387867f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a9801610120565b6100c58351948593818552519788809286015285850190610120565b601f339601168101030190a36040516101f790816101448239f35b600080fd5b634e487b7160e01b600052604160045260246000fd5b6040519190601f01601f191682016001600160401b038111838210176100e557604052565b60005b8381106101335750506000910152565b818101518382015260200161012356fe60808060405260048036101561001457600080fd5b600091823560e01c90816361bc221a14610153575080638b692c37146100c05763e85f05f21461004357600080fd5b346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8577f5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b26100976100b2923690850161016d565b929093604051918291602083523595339560208401916101a0565b0390a380f35b8280fd5b5080fd5b50346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8576100f1903690830161016d565b9091835460018101809111610140577f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a916100b2918655604051918291602083523595339560208401916101a0565b634e487b7160e01b855260118252602485fd5b8390346100bc57816003193601126100bc57602091548152f35b9181601f8401121561019b5782359167ffffffffffffffff831161019b576020838186019501011161019b57565b600080fd5b908060209392818452848401376000828201840152601f01601f191601019056fea26469706673582212200fe8d91d7cb5850d0d16712aa2af488fa7d2240ee843a08e90d8fc7ba83ddc3d64736f6c63430008120033", +} + +// LogemitterABI is the input ABI used to generate the binding from. +// Deprecated: Use LogemitterMetaData.ABI instead. +var LogemitterABI = LogemitterMetaData.ABI + +// LogemitterBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use LogemitterMetaData.Bin instead. +var LogemitterBin = LogemitterMetaData.Bin + +// DeployLogemitter deploys a new Ethereum contract, binding an instance of Logemitter to it. +func DeployLogemitter(auth *bind.TransactOpts, backend bind.ContractBackend, bootMessage string) (common.Address, *types.Transaction, *Logemitter, error) { + parsed, err := LogemitterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogemitterBin), backend, bootMessage) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Logemitter{LogemitterCaller: LogemitterCaller{contract: contract}, LogemitterTransactor: LogemitterTransactor{contract: contract}, LogemitterFilterer: LogemitterFilterer{contract: contract}}, nil +} + +// Logemitter is an auto generated Go binding around an Ethereum contract. +type Logemitter struct { + LogemitterCaller // Read-only binding to the contract + LogemitterTransactor // Write-only binding to the contract + LogemitterFilterer // Log filterer for contract events +} + +// LogemitterCaller is an auto generated read-only Go binding around an Ethereum contract. +type LogemitterCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterTransactor is an auto generated write-only Go binding around an Ethereum contract. +type LogemitterTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type LogemitterFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type LogemitterSession struct { + Contract *Logemitter // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LogemitterCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type LogemitterCallerSession struct { + Contract *LogemitterCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// LogemitterTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type LogemitterTransactorSession struct { + Contract *LogemitterTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LogemitterRaw is an auto generated low-level Go binding around an Ethereum contract. +type LogemitterRaw struct { + Contract *Logemitter // Generic contract binding to access the raw methods on +} + +// LogemitterCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type LogemitterCallerRaw struct { + Contract *LogemitterCaller // Generic read-only contract binding to access the raw methods on +} + +// LogemitterTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type LogemitterTransactorRaw struct { + Contract *LogemitterTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewLogemitter creates a new instance of Logemitter, bound to a specific deployed contract. +func NewLogemitter(address common.Address, backend bind.ContractBackend) (*Logemitter, error) { + contract, err := bindLogemitter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Logemitter{LogemitterCaller: LogemitterCaller{contract: contract}, LogemitterTransactor: LogemitterTransactor{contract: contract}, LogemitterFilterer: LogemitterFilterer{contract: contract}}, nil +} + +// NewLogemitterCaller creates a new read-only instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterCaller(address common.Address, caller bind.ContractCaller) (*LogemitterCaller, error) { + contract, err := bindLogemitter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogemitterCaller{contract: contract}, nil +} + +// NewLogemitterTransactor creates a new write-only instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterTransactor(address common.Address, transactor bind.ContractTransactor) (*LogemitterTransactor, error) { + contract, err := bindLogemitter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogemitterTransactor{contract: contract}, nil +} + +// NewLogemitterFilterer creates a new log filterer instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterFilterer(address common.Address, filterer bind.ContractFilterer) (*LogemitterFilterer, error) { + contract, err := bindLogemitter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogemitterFilterer{contract: contract}, nil +} + +// bindLogemitter binds a generic wrapper to an already deployed contract. +func bindLogemitter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LogemitterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Logemitter *LogemitterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Logemitter.Contract.LogemitterCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Logemitter *LogemitterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Logemitter.Contract.LogemitterTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Logemitter *LogemitterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Logemitter.Contract.LogemitterTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Logemitter *LogemitterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Logemitter.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Logemitter *LogemitterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Logemitter.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Logemitter *LogemitterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Logemitter.Contract.contract.Transact(opts, method, params...) +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Logemitter.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterSession) Counter() (*big.Int, error) { + return _Logemitter.Contract.Counter(&_Logemitter.CallOpts) +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterCallerSession) Counter() (*big.Int, error) { + return _Logemitter.Contract.Counter(&_Logemitter.CallOpts) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterTransactor) EmitData(opts *bind.TransactOpts, topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.contract.Transact(opts, "emitData", topic, data) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterSession) EmitData(topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.Contract.EmitData(&_Logemitter.TransactOpts, topic, data) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterTransactorSession) EmitData(topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.Contract.EmitData(&_Logemitter.TransactOpts, topic, data) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. +// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterTransactor) EmitPing(opts *bind.TransactOpts, id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.contract.Transact(opts, "emitPing", id, message) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. +// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterSession) EmitPing(id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.Contract.EmitPing(&_Logemitter.TransactOpts, id, message) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. +// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterTransactorSession) EmitPing(id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.Contract.EmitPing(&_Logemitter.TransactOpts, id, message) +} + +// LogemitterDataIterator is returned from FilterData and is used to iterate over the raw logs and unpacked data for Data events raised by the Logemitter contract. +type LogemitterDataIterator struct { + Event *LogemitterData // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *LogemitterDataIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogemitterData) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LogemitterData) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LogemitterDataIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *LogemitterDataIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LogemitterData represents a Data event raised by the Logemitter contract. +type LogemitterData struct { + From common.Address + Topic [32]byte + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterData is a free log retrieval operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. +// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) FilterData(opts *bind.FilterOpts, from []common.Address, topic [][32]byte) (*LogemitterDataIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var topicRule []interface{} + for _, topicItem := range topic { + topicRule = append(topicRule, topicItem) + } + + logs, sub, err := _Logemitter.contract.FilterLogs(opts, "Data", fromRule, topicRule) + if err != nil { + return nil, err + } + return &LogemitterDataIterator{contract: _Logemitter.contract, event: "Data", logs: logs, sub: sub}, nil +} + +// WatchData is a free log subscription operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. +// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) WatchData(opts *bind.WatchOpts, sink chan<- *LogemitterData, from []common.Address, topic [][32]byte) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var topicRule []interface{} + for _, topicItem := range topic { + topicRule = append(topicRule, topicItem) + } + + logs, sub, err := _Logemitter.contract.WatchLogs(opts, "Data", fromRule, topicRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LogemitterData) + if err := _Logemitter.contract.UnpackLog(event, "Data", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseData is a log parse operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. +// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) ParseData(log types.Log) (*LogemitterData, error) { + event := new(LogemitterData) + if err := _Logemitter.contract.UnpackLog(event, "Data", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// LogemitterPingIterator is returned from FilterPing and is used to iterate over the raw logs and unpacked data for Ping events raised by the Logemitter contract. +type LogemitterPingIterator struct { + Event *LogemitterPing // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *LogemitterPingIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogemitterPing) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LogemitterPing) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LogemitterPingIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *LogemitterPingIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LogemitterPing represents a Ping event raised by the Logemitter contract. +type LogemitterPing struct { + From common.Address + Id *big.Int + Message string + Raw types.Log // Blockchain specific contextual infos +} + +// FilterPing is a free log retrieval operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. +// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) FilterPing(opts *bind.FilterOpts, from []common.Address, id []*big.Int) (*LogemitterPingIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Logemitter.contract.FilterLogs(opts, "Ping", fromRule, idRule) + if err != nil { + return nil, err + } + return &LogemitterPingIterator{contract: _Logemitter.contract, event: "Ping", logs: logs, sub: sub}, nil +} + +// WatchPing is a free log subscription operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. +// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) WatchPing(opts *bind.WatchOpts, sink chan<- *LogemitterPing, from []common.Address, id []*big.Int) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Logemitter.contract.WatchLogs(opts, "Ping", fromRule, idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LogemitterPing) + if err := _Logemitter.contract.UnpackLog(event, "Ping", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParsePing is a log parse operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. +// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) ParsePing(log types.Log) (*LogemitterPing, error) { + event := new(LogemitterPing) + if err := _Logemitter.contract.UnpackLog(event, "Ping", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} From e69e2328b01e4e134d3a65656edae148deac5a17 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 27 Jan 2026 09:24:38 +0100 Subject: [PATCH 11/75] feat: add test contract log emitter --- test/contracts/bin/logemitter.bin | 1 + 1 file changed, 1 insertion(+) create mode 100644 test/contracts/bin/logemitter.bin diff --git a/test/contracts/bin/logemitter.bin b/test/contracts/bin/logemitter.bin new file mode 100644 index 000000000..8188e7658 --- /dev/null +++ b/test/contracts/bin/logemitter.bin @@ -0,0 +1 @@ +6080604052346100e05761033b80380380610019816100fb565b9283398101602080838303126100e05782516001600160401b03938482116100e057019082601f830112156100e05781519384116100e557601f1993610065601f8201861683016100fb565b818152828101948383860101116100e0576000956100a960409387867f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a9801610120565b6100c58351948593818552519788809286015285850190610120565b601f339601168101030190a36040516101f790816101448239f35b600080fd5b634e487b7160e01b600052604160045260246000fd5b6040519190601f01601f191682016001600160401b038111838210176100e557604052565b60005b8381106101335750506000910152565b818101518382015260200161012356fe60808060405260048036101561001457600080fd5b600091823560e01c90816361bc221a14610153575080638b692c37146100c05763e85f05f21461004357600080fd5b346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8577f5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b26100976100b2923690850161016d565b929093604051918291602083523595339560208401916101a0565b0390a380f35b8280fd5b5080fd5b50346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8576100f1903690830161016d565b9091835460018101809111610140577f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a916100b2918655604051918291602083523595339560208401916101a0565b634e487b7160e01b855260118252602485fd5b8390346100bc57816003193601126100bc57602091548152f35b9181601f8401121561019b5782359167ffffffffffffffff831161019b576020838186019501011161019b57565b600080fd5b908060209392818452848401376000828201840152601f01601f191601019056fea26469706673582212200fe8d91d7cb5850d0d16712aa2af488fa7d2240ee843a08e90d8fc7ba83ddc3d64736f6c63430008120033 \ No newline at end of file From 10ec31113319b1b00c91e29114239ae677415407 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 27 Jan 2026 12:40:08 +0100 Subject: [PATCH 12/75] feat: removed EVMMultidownloader.BlockHeader because is duplicated HeaderByNumber --- multidownloader/e2e_test.go | 3 ++ .../evm_multidownloader_syncers.go | 32 +++++++------------ .../evm_multidownloader_syncers_test.go | 2 +- sync/evmdownloader.go | 2 +- types/multidownloader.go | 4 +-- 5 files changed, 18 insertions(+), 25 deletions(-) diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index ea2d0096d..7dcb58af9 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -65,6 +65,9 @@ func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { } func TestE2E(t *testing.T) { + if testing.Short() { + t.Skip("skipping E2E test in short mode") + } // Simulated L1 testData := buildL1Simulated(t) diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index 9ef159c2f..224efaccc 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -30,22 +30,6 @@ func (dh *EVMMultidownloader) BlockNumber(ctx context.Context, return dh.blockNotifierManager.GetCurrentBlockNumber(ctx, finality) } -// BlockHeader gets the block header for the given finality type -func (dh *EVMMultidownloader) BlockHeader(ctx context.Context, - finality aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { - number, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, finality) - if err != nil { - return nil, fmt.Errorf("EVMMultidownloader.BlockHeader: cannot get block number for finality=%s: %w", - finality.String(), err) - } - header, err := dh.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(number)) - if err != nil { - return nil, fmt.Errorf("EVMMultidownloader.BlockHeader: cannot get header for block number=%d: %w", - number, err) - } - return header, nil -} - // FilterLogs filters the logs. It gets them from storage or waits until they are available func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { if !dh.IsInitialized() { @@ -86,11 +70,16 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, dh.log.Debugf("EVMMultidownloader.HeaderByNumber: received number: %s", number.String()) defer dh.log.Debugf("EVMMultidownloader.HeaderByNumber: finished number: %s", number.String()) } - if !number.IsConstant() { - return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: only numeric blockNumbers are supported (got=%s)", - number.String()) + if number == nil { + number = &aggkittypes.LatestBlock + } + // Resolve blockNumber + blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, *number) + if err != nil { + return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: cannot get block number for finality=%s: %w", + number.String(), err) } - blockNumber := number.Specific + // Is this block in storage? block, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber) if err != nil { return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: cannot get BlockHeader number=%s: %w", @@ -103,7 +92,8 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, dh.log.Debugf("EVMMultidownloader.HeaderByNumber: block number=%s not found in storage, fetching from ethClient", number.String()) } - blockHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, number) + // Get from ethClient + blockHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) if err != nil { return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: ethClient.HeaderByNumber(%s) failed. Err: %w", number.String(), err) diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index 3099847f9..66cc8f487 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -66,7 +66,7 @@ func TestEVMMultidownloader_BlockHeader(t *testing.T) { Return(&aggkittypes.BlockHeader{ Number: 123456, }, nil) - header, err := testData.mdr.BlockHeader(t.Context(), aggkittypes.LatestBlock) + header, err := testData.mdr.HeaderByNumber(t.Context(), &aggkittypes.LatestBlock) require.NoError(t, err) require.Equal(t, uint64(123456), header.Number) } diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 7c32827c0..effde14e4 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -324,7 +324,7 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( d.log.Info("context cancelled") return latestSyncedBlock case <-ticker.C: - blockHeader, err := d.ethClient.BlockHeader(ctx, d.blockFinality) + blockHeader, err := d.ethClient.HeaderByNumber(ctx, &d.blockFinality) if err != nil { if ctx.Err() == nil { attempts++ diff --git a/types/multidownloader.go b/types/multidownloader.go index 665950e62..1deab838f 100644 --- a/types/multidownloader.go +++ b/types/multidownloader.go @@ -22,9 +22,9 @@ type SyncerConfig struct { type MultiDownloader interface { ChainID(ctx context.Context) (uint64, error) BlockNumber(ctx context.Context, finality BlockNumberFinality) (uint64, error) - // TODO: delete this method because it's only required for a intermediate fix of old RerogDetector - BlockHeader(ctx context.Context, finality BlockNumberFinality) (*BlockHeader, error) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]ethtypes.Log, error) + // Get block header by number and finality + // if number is nil, it gets the latest block HeaderByNumber(ctx context.Context, number *BlockNumberFinality) (*BlockHeader, error) EthClient() BaseEthereumClienter RegisterSyncer(data SyncerConfig) error From c89b202fed08552a66aaff8e1c91a1bb6c03e5f4 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 29 Jan 2026 15:38:59 +0100 Subject: [PATCH 13/75] feat: refactor multidownloader architecture with dedicated sync module - Add new multidownloader/sync package with Download and EVMDriver components - Introduce MultiDownloaderLegacy interface for backward compatibility - Rename ReorgError to DetectedReorgError for clarity - Add NewMultidownloadBased() implementation for l1infotreesync - Extend Storager interface with LogQuery, GetReorgedDataByChainID methods - Add IsPartiallyAvailable and ContainsBlockNumber utility methods - Update cmd/run.go to support both legacy and multidownloader-based flows - Regenerate all mocks with mockery v2.53.5 - Improve logging for block waiting states Co-Authored-By: Claude Sonnet 4.5 --- .mockery.yaml | 4 + agglayer/mocks/mock_agglayer_client.go | 2 +- .../mock_agg_oracle_committee_contract.go | 2 +- aggoracle/mocks/mock_eth_tx_manager.go | 2 +- .../mocks/mock_l2_ger_manager_contract.go | 2 +- .../mock_agg_proof_public_values_querier.go | 2 +- .../mock_agg_sende_storage_maintenancer.go | 177 +++ aggsender/mocks/mock_agg_sender_storage.go | 2 +- .../mock_agg_sender_storage_maintainer.go | 2 +- .../mocks/mock_aggchain_fep_rollup_querier.go | 2 +- .../mock_aggchain_proof_client_interface.go | 2 +- aggsender/mocks/mock_aggchain_proof_flow.go | 2 +- .../mocks/mock_aggchain_proof_generation.go | 2 +- .../mocks/mock_aggchain_proof_querier.go | 2 +- .../mocks/mock_agglayer_bridge_l2_reader.go | 2 +- .../mocks/mock_aggsender_builder_flow.go | 2 +- aggsender/mocks/mock_aggsender_flow_baser.go | 2 +- aggsender/mocks/mock_aggsender_interface.go | 2 +- aggsender/mocks/mock_aggsender_storer.go | 2 +- .../mocks/mock_aggsender_validator_client.go | 2 +- .../mocks/mock_aggsender_verifier_flow.go | 2 +- aggsender/mocks/mock_bridge_querier.go | 2 +- aggsender/mocks/mock_certificate_querier.go | 2 +- .../mocks/mock_certificate_send_trigger.go | 2 +- .../mocks/mock_certificate_status_checker.go | 2 +- .../mocks/mock_certificate_trigger_event.go | 2 +- .../mock_certificate_validate_and_signer.go | 2 +- aggsender/mocks/mock_certificate_validator.go | 2 +- aggsender/mocks/mock_chain_ger_reader.go | 2 +- aggsender/mocks/mock_emit_log_func.go | 76 ++ aggsender/mocks/mock_fep_contract_querier.go | 2 +- aggsender/mocks/mock_fep_inputs_querier.go | 2 +- aggsender/mocks/mock_ger_querier.go | 2 +- .../mocks/mock_l1_info_tree_data_querier.go | 2 +- .../mock_l1_info_tree_root_by_leaf_querier.go | 32 + aggsender/mocks/mock_l1_info_tree_syncer.go | 2 +- aggsender/mocks/mock_l2_bridge_syncer.go | 2 +- aggsender/mocks/mock_ler_querier.go | 2 +- aggsender/mocks/mock_local_exit_root_query.go | 2 +- aggsender/mocks/mock_logger.go | 2 +- ...k_max_l2_block_number_limiter_interface.go | 2 +- aggsender/mocks/mock_multisig_contract.go | 2 +- aggsender/mocks/mock_multisig_querier.go | 2 +- aggsender/mocks/mock_op_node_clienter.go | 2 +- .../mocks/mock_optimistic_mode_querier.go | 2 +- aggsender/mocks/mock_optimistic_signer.go | 2 +- aggsender/mocks/mock_rollup_data_querier.go | 2 +- ...ck_storage_retain_certificates_policier.go | 2 +- aggsender/mocks/mock_validator_client.go | 2 +- aggsender/mocks/mock_validator_poller.go | 2 +- .../types/mocks/mock_epoch_notifier.go | 2 +- .../mock_l1_info_tree_root_by_leaf_querier.go | 2 +- .../mock_agglayer_manager_upgrade_querier.go | 2 +- bridgeservice/mocks/mock_bridger.go | 2 +- .../mocks/mock_l1_info_tree_syncer.go | 2 +- bridgeservice/mocks/mock_l2_ger_syncer.go | 2 +- bridgesync/mock_bridge_querier.go | 2 +- bridgesync/mocks/mock_reorg_detector.go | 2 +- cmd/run.go | 49 +- common/block_range.go | 5 + common/block_range_test.go | 79 ++ common/mocks/mock_logger.go | 2 +- common/mocks/mock_pub_sub.go | 2 +- common/types/mocks/mock_retry_handler.go | 2 +- .../mocks/mock_retry_policy_configurer.go | 2 +- .../mocks/mock_compatibility_checker.go | 2 +- .../mocks/mock_compatibility_data_storager.go | 2 +- .../mocks/mock_runtime_data_getter_func.go | 2 +- db/mocks/mock_d_ber.go | 2 +- db/mocks/mock_key_value_storager.go | 2 +- db/mocks/mock_querier.go | 2 +- db/mocks/mock_sql_txer.go | 2 +- db/mocks/mock_txer.go | 2 +- .../block_notifier/block_notifier_manager.go | 3 + etherman/default_eth_client.go | 3 + etherman/mocks/mock_dial_func.go | 93 ++ etherman/mocks/mock_op_node_clienter.go | 2 +- .../mocks/mock_rollup_manager_contract.go | 2 +- etherman/types/mocks/mock_block_notifier.go | 2 +- .../mocks/mock_block_notifier_manager.go | 2 +- l1infotreesync/e2e_test.go | 6 +- l1infotreesync/l1infotreesync.go | 70 +- l1infotreesync/mock_downloader_interface.go | 80 ++ l1infotreesync/mock_driver_interface.go | 69 ++ l1infotreesync/mock_l1_info_tree_syncer.go | 2 +- l1infotreesync/mocks/mock_reorg_detector.go | 2 +- l1infotreesync/processor.go | 14 + l1infotreesync/processor_test.go | 57 + l2gersync/mocks/mock_l1_info_tree_querier.go | 2 +- multidownloader/e2e_test.go | 2 +- multidownloader/evm_multidownloader.go | 16 +- multidownloader/evm_multidownloader_reorg.go | 48 + .../evm_multidownloader_syncers.go | 77 +- .../evm_multidownloader_syncers_test.go | 15 +- multidownloader/evm_multidownloader_test.go | 56 +- multidownloader/state.go | 4 + multidownloader/storage/storage.go | 82 ++ multidownloader/storage/storage_reorg.go | 46 + multidownloader/storage/storage_reorg_test.go | 101 ++ ...k_certificate_submission_service_client.go | 114 ++ .../mock_configuration_service_client.go | 114 ++ .../mocks/mock_node_state_service_client.go | 262 ++++ .../mock_aggchain_proof_service_client.go | 188 +++ .../mock_aggchain_proof_service_server.go | 155 +++ ...ck_unsafe_aggchain_proof_service_server.go | 64 + multidownloader/sync/download.go | 309 +++++ multidownloader/sync/download_test.go | 1061 +++++++++++++++++ multidownloader/sync/evmdriver.go | 155 +++ multidownloader/sync/types/evm_downloader.go | 50 + .../sync/types/evm_multidownloader.go | 31 + .../types/mocks/mock_downloader_interface.go | 100 ++ .../mocks/mock_multidownloader_interface.go | 496 ++++++++ .../types/mocks/mock_processor_interface.go | 191 +++ multidownloader/sync/types/processor.go | 14 + multidownloader/types/log_query_response.go | 59 + .../types/mocks/mock_reorg_processor.go | 2 +- multidownloader/types/mocks/mock_storager.go | 132 +- .../types/mocks/mock_storager_for_reorg.go | 2 +- multidownloader/types/reorg_error.go | 72 +- multidownloader/types/set_sync_segment.go | 54 + .../types/set_sync_segment_test.go | 207 ++++ multidownloader/types/storager.go | 6 +- sync/adapter_eth_to_multidownloader.go | 2 +- sync/evmdownloader.go | 6 +- sync/evmdownloader_test.go | 16 +- sync/mock_downloader.go | 2 +- sync/mock_evm_downloader_interface.go | 2 +- sync/mock_processor_interface.go | 2 +- sync/mock_reorg_detector.go | 2 +- test/helpers/e2e.go | 2 +- tree/types/mocks/mock_full_treer.go | 2 +- tree/types/mocks/mock_leaf_writer.go | 2 +- tree/types/mocks/mock_read_treer.go | 2 +- tree/types/mocks/mock_reorganize_treer.go | 2 +- types/list_block_header_test.go | 167 +++ types/mocks/mock_base_ethereum_clienter.go | 2 +- types/mocks/mock_custom_ethereum_clienter.go | 96 ++ types/mocks/mock_eth_chain_reader.go | 99 ++ types/mocks/mock_eth_clienter.go | 2 +- types/mocks/mock_ethereum_clienter.go | 2 +- types/mocks/mock_multi_downloader.go | 59 - types/mocks/mock_multi_downloader_legacy.go | 411 +++++++ types/mocks/mock_rpc_clienter.go | 2 +- types/multidownloader.go | 2 +- 144 files changed, 5863 insertions(+), 269 deletions(-) create mode 100644 aggsender/mocks/mock_agg_sende_storage_maintenancer.go create mode 100644 aggsender/mocks/mock_emit_log_func.go create mode 100644 aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go create mode 100644 etherman/mocks/mock_dial_func.go create mode 100644 l1infotreesync/mock_downloader_interface.go create mode 100644 l1infotreesync/mock_driver_interface.go create mode 100644 multidownloader/evm_multidownloader_reorg.go create mode 100644 multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go create mode 100644 multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go create mode 100644 multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go create mode 100644 multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go create mode 100644 multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go create mode 100644 multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go create mode 100644 multidownloader/sync/download.go create mode 100644 multidownloader/sync/download_test.go create mode 100644 multidownloader/sync/evmdriver.go create mode 100644 multidownloader/sync/types/evm_downloader.go create mode 100644 multidownloader/sync/types/evm_multidownloader.go create mode 100644 multidownloader/sync/types/mocks/mock_downloader_interface.go create mode 100644 multidownloader/sync/types/mocks/mock_multidownloader_interface.go create mode 100644 multidownloader/sync/types/mocks/mock_processor_interface.go create mode 100644 multidownloader/sync/types/processor.go create mode 100644 multidownloader/types/log_query_response.go create mode 100644 types/list_block_header_test.go create mode 100644 types/mocks/mock_custom_ethereum_clienter.go create mode 100644 types/mocks/mock_eth_chain_reader.go create mode 100644 types/mocks/mock_multi_downloader_legacy.go diff --git a/.mockery.yaml b/.mockery.yaml index 7d5ff522d..36fa370a5 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -162,3 +162,7 @@ packages: config: dir: "{{ .InterfaceDir }}/mocks" all: true + github.com/agglayer/aggkit/multidownloader/sync/types: + config: + dir: "{{ .InterfaceDir }}/mocks" + all: true diff --git a/agglayer/mocks/mock_agglayer_client.go b/agglayer/mocks/mock_agglayer_client.go index a14237a31..d3969de91 100644 --- a/agglayer/mocks/mock_agglayer_client.go +++ b/agglayer/mocks/mock_agglayer_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_agg_oracle_committee_contract.go b/aggoracle/mocks/mock_agg_oracle_committee_contract.go index 3adaa0220..97586accd 100644 --- a/aggoracle/mocks/mock_agg_oracle_committee_contract.go +++ b/aggoracle/mocks/mock_agg_oracle_committee_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_eth_tx_manager.go b/aggoracle/mocks/mock_eth_tx_manager.go index a055f9581..59ffea2f6 100644 --- a/aggoracle/mocks/mock_eth_tx_manager.go +++ b/aggoracle/mocks/mock_eth_tx_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_l2_ger_manager_contract.go b/aggoracle/mocks/mock_l2_ger_manager_contract.go index 6d0d55007..4708f6062 100644 --- a/aggoracle/mocks/mock_l2_ger_manager_contract.go +++ b/aggoracle/mocks/mock_l2_ger_manager_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_proof_public_values_querier.go b/aggsender/mocks/mock_agg_proof_public_values_querier.go index 67eee292c..6900d4f4c 100644 --- a/aggsender/mocks/mock_agg_proof_public_values_querier.go +++ b/aggsender/mocks/mock_agg_proof_public_values_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_sende_storage_maintenancer.go b/aggsender/mocks/mock_agg_sende_storage_maintenancer.go new file mode 100644 index 000000000..dd69b5c13 --- /dev/null +++ b/aggsender/mocks/mock_agg_sende_storage_maintenancer.go @@ -0,0 +1,177 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + types "github.com/agglayer/aggkit/db/types" + mock "github.com/stretchr/testify/mock" +) + +// AggSendeStorageMaintenancer is an autogenerated mock type for the AggSendeStorageMaintenancer type +type AggSendeStorageMaintenancer struct { + mock.Mock +} + +type AggSendeStorageMaintenancer_Expecter struct { + mock *mock.Mock +} + +func (_m *AggSendeStorageMaintenancer) EXPECT() *AggSendeStorageMaintenancer_Expecter { + return &AggSendeStorageMaintenancer_Expecter{mock: &_m.Mock} +} + +// DeleteCertificate provides a mock function with given fields: tx, height, mustDelete +func (_m *AggSendeStorageMaintenancer) DeleteCertificate(tx types.Querier, height uint64, mustDelete bool) error { + ret := _m.Called(tx, height, mustDelete) + + if len(ret) == 0 { + panic("no return value specified for DeleteCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64, bool) error); ok { + r0 = rf(tx, height, mustDelete) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' +type AggSendeStorageMaintenancer_DeleteCertificate_Call struct { + *mock.Call +} + +// DeleteCertificate is a helper method to define mock.On call +// - tx types.Querier +// - height uint64 +// - mustDelete bool +func (_e *AggSendeStorageMaintenancer_Expecter) DeleteCertificate(tx interface{}, height interface{}, mustDelete interface{}) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + return &AggSendeStorageMaintenancer_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", tx, height, mustDelete)} +} + +func (_c *AggSendeStorageMaintenancer_DeleteCertificate_Call) Run(run func(tx types.Querier, height uint64, mustDelete bool)) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64), args[2].(bool)) + }) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteCertificate_Call) Return(_a0 error) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteCertificate_Call) RunAndReturn(run func(types.Querier, uint64, bool) error) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Return(run) + return _c +} + +// DeleteOldCertificates provides a mock function with given fields: tx, olderThanHeight +func (_m *AggSendeStorageMaintenancer) DeleteOldCertificates(tx types.Querier, olderThanHeight uint64) error { + ret := _m.Called(tx, olderThanHeight) + + if len(ret) == 0 { + panic("no return value specified for DeleteOldCertificates") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) error); ok { + r0 = rf(tx, olderThanHeight) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_DeleteOldCertificates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOldCertificates' +type AggSendeStorageMaintenancer_DeleteOldCertificates_Call struct { + *mock.Call +} + +// DeleteOldCertificates is a helper method to define mock.On call +// - tx types.Querier +// - olderThanHeight uint64 +func (_e *AggSendeStorageMaintenancer_Expecter) DeleteOldCertificates(tx interface{}, olderThanHeight interface{}) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + return &AggSendeStorageMaintenancer_DeleteOldCertificates_Call{Call: _e.mock.On("DeleteOldCertificates", tx, olderThanHeight)} +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) Run(run func(tx types.Querier, olderThanHeight uint64)) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) Return(_a0 error) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) RunAndReturn(run func(types.Querier, uint64) error) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Return(run) + return _c +} + +// MoveCertificateToHistory provides a mock function with given fields: tx, height +func (_m *AggSendeStorageMaintenancer) MoveCertificateToHistory(tx types.Querier, height uint64) error { + ret := _m.Called(tx, height) + + if len(ret) == 0 { + panic("no return value specified for MoveCertificateToHistory") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) error); ok { + r0 = rf(tx, height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_MoveCertificateToHistory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveCertificateToHistory' +type AggSendeStorageMaintenancer_MoveCertificateToHistory_Call struct { + *mock.Call +} + +// MoveCertificateToHistory is a helper method to define mock.On call +// - tx types.Querier +// - height uint64 +func (_e *AggSendeStorageMaintenancer_Expecter) MoveCertificateToHistory(tx interface{}, height interface{}) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + return &AggSendeStorageMaintenancer_MoveCertificateToHistory_Call{Call: _e.mock.On("MoveCertificateToHistory", tx, height)} +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) Run(run func(tx types.Querier, height uint64)) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) Return(_a0 error) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) RunAndReturn(run func(types.Querier, uint64) error) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Return(run) + return _c +} + +// NewAggSendeStorageMaintenancer creates a new instance of AggSendeStorageMaintenancer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggSendeStorageMaintenancer(t interface { + mock.TestingT + Cleanup(func()) +}) *AggSendeStorageMaintenancer { + mock := &AggSendeStorageMaintenancer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_agg_sender_storage.go b/aggsender/mocks/mock_agg_sender_storage.go index b0ff20665..865f31b33 100644 --- a/aggsender/mocks/mock_agg_sender_storage.go +++ b/aggsender/mocks/mock_agg_sender_storage.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_sender_storage_maintainer.go b/aggsender/mocks/mock_agg_sender_storage_maintainer.go index ed2e4a384..c5586a08f 100644 --- a/aggsender/mocks/mock_agg_sender_storage_maintainer.go +++ b/aggsender/mocks/mock_agg_sender_storage_maintainer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_fep_rollup_querier.go b/aggsender/mocks/mock_aggchain_fep_rollup_querier.go index c9eeaa16b..8e43575df 100644 --- a/aggsender/mocks/mock_aggchain_fep_rollup_querier.go +++ b/aggsender/mocks/mock_aggchain_fep_rollup_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_client_interface.go b/aggsender/mocks/mock_aggchain_proof_client_interface.go index 418a64a4d..ef5f61c52 100644 --- a/aggsender/mocks/mock_aggchain_proof_client_interface.go +++ b/aggsender/mocks/mock_aggchain_proof_client_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_flow.go b/aggsender/mocks/mock_aggchain_proof_flow.go index a72d1c8e4..ef17180d2 100644 --- a/aggsender/mocks/mock_aggchain_proof_flow.go +++ b/aggsender/mocks/mock_aggchain_proof_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_generation.go b/aggsender/mocks/mock_aggchain_proof_generation.go index 11b5d0c84..45f0b0836 100644 --- a/aggsender/mocks/mock_aggchain_proof_generation.go +++ b/aggsender/mocks/mock_aggchain_proof_generation.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_querier.go b/aggsender/mocks/mock_aggchain_proof_querier.go index caebcf74b..0b6bbac88 100644 --- a/aggsender/mocks/mock_aggchain_proof_querier.go +++ b/aggsender/mocks/mock_aggchain_proof_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go index a97430c9b..b3ab28fe2 100644 --- a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go +++ b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index 2963c3653..a18464ba7 100644 --- a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_flow_baser.go b/aggsender/mocks/mock_aggsender_flow_baser.go index ab816984c..06195a0a6 100644 --- a/aggsender/mocks/mock_aggsender_flow_baser.go +++ b/aggsender/mocks/mock_aggsender_flow_baser.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_interface.go b/aggsender/mocks/mock_aggsender_interface.go index c7964ecfc..be57ec7cb 100644 --- a/aggsender/mocks/mock_aggsender_interface.go +++ b/aggsender/mocks/mock_aggsender_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_storer.go b/aggsender/mocks/mock_aggsender_storer.go index 157b9ce86..2587c9f66 100644 --- a/aggsender/mocks/mock_aggsender_storer.go +++ b/aggsender/mocks/mock_aggsender_storer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_validator_client.go b/aggsender/mocks/mock_aggsender_validator_client.go index 2d653c487..a04d4fe04 100644 --- a/aggsender/mocks/mock_aggsender_validator_client.go +++ b/aggsender/mocks/mock_aggsender_validator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_verifier_flow.go b/aggsender/mocks/mock_aggsender_verifier_flow.go index 31fd954e8..b42567ed4 100644 --- a/aggsender/mocks/mock_aggsender_verifier_flow.go +++ b/aggsender/mocks/mock_aggsender_verifier_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_bridge_querier.go b/aggsender/mocks/mock_bridge_querier.go index 8916837fb..9a3571b39 100644 --- a/aggsender/mocks/mock_bridge_querier.go +++ b/aggsender/mocks/mock_bridge_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_querier.go b/aggsender/mocks/mock_certificate_querier.go index 4edcc2512..ae09b838e 100644 --- a/aggsender/mocks/mock_certificate_querier.go +++ b/aggsender/mocks/mock_certificate_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_send_trigger.go b/aggsender/mocks/mock_certificate_send_trigger.go index c9c51c6cc..73a14996b 100644 --- a/aggsender/mocks/mock_certificate_send_trigger.go +++ b/aggsender/mocks/mock_certificate_send_trigger.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_status_checker.go b/aggsender/mocks/mock_certificate_status_checker.go index 5937d264e..11a0170f1 100644 --- a/aggsender/mocks/mock_certificate_status_checker.go +++ b/aggsender/mocks/mock_certificate_status_checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_trigger_event.go b/aggsender/mocks/mock_certificate_trigger_event.go index 5d1497bcc..3774cdc92 100644 --- a/aggsender/mocks/mock_certificate_trigger_event.go +++ b/aggsender/mocks/mock_certificate_trigger_event.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_validate_and_signer.go b/aggsender/mocks/mock_certificate_validate_and_signer.go index cd65e1de6..19c1e3a06 100644 --- a/aggsender/mocks/mock_certificate_validate_and_signer.go +++ b/aggsender/mocks/mock_certificate_validate_and_signer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_validator.go b/aggsender/mocks/mock_certificate_validator.go index ca88aa196..aa2bc5a2e 100644 --- a/aggsender/mocks/mock_certificate_validator.go +++ b/aggsender/mocks/mock_certificate_validator.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_chain_ger_reader.go b/aggsender/mocks/mock_chain_ger_reader.go index f1fe567ea..5f1ff9a92 100644 --- a/aggsender/mocks/mock_chain_ger_reader.go +++ b/aggsender/mocks/mock_chain_ger_reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_emit_log_func.go b/aggsender/mocks/mock_emit_log_func.go new file mode 100644 index 000000000..fd3b744e0 --- /dev/null +++ b/aggsender/mocks/mock_emit_log_func.go @@ -0,0 +1,76 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// EmitLogFunc is an autogenerated mock type for the EmitLogFunc type +type EmitLogFunc struct { + mock.Mock +} + +type EmitLogFunc_Expecter struct { + mock *mock.Mock +} + +func (_m *EmitLogFunc) EXPECT() *EmitLogFunc_Expecter { + return &EmitLogFunc_Expecter{mock: &_m.Mock} +} + +// Execute provides a mock function with given fields: template, args +func (_m *EmitLogFunc) Execute(template string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, template) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// EmitLogFunc_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' +type EmitLogFunc_Execute_Call struct { + *mock.Call +} + +// Execute is a helper method to define mock.On call +// - template string +// - args ...interface{} +func (_e *EmitLogFunc_Expecter) Execute(template interface{}, args ...interface{}) *EmitLogFunc_Execute_Call { + return &EmitLogFunc_Execute_Call{Call: _e.mock.On("Execute", + append([]interface{}{template}, args...)...)} +} + +func (_c *EmitLogFunc_Execute_Call) Run(run func(template string, args ...interface{})) *EmitLogFunc_Execute_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *EmitLogFunc_Execute_Call) Return() *EmitLogFunc_Execute_Call { + _c.Call.Return() + return _c +} + +func (_c *EmitLogFunc_Execute_Call) RunAndReturn(run func(string, ...interface{})) *EmitLogFunc_Execute_Call { + _c.Run(run) + return _c +} + +// NewEmitLogFunc creates a new instance of EmitLogFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEmitLogFunc(t interface { + mock.TestingT + Cleanup(func()) +}) *EmitLogFunc { + mock := &EmitLogFunc{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_fep_contract_querier.go b/aggsender/mocks/mock_fep_contract_querier.go index 311373bb6..f43c86a14 100644 --- a/aggsender/mocks/mock_fep_contract_querier.go +++ b/aggsender/mocks/mock_fep_contract_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_fep_inputs_querier.go b/aggsender/mocks/mock_fep_inputs_querier.go index 2de0ac3c7..ac1bc5d9f 100644 --- a/aggsender/mocks/mock_fep_inputs_querier.go +++ b/aggsender/mocks/mock_fep_inputs_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_ger_querier.go b/aggsender/mocks/mock_ger_querier.go index fe05effd3..e8b171b73 100644 --- a/aggsender/mocks/mock_ger_querier.go +++ b/aggsender/mocks/mock_ger_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l1_info_tree_data_querier.go b/aggsender/mocks/mock_l1_info_tree_data_querier.go index 56139dc77..3ddda7b78 100644 --- a/aggsender/mocks/mock_l1_info_tree_data_querier.go +++ b/aggsender/mocks/mock_l1_info_tree_data_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go b/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go new file mode 100644 index 000000000..d25673da3 --- /dev/null +++ b/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// L1InfoTreeRootByLeafQuerier is an autogenerated mock type for the L1InfoTreeRootByLeafQuerier type +type L1InfoTreeRootByLeafQuerier struct { + mock.Mock +} + +type L1InfoTreeRootByLeafQuerier_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreeRootByLeafQuerier) EXPECT() *L1InfoTreeRootByLeafQuerier_Expecter { + return &L1InfoTreeRootByLeafQuerier_Expecter{mock: &_m.Mock} +} + +// NewL1InfoTreeRootByLeafQuerier creates a new instance of L1InfoTreeRootByLeafQuerier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreeRootByLeafQuerier(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreeRootByLeafQuerier { + mock := &L1InfoTreeRootByLeafQuerier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l1_info_tree_syncer.go b/aggsender/mocks/mock_l1_info_tree_syncer.go index be3e51cd0..4b9d0c9b4 100644 --- a/aggsender/mocks/mock_l1_info_tree_syncer.go +++ b/aggsender/mocks/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l2_bridge_syncer.go b/aggsender/mocks/mock_l2_bridge_syncer.go index 23fd452c4..7fb7a2c84 100644 --- a/aggsender/mocks/mock_l2_bridge_syncer.go +++ b/aggsender/mocks/mock_l2_bridge_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_ler_querier.go b/aggsender/mocks/mock_ler_querier.go index 7e147e224..5cf500103 100644 --- a/aggsender/mocks/mock_ler_querier.go +++ b/aggsender/mocks/mock_ler_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_local_exit_root_query.go b/aggsender/mocks/mock_local_exit_root_query.go index 10f0e2f39..c59f0e851 100644 --- a/aggsender/mocks/mock_local_exit_root_query.go +++ b/aggsender/mocks/mock_local_exit_root_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_logger.go b/aggsender/mocks/mock_logger.go index 670c84686..569e2a50a 100644 --- a/aggsender/mocks/mock_logger.go +++ b/aggsender/mocks/mock_logger.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go b/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go index 9b6cbe31c..fb0aa2bca 100644 --- a/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go +++ b/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_multisig_contract.go b/aggsender/mocks/mock_multisig_contract.go index fcf43497f..9e4cd4c4f 100644 --- a/aggsender/mocks/mock_multisig_contract.go +++ b/aggsender/mocks/mock_multisig_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_multisig_querier.go b/aggsender/mocks/mock_multisig_querier.go index 892573fa9..fc96c760b 100644 --- a/aggsender/mocks/mock_multisig_querier.go +++ b/aggsender/mocks/mock_multisig_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_op_node_clienter.go b/aggsender/mocks/mock_op_node_clienter.go index 7f1dfc2cb..f6c69acc9 100644 --- a/aggsender/mocks/mock_op_node_clienter.go +++ b/aggsender/mocks/mock_op_node_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_optimistic_mode_querier.go b/aggsender/mocks/mock_optimistic_mode_querier.go index a1c60d7ec..0be3c286c 100644 --- a/aggsender/mocks/mock_optimistic_mode_querier.go +++ b/aggsender/mocks/mock_optimistic_mode_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_optimistic_signer.go b/aggsender/mocks/mock_optimistic_signer.go index bd259d59e..a8a40f88e 100644 --- a/aggsender/mocks/mock_optimistic_signer.go +++ b/aggsender/mocks/mock_optimistic_signer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_rollup_data_querier.go b/aggsender/mocks/mock_rollup_data_querier.go index 4e13addc9..119b0ad17 100644 --- a/aggsender/mocks/mock_rollup_data_querier.go +++ b/aggsender/mocks/mock_rollup_data_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_storage_retain_certificates_policier.go b/aggsender/mocks/mock_storage_retain_certificates_policier.go index 8967caa8c..051b0bea2 100644 --- a/aggsender/mocks/mock_storage_retain_certificates_policier.go +++ b/aggsender/mocks/mock_storage_retain_certificates_policier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_validator_client.go b/aggsender/mocks/mock_validator_client.go index 0c1a96c47..0da60d441 100644 --- a/aggsender/mocks/mock_validator_client.go +++ b/aggsender/mocks/mock_validator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_validator_poller.go b/aggsender/mocks/mock_validator_poller.go index 4d425d9a6..6f403b7f5 100644 --- a/aggsender/mocks/mock_validator_poller.go +++ b/aggsender/mocks/mock_validator_poller.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/trigger/types/mocks/mock_epoch_notifier.go b/aggsender/trigger/types/mocks/mock_epoch_notifier.go index bfa37e45b..49be1e0de 100644 --- a/aggsender/trigger/types/mocks/mock_epoch_notifier.go +++ b/aggsender/trigger/types/mocks/mock_epoch_notifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go b/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go index 135c8c722..bbed9de57 100644 --- a/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go +++ b/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go b/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go index d66de8bae..5858600f3 100644 --- a/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go +++ b/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_bridger.go b/bridgeservice/mocks/mock_bridger.go index 180d33c90..b2304c1cd 100644 --- a/bridgeservice/mocks/mock_bridger.go +++ b/bridgeservice/mocks/mock_bridger.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_l1_info_tree_syncer.go b/bridgeservice/mocks/mock_l1_info_tree_syncer.go index 648f6ff40..0b788e7cb 100644 --- a/bridgeservice/mocks/mock_l1_info_tree_syncer.go +++ b/bridgeservice/mocks/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_l2_ger_syncer.go b/bridgeservice/mocks/mock_l2_ger_syncer.go index 07e17c89e..7cfe30906 100644 --- a/bridgeservice/mocks/mock_l2_ger_syncer.go +++ b/bridgeservice/mocks/mock_l2_ger_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/bridgesync/mock_bridge_querier.go b/bridgesync/mock_bridge_querier.go index 9f0b01f1e..a7941330b 100644 --- a/bridgesync/mock_bridge_querier.go +++ b/bridgesync/mock_bridge_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package bridgesync diff --git a/bridgesync/mocks/mock_reorg_detector.go b/bridgesync/mocks/mock_reorg_detector.go index d50b74a53..24783d874 100644 --- a/bridgesync/mocks/mock_reorg_detector.go +++ b/bridgesync/mocks/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/cmd/run.go b/cmd/run.go index eaddc8f6c..6addc051a 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -99,7 +99,7 @@ func start(cliCtx *cli.Context) error { } }() var rpcServices []jRPC.Service - l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(l1Client, cfg.L1Multidownloader) + l1MdrEthAdapter, l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(l1Client, cfg.L1Multidownloader) if err != nil { return fmt.Errorf("failed to create L1MultiDownloader: %w", err) } @@ -119,7 +119,8 @@ func start(cliCtx *cli.Context) error { // Create WaitGroup for backfill goroutines synchronization var backfillWg sync.WaitGroup - l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(ctx, components, *cfg, reorgDetectorL1, l1Client, l1MultiDownloader) + l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(ctx, components, *cfg, reorgDetectorL1, + l1Client, l1MdrEthAdapter, l1MultiDownloader) if l1InfoTreeSync != nil { rpcServices = append(rpcServices, l1InfoTreeSync.GetRPCServices()...) } @@ -231,10 +232,10 @@ func start(cliCtx *cli.Context) error { if cfg.Profiling.ProfilingEnabled { go pprof.StartProfilingHTTPServer(ctx, cfg.Profiling) } - if l1MultiDownloader != nil { + if l1MdrEthAdapter != nil { log.Info("starting L1 MultiDownloader...") go func() { - err := l1MultiDownloader.Start(ctx) + err := l1MdrEthAdapter.Start(ctx) if err != nil { log.Error("l1MultiDownloader stopped: %w", err) } @@ -516,7 +517,8 @@ func runL1InfoTreeSyncerIfNeeded( cfg config.Config, reorgDetectorL1 aggkitsync.ReorgDetector, _ aggkittypes.BaseEthereumClienter, - l1MultiDownloader aggkittypes.MultiDownloader, + l1EthClient aggkittypes.MultiDownloaderLegacy, + l1MultiDownloader *multidownloader.EVMMultidownloader, ) *l1infotreesync.L1InfoTreeSync { if !isNeeded([]string{ aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR, @@ -524,13 +526,25 @@ func runL1InfoTreeSyncerIfNeeded( aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) { return nil } - l1InfoTreeSync, err := l1infotreesync.New( - ctx, - cfg.L1InfoTreeSync, - l1MultiDownloader, - reorgDetectorL1, - l1infotreesync.FlagNone, - ) + var l1InfoTreeSync *l1infotreesync.L1InfoTreeSync + var err error + if l1MultiDownloader != nil { + log.Info("L1 Info Tree Syncer using MultiDownloader based implementation") + l1InfoTreeSync, err = l1infotreesync.NewMultidownloadBased( + ctx, + cfg.L1InfoTreeSync, + l1MultiDownloader, + l1infotreesync.FlagNone, + ) + } else { + l1InfoTreeSync, err = l1infotreesync.New( + ctx, + cfg.L1InfoTreeSync, + l1EthClient, + reorgDetectorL1, + l1infotreesync.FlagNone, + ) + } if err != nil { log.Fatal(err) } @@ -609,15 +623,16 @@ func runReorgDetectorL1IfNeeded( func runL1MultiDownloaderIfNeeded( l1Client aggkittypes.EthClienter, cfg multidownloader.Config, -) (aggkittypes.MultiDownloader, []jRPC.Service, error) { +) (aggkittypes.MultiDownloaderLegacy, + *multidownloader.EVMMultidownloader, []jRPC.Service, error) { // The requirements are the same as L1Client if l1Client == nil { - return nil, nil, nil + return nil, nil, nil, nil } // If it's disable It creates a direct eth client if !cfg.Enabled { log.Warnf("L1 MultiDownloader is disabled, using legacy EthClient") - return aggkitsync.NewAdapterEthClientToMultidownloader(l1Client), nil, nil + return aggkitsync.NewAdapterEthClientToMultidownloader(l1Client), nil, nil, nil } logger := log.WithFields("module", "L1MultiDownloader") @@ -632,10 +647,10 @@ func runL1MultiDownloaderIfNeeded( nil, // reorgProcessor ) if err != nil { - return nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err) + return nil, nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err) } rpcServices := downloader.GetRPCServices() - return downloader, rpcServices, nil + return downloader, downloader, rpcServices, nil } func runReorgDetectorL2IfNeeded( diff --git a/common/block_range.go b/common/block_range.go index c30023111..82b1da4d0 100644 --- a/common/block_range.go +++ b/common/block_range.go @@ -145,6 +145,11 @@ func (b BlockRange) Contains(other BlockRange) bool { return b.FromBlock <= other.FromBlock && b.ToBlock >= other.ToBlock } +// ContainsBlockNumber returns true if the given block number is within the BlockRange (inclusive). +func (b BlockRange) ContainsBlockNumber(number uint64) bool { + return b.FromBlock <= number && number <= b.ToBlock +} + func (b BlockRange) Overlaps(other BlockRange) bool { return b.FromBlock <= other.ToBlock && other.FromBlock <= b.ToBlock } diff --git a/common/block_range_test.go b/common/block_range_test.go index 6652de124..b230507f2 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -316,6 +316,85 @@ func TestBlockRange_Contains(t *testing.T) { } } +func TestBlockRange_ContainsBlockNumber(t *testing.T) { + tests := []struct { + name string + blockRange BlockRange + blockNumber uint64 + expected bool + }{ + { + name: "block in the middle of range", + blockRange: NewBlockRange(10, 20), + blockNumber: 15, + expected: true, + }, + { + name: "block at FromBlock boundary", + blockRange: NewBlockRange(10, 20), + blockNumber: 10, + expected: true, + }, + { + name: "block at ToBlock boundary", + blockRange: NewBlockRange(10, 20), + blockNumber: 20, + expected: true, + }, + { + name: "block before range", + blockRange: NewBlockRange(10, 20), + blockNumber: 5, + expected: false, + }, + { + name: "block after range", + blockRange: NewBlockRange(10, 20), + blockNumber: 25, + expected: false, + }, + { + name: "single block range contains itself", + blockRange: NewBlockRange(15, 15), + blockNumber: 15, + expected: true, + }, + { + name: "single block range does not contain other", + blockRange: NewBlockRange(15, 15), + blockNumber: 16, + expected: false, + }, + { + name: "empty range does not contain block", + blockRange: NewBlockRange(0, 0), + blockNumber: 5, + expected: false, + }, + { + name: "empty range with block 0", + blockRange: NewBlockRange(0, 0), + blockNumber: 0, + expected: true, + }, + { + name: "invalid range (from > to) does not contain", + blockRange: NewBlockRange(20, 10), + blockNumber: 15, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.blockRange.ContainsBlockNumber(tt.blockNumber) + require.Equal(t, tt.expected, got, + "ContainsBlockNumber() for %s: expected %v, got %v", + tt.name, tt.expected, got) + }) + } +} + func TestBlockRange_Subtract(t *testing.T) { bn := NewBlockRange(10, 50) require.Equal(t, []BlockRange{NewBlockRange(10, 19), NewBlockRange(31, 50)}, bn.Subtract(NewBlockRange(20, 30))) diff --git a/common/mocks/mock_logger.go b/common/mocks/mock_logger.go index 670c84686..569e2a50a 100644 --- a/common/mocks/mock_logger.go +++ b/common/mocks/mock_logger.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/common/mocks/mock_pub_sub.go b/common/mocks/mock_pub_sub.go index 5cf4220eb..1eefc7564 100644 --- a/common/mocks/mock_pub_sub.go +++ b/common/mocks/mock_pub_sub.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/common/types/mocks/mock_retry_handler.go b/common/types/mocks/mock_retry_handler.go index 5e8cd6277..29a1d2d98 100644 --- a/common/types/mocks/mock_retry_handler.go +++ b/common/types/mocks/mock_retry_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/common/types/mocks/mock_retry_policy_configurer.go b/common/types/mocks/mock_retry_policy_configurer.go index 0c8aadfe8..f400153ee 100644 --- a/common/types/mocks/mock_retry_policy_configurer.go +++ b/common/types/mocks/mock_retry_policy_configurer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_compatibility_checker.go b/db/compatibility/mocks/mock_compatibility_checker.go index cf3dac66f..e5bb2b174 100644 --- a/db/compatibility/mocks/mock_compatibility_checker.go +++ b/db/compatibility/mocks/mock_compatibility_checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_compatibility_data_storager.go b/db/compatibility/mocks/mock_compatibility_data_storager.go index c691e5a39..bac0731f6 100644 --- a/db/compatibility/mocks/mock_compatibility_data_storager.go +++ b/db/compatibility/mocks/mock_compatibility_data_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_runtime_data_getter_func.go b/db/compatibility/mocks/mock_runtime_data_getter_func.go index db6758ec4..e658e4e09 100644 --- a/db/compatibility/mocks/mock_runtime_data_getter_func.go +++ b/db/compatibility/mocks/mock_runtime_data_getter_func.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_d_ber.go b/db/mocks/mock_d_ber.go index 76f54538d..48a28f534 100644 --- a/db/mocks/mock_d_ber.go +++ b/db/mocks/mock_d_ber.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_key_value_storager.go b/db/mocks/mock_key_value_storager.go index 611304528..357734a8c 100644 --- a/db/mocks/mock_key_value_storager.go +++ b/db/mocks/mock_key_value_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_querier.go b/db/mocks/mock_querier.go index 5bf4dc754..a3f206a87 100644 --- a/db/mocks/mock_querier.go +++ b/db/mocks/mock_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_sql_txer.go b/db/mocks/mock_sql_txer.go index 0730d62fb..b11f1cf2d 100644 --- a/db/mocks/mock_sql_txer.go +++ b/db/mocks/mock_sql_txer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_txer.go b/db/mocks/mock_txer.go index 75e613ae9..e64b72434 100644 --- a/db/mocks/mock_txer.go +++ b/db/mocks/mock_txer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/etherman/block_notifier/block_notifier_manager.go b/etherman/block_notifier/block_notifier_manager.go index a6212a114..3d4fa28e2 100644 --- a/etherman/block_notifier/block_notifier_manager.go +++ b/etherman/block_notifier/block_notifier_manager.go @@ -57,6 +57,9 @@ func (bnm *BlockNotifierManager) GetBlockNotifier(ctx context.Context, } func (bnm *BlockNotifierManager) GetCurrentBlockNumber(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) { + if blockFinality.IsConstant() { + return blockFinality.Specific, nil + } bn, err := bnm.GetBlockNotifier(ctx, blockFinality) if err != nil { return 0, err diff --git a/etherman/default_eth_client.go b/etherman/default_eth_client.go index ea250ce1b..0947e604e 100644 --- a/etherman/default_eth_client.go +++ b/etherman/default_eth_client.go @@ -156,6 +156,9 @@ func (c *DefaultEthClient) rpcGetBlockByNumber(ctx context.Context, number *big. if err != nil { return nil, fmt.Errorf("rpcGetBlockByNumber: CallContext error: %w", err) } + if rawEthHeader == nil { + return nil, fmt.Errorf("rpcGetBlockByNumber:not found: %s", blockArg) + } return rawEthHeader.ToBlockHeader() } diff --git a/etherman/mocks/mock_dial_func.go b/etherman/mocks/mock_dial_func.go new file mode 100644 index 000000000..dd5fae541 --- /dev/null +++ b/etherman/mocks/mock_dial_func.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// DialFunc is an autogenerated mock type for the DialFunc type +type DialFunc struct { + mock.Mock +} + +type DialFunc_Expecter struct { + mock *mock.Mock +} + +func (_m *DialFunc) EXPECT() *DialFunc_Expecter { + return &DialFunc_Expecter{mock: &_m.Mock} +} + +// Execute provides a mock function with given fields: url +func (_m *DialFunc) Execute(url string) (types.BaseEthereumClienter, error) { + ret := _m.Called(url) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 types.BaseEthereumClienter + var r1 error + if rf, ok := ret.Get(0).(func(string) (types.BaseEthereumClienter, error)); ok { + return rf(url) + } + if rf, ok := ret.Get(0).(func(string) types.BaseEthereumClienter); ok { + r0 = rf(url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.BaseEthereumClienter) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(url) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DialFunc_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' +type DialFunc_Execute_Call struct { + *mock.Call +} + +// Execute is a helper method to define mock.On call +// - url string +func (_e *DialFunc_Expecter) Execute(url interface{}) *DialFunc_Execute_Call { + return &DialFunc_Execute_Call{Call: _e.mock.On("Execute", url)} +} + +func (_c *DialFunc_Execute_Call) Run(run func(url string)) *DialFunc_Execute_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *DialFunc_Execute_Call) Return(_a0 types.BaseEthereumClienter, _a1 error) *DialFunc_Execute_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DialFunc_Execute_Call) RunAndReturn(run func(string) (types.BaseEthereumClienter, error)) *DialFunc_Execute_Call { + _c.Call.Return(run) + return _c +} + +// NewDialFunc creates a new instance of DialFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDialFunc(t interface { + mock.TestingT + Cleanup(func()) +}) *DialFunc { + mock := &DialFunc{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/etherman/mocks/mock_op_node_clienter.go b/etherman/mocks/mock_op_node_clienter.go index c30df8b25..167e98b50 100644 --- a/etherman/mocks/mock_op_node_clienter.go +++ b/etherman/mocks/mock_op_node_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/etherman/querier/mocks/mock_rollup_manager_contract.go b/etherman/querier/mocks/mock_rollup_manager_contract.go index 989f2055e..293a3b649 100644 --- a/etherman/querier/mocks/mock_rollup_manager_contract.go +++ b/etherman/querier/mocks/mock_rollup_manager_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/etherman/types/mocks/mock_block_notifier.go b/etherman/types/mocks/mock_block_notifier.go index 96cebceb2..69d0e1c24 100644 --- a/etherman/types/mocks/mock_block_notifier.go +++ b/etherman/types/mocks/mock_block_notifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/etherman/types/mocks/mock_block_notifier_manager.go b/etherman/types/mocks/mock_block_notifier_manager.go index 6113fb7fd..565d1dce5 100644 --- a/etherman/types/mocks/mock_block_notifier_manager.go +++ b/etherman/types/mocks/mock_block_notifier_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index fa901d25c..f23d0a72d 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -80,7 +80,7 @@ func TestE2E(t *testing.T) { mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) client, auth, gerAddr, verifyAddr, gerSc, _ := newSimulatedClient(t) - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy var err error if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) @@ -166,7 +166,7 @@ func TestWithReorgs(t *testing.T) { rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true @@ -320,7 +320,7 @@ func TestStressAndReorgs(t *testing.T) { require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index de979a1a8..dbefd50fd 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -5,11 +5,14 @@ import ( "errors" "fmt" "math/big" + "time" jRPC "github.com/0xPolygon/cdk-rpc/rpc" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader" + mdrsync "github.com/agglayer/aggkit/multidownloader/sync" "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree" "github.com/agglayer/aggkit/tree/types" @@ -35,10 +38,18 @@ var ( ErrNotFound = errors.New("l1infotreesync: not found") ) +type DriverInterface interface { + Sync(ctx context.Context) +} + +type DownloaderInterface interface { + Finality() aggkittypes.BlockNumberFinality +} + type L1InfoTreeSync struct { processor *processor - driver *sync.EVMDriver - downloader *sync.EVMDownloader + driver DriverInterface + downloader DownloaderInterface } func NewReadOnly( @@ -55,11 +66,64 @@ func NewReadOnly( }, nil } +// New creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree +func NewMultidownloadBased( + ctx context.Context, + cfg Config, + l1Multidownloader *multidownloader.EVMMultidownloader, + flags CreationFlags, +) (*L1InfoTreeSync, error) { + processor, err := newProcessor(cfg.DBPath) + if err != nil { + return nil, err + } + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + } + + appender, err := buildAppender(l1Multidownloader.EthClient(), cfg.GlobalExitRootAddr, cfg.RollupManagerAddr, flags) + if err != nil { + return nil, err + } + addressesToQuery := []common.Address{cfg.GlobalExitRootAddr, cfg.RollupManagerAddr} + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "l1infotreesync", + ContractAddresses: addressesToQuery, + FromBlock: cfg.InitialBlock, + ToBlock: cfg.BlockFinality, + } + err = l1Multidownloader.RegisterSyncer(syncerConfig) + if err != nil { + return nil, fmt.Errorf("failed to register l1infotreesync in multidownloader: %w", err) + } + logger := log.WithFields("syncer", syncerID) + downloader := mdrsync.NewDownloader( + l1Multidownloader, + logger, + rh, + appender, + 5*time.Second, + time.Second, + ) + + driver := mdrsync.NewEVMDriver(processor, downloader, syncerConfig, + cfg.SyncBlockChunkSize, rh, logger) + if err != nil { + return nil, err + } + return &L1InfoTreeSync{ + processor: processor, + driver: driver, + downloader: downloader, + }, nil +} + // New creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree func New( ctx context.Context, cfg Config, - l1Client aggkittypes.MultiDownloader, + l1Client aggkittypes.MultiDownloaderLegacy, reorgDetector sync.ReorgDetector, flags CreationFlags, ) (*L1InfoTreeSync, error) { diff --git a/l1infotreesync/mock_downloader_interface.go b/l1infotreesync/mock_downloader_interface.go new file mode 100644 index 000000000..5cdbc4b27 --- /dev/null +++ b/l1infotreesync/mock_downloader_interface.go @@ -0,0 +1,80 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package l1infotreesync + +import ( + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// DownloaderInterfaceMock is an autogenerated mock type for the DownloaderInterface type +type DownloaderInterfaceMock struct { + mock.Mock +} + +type DownloaderInterfaceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *DownloaderInterfaceMock) EXPECT() *DownloaderInterfaceMock_Expecter { + return &DownloaderInterfaceMock_Expecter{mock: &_m.Mock} +} + +// Finality provides a mock function with no fields +func (_m *DownloaderInterfaceMock) Finality() types.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Finality") + } + + var r0 types.BlockNumberFinality + if rf, ok := ret.Get(0).(func() types.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.BlockNumberFinality) + } + + return r0 +} + +// DownloaderInterfaceMock_Finality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finality' +type DownloaderInterfaceMock_Finality_Call struct { + *mock.Call +} + +// Finality is a helper method to define mock.On call +func (_e *DownloaderInterfaceMock_Expecter) Finality() *DownloaderInterfaceMock_Finality_Call { + return &DownloaderInterfaceMock_Finality_Call{Call: _e.mock.On("Finality")} +} + +func (_c *DownloaderInterfaceMock_Finality_Call) Run(run func()) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DownloaderInterfaceMock_Finality_Call) Return(_a0 types.BlockNumberFinality) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DownloaderInterfaceMock_Finality_Call) RunAndReturn(run func() types.BlockNumberFinality) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Return(run) + return _c +} + +// NewDownloaderInterfaceMock creates a new instance of DownloaderInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDownloaderInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DownloaderInterfaceMock { + mock := &DownloaderInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/mock_driver_interface.go b/l1infotreesync/mock_driver_interface.go new file mode 100644 index 000000000..a9c76d35e --- /dev/null +++ b/l1infotreesync/mock_driver_interface.go @@ -0,0 +1,69 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package l1infotreesync + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// DriverInterfaceMock is an autogenerated mock type for the DriverInterface type +type DriverInterfaceMock struct { + mock.Mock +} + +type DriverInterfaceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *DriverInterfaceMock) EXPECT() *DriverInterfaceMock_Expecter { + return &DriverInterfaceMock_Expecter{mock: &_m.Mock} +} + +// Sync provides a mock function with given fields: ctx +func (_m *DriverInterfaceMock) Sync(ctx context.Context) { + _m.Called(ctx) +} + +// DriverInterfaceMock_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' +type DriverInterfaceMock_Sync_Call struct { + *mock.Call +} + +// Sync is a helper method to define mock.On call +// - ctx context.Context +func (_e *DriverInterfaceMock_Expecter) Sync(ctx interface{}) *DriverInterfaceMock_Sync_Call { + return &DriverInterfaceMock_Sync_Call{Call: _e.mock.On("Sync", ctx)} +} + +func (_c *DriverInterfaceMock_Sync_Call) Run(run func(ctx context.Context)) *DriverInterfaceMock_Sync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DriverInterfaceMock_Sync_Call) Return() *DriverInterfaceMock_Sync_Call { + _c.Call.Return() + return _c +} + +func (_c *DriverInterfaceMock_Sync_Call) RunAndReturn(run func(context.Context)) *DriverInterfaceMock_Sync_Call { + _c.Run(run) + return _c +} + +// NewDriverInterfaceMock creates a new instance of DriverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDriverInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DriverInterfaceMock { + mock := &DriverInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/mock_l1_info_tree_syncer.go b/l1infotreesync/mock_l1_info_tree_syncer.go index fa1759b7f..3b77c3c79 100644 --- a/l1infotreesync/mock_l1_info_tree_syncer.go +++ b/l1infotreesync/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mocks/mock_reorg_detector.go b/l1infotreesync/mocks/mock_reorg_detector.go index d63339916..836850406 100644 --- a/l1infotreesync/mocks/mock_reorg_detector.go +++ b/l1infotreesync/mocks/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c0481add9..6693a95fb 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -16,6 +16,7 @@ import ( "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree" treetypes "github.com/agglayer/aggkit/tree/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/russross/meddler" @@ -253,6 +254,19 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { return p.getLastProcessedBlockWithTx(p.db) } +// GetLastProcessedBlock returns the last processed block +func (p *processor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + var lastProcessedBlockNum uint64 + var hash *string + row := p.db.QueryRow("SELECT num, hash FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlockNum, &hash) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + hdr := aggkittypes.NewBlockHeader(lastProcessedBlockNum, common.HexToHash(*hash), 0, nil) + return hdr, err +} + func (p *processor) getLastProcessedBlockWithTx(tx dbtypes.Querier) (uint64, error) { var lastProcessedBlockNum uint64 diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 0163de852..e676090a1 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -544,3 +544,60 @@ func TestCalculateGER(t *testing.T) { }) } } + +func TestGetLastProcessedBlockHeader(t *testing.T) { + t.Parallel() + ctx := t.Context() + + t.Run("returns nil when no blocks are processed", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_empty.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.Nil(t, hdr) + }) + + t.Run("returns last processed block when single block exists", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_single.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + expectedHash := common.HexToHash("0xabc123") + expectedNum := uint64(1) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, expectedNum, expectedHash.String()) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.NotNil(t, hdr) + require.Equal(t, expectedNum, hdr.Number) + require.Equal(t, expectedHash, hdr.Hash) + }) + + t.Run("returns last processed block when multiple blocks exist", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_multiple.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + // Insert multiple blocks + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, 1, common.HexToHash("0x1").String()) + require.NoError(t, err) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, 2, common.HexToHash("0x2").String()) + require.NoError(t, err) + expectedHash := common.HexToHash("0x3") + expectedNum := uint64(3) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, expectedNum, expectedHash.String()) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.NotNil(t, hdr) + require.Equal(t, expectedNum, hdr.Number) + require.Equal(t, expectedHash, hdr.Hash) + }) +} diff --git a/l2gersync/mocks/mock_l1_info_tree_querier.go b/l2gersync/mocks/mock_l1_info_tree_querier.go index d3f49b954..2cec88aee 100644 --- a/l2gersync/mocks/mock_l1_info_tree_querier.go +++ b/l2gersync/mocks/mock_l1_info_tree_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index 7dcb58af9..949671da3 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -145,7 +145,7 @@ func TestE2E(t *testing.T) { _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "block 4") require.NoError(t, err) testData.SimulatedL1.Commit() // Block 4 - _, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ + logs, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ Addresses: []common.Address{testData.LogEmitterAddr}, FromBlock: big.NewInt(0), ToBlock: big.NewInt(int64(latestBlock + 2)), diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index d2802c2f3..239e42559 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -54,7 +54,7 @@ type EVMMultidownloader struct { cancel context.CancelFunc } -var _ aggkittypes.MultiDownloader = (*EVMMultidownloader)(nil) +var _ aggkittypes.MultiDownloaderLegacy = (*EVMMultidownloader)(nil) // NewEVMMultidownloader creates a new EVM multidownloader instance with proper validation func NewEVMMultidownloader(log aggkitcommon.Logger, @@ -196,7 +196,7 @@ func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, return fmt.Errorf("detectReorgs: block number %d not found in storage", number) } if storageBlock.Hash != rpcBlock.Hash { - return mdrtypes.NewReorgError(storageBlock.Number, storageBlock.Hash, rpcBlock.Hash, + return mdrtypes.NewDetectedReorgError(storageBlock.Number, storageBlock.Hash, rpcBlock.Hash, fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", number, storageBlock.Hash.String(), rpcBlock.Hash.String())) } @@ -320,7 +320,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { err := dh.StartStep(runCtx) if err != nil { - reorgErr := mdrtypes.CastReorgError(err) + reorgErr := mdrtypes.CastDetectedReorgError(err) if reorgErr == nil { dh.log.Warnf("Error running multidownloader: %s ", err.Error()) time.Sleep(time.Millisecond) // Brief pause before retry @@ -454,7 +454,7 @@ func (dh *EVMMultidownloader) StartStepOld(ctx context.Context) error { if err = dh.sync(ctx, dh.StepUnsafe, "unsafe"); err != nil { return err } - dh.log.Infof("waiting new block...") + if err = dh.WaitForNewLatestBlocks(ctx); err != nil { return err } @@ -468,6 +468,7 @@ func (dh *EVMMultidownloader) StartStepOld(ctx context.Context) error { func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error { latestSyncedBlock := dh.state.GetHighestBlockNumberPendingToSync() + dh.log.Infof("waiting new block (latest>%d)...", latestSyncedBlock) _, err := dh.waitForNewBlocks(ctx, aggkittypes.LatestBlock, latestSyncedBlock) return err } @@ -602,6 +603,13 @@ func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { return dh.state.IsAvailable(query) } +// Check if the given log query is partially available +func (dh *EVMMultidownloader) IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) { + dh.mutex.Lock() + defer dh.mutex.Unlock() + return dh.state.IsPartiallyAvailable(query) +} + // getTotalPendingBlockRange returns the full pending block range without taking in // consideration addrs func (dh *EVMMultidownloader) getTotalPendingBlockRange() *aggkitcommon.BlockRange { diff --git a/multidownloader/evm_multidownloader_reorg.go b/multidownloader/evm_multidownloader_reorg.go new file mode 100644 index 000000000..abde85f35 --- /dev/null +++ b/multidownloader/evm_multidownloader_reorg.go @@ -0,0 +1,48 @@ +package multidownloader + +import ( + "context" + "fmt" + + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/ethereum/go-ethereum/common" +) + +// CheckValidBlock checks if the given blockNumber and blockHash are still valid +// returns: isValid bool, reorgChainID uint64, err error +func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber uint64, + blockHash common.Hash) (bool, uint64, error) { + // Check if is stored as valid block + storedBlock, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber) + if err != nil { + return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot get BlockHeader number=%d: %w", + blockNumber, err) + } + if storedBlock != nil { + // Is valid? + if storedBlock.Hash == blockHash { + return true, 0, nil + } + } + // From this point is invalid or unknown + // Check in blocks_reorged + chainID, found, err := dh.storage.GetBlockReorgedChainID(nil, blockNumber, blockHash) + if err != nil { + return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot check blocks_reorged for blockNumber=%d: %w", + blockNumber, err) + } + if found { + dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (chainID=%d)", + blockNumber, blockHash.Hex(), chainID) + return false, chainID, nil + } + // Not found anywhere, consider invalid + return false, 0, fmt.Errorf( + "EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged", + blockNumber, blockHash.Hex()) +} + +func (dh *EVMMultidownloader) GetReorgedDataByChainID(ctx context.Context, + reorgChainID uint64) (*mdrtypes.ReorgData, error) { + return dh.storage.GetReorgedDataByChainID(nil, reorgChainID) +} diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index 224efaccc..f997be27f 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -9,7 +9,6 @@ import ( mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -101,41 +100,59 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, return blockHeader, nil } +// HeaderByNumber gets the block header for the given block number from storage or ethClient +func (dh *EVMMultidownloader) StorageHeaderByNumber(ctx context.Context, + number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error) { + if number == nil { + number = &aggkittypes.LatestBlock + } + // Resolve blockNumber + blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, *number) + if err != nil { + return nil, false, fmt.Errorf("EVMMultidownloader.StorageHeaderByNumber: cannot get block number for finality=%s: %w", + number.String(), err) + } + // Is this block in storage? + block, finalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber) + if err != nil { + return nil, false, fmt.Errorf("EVMMultidownloader.StorageHeaderByNumber: cannot get BlockHeader number=%s: %w", + number.String(), err) + } + return block, finalized, nil +} + // EthClient returns the underlying eth client func (dh *EVMMultidownloader) EthClient() aggkittypes.BaseEthereumClienter { return dh.ethClient } -// CheckValidBlock checks if the given blockNumber and blockHash are still valid -// returns: isValid bool, reorgChainID uint64, err error -func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber uint64, - blockHash common.Hash) (bool, uint64, error) { - // Check if is stored as valid block - storedBlock, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber) +func (dh *EVMMultidownloader) LogQuery(ctx context.Context, + query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) { + dh.mutex.Lock() + defer dh.mutex.Unlock() + isAval, availQuery := dh.state.IsPartiallyAvailable(query) + if !isAval { + return mdrtypes.LogQueryResponse{}, + fmt.Errorf("EVMMultidownloader.LogQuery: logs not synced for query: %s", + query.String()) + } + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) if err != nil { - return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot get BlockHeader number=%d: %w", - blockNumber, err) - } - if storedBlock != nil { - // Is valid? - if storedBlock.Hash == blockHash { - return true, 0, nil - } + return mdrtypes.LogQueryResponse{}, + fmt.Errorf("EVMMultidownloader.LogQuery: cannot get finalized block number: %w", + err) } - // From this point is invalid or unknown - // Check in blocks_reorged - chainID, found, err := dh.storage.GetBlockReorgedChainID(nil, blockNumber, blockHash) + // Calculate UnsafeRange + + result, err := dh.storage.LogQuery(nil, *availQuery) if err != nil { - return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot check blocks_reorged for blockNumber=%d: %w", - blockNumber, err) - } - if found { - dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (chainID=%d)", - blockNumber, blockHash.Hex(), chainID) - return false, chainID, nil - } - // Not found anywhere, consider invalid - return false, 0, fmt.Errorf( - "EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged", - blockNumber, blockHash.Hex()) + // Calculate UnsafeRange + _, unsafePendingBlockRange := result.ResponseRange.SplitByBlockNumber(finalizedBlockNumber) + result.UnsafeRange = unsafePendingBlockRange + } + return result, err +} + +func (dh *EVMMultidownloader) Finality() aggkittypes.BlockNumberFinality { + return dh.cfg.BlockFinality } diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index 66cc8f487..0987ee993 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -62,6 +62,8 @@ func TestEVMMultidownloader_BlockHeader(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock). Return(uint64(123456), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123456)). + Return(nil, false, nil) // Block not found in storage, will fetch from ethClient testData.mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, aggkittypes.NewBlockNumber(123456)). Return(&aggkittypes.BlockHeader{ Number: 123456, @@ -75,6 +77,9 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { t.Run("negative block number returns error", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + // FinalizedBlock is not a numeric finality, so GetCurrentBlockNumber will fail + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), errors.New("only numeric block finalities are supported")) // Test result, err := testData.mdr.HeaderByNumber(context.Background(), &aggkittypes.FinalizedBlock) @@ -88,6 +93,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { t.Run("storage error returns error", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). Return(nil, false, errStorageExample) @@ -108,10 +115,12 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { expectedBlock := &aggkittypes.BlockHeader{ Number: 123, } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). Return(expectedBlock, false, nil) - // Test + // Test result, err := testData.mdr.HeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(123)) // Assertions @@ -123,6 +132,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). Return(nil, false, nil) // Block not found in storage @@ -144,6 +155,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). Return(nil, false, nil) // Block not found in storage diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index b20f67831..27cdc28af 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -4,41 +4,45 @@ import ( "context" "fmt" "math/big" - "os" "sync" "sync/atomic" "testing" "time" - jRPC "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" "github.com/agglayer/aggkit/db" - "github.com/agglayer/aggkit/etherman" mockethermantypes "github.com/agglayer/aggkit/etherman/types/mocks" - "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" - "github.com/agglayer/aggkit/reorgdetector" - aggkitsync "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" mocktypes "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -const runL1InfoTree = true -const l1InfoTreeUseMultidownloader = true -const storagePath = "../tmp/ut/" +// Imports below are only used in skipped tests but need to remain commented to avoid import cycles: +// jRPC "github.com/0xPolygon/cdk-rpc/rpc" +// "github.com/agglayer/aggkit/etherman" +// "github.com/agglayer/aggkit/l1infotreesync" +// "github.com/agglayer/aggkit/reorgdetector" +// aggkitsync "github.com/agglayer/aggkit/sync" +// "github.com/ethereum/go-ethereum/ethclient" +// "github.com/ethereum/go-ethereum/rpc" +// "os" + +// Commented out constants only used in skipped tests +// const runL1InfoTree = true +// const l1InfoTreeUseMultidownloader = true +// const storagePath = "../tmp/ut/" func TestEVMMultidownloader(t *testing.T) { - t.Skip("code to test/debug not real unittest") + t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") + /* Commented out to avoid import cycles cfgLog := log.Config{ Environment: "development", Level: "info", @@ -89,7 +93,7 @@ func TestEVMMultidownloader(t *testing.T) { ctx := context.TODO() var l1infotree *l1infotreesync.L1InfoTreeSync if runL1InfoTree == true { - var multidownloader aggkittypes.MultiDownloader + var multidownloader aggkittypes.MultiDownloaderLegacy var dbPath string if l1InfoTreeUseMultidownloader { multidownloader = mdr @@ -169,10 +173,12 @@ func TestEVMMultidownloader(t *testing.T) { log.Infof("L1InfoTree sync finished in %s", timer.String()) }() wg.Wait() + */ } func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { - t.Skip("it's a exploratory test for batch requests") + t.Skip("it's a exploratory test for batch requests - requires external dependencies") + /* Commented out to avoid import cycles l1url := os.Getenv("L1URL") ethClient, err := rpc.DialContext(t.Context(), l1url) require.NoError(t, err) @@ -206,10 +212,12 @@ func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { log.Infof("blockNumber: %s, chainID: %s", blockNumber, chainID) log.Infof("latestBlock: %+v", latestBlock) + */ } func TestDownloaderParellelvsBatch(t *testing.T) { - t.Skip("it's a benchmarking test") + t.Skip("it's a benchmarking test - requires external dependencies") + /* Commented out to avoid import cycles l1url := os.Getenv("L1URL") ethClient, err := ethclient.Dial(l1url) require.NoError(t, err) @@ -246,16 +254,18 @@ func TestDownloaderParellelvsBatch(t *testing.T) { require.NotNil(t, headerB) require.Equal(t, headerP.Hash, headerB.Hash) } + */ } -func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { - for _, h := range headers { - if h.Number == bn { - return h - } - } - return nil -} +// getBlockHeader is only used in skipped tests +// func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { +// for _, h := range headers { +// if h.Number == bn { +// return h +// } +// } +// return nil +// } func TestEVMMultidownloader_NewEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "evm_multidownloader_test") diff --git a/multidownloader/state.go b/multidownloader/state.go index 2c4abd3e2..82d6f6b33 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -74,6 +74,10 @@ func (s *State) IsAvailable(query mdrtypes.LogQuery) bool { return s.Synced.IsAvailable(query) } +func (s *State) IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) { + return s.Synced.IsPartiallyAvailable(query) +} + func (s *State) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { return s.Pending.GetTotalPendingBlockRange() } diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index 69d0e6e81..f2fa72f7b 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -180,6 +180,7 @@ type logAndBlockRow struct { BlockHash common.Hash `meddler:"block_hash,hash"` BlockTimestamp uint64 `meddler:"block_timestamp"` BlockParentHash *common.Hash `meddler:"block_parent_hash,hash"` + IsFinal bool `meddler:"is_final"` } func (a *MultidownloaderStorage) GetEthLogs(tx dbtypes.Querier, query mdrtypes.LogQuery) ([]types.Log, error) { @@ -232,6 +233,87 @@ func (a *MultidownloaderStorage) GetEthLogs(tx dbtypes.Querier, query mdrtypes.L return logs, nil } +func (a *MultidownloaderStorage) LogQuery(tx dbtypes.Querier, + query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + + dbRows := make([]*logAndBlockRow, 0) + sqlQuery := ` + SELECT * FROM logs + LEFT JOIN blocks ON logs.block_number = blocks.block_number + WHERE address IN (?) + AND logs.block_number>=? AND logs.block_number<=? + ORDER BY logs.block_number ASC, log_index ASC + ` + addrs := make([]string, 0, len(query.Addrs)) + for _, addr := range query.Addrs { + addrs = append(addrs, addr.Hex()) + } + // This is used to extend the address slice into the query + queryStr, args, err := sqlx.In(sqlQuery, addrs, query.BlockRange.FromBlock, query.BlockRange.ToBlock) + if err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error building SQL query: %w", err) + } + err = meddler.QueryAll(tx, &dbRows, queryStr, args...) + if err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error querying logs: %w", err) + } + + // Group logs by block number + blockLogsMap := make(map[uint64]*mdrtypes.BlockWithLogs) + blockOrder := make([]uint64, 0) + + for _, dbRow := range dbRows { + var topics []common.Hash + if err := json.Unmarshal([]byte(dbRow.Topics), &topics); err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error unmarshaling topics: %w", err) + } + log := mdrtypes.Log{ + Address: dbRow.Address, + Topics: topics, + Data: dbRow.Data, + BlockNumber: dbRow.BlockNumber, + TxHash: dbRow.TxHash, + TxIndex: dbRow.TxIndex, + Index: dbRow.Index, + BlockTimestamp: dbRow.BlockTimestamp, + Removed: false, + } + + // Add block to map if not already present + if _, exists := blockLogsMap[dbRow.BlockNumber]; !exists { + blockLogsMap[dbRow.BlockNumber] = &mdrtypes.BlockWithLogs{ + Header: aggkittypes.BlockHeader{ + Number: dbRow.BlockNumber, + Hash: dbRow.BlockHash, + Time: dbRow.BlockTimestamp, + ParentHash: dbRow.BlockParentHash, + }, + IsFinal: dbRow.IsFinal, + Logs: make([]mdrtypes.Log, 0), + } + blockOrder = append(blockOrder, dbRow.BlockNumber) + } + + blockLogsMap[dbRow.BlockNumber].Logs = append(blockLogsMap[dbRow.BlockNumber].Logs, log) + } + + // Build response maintaining block order + blocks := make([]mdrtypes.BlockWithLogs, 0, len(blockOrder)) + for _, blockNum := range blockOrder { + blocks = append(blocks, *blockLogsMap[blockNum]) + } + + return mdrtypes.LogQueryResponse{ + Blocks: blocks, + ResponseRange: query.BlockRange, + }, nil +} + // tx dbtypes.Txer func (a *MultidownloaderStorage) SaveEthLogs(tx dbtypes.Querier, logs []types.Log, isFinal bool) error { return a.saveLogsAndBlocks(tx, NewBlockRowsFromLogs(logs, isFinal), NewLogRowsFromEthLogs(logs)) diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 2c7fac56f..c3515cbcf 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -1,11 +1,14 @@ package storage import ( + "database/sql" + "errors" "fmt" aggkitcommon "github.com/agglayer/aggkit/common" dbtypes "github.com/agglayer/aggkit/db/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/russross/meddler" ) @@ -117,3 +120,46 @@ func (a *MultidownloaderStorage) GetBlockReorgedChainID(tx dbtypes.Querier, } return *chainIDRow.ChainID, true, nil } + +func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, + reorgedChainID uint64) (*mdrtypes.ReorgData, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + + var row reorgRow + query := `SELECT chain_id, detected_at_block, reorged_from_block, reorged_to_block, + detected_timestamp, network_latest_block, network_finalized_block, network_finalized_block_name + FROM reorgs WHERE chain_id = ? LIMIT 1;` + + err := meddler.QueryRow(tx, &row, query, reorgedChainID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("GetReorgedDataByChainID: error querying reorgs table: %w", err) + } + + // Convert string to BlockNumberFinality + blockFinality, err := aggkittypes.NewBlockNumberFinality(row.NetworkFinalizedBlockName) + if err != nil { + return nil, fmt.Errorf("GetReorgedDataByChainID: error parsing NetworkFinalizedBlockName: %w", err) + } + + reorgData := &mdrtypes.ReorgData{ + ChainID: row.ChainID, + BlockRangeAffected: aggkitcommon.BlockRange{ + FromBlock: row.ReorgedFromBlock, + ToBlock: row.ReorgedToBlock, + }, + DetectedAtBlock: row.DetectedAtBlock, + DetectedTimestamp: row.DetectedTimestamp, + NetworkLatestBlock: row.NetworkLatestBlock, + NetworkFinalizedBlock: row.NetworkFinalizedBlock, + NetworkFinalizedBlockName: *blockFinality, + } + + return reorgData, nil +} diff --git a/multidownloader/storage/storage_reorg_test.go b/multidownloader/storage/storage_reorg_test.go index e2510855a..265f9afd3 100644 --- a/multidownloader/storage/storage_reorg_test.go +++ b/multidownloader/storage/storage_reorg_test.go @@ -65,3 +65,104 @@ func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { require.Nil(t, hdr, "block header should not be in blocks table anymore") } } + +func TestStorage_GetReorgedDataByChainID(t *testing.T) { + t.Run("returns reorg data when found", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert a reorg + expectedReorgData := mdrtypes.ReorgData{ + ChainID: 0, // will be set by InsertNewReorg + BlockRangeAffected: aggkitcommon.NewBlockRange(1000, 1010), + DetectedAtBlock: 1020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, expectedReorgData) + require.NoError(t, err) + require.Equal(t, uint64(1), chainID) + err = tx.Commit() + require.NoError(t, err) + + // Retrieve the reorg data + reorgData, err := storage.GetReorgedDataByChainID(nil, chainID) + require.NoError(t, err) + require.NotNil(t, reorgData, "reorg data should not be nil when found") + require.Equal(t, chainID, reorgData.ChainID) + require.Equal(t, expectedReorgData.BlockRangeAffected, reorgData.BlockRangeAffected) + require.Equal(t, expectedReorgData.DetectedAtBlock, reorgData.DetectedAtBlock) + require.Equal(t, expectedReorgData.DetectedTimestamp, reorgData.DetectedTimestamp) + require.Equal(t, expectedReorgData.NetworkLatestBlock, reorgData.NetworkLatestBlock) + require.Equal(t, expectedReorgData.NetworkFinalizedBlock, reorgData.NetworkFinalizedBlock) + require.Equal(t, expectedReorgData.NetworkFinalizedBlockName, reorgData.NetworkFinalizedBlockName) + }) + + t.Run("returns nil when chainID not found", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Try to retrieve a non-existent chainID + reorgData, err := storage.GetReorgedDataByChainID(nil, 999) + require.NoError(t, err, "should not return error when chainID not found") + require.Nil(t, reorgData, "reorg data should be nil when not found") + }) + + t.Run("returns correct data for multiple reorgs", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple reorgs + reorgData1 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1000, 1010), + DetectedAtBlock: 1020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + + reorgData2 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(2000, 2020), + DetectedAtBlock: 2030, + DetectedTimestamp: 1630004000, + NetworkLatestBlock: 3000, + NetworkFinalizedBlock: 2990, + NetworkFinalizedBlockName: aggkittypes.SafeBlock, + } + + tx1, err := storage.NewTx(t.Context()) + require.NoError(t, err) + chainID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + require.NoError(t, err) + require.Equal(t, uint64(1), chainID1) + err = tx1.Commit() + require.NoError(t, err) + + tx2, err := storage.NewTx(t.Context()) + require.NoError(t, err) + chainID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + require.NoError(t, err) + require.Equal(t, uint64(2), chainID2) + err = tx2.Commit() + require.NoError(t, err) + + // Retrieve first reorg + retrieved1, err := storage.GetReorgedDataByChainID(nil, chainID1) + require.NoError(t, err) + require.NotNil(t, retrieved1) + require.Equal(t, chainID1, retrieved1.ChainID) + require.Equal(t, reorgData1.BlockRangeAffected, retrieved1.BlockRangeAffected) + require.Equal(t, reorgData1.NetworkFinalizedBlockName, retrieved1.NetworkFinalizedBlockName) + + // Retrieve second reorg + retrieved2, err := storage.GetReorgedDataByChainID(nil, chainID2) + require.NoError(t, err) + require.NotNil(t, retrieved2) + require.Equal(t, chainID2, retrieved2.ChainID) + require.Equal(t, reorgData2.BlockRangeAffected, retrieved2.BlockRangeAffected) + require.Equal(t, reorgData2.NetworkFinalizedBlockName, retrieved2.NetworkFinalizedBlockName) + }) +} diff --git a/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go b/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go new file mode 100644 index 000000000..441469600 --- /dev/null +++ b/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go @@ -0,0 +1,114 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// CertificateSubmissionServiceClient is an autogenerated mock type for the CertificateSubmissionServiceClient type +type CertificateSubmissionServiceClient struct { + mock.Mock +} + +type CertificateSubmissionServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *CertificateSubmissionServiceClient) EXPECT() *CertificateSubmissionServiceClient_Expecter { + return &CertificateSubmissionServiceClient_Expecter{mock: &_m.Mock} +} + +// SubmitCertificate provides a mock function with given fields: ctx, in, opts +func (_m *CertificateSubmissionServiceClient) SubmitCertificate(ctx context.Context, in *nodev1.SubmitCertificateRequest, opts ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubmitCertificate") + } + + var r0 *nodev1.SubmitCertificateResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) *nodev1.SubmitCertificateResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.SubmitCertificateResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CertificateSubmissionServiceClient_SubmitCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitCertificate' +type CertificateSubmissionServiceClient_SubmitCertificate_Call struct { + *mock.Call +} + +// SubmitCertificate is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.SubmitCertificateRequest +// - opts ...grpc.CallOption +func (_e *CertificateSubmissionServiceClient_Expecter) SubmitCertificate(ctx interface{}, in interface{}, opts ...interface{}) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + return &CertificateSubmissionServiceClient_SubmitCertificate_Call{Call: _e.mock.On("SubmitCertificate", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) Run(run func(ctx context.Context, in *nodev1.SubmitCertificateRequest, opts ...grpc.CallOption)) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.SubmitCertificateRequest), variadicArgs...) + }) + return _c +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) Return(_a0 *nodev1.SubmitCertificateResponse, _a1 error) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) RunAndReturn(run func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error)) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Return(run) + return _c +} + +// NewCertificateSubmissionServiceClient creates a new instance of CertificateSubmissionServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCertificateSubmissionServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *CertificateSubmissionServiceClient { + mock := &CertificateSubmissionServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go b/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go new file mode 100644 index 000000000..4ec9191b4 --- /dev/null +++ b/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go @@ -0,0 +1,114 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// ConfigurationServiceClient is an autogenerated mock type for the ConfigurationServiceClient type +type ConfigurationServiceClient struct { + mock.Mock +} + +type ConfigurationServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *ConfigurationServiceClient) EXPECT() *ConfigurationServiceClient_Expecter { + return &ConfigurationServiceClient_Expecter{mock: &_m.Mock} +} + +// GetEpochConfiguration provides a mock function with given fields: ctx, in, opts +func (_m *ConfigurationServiceClient) GetEpochConfiguration(ctx context.Context, in *nodev1.GetEpochConfigurationRequest, opts ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetEpochConfiguration") + } + + var r0 *nodev1.GetEpochConfigurationResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) *nodev1.GetEpochConfigurationResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetEpochConfigurationResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConfigurationServiceClient_GetEpochConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEpochConfiguration' +type ConfigurationServiceClient_GetEpochConfiguration_Call struct { + *mock.Call +} + +// GetEpochConfiguration is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetEpochConfigurationRequest +// - opts ...grpc.CallOption +func (_e *ConfigurationServiceClient_Expecter) GetEpochConfiguration(ctx interface{}, in interface{}, opts ...interface{}) *ConfigurationServiceClient_GetEpochConfiguration_Call { + return &ConfigurationServiceClient_GetEpochConfiguration_Call{Call: _e.mock.On("GetEpochConfiguration", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) Run(run func(ctx context.Context, in *nodev1.GetEpochConfigurationRequest, opts ...grpc.CallOption)) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetEpochConfigurationRequest), variadicArgs...) + }) + return _c +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) Return(_a0 *nodev1.GetEpochConfigurationResponse, _a1 error) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) RunAndReturn(run func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error)) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Return(run) + return _c +} + +// NewConfigurationServiceClient creates a new instance of ConfigurationServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfigurationServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ConfigurationServiceClient { + mock := &ConfigurationServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go b/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go new file mode 100644 index 000000000..e5f3ed15d --- /dev/null +++ b/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go @@ -0,0 +1,262 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// NodeStateServiceClient is an autogenerated mock type for the NodeStateServiceClient type +type NodeStateServiceClient struct { + mock.Mock +} + +type NodeStateServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeStateServiceClient) EXPECT() *NodeStateServiceClient_Expecter { + return &NodeStateServiceClient_Expecter{mock: &_m.Mock} +} + +// GetCertificateHeader provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetCertificateHeader(ctx context.Context, in *nodev1.GetCertificateHeaderRequest, opts ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateHeader") + } + + var r0 *nodev1.GetCertificateHeaderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) *nodev1.GetCertificateHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetCertificateHeaderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateHeader' +type NodeStateServiceClient_GetCertificateHeader_Call struct { + *mock.Call +} + +// GetCertificateHeader is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetCertificateHeaderRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetCertificateHeader(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetCertificateHeader_Call { + return &NodeStateServiceClient_GetCertificateHeader_Call{Call: _e.mock.On("GetCertificateHeader", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) Run(run func(ctx context.Context, in *nodev1.GetCertificateHeaderRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetCertificateHeaderRequest), variadicArgs...) + }) + return _c +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) Return(_a0 *nodev1.GetCertificateHeaderResponse, _a1 error) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) RunAndReturn(run func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error)) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestCertificateHeader provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetLatestCertificateHeader(ctx context.Context, in *nodev1.GetLatestCertificateHeaderRequest, opts ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetLatestCertificateHeader") + } + + var r0 *nodev1.GetLatestCertificateHeaderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) *nodev1.GetLatestCertificateHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetLatestCertificateHeaderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetLatestCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestCertificateHeader' +type NodeStateServiceClient_GetLatestCertificateHeader_Call struct { + *mock.Call +} + +// GetLatestCertificateHeader is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetLatestCertificateHeaderRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetLatestCertificateHeader(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + return &NodeStateServiceClient_GetLatestCertificateHeader_Call{Call: _e.mock.On("GetLatestCertificateHeader", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) Run(run func(ctx context.Context, in *nodev1.GetLatestCertificateHeaderRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetLatestCertificateHeaderRequest), variadicArgs...) + }) + return _c +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) Return(_a0 *nodev1.GetLatestCertificateHeaderResponse, _a1 error) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) RunAndReturn(run func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error)) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + +// GetNetworkInfo provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetNetworkInfo(ctx context.Context, in *nodev1.GetNetworkInfoRequest, opts ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetNetworkInfo") + } + + var r0 *nodev1.GetNetworkInfoResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) *nodev1.GetNetworkInfoResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetNetworkInfoResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetNetworkInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkInfo' +type NodeStateServiceClient_GetNetworkInfo_Call struct { + *mock.Call +} + +// GetNetworkInfo is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetNetworkInfoRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetNetworkInfo(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetNetworkInfo_Call { + return &NodeStateServiceClient_GetNetworkInfo_Call{Call: _e.mock.On("GetNetworkInfo", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) Run(run func(ctx context.Context, in *nodev1.GetNetworkInfoRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetNetworkInfoRequest), variadicArgs...) + }) + return _c +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) Return(_a0 *nodev1.GetNetworkInfoResponse, _a1 error) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) RunAndReturn(run func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error)) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeStateServiceClient creates a new instance of NodeStateServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeStateServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeStateServiceClient { + mock := &NodeStateServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go new file mode 100644 index 000000000..d29665525 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go @@ -0,0 +1,188 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + proverv1 "buf.build/gen/go/agglayer/provers/protocolbuffers/go/aggkit/prover/v1" +) + +// AggchainProofServiceClient is an autogenerated mock type for the AggchainProofServiceClient type +type AggchainProofServiceClient struct { + mock.Mock +} + +type AggchainProofServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *AggchainProofServiceClient) EXPECT() *AggchainProofServiceClient_Expecter { + return &AggchainProofServiceClient_Expecter{mock: &_m.Mock} +} + +// GenerateAggchainProof provides a mock function with given fields: ctx, in, opts +func (_m *AggchainProofServiceClient) GenerateAggchainProof(ctx context.Context, in *proverv1.GenerateAggchainProofRequest, opts ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GenerateAggchainProof") + } + + var r0 *proverv1.GenerateAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) *proverv1.GenerateAggchainProofResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceClient_GenerateAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateAggchainProof' +type AggchainProofServiceClient_GenerateAggchainProof_Call struct { + *mock.Call +} + +// GenerateAggchainProof is a helper method to define mock.On call +// - ctx context.Context +// - in *proverv1.GenerateAggchainProofRequest +// - opts ...grpc.CallOption +func (_e *AggchainProofServiceClient_Expecter) GenerateAggchainProof(ctx interface{}, in interface{}, opts ...interface{}) *AggchainProofServiceClient_GenerateAggchainProof_Call { + return &AggchainProofServiceClient_GenerateAggchainProof_Call{Call: _e.mock.On("GenerateAggchainProof", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) Run(run func(ctx context.Context, in *proverv1.GenerateAggchainProofRequest, opts ...grpc.CallOption)) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*proverv1.GenerateAggchainProofRequest), variadicArgs...) + }) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) Return(_a0 *proverv1.GenerateAggchainProofResponse, _a1 error) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error)) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// GenerateOptimisticAggchainProof provides a mock function with given fields: ctx, in, opts +func (_m *AggchainProofServiceClient) GenerateOptimisticAggchainProof(ctx context.Context, in *proverv1.GenerateOptimisticAggchainProofRequest, opts ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GenerateOptimisticAggchainProof") + } + + var r0 *proverv1.GenerateOptimisticAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) *proverv1.GenerateOptimisticAggchainProofResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateOptimisticAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateOptimisticAggchainProof' +type AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call struct { + *mock.Call +} + +// GenerateOptimisticAggchainProof is a helper method to define mock.On call +// - ctx context.Context +// - in *proverv1.GenerateOptimisticAggchainProofRequest +// - opts ...grpc.CallOption +func (_e *AggchainProofServiceClient_Expecter) GenerateOptimisticAggchainProof(ctx interface{}, in interface{}, opts ...interface{}) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + return &AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call{Call: _e.mock.On("GenerateOptimisticAggchainProof", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) Run(run func(ctx context.Context, in *proverv1.GenerateOptimisticAggchainProofRequest, opts ...grpc.CallOption)) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*proverv1.GenerateOptimisticAggchainProofRequest), variadicArgs...) + }) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) Return(_a0 *proverv1.GenerateOptimisticAggchainProofResponse, _a1 error) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error)) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// NewAggchainProofServiceClient creates a new instance of AggchainProofServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggchainProofServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *AggchainProofServiceClient { + mock := &AggchainProofServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go new file mode 100644 index 000000000..1b8617a37 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go @@ -0,0 +1,155 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + proverv1 "buf.build/gen/go/agglayer/provers/protocolbuffers/go/aggkit/prover/v1" + mock "github.com/stretchr/testify/mock" +) + +// AggchainProofServiceServer is an autogenerated mock type for the AggchainProofServiceServer type +type AggchainProofServiceServer struct { + mock.Mock +} + +type AggchainProofServiceServer_Expecter struct { + mock *mock.Mock +} + +func (_m *AggchainProofServiceServer) EXPECT() *AggchainProofServiceServer_Expecter { + return &AggchainProofServiceServer_Expecter{mock: &_m.Mock} +} + +// GenerateAggchainProof provides a mock function with given fields: _a0, _a1 +func (_m *AggchainProofServiceServer) GenerateAggchainProof(_a0 context.Context, _a1 *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GenerateAggchainProof") + } + + var r0 *proverv1.GenerateAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest) *proverv1.GenerateAggchainProofResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateAggchainProofRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceServer_GenerateAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateAggchainProof' +type AggchainProofServiceServer_GenerateAggchainProof_Call struct { + *mock.Call +} + +// GenerateAggchainProof is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *proverv1.GenerateAggchainProofRequest +func (_e *AggchainProofServiceServer_Expecter) GenerateAggchainProof(_a0 interface{}, _a1 interface{}) *AggchainProofServiceServer_GenerateAggchainProof_Call { + return &AggchainProofServiceServer_GenerateAggchainProof_Call{Call: _e.mock.On("GenerateAggchainProof", _a0, _a1)} +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) Run(run func(_a0 context.Context, _a1 *proverv1.GenerateAggchainProofRequest)) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*proverv1.GenerateAggchainProofRequest)) + }) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) Return(_a0 *proverv1.GenerateAggchainProofResponse, _a1 error) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error)) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// GenerateOptimisticAggchainProof provides a mock function with given fields: _a0, _a1 +func (_m *AggchainProofServiceServer) GenerateOptimisticAggchainProof(_a0 context.Context, _a1 *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GenerateOptimisticAggchainProof") + } + + var r0 *proverv1.GenerateOptimisticAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) *proverv1.GenerateOptimisticAggchainProofResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateOptimisticAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateOptimisticAggchainProof' +type AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call struct { + *mock.Call +} + +// GenerateOptimisticAggchainProof is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *proverv1.GenerateOptimisticAggchainProofRequest +func (_e *AggchainProofServiceServer_Expecter) GenerateOptimisticAggchainProof(_a0 interface{}, _a1 interface{}) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + return &AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call{Call: _e.mock.On("GenerateOptimisticAggchainProof", _a0, _a1)} +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) Run(run func(_a0 context.Context, _a1 *proverv1.GenerateOptimisticAggchainProofRequest)) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*proverv1.GenerateOptimisticAggchainProofRequest)) + }) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) Return(_a0 *proverv1.GenerateOptimisticAggchainProofResponse, _a1 error) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error)) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// NewAggchainProofServiceServer creates a new instance of AggchainProofServiceServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggchainProofServiceServer(t interface { + mock.TestingT + Cleanup(func()) +}) *AggchainProofServiceServer { + mock := &AggchainProofServiceServer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go b/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go new file mode 100644 index 000000000..9f081d656 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go @@ -0,0 +1,64 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// UnsafeAggchainProofServiceServer is an autogenerated mock type for the UnsafeAggchainProofServiceServer type +type UnsafeAggchainProofServiceServer struct { + mock.Mock +} + +type UnsafeAggchainProofServiceServer_Expecter struct { + mock *mock.Mock +} + +func (_m *UnsafeAggchainProofServiceServer) EXPECT() *UnsafeAggchainProofServiceServer_Expecter { + return &UnsafeAggchainProofServiceServer_Expecter{mock: &_m.Mock} +} + +// mustEmbedUnimplementedAggchainProofServiceServer provides a mock function with no fields +func (_m *UnsafeAggchainProofServiceServer) mustEmbedUnimplementedAggchainProofServiceServer() { + _m.Called() +} + +// UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mustEmbedUnimplementedAggchainProofServiceServer' +type UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call struct { + *mock.Call +} + +// mustEmbedUnimplementedAggchainProofServiceServer is a helper method to define mock.On call +func (_e *UnsafeAggchainProofServiceServer_Expecter) mustEmbedUnimplementedAggchainProofServiceServer() *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + return &UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call{Call: _e.mock.On("mustEmbedUnimplementedAggchainProofServiceServer")} +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) Run(run func()) *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) Return() *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Call.Return() + return _c +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) RunAndReturn(run func()) *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Run(run) + return _c +} + +// NewUnsafeAggchainProofServiceServer creates a new instance of UnsafeAggchainProofServiceServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnsafeAggchainProofServiceServer(t interface { + mock.TestingT + Cleanup(func()) +}) *UnsafeAggchainProofServiceServer { + mock := &UnsafeAggchainProofServiceServer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/download.go b/multidownloader/sync/download.go new file mode 100644 index 000000000..914925be3 --- /dev/null +++ b/multidownloader/sync/download.go @@ -0,0 +1,309 @@ +package multidownloader + +import ( + "context" + "errors" + "fmt" + "time" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/core/types" +) + +const ( + percentComplete = 100.0 +) + +var ( + ErrLogsNotAvailable = fmt.Errorf("logs not available") +) + +type Downloader struct { + mdr mdrsynctypes.MultidownloaderInterface + logger aggkitcommon.Logger + rh *sync.RetryHandler + appender sync.LogAppenderMap + // Maximum duration to wait to catch up the maximum request + waitPeriodToCatchUpMaximumLogRange time.Duration + pullingPeriod time.Duration +} + +func NewDownloader( + mdr mdrsynctypes.MultidownloaderInterface, + logger aggkitcommon.Logger, + rh *sync.RetryHandler, + appender sync.LogAppenderMap, + waitPeriodToCatchUpMaximumLogRange time.Duration, + pullingPeriod time.Duration, +) *Downloader { + return &Downloader{ + mdr: mdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: waitPeriodToCatchUpMaximumLogRange, + pullingPeriod: pullingPeriod, + } +} + +func (d *Downloader) Finality() aggkittypes.BlockNumberFinality { + return d.mdr.Finality() +} + +func (d *Downloader) DownloadNextBlocks(ctx context.Context, + lastBlockHeader *aggkittypes.BlockHeader, + maxBlocks uint64, + syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) { + // Check Context cancellation + if ctx.Err() != nil { + return nil, ctx.Err() + } + err := d.checkReorgedBlock(ctx, lastBlockHeader) + if err != nil { + return nil, err + } + maxLogQuery := d.newMaxLogQuery(lastBlockHeader, maxBlocks, syncerConfig) + var result *mdrsynctypes.DownloadResult + + // Create timeout timer once for the entire retry period + timeoutTimer := time.NewTimer(d.waitPeriodToCatchUpMaximumLogRange) + defer timeoutTimer.Stop() + waitingForLogs := true + // Retry loop: wait pullingPeriod between retries + for waitingForLogs { + pullingTimer := time.NewTimer(d.pullingPeriod) + err = d.checkReorgedBlock(ctx, lastBlockHeader) + if err != nil { + return nil, err + } + // Retry the query + result, err = d.executeLogQuery(ctx, maxLogQuery) + // Loop continues to check condition + if err == nil { + waitingForLogs = false + break + } + // The only allowed error is ErrLogsNotAvailable + if err != nil && !errors.Is(err, ErrLogsNotAvailable) { + return nil, err + } + select { + case <-pullingTimer.C: + pullingTimer.Stop() + // Check for reorg before retrying + + case <-timeoutTimer.C: + pullingTimer.Stop() + return nil, fmt.Errorf("DownloadNextBlocks: logs not available after waiting %s for %s: %w", + d.waitPeriodToCatchUpMaximumLogRange.String(), maxLogQuery.String(), ErrLogsNotAvailable) + case <-ctx.Done(): + pullingTimer.Stop() + return nil, fmt.Errorf("DownloadNextBlocks: "+ + "context done while waiting for logs %s to be available: %w", + maxLogQuery.String(), ctx.Err()) + } + } + + // TODO: Add extra empty block is is in unsafe zone + err = d.checkReorgedBlock(ctx, lastBlockHeader) + if err != nil { + return nil, err + } + if result == nil { + d.logger.Debugf("Downloader.DownloadNextBlocks: no logs found for blocks %s", maxLogQuery.BlockRange.String()) + result = &mdrsynctypes.DownloadResult{ + Data: nil, + PercentComplete: percentComplete, + } + } + return result, nil +} + +// executeLogQuery executes the log query, checking for partial availability +// if there are no logs available returns an error +func (d *Downloader) executeLogQuery(ctx context.Context, + fullLogQuery mdrtypes.LogQuery) (*mdrsynctypes.DownloadResult, error) { + logQuery := fullLogQuery + if !d.mdr.IsAvailable(fullLogQuery) { + isPartial, partialLogQuery := d.mdr.IsPartiallyAvailable(fullLogQuery) + if !isPartial { + return nil, fmt.Errorf("DownloadNextBlocks: logs not available for query: %s. Err: %w", fullLogQuery.String(), + ErrLogsNotAvailable) + } + logQuery = *partialLogQuery + } + + logQueryResponse, err := d.mdr.LogQuery(ctx, logQuery) + if err != nil { + return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: cannot get logs: %w", err) + } + totalLogs := logQueryResponse.CountLogs() + + result := &mdrsynctypes.DownloadResult{ + Data: d.logQueryResponseToEVMBlocks(ctx, logQueryResponse), + PercentComplete: 0.0, + } + err = d.addLastBlockIfNotIncluded(ctx, result, + logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange) + if err != nil { + return nil, fmt.Errorf("Downloader.executeLogQuery: adding last block: %w", err) + } + d.logger.Infof("Downloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs) + return result, nil +} +func (d *Downloader) addLastBlockIfNotIncluded(ctx context.Context, + result *mdrsynctypes.DownloadResult, + responseRange aggkitcommon.BlockRange, + unsafeRange aggkitcommon.BlockRange) error { + lastBlockNumber := responseRange.ToBlock + // If it's already included, return + for _, b := range result.Data { + if b.Num == lastBlockNumber { + return nil + } + } + + hdr, _, err := d.mdr.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber)) + if err != nil { + d.logger.Errorf("Downloader: error getting block header for block number %d: %v", lastBlockNumber, err) + return nil + } + if hdr == nil { + // Check that we are not in the unsafe zone. Because in that case we can't fake the Hash and it's an error + // because the block must in in storage + if unsafeRange.ContainsBlockNumber(lastBlockNumber) { + err := fmt.Errorf("Downloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber) + d.logger.Error(err) + return err + } + hdr = &aggkittypes.BlockHeader{ + Number: lastBlockNumber, + Hash: aggkitcommon.ZeroHash, + Time: 0, + ParentHash: nil, + } + } + // Add empty block + emptyBlock := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: lastBlockNumber, + Hash: hdr.Hash, + Timestamp: hdr.Time, + }, + Events: []interface{}{}, + } + if hdr.ParentHash != nil { + emptyBlock.ParentHash = *hdr.ParentHash + } + d.logger.Debugf("Downloader.addLastBlockIfNotIncluded: adding empty block number %d / %s", + lastBlockNumber, hdr.Hash.Hex()) + result.Data = append(result.Data, emptyBlock) + return nil +} + +func (d *Downloader) logQueryResponseToEVMBlocks( + ctx context.Context, response mdrtypes.LogQueryResponse) sync.EVMBlocks { + blocks := make(sync.EVMBlocks, 0, len(response.Blocks)) + for _, blockWithLogs := range response.Blocks { + evmBlock := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: blockWithLogs.Header.Number, + Hash: blockWithLogs.Header.Hash, + Timestamp: blockWithLogs.Header.Time, + }, + IsFinalizedBlock: blockWithLogs.IsFinal, + Events: []interface{}{}, + } + if blockWithLogs.Header.ParentHash != nil { + evmBlock.ParentHash = *blockWithLogs.Header.ParentHash + } + // Convert mdrtypes.Log to types.Log and append + for _, mdrLog := range blockWithLogs.Logs { + ethLog := types.Log{ + Address: mdrLog.Address, + Topics: mdrLog.Topics, + Data: mdrLog.Data, + BlockNumber: mdrLog.BlockNumber, + TxHash: mdrLog.TxHash, + TxIndex: mdrLog.TxIndex, + BlockHash: blockWithLogs.Header.Hash, + Index: mdrLog.Index, + Removed: mdrLog.Removed, + BlockTimestamp: mdrLog.BlockTimestamp, + } + d.appendLog(ctx, evmBlock, ethLog) + } + blocks = append(blocks, evmBlock) + } + return blocks +} + +func (d *Downloader) appendLog(ctx context.Context, block *sync.EVMBlock, log types.Log) { + appenderFn := d.appender[log.Topics[0]] + if appenderFn == nil { + // d.logger.Debugf("no appender function found for topic: %s", log.Topics[0].Hex()) + return + } + attempts := 0 + for { + err := appenderFn(block, log) + if err != nil { + attempts++ + d.logger.Errorf("error trying to append log (attempt %d): %v", attempts, err) + d.rh.Handle(ctx, "appendLogs", attempts) + continue + } + break + } +} + +// newMaxLogQuery creates a new LogQuery based on the syncerConfig and maxBlocks +func (d *Downloader) newMaxLogQuery(lastBlockHeader *aggkittypes.BlockHeader, + maxBlocks uint64, + syncerConfig aggkittypes.SyncerConfig) mdrtypes.LogQuery { + var fromBlock uint64 + if lastBlockHeader != nil { + fromBlock = lastBlockHeader.Number + 1 + } else { + fromBlock = syncerConfig.FromBlock + } + toBlock := fromBlock + maxBlocks - 1 + logQuery := mdrtypes.NewLogQuery(fromBlock, toBlock, syncerConfig.ContractAddresses) + return logQuery +} + +func (d *Downloader) checkReorgedBlock(ctx context.Context, + blockHeader *aggkittypes.BlockHeader) error { + // Check Context cancellation + if ctx.Err() != nil { + return ctx.Err() + } + // If blockHeader is nil, there's nothing to check + // if hash== ZeroHash means that is a 'fake' block added to mark the end of the log range + if blockHeader == nil || blockHeader.Hash == aggkitcommon.ZeroHash { + return nil + } + // Check blockHeader is not reorged + isValid, reorgChainID, err := d.mdr.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash) + if err != nil { + return err + } + if !isValid { + reorgData, err := d.mdr.GetReorgedDataByChainID(ctx, reorgChainID) + if err != nil { + return err + } + // TODO: if reorgData is nil?? can't happen + if reorgData == nil { + return fmt.Errorf("reorg data not found for chain ID %d", reorgChainID) + } + return mdrtypes.NewReorgedError(reorgData.BlockRangeAffected, reorgChainID, + fmt.Sprintf("block number %d is reorged", blockHeader.Number), + ) + } + return nil +} diff --git a/multidownloader/sync/download_test.go b/multidownloader/sync/download_test.go new file mode 100644 index 000000000..8c85489f4 --- /dev/null +++ b/multidownloader/sync/download_test.go @@ -0,0 +1,1061 @@ +package multidownloader + +// Unit tests for download.go +// Coverage: Most functions have 100% coverage including: +// - executeLogQuery: 100% +// - logsToEVMBlock: 100% +// - appendLog: 100% +// - newMaxLogQuery: 100% +// - checkReorgedBlock: 100% +// - DownloadNextBlocks: 91.3% (includes retry, timeout, and context cancellation scenarios) + +import ( + "context" + "fmt" + "testing" + "time" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/log" + mdrsynctypesmocks "github.com/agglayer/aggkit/multidownloader/sync/types/mocks" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestDownloadNextBlocks_Success(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + // Create a mock appender + appenderCalled := false + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + appenderCalled = true + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // Setup mocks + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil) + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true) + mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0xblock101"), + Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 101, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 101, ToBlock: 110}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 1100, + }, mdrtypes.Finalized, nil) + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(101), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) + require.True(t, appenderCalled) +} + +func TestDownloadNextBlocks_ContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Equal(t, context.Canceled, err) +} + +func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + reorgData := &mdrtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + // Setup mocks - reorg detected + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil) + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.True(t, mdrtypes.IsReorgedError(err)) + reorgErr := mdrtypes.CastReorgedError(err) + require.Equal(t, uint64(1), reorgErr.ReorgedChainID) +} + +func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // Setup mocks + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true) + mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 50, + Hash: common.HexToHash("0xblock50"), + Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 50, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 50, ToBlock: 59}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 59, + Hash: common.HexToHash("0xblock59"), + Time: 1090, + }, mdrtypes.Finalized, nil) + + result, err := download.DownloadNextBlocks(ctx, nil, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(50), result.Data[0].Num) + require.Equal(t, uint64(59), result.Data[1].Num) +} + +func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 500 * time.Millisecond, + pullingPeriod: 50 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // First call: checkReorgedBlock returns valid + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // First executeLogQuery: logs not available + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() + + // Second iteration in retry loop + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // Second executeLogQuery: logs now available + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true).Once() + mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0xblock101"), + Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 101, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 101, ToBlock: 110}, + }, nil).Once() + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 1100, + }, mdrtypes.Finalized, nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(101), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) +} + +func TestDownloadNextBlocks_TimeoutWaitingForLogs(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 100 * time.Millisecond, + pullingPeriod: 200 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // First call: checkReorgedBlock returns valid + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // executeLogQuery called twice: once initially (line 40), once in for loop header (line 45) + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Times(2) + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Times(2) + + // After timeout breaks the loop, calls final checkReorgedBlock + // which overwrites err. If checkReorgedBlock succeeds, returns result (nil) with nil error + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + // After timeout and successful checkReorgedBlock, returns empty result with nil error + require.NoError(t, err) + require.NotNil(t, result) + require.Nil(t, result.Data) + require.Equal(t, 100.0, result.PercentComplete) +} + +func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 5 * time.Second, + pullingPeriod: 50 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // First call: checkReorgedBlock returns valid + mockMdr.EXPECT().CheckValidBlock(mock.Anything, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // executeLogQuery: logs not available (may be called multiple times depending on timing) + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Maybe() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Maybe() + + // During retry loop, cancel the context after a short delay + go func() { + time.Sleep(30 * time.Millisecond) + cancel() + }() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "context") +} + +func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 500 * time.Millisecond, + pullingPeriod: 30 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + reorgData := &mdrtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + // First call: checkReorgedBlock returns valid + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // executeLogQuery called twice: once initially (line 40), once in for loop header (line 45) + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Times(2) + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Times(2) + + // In retry loop after timer fires: reorg detected during checkReorgedBlock + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil).Once() + mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.True(t, mdrtypes.IsReorgedError(err)) +} + +func TestExecuteLogQuery_FullyAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) + mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 105, + Hash: common.HexToHash("0xblock105"), + Time: 2000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 105, + BlockTimestamp: 2000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 100, ToBlock: 110}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, mdrtypes.Finalized, nil) + + result, err := download.executeLogQuery(ctx, logQuery) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(105), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) +} + +func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + partialQuery := mdrtypes.NewLogQuery(100, 105, []common.Address{common.HexToAddress("0x123")}) + + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) + mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(true, &partialQuery) + mockMdr.EXPECT().LogQuery(ctx, partialQuery).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 103, + Hash: common.HexToHash("0xblock103"), + Time: 2000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 103, + BlockTimestamp: 2000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 100, ToBlock: 105}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, mdrtypes.Finalized, nil) + + result, err := download.executeLogQuery(ctx, logQuery) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(103), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) +} + +func TestExecuteLogQuery_NotAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) + mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(false, nil) + + result, err := download.executeLogQuery(ctx, logQuery) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "logs not available") +} + +func TestExecuteLogQuery_GetEthLogsError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) + mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{}, fmt.Errorf("database error")) + + result, err := download.executeLogQuery(ctx, logQuery) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "cannot get logs") +} + +func TestNewMaxLogQuery_WithLastBlockHeader(t *testing.T) { + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: nil, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + query := download.newMaxLogQuery(lastBlockHeader, 10, syncerConfig) + + require.Equal(t, uint64(101), query.BlockRange.FromBlock) + require.Equal(t, uint64(110), query.BlockRange.ToBlock) + require.Equal(t, syncerConfig.ContractAddresses, query.Addrs) +} + +func TestNewMaxLogQuery_WithoutLastBlockHeader(t *testing.T) { + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: nil, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + query := download.newMaxLogQuery(nil, 10, syncerConfig) + + require.Equal(t, uint64(50), query.BlockRange.FromBlock) + require.Equal(t, uint64(59), query.BlockRange.ToBlock) + require.Equal(t, syncerConfig.ContractAddresses, query.Addrs) +} + +func TestCheckReorgedBlock_NilBlockHeader(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + // When blockHeader is nil, no reorg check should be performed + err := download.checkReorgedBlock(ctx, nil) + + require.NoError(t, err) +} + +func TestCheckReorgedBlock_ValidBlock(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(true, uint64(0), nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.NoError(t, err) +} + +func TestCheckReorgedBlock_InvalidBlock(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + reorgData := &mdrtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.True(t, mdrtypes.IsReorgedError(err)) + reorgErr := mdrtypes.CastReorgedError(err) + require.Equal(t, uint64(1), reorgErr.ReorgedChainID) +} + +func TestCheckReorgedBlock_ContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Equal(t, context.Canceled, err) +} + +func TestCheckReorgedBlock_CheckValidBlockError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(0), fmt.Errorf("check error")) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "check error") +} + +func TestCheckReorgedBlock_GetReorgedDataError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(nil, fmt.Errorf("database error")) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "database error") +} + +func TestCheckReorgedBlock_NilReorgData(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &Downloader{ + mdr: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(nil, nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "reorg data not found") +} + +func TestAppendLog_Success(t *testing.T) { + ctx := context.Background() + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + callCount := 0 + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + callCount++ + b.Events = append(b.Events, "event") + return nil + }, + } + + download := &Downloader{ + mdr: nil, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + block := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: 100, + }, + Events: []interface{}{}, + } + + log := types.Log{ + BlockNumber: 100, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + } + + download.appendLog(ctx, block, log) + + require.Equal(t, 1, callCount) + require.Len(t, block.Events, 1) +} + +func TestAppendLog_RetryOnError(t *testing.T) { + ctx := context.Background() + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + callCount := 0 + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + callCount++ + if callCount < 3 { + return fmt.Errorf("temporary error") + } + b.Events = append(b.Events, "event") + return nil + }, + } + + download := &Downloader{ + mdr: nil, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + block := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: 100, + }, + Events: []interface{}{}, + } + + log := types.Log{ + BlockNumber: 100, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + } + + download.appendLog(ctx, block, log) + + require.Equal(t, 3, callCount) + require.Len(t, block.Events, 1) +} diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go new file mode 100644 index 000000000..5ef28a8e2 --- /dev/null +++ b/multidownloader/sync/evmdriver.go @@ -0,0 +1,155 @@ +package multidownloader + +import ( + "context" + "errors" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type EVMDriver struct { + processor mdrsynctypes.ProcessorInterface + downloader mdrsynctypes.DownloaderInterface + syncerConfig aggkittypes.SyncerConfig + rh *sync.RetryHandler + logger aggkitcommon.Logger + + syncBlockChunkSize uint64 +} + +func NewEVMDriver(processor mdrsynctypes.ProcessorInterface, + downloader mdrsynctypes.DownloaderInterface, + syncerConfig aggkittypes.SyncerConfig, + syncBlockChunkSize uint64, + rh *sync.RetryHandler, + logger aggkitcommon.Logger) *EVMDriver { + return &EVMDriver{ + processor: processor, + downloader: downloader, + syncerConfig: syncerConfig, + syncBlockChunkSize: syncBlockChunkSize, + rh: rh, + logger: logger, + } +} + +func (d *EVMDriver) Sync(ctx context.Context) { +reset: + // TODO: Add if err = d.compatibilityChecker.Check(ctx, nil); err != nil { + for { + if ctx.Err() != nil { + d.logger.Info("context cancelled") + return + } + lastBlockHeader := d.getLastProcessedBlock(ctx) + if lastBlockHeader == nil { + d.logger.Info("no last processed block found, starting from beginning") + } else { + d.logger.Infof("EVMDriver.Sync: starting sync from last processed block: %d", lastBlockHeader.Number) + } + + blocks, err := d.downloader.DownloadNextBlocks(ctx, + lastBlockHeader, + d.syncBlockChunkSize, + d.syncerConfig) + + if err != nil && mdrtypes.IsReorgedError(err) { + err := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)) + if err != nil { + d.logger.Error("error handling reorg: ", err) + d.rh.Handle(ctx, "Sync", 0) + continue + } + goto reset + } + + if err != nil && !errors.Is(err, ErrLogsNotAvailable) { + d.logger.Error("error downloading next blocks: ", err) + d.rh.Handle(ctx, "Sync", 0) + continue + } + if errors.Is(err, ErrLogsNotAvailable) { + // No logs available yet, wait and retry + d.logger.Debugf("no logs available yet, waiting to retry") + d.rh.Handle(ctx, "Sync", 0) + continue + } + err = d.ProcessBlocks(ctx, blocks) + if err != nil { + d.logger.Error("error processing blocks: ", err) + d.rh.Handle(ctx, "Sync", 0) + continue + } + } +} + +func (d *EVMDriver) ProcessBlocks(ctx context.Context, b *mdrsynctypes.DownloadResult) error { + if b == nil || len(b.Data) == 0 { + return nil + } + for _, block := range b.Data { + err := d.processBlock(ctx, block) + if err != nil { + return err + } + } + return nil +} + +func (d *EVMDriver) processBlock(ctx context.Context, b *sync.EVMBlock) error { + return d.withRetry(ctx, "processBlock", func() error { + block := sync.Block{ + Num: b.Num, + Hash: b.Hash, + Events: b.Events, + } + return d.processor.ProcessBlock(ctx, block) + }) +} + +func (d *EVMDriver) handleReorg(ctx context.Context, err *mdrtypes.ReorgedError) error { + d.logger.Warnf("reorg detected: %s", err.Error()) + return d.withRetry(ctx, "handleReorg", func() error { + return d.processor.Reorg(ctx, err.BlockRangeReorged.FromBlock) + }) +} + +func (d *EVMDriver) getLastProcessedBlock(ctx context.Context) *aggkittypes.BlockHeader { + attempts := 0 + for { + // TODO: Case header == nil -> ? + header, err := d.processor.GetLastProcessedBlockHeader(ctx) + if err != nil { + attempts++ + d.logger.Error("error getting last processed block: ", err) + d.rh.Handle(ctx, "Sync", attempts) + continue + } + return header + } +} + +// withRetry is a helper wrapper function that invokes the fn callback on failed attempts +func (d *EVMDriver) withRetry(ctx context.Context, opName string, fn func() error) error { + attempts := 0 + for { + select { + case <-ctx.Done(): + d.logger.Warnf("context canceled during %s", opName) + return nil + default: + err := fn() + if err != nil { + attempts++ + d.logger.Errorf("error during %s (attempt %d): %v", opName, attempts, err) + d.rh.Handle(ctx, opName, attempts) + } else { + return nil + } + } + } +} diff --git a/multidownloader/sync/types/evm_downloader.go b/multidownloader/sync/types/evm_downloader.go new file mode 100644 index 000000000..54f04851b --- /dev/null +++ b/multidownloader/sync/types/evm_downloader.go @@ -0,0 +1,50 @@ +package types + +import ( + "context" + + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type DownloadResult struct { + Data sync.EVMBlocks + // PercentComplete indicates the percent of completion of the download + // 0 -> 0%, 100 -> 100% + PercentComplete float64 +} + +func (d *DownloadResult) AnyUnsafeBlock() bool { + if d == nil || len(d.Data) == 0 { + return false + } + for _, b := range d.Data { + if !b.IsFinalizedBlock { + return true + } + } + return false +} + +type DownloaderInterface interface { + // DownloadNextBlocks downloads the next blocks starting from fromBlockHeader + // up to maxBlocks, according to the syncerConfig + // parameters: + // - fromBlockHeader: the block header to start downloading from (exclusive) + // If it's nil means that there are no previous blocks processed + // - maxBlocks: the maximum number of blocks to return (it could return less or none) + // - syncerConfig: the syncer configuration + // returns: + // - DownloadResult: the result of the download, containing the blocks and the percent complete + // DownloadResult is never nil + // DownloadResult.Data could be nil if no blocks were downloaded + // DownloadResult.PercentComplete indicates the percent of completion of the download + // 0 -> 0%, 100 -> 100% + // - error: if any error occurred during the download + // special error: errors.Is(err, ErrLogsNotAvailable) indicates that it works + // but there are no logs yet + DownloadNextBlocks(ctx context.Context, + fromBlockHeader *aggkittypes.BlockHeader, + maxBlocks uint64, + syncerConfig aggkittypes.SyncerConfig) (*DownloadResult, error) +} diff --git a/multidownloader/sync/types/evm_multidownloader.go b/multidownloader/sync/types/evm_multidownloader.go new file mode 100644 index 000000000..618ed779e --- /dev/null +++ b/multidownloader/sync/types/evm_multidownloader.go @@ -0,0 +1,31 @@ +package types + +import ( + "context" + + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" +) + +type MultidownloaderInterface interface { + // CheckValidBlock checks if the given blockNumber and blockHash are still valid + // returns: isValid bool, reorgChainID uint64, err error + CheckValidBlock(ctx context.Context, blockNumber uint64, + blockHash common.Hash) (bool, uint64, error) + // GetReorgedDataByChainID retrieves the reorged data by chain ID + GetReorgedDataByChainID(ctx context.Context, reorgedChainID uint64) (*mdrtypes.ReorgData, error) + // IsAvailable checks if the logs for the given query are available + IsAvailable(query mdrtypes.LogQuery) bool + // IsPartiallyAvailable checks if the logs for the given query are partially available + IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) + // GetEthLogs retrieves the logs for the given query + LogQuery(ctx context.Context, query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) + // Finality is which block to consider final (typically finalizedBlock) + Finality() aggkittypes.BlockNumberFinality + // HeaderByNumber gets the block header for the given block number finality + HeaderByNumber(ctx context.Context, + number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) + StorageHeaderByNumber(ctx context.Context, + number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error) +} diff --git a/multidownloader/sync/types/mocks/mock_downloader_interface.go b/multidownloader/sync/types/mocks/mock_downloader_interface.go new file mode 100644 index 000000000..d0b7afd2e --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_downloader_interface.go @@ -0,0 +1,100 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + synctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/types" +) + +// DownloaderInterface is an autogenerated mock type for the DownloaderInterface type +type DownloaderInterface struct { + mock.Mock +} + +type DownloaderInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *DownloaderInterface) EXPECT() *DownloaderInterface_Expecter { + return &DownloaderInterface_Expecter{mock: &_m.Mock} +} + +// DownloadNextBlocks provides a mock function with given fields: ctx, fromBlockHeader, maxBlocks, syncerConfig +func (_m *DownloaderInterface) DownloadNextBlocks(ctx context.Context, fromBlockHeader *types.BlockHeader, maxBlocks uint64, syncerConfig types.SyncerConfig) (*synctypes.DownloadResult, error) { + ret := _m.Called(ctx, fromBlockHeader, maxBlocks, syncerConfig) + + if len(ret) == 0 { + panic("no return value specified for DownloadNextBlocks") + } + + var r0 *synctypes.DownloadResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) (*synctypes.DownloadResult, error)); ok { + return rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) *synctypes.DownloadResult); ok { + r0 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synctypes.DownloadResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) error); ok { + r1 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DownloaderInterface_DownloadNextBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DownloadNextBlocks' +type DownloaderInterface_DownloadNextBlocks_Call struct { + *mock.Call +} + +// DownloadNextBlocks is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockHeader *types.BlockHeader +// - maxBlocks uint64 +// - syncerConfig types.SyncerConfig +func (_e *DownloaderInterface_Expecter) DownloadNextBlocks(ctx interface{}, fromBlockHeader interface{}, maxBlocks interface{}, syncerConfig interface{}) *DownloaderInterface_DownloadNextBlocks_Call { + return &DownloaderInterface_DownloadNextBlocks_Call{Call: _e.mock.On("DownloadNextBlocks", ctx, fromBlockHeader, maxBlocks, syncerConfig)} +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Run(run func(ctx context.Context, fromBlockHeader *types.BlockHeader, maxBlocks uint64, syncerConfig types.SyncerConfig)) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.BlockHeader), args[2].(uint64), args[3].(types.SyncerConfig)) + }) + return _c +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Return(_a0 *synctypes.DownloadResult, _a1 error) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) RunAndReturn(run func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) (*synctypes.DownloadResult, error)) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Return(run) + return _c +} + +// NewDownloaderInterface creates a new instance of DownloaderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDownloaderInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *DownloaderInterface { + mock := &DownloaderInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go new file mode 100644 index 000000000..3c34cb95f --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go @@ -0,0 +1,496 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + aggkittypes "github.com/agglayer/aggkit/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" +) + +// MultidownloaderInterface is an autogenerated mock type for the MultidownloaderInterface type +type MultidownloaderInterface struct { + mock.Mock +} + +type MultidownloaderInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MultidownloaderInterface) EXPECT() *MultidownloaderInterface_Expecter { + return &MultidownloaderInterface_Expecter{mock: &_m.Mock} +} + +// CheckValidBlock provides a mock function with given fields: ctx, blockNumber, blockHash +func (_m *MultidownloaderInterface) CheckValidBlock(ctx context.Context, blockNumber uint64, blockHash common.Hash) (bool, uint64, error) { + ret := _m.Called(ctx, blockNumber, blockHash) + + if len(ret) == 0 { + panic("no return value specified for CheckValidBlock") + } + + var r0 bool + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, common.Hash) (bool, uint64, error)); ok { + return rf(ctx, blockNumber, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, common.Hash) bool); ok { + r0 = rf(ctx, blockNumber, blockHash) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, common.Hash) uint64); ok { + r1 = rf(ctx, blockNumber, blockHash) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, common.Hash) error); ok { + r2 = rf(ctx, blockNumber, blockHash) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultidownloaderInterface_CheckValidBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckValidBlock' +type MultidownloaderInterface_CheckValidBlock_Call struct { + *mock.Call +} + +// CheckValidBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - blockHash common.Hash +func (_e *MultidownloaderInterface_Expecter) CheckValidBlock(ctx interface{}, blockNumber interface{}, blockHash interface{}) *MultidownloaderInterface_CheckValidBlock_Call { + return &MultidownloaderInterface_CheckValidBlock_Call{Call: _e.mock.On("CheckValidBlock", ctx, blockNumber, blockHash)} +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) Run(run func(ctx context.Context, blockNumber uint64, blockHash common.Hash)) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(common.Hash)) + }) + return _c +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) Return(_a0 bool, _a1 uint64, _a2 error) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) RunAndReturn(run func(context.Context, uint64, common.Hash) (bool, uint64, error)) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Return(run) + return _c +} + +// Finality provides a mock function with no fields +func (_m *MultidownloaderInterface) Finality() aggkittypes.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Finality") + } + + var r0 aggkittypes.BlockNumberFinality + if rf, ok := ret.Get(0).(func() aggkittypes.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(aggkittypes.BlockNumberFinality) + } + + return r0 +} + +// MultidownloaderInterface_Finality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finality' +type MultidownloaderInterface_Finality_Call struct { + *mock.Call +} + +// Finality is a helper method to define mock.On call +func (_e *MultidownloaderInterface_Expecter) Finality() *MultidownloaderInterface_Finality_Call { + return &MultidownloaderInterface_Finality_Call{Call: _e.mock.On("Finality")} +} + +func (_c *MultidownloaderInterface_Finality_Call) Run(run func()) *MultidownloaderInterface_Finality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MultidownloaderInterface_Finality_Call) Return(_a0 aggkittypes.BlockNumberFinality) *MultidownloaderInterface_Finality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultidownloaderInterface_Finality_Call) RunAndReturn(run func() aggkittypes.BlockNumberFinality) *MultidownloaderInterface_Finality_Call { + _c.Call.Return(run) + return _c +} + +// GetReorgedDataByChainID provides a mock function with given fields: ctx, reorgedChainID +func (_m *MultidownloaderInterface) GetReorgedDataByChainID(ctx context.Context, reorgedChainID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(ctx, reorgedChainID) + + if len(ret) == 0 { + panic("no return value specified for GetReorgedDataByChainID") + } + + var r0 *multidownloadertypes.ReorgData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)); ok { + return rf(ctx, reorgedChainID) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *multidownloadertypes.ReorgData); ok { + r0 = rf(ctx, reorgedChainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.ReorgData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, reorgedChainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_GetReorgedDataByChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByChainID' +type MultidownloaderInterface_GetReorgedDataByChainID_Call struct { + *mock.Call +} + +// GetReorgedDataByChainID is a helper method to define mock.On call +// - ctx context.Context +// - reorgedChainID uint64 +func (_e *MultidownloaderInterface_Expecter) GetReorgedDataByChainID(ctx interface{}, reorgedChainID interface{}) *MultidownloaderInterface_GetReorgedDataByChainID_Call { + return &MultidownloaderInterface_GetReorgedDataByChainID_Call{Call: _e.mock.On("GetReorgedDataByChainID", ctx, reorgedChainID)} +} + +func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) Run(run func(ctx context.Context, reorgedChainID uint64)) *MultidownloaderInterface_GetReorgedDataByChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *MultidownloaderInterface_GetReorgedDataByChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) RunAndReturn(run func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)) *MultidownloaderInterface_GetReorgedDataByChainID_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultidownloaderInterface) HeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) *aggkittypes.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type MultidownloaderInterface_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *aggkittypes.BlockNumberFinality +func (_e *MultidownloaderInterface_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *MultidownloaderInterface_HeaderByNumber_Call { + return &MultidownloaderInterface_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) Run(run func(ctx context.Context, number *aggkittypes.BlockNumberFinality)) *MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*aggkittypes.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 error) *MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error)) *MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// IsAvailable provides a mock function with given fields: query +func (_m *MultidownloaderInterface) IsAvailable(query multidownloadertypes.LogQuery) bool { + ret := _m.Called(query) + + if len(ret) == 0 { + panic("no return value specified for IsAvailable") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) bool); ok { + r0 = rf(query) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MultidownloaderInterface_IsAvailable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsAvailable' +type MultidownloaderInterface_IsAvailable_Call struct { + *mock.Call +} + +// IsAvailable is a helper method to define mock.On call +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) IsAvailable(query interface{}) *MultidownloaderInterface_IsAvailable_Call { + return &MultidownloaderInterface_IsAvailable_Call{Call: _e.mock.On("IsAvailable", query)} +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) Run(run func(query multidownloadertypes.LogQuery)) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) Return(_a0 bool) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) RunAndReturn(run func(multidownloadertypes.LogQuery) bool) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Return(run) + return _c +} + +// IsPartiallyAvailable provides a mock function with given fields: query +func (_m *MultidownloaderInterface) IsPartiallyAvailable(query multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery) { + ret := _m.Called(query) + + if len(ret) == 0 { + panic("no return value specified for IsPartiallyAvailable") + } + + var r0 bool + var r1 *multidownloadertypes.LogQuery + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery)); ok { + return rf(query) + } + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) bool); ok { + r0 = rf(query) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(multidownloadertypes.LogQuery) *multidownloadertypes.LogQuery); ok { + r1 = rf(query) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*multidownloadertypes.LogQuery) + } + } + + return r0, r1 +} + +// MultidownloaderInterface_IsPartiallyAvailable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsPartiallyAvailable' +type MultidownloaderInterface_IsPartiallyAvailable_Call struct { + *mock.Call +} + +// IsPartiallyAvailable is a helper method to define mock.On call +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) IsPartiallyAvailable(query interface{}) *MultidownloaderInterface_IsPartiallyAvailable_Call { + return &MultidownloaderInterface_IsPartiallyAvailable_Call{Call: _e.mock.On("IsPartiallyAvailable", query)} +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) Run(run func(query multidownloadertypes.LogQuery)) *MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) Return(_a0 bool, _a1 *multidownloadertypes.LogQuery) *MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) RunAndReturn(run func(multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery)) *MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Return(run) + return _c +} + +// LogQuery provides a mock function with given fields: ctx, query +func (_m *MultidownloaderInterface) LogQuery(ctx context.Context, query multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error) { + ret := _m.Called(ctx, query) + + if len(ret) == 0 { + panic("no return value specified for LogQuery") + } + + var r0 multidownloadertypes.LogQueryResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)); ok { + return rf(ctx, query) + } + if rf, ok := ret.Get(0).(func(context.Context, multidownloadertypes.LogQuery) multidownloadertypes.LogQueryResponse); ok { + r0 = rf(ctx, query) + } else { + r0 = ret.Get(0).(multidownloadertypes.LogQueryResponse) + } + + if rf, ok := ret.Get(1).(func(context.Context, multidownloadertypes.LogQuery) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_LogQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LogQuery' +type MultidownloaderInterface_LogQuery_Call struct { + *mock.Call +} + +// LogQuery is a helper method to define mock.On call +// - ctx context.Context +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) LogQuery(ctx interface{}, query interface{}) *MultidownloaderInterface_LogQuery_Call { + return &MultidownloaderInterface_LogQuery_Call{Call: _e.mock.On("LogQuery", ctx, query)} +} + +func (_c *MultidownloaderInterface_LogQuery_Call) Run(run func(ctx context.Context, query multidownloadertypes.LogQuery)) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_LogQuery_Call) Return(_a0 multidownloadertypes.LogQueryResponse, _a1 error) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_LogQuery_Call) RunAndReturn(run func(context.Context, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Return(run) + return _c +} + +// StorageHeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultidownloaderInterface) StorageHeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for StorageHeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 multidownloadertypes.FinalizedType + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) *aggkittypes.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockNumberFinality) multidownloadertypes.FinalizedType); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) + } + + if rf, ok := ret.Get(2).(func(context.Context, *aggkittypes.BlockNumberFinality) error); ok { + r2 = rf(ctx, number) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultidownloaderInterface_StorageHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StorageHeaderByNumber' +type MultidownloaderInterface_StorageHeaderByNumber_Call struct { + *mock.Call +} + +// StorageHeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *aggkittypes.BlockNumberFinality +func (_e *MultidownloaderInterface_Expecter) StorageHeaderByNumber(ctx interface{}, number interface{}) *MultidownloaderInterface_StorageHeaderByNumber_Call { + return &MultidownloaderInterface_StorageHeaderByNumber_Call{Call: _e.mock.On("StorageHeaderByNumber", ctx, number)} +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) Run(run func(ctx context.Context, number *aggkittypes.BlockNumberFinality)) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*aggkittypes.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewMultidownloaderInterface creates a new instance of MultidownloaderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMultidownloaderInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MultidownloaderInterface { + mock := &MultidownloaderInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/types/mocks/mock_processor_interface.go b/multidownloader/sync/types/mocks/mock_processor_interface.go new file mode 100644 index 000000000..cdaa5a1ed --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_processor_interface.go @@ -0,0 +1,191 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + sync "github.com/agglayer/aggkit/sync" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/types" +) + +// ProcessorInterface is an autogenerated mock type for the ProcessorInterface type +type ProcessorInterface struct { + mock.Mock +} + +type ProcessorInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ProcessorInterface) EXPECT() *ProcessorInterface_Expecter { + return &ProcessorInterface_Expecter{mock: &_m.Mock} +} + +// GetLastProcessedBlockHeader provides a mock function with given fields: ctx +func (_m *ProcessorInterface) GetLastProcessedBlockHeader(ctx context.Context) (*types.BlockHeader, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlockHeader") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.BlockHeader, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.BlockHeader); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessorInterface_GetLastProcessedBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlockHeader' +type ProcessorInterface_GetLastProcessedBlockHeader_Call struct { + *mock.Call +} + +// GetLastProcessedBlockHeader is a helper method to define mock.On call +// - ctx context.Context +func (_e *ProcessorInterface_Expecter) GetLastProcessedBlockHeader(ctx interface{}) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + return &ProcessorInterface_GetLastProcessedBlockHeader_Call{Call: _e.mock.On("GetLastProcessedBlockHeader", ctx)} +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) Run(run func(ctx context.Context)) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) Return(_a0 *types.BlockHeader, _a1 error) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) RunAndReturn(run func(context.Context) (*types.BlockHeader, error)) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Return(run) + return _c +} + +// ProcessBlock provides a mock function with given fields: ctx, block +func (_m *ProcessorInterface) ProcessBlock(ctx context.Context, block sync.Block) error { + ret := _m.Called(ctx, block) + + if len(ret) == 0 { + panic("no return value specified for ProcessBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, sync.Block) error); ok { + r0 = rf(ctx, block) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessorInterface_ProcessBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlock' +type ProcessorInterface_ProcessBlock_Call struct { + *mock.Call +} + +// ProcessBlock is a helper method to define mock.On call +// - ctx context.Context +// - block sync.Block +func (_e *ProcessorInterface_Expecter) ProcessBlock(ctx interface{}, block interface{}) *ProcessorInterface_ProcessBlock_Call { + return &ProcessorInterface_ProcessBlock_Call{Call: _e.mock.On("ProcessBlock", ctx, block)} +} + +func (_c *ProcessorInterface_ProcessBlock_Call) Run(run func(ctx context.Context, block sync.Block)) *ProcessorInterface_ProcessBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sync.Block)) + }) + return _c +} + +func (_c *ProcessorInterface_ProcessBlock_Call) Return(_a0 error) *ProcessorInterface_ProcessBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorInterface_ProcessBlock_Call) RunAndReturn(run func(context.Context, sync.Block) error) *ProcessorInterface_ProcessBlock_Call { + _c.Call.Return(run) + return _c +} + +// Reorg provides a mock function with given fields: ctx, firstReorgedBlock +func (_m *ProcessorInterface) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + ret := _m.Called(ctx, firstReorgedBlock) + + if len(ret) == 0 { + panic("no return value specified for Reorg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, firstReorgedBlock) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessorInterface_Reorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reorg' +type ProcessorInterface_Reorg_Call struct { + *mock.Call +} + +// Reorg is a helper method to define mock.On call +// - ctx context.Context +// - firstReorgedBlock uint64 +func (_e *ProcessorInterface_Expecter) Reorg(ctx interface{}, firstReorgedBlock interface{}) *ProcessorInterface_Reorg_Call { + return &ProcessorInterface_Reorg_Call{Call: _e.mock.On("Reorg", ctx, firstReorgedBlock)} +} + +func (_c *ProcessorInterface_Reorg_Call) Run(run func(ctx context.Context, firstReorgedBlock uint64)) *ProcessorInterface_Reorg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ProcessorInterface_Reorg_Call) Return(_a0 error) *ProcessorInterface_Reorg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorInterface_Reorg_Call) RunAndReturn(run func(context.Context, uint64) error) *ProcessorInterface_Reorg_Call { + _c.Call.Return(run) + return _c +} + +// NewProcessorInterface creates a new instance of ProcessorInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProcessorInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ProcessorInterface { + mock := &ProcessorInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/types/processor.go b/multidownloader/sync/types/processor.go new file mode 100644 index 000000000..1249da739 --- /dev/null +++ b/multidownloader/sync/types/processor.go @@ -0,0 +1,14 @@ +package types + +import ( + "context" + + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ProcessorInterface interface { + GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) + ProcessBlock(ctx context.Context, block sync.Block) error + Reorg(ctx context.Context, firstReorgedBlock uint64) error +} diff --git a/multidownloader/types/log_query_response.go b/multidownloader/types/log_query_response.go new file mode 100644 index 000000000..acc0f8027 --- /dev/null +++ b/multidownloader/types/log_query_response.go @@ -0,0 +1,59 @@ +package types + +import ( + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" +) + +type Log struct { + // Consensus fields: + // address of the contract that generated the event + Address common.Address `json:"address" gencodec:"required"` + // list of topics provided by the contract. + Topics []common.Hash `json:"topics" gencodec:"required"` + // supplied by the contract, usually ABI-encoded + Data []byte `json:"data" gencodec:"required"` + + // Derived fields. These fields are filled in by the node + // but not secured by consensus. + // block in which the transaction was included + BlockNumber uint64 `json:"blockNumber" rlp:"-"` + // hash of the transaction + TxHash common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"` + // index of the transaction in the block + TxIndex uint `json:"transactionIndex" rlp:"-"` + // timestamp of the block in which the transaction was included + BlockTimestamp uint64 `json:"blockTimestamp" rlp:"-"` + // index of the log in the block + Index uint `json:"logIndex" rlp:"-"` + + // The Removed field is true if this log was reverted due to a chain reorganisation. + // You must pay attention to this field if you receive logs through a filter query. + Removed bool `json:"removed" rlp:"-"` +} + +type BlockWithLogs struct { + Header aggkittypes.BlockHeader + IsFinal bool + Logs []Log +} + +type LogQueryResponse struct { + Blocks []BlockWithLogs + // ResponseRange indicates the block range covered by the response, even if blocks are empty + ResponseRange aggkitcommon.BlockRange + // UnsafeRange indicates the block range that are in unsafe zone (not finalized) + UnsafeRange aggkitcommon.BlockRange +} + +func (lqr *LogQueryResponse) CountLogs() int { + if lqr == nil { + return 0 + } + count := 0 + for _, block := range lqr.Blocks { + count += len(block.Logs) + } + return count +} diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go index e9b869d3e..15db004ab 100644 --- a/multidownloader/types/mocks/mock_reorg_processor.go +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index 5335fff63..101d38134 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks @@ -31,7 +31,7 @@ func (_m *Storager) EXPECT() *Storager_Expecter { } // GetBlockHeaderByNumber provides a mock function with given fields: tx, blockNumber -func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) { +func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { ret := _m.Called(tx, blockNumber) if len(ret) == 0 { @@ -39,9 +39,9 @@ func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) } var r0 *aggkittypes.BlockHeader - var r1 bool + var r1 multidownloadertypes.FinalizedType var r2 error - if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, bool, error)); ok { + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { return rf(tx, blockNumber) } if rf, ok := ret.Get(0).(func(types.Querier, uint64) *aggkittypes.BlockHeader); ok { @@ -52,10 +52,10 @@ func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) } } - if rf, ok := ret.Get(1).(func(types.Querier, uint64) bool); ok { + if rf, ok := ret.Get(1).(func(types.Querier, uint64) multidownloadertypes.FinalizedType); ok { r1 = rf(tx, blockNumber) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) } if rf, ok := ret.Get(2).(func(types.Querier, uint64) error); ok { @@ -86,12 +86,12 @@ func (_c *Storager_GetBlockHeaderByNumber_Call) Run(run func(tx types.Querier, b return _c } -func (_c *Storager_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 bool, _a2 error) *Storager_GetBlockHeaderByNumber_Call { +func (_c *Storager_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *Storager_GetBlockHeaderByNumber_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, bool, error)) *Storager_GetBlockHeaderByNumber_Call { +func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *Storager_GetBlockHeaderByNumber_Call { _c.Call.Return(run) return _c } @@ -403,6 +403,65 @@ func (_c *Storager_GetRangeBlockHeader_Call) RunAndReturn(run func(types.Querier return _c } +// GetReorgedDataByChainID provides a mock function with given fields: tx, reorgedChainID +func (_m *Storager) GetReorgedDataByChainID(tx types.Querier, reorgedChainID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(tx, reorgedChainID) + + if len(ret) == 0 { + panic("no return value specified for GetReorgedDataByChainID") + } + + var r0 *multidownloadertypes.ReorgData + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)); ok { + return rf(tx, reorgedChainID) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64) *multidownloadertypes.ReorgData); ok { + r0 = rf(tx, reorgedChainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.ReorgData) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64) error); ok { + r1 = rf(tx, reorgedChainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetReorgedDataByChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByChainID' +type Storager_GetReorgedDataByChainID_Call struct { + *mock.Call +} + +// GetReorgedDataByChainID is a helper method to define mock.On call +// - tx types.Querier +// - reorgedChainID uint64 +func (_e *Storager_Expecter) GetReorgedDataByChainID(tx interface{}, reorgedChainID interface{}) *Storager_GetReorgedDataByChainID_Call { + return &Storager_GetReorgedDataByChainID_Call{Call: _e.mock.On("GetReorgedDataByChainID", tx, reorgedChainID)} +} + +func (_c *Storager_GetReorgedDataByChainID_Call) Run(run func(tx types.Querier, reorgedChainID uint64)) *Storager_GetReorgedDataByChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *Storager_GetReorgedDataByChainID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *Storager_GetReorgedDataByChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetReorgedDataByChainID_Call) RunAndReturn(run func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)) *Storager_GetReorgedDataByChainID_Call { + _c.Call.Return(run) + return _c +} + // GetSyncedBlockRangePerContract provides a mock function with given fields: tx func (_m *Storager) GetSyncedBlockRangePerContract(tx types.Querier) (multidownloadertypes.SetSyncSegment, error) { ret := _m.Called(tx) @@ -623,6 +682,63 @@ func (_c *Storager_InsertValue_Call) RunAndReturn(run func(types.Querier, string return _c } +// LogQuery provides a mock function with given fields: tx, query +func (_m *Storager) LogQuery(tx types.Querier, query multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error) { + ret := _m.Called(tx, query) + + if len(ret) == 0 { + panic("no return value specified for LogQuery") + } + + var r0 multidownloadertypes.LogQueryResponse + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)); ok { + return rf(tx, query) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.LogQuery) multidownloadertypes.LogQueryResponse); ok { + r0 = rf(tx, query) + } else { + r0 = ret.Get(0).(multidownloadertypes.LogQueryResponse) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.LogQuery) error); ok { + r1 = rf(tx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_LogQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LogQuery' +type Storager_LogQuery_Call struct { + *mock.Call +} + +// LogQuery is a helper method to define mock.On call +// - tx types.Querier +// - query multidownloadertypes.LogQuery +func (_e *Storager_Expecter) LogQuery(tx interface{}, query interface{}) *Storager_LogQuery_Call { + return &Storager_LogQuery_Call{Call: _e.mock.On("LogQuery", tx, query)} +} + +func (_c *Storager_LogQuery_Call) Run(run func(tx types.Querier, query multidownloadertypes.LogQuery)) *Storager_LogQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *Storager_LogQuery_Call) Return(_a0 multidownloadertypes.LogQueryResponse, _a1 error) *Storager_LogQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_LogQuery_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)) *Storager_LogQuery_Call { + _c.Call.Return(run) + return _c +} + // NewTx provides a mock function with given fields: ctx func (_m *Storager) NewTx(ctx context.Context) (types.Txer, error) { ret := _m.Called(ctx) diff --git a/multidownloader/types/mocks/mock_storager_for_reorg.go b/multidownloader/types/mocks/mock_storager_for_reorg.go index 74bf29868..24cbd600d 100644 --- a/multidownloader/types/mocks/mock_storager_for_reorg.go +++ b/multidownloader/types/mocks/mock_storager_for_reorg.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go index a087a92e7..a50b365dc 100644 --- a/multidownloader/types/reorg_error.go +++ b/multidownloader/types/reorg_error.go @@ -4,28 +4,29 @@ import ( "errors" "fmt" + aggkitcommon "github.com/agglayer/aggkit/common" "github.com/ethereum/go-ethereum/common" ) -// ReorgError is an error that is raised when a reorg is detected +// DetectedReorgError is an error that is raised when a reorg is detected // The block is one of the blocks that were reorged, but not necessarily the first one -type ReorgError struct { +type DetectedReorgError struct { OffendingBlockNumber uint64 // Important: is not the first reorged block, but one of them OldHash common.Hash NewHash common.Hash Message string } -// IsReorgError checks if an error is a ReorgError -func IsReorgError(err error) bool { - c := CastReorgError(err) +// IsDetectedReorgError checks if an error is a DetectedReorgError +func IsDetectedReorgError(err error) bool { + c := CastDetectedReorgError(err) return c != nil } -// NewReorgError creates a new ReorgError -func NewReorgError(offendingBlockNumber uint64, - oldHash, newHash common.Hash, msg string) *ReorgError { - return &ReorgError{ +// NewDetectedReorgError creates a new DetectedReorgError +func NewDetectedReorgError(offendingBlockNumber uint64, + oldHash, newHash common.Hash, msg string) *DetectedReorgError { + return &DetectedReorgError{ OffendingBlockNumber: offendingBlockNumber, OldHash: oldHash, NewHash: newHash, @@ -33,31 +34,66 @@ func NewReorgError(offendingBlockNumber uint64, } } -func (e *ReorgError) Error() string { +func (e *DetectedReorgError) Error() string { return fmt.Sprintf("reorgError: block number %d: old hash %s != new hash %s: %s", e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) } -func CastReorgError(err error) *ReorgError { - var reorgErr *ReorgError +func CastDetectedReorgError(err error) *DetectedReorgError { + var reorgErr *DetectedReorgError if errors.As(err, &reorgErr) { return reorgErr } return nil } -// // GetReorgErrorBlockNumber returns the block number that caused the reorg -// func GetReorgErrorBlockNumber(err error) uint64 { -// if reorgErr, ok := err.(*ReorgError); ok { +// // GetDetectedReorgErrorBlockNumber returns the block number that caused the reorg +// func GetDetectedReorgErrorBlockNumber(err error) uint64 { +// if reorgErr, ok := err.(*DetectedReorgError); ok { // return reorgErr.BlockNumber // } // return 0 // } -// // GetReorgErrorWrappedError returns the wrapped error that caused the reorg -// func GetReorgErrorWrappedError(err error) error { -// if reorgErr, ok := err.(*ReorgError); ok { +// // GetDetectedReorgErrorWrappedError returns the wrapped error that caused the reorg +// func GetDetectedReorgErrorWrappedError(err error) error { +// if reorgErr, ok := err.(*DetectedReorgError); ok { // return reorgErr.Err // } // return nil // } + +type ReorgedError struct { + Message string + BlockRangeReorged aggkitcommon.BlockRange + ReorgedChainID uint64 +} + +func NewReorgedError(blockRangeReorged aggkitcommon.BlockRange, + reorgedChainID uint64, + msg string) *ReorgedError { + return &ReorgedError{ + Message: msg, + BlockRangeReorged: blockRangeReorged, + ReorgedChainID: reorgedChainID, + } +} + +func (e *ReorgedError) Error() string { + return fmt.Sprintf("reorgedError: chainID=%d blockRangeReorged=%s: %s", + e.ReorgedChainID, e.BlockRangeReorged.String(), e.Message) +} + +// IsReorgedError checks if an error is a ReorgedError +func IsReorgedError(err error) bool { + c := CastReorgedError(err) + return c != nil +} + +func CastReorgedError(err error) *ReorgedError { + var reorgErr *ReorgedError + if errors.As(err, &reorgErr) { + return reorgErr + } + return nil +} diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index f00de014c..427503229 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -178,6 +178,60 @@ func (f *SetSyncSegment) IsAvailable(query LogQuery) bool { return true } +// IsPartiallyAvailable checks if some part of the LogQuery is already synced +// always starting from FromBlock +// If there are any data avaible, it returns true and the LogQuery with the available data +func (f *SetSyncSegment) IsPartiallyAvailable(query LogQuery) (bool, *LogQuery) { + if f == nil || len(query.Addrs) == 0 { + return false, nil + } + + // Find the maximum contiguous range starting from FromBlock that is available + // for all addresses in the query + var maxAvailableToBlock *uint64 + + for _, addr := range query.Addrs { + segment, exists := f.GetByContract(addr) + if !exists { + // If any address is not synced at all, nothing is available + return false, nil + } + + // Calculate the intersection between the segment and the query range + intersection := segment.BlockRange.Intersect(query.BlockRange) + if intersection.IsEmpty() { + // If there's no overlap, nothing is available + return false, nil + } + + // Check if the intersection starts at FromBlock + // If not, there's a gap at the beginning, so nothing is available + if intersection.FromBlock != query.BlockRange.FromBlock { + return false, nil + } + + // Update the minimum ToBlock (the bottleneck across all addresses) + if maxAvailableToBlock == nil || intersection.ToBlock < *maxAvailableToBlock { + maxAvailableToBlock = &intersection.ToBlock + } + } + + if maxAvailableToBlock == nil { + return false, nil + } + + // Create the available LogQuery + availableQuery := &LogQuery{ + Addrs: query.Addrs, + BlockRange: aggkitcommon.NewBlockRange( + query.BlockRange.FromBlock, + *maxAvailableToBlock, + ), + } + + return true, availableQuery +} + // NextQuery generates the next LogQuery to sync based on the lowest FromBlock pending // to synchronize func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uint64) (*LogQuery, error) { diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 00aca7e55..ac04732c9 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -198,6 +198,213 @@ func TestSetSyncSegment_IsAvailable(t *testing.T) { }) } +func TestSetSyncSegment_IsPartiallyAvailable(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var set *SetSyncSegment + query := LogQuery{ + Addrs: []common.Address{common.HexToAddress("0x123")}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("empty addresses in query", func(t *testing.T) { + set := NewSetSyncSegment() + query := LogQuery{ + Addrs: []common.Address{}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("address not synced at all", func(t *testing.T) { + set := NewSetSyncSegment() + query := LogQuery{ + Addrs: []common.Address{common.HexToAddress("0x123")}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("no overlap between query and segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(50, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("gap at the beginning - segment starts after FromBlock", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("partially available - segment covers beginning but not all", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + require.Equal(t, []common.Address{addr}, result.Addrs) + }) + + t.Run("fully available - segment covers entire query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + require.Equal(t, []common.Address{addr}, result.Addrs) + }) + + t.Run("multiple addresses - all have partial data, find bottleneck", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 70), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 50), // Bottleneck + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + require.Equal(t, []common.Address{addr1, addr2}, result.Addrs) + }) + + t.Run("multiple addresses - one has gap at beginning", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(10, 100), // Gap at beginning + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("multiple addresses - one not synced at all", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + // addr2 not added + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("segment extends beyond query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 200), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(100), result.BlockRange.ToBlock) + }) +} + func TestSetSyncSegment_NextQuery(t *testing.T) { t.Run("nil or empty segments", func(t *testing.T) { var set *SetSyncSegment diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go index 6ecdbd207..10702986f 100644 --- a/multidownloader/types/storager.go +++ b/multidownloader/types/storager.go @@ -23,10 +23,12 @@ type Storager interface { GetSyncedBlockRangePerContract(tx dbtypes.Querier) (SetSyncSegment, error) SaveEthLogsWithHeaders(tx dbtypes.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []types.Log, isFinal bool) error + // TODO: Deprecate GetEthLogs and use LogQuery instead GetEthLogs(tx dbtypes.Querier, query LogQuery) ([]types.Log, error) + LogQuery(tx dbtypes.Querier, query LogQuery) (LogQueryResponse, error) UpdateSyncedStatus(tx dbtypes.Querier, segments []SyncSegment) error UpsertSyncerConfigs(tx dbtypes.Querier, configs []ContractConfig) error - GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) + GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, FinalizedType, error) NewTx(ctx context.Context) (dbtypes.Txer, error) // GetBlockHeadersNotFinalized retrieves all block headers that are not finalized <= maxBlock // if maxBlock is nil, retrieves all not finalized blocks @@ -40,6 +42,8 @@ type Storager interface { // second return value indicates if the block is reorged GetBlockReorgedChainID(tx dbtypes.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) + GetReorgedDataByChainID(tx dbtypes.Querier, + reorgedChainID uint64) (*ReorgData, error) } type StoragerForReorg interface { diff --git a/sync/adapter_eth_to_multidownloader.go b/sync/adapter_eth_to_multidownloader.go index 036e80013..7349e1f09 100644 --- a/sync/adapter_eth_to_multidownloader.go +++ b/sync/adapter_eth_to_multidownloader.go @@ -19,7 +19,7 @@ type AdaptEthClientToMultidownloader struct { ethClient aggkittypes.BaseEthereumClienter } -var _ (aggkittypes.MultiDownloader) = (*AdaptEthClientToMultidownloader)(nil) +var _ (aggkittypes.MultiDownloaderLegacy) = (*AdaptEthClientToMultidownloader)(nil) func NewAdapterEthClientToMultidownloader(ethClient aggkittypes.BaseEthereumClienter) *AdaptEthClientToMultidownloader { return &AdaptEthClientToMultidownloader{ diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index effde14e4..45991b12e 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -60,7 +60,7 @@ type EVMDownloader struct { func NewEVMDownloader( syncerID string, - ethClient aggkittypes.MultiDownloader, + ethClient aggkittypes.MultiDownloaderLegacy, syncBlockChunkSize uint64, finality aggkittypes.BlockNumberFinality, waitForNewBlocksPeriod time.Duration, @@ -252,7 +252,7 @@ func (d *EVMDownloader) reportEmptyBlock(ctx context.Context, downloadedCh chan } type EVMDownloaderImplementation struct { - ethClient aggkittypes.MultiDownloader + ethClient aggkittypes.MultiDownloaderLegacy blockFinality aggkittypes.BlockNumberFinality waitForNewBlocksPeriod time.Duration appender LogAppenderMap @@ -269,7 +269,7 @@ type EVMDownloaderImplementation struct { // finalizedBlockType can be nil, in this case, it means that the reorgs are not happening on the network func NewEVMDownloaderImplementation( syncerID string, - ethClient aggkittypes.MultiDownloader, + ethClient aggkittypes.MultiDownloaderLegacy, blockFinality aggkittypes.BlockNumberFinality, waitForNewBlocksPeriod time.Duration, appender LogAppenderMap, diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index cd1a83351..773164ba8 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -383,20 +383,20 @@ func TestWaitForNewBlocks(t *testing.T) { currentBlock := uint64(5) expectedBlock := uint64(6) aggkittypesBlockHeader := aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil) - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypesBlockHeader, nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypesBlockHeader, nil).Once() actualBlock := d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) // 2 iterations - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(5, common.Hash{}, 0, nil), nil).Once() - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(5, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() actualBlock = d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) // after error from client - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() actualBlock = d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) } @@ -428,7 +428,7 @@ func TestWaitForNewBlocksWithReorgDetection(t *testing.T) { headerHash := latestHeader.Hash() trackedBlock := &reorgdetector.Header{Hash: common.HexToHash("0x456")} - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return( + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return( aggkittypes.NewBlockHeaderFromEthHeader(latestHeader), nil).Once() reorgDetectorMock.EXPECT().GetTrackedBlockByBlockNumber("test-reorg-detector-id", currentBlockNumber).Return(trackedBlock, nil).Once() reorgDetectorMock.EXPECT().AddBlockToTrack(ctx, "test-reorg-detector-id", currentBlockNumber, headerHash).Return(nil).Once() @@ -462,10 +462,10 @@ func TestWaitForNewBlocksWithReorgDetection(t *testing.T) { latestHeader := &types.Header{Number: big.NewInt(int64(currentBlockNumber))} latestHeaderNext := &types.Header{Number: big.NewInt(int64(currentBlockNumber + 1))} - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return( + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return( aggkittypes.NewBlockHeaderFromEthHeader(latestHeader), nil).Once() reorgDetectorMock.EXPECT().GetTrackedBlockByBlockNumber("test-reorg-detector-id", currentBlockNumber).Return(nil, errors.New("database error")).Once() - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return(aggkittypes.NewBlockHeaderFromEthHeader(latestHeaderNext), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeaderFromEthHeader(latestHeaderNext), nil).Once() headerHashNext := latestHeaderNext.Hash() reorgDetectorMock.EXPECT().AddBlockToTrack(ctx, "test-reorg-detector-id", currentBlockNumber+1, headerHashNext).Return(nil).Once() diff --git a/sync/mock_downloader.go b/sync/mock_downloader.go index c197d450b..b16babcc7 100644 --- a/sync/mock_downloader.go +++ b/sync/mock_downloader.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package sync diff --git a/sync/mock_evm_downloader_interface.go b/sync/mock_evm_downloader_interface.go index 7b01b3f4a..83b114b54 100644 --- a/sync/mock_evm_downloader_interface.go +++ b/sync/mock_evm_downloader_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package sync diff --git a/sync/mock_processor_interface.go b/sync/mock_processor_interface.go index 96ece8d42..e0f285140 100644 --- a/sync/mock_processor_interface.go +++ b/sync/mock_processor_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package sync diff --git a/sync/mock_reorg_detector.go b/sync/mock_reorg_detector.go index ce220bd56..3430bcd90 100644 --- a/sync/mock_reorg_detector.go +++ b/sync/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package sync diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 8d2f0f662..00b9221d9 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -159,7 +159,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), } - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy if useMultidownloaderForTest { multidownloaderClient, err = multidownloader.NewEVMMultidownloader( log.WithFields("module", "multidownloader"), diff --git a/tree/types/mocks/mock_full_treer.go b/tree/types/mocks/mock_full_treer.go index 91187f9ff..83f82f9c2 100644 --- a/tree/types/mocks/mock_full_treer.go +++ b/tree/types/mocks/mock_full_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_leaf_writer.go b/tree/types/mocks/mock_leaf_writer.go index 2d8da5b5a..a24025876 100644 --- a/tree/types/mocks/mock_leaf_writer.go +++ b/tree/types/mocks/mock_leaf_writer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_read_treer.go b/tree/types/mocks/mock_read_treer.go index d9a4da2b2..91120ccf6 100644 --- a/tree/types/mocks/mock_read_treer.go +++ b/tree/types/mocks/mock_read_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_reorganize_treer.go b/tree/types/mocks/mock_reorganize_treer.go index 2f8b51d12..d1a26ab91 100644 --- a/tree/types/mocks/mock_reorganize_treer.go +++ b/tree/types/mocks/mock_reorganize_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/types/list_block_header_test.go b/types/list_block_header_test.go new file mode 100644 index 000000000..7b84dd41e --- /dev/null +++ b/types/list_block_header_test.go @@ -0,0 +1,167 @@ +package types + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestNewListBlockHeadersEmpty(t *testing.T) { + t.Run("creates empty list with pre-allocated capacity", func(t *testing.T) { + size := 10 + list := NewListBlockHeadersEmpty(size) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + require.Equal(t, size, cap(list)) + }) + + t.Run("creates empty list with zero capacity", func(t *testing.T) { + list := NewListBlockHeadersEmpty(0) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + }) +} + +func TestNewListBlockHeaders(t *testing.T) { + t.Run("creates list with specified size filled with nil", func(t *testing.T) { + size := 5 + list := NewListBlockHeaders(size) + + require.NotNil(t, list) + require.Equal(t, size, list.Len()) + for i := range size { + require.Nil(t, list[i]) + } + }) + + t.Run("creates empty list when size is zero", func(t *testing.T) { + list := NewListBlockHeaders(0) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + }) +} + +func TestListBlockHeaders_Len(t *testing.T) { + t.Run("returns correct length for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + require.Equal(t, 0, list.Len()) + }) + + t.Run("returns correct length for list with elements", func(t *testing.T) { + list := ListBlockHeaders{ + NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil), + NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil), + NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil), + } + require.Equal(t, 3, list.Len()) + }) + + t.Run("returns correct length for list with nil elements", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + require.Equal(t, 3, list.Len()) + }) +} + +func TestListBlockHeaders_ToMap(t *testing.T) { + t.Run("converts empty list to empty map", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.ToMap() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("converts list with headers to map", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.ToMap() + + require.Equal(t, 3, len(result)) + require.Equal(t, header1, result[1]) + require.Equal(t, header2, result[2]) + require.Equal(t, header3, result[5]) + }) + + t.Run("skips nil headers when converting to map", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header3 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + + list := ListBlockHeaders{header1, nil, header3, nil} + result := list.ToMap() + + require.Equal(t, 2, len(result)) + require.Equal(t, header1, result[1]) + require.Equal(t, header3, result[3]) + _, exists := result[0] + require.False(t, exists) + }) + + t.Run("handles list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.ToMap() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) +} + +func TestListBlockHeaders_BlockNumbers(t *testing.T) { + t.Run("returns empty slice for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.BlockNumbers() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("returns sorted block numbers", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(8, common.HexToHash("0x08"), 8000, nil) + header4 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3, header4} + result := list.BlockNumbers() + + require.Equal(t, 4, len(result)) + require.Equal(t, []uint64{1, 2, 5, 8}, result) + }) + + t.Run("skips nil headers when extracting block numbers", func(t *testing.T) { + header1 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + header2 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{nil, header1, nil, header2, nil} + result := list.BlockNumbers() + + require.Equal(t, 2, len(result)) + require.Equal(t, []uint64{1, 3}, result) + }) + + t.Run("returns empty slice for list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.BlockNumbers() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("handles duplicate block numbers", func(t *testing.T) { + header1 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02b"), 2001, nil) + header3 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockNumbers() + + require.Equal(t, 3, len(result)) + require.Equal(t, []uint64{1, 2, 2}, result) + }) +} diff --git a/types/mocks/mock_base_ethereum_clienter.go b/types/mocks/mock_base_ethereum_clienter.go index 8b5fad3e7..9ae1aa962 100644 --- a/types/mocks/mock_base_ethereum_clienter.go +++ b/types/mocks/mock_base_ethereum_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_custom_ethereum_clienter.go b/types/mocks/mock_custom_ethereum_clienter.go new file mode 100644 index 000000000..72e41ca93 --- /dev/null +++ b/types/mocks/mock_custom_ethereum_clienter.go @@ -0,0 +1,96 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// CustomEthereumClienter is an autogenerated mock type for the CustomEthereumClienter type +type CustomEthereumClienter struct { + mock.Mock +} + +type CustomEthereumClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *CustomEthereumClienter) EXPECT() *CustomEthereumClienter_Expecter { + return &CustomEthereumClienter_Expecter{mock: &_m.Mock} +} + +// CustomHeaderByNumber provides a mock function with given fields: ctx, number +func (_m *CustomEthereumClienter) CustomHeaderByNumber(ctx context.Context, number *types.BlockNumberFinality) (*types.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for CustomHeaderByNumber") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) *types.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CustomEthereumClienter_CustomHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CustomHeaderByNumber' +type CustomEthereumClienter_CustomHeaderByNumber_Call struct { + *mock.Call +} + +// CustomHeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *types.BlockNumberFinality +func (_e *CustomEthereumClienter_Expecter) CustomHeaderByNumber(ctx interface{}, number interface{}) *CustomEthereumClienter_CustomHeaderByNumber_Call { + return &CustomEthereumClienter_CustomHeaderByNumber_Call{Call: _e.mock.On("CustomHeaderByNumber", ctx, number)} +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) Run(run func(ctx context.Context, number *types.BlockNumberFinality)) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.BlockNumberFinality)) + }) + return _c +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) Return(_a0 *types.BlockHeader, _a1 error) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) RunAndReturn(run func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewCustomEthereumClienter creates a new instance of CustomEthereumClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCustomEthereumClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *CustomEthereumClienter { + mock := &CustomEthereumClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/mock_eth_chain_reader.go b/types/mocks/mock_eth_chain_reader.go new file mode 100644 index 000000000..6d3ab7f20 --- /dev/null +++ b/types/mocks/mock_eth_chain_reader.go @@ -0,0 +1,99 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthChainReader is an autogenerated mock type for the EthChainReader type +type EthChainReader struct { + mock.Mock +} + +type EthChainReader_Expecter struct { + mock *mock.Mock +} + +func (_m *EthChainReader) EXPECT() *EthChainReader_Expecter { + return &EthChainReader_Expecter{mock: &_m.Mock} +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthChainReader) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthChainReader_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthChainReader_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthChainReader_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthChainReader_HeaderByHash_Call { + return &EthChainReader_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthChainReader_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthChainReader_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthChainReader_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthChainReader_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthChainReader_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthChainReader_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// NewEthChainReader creates a new instance of EthChainReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthChainReader(t interface { + mock.TestingT + Cleanup(func()) +}) *EthChainReader { + mock := &EthChainReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/mock_eth_clienter.go b/types/mocks/mock_eth_clienter.go index e2b42fef2..b21942554 100644 --- a/types/mocks/mock_eth_clienter.go +++ b/types/mocks/mock_eth_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_ethereum_clienter.go b/types/mocks/mock_ethereum_clienter.go index 5491bc945..0e7cb09a1 100644 --- a/types/mocks/mock_ethereum_clienter.go +++ b/types/mocks/mock_ethereum_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_multi_downloader.go b/types/mocks/mock_multi_downloader.go index 91dbca44e..a0dadcd22 100644 --- a/types/mocks/mock_multi_downloader.go +++ b/types/mocks/mock_multi_downloader.go @@ -26,65 +26,6 @@ func (_m *MultiDownloader) EXPECT() *MultiDownloader_Expecter { return &MultiDownloader_Expecter{mock: &_m.Mock} } -// BlockHeader provides a mock function with given fields: ctx, finality -func (_m *MultiDownloader) BlockHeader(ctx context.Context, finality types.BlockNumberFinality) (*types.BlockHeader, error) { - ret := _m.Called(ctx, finality) - - if len(ret) == 0 { - panic("no return value specified for BlockHeader") - } - - var r0 *types.BlockHeader - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (*types.BlockHeader, error)); ok { - return rf(ctx, finality) - } - if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) *types.BlockHeader); ok { - r0 = rf(ctx, finality) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.BlockHeader) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { - r1 = rf(ctx, finality) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MultiDownloader_BlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHeader' -type MultiDownloader_BlockHeader_Call struct { - *mock.Call -} - -// BlockHeader is a helper method to define mock.On call -// - ctx context.Context -// - finality types.BlockNumberFinality -func (_e *MultiDownloader_Expecter) BlockHeader(ctx interface{}, finality interface{}) *MultiDownloader_BlockHeader_Call { - return &MultiDownloader_BlockHeader_Call{Call: _e.mock.On("BlockHeader", ctx, finality)} -} - -func (_c *MultiDownloader_BlockHeader_Call) Run(run func(ctx context.Context, finality types.BlockNumberFinality)) *MultiDownloader_BlockHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) - }) - return _c -} - -func (_c *MultiDownloader_BlockHeader_Call) Return(_a0 *types.BlockHeader, _a1 error) *MultiDownloader_BlockHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MultiDownloader_BlockHeader_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (*types.BlockHeader, error)) *MultiDownloader_BlockHeader_Call { - _c.Call.Return(run) - return _c -} - // BlockNumber provides a mock function with given fields: ctx, finality func (_m *MultiDownloader) BlockNumber(ctx context.Context, finality types.BlockNumberFinality) (uint64, error) { ret := _m.Called(ctx, finality) diff --git a/types/mocks/mock_multi_downloader_legacy.go b/types/mocks/mock_multi_downloader_legacy.go new file mode 100644 index 000000000..a8a2928f9 --- /dev/null +++ b/types/mocks/mock_multi_downloader_legacy.go @@ -0,0 +1,411 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + ethereum "github.com/ethereum/go-ethereum" + coretypes "github.com/ethereum/go-ethereum/core/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/types" +) + +// MultiDownloaderLegacy is an autogenerated mock type for the MultiDownloaderLegacy type +type MultiDownloaderLegacy struct { + mock.Mock +} + +type MultiDownloaderLegacy_Expecter struct { + mock *mock.Mock +} + +func (_m *MultiDownloaderLegacy) EXPECT() *MultiDownloaderLegacy_Expecter { + return &MultiDownloaderLegacy_Expecter{mock: &_m.Mock} +} + +// BlockNumber provides a mock function with given fields: ctx, finality +func (_m *MultiDownloaderLegacy) BlockNumber(ctx context.Context, finality types.BlockNumberFinality) (uint64, error) { + ret := _m.Called(ctx, finality) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (uint64, error)); ok { + return rf(ctx, finality) + } + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) uint64); ok { + r0 = rf(ctx, finality) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { + r1 = rf(ctx, finality) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type MultiDownloaderLegacy_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +// - finality types.BlockNumberFinality +func (_e *MultiDownloaderLegacy_Expecter) BlockNumber(ctx interface{}, finality interface{}) *MultiDownloaderLegacy_BlockNumber_Call { + return &MultiDownloaderLegacy_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx, finality)} +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) Run(run func(ctx context.Context, finality types.BlockNumberFinality)) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) Return(_a0 uint64, _a1 error) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (uint64, error)) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// ChainID provides a mock function with given fields: ctx +func (_m *MultiDownloaderLegacy) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type MultiDownloaderLegacy_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *MultiDownloaderLegacy_Expecter) ChainID(ctx interface{}) *MultiDownloaderLegacy_ChainID_Call { + return &MultiDownloaderLegacy_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) Run(run func(ctx context.Context)) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) Return(_a0 uint64, _a1 error) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Return(run) + return _c +} + +// EthClient provides a mock function with no fields +func (_m *MultiDownloaderLegacy) EthClient() types.BaseEthereumClienter { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EthClient") + } + + var r0 types.BaseEthereumClienter + if rf, ok := ret.Get(0).(func() types.BaseEthereumClienter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.BaseEthereumClienter) + } + } + + return r0 +} + +// MultiDownloaderLegacy_EthClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EthClient' +type MultiDownloaderLegacy_EthClient_Call struct { + *mock.Call +} + +// EthClient is a helper method to define mock.On call +func (_e *MultiDownloaderLegacy_Expecter) EthClient() *MultiDownloaderLegacy_EthClient_Call { + return &MultiDownloaderLegacy_EthClient_Call{Call: _e.mock.On("EthClient")} +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) Run(run func()) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) Return(_a0 types.BaseEthereumClienter) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) RunAndReturn(run func() types.BaseEthereumClienter) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *MultiDownloaderLegacy) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]coretypes.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []coretypes.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []coretypes.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coretypes.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type MultiDownloaderLegacy_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *MultiDownloaderLegacy_Expecter) FilterLogs(ctx interface{}, q interface{}) *MultiDownloaderLegacy_FilterLogs_Call { + return &MultiDownloaderLegacy_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c *MultiDownloaderLegacy_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_FilterLogs_Call) Return(_a0 []coretypes.Log, _a1 error) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultiDownloaderLegacy) HeaderByNumber(ctx context.Context, number *types.BlockNumberFinality) (*types.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) *types.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type MultiDownloaderLegacy_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *types.BlockNumberFinality +func (_e *MultiDownloaderLegacy_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *MultiDownloaderLegacy_HeaderByNumber_Call { + return &MultiDownloaderLegacy_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) Run(run func(ctx context.Context, number *types.BlockNumberFinality)) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) Return(_a0 *types.BlockHeader, _a1 error) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// RegisterSyncer provides a mock function with given fields: data +func (_m *MultiDownloaderLegacy) RegisterSyncer(data types.SyncerConfig) error { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for RegisterSyncer") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.SyncerConfig) error); ok { + r0 = rf(data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiDownloaderLegacy_RegisterSyncer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisterSyncer' +type MultiDownloaderLegacy_RegisterSyncer_Call struct { + *mock.Call +} + +// RegisterSyncer is a helper method to define mock.On call +// - data types.SyncerConfig +func (_e *MultiDownloaderLegacy_Expecter) RegisterSyncer(data interface{}) *MultiDownloaderLegacy_RegisterSyncer_Call { + return &MultiDownloaderLegacy_RegisterSyncer_Call{Call: _e.mock.On("RegisterSyncer", data)} +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) Run(run func(data types.SyncerConfig)) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.SyncerConfig)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) Return(_a0 error) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) RunAndReturn(run func(types.SyncerConfig) error) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: ctx +func (_m *MultiDownloaderLegacy) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiDownloaderLegacy_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type MultiDownloaderLegacy_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +func (_e *MultiDownloaderLegacy_Expecter) Start(ctx interface{}) *MultiDownloaderLegacy_Start_Call { + return &MultiDownloaderLegacy_Start_Call{Call: _e.mock.On("Start", ctx)} +} + +func (_c *MultiDownloaderLegacy_Start_Call) Run(run func(ctx context.Context)) *MultiDownloaderLegacy_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_Start_Call) Return(_a0 error) *MultiDownloaderLegacy_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_Start_Call) RunAndReturn(run func(context.Context) error) *MultiDownloaderLegacy_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewMultiDownloaderLegacy creates a new instance of MultiDownloaderLegacy. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMultiDownloaderLegacy(t interface { + mock.TestingT + Cleanup(func()) +}) *MultiDownloaderLegacy { + mock := &MultiDownloaderLegacy{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/mock_rpc_clienter.go b/types/mocks/mock_rpc_clienter.go index 2d2c63566..5c99bed91 100644 --- a/types/mocks/mock_rpc_clienter.go +++ b/types/mocks/mock_rpc_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks diff --git a/types/multidownloader.go b/types/multidownloader.go index 1deab838f..611cc887d 100644 --- a/types/multidownloader.go +++ b/types/multidownloader.go @@ -19,7 +19,7 @@ type SyncerConfig struct { ToBlock BlockNumberFinality } -type MultiDownloader interface { +type MultiDownloaderLegacy interface { ChainID(ctx context.Context) (uint64, error) BlockNumber(ctx context.Context, finality BlockNumberFinality) (uint64, error) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]ethtypes.Log, error) From 8e5d88b0fb4d3adab0ad644f02fc5c459735bceb Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 29 Jan 2026 17:17:28 +0100 Subject: [PATCH 14/75] feat: polling with timeout --- common/polling_with_timeout.go | 51 +++++++ common/polling_with_timeout_test.go | 207 ++++++++++++++++++++++++++++ multidownloader/sync/download.go | 55 +++----- 3 files changed, 280 insertions(+), 33 deletions(-) create mode 100644 common/polling_with_timeout.go create mode 100644 common/polling_with_timeout_test.go diff --git a/common/polling_with_timeout.go b/common/polling_with_timeout.go new file mode 100644 index 000000000..b98498174 --- /dev/null +++ b/common/polling_with_timeout.go @@ -0,0 +1,51 @@ +package common + +import ( + "context" + "fmt" + "time" +) + +var ( + ErrTimeoutReached = fmt.Errorf("timeout reached") +) + +// It execute 'checkCondition' each pollingPeriod, until either the condition is met, +// the timeoutPeriod is reached, or the context is done. +// It returns true if the condition is met, false if timeout is reached, or an error. +func PollingWithTimeout( + ctx context.Context, + pollingPeriod, timeoutPeriod time.Duration, + checkCondition func() (bool, error)) (bool, error) { + timeoutTimer := time.NewTimer(timeoutPeriod) + defer timeoutTimer.Stop() + waitingForCondition := true + for waitingForCondition { + pollingTimer := time.NewTimer(pollingPeriod) + conditionMet, err := checkCondition() + if err != nil { + return false, err + } + if conditionMet { + waitingForCondition = false + pollingTimer.Stop() + return true, nil + } + select { + case <-pollingTimer.C: + pollingTimer.Stop() + // Loop continues to check condition + + case <-timeoutTimer.C: + pollingTimer.Stop() + return false, fmt.Errorf("pollingWithTimeout: condition not met after waiting %s: %w", + timeoutPeriod.String(), ErrTimeoutReached) + case <-ctx.Done(): + pollingTimer.Stop() + return false, fmt.Errorf("pollingWithTimeout: "+ + "context done while waiting for condition to be met: %w", + ctx.Err()) + } + } + return false, nil +} diff --git a/common/polling_with_timeout_test.go b/common/polling_with_timeout_test.go new file mode 100644 index 000000000..1d445e751 --- /dev/null +++ b/common/polling_with_timeout_test.go @@ -0,0 +1,207 @@ +package common + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPollingWithTimeout(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + pollingPeriod time.Duration + timeoutPeriod time.Duration + setupCheckFunction func() func() (bool, error) + setupContext func() context.Context + expectedResult bool + expectedError error + expectedErrorMsg string + }{ + { + name: "condition met immediately", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return true, nil + } + }, + setupContext: func() context.Context { return context.Background() }, + expectedResult: true, + expectedError: nil, + }, + { + name: "condition met after several attempts", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 200 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + attempts := 0 + return func() (bool, error) { + attempts++ + if attempts >= 3 { + return true, nil + } + return false, nil + } + }, + setupContext: func() context.Context { return context.Background() }, + expectedResult: true, + expectedError: nil, + }, + { + name: "timeout reached", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 50 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return false, nil + } + }, + setupContext: func() context.Context { return context.Background() }, + expectedResult: false, + expectedError: ErrTimeoutReached, + expectedErrorMsg: "pollingWithTimeout: condition not met after waiting", + }, + { + name: "context cancelled", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 500 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return false, nil + } + }, + setupContext: func() context.Context { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Millisecond) + // Don't cancel here, let the test run and timeout naturally + _ = cancel + return ctx + }, + expectedResult: false, + expectedError: context.DeadlineExceeded, + expectedErrorMsg: "context done while waiting for condition to be met", + }, + { + name: "check function returns error", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + testErr := errors.New("check function error") + return func() (bool, error) { + return false, testErr + } + }, + setupContext: func() context.Context { return context.Background() }, + expectedResult: false, + expectedErrorMsg: "check function error", + }, + { + name: "condition met on last attempt before timeout", + pollingPeriod: 20 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + attempts := 0 + return func() (bool, error) { + attempts++ + // Meet condition after ~80ms (4 attempts * 20ms) + if attempts >= 4 { + return true, nil + } + return false, nil + } + }, + setupContext: func() context.Context { return context.Background() }, + expectedResult: true, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := tt.setupContext() + checkFunc := tt.setupCheckFunction() + + result, err := PollingWithTimeout(ctx, tt.pollingPeriod, tt.timeoutPeriod, checkFunc) + + require.Equal(t, tt.expectedResult, result) + + if tt.expectedError != nil { + require.Error(t, err) + require.ErrorIs(t, err, tt.expectedError) + } + + if tt.expectedErrorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedErrorMsg) + } + + if tt.expectedError == nil && tt.expectedErrorMsg == "" { + require.NoError(t, err) + } + }) + } +} + +func TestPollingWithTimeout_Timing(t *testing.T) { + t.Parallel() + + t.Run("respects polling period", func(t *testing.T) { + t.Parallel() + + pollingPeriod := 50 * time.Millisecond + timeoutPeriod := 500 * time.Millisecond + attempts := 0 + start := time.Now() + + checkFunc := func() (bool, error) { + attempts++ + if attempts >= 3 { + return true, nil + } + return false, nil + } + + result, err := PollingWithTimeout(context.Background(), pollingPeriod, timeoutPeriod, checkFunc) + + elapsed := time.Since(start) + + require.NoError(t, err) + require.True(t, result) + require.Equal(t, 3, attempts) + // Should take at least 2 polling periods (between attempt 1 and 3) + require.GreaterOrEqual(t, elapsed, 2*pollingPeriod) + // But not more than timeout + require.Less(t, elapsed, timeoutPeriod) + }) + + t.Run("timeout is enforced", func(t *testing.T) { + t.Parallel() + + pollingPeriod := 20 * time.Millisecond + timeoutPeriod := 100 * time.Millisecond + start := time.Now() + + checkFunc := func() (bool, error) { + return false, nil + } + + result, err := PollingWithTimeout(context.Background(), pollingPeriod, timeoutPeriod, checkFunc) + + elapsed := time.Since(start) + + require.Error(t, err) + require.False(t, result) + require.ErrorIs(t, err, ErrTimeoutReached) + // Should take approximately the timeout period + require.GreaterOrEqual(t, elapsed, timeoutPeriod) + // Allow some margin for timing variance (20ms) + require.Less(t, elapsed, timeoutPeriod+20*time.Millisecond) + }) +} diff --git a/multidownloader/sync/download.go b/multidownloader/sync/download.go index 914925be3..51124d088 100644 --- a/multidownloader/sync/download.go +++ b/multidownloader/sync/download.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/agglayer/aggkit/common" aggkitcommon "github.com/agglayer/aggkit/common" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" @@ -68,44 +69,32 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, } maxLogQuery := d.newMaxLogQuery(lastBlockHeader, maxBlocks, syncerConfig) var result *mdrsynctypes.DownloadResult - - // Create timeout timer once for the entire retry period - timeoutTimer := time.NewTimer(d.waitPeriodToCatchUpMaximumLogRange) - defer timeoutTimer.Stop() - waitingForLogs := true - // Retry loop: wait pullingPeriod between retries - for waitingForLogs { - pullingTimer := time.NewTimer(d.pullingPeriod) + conditionMet, err := common.PollingWithTimeout(ctx, d.pullingPeriod, d.waitPeriodToCatchUpMaximumLogRange, func() (bool, error) { + var err error err = d.checkReorgedBlock(ctx, lastBlockHeader) if err != nil { - return nil, err + return false, err } - // Retry the query result, err = d.executeLogQuery(ctx, maxLogQuery) - // Loop continues to check condition - if err == nil { - waitingForLogs = false - break - } - // The only allowed error is ErrLogsNotAvailable - if err != nil && !errors.Is(err, ErrLogsNotAvailable) { - return nil, err - } - select { - case <-pullingTimer.C: - pullingTimer.Stop() - // Check for reorg before retrying - - case <-timeoutTimer.C: - pullingTimer.Stop() - return nil, fmt.Errorf("DownloadNextBlocks: logs not available after waiting %s for %s: %w", - d.waitPeriodToCatchUpMaximumLogRange.String(), maxLogQuery.String(), ErrLogsNotAvailable) - case <-ctx.Done(): - pullingTimer.Stop() - return nil, fmt.Errorf("DownloadNextBlocks: "+ - "context done while waiting for logs %s to be available: %w", - maxLogQuery.String(), ctx.Err()) + if err != nil { + // The only allowed error is ErrLogsNotAvailable + if errors.Is(err, ErrLogsNotAvailable) { + return false, nil + } + return false, err } + return true, nil + }) + if errors.Is(err, common.ErrTimeoutReached) { + return nil, fmt.Errorf("Downloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w", + maxLogQuery.String(), d.waitPeriodToCatchUpMaximumLogRange.String(), ErrLogsNotAvailable) + } + if err != nil { + return nil, err + } + if !conditionMet { + return nil, fmt.Errorf("Downloader.DownloadNextBlocks: logs not available for query: %s. Err: %w", + maxLogQuery.String(), ErrLogsNotAvailable) } // TODO: Add extra empty block is is in unsafe zone From db9303b8bd085732966e138d8defc72ada6e4c09 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Sat, 31 Jan 2026 10:59:37 +0100 Subject: [PATCH 15/75] fix: fix reorg case that disapears blocks --- common/polling_with_timeout.go | 5 +- common/polling_with_timeout_test.go | 10 +- config/default.go | 2 + etherman/errors.go | 13 + l1infotreesync/e2e_test.go | 112 ++++++--- l1infotreesync/l1infotreesync.go | 1 + multidownloader/config.go | 2 + multidownloader/e2e_test.go | 6 +- multidownloader/evm_multidownloader.go | 250 ++++++++++---------- multidownloader/evm_multidownloader_rpc.go | 13 + multidownloader/evm_multidownloader_test.go | 32 --- multidownloader/reorg_processor.go | 15 +- multidownloader/reorg_processor_port.go | 22 +- multidownloader/state.go | 3 +- multidownloader/storage/storage_reorg.go | 43 ++++ multidownloader/sync/download.go | 39 +-- multidownloader/sync/evmdriver.go | 4 +- multidownloader/types/reorg_error.go | 61 +++-- multidownloader/types/set_sync_segment.go | 9 +- 19 files changed, 392 insertions(+), 250 deletions(-) diff --git a/common/polling_with_timeout.go b/common/polling_with_timeout.go index b98498174..043ed7cb1 100644 --- a/common/polling_with_timeout.go +++ b/common/polling_with_timeout.go @@ -19,15 +19,14 @@ func PollingWithTimeout( checkCondition func() (bool, error)) (bool, error) { timeoutTimer := time.NewTimer(timeoutPeriod) defer timeoutTimer.Stop() - waitingForCondition := true - for waitingForCondition { + + for { pollingTimer := time.NewTimer(pollingPeriod) conditionMet, err := checkCondition() if err != nil { return false, err } if conditionMet { - waitingForCondition = false pollingTimer.Stop() return true, nil } diff --git a/common/polling_with_timeout_test.go b/common/polling_with_timeout_test.go index 1d445e751..43f68cedf 100644 --- a/common/polling_with_timeout_test.go +++ b/common/polling_with_timeout_test.go @@ -31,7 +31,7 @@ func TestPollingWithTimeout(t *testing.T) { return true, nil } }, - setupContext: func() context.Context { return context.Background() }, + setupContext: context.Background, expectedResult: true, expectedError: nil, }, @@ -49,7 +49,7 @@ func TestPollingWithTimeout(t *testing.T) { return false, nil } }, - setupContext: func() context.Context { return context.Background() }, + setupContext: context.Background, expectedResult: true, expectedError: nil, }, @@ -62,7 +62,7 @@ func TestPollingWithTimeout(t *testing.T) { return false, nil } }, - setupContext: func() context.Context { return context.Background() }, + setupContext: context.Background, expectedResult: false, expectedError: ErrTimeoutReached, expectedErrorMsg: "pollingWithTimeout: condition not met after waiting", @@ -96,7 +96,7 @@ func TestPollingWithTimeout(t *testing.T) { return false, testErr } }, - setupContext: func() context.Context { return context.Background() }, + setupContext: context.Background, expectedResult: false, expectedErrorMsg: "check function error", }, @@ -115,7 +115,7 @@ func TestPollingWithTimeout(t *testing.T) { return false, nil } }, - setupContext: func() context.Context { return context.Background() }, + setupContext: context.Background, expectedResult: true, expectedError: nil, }, diff --git a/config/default.go b/config/default.go index 9c617e2a6..1be88340e 100644 --- a/config/default.go +++ b/config/default.go @@ -338,6 +338,7 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}" [L1Multidownloader] Enabled = false + DeveloperMode = false StoragePath = "{{PathRWData}}/l1_multidownloader.sqlite" BlockChunkSize = 10000 MaxParallelBlockHeaderRetrieval = 30 @@ -347,6 +348,7 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}" [L2Multidownloader] Enabled = false + DeveloperMode = false StoragePath = "{{PathRWData}}/l2_multidownloader.sqlite" BlockChunkSize = 10000 MaxParallelBlockHeaderRetrieval = 30 diff --git a/etherman/errors.go b/etherman/errors.go index a2d748e7b..7ce4f9161 100644 --- a/etherman/errors.go +++ b/etherman/errors.go @@ -56,3 +56,16 @@ func TryParseError(err error) (error, bool) { return parsedError, exists } + +func IsErrNotFound(err error) bool { + if err == nil { + return false + } + if errors.Is(err, ErrNotFound) { + return true + } + if err.Error() == ErrNotFound.Error() { + return true + } + return false +} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index f23d0a72d..fd75d429b 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -30,7 +30,7 @@ import ( "github.com/stretchr/testify/require" ) -const useMultidownloaderForTests = false +const useMultidownloaderForTests = true func newSimulatedClient(t *testing.T) ( *simulated.Backend, @@ -158,19 +158,31 @@ func TestWithReorgs(t *testing.T) { client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) - rdConfig := reorgdetector.Config{ - DBPath: dbPathReorg, - CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), - FinalizedBlock: aggkittypes.FinalizedBlock, + cfg := l1infotreesync.Config{ + DBPath: dbPathSyncer, + InitialBlock: 0, + SyncBlockChunkSize: 10, + BlockFinality: aggkittypes.LatestBlock, + GlobalExitRootAddr: gerAddr, + RollupManagerAddr: verifyAddr, + RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), + MaxRetryAttemptsAfterError: 25, + RequireStorageContentCompatibility: true, + WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), } - rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - var multidownloaderClient aggkittypes.MultiDownloaderLegacy + + var syncer *l1infotreesync.L1InfoTreeSync + var err error + var evmMultidownloader *multidownloader.EVMMultidownloader if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true - multidownloaderClient, err = multidownloader.NewEVMMultidownloader( + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") + require.NoError(t, err) + cfgMD.BlockFinality = *finality + cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 100) + cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 100) + evmMultidownloader, err = multidownloader.NewEVMMultidownloader( log.WithFields("module", "multidownloader"), cfgMD, "testMD", @@ -181,24 +193,25 @@ func TestWithReorgs(t *testing.T) { nil, // reorgProcessor will be created internally ) require.NoError(t, err) + syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go func() { + err = evmMultidownloader.Start(ctx) + require.NoError(t, err) + }() } else { - multidownloaderClient = sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - } - - cfg := l1infotreesync.Config{ - DBPath: dbPathSyncer, - InitialBlock: 0, - SyncBlockChunkSize: 10, - BlockFinality: aggkittypes.LatestBlock, - GlobalExitRootAddr: gerAddr, - RollupManagerAddr: verifyAddr, - RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), - MaxRetryAttemptsAfterError: 25, - RequireStorageContentCompatibility: true, - WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + rdConfig := reorgdetector.Config{ + DBPath: dbPathReorg, + CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), + FinalizedBlock: aggkittypes.FinalizedBlock, + } + rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) + require.NoError(t, err) + require.NoError(t, rd.Start(ctx)) + multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) go syncer.Start(ctx) // Commit block 6 @@ -261,6 +274,7 @@ func TestWithReorgs(t *testing.T) { require.NoError(t, err) blockNum, err := client.Client().BlockNumber(ctx) + log.Infof("Current block number after fork: %d", blockNum) require.NoError(t, err) require.Equal(t, header.Number.Uint64(), blockNum) @@ -270,17 +284,28 @@ func TestWithReorgs(t *testing.T) { // Assert rollup exit root after committing new blocks on the fork expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) // wait for syncer to process the reorg + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + + lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + log.Infof("Last processed block after reorg: %d", lastProcessedBlock) actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + showLeafs(t, ctx, syncer, "Before second fork: ") + // Forking from block 6 again + log.Infof("πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ–– Forking again from block (6) %d", reorgFrom.Hex()) err = client.Fork(reorgFrom) require.NoError(t, err) time.Sleep(time.Millisecond * 500) - + // wait for syncer to process the reorg helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 - + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) // create some events and update the trees updateL1InfoTreeAndRollupExitTree(2, 1) helpers.CommitBlocks(t, client, 1, time.Millisecond*100) @@ -293,9 +318,40 @@ func TestWithReorgs(t *testing.T) { require.NoError(t, err) actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) + showLeafs(t, ctx, syncer, "After second fork: ") + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) } +func checkBlocks(t *testing.T, ctx context.Context, rawClient simulated.Client, mdr *multidownloader.EVMMultidownloader, fromBlock, toBlock uint64) { + t.Helper() + if mdr == nil { + log.Warn("checkBlocks: multidownloader is nil, skipping block check") + return + } + for i := fromBlock; i <= toBlock; i++ { + block, errRaw := rawClient.BlockByNumber(ctx, big.NewInt(int64(i))) + blockMDR, errMDR := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(i)) + require.Equal(t, errRaw == nil, errMDR == nil, "block number %d: errRaw=%v, errMDR=%v blockMDR=%s", i, errRaw, errMDR, blockMDR.String()) + if errRaw == nil && errMDR == nil { + require.Equal(t, block.Hash(), blockMDR.Hash, "block number %d", i) + } + } +} + +func showLeafs(t *testing.T, ctx context.Context, syncer *l1infotreesync.L1InfoTreeSync, prefix string) { + t.Helper() + for i := 0; i < 6; i++ { + leaf, err := syncer.GetInfoByIndex(ctx, uint32(i)) + if err != nil { + log.Infof(prefix+"Leaf %d: error: %s", i, err.Error()) + } else { + log.Infof(prefix+"Leaf %d: %+v", i, leaf) + } + } +} + func TestStressAndReorgs(t *testing.T) { t.Skip("Skipping E2E test, this test is works locally but fails in CI") const ( diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index dbefd50fd..677144721 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -98,6 +98,7 @@ func NewMultidownloadBased( return nil, fmt.Errorf("failed to register l1infotreesync in multidownloader: %w", err) } logger := log.WithFields("syncer", syncerID) + // TODO: move the durations to config file (mdrsync.NewDownloader) downloader := mdrsync.NewDownloader( l1Multidownloader, logger, diff --git a/multidownloader/config.go b/multidownloader/config.go index 442b57b5e..1eec5c1c0 100644 --- a/multidownloader/config.go +++ b/multidownloader/config.go @@ -37,6 +37,8 @@ type Config struct { // PeriodToCheckReorgs is the duration to wait before checking for reorgs // If is 0 reorgs are checked only when a new block appears PeriodToCheckReorgs types.Duration + // DeveloperMode enables developer mode features like forcing reorgs + DeveloperMode bool } const ( diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index 949671da3..be0dff31d 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -140,7 +140,7 @@ func TestE2E(t *testing.T) { Id: big.NewInt(123), Message: "hello world", }, emitterLogs[1]) - timeStart := time.Now() + testData.SimulatedL1.Commit() // Block 3 _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "block 4") require.NoError(t, err) @@ -152,13 +152,13 @@ func TestE2E(t *testing.T) { }) require.NoError(t, err) require.Equal(t, 3, len(logs)) - elapsed := time.Since(timeStart) - logger.Infof("E2E test completed in %s", elapsed.String()) + showChainStatus(t, ctx, logger, testData.SimulatedL1) blk4, err := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(4)) require.NoError(t, err) // Forking at block 3 -> so block 4 will be reorged + // ---------- FORKING POINT ---------------------------------------- forkAt(t, ctx, logger, testData.SimulatedL1, 3) // Now se have to create a longer chain to force reorg diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 239e42559..4ec669700 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -52,6 +52,9 @@ type EVMMultidownloader struct { isRunning bool wg sync.WaitGroup cancel context.CancelFunc + + // Debug fields + debug *EVMMultidownloaderDebug } var _ aggkittypes.MultiDownloaderLegacy = (*EVMMultidownloader)(nil) @@ -93,6 +96,11 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, log.Infof("NewEVMMultidownloader: creating default ReorgProcessor for multidownloader (%s)", name) reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB) } + var debug *EVMMultidownloaderDebug + if cfg.DeveloperMode { + log.Warnf("NewEVMMultidownloader: enabling debug mode for multidownloader (%s)", name) + debug = NewEVMMultidownloaderDebug() + } return &EVMMultidownloader{ log: log, @@ -105,6 +113,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, statistics: NewStatistics(), name: name, reorgProcessor: reorgProcessor, + debug: debug, }, nil } @@ -189,14 +198,19 @@ func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, for _, number := range blocksNumber { rpcBlock, exists := rpcBlocks[number] if !exists { - return fmt.Errorf("detectReorgs: block number %d not found in RPC", number) + return mdrtypes.NewDetectedReorgError(number, + mdrtypes.ReorgDetectionReason_MissingBlock, + common.Hash{}, common.Hash{}, + fmt.Sprintf("detectReorgs: block number %d not found in RPC", number)) } storageBlock, exists := storageBlocks[number] if !exists { return fmt.Errorf("detectReorgs: block number %d not found in storage", number) } if storageBlock.Hash != rpcBlock.Hash { - return mdrtypes.NewDetectedReorgError(storageBlock.Number, storageBlock.Hash, rpcBlock.Hash, + return mdrtypes.NewDetectedReorgError(storageBlock.Number, + mdrtypes.ReorgDetectionReason_BlockHashMismatch, + storageBlock.Hash, rpcBlock.Hash, fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", number, storageBlock.Hash.String(), rpcBlock.Hash.String())) } @@ -255,31 +269,34 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { if err != nil { return err } - // Get synced segments per contract + newState, err := dh.newStateFromStorage() + if err != nil { + return fmt.Errorf("Initialize: error creating new state from storage: %w", err) + } + // What is pending to download? + dh.state = newState + dh.log.Infof("Initialization completed. state: %s", + dh.state.String()) + return nil +} +func (dh *EVMMultidownloader) newStateFromStorage() (*State, error) { syncSegments, err := dh.syncersConfig.SyncSegments() if err != nil { - return err + return nil, err } // Update TargetToBlock from name to real block numbers - err = syncSegments.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) + err = syncSegments.UpdateTargetBlockToNumber(context.Background(), dh.blockNotifierManager) if err != nil { - return fmt.Errorf("Initialize: cannot update TargetToBlock in sync segments: %w", err) + return nil, fmt.Errorf("newStateFromStorage: cannot update TargetToBlock in sync segments: %w", err) } // Get synced segments from storage storageSyncSegments, err := dh.storage.GetSyncedBlockRangePerContract(nil) if err != nil { - return err - } - newState, err := NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) - if err != nil { - return err + return nil, fmt.Errorf("newStateFromStorage: cannot get synced block ranges from storage: %w", err) } - // What is pending to download? - dh.state = newState - dh.log.Infof("Initialization completed. state: %s", - dh.state.String()) - return nil + return NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) } + func (dh *EVMMultidownloader) Start(ctx context.Context) error { dh.mutex.Lock() if dh.isRunning { @@ -310,6 +327,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { return err } } + dh.statistics.StartSyncing() for { // check if context is done @@ -317,8 +335,13 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { dh.log.Infof("EVMMultidownloader.Start: context done, exiting...") return runCtx.Err() } - - err := dh.StartStep(runCtx) + err := dh.debug.GetInjectedStartStepError() + if err != nil { + dh.log.Warnf("EVMMultidownloader.Start: debug forced error set: %s", + err.Error()) + } else { + err = dh.StartStep(runCtx) + } if err != nil { reorgErr := mdrtypes.CastDetectedReorgError(err) if reorgErr == nil { @@ -328,6 +351,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { } dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) for { + dh.mutex.Lock() // check if context is done during reorg processing if runCtx.Err() != nil { dh.log.Infof("EVMMultidownloader.Start: context done during reorg processing, exiting...") @@ -341,6 +365,14 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { time.Sleep(1 * time.Second) continue } + newState, err := dh.newStateFromStorage() + if err != nil { + dh.log.Warnf("Error recreating state after reorg processing: %s", err.Error()) + time.Sleep(1 * time.Second) + continue + } + dh.state = newState + dh.mutex.Unlock() break } } @@ -440,141 +472,101 @@ func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { return nil } -func (dh *EVMMultidownloader) StartStepOld(ctx context.Context) error { - dh.log.Infof("checking unsafe blocks on DB...") - var err error - if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { - return err - } - if err = dh.sync(ctx, dh.StepSafe, "safe"); err != nil { - return err - } - for { - dh.log.Infof("Unsafe sync iteration starting...") - if err = dh.sync(ctx, dh.StepUnsafe, "unsafe"); err != nil { - return err - } - - if err = dh.WaitForNewLatestBlocks(ctx); err != nil { - return err - } - - dh.log.Infof("waiting new checkReorgUntilNewBlock...") - if err = dh.checkReorgUntilNewBlock(ctx); err != nil { - return err - } - } -} - func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error { - latestSyncedBlock := dh.state.GetHighestBlockNumberPendingToSync() - dh.log.Infof("waiting new block (latest>%d)...", latestSyncedBlock) - _, err := dh.waitForNewBlocks(ctx, aggkittypes.LatestBlock, latestSyncedBlock) + latestSyncedBlockNumber, lastSyncedBlockTag := dh.state.GetHighestBlockNumberPendingToSync() + lastBlockHeader, finalized, err := dh.storage.GetBlockHeaderByNumber(nil, latestSyncedBlockNumber) + if err != nil { + return fmt.Errorf("WaitForNewLatestBlocks: cannot get block header for latest synced block %d: %w", + latestSyncedBlockNumber, err) + } + dh.log.Infof("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) + _, err = dh.waitForNewBlocks(ctx, lastSyncedBlockTag, lastBlockHeader, finalized) return err } func (dh *EVMMultidownloader) waitForNewBlocks(ctx context.Context, blockTag aggkittypes.BlockNumberFinality, - latestSyncedBlock uint64) (uint64, error) { + lastBlockHeader *aggkittypes.BlockHeader, + finalized mdrtypes.FinalizedType) (uint64, error) { // TODO: This var dh.cfg.PeriodToCheckReorgs.Duration is the best choice? ticker := time.NewTicker(dh.cfg.PeriodToCheckReorgs.Duration) defer ticker.Stop() dh.log.Debugf("waitForNewBlocks: waiting for new blocks %s after %d. Check each %s...", blockTag.String(), - latestSyncedBlock, + lastBlockHeader.Number, dh.cfg.PeriodToCheckReorgs.String()) for { select { case <-ctx.Done(): dh.log.Info("context cancelled") - return latestSyncedBlock, ctx.Err() + return lastBlockHeader.Number, ctx.Err() case <-ticker.C: - currentBlock, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, blockTag) - if err != nil { - return latestSyncedBlock, fmt.Errorf("WaitForNewBlocks: cannot get current block number: %w", err) + var currentBlock uint64 + var err error + if finalized == mdrtypes.NotFinalized { + // Check reorg + currentHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, &blockTag) + if err != nil { + return lastBlockHeader.Number, fmt.Errorf("WaitForNewBlocks: cannot get current block header: %w", err) + } + dh.log.Debugf("waitForNewBlocks: tag:%s currentHeader.Number=%d, lastBlockHeader.Number=%d checking Hash", + blockTag.String(), currentHeader.Number, lastBlockHeader.Number) + if currentHeader.Number == lastBlockHeader.Number { + if currentHeader.Hash != lastBlockHeader.Hash { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_BlockHashMismatch, + lastBlockHeader.Hash, + currentHeader.Hash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: stored hash %s != current hash %s", + lastBlockHeader.Number, + lastBlockHeader.Hash.String(), + currentHeader.Hash.String())) + } + } + if currentHeader.Number == lastBlockHeader.Number+1 && currentHeader.ParentHash != nil { + if *currentHeader.ParentHash != lastBlockHeader.Hash { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_ParentHashMismatch, + lastBlockHeader.Hash, + *currentHeader.ParentHash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: "+ + "stored hash %s != parent hash %s of new block %d", + lastBlockHeader.Number, + lastBlockHeader.Hash.String(), + currentHeader.ParentHash.String(), + currentHeader.Number)) + } + } + if currentHeader.Number < lastBlockHeader.Number { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_MissingBlock, + lastBlockHeader.Hash, + currentHeader.Hash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: "+ + "current block number %d < last synced block number %d", + lastBlockHeader.Number, + currentHeader.Number, + lastBlockHeader.Number)) + } + currentBlock = currentHeader.Number + } else { + currentBlock, err = dh.blockNotifierManager.GetCurrentBlockNumber(ctx, blockTag) + if err != nil { + return lastBlockHeader.Number, fmt.Errorf("WaitForNewBlocks: cannot get current block number: %w", err) + } } - if currentBlock > latestSyncedBlock { - dh.log.Debugf("waitForNewBlocks: Find new block %d > latestSyncedBlock %d", - currentBlock, latestSyncedBlock) + if currentBlock > lastBlockHeader.Number { + dh.log.Debugf("waitForNewBlocks: Find new block %d > lastBlockHeader.Number %d", + currentBlock, lastBlockHeader.Number) return currentBlock, nil } } } } -// This function check the tip of the chain to prevent any reorg, meanwhile -// wait for a new block to arrive -func (dh *EVMMultidownloader) checkReorgUntilNewBlock(ctx context.Context) error { - initialFinalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) - if err != nil { - return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) - } - lowestBlock, highestBlock, err := dh.storage.GetRangeBlockHeader(nil, mdrtypes.NotFinalized) - if err != nil { - return fmt.Errorf("checkReorgUntilNewBlock: cannot get highest unsafe block: %w", err) - } - if lowestBlock == nil || highestBlock == nil { - dh.log.Infof("checkReorgUntilNewBlock: no unsafe blocks to check for reorgs") - return nil - } - - for { - select { - case <-time.After(dh.cfg.PeriodToCheckReorgs.Duration): - if err := dh.detectReorgs(ctx, []*aggkittypes.BlockHeader{highestBlock}); err != nil { - return fmt.Errorf("checkReorgUntilNewBlock: cannot check reorg on tip block %d: %w", - highestBlock.Number, err) - } - if err := dh.state.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager); err != nil { - return fmt.Errorf("checkReorgUntilNewBlock: cannot update TargetToBlock in pendingSync: %w", err) - } - highestBlockPendingToSync := dh.state.GetHighestBlockNumberPendingToSync() - if highestBlockPendingToSync > highestBlock.Number { - dh.log.Infof("checkReorgUntilNewBlock: new block to sync (old: %d, new: %d), ", - highestBlock.Number, highestBlockPendingToSync) - return nil - } - finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) - if err != nil { - return fmt.Errorf("checkReorgUntilNewBlock: cannot get finalized block number: %w", err) - } - if finalizedBlockNumber != initialFinalizedBlockNumber { - dh.log.Infof("checkReorgUntilNewBlock: finalized block advanced from %d to %d, re-checking reorgs", - initialFinalizedBlockNumber, finalizedBlockNumber) - return nil - } - case <-ctx.Done(): - return fmt.Errorf("checkReorgUntilNewBlock: context done: %w", ctx.Err()) - } - } -} - -// sync is an internal function that executes the given stepFunc until it returns done=true or error -func (dh *EVMMultidownloader) sync(ctx context.Context, - stepFunc func(ctx context.Context) (bool, error), name string) error { - dh.statistics.StartSyncing() - - iteration := 0 - dh.log.Infof("πŸš€πŸš€πŸš€πŸš€πŸš€πŸš€ start syncing %s ...", name) - // Execute steps until done or error - for done, err := stepFunc(ctx); !done; done, err = stepFunc(ctx) { - if err != nil { - dh.log.Warnf("🐞🐞🐞🐞🐞 sync %s fails after %d iterations. err: %w", - name, iteration, err) - return err - } - if ctx.Err() != nil { - dh.log.Infof("🐞🐞🐞🐞🐞 sync %s fails after %d iterations. err: %w", - name, iteration, ctx.Err()) - return ctx.Err() - } - iteration++ - } - dh.log.Infof("πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰πŸŽ‰ sync %s completed after %d iterations.", name, iteration) - dh.statistics.FinishSyncing() - return nil -} - func getBlockNumbers(logs []types.Log) []uint64 { blockNumbers := make(map[uint64]struct{}) result := make([]uint64, 0) diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index 4d8492688..9b62bd148 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -53,3 +53,16 @@ func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) { } return info, nil } + +func (b *EVMMultidownloaderRPC) Reorg(mismatchingBlockNumber uint64) (interface{}, rpc.Error) { + if b.downloader.debug == nil { + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, + "EVMMultidownloaderRPC.ForceReorg: debug is not enabled") + } + b.downloader.debug.ForceRorg(mismatchingBlockNumber) + return struct { + Message string `json:"message"` + }{ + Message: "Reorg forced successfully", + }, nil +} diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 27cdc28af..7466b1a20 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -404,44 +404,12 @@ func TestEVMMultidownloader_StepSafe(t *testing.T) { require.NoError(t, err) require.True(t, finished) - err = testData.mdr.sync(t.Context(), testData.mdr.StepSafe, "safe") - require.NoError(t, err) - require.True(t, finished) - ctx, cancel := context.WithCancel(context.TODO()) cancel() _, err = testData.mdr.StepSafe(ctx) require.ErrorIs(t, err, context.Canceled) } -func TestEVMMultidownloader_sync(t *testing.T) { - testData := newEVMMultidownloaderTestData(t, false) - t.Run("context canceled", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - err := testData.mdr.sync(ctx, func(ctx context.Context) (bool, error) { - return false, nil - }, "test_sync") - require.ErrorIs(t, err, context.Canceled) - }) - t.Run("sync func returns an error", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - returnedErr := fmt.Errorf("sync function error") - err := testData.mdr.sync(ctx, func(ctx context.Context) (bool, error) { - return false, returnedErr - }, "test_sync") - require.ErrorIs(t, err, returnedErr) - }) - - t.Run("sync func finished no errors", func(t *testing.T) { - err := testData.mdr.sync(t.Context(), func(ctx context.Context) (bool, error) { - return true, nil - }, "test_sync") - require.NoError(t, err) - }) -} - func TestEVMMultidownloader_Start(t *testing.T) { t.Run("initialization error is returned", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 77d4264aa..98597e410 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -118,11 +118,11 @@ func (rm *ReorgProcessor) findFirstUnaffectedBlock(ctx context.Context, } data, err := rm.port.GetBlockStorageAndRPC(ctx, tx, currentBlockNumber) if err != nil { - return 0, err + return 0, fmt.Errorf("findFirstUnaffectedBlock: error getting block storage and RPC: %w", err) } match, err := rm.checkBlocks(data) if err != nil { - return 0, err + return 0, fmt.Errorf("findFirstUnaffectedBlock: error checking blocks: %w", err) } if match { // Found the first unaffected block @@ -134,9 +134,14 @@ func (rm *ReorgProcessor) findFirstUnaffectedBlock(ctx context.Context, // checkBlocks compares storage and rpc block headers and returns true if they match func (rm *ReorgProcessor) checkBlocks(blocks *compareBlockHeaders) (bool, error) { - if blocks == nil || blocks.StorageHeader == nil || blocks.RpcHeader == nil { - // Block not in storage, so it is a reorg - return false, fmt.Errorf("checkBlocks bad input data (nil)") + if blocks == nil { + return false, fmt.Errorf("checkBlocks: blocks is nil") + } + if blocks.StorageHeader == nil || blocks.RpcHeader == nil { + // Block not in storage or not in RPC so is a missmatch + rm.log.Warnf("checkBlocks: block %d missing storage=%t and rpc=%t", + blocks.BlockNumber, blocks.ExistsStorageBlock(), blocks.ExistsRPCBlock()) + return false, nil } if blocks.StorageHeader.Number != blocks.RpcHeader.Number { return false, fmt.Errorf("checkBlocks block numbers do not match: storage=%d rpc=%d", diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index c14e2cdb5..fb9556eec 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -5,16 +5,31 @@ import ( "fmt" dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/etherman" mdtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" ) type compareBlockHeaders struct { + BlockNumber uint64 StorageHeader *aggkittypes.BlockHeader IsFinalized mdtypes.FinalizedType RpcHeader *aggkittypes.BlockHeader } +func (c *compareBlockHeaders) ExistsRPCBlock() bool { + if c == nil { + return false + } + return c.RpcHeader != nil +} +func (c *compareBlockHeaders) ExistsStorageBlock() bool { + if c == nil { + return false + } + return c.StorageHeader != nil +} + type ReorgPort struct { ethClient aggkittypes.BaseEthereumClienter rpcClient aggkittypes.RPCClienter @@ -29,13 +44,14 @@ func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querie blockNumber uint64) (*compareBlockHeaders, error) { currentStorageBlock, finalized, err := r.storage.GetBlockHeaderByNumber(tx, blockNumber) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting block in storage: %w", err) } rpcBlock, err := r.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)) - if err != nil { - return nil, err + if err != nil && !etherman.IsErrNotFound(err) { + return nil, fmt.Errorf("error getting block in RPC: %w", err) } return &compareBlockHeaders{ + BlockNumber: blockNumber, StorageHeader: currentStorageBlock, IsFinalized: finalized, RpcHeader: rpcBlock, diff --git a/multidownloader/state.go b/multidownloader/state.go index 82d6f6b33..2bf132272 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -7,6 +7,7 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/etherman/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) @@ -66,7 +67,7 @@ func (s *State) UpdateTargetBlockToNumber(ctx context.Context, blockNotifier typ return s.Pending.UpdateTargetBlockToNumber(ctx, blockNotifier) } -func (s *State) GetHighestBlockNumberPendingToSync() uint64 { +func (s *State) GetHighestBlockNumberPendingToSync() (uint64, aggkittypes.BlockNumberFinality) { return s.Pending.GetHighestBlockNumber() } diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index c3515cbcf..9989a3a13 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -67,6 +67,11 @@ func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtyp reorgData.BlockRangeAffected); err != nil { return 0, fmt.Errorf("InsertNewReorg: error moving reorged blocks to block_reorged: %w", err) } + // Adjust sync_status table to reflect the reorg + err = a.adjustSyncStatusForReorgNoMutex(tx, reorgData) + if err != nil { + return 0, fmt.Errorf("InsertNewReorg: error adjusting sync_status for reorg: %w", err) + } return reorgRow.ChainID, nil } @@ -163,3 +168,41 @@ func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, return reorgData, nil } + +// AdjustSyncStatusForReorg adjusts the sync_status table after a reorg by setting +// synced_to_block to the block before the reorg started for all affected contracts +func (a *MultidownloaderStorage) adjustSyncStatusForReorgNoMutex(tx dbtypes.Querier, + reorgData mdrtypes.ReorgData) error { + if tx == nil { + return fmt.Errorf("AdjustSyncStatusForReorg: require a tx to ensure atomicity") + } + // Calculate the new synced_to_block (one block before the reorg) + var newSyncedToBlock uint64 + if reorgData.BlockRangeAffected.FromBlock > 0 { + newSyncedToBlock = reorgData.BlockRangeAffected.FromBlock - 1 + } else { + newSyncedToBlock = 0 + } + + // Update all contracts that have synced beyond the reorg point + query := `UPDATE sync_status + SET synced_to_block = ? + WHERE synced_to_block >= ?` + + result, err := tx.Exec(query, newSyncedToBlock, reorgData.BlockRangeAffected.FromBlock) + if err != nil { + return fmt.Errorf("AdjustSyncStatusForReorg: error updating sync_status: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("AdjustSyncStatusForReorg: error getting rows affected: %w", err) + } + + a.logger.Infof("AdjustSyncStatusForReorg: adjusted %d contract(s) to synced_to_block=%d "+ + "due to reorg at blocks [%d-%d]", + rowsAffected, newSyncedToBlock, + reorgData.BlockRangeAffected.FromBlock, reorgData.BlockRangeAffected.ToBlock) + + return nil +} diff --git a/multidownloader/sync/download.go b/multidownloader/sync/download.go index 51124d088..4a328f243 100644 --- a/multidownloader/sync/download.go +++ b/multidownloader/sync/download.go @@ -6,7 +6,6 @@ import ( "fmt" "time" - "github.com/agglayer/aggkit/common" aggkitcommon "github.com/agglayer/aggkit/common" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" @@ -69,23 +68,24 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, } maxLogQuery := d.newMaxLogQuery(lastBlockHeader, maxBlocks, syncerConfig) var result *mdrsynctypes.DownloadResult - conditionMet, err := common.PollingWithTimeout(ctx, d.pullingPeriod, d.waitPeriodToCatchUpMaximumLogRange, func() (bool, error) { - var err error - err = d.checkReorgedBlock(ctx, lastBlockHeader) - if err != nil { - return false, err - } - result, err = d.executeLogQuery(ctx, maxLogQuery) - if err != nil { - // The only allowed error is ErrLogsNotAvailable - if errors.Is(err, ErrLogsNotAvailable) { - return false, nil + conditionMet, err := aggkitcommon.PollingWithTimeout(ctx, d.pullingPeriod, + d.waitPeriodToCatchUpMaximumLogRange, func() (bool, error) { + var err error + err = d.checkReorgedBlock(ctx, lastBlockHeader) + if err != nil { + return false, err } - return false, err - } - return true, nil - }) - if errors.Is(err, common.ErrTimeoutReached) { + result, err = d.executeLogQuery(ctx, maxLogQuery) + if err != nil { + // The only allowed error is ErrLogsNotAvailable + if errors.Is(err, ErrLogsNotAvailable) { + return false, nil + } + return false, err + } + return true, nil + }) + if errors.Is(err, aggkitcommon.ErrTimeoutReached) { return nil, fmt.Errorf("Downloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w", maxLogQuery.String(), d.waitPeriodToCatchUpMaximumLogRange.String(), ErrLogsNotAvailable) } @@ -188,7 +188,8 @@ func (d *Downloader) addLastBlockIfNotIncluded(ctx context.Context, if hdr.ParentHash != nil { emptyBlock.ParentHash = *hdr.ParentHash } - d.logger.Debugf("Downloader.addLastBlockIfNotIncluded: adding empty block number %d / %s", + d.logger.Debugf("Downloader.addLastBlockIfNotIncluded: to response %s adding empty block number %d / %s", + responseRange.String(), lastBlockNumber, hdr.Hash.Hex()) result.Data = append(result.Data, emptyBlock) return nil @@ -291,7 +292,7 @@ func (d *Downloader) checkReorgedBlock(ctx context.Context, return fmt.Errorf("reorg data not found for chain ID %d", reorgChainID) } return mdrtypes.NewReorgedError(reorgData.BlockRangeAffected, reorgChainID, - fmt.Sprintf("block number %d is reorged", blockHeader.Number), + fmt.Sprintf("detected at block number %d", blockHeader.Number), ) } return nil diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 5ef28a8e2..19f042b5a 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -56,7 +56,9 @@ reset: lastBlockHeader, d.syncBlockChunkSize, d.syncerConfig) - + if err != nil { + d.logger.Error("error downloading next blocks: ", err) + } if err != nil && mdrtypes.IsReorgedError(err) { err := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)) if err != nil { diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go index a50b365dc..fb5e5bdc5 100644 --- a/multidownloader/types/reorg_error.go +++ b/multidownloader/types/reorg_error.go @@ -8,12 +8,36 @@ import ( "github.com/ethereum/go-ethereum/common" ) +type ReorgDetectionReason int + +const ( + ReorgDetectionReason_BlockHashMismatch ReorgDetectionReason = iota + 1 + ReorgDetectionReason_ParentHashMismatch + ReorgDetectionReason_MissingBlock + ReorgDetectionReason_Forced +) + +func (r ReorgDetectionReason) String() string { + switch r { + case ReorgDetectionReason_BlockHashMismatch: + return "BlockHashMismatch" + case ReorgDetectionReason_ParentHashMismatch: + return "ParentHashMismatch" + case ReorgDetectionReason_MissingBlock: + return "MissingBlock" + case ReorgDetectionReason_Forced: + return "Forced" + } + return fmt.Sprintf("ReorgDetectionReason(%d)", int(r)) +} + // DetectedReorgError is an error that is raised when a reorg is detected // The block is one of the blocks that were reorged, but not necessarily the first one type DetectedReorgError struct { OffendingBlockNumber uint64 // Important: is not the first reorged block, but one of them OldHash common.Hash NewHash common.Hash + ReorgDetectionReason ReorgDetectionReason Message string } @@ -25,18 +49,35 @@ func IsDetectedReorgError(err error) bool { // NewDetectedReorgError creates a new DetectedReorgError func NewDetectedReorgError(offendingBlockNumber uint64, + reason ReorgDetectionReason, oldHash, newHash common.Hash, msg string) *DetectedReorgError { return &DetectedReorgError{ OffendingBlockNumber: offendingBlockNumber, OldHash: oldHash, NewHash: newHash, + ReorgDetectionReason: reason, Message: msg, } } func (e *DetectedReorgError) Error() string { - return fmt.Sprintf("reorgError: block number %d: old hash %s != new hash %s: %s", - e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) + switch e.ReorgDetectionReason { + case ReorgDetectionReason_MissingBlock: + return fmt.Sprintf("reorgError: block number %d is missing: %s", + e.OffendingBlockNumber, e.Message) + case ReorgDetectionReason_BlockHashMismatch: + return fmt.Sprintf("reorgError: block number %d: old hash %s != new hash %s: %s", + e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) + case ReorgDetectionReason_ParentHashMismatch: + return fmt.Sprintf("reorgError: block number %d: old parent hash %s != new parent hash %s: %s", + e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) + case ReorgDetectionReason_Forced: + return fmt.Sprintf("reorgError: block number %d: forced reason: %s", + e.OffendingBlockNumber, e.Message) + default: + return fmt.Sprintf("reorgError: block number %d: reason %d: %s", + e.OffendingBlockNumber, e.ReorgDetectionReason, e.Message) + } } func CastDetectedReorgError(err error) *DetectedReorgError { @@ -47,22 +88,6 @@ func CastDetectedReorgError(err error) *DetectedReorgError { return nil } -// // GetDetectedReorgErrorBlockNumber returns the block number that caused the reorg -// func GetDetectedReorgErrorBlockNumber(err error) uint64 { -// if reorgErr, ok := err.(*DetectedReorgError); ok { -// return reorgErr.BlockNumber -// } -// return 0 -// } - -// // GetDetectedReorgErrorWrappedError returns the wrapped error that caused the reorg -// func GetDetectedReorgErrorWrappedError(err error) error { -// if reorgErr, ok := err.(*DetectedReorgError); ok { -// return reorgErr.Err -// } -// return nil -// } - type ReorgedError struct { Message string BlockRangeReorged aggkitcommon.BlockRange diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 427503229..909df74e9 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -7,6 +7,7 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" ethermantypes "github.com/agglayer/aggkit/etherman/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) @@ -261,17 +262,19 @@ func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uin BlockRange: br, }, nil } -func (f *SetSyncSegment) GetHighestBlockNumber() uint64 { +func (f *SetSyncSegment) GetHighestBlockNumber() (uint64, aggkittypes.BlockNumberFinality) { if f == nil || len(f.segments) == 0 { - return 0 + return 0, aggkittypes.LatestBlock } highest := uint64(0) + finality := aggkittypes.LatestBlock for _, segment := range f.segments { if segment.BlockRange.ToBlock > highest { highest = segment.BlockRange.ToBlock + finality = segment.TargetToBlock } } - return highest + return highest, finality } func (f *SetSyncSegment) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { From b465e584a86711d3a9077f9c5bcd2b811232b928 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 09:39:27 +0100 Subject: [PATCH 16/75] fix: missing file --- multidownloader/evm_multidownloader_debug.go | 47 ++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 multidownloader/evm_multidownloader_debug.go diff --git a/multidownloader/evm_multidownloader_debug.go b/multidownloader/evm_multidownloader_debug.go new file mode 100644 index 000000000..4a4d7ff82 --- /dev/null +++ b/multidownloader/evm_multidownloader_debug.go @@ -0,0 +1,47 @@ +package multidownloader + +import ( + "fmt" + "sync" + + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/ethereum/go-ethereum/common" +) + +type EVMMultidownloaderDebug struct { + mutexDebug sync.Mutex + debugStepForcedReturnError error +} + +func NewEVMMultidownloaderDebug() *EVMMultidownloaderDebug { + return &EVMMultidownloaderDebug{} +} + +func (dh *EVMMultidownloaderDebug) ForceRorg(mismatchingBlockNumber uint64) { + if dh == nil { + return + } + dh.mutexDebug.Lock() + defer dh.mutexDebug.Unlock() + dh.debugStepForcedReturnError = mdrtypes.NewDetectedReorgError( + mismatchingBlockNumber, + mdrtypes.ReorgDetectionReason_BlockHashMismatch, + common.Hash{}, + common.Hash{}, + fmt.Sprintf("ForceRorg: forced reorg at block number %d", mismatchingBlockNumber), + ) +} + +func (dh *EVMMultidownloaderDebug) GetInjectedStartStepError() error { + if dh == nil { + return nil + } + dh.mutexDebug.Lock() + defer dh.mutexDebug.Unlock() + if dh.debugStepForcedReturnError != nil { + err := dh.debugStepForcedReturnError + dh.debugStepForcedReturnError = nil + return err + } + return nil +} From 3261e6318341597f6e75af5c632157d974f80104 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 09:52:22 +0100 Subject: [PATCH 17/75] fix: undo mock changes and add l1infotreesync e2e test check legacy and multidownloader --- agglayer/mocks/mock_agglayer_client.go | 2 +- .../mock_agg_oracle_committee_contract.go | 2 +- aggoracle/mocks/mock_eth_tx_manager.go | 2 +- .../mocks/mock_l2_ger_manager_contract.go | 2 +- .../mock_agg_proof_public_values_querier.go | 2 +- aggsender/mocks/mock_agg_sender_storage.go | 2 +- .../mock_agg_sender_storage_maintainer.go | 2 +- .../mocks/mock_aggchain_fep_rollup_querier.go | 2 +- .../mock_aggchain_proof_client_interface.go | 2 +- aggsender/mocks/mock_aggchain_proof_flow.go | 2 +- .../mocks/mock_aggchain_proof_generation.go | 2 +- .../mocks/mock_aggchain_proof_querier.go | 2 +- .../mocks/mock_agglayer_bridge_l2_reader.go | 2 +- .../mocks/mock_aggsender_builder_flow.go | 2 +- aggsender/mocks/mock_aggsender_flow_baser.go | 2 +- aggsender/mocks/mock_aggsender_interface.go | 2 +- aggsender/mocks/mock_aggsender_storer.go | 2 +- .../mocks/mock_aggsender_validator_client.go | 2 +- .../mocks/mock_aggsender_verifier_flow.go | 2 +- aggsender/mocks/mock_bridge_querier.go | 2 +- aggsender/mocks/mock_certificate_querier.go | 2 +- .../mocks/mock_certificate_send_trigger.go | 2 +- .../mocks/mock_certificate_status_checker.go | 2 +- .../mocks/mock_certificate_trigger_event.go | 2 +- .../mock_certificate_validate_and_signer.go | 2 +- aggsender/mocks/mock_certificate_validator.go | 2 +- aggsender/mocks/mock_chain_ger_reader.go | 2 +- aggsender/mocks/mock_emit_log_func.go | 2 +- aggsender/mocks/mock_fep_contract_querier.go | 2 +- aggsender/mocks/mock_fep_inputs_querier.go | 2 +- aggsender/mocks/mock_ger_querier.go | 2 +- .../mocks/mock_l1_info_tree_data_querier.go | 2 +- aggsender/mocks/mock_l1_info_tree_syncer.go | 2 +- aggsender/mocks/mock_l2_bridge_syncer.go | 2 +- aggsender/mocks/mock_ler_querier.go | 2 +- aggsender/mocks/mock_local_exit_root_query.go | 2 +- aggsender/mocks/mock_logger.go | 2 +- ...k_max_l2_block_number_limiter_interface.go | 2 +- aggsender/mocks/mock_multisig_contract.go | 2 +- aggsender/mocks/mock_multisig_querier.go | 2 +- aggsender/mocks/mock_op_node_clienter.go | 2 +- .../mocks/mock_optimistic_mode_querier.go | 2 +- aggsender/mocks/mock_optimistic_signer.go | 2 +- aggsender/mocks/mock_rollup_data_querier.go | 2 +- ...ck_storage_retain_certificates_policier.go | 2 +- aggsender/mocks/mock_validator_client.go | 2 +- aggsender/mocks/mock_validator_poller.go | 2 +- .../types/mocks/mock_epoch_notifier.go | 2 +- .../mock_l1_info_tree_root_by_leaf_querier.go | 2 +- .../mock_agglayer_manager_upgrade_querier.go | 2 +- bridgeservice/mocks/mock_bridger.go | 2 +- .../mocks/mock_l1_info_tree_syncer.go | 2 +- bridgeservice/mocks/mock_l2_ger_syncer.go | 2 +- bridgesync/mock_bridge_querier.go | 2 +- bridgesync/mocks/mock_reorg_detector.go | 2 +- common/mocks/mock_logger.go | 2 +- common/mocks/mock_pub_sub.go | 2 +- common/types/mocks/mock_retry_handler.go | 2 +- .../mocks/mock_retry_policy_configurer.go | 2 +- .../mocks/mock_compatibility_checker.go | 2 +- .../mocks/mock_compatibility_data_storager.go | 2 +- .../mocks/mock_runtime_data_getter_func.go | 2 +- db/mocks/mock_d_ber.go | 2 +- db/mocks/mock_key_value_storager.go | 2 +- db/mocks/mock_querier.go | 2 +- db/mocks/mock_sql_txer.go | 2 +- db/mocks/mock_txer.go | 2 +- etherman/mocks/mock_op_node_clienter.go | 2 +- .../mocks/mock_rollup_manager_contract.go | 2 +- etherman/types/mocks/mock_block_notifier.go | 2 +- .../mocks/mock_block_notifier_manager.go | 2 +- l1infotreesync/e2e_test.go | 252 ++++++++++-------- l1infotreesync/l1infotreesync.go | 9 +- l1infotreesync/mock_downloader_interface.go | 2 +- l1infotreesync/mock_driver_interface.go | 2 +- l1infotreesync/mock_l1_info_tree_syncer.go | 2 +- l1infotreesync/mocks/mock_reorg_detector.go | 2 +- l2gersync/mocks/mock_l1_info_tree_querier.go | 2 +- .../types/mocks/mock_downloader_interface.go | 2 +- .../mocks/mock_multidownloader_interface.go | 2 +- .../types/mocks/mock_processor_interface.go | 2 +- .../types/mocks/mock_reorg_processor.go | 2 +- multidownloader/types/mocks/mock_storager.go | 2 +- .../types/mocks/mock_storager_for_reorg.go | 2 +- sync/mock_downloader.go | 2 +- sync/mock_evm_downloader_interface.go | 2 +- sync/mock_processor_interface.go | 2 +- sync/mock_reorg_detector.go | 2 +- tree/types/mocks/mock_full_treer.go | 2 +- tree/types/mocks/mock_leaf_writer.go | 2 +- tree/types/mocks/mock_read_treer.go | 2 +- tree/types/mocks/mock_reorganize_treer.go | 2 +- types/mocks/mock_base_ethereum_clienter.go | 2 +- types/mocks/mock_custom_ethereum_clienter.go | 2 +- types/mocks/mock_eth_chain_reader.go | 2 +- types/mocks/mock_eth_clienter.go | 2 +- types/mocks/mock_ethereum_clienter.go | 2 +- types/mocks/mock_multi_downloader_legacy.go | 2 +- types/mocks/mock_rpc_clienter.go | 2 +- 99 files changed, 241 insertions(+), 214 deletions(-) diff --git a/agglayer/mocks/mock_agglayer_client.go b/agglayer/mocks/mock_agglayer_client.go index d3969de91..a14237a31 100644 --- a/agglayer/mocks/mock_agglayer_client.go +++ b/agglayer/mocks/mock_agglayer_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_agg_oracle_committee_contract.go b/aggoracle/mocks/mock_agg_oracle_committee_contract.go index 97586accd..3adaa0220 100644 --- a/aggoracle/mocks/mock_agg_oracle_committee_contract.go +++ b/aggoracle/mocks/mock_agg_oracle_committee_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_eth_tx_manager.go b/aggoracle/mocks/mock_eth_tx_manager.go index 59ffea2f6..a055f9581 100644 --- a/aggoracle/mocks/mock_eth_tx_manager.go +++ b/aggoracle/mocks/mock_eth_tx_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggoracle/mocks/mock_l2_ger_manager_contract.go b/aggoracle/mocks/mock_l2_ger_manager_contract.go index 4708f6062..6d0d55007 100644 --- a/aggoracle/mocks/mock_l2_ger_manager_contract.go +++ b/aggoracle/mocks/mock_l2_ger_manager_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_proof_public_values_querier.go b/aggsender/mocks/mock_agg_proof_public_values_querier.go index 6900d4f4c..67eee292c 100644 --- a/aggsender/mocks/mock_agg_proof_public_values_querier.go +++ b/aggsender/mocks/mock_agg_proof_public_values_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_sender_storage.go b/aggsender/mocks/mock_agg_sender_storage.go index 865f31b33..b0ff20665 100644 --- a/aggsender/mocks/mock_agg_sender_storage.go +++ b/aggsender/mocks/mock_agg_sender_storage.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agg_sender_storage_maintainer.go b/aggsender/mocks/mock_agg_sender_storage_maintainer.go index c5586a08f..ed2e4a384 100644 --- a/aggsender/mocks/mock_agg_sender_storage_maintainer.go +++ b/aggsender/mocks/mock_agg_sender_storage_maintainer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_fep_rollup_querier.go b/aggsender/mocks/mock_aggchain_fep_rollup_querier.go index 8e43575df..c9eeaa16b 100644 --- a/aggsender/mocks/mock_aggchain_fep_rollup_querier.go +++ b/aggsender/mocks/mock_aggchain_fep_rollup_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_client_interface.go b/aggsender/mocks/mock_aggchain_proof_client_interface.go index ef5f61c52..418a64a4d 100644 --- a/aggsender/mocks/mock_aggchain_proof_client_interface.go +++ b/aggsender/mocks/mock_aggchain_proof_client_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_flow.go b/aggsender/mocks/mock_aggchain_proof_flow.go index ef17180d2..a72d1c8e4 100644 --- a/aggsender/mocks/mock_aggchain_proof_flow.go +++ b/aggsender/mocks/mock_aggchain_proof_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_generation.go b/aggsender/mocks/mock_aggchain_proof_generation.go index 45f0b0836..11b5d0c84 100644 --- a/aggsender/mocks/mock_aggchain_proof_generation.go +++ b/aggsender/mocks/mock_aggchain_proof_generation.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggchain_proof_querier.go b/aggsender/mocks/mock_aggchain_proof_querier.go index 0b6bbac88..caebcf74b 100644 --- a/aggsender/mocks/mock_aggchain_proof_querier.go +++ b/aggsender/mocks/mock_aggchain_proof_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go index b3ab28fe2..a97430c9b 100644 --- a/aggsender/mocks/mock_agglayer_bridge_l2_reader.go +++ b/aggsender/mocks/mock_agglayer_bridge_l2_reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_builder_flow.go b/aggsender/mocks/mock_aggsender_builder_flow.go index a18464ba7..2963c3653 100644 --- a/aggsender/mocks/mock_aggsender_builder_flow.go +++ b/aggsender/mocks/mock_aggsender_builder_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_flow_baser.go b/aggsender/mocks/mock_aggsender_flow_baser.go index 06195a0a6..ab816984c 100644 --- a/aggsender/mocks/mock_aggsender_flow_baser.go +++ b/aggsender/mocks/mock_aggsender_flow_baser.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_interface.go b/aggsender/mocks/mock_aggsender_interface.go index be57ec7cb..c7964ecfc 100644 --- a/aggsender/mocks/mock_aggsender_interface.go +++ b/aggsender/mocks/mock_aggsender_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_storer.go b/aggsender/mocks/mock_aggsender_storer.go index 2587c9f66..157b9ce86 100644 --- a/aggsender/mocks/mock_aggsender_storer.go +++ b/aggsender/mocks/mock_aggsender_storer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_validator_client.go b/aggsender/mocks/mock_aggsender_validator_client.go index a04d4fe04..2d653c487 100644 --- a/aggsender/mocks/mock_aggsender_validator_client.go +++ b/aggsender/mocks/mock_aggsender_validator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_aggsender_verifier_flow.go b/aggsender/mocks/mock_aggsender_verifier_flow.go index b42567ed4..31fd954e8 100644 --- a/aggsender/mocks/mock_aggsender_verifier_flow.go +++ b/aggsender/mocks/mock_aggsender_verifier_flow.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_bridge_querier.go b/aggsender/mocks/mock_bridge_querier.go index 9a3571b39..8916837fb 100644 --- a/aggsender/mocks/mock_bridge_querier.go +++ b/aggsender/mocks/mock_bridge_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_querier.go b/aggsender/mocks/mock_certificate_querier.go index ae09b838e..4edcc2512 100644 --- a/aggsender/mocks/mock_certificate_querier.go +++ b/aggsender/mocks/mock_certificate_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_send_trigger.go b/aggsender/mocks/mock_certificate_send_trigger.go index 73a14996b..c9c51c6cc 100644 --- a/aggsender/mocks/mock_certificate_send_trigger.go +++ b/aggsender/mocks/mock_certificate_send_trigger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_status_checker.go b/aggsender/mocks/mock_certificate_status_checker.go index 11a0170f1..5937d264e 100644 --- a/aggsender/mocks/mock_certificate_status_checker.go +++ b/aggsender/mocks/mock_certificate_status_checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_trigger_event.go b/aggsender/mocks/mock_certificate_trigger_event.go index 3774cdc92..5d1497bcc 100644 --- a/aggsender/mocks/mock_certificate_trigger_event.go +++ b/aggsender/mocks/mock_certificate_trigger_event.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_validate_and_signer.go b/aggsender/mocks/mock_certificate_validate_and_signer.go index 19c1e3a06..cd65e1de6 100644 --- a/aggsender/mocks/mock_certificate_validate_and_signer.go +++ b/aggsender/mocks/mock_certificate_validate_and_signer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_certificate_validator.go b/aggsender/mocks/mock_certificate_validator.go index aa2bc5a2e..ca88aa196 100644 --- a/aggsender/mocks/mock_certificate_validator.go +++ b/aggsender/mocks/mock_certificate_validator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_chain_ger_reader.go b/aggsender/mocks/mock_chain_ger_reader.go index 5f1ff9a92..f1fe567ea 100644 --- a/aggsender/mocks/mock_chain_ger_reader.go +++ b/aggsender/mocks/mock_chain_ger_reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_emit_log_func.go b/aggsender/mocks/mock_emit_log_func.go index fd3b744e0..6322e76eb 100644 --- a/aggsender/mocks/mock_emit_log_func.go +++ b/aggsender/mocks/mock_emit_log_func.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_fep_contract_querier.go b/aggsender/mocks/mock_fep_contract_querier.go index f43c86a14..311373bb6 100644 --- a/aggsender/mocks/mock_fep_contract_querier.go +++ b/aggsender/mocks/mock_fep_contract_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_fep_inputs_querier.go b/aggsender/mocks/mock_fep_inputs_querier.go index ac1bc5d9f..2de0ac3c7 100644 --- a/aggsender/mocks/mock_fep_inputs_querier.go +++ b/aggsender/mocks/mock_fep_inputs_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_ger_querier.go b/aggsender/mocks/mock_ger_querier.go index e8b171b73..fe05effd3 100644 --- a/aggsender/mocks/mock_ger_querier.go +++ b/aggsender/mocks/mock_ger_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l1_info_tree_data_querier.go b/aggsender/mocks/mock_l1_info_tree_data_querier.go index 3ddda7b78..56139dc77 100644 --- a/aggsender/mocks/mock_l1_info_tree_data_querier.go +++ b/aggsender/mocks/mock_l1_info_tree_data_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l1_info_tree_syncer.go b/aggsender/mocks/mock_l1_info_tree_syncer.go index 4b9d0c9b4..be3e51cd0 100644 --- a/aggsender/mocks/mock_l1_info_tree_syncer.go +++ b/aggsender/mocks/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_l2_bridge_syncer.go b/aggsender/mocks/mock_l2_bridge_syncer.go index 7fb7a2c84..23fd452c4 100644 --- a/aggsender/mocks/mock_l2_bridge_syncer.go +++ b/aggsender/mocks/mock_l2_bridge_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_ler_querier.go b/aggsender/mocks/mock_ler_querier.go index 5cf500103..7e147e224 100644 --- a/aggsender/mocks/mock_ler_querier.go +++ b/aggsender/mocks/mock_ler_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_local_exit_root_query.go b/aggsender/mocks/mock_local_exit_root_query.go index c59f0e851..10f0e2f39 100644 --- a/aggsender/mocks/mock_local_exit_root_query.go +++ b/aggsender/mocks/mock_local_exit_root_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_logger.go b/aggsender/mocks/mock_logger.go index 569e2a50a..670c84686 100644 --- a/aggsender/mocks/mock_logger.go +++ b/aggsender/mocks/mock_logger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go b/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go index fb0aa2bca..9b6cbe31c 100644 --- a/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go +++ b/aggsender/mocks/mock_max_l2_block_number_limiter_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_multisig_contract.go b/aggsender/mocks/mock_multisig_contract.go index 9e4cd4c4f..fcf43497f 100644 --- a/aggsender/mocks/mock_multisig_contract.go +++ b/aggsender/mocks/mock_multisig_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_multisig_querier.go b/aggsender/mocks/mock_multisig_querier.go index fc96c760b..892573fa9 100644 --- a/aggsender/mocks/mock_multisig_querier.go +++ b/aggsender/mocks/mock_multisig_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_op_node_clienter.go b/aggsender/mocks/mock_op_node_clienter.go index f6c69acc9..7f1dfc2cb 100644 --- a/aggsender/mocks/mock_op_node_clienter.go +++ b/aggsender/mocks/mock_op_node_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_optimistic_mode_querier.go b/aggsender/mocks/mock_optimistic_mode_querier.go index 0be3c286c..a1c60d7ec 100644 --- a/aggsender/mocks/mock_optimistic_mode_querier.go +++ b/aggsender/mocks/mock_optimistic_mode_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_optimistic_signer.go b/aggsender/mocks/mock_optimistic_signer.go index a8a40f88e..bd259d59e 100644 --- a/aggsender/mocks/mock_optimistic_signer.go +++ b/aggsender/mocks/mock_optimistic_signer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_rollup_data_querier.go b/aggsender/mocks/mock_rollup_data_querier.go index 119b0ad17..4e13addc9 100644 --- a/aggsender/mocks/mock_rollup_data_querier.go +++ b/aggsender/mocks/mock_rollup_data_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_storage_retain_certificates_policier.go b/aggsender/mocks/mock_storage_retain_certificates_policier.go index 051b0bea2..8967caa8c 100644 --- a/aggsender/mocks/mock_storage_retain_certificates_policier.go +++ b/aggsender/mocks/mock_storage_retain_certificates_policier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_validator_client.go b/aggsender/mocks/mock_validator_client.go index 0da60d441..0c1a96c47 100644 --- a/aggsender/mocks/mock_validator_client.go +++ b/aggsender/mocks/mock_validator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/mocks/mock_validator_poller.go b/aggsender/mocks/mock_validator_poller.go index 6f403b7f5..4d425d9a6 100644 --- a/aggsender/mocks/mock_validator_poller.go +++ b/aggsender/mocks/mock_validator_poller.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/trigger/types/mocks/mock_epoch_notifier.go b/aggsender/trigger/types/mocks/mock_epoch_notifier.go index 49be1e0de..bfa37e45b 100644 --- a/aggsender/trigger/types/mocks/mock_epoch_notifier.go +++ b/aggsender/trigger/types/mocks/mock_epoch_notifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go b/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go index bbed9de57..135c8c722 100644 --- a/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go +++ b/aggsender/validator/mocks/mock_l1_info_tree_root_by_leaf_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go b/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go index 5858600f3..d66de8bae 100644 --- a/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go +++ b/bridgeservice/mocks/mock_agglayer_manager_upgrade_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_bridger.go b/bridgeservice/mocks/mock_bridger.go index b2304c1cd..180d33c90 100644 --- a/bridgeservice/mocks/mock_bridger.go +++ b/bridgeservice/mocks/mock_bridger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_l1_info_tree_syncer.go b/bridgeservice/mocks/mock_l1_info_tree_syncer.go index 0b788e7cb..648f6ff40 100644 --- a/bridgeservice/mocks/mock_l1_info_tree_syncer.go +++ b/bridgeservice/mocks/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/bridgeservice/mocks/mock_l2_ger_syncer.go b/bridgeservice/mocks/mock_l2_ger_syncer.go index 7cfe30906..07e17c89e 100644 --- a/bridgeservice/mocks/mock_l2_ger_syncer.go +++ b/bridgeservice/mocks/mock_l2_ger_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/bridgesync/mock_bridge_querier.go b/bridgesync/mock_bridge_querier.go index a7941330b..9f0b01f1e 100644 --- a/bridgesync/mock_bridge_querier.go +++ b/bridgesync/mock_bridge_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package bridgesync diff --git a/bridgesync/mocks/mock_reorg_detector.go b/bridgesync/mocks/mock_reorg_detector.go index 24783d874..d50b74a53 100644 --- a/bridgesync/mocks/mock_reorg_detector.go +++ b/bridgesync/mocks/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/common/mocks/mock_logger.go b/common/mocks/mock_logger.go index 569e2a50a..670c84686 100644 --- a/common/mocks/mock_logger.go +++ b/common/mocks/mock_logger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/common/mocks/mock_pub_sub.go b/common/mocks/mock_pub_sub.go index 1eefc7564..5cf4220eb 100644 --- a/common/mocks/mock_pub_sub.go +++ b/common/mocks/mock_pub_sub.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/common/types/mocks/mock_retry_handler.go b/common/types/mocks/mock_retry_handler.go index 29a1d2d98..5e8cd6277 100644 --- a/common/types/mocks/mock_retry_handler.go +++ b/common/types/mocks/mock_retry_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/common/types/mocks/mock_retry_policy_configurer.go b/common/types/mocks/mock_retry_policy_configurer.go index f400153ee..0c8aadfe8 100644 --- a/common/types/mocks/mock_retry_policy_configurer.go +++ b/common/types/mocks/mock_retry_policy_configurer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_compatibility_checker.go b/db/compatibility/mocks/mock_compatibility_checker.go index e5bb2b174..cf3dac66f 100644 --- a/db/compatibility/mocks/mock_compatibility_checker.go +++ b/db/compatibility/mocks/mock_compatibility_checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_compatibility_data_storager.go b/db/compatibility/mocks/mock_compatibility_data_storager.go index bac0731f6..c691e5a39 100644 --- a/db/compatibility/mocks/mock_compatibility_data_storager.go +++ b/db/compatibility/mocks/mock_compatibility_data_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/compatibility/mocks/mock_runtime_data_getter_func.go b/db/compatibility/mocks/mock_runtime_data_getter_func.go index e658e4e09..db6758ec4 100644 --- a/db/compatibility/mocks/mock_runtime_data_getter_func.go +++ b/db/compatibility/mocks/mock_runtime_data_getter_func.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_d_ber.go b/db/mocks/mock_d_ber.go index 48a28f534..76f54538d 100644 --- a/db/mocks/mock_d_ber.go +++ b/db/mocks/mock_d_ber.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_key_value_storager.go b/db/mocks/mock_key_value_storager.go index 357734a8c..611304528 100644 --- a/db/mocks/mock_key_value_storager.go +++ b/db/mocks/mock_key_value_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_querier.go b/db/mocks/mock_querier.go index a3f206a87..5bf4dc754 100644 --- a/db/mocks/mock_querier.go +++ b/db/mocks/mock_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_sql_txer.go b/db/mocks/mock_sql_txer.go index b11f1cf2d..0730d62fb 100644 --- a/db/mocks/mock_sql_txer.go +++ b/db/mocks/mock_sql_txer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/db/mocks/mock_txer.go b/db/mocks/mock_txer.go index e64b72434..75e613ae9 100644 --- a/db/mocks/mock_txer.go +++ b/db/mocks/mock_txer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/etherman/mocks/mock_op_node_clienter.go b/etherman/mocks/mock_op_node_clienter.go index 167e98b50..c30df8b25 100644 --- a/etherman/mocks/mock_op_node_clienter.go +++ b/etherman/mocks/mock_op_node_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/etherman/querier/mocks/mock_rollup_manager_contract.go b/etherman/querier/mocks/mock_rollup_manager_contract.go index 293a3b649..989f2055e 100644 --- a/etherman/querier/mocks/mock_rollup_manager_contract.go +++ b/etherman/querier/mocks/mock_rollup_manager_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/etherman/types/mocks/mock_block_notifier.go b/etherman/types/mocks/mock_block_notifier.go index 69d0e1c24..96cebceb2 100644 --- a/etherman/types/mocks/mock_block_notifier.go +++ b/etherman/types/mocks/mock_block_notifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/etherman/types/mocks/mock_block_notifier_manager.go b/etherman/types/mocks/mock_block_notifier_manager.go index 565d1dce5..6113fb7fd 100644 --- a/etherman/types/mocks/mock_block_notifier_manager.go +++ b/etherman/types/mocks/mock_block_notifier_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index fd75d429b..082cb0929 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -152,29 +152,51 @@ func TestE2E(t *testing.T) { } func TestWithReorgs(t *testing.T) { - ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync.sqlite") - dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg.sqlite") + tests := []struct { + name string + useMultidownloaderForTest bool + }{ + { + name: "with legacy reorgdetector", + useMultidownloaderForTest: false, + }, + { + name: "with new multidownloader", + useMultidownloaderForTest: true, + }, + } - client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() - cfg := l1infotreesync.Config{ - DBPath: dbPathSyncer, - InitialBlock: 0, - SyncBlockChunkSize: 10, - BlockFinality: aggkittypes.LatestBlock, - GlobalExitRootAddr: gerAddr, - RollupManagerAddr: verifyAddr, - RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), - MaxRetryAttemptsAfterError: 25, - RequireStorageContentCompatibility: true, - WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), - } + ctx := context.Background() + suffix := "legacy" + if tt.useMultidownloaderForTest { + suffix = "multidownloader" + } + dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync_"+suffix+".sqlite") + dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg_"+suffix+".sqlite") + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + + cfg := l1infotreesync.Config{ + DBPath: dbPathSyncer, + InitialBlock: 0, + SyncBlockChunkSize: 10, + BlockFinality: aggkittypes.LatestBlock, + GlobalExitRootAddr: gerAddr, + RollupManagerAddr: verifyAddr, + RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), + MaxRetryAttemptsAfterError: 25, + RequireStorageContentCompatibility: true, + WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + } - var syncer *l1infotreesync.L1InfoTreeSync - var err error - var evmMultidownloader *multidownloader.EVMMultidownloader - if useMultidownloaderForTests { + var syncer *l1infotreesync.L1InfoTreeSync + var err error + var evmMultidownloader *multidownloader.EVMMultidownloader + if tt.useMultidownloaderForTest { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") @@ -209,119 +231,121 @@ func TestWithReorgs(t *testing.T) { require.NoError(t, err) require.NoError(t, rd.Start(ctx)) multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - } - go syncer.Start(ctx) - - // Commit block 6 - header, err := client.Client().HeaderByHash(ctx, client.Commit()) - require.NoError(t, err) - reorgFrom := header.Hash() - - // Commit block 7 - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + } + go syncer.Start(ctx) - updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { - // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + // Commit block 6 + header, err := client.Client().HeaderByHash(ctx, client.Commit()) require.NoError(t, err) + reorgFrom := header.Hash() - // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) + // Commit block 7 + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - // Update Rollup Exit Tree - newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) - require.NoError(t, err) - } + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(1, 1) + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) - // Commit block 8 that contains the transaction that updates the trees - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + // Commit block 8 that contains the transaction that updates the trees + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - // Assert L1 Info tree root - expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) - require.NoError(t, err) + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) - require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - // Forking from block 6 - // Note: reorged trx will be added to pending transactions - // and will be committed when the forked block is committed - err = client.Fork(reorgFrom) - require.NoError(t, err) + // Assert L1 Info tree root + expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) - blockNum, err := client.Client().BlockNumber(ctx) - log.Infof("Current block number after fork: %d", blockNum) - require.NoError(t, err) - require.Equal(t, header.Number.Uint64(), blockNum) + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) - // Commit block 7, 8, 9 after the fork - helpers.CommitBlocks(t, client, 5, time.Millisecond*100) + // Forking from block 6 + // Note: reorged trx will be added to pending transactions + // and will be committed when the forked block is committed + err = client.Fork(reorgFrom) + require.NoError(t, err) - // Assert rollup exit root after committing new blocks on the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - // TODO: Remove ths sleep - time.Sleep(time.Second * 1) // wait for syncer to process the reorg - checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + blockNum, err := client.Client().BlockNumber(ctx) + log.Infof("Current block number after fork: %d", blockNum) + require.NoError(t, err) + require.Equal(t, header.Number.Uint64(), blockNum) - lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - log.Infof("Last processed block after reorg: %d", lastProcessedBlock) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + // Commit block 7, 8, 9 after the fork + helpers.CommitBlocks(t, client, 5, time.Millisecond*100) - showLeafs(t, ctx, syncer, "Before second fork: ") + // Assert rollup exit root after committing new blocks on the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) // wait for syncer to process the reorg + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) - // Forking from block 6 again - log.Infof("πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ–– Forking again from block (6) %d", reorgFrom.Hex()) - err = client.Fork(reorgFrom) - require.NoError(t, err) - time.Sleep(time.Millisecond * 500) - // wait for syncer to process the reorg - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 - // TODO: Remove ths sleep - time.Sleep(time.Second * 1) - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(2, 1) - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) - - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + log.Infof("Last processed block after reorg: %d", lastProcessedBlock) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - // Assert rollup exit root after the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - showLeafs(t, ctx, syncer, "After second fork: ") - checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + showLeafs(t, ctx, syncer, "Before second fork: ") - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + // Forking from block 6 again + log.Infof("πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ–– Forking again from block (6) %d", reorgFrom.Hex()) + err = client.Fork(reorgFrom) + require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + // wait for syncer to process the reorg + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) + + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + showLeafs(t, ctx, syncer, "After second fork: ") + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + }) + } } func checkBlocks(t *testing.T, ctx context.Context, rawClient simulated.Client, mdr *multidownloader.EVMMultidownloader, fromBlock, toBlock uint64) { diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 677144721..9fca00dca 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "math/big" - "time" jRPC "github.com/0xPolygon/cdk-rpc/rpc" "github.com/agglayer/aggkit/db" @@ -99,13 +98,17 @@ func NewMultidownloadBased( } logger := log.WithFields("syncer", syncerID) // TODO: move the durations to config file (mdrsync.NewDownloader) + logger.Infof("Creating L1 Info Tree Syncer with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", + cfg.WaitForNewBlocksPeriod.Duration.String(), + cfg.RetryAfterErrorPeriod.Duration.String(), + ) downloader := mdrsync.NewDownloader( l1Multidownloader, logger, rh, appender, - 5*time.Second, - time.Second, + cfg.RetryAfterErrorPeriod.Duration, + cfg.WaitForNewBlocksPeriod.Duration, ) driver := mdrsync.NewEVMDriver(processor, downloader, syncerConfig, diff --git a/l1infotreesync/mock_downloader_interface.go b/l1infotreesync/mock_downloader_interface.go index 5cdbc4b27..dc68fb63a 100644 --- a/l1infotreesync/mock_downloader_interface.go +++ b/l1infotreesync/mock_downloader_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mock_driver_interface.go b/l1infotreesync/mock_driver_interface.go index a9c76d35e..d5f5b4a33 100644 --- a/l1infotreesync/mock_driver_interface.go +++ b/l1infotreesync/mock_driver_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mock_l1_info_tree_syncer.go b/l1infotreesync/mock_l1_info_tree_syncer.go index 3b77c3c79..fa1759b7f 100644 --- a/l1infotreesync/mock_l1_info_tree_syncer.go +++ b/l1infotreesync/mock_l1_info_tree_syncer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mocks/mock_reorg_detector.go b/l1infotreesync/mocks/mock_reorg_detector.go index 836850406..d63339916 100644 --- a/l1infotreesync/mocks/mock_reorg_detector.go +++ b/l1infotreesync/mocks/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/l2gersync/mocks/mock_l1_info_tree_querier.go b/l2gersync/mocks/mock_l1_info_tree_querier.go index 2cec88aee..d3f49b954 100644 --- a/l2gersync/mocks/mock_l1_info_tree_querier.go +++ b/l2gersync/mocks/mock_l1_info_tree_querier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/sync/types/mocks/mock_downloader_interface.go b/multidownloader/sync/types/mocks/mock_downloader_interface.go index d0b7afd2e..ea08278e8 100644 --- a/multidownloader/sync/types/mocks/mock_downloader_interface.go +++ b/multidownloader/sync/types/mocks/mock_downloader_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go index 3c34cb95f..367f2a593 100644 --- a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go +++ b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/sync/types/mocks/mock_processor_interface.go b/multidownloader/sync/types/mocks/mock_processor_interface.go index cdaa5a1ed..fafd625b7 100644 --- a/multidownloader/sync/types/mocks/mock_processor_interface.go +++ b/multidownloader/sync/types/mocks/mock_processor_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go index 15db004ab..e9b869d3e 100644 --- a/multidownloader/types/mocks/mock_reorg_processor.go +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index 101d38134..a1ab40653 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/multidownloader/types/mocks/mock_storager_for_reorg.go b/multidownloader/types/mocks/mock_storager_for_reorg.go index 24cbd600d..74bf29868 100644 --- a/multidownloader/types/mocks/mock_storager_for_reorg.go +++ b/multidownloader/types/mocks/mock_storager_for_reorg.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/sync/mock_downloader.go b/sync/mock_downloader.go index b16babcc7..c197d450b 100644 --- a/sync/mock_downloader.go +++ b/sync/mock_downloader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync diff --git a/sync/mock_evm_downloader_interface.go b/sync/mock_evm_downloader_interface.go index 83b114b54..7b01b3f4a 100644 --- a/sync/mock_evm_downloader_interface.go +++ b/sync/mock_evm_downloader_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync diff --git a/sync/mock_processor_interface.go b/sync/mock_processor_interface.go index e0f285140..96ece8d42 100644 --- a/sync/mock_processor_interface.go +++ b/sync/mock_processor_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync diff --git a/sync/mock_reorg_detector.go b/sync/mock_reorg_detector.go index 3430bcd90..ce220bd56 100644 --- a/sync/mock_reorg_detector.go +++ b/sync/mock_reorg_detector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync diff --git a/tree/types/mocks/mock_full_treer.go b/tree/types/mocks/mock_full_treer.go index 83f82f9c2..91187f9ff 100644 --- a/tree/types/mocks/mock_full_treer.go +++ b/tree/types/mocks/mock_full_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_leaf_writer.go b/tree/types/mocks/mock_leaf_writer.go index a24025876..2d8da5b5a 100644 --- a/tree/types/mocks/mock_leaf_writer.go +++ b/tree/types/mocks/mock_leaf_writer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_read_treer.go b/tree/types/mocks/mock_read_treer.go index 91120ccf6..d9a4da2b2 100644 --- a/tree/types/mocks/mock_read_treer.go +++ b/tree/types/mocks/mock_read_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/tree/types/mocks/mock_reorganize_treer.go b/tree/types/mocks/mock_reorganize_treer.go index d1a26ab91..2f8b51d12 100644 --- a/tree/types/mocks/mock_reorganize_treer.go +++ b/tree/types/mocks/mock_reorganize_treer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_base_ethereum_clienter.go b/types/mocks/mock_base_ethereum_clienter.go index 9ae1aa962..8b5fad3e7 100644 --- a/types/mocks/mock_base_ethereum_clienter.go +++ b/types/mocks/mock_base_ethereum_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_custom_ethereum_clienter.go b/types/mocks/mock_custom_ethereum_clienter.go index 72e41ca93..99cdf3376 100644 --- a/types/mocks/mock_custom_ethereum_clienter.go +++ b/types/mocks/mock_custom_ethereum_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_eth_chain_reader.go b/types/mocks/mock_eth_chain_reader.go index 6d3ab7f20..479a653c2 100644 --- a/types/mocks/mock_eth_chain_reader.go +++ b/types/mocks/mock_eth_chain_reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_eth_clienter.go b/types/mocks/mock_eth_clienter.go index b21942554..e2b42fef2 100644 --- a/types/mocks/mock_eth_clienter.go +++ b/types/mocks/mock_eth_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_ethereum_clienter.go b/types/mocks/mock_ethereum_clienter.go index 0e7cb09a1..5491bc945 100644 --- a/types/mocks/mock_ethereum_clienter.go +++ b/types/mocks/mock_ethereum_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_multi_downloader_legacy.go b/types/mocks/mock_multi_downloader_legacy.go index a8a2928f9..eb5329d35 100644 --- a/types/mocks/mock_multi_downloader_legacy.go +++ b/types/mocks/mock_multi_downloader_legacy.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/types/mocks/mock_rpc_clienter.go b/types/mocks/mock_rpc_clienter.go index 5c99bed91..2d2c63566 100644 --- a/types/mocks/mock_rpc_clienter.go +++ b/types/mocks/mock_rpc_clienter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks From de1683dab12622200c6cfcee86d40902d6f289ae Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:16:42 +0100 Subject: [PATCH 18/75] fix: lint --- l1infotreesync/e2e_test.go | 257 ++++++++++++++++--------------- l1infotreesync/l1infotreesync.go | 4 +- 2 files changed, 131 insertions(+), 130 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 082cb0929..057ec2210 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -152,6 +152,7 @@ func TestE2E(t *testing.T) { } func TestWithReorgs(t *testing.T) { + t.Parallel() tests := []struct { name string useMultidownloaderForTest bool @@ -197,153 +198,153 @@ func TestWithReorgs(t *testing.T) { var err error var evmMultidownloader *multidownloader.EVMMultidownloader if tt.useMultidownloaderForTest { - cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) - cfgMD.Enabled = true - finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") - require.NoError(t, err) - cfgMD.BlockFinality = *finality - cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 100) - cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 100) - evmMultidownloader, err = multidownloader.NewEVMMultidownloader( - log.WithFields("module", "multidownloader"), - cfgMD, - "testMD", - etherman.NewDefaultEthClient(client.Client(), nil, nil), - nil, // rpcClient - nil, // Storage will be created internally - nil, // blockNotifierManager will be created internally - nil, // reorgProcessor will be created internally - ) - require.NoError(t, err) - syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - go func() { - err = evmMultidownloader.Start(ctx) - require.NoError(t, err) - }() - } else { - rdConfig := reorgdetector.Config{ - DBPath: dbPathReorg, - CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), - FinalizedBlock: aggkittypes.FinalizedBlock, - } - rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - } - go syncer.Start(ctx) + cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) + cfgMD.Enabled = true + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") + require.NoError(t, err) + cfgMD.BlockFinality = *finality + cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 100) + cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 100) + evmMultidownloader, err = multidownloader.NewEVMMultidownloader( + log.WithFields("module", "multidownloader"), + cfgMD, + "testMD", + etherman.NewDefaultEthClient(client.Client(), nil, nil), + nil, // rpcClient + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally + ) + require.NoError(t, err) + syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go func() { + err = evmMultidownloader.Start(ctx) + require.NoError(t, err) + }() + } else { + rdConfig := reorgdetector.Config{ + DBPath: dbPathReorg, + CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), + FinalizedBlock: aggkittypes.FinalizedBlock, + } + rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) + require.NoError(t, err) + require.NoError(t, rd.Start(ctx)) + multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + } + go syncer.Start(ctx) - // Commit block 6 - header, err := client.Client().HeaderByHash(ctx, client.Commit()) - require.NoError(t, err) - reorgFrom := header.Hash() + // Commit block 6 + header, err := client.Client().HeaderByHash(ctx, client.Commit()) + require.NoError(t, err) + reorgFrom := header.Hash() - // Commit block 7 - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + // Commit block 7 + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { - // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) - // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) - // Update Rollup Exit Tree - newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) - require.NoError(t, err) - } + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(1, 1) + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) - // Commit block 8 that contains the transaction that updates the trees - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + // Commit block 8 that contains the transaction that updates the trees + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - // Assert L1 Info tree root - expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) - require.NoError(t, err) + // Assert L1 Info tree root + expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) - require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) - // Forking from block 6 - // Note: reorged trx will be added to pending transactions - // and will be committed when the forked block is committed - err = client.Fork(reorgFrom) - require.NoError(t, err) + // Forking from block 6 + // Note: reorged trx will be added to pending transactions + // and will be committed when the forked block is committed + err = client.Fork(reorgFrom) + require.NoError(t, err) - blockNum, err := client.Client().BlockNumber(ctx) - log.Infof("Current block number after fork: %d", blockNum) - require.NoError(t, err) - require.Equal(t, header.Number.Uint64(), blockNum) + blockNum, err := client.Client().BlockNumber(ctx) + log.Infof("Current block number after fork: %d", blockNum) + require.NoError(t, err) + require.Equal(t, header.Number.Uint64(), blockNum) - // Commit block 7, 8, 9 after the fork - helpers.CommitBlocks(t, client, 5, time.Millisecond*100) + // Commit block 7, 8, 9 after the fork + helpers.CommitBlocks(t, client, 5, time.Millisecond*100) - // Assert rollup exit root after committing new blocks on the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - // TODO: Remove ths sleep - time.Sleep(time.Second * 1) // wait for syncer to process the reorg - checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + // Assert rollup exit root after committing new blocks on the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) // wait for syncer to process the reorg + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) - lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - log.Infof("Last processed block after reorg: %d", lastProcessedBlock) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + log.Infof("Last processed block after reorg: %d", lastProcessedBlock) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - showLeafs(t, ctx, syncer, "Before second fork: ") + showLeafs(t, ctx, syncer, "Before second fork: ") - // Forking from block 6 again - log.Infof("πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ–– Forking again from block (6) %d", reorgFrom.Hex()) - err = client.Fork(reorgFrom) - require.NoError(t, err) - time.Sleep(time.Millisecond * 500) - // wait for syncer to process the reorg - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 - // TODO: Remove ths sleep - time.Sleep(time.Second * 1) - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(2, 1) - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) - - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) - - // Assert rollup exit root after the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - showLeafs(t, ctx, syncer, "After second fork: ") - checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + // Forking from block 6 again + log.Infof("πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ––πŸ–– Forking again from block (6) %d", reorgFrom.Hex()) + err = client.Fork(reorgFrom) + require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + // wait for syncer to process the reorg + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 + // TODO: Remove ths sleep + time.Sleep(time.Second * 1) + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) + + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + showLeafs(t, ctx, syncer, "After second fork: ") + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) }) } } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 9fca00dca..c1d5eb7e0 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -99,8 +99,8 @@ func NewMultidownloadBased( logger := log.WithFields("syncer", syncerID) // TODO: move the durations to config file (mdrsync.NewDownloader) logger.Infof("Creating L1 Info Tree Syncer with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", - cfg.WaitForNewBlocksPeriod.Duration.String(), - cfg.RetryAfterErrorPeriod.Duration.String(), + cfg.WaitForNewBlocksPeriod.String(), + cfg.RetryAfterErrorPeriod.String(), ) downloader := mdrsync.NewDownloader( l1Multidownloader, From 684981be0340e87d7a73db29a899961556c3c1e6 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:58:10 +0100 Subject: [PATCH 19/75] fix: ut --- multidownloader/sync/download_test.go | 62 ++++++++++++++------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/multidownloader/sync/download_test.go b/multidownloader/sync/download_test.go index 8c85489f4..9a66954ff 100644 --- a/multidownloader/sync/download_test.go +++ b/multidownloader/sync/download_test.go @@ -294,17 +294,17 @@ func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { ContractAddresses: []common.Address{common.HexToAddress("0x123")}, } - // First call: checkReorgedBlock returns valid + // First call: checkReorgedBlock before PollingWithTimeout (line 65) mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() - // First executeLogQuery: logs not available + // First iteration: PollingWithTimeout calls checkCondition immediately + // This calls checkReorgedBlock (line 74) and executeLogQuery + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() - // Second iteration in retry loop + // Second iteration in polling loop mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() - - // Second executeLogQuery: logs now available mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true).Once() mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ Blocks: []mdrtypes.BlockWithLogs{ @@ -334,6 +334,9 @@ func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { Time: 1100, }, mdrtypes.Finalized, nil).Once() + // Final checkReorgedBlock after PollingWithTimeout completes (line 101) + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) require.NoError(t, err) @@ -371,24 +374,22 @@ func TestDownloadNextBlocks_TimeoutWaitingForLogs(t *testing.T) { ContractAddresses: []common.Address{common.HexToAddress("0x123")}, } - // First call: checkReorgedBlock returns valid + // First call: checkReorgedBlock before PollingWithTimeout (line 65) mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() - // executeLogQuery called twice: once initially (line 40), once in for loop header (line 45) - mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Times(2) - mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Times(2) - - // After timeout breaks the loop, calls final checkReorgedBlock - // which overwrites err. If checkReorgedBlock succeeds, returns result (nil) with nil error + // PollingWithTimeout calls checkCondition multiple times until timeout + // Each call includes checkReorgedBlock and executeLogQuery + // Since timeout is 100ms and polling period is 200ms, it will try only once before timeout mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) - // After timeout and successful checkReorgedBlock, returns empty result with nil error - require.NoError(t, err) - require.NotNil(t, result) - require.Nil(t, result.Data) - require.Equal(t, 100.0, result.PercentComplete) + // After timeout, should return error + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "logs not available") } func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { @@ -420,10 +421,9 @@ func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { ContractAddresses: []common.Address{common.HexToAddress("0x123")}, } - // First call: checkReorgedBlock returns valid - mockMdr.EXPECT().CheckValidBlock(mock.Anything, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() - - // executeLogQuery: logs not available (may be called multiple times depending on timing) + // checkReorgedBlock and executeLogQuery may be called multiple times before context is cancelled + // Using Maybe() to allow flexible number of calls depending on timing + mockMdr.EXPECT().CheckValidBlock(mock.Anything, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Maybe() mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Maybe() mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Maybe() @@ -474,14 +474,15 @@ func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { DetectedAtBlock: 106, } - // First call: checkReorgedBlock returns valid + // First call: checkReorgedBlock before PollingWithTimeout (line 65) mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() - // executeLogQuery called twice: once initially (line 40), once in for loop header (line 45) - mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Times(2) - mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Times(2) + // First iteration: PollingWithTimeout calls checkCondition immediately + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() - // In retry loop after timer fires: reorg detected during checkReorgedBlock + // Second iteration: reorg detected during checkReorgedBlock mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil).Once() mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil).Once() @@ -609,10 +610,11 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { }, ResponseRange: aggkitcommon.BlockRange{FromBlock: 100, ToBlock: 105}, }, nil) + // When using partial query, addLastBlockIfNotIncluded uses responseRange.ToBlock (105) mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ - Number: 110, - Hash: common.HexToHash("0xblock110"), - Time: 2100, + Number: 105, + Hash: common.HexToHash("0xblock105"), + Time: 2050, }, mdrtypes.Finalized, nil) result, err := download.executeLogQuery(ctx, logQuery) @@ -621,7 +623,7 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { require.NotNil(t, result) require.Len(t, result.Data, 2) require.Equal(t, uint64(103), result.Data[0].Num) - require.Equal(t, uint64(110), result.Data[1].Num) + require.Equal(t, uint64(105), result.Data[1].Num) // Last block is from partial response range } func TestExecuteLogQuery_NotAvailable(t *testing.T) { From 06eeddb8309e9445383c31b025a89846e57d7cbf Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 11:44:01 +0100 Subject: [PATCH 20/75] fix: ut --- l1infotreesync/e2e_test.go | 61 ++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 057ec2210..c229a76d2 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -73,19 +73,29 @@ func TestE2E(t *testing.T) { ctx := t.Context() dbPath := path.Join(t.TempDir(), "l1infotreesyncTestE2E.sqlite") - mockReorgDetector := mocks.NewReorgDetectorMock(t) - mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(&reorgdetector.Subscription{}, nil) - mockReorgDetector.EXPECT().AddBlockToTrack(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(aggkittypes.FinalizedBlock).Once() - mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) - client, auth, gerAddr, verifyAddr, gerSc, _ := newSimulatedClient(t) - var multidownloaderClient aggkittypes.MultiDownloaderLegacy + cfg := l1infotreesync.Config{ + DBPath: dbPath, + InitialBlock: 0, + SyncBlockChunkSize: 10, + BlockFinality: aggkittypes.LatestBlock, + GlobalExitRootAddr: gerAddr, + RollupManagerAddr: verifyAddr, + RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), + MaxRetryAttemptsAfterError: 25, + RequireStorageContentCompatibility: true, + WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + } + var syncer *l1infotreesync.L1InfoTreeSync var err error + var evmMultidownloader *multidownloader.EVMMultidownloader if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true - multidownloaderClient, err = multidownloader.NewEVMMultidownloader( + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") + require.NoError(t, err) + cfgMD.BlockFinality = *finality + evmMultidownloader, err = multidownloader.NewEVMMultidownloader( log.WithFields("module", "multidownloader"), cfgMD, "testMD", @@ -96,25 +106,25 @@ func TestE2E(t *testing.T) { nil, // reorgProcessor will be created internally ) require.NoError(t, err) + syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go func() { + err = evmMultidownloader.Start(ctx) + log.Infof("Multidownloader exited with error: %v", err) + //require.NoError(t, err) + }() } else { - multidownloaderClient = sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - } - - cfg := l1infotreesync.Config{ - DBPath: dbPath, - InitialBlock: 0, - SyncBlockChunkSize: 10, - BlockFinality: aggkittypes.LatestBlock, - GlobalExitRootAddr: gerAddr, - RollupManagerAddr: verifyAddr, - RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), - MaxRetryAttemptsAfterError: 25, - RequireStorageContentCompatibility: true, - WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + mockReorgDetector := mocks.NewReorgDetectorMock(t) + mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(&reorgdetector.Subscription{}, nil) + mockReorgDetector.EXPECT().AddBlockToTrack(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(aggkittypes.FinalizedBlock).Once() + mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) + + multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, mockReorgDetector, + l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, mockReorgDetector, - l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) go syncer.Start(ctx) @@ -149,6 +159,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) require.Equal(t, common.Hash(expectedGER), latestGER) } + log.Infof("FINISH TEST OK!!!!!!!!!!!!!!!!!!!!!!") } func TestWithReorgs(t *testing.T) { From c2facc7927b8061f004bd4413c55ff2aa707164a Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 12:44:58 +0100 Subject: [PATCH 21/75] feat: add check DB compatibility for new mdrsync objects --- l1infotreesync/e2e_test.go | 2 +- l1infotreesync/l1infotreesync.go | 29 ++++++- l1infotreesync/processor.go | 10 +-- multidownloader/sync/download.go | 4 + multidownloader/sync/evmdriver.go | 51 +++++++---- multidownloader/sync/types/evm_downloader.go | 1 + .../sync/types/evm_multidownloader.go | 2 + .../types/mocks/mock_downloader_interface.go | 85 ++++++++++++++++--- .../mocks/mock_multidownloader_interface.go | 56 ++++++++++++ 9 files changed, 200 insertions(+), 40 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index c229a76d2..e1d7a3b83 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -109,9 +109,9 @@ func TestE2E(t *testing.T) { syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go func() { + // This always return an error becase at the end of the test the context is cancelled err = evmMultidownloader.Start(ctx) log.Infof("Multidownloader exited with error: %v", err) - //require.NoError(t, err) }() } else { mockReorgDetector := mocks.NewReorgDetectorMock(t) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index c1d5eb7e0..d21568dc3 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -7,6 +7,7 @@ import ( "math/big" jRPC "github.com/0xPolygon/cdk-rpc/rpc" + aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" @@ -51,6 +52,8 @@ type L1InfoTreeSync struct { downloader DownloaderInterface } +type RuntimeData = mdrsync.RuntimeData + func NewReadOnly( ctx context.Context, dbPath string, @@ -111,8 +114,25 @@ func NewMultidownloadBased( cfg.WaitForNewBlocksPeriod.Duration, ) - driver := mdrsync.NewEVMDriver(processor, downloader, syncerConfig, - cfg.SyncBlockChunkSize, rh, logger) + compatibilityChecker := compatibility.NewCompatibilityCheck( + cfg.RequireStorageContentCompatibility, + func(ctx context.Context) (RuntimeData, error) { + chainID, err := downloader.ChainID(ctx) + if err != nil { + return RuntimeData{}, err + } + return RuntimeData{ + ChainID: chainID, + Addresses: addressesToQuery, + }, nil + }, + compatibility.NewKeyValueToCompatibilityStorage[RuntimeData]( + db.NewKeyValueStorage(processor.getDB()), + aggkitcommon.L1INFOTREESYNC, + )) + + driver := mdrsync.NewEVMDriver(logger, processor, downloader, syncerConfig, + cfg.SyncBlockChunkSize, rh, compatibilityChecker) if err != nil { return nil, err } @@ -200,7 +220,10 @@ func New( compatibilityChecker := compatibility.NewCompatibilityCheck( cfg.RequireStorageContentCompatibility, downloader.RuntimeData, - processor) + compatibility.NewKeyValueToCompatibilityStorage[sync.RuntimeData]( + db.NewKeyValueStorage(processor.getDB()), + aggkitcommon.L1INFOTREESYNC, + )) driver, err := sync.NewEVMDriver(reorgDetector, processor, downloader, syncerID, downloadBufferSize, rh, compatibilityChecker) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 6693a95fb..c6f504784 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -9,7 +9,6 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" - "github.com/agglayer/aggkit/db/compatibility" dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/l1infotreesync/migrations" "github.com/agglayer/aggkit/log" @@ -35,7 +34,6 @@ type processor struct { halted bool haltedReason string log *log.Logger - compatibility.CompatibilityDataStorager[sync.RuntimeData] } // UpdateL1InfoTree representation of the UpdateL1InfoTree event @@ -153,13 +151,13 @@ func newProcessor(dbPath string) (*processor, error) { l1InfoTree: tree.NewAppendOnlyTree(database, migrations.L1InfoTreePrefix), rollupExitTree: tree.NewUpdatableTree(database, migrations.RollupExitTreePrefix), log: log.WithFields("processor", "l1infotreesync"), - CompatibilityDataStorager: compatibility.NewKeyValueToCompatibilityStorage[sync.RuntimeData]( - db.NewKeyValueStorage(database), - aggkitcommon.L1INFOTREESYNC, - ), }, nil } +func (p *processor) getDB() *sql.DB { + return p.db +} + // GetLatestL1InfoLeafUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned func (p *processor) GetLatestL1InfoLeafUntilBlock(ctx context.Context, blockNum *uint64) (*L1InfoTreeLeaf, error) { diff --git a/multidownloader/sync/download.go b/multidownloader/sync/download.go index 4a328f243..3f3e90b48 100644 --- a/multidownloader/sync/download.go +++ b/multidownloader/sync/download.go @@ -112,6 +112,10 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, return result, nil } +func (d *Downloader) ChainID(ctx context.Context) (uint64, error) { + return d.mdr.ChainID(ctx) +} + // executeLogQuery executes the log query, checking for partial availability // if there are no logs available returns an error func (d *Downloader) executeLogQuery(ctx context.Context, diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 19f042b5a..c799c61b9 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -5,6 +5,7 @@ import ( "errors" aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db/compatibility" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" "github.com/agglayer/aggkit/sync" @@ -12,34 +13,52 @@ import ( ) type EVMDriver struct { - processor mdrsynctypes.ProcessorInterface - downloader mdrsynctypes.DownloaderInterface - syncerConfig aggkittypes.SyncerConfig - rh *sync.RetryHandler - logger aggkitcommon.Logger - - syncBlockChunkSize uint64 + processor mdrsynctypes.ProcessorInterface + downloader mdrsynctypes.DownloaderInterface + syncerConfig aggkittypes.SyncerConfig + rh *sync.RetryHandler + logger aggkitcommon.Logger + compatibilityChecker compatibility.CompatibilityChecker + syncBlockChunkSize uint64 } -func NewEVMDriver(processor mdrsynctypes.ProcessorInterface, +func NewEVMDriver( + logger aggkitcommon.Logger, + processor mdrsynctypes.ProcessorInterface, downloader mdrsynctypes.DownloaderInterface, syncerConfig aggkittypes.SyncerConfig, syncBlockChunkSize uint64, rh *sync.RetryHandler, - logger aggkitcommon.Logger) *EVMDriver { + compatibilityChecker compatibility.CompatibilityChecker, +) *EVMDriver { return &EVMDriver{ - processor: processor, - downloader: downloader, - syncerConfig: syncerConfig, - syncBlockChunkSize: syncBlockChunkSize, - rh: rh, - logger: logger, + processor: processor, + downloader: downloader, + syncerConfig: syncerConfig, + syncBlockChunkSize: syncBlockChunkSize, + rh: rh, + logger: logger, + compatibilityChecker: compatibilityChecker, } } func (d *EVMDriver) Sync(ctx context.Context) { + var ( + err error + attempts int + ) + reset: - // TODO: Add if err = d.compatibilityChecker.Check(ctx, nil); err != nil { + for { + if err = d.compatibilityChecker.Check(ctx, nil); err != nil { + attempts++ + d.logger.Error("error checking compatibility data between downloader (runtime) and processor (db): ", err) + d.rh.Handle(ctx, "CompatibilityChecker", attempts) + continue + } + break + } + for { if ctx.Err() != nil { d.logger.Info("context cancelled") diff --git a/multidownloader/sync/types/evm_downloader.go b/multidownloader/sync/types/evm_downloader.go index 54f04851b..407e8582f 100644 --- a/multidownloader/sync/types/evm_downloader.go +++ b/multidownloader/sync/types/evm_downloader.go @@ -47,4 +47,5 @@ type DownloaderInterface interface { fromBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig) (*DownloadResult, error) + ChainID(ctx context.Context) (uint64, error) } diff --git a/multidownloader/sync/types/evm_multidownloader.go b/multidownloader/sync/types/evm_multidownloader.go index 618ed779e..da53931ef 100644 --- a/multidownloader/sync/types/evm_multidownloader.go +++ b/multidownloader/sync/types/evm_multidownloader.go @@ -28,4 +28,6 @@ type MultidownloaderInterface interface { number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) StorageHeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error) + // ChainID returns the chain ID of the EVM chain + ChainID(ctx context.Context) (uint64, error) } diff --git a/multidownloader/sync/types/mocks/mock_downloader_interface.go b/multidownloader/sync/types/mocks/mock_downloader_interface.go index ea08278e8..9e017f3e6 100644 --- a/multidownloader/sync/types/mocks/mock_downloader_interface.go +++ b/multidownloader/sync/types/mocks/mock_downloader_interface.go @@ -5,10 +5,11 @@ package mocks import ( context "context" - synctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + aggkittypes "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" - types "github.com/agglayer/aggkit/types" + types "github.com/agglayer/aggkit/multidownloader/sync/types" ) // DownloaderInterface is an autogenerated mock type for the DownloaderInterface type @@ -24,28 +25,84 @@ func (_m *DownloaderInterface) EXPECT() *DownloaderInterface_Expecter { return &DownloaderInterface_Expecter{mock: &_m.Mock} } +// ChainID provides a mock function with given fields: ctx +func (_m *DownloaderInterface) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DownloaderInterface_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type DownloaderInterface_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *DownloaderInterface_Expecter) ChainID(ctx interface{}) *DownloaderInterface_ChainID_Call { + return &DownloaderInterface_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *DownloaderInterface_ChainID_Call) Run(run func(ctx context.Context)) *DownloaderInterface_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DownloaderInterface_ChainID_Call) Return(_a0 uint64, _a1 error) *DownloaderInterface_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DownloaderInterface_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *DownloaderInterface_ChainID_Call { + _c.Call.Return(run) + return _c +} + // DownloadNextBlocks provides a mock function with given fields: ctx, fromBlockHeader, maxBlocks, syncerConfig -func (_m *DownloaderInterface) DownloadNextBlocks(ctx context.Context, fromBlockHeader *types.BlockHeader, maxBlocks uint64, syncerConfig types.SyncerConfig) (*synctypes.DownloadResult, error) { +func (_m *DownloaderInterface) DownloadNextBlocks(ctx context.Context, fromBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig) (*types.DownloadResult, error) { ret := _m.Called(ctx, fromBlockHeader, maxBlocks, syncerConfig) if len(ret) == 0 { panic("no return value specified for DownloadNextBlocks") } - var r0 *synctypes.DownloadResult + var r0 *types.DownloadResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) (*synctypes.DownloadResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) (*types.DownloadResult, error)); ok { return rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) } - if rf, ok := ret.Get(0).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) *synctypes.DownloadResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) *types.DownloadResult); ok { r0 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*synctypes.DownloadResult) + r0 = ret.Get(0).(*types.DownloadResult) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) error); ok { r1 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) } else { r1 = ret.Error(1) @@ -61,26 +118,26 @@ type DownloaderInterface_DownloadNextBlocks_Call struct { // DownloadNextBlocks is a helper method to define mock.On call // - ctx context.Context -// - fromBlockHeader *types.BlockHeader +// - fromBlockHeader *aggkittypes.BlockHeader // - maxBlocks uint64 -// - syncerConfig types.SyncerConfig +// - syncerConfig aggkittypes.SyncerConfig func (_e *DownloaderInterface_Expecter) DownloadNextBlocks(ctx interface{}, fromBlockHeader interface{}, maxBlocks interface{}, syncerConfig interface{}) *DownloaderInterface_DownloadNextBlocks_Call { return &DownloaderInterface_DownloadNextBlocks_Call{Call: _e.mock.On("DownloadNextBlocks", ctx, fromBlockHeader, maxBlocks, syncerConfig)} } -func (_c *DownloaderInterface_DownloadNextBlocks_Call) Run(run func(ctx context.Context, fromBlockHeader *types.BlockHeader, maxBlocks uint64, syncerConfig types.SyncerConfig)) *DownloaderInterface_DownloadNextBlocks_Call { +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Run(run func(ctx context.Context, fromBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig)) *DownloaderInterface_DownloadNextBlocks_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*types.BlockHeader), args[2].(uint64), args[3].(types.SyncerConfig)) + run(args[0].(context.Context), args[1].(*aggkittypes.BlockHeader), args[2].(uint64), args[3].(aggkittypes.SyncerConfig)) }) return _c } -func (_c *DownloaderInterface_DownloadNextBlocks_Call) Return(_a0 *synctypes.DownloadResult, _a1 error) *DownloaderInterface_DownloadNextBlocks_Call { +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Return(_a0 *types.DownloadResult, _a1 error) *DownloaderInterface_DownloadNextBlocks_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DownloaderInterface_DownloadNextBlocks_Call) RunAndReturn(run func(context.Context, *types.BlockHeader, uint64, types.SyncerConfig) (*synctypes.DownloadResult, error)) *DownloaderInterface_DownloadNextBlocks_Call { +func (_c *DownloaderInterface_DownloadNextBlocks_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) (*types.DownloadResult, error)) *DownloaderInterface_DownloadNextBlocks_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go index 367f2a593..9b1977a36 100644 --- a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go +++ b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go @@ -26,6 +26,62 @@ func (_m *MultidownloaderInterface) EXPECT() *MultidownloaderInterface_Expecter return &MultidownloaderInterface_Expecter{mock: &_m.Mock} } +// ChainID provides a mock function with given fields: ctx +func (_m *MultidownloaderInterface) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type MultidownloaderInterface_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *MultidownloaderInterface_Expecter) ChainID(ctx interface{}) *MultidownloaderInterface_ChainID_Call { + return &MultidownloaderInterface_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *MultidownloaderInterface_ChainID_Call) Run(run func(ctx context.Context)) *MultidownloaderInterface_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultidownloaderInterface_ChainID_Call) Return(_a0 uint64, _a1 error) *MultidownloaderInterface_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *MultidownloaderInterface_ChainID_Call { + _c.Call.Return(run) + return _c +} + // CheckValidBlock provides a mock function with given fields: ctx, blockNumber, blockHash func (_m *MultidownloaderInterface) CheckValidBlock(ctx context.Context, blockNumber uint64, blockHash common.Hash) (bool, uint64, error) { ret := _m.Called(ctx, blockNumber, blockHash) From a75787370477e9fe563acb2f507c8a46889770c8 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 12:49:33 +0100 Subject: [PATCH 22/75] feat: add check DB compatibility for new mdrsync objects --- multidownloader/sync/runtimedata.go | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 multidownloader/sync/runtimedata.go diff --git a/multidownloader/sync/runtimedata.go b/multidownloader/sync/runtimedata.go new file mode 100644 index 000000000..0aa58e151 --- /dev/null +++ b/multidownloader/sync/runtimedata.go @@ -0,0 +1,37 @@ +package multidownloader + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +// RuntimeData is the data that is used to check that the DB is compatible with the runtime data +// basically it contains the relevant data from runtime environment +type RuntimeData struct { + ChainID uint64 + Addresses []common.Address +} + +func (r RuntimeData) String() string { + res := fmt.Sprintf("ChainID: %d, Addresses: ", r.ChainID) + for _, addr := range r.Addresses { + res += addr.String() + ", " + } + return res +} + +func (r RuntimeData) IsCompatible(other RuntimeData) error { + if r.ChainID != other.ChainID { + return fmt.Errorf("chain ID mismatch: %d != %d", r.ChainID, other.ChainID) + } + if len(r.Addresses) != len(other.Addresses) { + return fmt.Errorf("addresses len mismatch: %d != %d", len(r.Addresses), len(other.Addresses)) + } + for i, addr := range r.Addresses { + if addr != other.Addresses[i] { + return fmt.Errorf("addresses[%d] mismatch: %s != %s", i, addr.String(), other.Addresses[i].String()) + } + } + return nil +} From 246bb0a2143df012ace3cfcf4f6a45e33b56cb49 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 15:18:46 +0100 Subject: [PATCH 23/75] feat: coverage --- etherman/errors_test.go | 38 +++++++++++++++++++ .../{evm_downloader.go => downloader.go} | 12 ------ ..._multidownloader.go => multidownloader.go} | 0 3 files changed, 38 insertions(+), 12 deletions(-) rename multidownloader/sync/types/{evm_downloader.go => downloader.go} (88%) rename multidownloader/sync/types/{evm_multidownloader.go => multidownloader.go} (100%) diff --git a/etherman/errors_test.go b/etherman/errors_test.go index 91ca6c500..249d32828 100644 --- a/etherman/errors_test.go +++ b/etherman/errors_test.go @@ -1,10 +1,12 @@ package etherman import ( + "errors" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTryParseWithExactMatch(t *testing.T) { @@ -35,3 +37,39 @@ func TestTryParseWithNonExistingErr(t *testing.T) { assert.Nil(t, actualErr) assert.False(t, ok) } + +func TestIsErrNotFound(t *testing.T) { + t.Run("returns false when error is nil", func(t *testing.T) { + result := IsErrNotFound(nil) + require.False(t, result) + }) + + t.Run("returns true when error is ErrNotFound", func(t *testing.T) { + result := IsErrNotFound(ErrNotFound) + require.True(t, result) + }) + + t.Run("returns true when error is wrapped with ErrNotFound", func(t *testing.T) { + wrappedErr := fmt.Errorf("some context: %w", ErrNotFound) + result := IsErrNotFound(wrappedErr) + require.True(t, result) + }) + + t.Run("returns true when error has same message as ErrNotFound", func(t *testing.T) { + sameMessageErr := errors.New("not found") + result := IsErrNotFound(sameMessageErr) + require.True(t, result) + }) + + t.Run("returns false when error is different", func(t *testing.T) { + differentErr := errors.New("some other error") + result := IsErrNotFound(differentErr) + require.False(t, result) + }) + + t.Run("returns false when error message is different", func(t *testing.T) { + differentErr := ErrMissingTrieNode + result := IsErrNotFound(differentErr) + require.False(t, result) + }) +} diff --git a/multidownloader/sync/types/evm_downloader.go b/multidownloader/sync/types/downloader.go similarity index 88% rename from multidownloader/sync/types/evm_downloader.go rename to multidownloader/sync/types/downloader.go index 407e8582f..fd1b1096a 100644 --- a/multidownloader/sync/types/evm_downloader.go +++ b/multidownloader/sync/types/downloader.go @@ -14,18 +14,6 @@ type DownloadResult struct { PercentComplete float64 } -func (d *DownloadResult) AnyUnsafeBlock() bool { - if d == nil || len(d.Data) == 0 { - return false - } - for _, b := range d.Data { - if !b.IsFinalizedBlock { - return true - } - } - return false -} - type DownloaderInterface interface { // DownloadNextBlocks downloads the next blocks starting from fromBlockHeader // up to maxBlocks, according to the syncerConfig diff --git a/multidownloader/sync/types/evm_multidownloader.go b/multidownloader/sync/types/multidownloader.go similarity index 100% rename from multidownloader/sync/types/evm_multidownloader.go rename to multidownloader/sync/types/multidownloader.go From 2c378737eed91f3f28bca8a0bf246c1dd4ead888 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:12:23 +0100 Subject: [PATCH 24/75] feat: coverage --- .../evm_multidownloader_reorg_test.go | 197 ++++++++++++++++++ multidownloader/evm_multidownloader_test.go | 135 ++++++------ multidownloader/sync/evmdriver.go | 110 ++++------ multidownloader/sync/evmdriver_test.go | 114 ++++++++++ types/block_header.go | 7 + 5 files changed, 421 insertions(+), 142 deletions(-) create mode 100644 multidownloader/evm_multidownloader_reorg_test.go create mode 100644 multidownloader/sync/evmdriver_test.go diff --git a/multidownloader/evm_multidownloader_reorg_test.go b/multidownloader/evm_multidownloader_reorg_test.go new file mode 100644 index 000000000..a3ead02b5 --- /dev/null +++ b/multidownloader/evm_multidownloader_reorg_test.go @@ -0,0 +1,197 @@ +package multidownloader + +import ( + "context" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { + t.Run("returns true when block is found and hash matches", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: blockHash, + } + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + + isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.NoError(t, err) + require.True(t, isValid) + require.Equal(t, uint64(0), reorgChainID) + }) + + t.Run("returns error when GetBlockHeaderByNumber fails", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + expectedErr := fmt.Errorf("database error") + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(nil, mdrtypes.NotFinalized, expectedErr).Once() + + isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get BlockHeader") + require.True(t, isValid) + require.Equal(t, uint64(0), reorgChainID) + }) + + t.Run("returns false with chainID when block found in blocks_reorged", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + expectedChainID := uint64(42) + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x5678"), // Different hash + } + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + Return(expectedChainID, true, nil).Once() + + isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.NoError(t, err) + require.False(t, isValid) + require.Equal(t, expectedChainID, reorgChainID) + }) + + t.Run("returns false when block not stored and not in blocks_reorged", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(nil, mdrtypes.NotFinalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + Return(uint64(0), false, nil).Once() + + isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "not found in storage or blocks_reorged") + require.False(t, isValid) + require.Equal(t, uint64(0), reorgChainID) + }) + + t.Run("returns false with chainID when stored block hash does not match", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + reorgChainID := uint64(99) + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0xabcd"), // Different hash + } + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + Return(reorgChainID, true, nil).Once() + + isValid, chainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.NoError(t, err) + require.False(t, isValid) + require.Equal(t, reorgChainID, chainID) + }) + + t.Run("returns error when GetBlockReorgedChainID fails", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x5678"), // Different hash + } + + expectedErr := fmt.Errorf("reorg query error") + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + Return(uint64(0), false, expectedErr).Once() + + isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot check blocks_reorged") + require.True(t, isValid) + require.Equal(t, uint64(0), reorgChainID) + }) +} + +func TestEVMMultidownloader_GetReorgedDataByChainID(t *testing.T) { + t.Run("returns reorg data successfully", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedChainID := uint64(42) + expectedReorgData := &mdrtypes.ReorgData{ + ChainID: expectedChainID, + BlockRangeAffected: aggkitcommon.BlockRange{ + FromBlock: 100, + ToBlock: 200, + }, + DetectedAtBlock: 250, + DetectedTimestamp: 1234567890, + } + + testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + Return(expectedReorgData, nil).Once() + + result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, expectedReorgData.ChainID, result.ChainID) + require.Equal(t, expectedReorgData.BlockRangeAffected, result.BlockRangeAffected) + require.Equal(t, expectedReorgData.DetectedAtBlock, result.DetectedAtBlock) + require.Equal(t, expectedReorgData.DetectedTimestamp, result.DetectedTimestamp) + }) + + t.Run("returns error when storage query fails", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedChainID := uint64(42) + expectedErr := fmt.Errorf("database error") + + testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + Return(nil, expectedErr).Once() + + result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Nil(t, result) + }) + + t.Run("returns nil when chainID not found", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedChainID := uint64(999) + + testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + Return(nil, nil).Once() + + result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + + require.NoError(t, err) + require.Nil(t, result) + }) +} diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 7466b1a20..56e2ccc5c 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "os" "sync" "sync/atomic" "testing" @@ -12,37 +13,51 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" "github.com/agglayer/aggkit/db" + "github.com/agglayer/aggkit/etherman" mockethermantypes "github.com/agglayer/aggkit/etherman/types/mocks" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" + mdrsync "github.com/agglayer/aggkit/multidownloader/sync" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" + aggkitsync "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" mocktypes "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -// Imports below are only used in skipped tests but need to remain commented to avoid import cycles: -// jRPC "github.com/0xPolygon/cdk-rpc/rpc" -// "github.com/agglayer/aggkit/etherman" -// "github.com/agglayer/aggkit/l1infotreesync" -// "github.com/agglayer/aggkit/reorgdetector" -// aggkitsync "github.com/agglayer/aggkit/sync" -// "github.com/ethereum/go-ethereum/ethclient" -// "github.com/ethereum/go-ethereum/rpc" -// "os" - -// Commented out constants only used in skipped tests -// const runL1InfoTree = true -// const l1InfoTreeUseMultidownloader = true -// const storagePath = "../tmp/ut/" +const storagePath = "../tmp/ut/" +const runASyncer = true + +type testProcessor struct { + lastBlock *aggkittypes.BlockHeader +} + +func (tp *testProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + return tp.lastBlock, nil +} +func (tp *testProcessor) ProcessBlock(ctx context.Context, block aggkitsync.Block) error { + log.Infof("PROCESSOR: Processing block number %d", block.Num) + tp.lastBlock = &aggkittypes.BlockHeader{ + Number: block.Num, + Hash: block.Hash, + } + return nil +} +func (tp *testProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + log.Infof("PROCESSOR: Reorg from block number %d", firstReorgedBlock) + return nil +} func TestEVMMultidownloader(t *testing.T) { - t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") - /* Commented out to avoid import cycles + //t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") + cfgLog := log.Config{ Environment: "development", Level: "info", @@ -73,7 +88,7 @@ func TestEVMMultidownloader(t *testing.T) { WaitPeriodToCheckCatchUp: types.NewDuration(time.Second), PeriodToCheckReorgs: types.NewDuration(time.Second * 10), } - var rpcServices []jRPC.Service + mdr, err := NewEVMMultidownloader(logger, cfg, "l1", ethClient, ethRPCClient, db, nil, nil) @@ -89,64 +104,36 @@ func TestEVMMultidownloader(t *testing.T) { ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) - rpcServices = append(rpcServices, mdr.GetRPCServices()...) + ctx := context.TODO() - var l1infotree *l1infotreesync.L1InfoTreeSync - if runL1InfoTree == true { - var multidownloader aggkittypes.MultiDownloaderLegacy - var dbPath string - if l1InfoTreeUseMultidownloader { - multidownloader = mdr - dbPath = storagePath + "l1infotree_md.sqlite" - } else { - multidownloader = aggkitsync.NewAdapterEthClientToMultidownloader(ethClient) - dbPath = storagePath + "l1infotree_eth.sqlite" - } - reorgDetector, err := reorgdetector.New(ethClient, reorgdetector.Config{ - DBPath: storagePath + "l1_reorgdetector.sqlite", - CheckReorgsInterval: types.NewDuration(time.Second * 10), - FinalizedBlock: aggkittypes.FinalizedBlock, - }, reorgdetector.L1) - require.NoError(t, err) - l1infotree, err = l1infotreesync.New( - ctx, - l1infotreesync.Config{ - DBPath: dbPath, - InitialBlock: 5157574, - GlobalExitRootAddr: common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), - RollupManagerAddr: common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), - SyncBlockChunkSize: 6500, - WaitForNewBlocksPeriod: types.Duration{ - Duration: 5 * time.Second, - }, - BlockFinality: aggkittypes.FinalizedBlock, - }, - multidownloader, - reorgDetector, - // l1infotreesync.FlagStopOnFinalizedBlockReached, - l1infotreesync.FlagNone, + var syncer *mdrsync.EVMDriver + if runASyncer == true { + logger := log.WithFields("syncer", "test") + rh := &aggkitsync.RetryHandler{ + RetryAfterErrorPeriod: time.Second, + MaxRetryAttemptsAfterError: 0, + } + downloader := mdrsync.NewDownloader( + mdr, + logger, + rh, + nil, // appender, + time.Second, + time.Second, ) - require.NoError(t, err) - rpcServices = append(rpcServices, l1infotree.GetRPCServices()...) - } - if len(rpcServices) > 0 { - log.Infof("Registering %d RPC services", len(rpcServices)) - logger := log.WithFields("module", "RPC") - jRPCServer := jRPC.NewServer( - jRPC.Config{ - Host: "127.0.0.1", - Port: 5576, - MaxRequestsPerIPAndSecond: 10000.0, + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "l1infotree_syncer_test", + ContractAddresses: []common.Address{ + common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), // GlobalExitRootAddr + common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), // RollupManager }, - rpcServices, - jRPC.WithLogger(logger.GetSugaredLogger()), - ) - go func() { - if err := jRPCServer.Start(); err != nil { - log.Fatal(err) - } - }() + FromBlock: 5157574, + ToBlock: aggkittypes.LatestBlock, + } + processor := &testProcessor{} + syncer = mdrsync.NewEVMDriver(logger, processor, downloader, syncerConfig, + 100, rh, nil) } var wg sync.WaitGroup @@ -166,14 +153,14 @@ func TestEVMMultidownloader(t *testing.T) { defer wg.Done() timer := aggkitcommon.TimeTracker{} timer.Start() - if l1infotree != nil { - l1infotree.Start(t.Context()) + if syncer != nil { + syncer.Sync(t.Context()) } timer.Stop() log.Infof("L1InfoTree sync finished in %s", timer.String()) }() wg.Wait() - */ + } func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index c799c61b9..87fbc2f21 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -3,6 +3,7 @@ package multidownloader import ( "context" "errors" + "fmt" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db/compatibility" @@ -43,72 +44,60 @@ func NewEVMDriver( } func (d *EVMDriver) Sync(ctx context.Context) { - var ( - err error - attempts int - ) - -reset: - for { - if err = d.compatibilityChecker.Check(ctx, nil); err != nil { - attempts++ - d.logger.Error("error checking compatibility data between downloader (runtime) and processor (db): ", err) - d.rh.Handle(ctx, "CompatibilityChecker", attempts) - continue - } - break - } - + attempts := 0 for { if ctx.Err() != nil { d.logger.Info("context cancelled") return } - lastBlockHeader := d.getLastProcessedBlock(ctx) - if lastBlockHeader == nil { - d.logger.Info("no last processed block found, starting from beginning") - } else { - d.logger.Infof("EVMDriver.Sync: starting sync from last processed block: %d", lastBlockHeader.Number) + if err := d.syncStep(ctx); err != nil { + attempts++ + d.logger.Error("error during syncing ", err) + d.rh.Handle(ctx, "Sync", attempts) + continue } + } +} - blocks, err := d.downloader.DownloadNextBlocks(ctx, - lastBlockHeader, - d.syncBlockChunkSize, - d.syncerConfig) - if err != nil { - d.logger.Error("error downloading next blocks: ", err) - } - if err != nil && mdrtypes.IsReorgedError(err) { - err := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)) - if err != nil { - d.logger.Error("error handling reorg: ", err) - d.rh.Handle(ctx, "Sync", 0) - continue - } - goto reset +func (d *EVMDriver) syncStep(ctx context.Context) error { + if d.compatibilityChecker != nil { + if err := d.compatibilityChecker.Check(ctx, nil); err != nil { + err := fmt.Errorf("EVMDriver: error checking compatibility data between downloader (runtime) and processor (db): %w", err) + return err } + d.compatibilityChecker = nil // only check once per Sync run + } - if err != nil && !errors.Is(err, ErrLogsNotAvailable) { - d.logger.Error("error downloading next blocks: ", err) - d.rh.Handle(ctx, "Sync", 0) - continue - } - if errors.Is(err, ErrLogsNotAvailable) { - // No logs available yet, wait and retry - d.logger.Debugf("no logs available yet, waiting to retry") - d.rh.Handle(ctx, "Sync", 0) - continue - } - err = d.ProcessBlocks(ctx, blocks) - if err != nil { - d.logger.Error("error processing blocks: ", err) - d.rh.Handle(ctx, "Sync", 0) - continue + lastBlockHeader, err := d.processor.GetLastProcessedBlockHeader(ctx) + if err != nil { + return fmt.Errorf("EVMDriver: error getting last processed block from processor: %w", err) + } + d.logger.Infof("EVMDriver: starting sync from last processed block: %s", lastBlockHeader.Brief()) + blocks, err := d.downloader.DownloadNextBlocks(ctx, + lastBlockHeader, + d.syncBlockChunkSize, + d.syncerConfig) + + if err != nil { + switch { + case mdrtypes.IsReorgedError(err): + if reorgErr := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)); reorgErr != nil { + return fmt.Errorf("EVMDriver: error handling reorg: %w", reorgErr) + } + // Reorg processed + return nil + case errors.Is(err, ErrLogsNotAvailable): + d.logger.Debug("EVMDriver: no logs available yet, waiting to retry") + return nil } } + if err = d.processBlocks(ctx, blocks); err != nil { + return fmt.Errorf("EVMDriver: error processing blocks: %w", err) + } + return nil } -func (d *EVMDriver) ProcessBlocks(ctx context.Context, b *mdrsynctypes.DownloadResult) error { +func (d *EVMDriver) processBlocks(ctx context.Context, b *mdrsynctypes.DownloadResult) error { if b == nil || len(b.Data) == 0 { return nil } @@ -139,21 +128,6 @@ func (d *EVMDriver) handleReorg(ctx context.Context, err *mdrtypes.ReorgedError) }) } -func (d *EVMDriver) getLastProcessedBlock(ctx context.Context) *aggkittypes.BlockHeader { - attempts := 0 - for { - // TODO: Case header == nil -> ? - header, err := d.processor.GetLastProcessedBlockHeader(ctx) - if err != nil { - attempts++ - d.logger.Error("error getting last processed block: ", err) - d.rh.Handle(ctx, "Sync", attempts) - continue - } - return header - } -} - // withRetry is a helper wrapper function that invokes the fn callback on failed attempts func (d *EVMDriver) withRetry(ctx context.Context, opName string, fn func() error) error { attempts := 0 diff --git a/multidownloader/sync/evmdriver_test.go b/multidownloader/sync/evmdriver_test.go new file mode 100644 index 000000000..e066eb231 --- /dev/null +++ b/multidownloader/sync/evmdriver_test.go @@ -0,0 +1,114 @@ +package multidownloader + +import ( + "errors" + "testing" + "time" + + "github.com/agglayer/aggkit/common" + aggkitcommon "github.com/agglayer/aggkit/common" + compatibilityMocks "github.com/agglayer/aggkit/db/compatibility/mocks" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader/sync/types/mocks" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type evmDriverTestData struct { + driver *EVMDriver + mockProcessor *mocks.ProcessorInterface + mockDownloader *mocks.DownloaderInterface + mockCompatibilityChecker *compatibilityMocks.CompatibilityChecker + syncerConfig aggkittypes.SyncerConfig + logger common.Logger + rh *sync.RetryHandler +} + +func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *evmDriverTestData { + mockProcessor := mocks.NewProcessorInterface(t) + mockDownloader := mocks.NewDownloaderInterface(t) + mockCompatibilityChecker := compatibilityMocks.NewCompatibilityChecker(t) + syncerConfig := aggkittypes.SyncerConfig{} + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: time.Second, + MaxRetryAttemptsAfterError: 0, + } + if compatibilityCheckExpectations { + mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(nil).Maybe() + } + driver := NewEVMDriver( + logger, + mockProcessor, + mockDownloader, + syncerConfig, + 100, + rh, + mockCompatibilityChecker, + ) + require.NotNil(t, driver) + return &evmDriverTestData{ + driver: driver, + mockProcessor: mockProcessor, + mockDownloader: mockDownloader, + mockCompatibilityChecker: mockCompatibilityChecker, + syncerConfig: syncerConfig, + logger: logger, + rh: rh, + } +} + +func TestNewEVMDriver(t *testing.T) { + t.Run("fail compatibility check", func(t *testing.T) { + testData := newEVMDriverTestData(t, false) + expectedErr := errors.New("compatibility check failed") + testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(expectedErr).Once() + ctx := t.Context() + err := testData.driver.syncStep(ctx) + require.ErrorIs(t, err, expectedErr) + }) + + t.Run("compatibility check it's only executed 1 time", func(t *testing.T) { + testData := newEVMDriverTestData(t, false) + expectedErr := errors.New("compatibility check failed") + testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(expectedErr).Once() + ctx := t.Context() + err := testData.driver.syncStep(ctx) + require.ErrorIs(t, err, expectedErr) + // This round the compatibility check is called because the previous one failed + testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(nil).Once() + testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once() + testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once() + err = testData.driver.syncStep(ctx) + require.NoError(t, err) + // This round the compatibility check should not be executed again + testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once() + testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once() + err = testData.driver.syncStep(ctx) + require.NoError(t, err) + }) + + t.Run("DownloadNextBlocks returns ErrLogsNotAvailable", func(t *testing.T) { + testData := newEVMDriverTestData(t, true) + testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once() + testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, ErrLogsNotAvailable).Once() + err := testData.driver.syncStep(t.Context()) + require.NoError(t, err) + }) + + t.Run("DownloadNextBlocks returns ReorgedError", func(t *testing.T) { + testData := newEVMDriverTestData(t, true) + expectedErr := mdrtypes.NewReorgedError(aggkitcommon.NewBlockRange(10, 20), 20, "test") + testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once() + testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, expectedErr).Once() + testData.mockProcessor.EXPECT().Reorg(mock.Anything, uint64(10)).Return(nil).Once() + err := testData.driver.syncStep(t.Context()) + require.NoError(t, err) + }) + +} diff --git a/types/block_header.go b/types/block_header.go index 43a891f74..f491c3f3d 100644 --- a/types/block_header.go +++ b/types/block_header.go @@ -16,6 +16,13 @@ type BlockHeader struct { RequestedBlock *BlockNumberFinality } +func (gb *BlockHeader) Brief() string { + if gb == nil { + return "" + } + return fmt.Sprintf("BlockHeader{Number: %d, Hash: %s}", gb.Number, gb.Hash.Hex()) +} + func NewBlockHeader(number uint64, hash common.Hash, time uint64, parentHash *common.Hash) *BlockHeader { return &BlockHeader{ Number: number, From b62c62100b228f82dfdde0feeb552c6f06240cb0 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:26:27 +0100 Subject: [PATCH 25/75] feat: ut --- multidownloader/evm_multidownloader_test.go | 4 +--- multidownloader/sync/evmdriver.go | 5 +++-- multidownloader/sync/evmdriver_test.go | 5 ++--- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 56e2ccc5c..107a7f20a 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -21,7 +21,6 @@ import ( mdrtypes "github.com/agglayer/aggkit/multidownloader/types" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkitsync "github.com/agglayer/aggkit/sync" - aggkittypes "github.com/agglayer/aggkit/types" mocktypes "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum/common" @@ -56,7 +55,7 @@ func (tp *testProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) er } func TestEVMMultidownloader(t *testing.T) { - //t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") + t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") cfgLog := log.Config{ Environment: "development", @@ -160,7 +159,6 @@ func TestEVMMultidownloader(t *testing.T) { log.Infof("L1InfoTree sync finished in %s", timer.String()) }() wg.Wait() - } func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 87fbc2f21..c7e8d01fa 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -62,10 +62,11 @@ func (d *EVMDriver) Sync(ctx context.Context) { func (d *EVMDriver) syncStep(ctx context.Context) error { if d.compatibilityChecker != nil { if err := d.compatibilityChecker.Check(ctx, nil); err != nil { - err := fmt.Errorf("EVMDriver: error checking compatibility data between downloader (runtime) and processor (db): %w", err) + err := fmt.Errorf("EVMDriver: error checking compatibility data between downloader (runtime)"+ + " and processor (db): %w", err) return err } - d.compatibilityChecker = nil // only check once per Sync run + d.compatibilityChecker = nil // only check once } lastBlockHeader, err := d.processor.GetLastProcessedBlockHeader(ctx) diff --git a/multidownloader/sync/evmdriver_test.go b/multidownloader/sync/evmdriver_test.go index e066eb231..7b296820b 100644 --- a/multidownloader/sync/evmdriver_test.go +++ b/multidownloader/sync/evmdriver_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/agglayer/aggkit/common" aggkitcommon "github.com/agglayer/aggkit/common" compatibilityMocks "github.com/agglayer/aggkit/db/compatibility/mocks" "github.com/agglayer/aggkit/log" @@ -23,11 +22,12 @@ type evmDriverTestData struct { mockDownloader *mocks.DownloaderInterface mockCompatibilityChecker *compatibilityMocks.CompatibilityChecker syncerConfig aggkittypes.SyncerConfig - logger common.Logger + logger aggkitcommon.Logger rh *sync.RetryHandler } func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *evmDriverTestData { + t.Helper() mockProcessor := mocks.NewProcessorInterface(t) mockDownloader := mocks.NewDownloaderInterface(t) mockCompatibilityChecker := compatibilityMocks.NewCompatibilityChecker(t) @@ -110,5 +110,4 @@ func TestNewEVMDriver(t *testing.T) { err := testData.driver.syncStep(t.Context()) require.NoError(t, err) }) - } From eaa6d2b4c6925e993cf209639fb36f5f5db00c79 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:40:07 +0100 Subject: [PATCH 26/75] fix: coverage --- .../types/log_query_response_test.go | 311 ++++++++++++++++++ multidownloader/types/reorg_data_test.go | 24 ++ multidownloader/types/reorg_error_test.go | 309 +++++++++++++++++ 3 files changed, 644 insertions(+) create mode 100644 multidownloader/types/log_query_response_test.go create mode 100644 multidownloader/types/reorg_data_test.go create mode 100644 multidownloader/types/reorg_error_test.go diff --git a/multidownloader/types/log_query_response_test.go b/multidownloader/types/log_query_response_test.go new file mode 100644 index 000000000..4be7c0640 --- /dev/null +++ b/multidownloader/types/log_query_response_test.go @@ -0,0 +1,311 @@ +package types + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestLogQueryResponse_CountLogs_Nil(t *testing.T) { + var lqr *LogQueryResponse + count := lqr.CountLogs() + require.Equal(t, 0, count) +} + +func TestLogQueryResponse_CountLogs_EmptyBlocks(t *testing.T) { + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{}, + ResponseRange: aggkitcommon.NewBlockRange(100, 200), + UnsafeRange: aggkitcommon.NewBlockRange(0, 0), + } + count := lqr.CountLogs() + require.Equal(t, 0, count) +} + +func TestLogQueryResponse_CountLogs_SingleBlockWithLogs(t *testing.T) { + parentHash := common.HexToHash("0x1234") + block := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 0, + BlockTimestamp: 1234567890, + Index: 0, + Removed: false, + }, + { + Address: common.HexToAddress("0x2222"), + Topics: []common.Hash{common.HexToHash("0x9abc")}, + Data: []byte("data2"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 1, + BlockTimestamp: 1234567890, + Index: 1, + Removed: false, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block}, + ResponseRange: aggkitcommon.NewBlockRange(100, 100), + UnsafeRange: aggkitcommon.NewBlockRange(0, 0), + } + + count := lqr.CountLogs() + require.Equal(t, 2, count) +} + +func TestLogQueryResponse_CountLogs_MultipleBlocksWithLogs(t *testing.T) { + parentHash1 := common.HexToHash("0x1234") + parentHash2 := common.HexToHash("0x5678") + + block1 := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 0, + BlockTimestamp: 1234567890, + Index: 0, + Removed: false, + }, + { + Address: common.HexToAddress("0x2222"), + Topics: []common.Hash{common.HexToHash("0x9abc")}, + Data: []byte("data2"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 1, + BlockTimestamp: 1234567890, + Index: 1, + Removed: false, + }, + }, + } + + block2 := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), + IsFinal: false, + Logs: []Log{ + { + Address: common.HexToAddress("0x3333"), + Topics: []common.Hash{common.HexToHash("0xaaa")}, + Data: []byte("data3"), + BlockNumber: 101, + TxHash: common.HexToHash("0xghi"), + TxIndex: 0, + BlockTimestamp: 1234567900, + Index: 0, + Removed: false, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block1, block2}, + ResponseRange: aggkitcommon.NewBlockRange(100, 101), + UnsafeRange: aggkitcommon.NewBlockRange(101, 101), + } + + count := lqr.CountLogs() + require.Equal(t, 3, count) +} + +func TestLogQueryResponse_CountLogs_MixedBlocks(t *testing.T) { + parentHash1 := common.HexToHash("0x1234") + parentHash2 := common.HexToHash("0x5678") + parentHash3 := common.HexToHash("0x9abc") + + blockWithLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 0, + BlockTimestamp: 1234567890, + Index: 0, + Removed: false, + }, + }, + } + + blockWithoutLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), + IsFinal: true, + Logs: []Log{}, + } + + blockWithMultipleLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(102, common.HexToHash("0xghi"), 1234567910, &parentHash3), + IsFinal: false, + Logs: []Log{ + { + Address: common.HexToAddress("0x2222"), + Topics: []common.Hash{common.HexToHash("0xaaa")}, + Data: []byte("data2"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + TxIndex: 0, + BlockTimestamp: 1234567910, + Index: 0, + Removed: false, + }, + { + Address: common.HexToAddress("0x3333"), + Topics: []common.Hash{common.HexToHash("0xbbb")}, + Data: []byte("data3"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + TxIndex: 1, + BlockTimestamp: 1234567910, + Index: 1, + Removed: false, + }, + { + Address: common.HexToAddress("0x4444"), + Topics: []common.Hash{common.HexToHash("0xccc")}, + Data: []byte("data4"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + TxIndex: 2, + BlockTimestamp: 1234567910, + Index: 2, + Removed: true, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{blockWithLogs, blockWithoutLogs, blockWithMultipleLogs}, + ResponseRange: aggkitcommon.NewBlockRange(100, 102), + UnsafeRange: aggkitcommon.NewBlockRange(102, 102), + } + + count := lqr.CountLogs() + require.Equal(t, 4, count) +} + +func TestLogQueryResponse_CountLogs_BlocksWithNilLogs(t *testing.T) { + parentHash := common.HexToHash("0x1234") + + blockWithNilLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + IsFinal: true, + Logs: nil, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{blockWithNilLogs}, + ResponseRange: aggkitcommon.NewBlockRange(100, 100), + UnsafeRange: aggkitcommon.NewBlockRange(0, 0), + } + + count := lqr.CountLogs() + require.Equal(t, 0, count) +} + +func TestLog_Structure(t *testing.T) { + log := Log{ + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678"), common.HexToHash("0x9abc")}, + Data: []byte("test data"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 5, + BlockTimestamp: 1234567890, + Index: 10, + Removed: false, + } + + require.Equal(t, common.HexToAddress("0x1111"), log.Address) + require.Equal(t, 2, len(log.Topics)) + require.Equal(t, common.HexToHash("0x5678"), log.Topics[0]) + require.Equal(t, common.HexToHash("0x9abc"), log.Topics[1]) + require.Equal(t, []byte("test data"), log.Data) + require.Equal(t, uint64(100), log.BlockNumber) + require.Equal(t, common.HexToHash("0xdef"), log.TxHash) + require.Equal(t, uint(5), log.TxIndex) + require.Equal(t, uint64(1234567890), log.BlockTimestamp) + require.Equal(t, uint(10), log.Index) + require.False(t, log.Removed) +} + +func TestBlockWithLogs_Structure(t *testing.T) { + parentHash := common.HexToHash("0x1234") + header := aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash) + + logs := []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + Removed: false, + }, + } + + block := BlockWithLogs{ + Header: *header, + IsFinal: true, + Logs: logs, + } + + require.Equal(t, uint64(100), block.Header.Number) + require.Equal(t, common.HexToHash("0xabc"), block.Header.Hash) + require.True(t, block.IsFinal) + require.Equal(t, 1, len(block.Logs)) + require.Equal(t, common.HexToAddress("0x1111"), block.Logs[0].Address) +} + +func TestLogQueryResponse_Structure(t *testing.T) { + parentHash := common.HexToHash("0x1234") + + block := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + BlockNumber: 100, + }, + }, + } + + responseRange := aggkitcommon.NewBlockRange(100, 200) + unsafeRange := aggkitcommon.NewBlockRange(150, 200) + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block}, + ResponseRange: responseRange, + UnsafeRange: unsafeRange, + } + + require.Equal(t, 1, len(lqr.Blocks)) + require.Equal(t, responseRange, lqr.ResponseRange) + require.Equal(t, unsafeRange, lqr.UnsafeRange) + require.Equal(t, uint64(100), lqr.ResponseRange.FromBlock) + require.Equal(t, uint64(200), lqr.ResponseRange.ToBlock) + require.Equal(t, uint64(150), lqr.UnsafeRange.FromBlock) + require.Equal(t, uint64(200), lqr.UnsafeRange.ToBlock) +} diff --git a/multidownloader/types/reorg_data_test.go b/multidownloader/types/reorg_data_test.go new file mode 100644 index 000000000..00c681bde --- /dev/null +++ b/multidownloader/types/reorg_data_test.go @@ -0,0 +1,24 @@ +package types + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestXxx(t *testing.T) { + reorgData := &ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + DetectedAtBlock: 250, + DetectedTimestamp: 1620000000, + NetworkLatestBlock: 300, + NetworkFinalizedBlock: 240, + NetworkFinalizedBlockName: aggkittypes.LatestBlock, + } + require.Equal(t, "ReorgData{ChainID: 1, BlockRangeAffected: From: 100, To: 200 (101), "+ + "DetectedAtBlock: 250, DetectedTimestamp: 1620000000, NetworkLatestBlock: 300, NetworkFinalizedBlock: 240 (LatestBlock)}", + reorgData.String()) +} diff --git a/multidownloader/types/reorg_error_test.go b/multidownloader/types/reorg_error_test.go new file mode 100644 index 000000000..40f3b26ee --- /dev/null +++ b/multidownloader/types/reorg_error_test.go @@ -0,0 +1,309 @@ +package types + +import ( + "errors" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestReorgDetectionReason_String(t *testing.T) { + tests := []struct { + name string + reason ReorgDetectionReason + expected string + }{ + { + name: "BlockHashMismatch", + reason: ReorgDetectionReason_BlockHashMismatch, + expected: "BlockHashMismatch", + }, + { + name: "ParentHashMismatch", + reason: ReorgDetectionReason_ParentHashMismatch, + expected: "ParentHashMismatch", + }, + { + name: "MissingBlock", + reason: ReorgDetectionReason_MissingBlock, + expected: "MissingBlock", + }, + { + name: "Forced", + reason: ReorgDetectionReason_Forced, + expected: "Forced", + }, + { + name: "Unknown reason", + reason: ReorgDetectionReason(99), + expected: "ReorgDetectionReason(99)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.reason.String() + require.Equal(t, tt.expected, result) + }) + } +} + +func TestNewDetectedReorgError(t *testing.T) { + blockNum := uint64(100) + reason := ReorgDetectionReason_BlockHashMismatch + oldHash := common.HexToHash("0x1234") + newHash := common.HexToHash("0x5678") + msg := "test message" + + err := NewDetectedReorgError(blockNum, reason, oldHash, newHash, msg) + + require.NotNil(t, err) + require.Equal(t, blockNum, err.OffendingBlockNumber) + require.Equal(t, reason, err.ReorgDetectionReason) + require.Equal(t, oldHash, err.OldHash) + require.Equal(t, newHash, err.NewHash) + require.Equal(t, msg, err.Message) +} + +func TestDetectedReorgError_Error(t *testing.T) { + blockNum := uint64(100) + oldHash := common.HexToHash("0x1234") + newHash := common.HexToHash("0x5678") + msg := "test message" + + tests := []struct { + name string + reason ReorgDetectionReason + expectedPrefix string + }{ + { + name: "MissingBlock error message", + reason: ReorgDetectionReason_MissingBlock, + expectedPrefix: "reorgError: block number 100 is missing: test message", + }, + { + name: "BlockHashMismatch error message", + reason: ReorgDetectionReason_BlockHashMismatch, + expectedPrefix: fmt.Sprintf("reorgError: block number 100: old hash %s != new hash %s: test message", oldHash.String(), newHash.String()), + }, + { + name: "ParentHashMismatch error message", + reason: ReorgDetectionReason_ParentHashMismatch, + expectedPrefix: fmt.Sprintf("reorgError: block number 100: old parent hash %s != new parent hash %s: test message", oldHash.String(), newHash.String()), + }, + { + name: "Forced error message", + reason: ReorgDetectionReason_Forced, + expectedPrefix: "reorgError: block number 100: forced reason: test message", + }, + { + name: "Unknown reason error message", + reason: ReorgDetectionReason(99), + expectedPrefix: "reorgError: block number 100: reason 99: test message", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := NewDetectedReorgError(blockNum, tt.reason, oldHash, newHash, msg) + result := err.Error() + require.Equal(t, tt.expectedPrefix, result) + }) + } +} + +func TestIsDetectedReorgError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Valid DetectedReorgError", + err: NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.Hash{}, common.Hash{}, "test"), + expected: true, + }, + { + name: "Wrapped DetectedReorgError", + err: fmt.Errorf("wrapped: %w", NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.Hash{}, common.Hash{}, "test")), + expected: true, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expected: false, + }, + { + name: "Nil error", + err: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsDetectedReorgError(tt.err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCastDetectedReorgError(t *testing.T) { + originalErr := NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.HexToHash("0x1234"), common.HexToHash("0x5678"), "test") + + tests := []struct { + name string + err error + expectNil bool + expectEqual *DetectedReorgError + }{ + { + name: "Valid DetectedReorgError", + err: originalErr, + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Wrapped DetectedReorgError", + err: fmt.Errorf("wrapped: %w", originalErr), + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expectNil: true, + expectEqual: nil, + }, + { + name: "Nil error", + err: nil, + expectNil: true, + expectEqual: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CastDetectedReorgError(tt.err) + if tt.expectNil { + require.Nil(t, result) + } else { + require.NotNil(t, result) + require.Equal(t, tt.expectEqual, result) + } + }) + } +} + +func TestNewReorgedError(t *testing.T) { + blockRange := aggkitcommon.NewBlockRange(100, 200) + chainID := uint64(1) + msg := "test message" + + err := NewReorgedError(blockRange, chainID, msg) + + require.NotNil(t, err) + require.Equal(t, blockRange, err.BlockRangeReorged) + require.Equal(t, chainID, err.ReorgedChainID) + require.Equal(t, msg, err.Message) +} + +func TestReorgedError_Error(t *testing.T) { + blockRange := aggkitcommon.NewBlockRange(100, 200) + chainID := uint64(1) + msg := "test message" + + err := NewReorgedError(blockRange, chainID, msg) + result := err.Error() + + expected := fmt.Sprintf("reorgedError: chainID=%d blockRangeReorged=%s: %s", chainID, blockRange.String(), msg) + require.Equal(t, expected, result) +} + +func TestIsReorgedError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Valid ReorgedError", + err: NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test"), + expected: true, + }, + { + name: "Wrapped ReorgedError", + err: fmt.Errorf("wrapped: %w", NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test")), + expected: true, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expected: false, + }, + { + name: "Nil error", + err: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsReorgedError(tt.err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCastReorgedError(t *testing.T) { + originalErr := NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test") + + tests := []struct { + name string + err error + expectNil bool + expectEqual *ReorgedError + }{ + { + name: "Valid ReorgedError", + err: originalErr, + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Wrapped ReorgedError", + err: fmt.Errorf("wrapped: %w", originalErr), + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expectNil: true, + expectEqual: nil, + }, + { + name: "Nil error", + err: nil, + expectNil: true, + expectEqual: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CastReorgedError(tt.err) + if tt.expectNil { + require.Nil(t, result) + } else { + require.NotNil(t, result) + require.Equal(t, tt.expectEqual, result) + } + }) + } +} From 5172853362cb415485a413d8257e579a92576681 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 10:59:49 +0100 Subject: [PATCH 27/75] feat: coverage --- multidownloader/reorg_processor.go | 13 +---- multidownloader/reorg_processor_port.go | 24 +------- .../types/log_query_response_test.go | 10 ++-- multidownloader/types/reorg_error_test.go | 17 +++--- types/block_header_test.go | 57 +++++++++++++++++++ 5 files changed, 74 insertions(+), 47 deletions(-) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 98597e410..7045f1b48 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -11,18 +11,9 @@ import ( aggkittypes "github.com/agglayer/aggkit/types" ) -type ReorgPorter interface { - NewTx(ctx context.Context) (dbtypes.Txer, error) - GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*compareBlockHeaders, error) - GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) - // Return ChainID of the inserted reorg - MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) - GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) -} - type ReorgProcessor struct { log aggkitcommon.Logger - port ReorgPorter + port mdtypes.ReorgPorter funcNow func() uint64 } @@ -133,7 +124,7 @@ func (rm *ReorgProcessor) findFirstUnaffectedBlock(ctx context.Context, } // checkBlocks compares storage and rpc block headers and returns true if they match -func (rm *ReorgProcessor) checkBlocks(blocks *compareBlockHeaders) (bool, error) { +func (rm *ReorgProcessor) checkBlocks(blocks *mdtypes.CompareBlockHeaders) (bool, error) { if blocks == nil { return false, fmt.Errorf("checkBlocks: blocks is nil") } diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index fb9556eec..a03aeb4eb 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -10,26 +10,6 @@ import ( aggkittypes "github.com/agglayer/aggkit/types" ) -type compareBlockHeaders struct { - BlockNumber uint64 - StorageHeader *aggkittypes.BlockHeader - IsFinalized mdtypes.FinalizedType - RpcHeader *aggkittypes.BlockHeader -} - -func (c *compareBlockHeaders) ExistsRPCBlock() bool { - if c == nil { - return false - } - return c.RpcHeader != nil -} -func (c *compareBlockHeaders) ExistsStorageBlock() bool { - if c == nil { - return false - } - return c.StorageHeader != nil -} - type ReorgPort struct { ethClient aggkittypes.BaseEthereumClienter rpcClient aggkittypes.RPCClienter @@ -41,7 +21,7 @@ func (r *ReorgPort) NewTx(ctx context.Context) (dbtypes.Txer, error) { } func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, - blockNumber uint64) (*compareBlockHeaders, error) { + blockNumber uint64) (*mdtypes.CompareBlockHeaders, error) { currentStorageBlock, finalized, err := r.storage.GetBlockHeaderByNumber(tx, blockNumber) if err != nil { return nil, fmt.Errorf("error getting block in storage: %w", err) @@ -50,7 +30,7 @@ func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querie if err != nil && !etherman.IsErrNotFound(err) { return nil, fmt.Errorf("error getting block in RPC: %w", err) } - return &compareBlockHeaders{ + return &mdtypes.CompareBlockHeaders{ BlockNumber: blockNumber, StorageHeader: currentStorageBlock, IsFinalized: finalized, diff --git a/multidownloader/types/log_query_response_test.go b/multidownloader/types/log_query_response_test.go index 4be7c0640..c171a3087 100644 --- a/multidownloader/types/log_query_response_test.go +++ b/multidownloader/types/log_query_response_test.go @@ -28,7 +28,7 @@ func TestLogQueryResponse_CountLogs_EmptyBlocks(t *testing.T) { func TestLogQueryResponse_CountLogs_SingleBlockWithLogs(t *testing.T) { parentHash := common.HexToHash("0x1234") block := BlockWithLogs{ - Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), IsFinal: true, Logs: []Log{ { @@ -71,7 +71,7 @@ func TestLogQueryResponse_CountLogs_MultipleBlocksWithLogs(t *testing.T) { parentHash2 := common.HexToHash("0x5678") block1 := BlockWithLogs{ - Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), IsFinal: true, Logs: []Log{ { @@ -100,7 +100,7 @@ func TestLogQueryResponse_CountLogs_MultipleBlocksWithLogs(t *testing.T) { } block2 := BlockWithLogs{ - Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), + Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), IsFinal: false, Logs: []Log{ { @@ -133,7 +133,7 @@ func TestLogQueryResponse_CountLogs_MixedBlocks(t *testing.T) { parentHash3 := common.HexToHash("0x9abc") blockWithLogs := BlockWithLogs{ - Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), IsFinal: true, Logs: []Log{ { @@ -157,7 +157,7 @@ func TestLogQueryResponse_CountLogs_MixedBlocks(t *testing.T) { } blockWithMultipleLogs := BlockWithLogs{ - Header: *aggkittypes.NewBlockHeader(102, common.HexToHash("0xghi"), 1234567910, &parentHash3), + Header: *aggkittypes.NewBlockHeader(102, common.HexToHash("0xghi"), 1234567910, &parentHash3), IsFinal: false, Logs: []Log{ { diff --git a/multidownloader/types/reorg_error_test.go b/multidownloader/types/reorg_error_test.go index 40f3b26ee..a739dc4fc 100644 --- a/multidownloader/types/reorg_error_test.go +++ b/multidownloader/types/reorg_error_test.go @@ -51,28 +51,28 @@ func TestReorgDetectionReason_String(t *testing.T) { } } +const testReorgMsg = "test message" + func TestNewDetectedReorgError(t *testing.T) { blockNum := uint64(100) reason := ReorgDetectionReason_BlockHashMismatch oldHash := common.HexToHash("0x1234") newHash := common.HexToHash("0x5678") - msg := "test message" - err := NewDetectedReorgError(blockNum, reason, oldHash, newHash, msg) + err := NewDetectedReorgError(blockNum, reason, oldHash, newHash, testReorgMsg) require.NotNil(t, err) require.Equal(t, blockNum, err.OffendingBlockNumber) require.Equal(t, reason, err.ReorgDetectionReason) require.Equal(t, oldHash, err.OldHash) require.Equal(t, newHash, err.NewHash) - require.Equal(t, msg, err.Message) + require.Equal(t, testReorgMsg, err.Message) } func TestDetectedReorgError_Error(t *testing.T) { blockNum := uint64(100) oldHash := common.HexToHash("0x1234") newHash := common.HexToHash("0x5678") - msg := "test message" tests := []struct { name string @@ -108,7 +108,7 @@ func TestDetectedReorgError_Error(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := NewDetectedReorgError(blockNum, tt.reason, oldHash, newHash, msg) + err := NewDetectedReorgError(blockNum, tt.reason, oldHash, newHash, testReorgMsg) result := err.Error() require.Equal(t, tt.expectedPrefix, result) }) @@ -202,14 +202,13 @@ func TestCastDetectedReorgError(t *testing.T) { func TestNewReorgedError(t *testing.T) { blockRange := aggkitcommon.NewBlockRange(100, 200) chainID := uint64(1) - msg := "test message" - err := NewReorgedError(blockRange, chainID, msg) + err := NewReorgedError(blockRange, chainID, testReorgMsg) require.NotNil(t, err) require.Equal(t, blockRange, err.BlockRangeReorged) require.Equal(t, chainID, err.ReorgedChainID) - require.Equal(t, msg, err.Message) + require.Equal(t, testReorgMsg, err.Message) } func TestReorgedError_Error(t *testing.T) { @@ -217,7 +216,7 @@ func TestReorgedError_Error(t *testing.T) { chainID := uint64(1) msg := "test message" - err := NewReorgedError(blockRange, chainID, msg) + err := NewReorgedError(blockRange, chainID, testReorgMsg) result := err.Error() expected := fmt.Sprintf("reorgedError: chainID=%d blockRangeReorged=%s: %s", chainID, blockRange.String(), msg) diff --git a/types/block_header_test.go b/types/block_header_test.go index 7ca86dea2..073c82e39 100644 --- a/types/block_header_test.go +++ b/types/block_header_test.go @@ -68,3 +68,60 @@ func TestBlockHeader_String(t *testing.T) { require.Equal(t, "", result) }) } + +func TestBlockHeader_Brief(t *testing.T) { + t.Run("with valid block header", func(t *testing.T) { + hash := common.HexToHash("0x1234567890abcdef") + parentHash := common.HexToHash("0xabcdef1234567890") + header := &BlockHeader{ + Number: 123, + Hash: hash, + Time: 1640995200, + ParentHash: &parentHash, + } + + result := header.Brief() + expected := "BlockHeader{Number: 123, Hash: 0x0000000000000000000000000000000000000000000000001234567890abcdef}" + require.Equal(t, expected, result) + }) + + t.Run("with nil block header", func(t *testing.T) { + var header *BlockHeader + result := header.Brief() + require.Equal(t, "", result) + }) +} + +func TestBlockHeader_Empty(t *testing.T) { + t.Run("with nil block header", func(t *testing.T) { + var header *BlockHeader + result := header.Empty() + require.True(t, result) + }) + + t.Run("with valid block header", func(t *testing.T) { + hash := common.HexToHash("0x1234567890abcdef") + parentHash := common.HexToHash("0xabcdef1234567890") + header := &BlockHeader{ + Number: 123, + Hash: hash, + Time: 1640995200, + ParentHash: &parentHash, + } + + result := header.Empty() + require.False(t, result) + }) + + t.Run("with zero-valued block header", func(t *testing.T) { + header := &BlockHeader{ + Number: 0, + Hash: common.Hash{}, + Time: 0, + ParentHash: nil, + } + + result := header.Empty() + require.False(t, result) + }) +} From cfdae2a08d0b5d804aea1970b202cd637d86d698 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 11:03:35 +0100 Subject: [PATCH 28/75] feat: coverage --- multidownloader/reorg_processor_port_test.go | 405 +++++++++++ multidownloader/reorg_processor_test.go | 650 ++++++++++++++++++ .../types/mocks/mock_reorg_porter.go | 329 +++++++++ multidownloader/types/reorg_port.go | 37 + 4 files changed, 1421 insertions(+) create mode 100644 multidownloader/reorg_processor_port_test.go create mode 100644 multidownloader/reorg_processor_test.go create mode 100644 multidownloader/types/mocks/mock_reorg_porter.go create mode 100644 multidownloader/types/reorg_port.go diff --git a/multidownloader/reorg_processor_port_test.go b/multidownloader/reorg_processor_port_test.go new file mode 100644 index 000000000..003ec8572 --- /dev/null +++ b/multidownloader/reorg_processor_port_test.go @@ -0,0 +1,405 @@ +package multidownloader + +import ( + "context" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbmocks "github.com/agglayer/aggkit/db/mocks" + "github.com/agglayer/aggkit/etherman" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" + aggkittypes "github.com/agglayer/aggkit/types" + typesmocks "github.com/agglayer/aggkit/types/mocks" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestCompareBlockHeaders_ExistsRPCBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *mdtypes.CompareBlockHeaders + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns false when RpcHeader is nil", func(t *testing.T) { + c := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{Number: 100}, + RpcHeader: nil, + } + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns true when RpcHeader is not nil", func(t *testing.T) { + c := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + } + result := c.ExistsRPCBlock() + require.True(t, result) + }) +} + +func TestCompareBlockHeaders_ExistsStorageBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *mdtypes.CompareBlockHeaders + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns false when StorageHeader is nil", func(t *testing.T) { + c := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: nil, + RpcHeader: &aggkittypes.BlockHeader{Number: 100}, + } + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns true when StorageHeader is not nil", func(t *testing.T) { + c := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + } + result := c.ExistsStorageBlock() + require.True(t, result) + }) +} + +func TestReorgPort_NewTx(t *testing.T) { + t.Run("successfully creates new transaction", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewTxer(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + ctx := context.Background() + mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + result, err := reorgPort.NewTx(ctx) + + require.NoError(t, err) + require.Equal(t, mockTx, result) + }) + + t.Run("returns error when NewTx fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("database connection error") + mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + result, err := reorgPort.NewTx(ctx) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Nil(t, result) + }) +} + +func TestReorgPort_GetBlockStorageAndRPC(t *testing.T) { + t.Run("successfully gets block from both storage and RPC", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + rpcHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). + Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, blockNumber, result.BlockNumber) + require.Equal(t, storageHeader, result.StorageHeader) + require.Equal(t, rpcHeader, result.RpcHeader) + require.Equal(t, mdtypes.Finalized, result.IsFinalized) + }) + + t.Run("returns error when storage GetBlockHeaderByNumber fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + expectedErr := fmt.Errorf("storage error") + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(nil, mdtypes.NotFinalized, expectedErr).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block in storage") + require.Nil(t, result) + }) + + t.Run("returns error when RPC CustomHeaderByNumber fails with non-NotFound error", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + expectedErr := fmt.Errorf("RPC connection error") + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). + Return(nil, expectedErr).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block in RPC") + require.Nil(t, result) + }) + + t.Run("handles NotFound error from RPC gracefully", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). + Return(nil, etherman.ErrNotFound).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, blockNumber, result.BlockNumber) + require.Equal(t, storageHeader, result.StorageHeader) + require.Nil(t, result.RpcHeader) + require.Equal(t, mdtypes.Finalized, result.IsFinalized) + }) +} + +func TestReorgPort_GetLastBlockNumberInStorage(t *testing.T) { + t.Run("successfully gets highest block number", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + expectedBlockNumber := uint64(12345) + mockStorage.EXPECT().GetHighestBlockNumber(mock.Anything). + Return(expectedBlockNumber, nil).Once() + + result, err := reorgPort.GetLastBlockNumberInStorage(mockTx) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("returns error when GetHighestBlockNumber fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + expectedErr := fmt.Errorf("database query error") + mockStorage.EXPECT().GetHighestBlockNumber(mock.Anything). + Return(uint64(0), expectedErr).Once() + + result, err := reorgPort.GetLastBlockNumberInStorage(mockTx) + + require.Error(t, err) + require.Contains(t, err.Error(), "GetLastBlockNumberInStorage") + require.Contains(t, err.Error(), "error getting highest block from storage") + require.Equal(t, uint64(0), result) + }) +} + +func TestReorgPort_MoveReorgedBlocks(t *testing.T) { + t.Run("successfully moves reorged blocks", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + reorgData := mdtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + } + expectedAffectedRows := uint64(101) + + mockStorage.EXPECT().InsertReorgAndMoveReorgedBlocksAndLogs(mockTx, reorgData). + Return(expectedAffectedRows, nil).Once() + + result, err := reorgPort.MoveReorgedBlocks(mockTx, reorgData) + + require.NoError(t, err) + require.Equal(t, expectedAffectedRows, result) + }) + + t.Run("returns error when InsertReorgAndMoveReorgedBlocksAndLogs fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + reorgData := mdtypes.ReorgData{ + ChainID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + } + expectedErr := fmt.Errorf("transaction failed") + + mockStorage.EXPECT().InsertReorgAndMoveReorgedBlocksAndLogs(mockTx, reorgData). + Return(uint64(0), expectedErr).Once() + + result, err := reorgPort.MoveReorgedBlocks(mockTx, reorgData) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Equal(t, uint64(0), result) + }) +} + +func TestReorgPort_GetBlockNumberInRPC(t *testing.T) { + t.Run("successfully gets block number from RPC with latest finality", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Latest} + expectedBlockNumber := uint64(500) + + rpcHeader := &aggkittypes.BlockHeader{ + Number: expectedBlockNumber, + Hash: common.HexToHash("0xabcd"), + } + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). + Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("successfully gets block number from RPC with finalized finality", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Finalized} + expectedBlockNumber := uint64(450) + + rpcHeader := &aggkittypes.BlockHeader{ + Number: expectedBlockNumber, + Hash: common.HexToHash("0xdef0"), + } + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). + Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("returns error when CustomHeaderByNumber fails", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Latest} + expectedErr := fmt.Errorf("RPC connection timeout") + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). + Return(nil, expectedErr).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.Error(t, err) + require.Contains(t, err.Error(), "GetBlockNumberInRPC") + require.Contains(t, err.Error(), "error getting block number") + require.Equal(t, uint64(0), result) + }) +} diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go new file mode 100644 index 000000000..7c7232b25 --- /dev/null +++ b/multidownloader/reorg_processor_test.go @@ -0,0 +1,650 @@ +package multidownloader + +import ( + "context" + "fmt" + "testing" + + commonmocks "github.com/agglayer/aggkit/common/mocks" + dbmocks "github.com/agglayer/aggkit/db/mocks" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestReorgProcessor_CheckBlocks(t *testing.T) { + t.Run("returns error when blocks is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + match, err := processor.checkBlocks(nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "blocks is nil") + require.False(t, match) + }) + + t.Run("returns false when storage header is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: nil, + RpcHeader: &aggkittypes.BlockHeader{Number: 100}, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns false when RPC header is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{Number: 100}, + RpcHeader: nil, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns error when block numbers do not match", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x1234"), + }, + } + + match, err := processor.checkBlocks(blocks) + + require.Error(t, err) + require.Contains(t, err.Error(), "block numbers do not match") + require.False(t, match) + }) + + t.Run("returns false when hashes do not match (not finalized)", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + IsFinalized: mdtypes.NotFinalized, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns false when hashes do not match (finalized, logs warning)", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Once() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + IsFinalized: mdtypes.Finalized, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns true when blocks match", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + hash := common.HexToHash("0x1234") + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: hash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: hash, + }, + IsFinalized: mdtypes.Finalized, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.True(t, match) + }) +} + +func TestReorgProcessor_FindFirstUnaffectedBlock(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + + t.Run("returns error when genesis block is reached", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + hash1 := common.HexToHash("0x1234") + hash2 := common.HexToHash("0x5678") + + // Block 1 - mismatch, then loop decrements to 0 and checks genesis before calling again + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(1)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 1, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 1, + Hash: hash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 1, + Hash: hash2, + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 1) + + require.Error(t, err) + require.Contains(t, err.Error(), "genesis block reached") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("returns error when GetBlockStorageAndRPC fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("RPC connection error") + + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). + Return(nil, expectedErr).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block storage and RPC") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("finds first unaffected block after checking multiple blocks", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + differentHash1 := common.HexToHash("0x1234") + differentHash2 := common.HexToHash("0x5678") + + // Block 102 - mismatch + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(102)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 102, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 102, + Hash: differentHash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 102, + Hash: differentHash2, + }, + }, nil).Once() + + // Block 101 - mismatch + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(101)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 101, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: differentHash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: differentHash2, + }, + }, nil).Once() + + // Block 100 - match (first unaffected) + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 102) + + require.NoError(t, err) + require.Equal(t, uint64(100), result) + mockPort.AssertExpectations(t) + }) +} + +func TestReorgProcessor_ProcessReorg(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + + t.Run("returns error when NewTx fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("transaction creation error") + + mockPort.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error starting new tx") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when findFirstUnaffectedBlock fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("block search error") + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(nil, expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error finding first unaffected block") + mockPort.AssertExpectations(t) + }) + + t.Run("successfully processes reorg and commits transaction", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + nowValue := uint64(1234567890) + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + funcNow: func() uint64 { + return nowValue + }, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + offendingBlockNumber := uint64(105) + firstUnaffectedBlock := uint64(100) + lastBlockInStorage := uint64(110) + latestBlockInRPC := uint64(115) + finalizedBlockInRPC := uint64(100) + chainID := uint64(1) + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // findFirstUnaffectedBlock: Block 104 matches (first unaffected) + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, offendingBlockNumber-1). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: firstUnaffectedBlock, + StorageHeader: &aggkittypes.BlockHeader{ + Number: firstUnaffectedBlock, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: firstUnaffectedBlock, + Hash: matchingHash, + }, + }, nil).Once() + + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(lastBlockInStorage, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(latestBlockInRPC, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(finalizedBlockInRPC, nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() + + mockTx.EXPECT().Commit().Return(nil).Once() + + err := processor.ProcessReorg(ctx, offendingBlockNumber) + + require.NoError(t, err) + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetLastBlockNumberInStorage fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("storage query error") + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting last block number in storage") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when MoveReorgedBlocks fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + funcNow: func() uint64 { return 1234567890 }, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("move blocks error") + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error moving reorged blocks") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetBlockNumberInRPC for latest fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("RPC error for latest") + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting latest block number in RPC") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetBlockNumberInRPC for finalized fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("RPC error for finalized") + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting finalized block number in RPC") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when Commit fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + nowValue := uint64(1234567890) + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + funcNow: func() uint64 { + return nowValue + }, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("commit failed") + chainID := uint64(1) + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() + mockTx.EXPECT().Commit().Return(expectedErr).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error when checkBlocks fails in findFirstUnaffectedBlock", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + + // Return blocks with mismatched block numbers which will cause checkBlocks to error + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, // Different block number will cause error + Hash: common.HexToHash("0x1234"), + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error checking blocks") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("logs error when rollback fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + rollbackErr := fmt.Errorf("rollback failed") + originalErr := fmt.Errorf("original error") + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockLogger.EXPECT().Errorf(mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(nil, originalErr).Once() + mockTx.EXPECT().Rollback().Return(rollbackErr).Once() + + err := processor.ProcessReorg(ctx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error finding first unaffected block") + mockPort.AssertExpectations(t) + }) +} diff --git a/multidownloader/types/mocks/mock_reorg_porter.go b/multidownloader/types/mocks/mock_reorg_porter.go new file mode 100644 index 000000000..e0e8db9c6 --- /dev/null +++ b/multidownloader/types/mocks/mock_reorg_porter.go @@ -0,0 +1,329 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + dbtypes "github.com/agglayer/aggkit/db/types" + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" + + types "github.com/agglayer/aggkit/types" +) + +// ReorgPorter is an autogenerated mock type for the ReorgPorter type +type ReorgPorter struct { + mock.Mock +} + +type ReorgPorter_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgPorter) EXPECT() *ReorgPorter_Expecter { + return &ReorgPorter_Expecter{mock: &_m.Mock} +} + +// GetBlockNumberInRPC provides a mock function with given fields: ctx, blockFinality +func (_m *ReorgPorter) GetBlockNumberInRPC(ctx context.Context, blockFinality types.BlockNumberFinality) (uint64, error) { + ret := _m.Called(ctx, blockFinality) + + if len(ret) == 0 { + panic("no return value specified for GetBlockNumberInRPC") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (uint64, error)); ok { + return rf(ctx, blockFinality) + } + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) uint64); ok { + r0 = rf(ctx, blockFinality) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { + r1 = rf(ctx, blockFinality) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetBlockNumberInRPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockNumberInRPC' +type ReorgPorter_GetBlockNumberInRPC_Call struct { + *mock.Call +} + +// GetBlockNumberInRPC is a helper method to define mock.On call +// - ctx context.Context +// - blockFinality types.BlockNumberFinality +func (_e *ReorgPorter_Expecter) GetBlockNumberInRPC(ctx interface{}, blockFinality interface{}) *ReorgPorter_GetBlockNumberInRPC_Call { + return &ReorgPorter_GetBlockNumberInRPC_Call{Call: _e.mock.On("GetBlockNumberInRPC", ctx, blockFinality)} +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) Run(run func(ctx context.Context, blockFinality types.BlockNumberFinality)) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) + }) + return _c +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (uint64, error)) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockStorageAndRPC provides a mock function with given fields: ctx, tx, blockNumber +func (_m *ReorgPorter) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*multidownloadertypes.CompareBlockHeaders, error) { + ret := _m.Called(ctx, tx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBlockStorageAndRPC") + } + + var r0 *multidownloadertypes.CompareBlockHeaders + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dbtypes.Querier, uint64) (*multidownloadertypes.CompareBlockHeaders, error)); ok { + return rf(ctx, tx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, dbtypes.Querier, uint64) *multidownloadertypes.CompareBlockHeaders); ok { + r0 = rf(ctx, tx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.CompareBlockHeaders) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dbtypes.Querier, uint64) error); ok { + r1 = rf(ctx, tx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetBlockStorageAndRPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockStorageAndRPC' +type ReorgPorter_GetBlockStorageAndRPC_Call struct { + *mock.Call +} + +// GetBlockStorageAndRPC is a helper method to define mock.On call +// - ctx context.Context +// - tx dbtypes.Querier +// - blockNumber uint64 +func (_e *ReorgPorter_Expecter) GetBlockStorageAndRPC(ctx interface{}, tx interface{}, blockNumber interface{}) *ReorgPorter_GetBlockStorageAndRPC_Call { + return &ReorgPorter_GetBlockStorageAndRPC_Call{Call: _e.mock.On("GetBlockStorageAndRPC", ctx, tx, blockNumber)} +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) Run(run func(ctx context.Context, tx dbtypes.Querier, blockNumber uint64)) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dbtypes.Querier), args[2].(uint64)) + }) + return _c +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) Return(_a0 *multidownloadertypes.CompareBlockHeaders, _a1 error) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) RunAndReturn(run func(context.Context, dbtypes.Querier, uint64) (*multidownloadertypes.CompareBlockHeaders, error)) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Return(run) + return _c +} + +// GetLastBlockNumberInStorage provides a mock function with given fields: tx +func (_m *ReorgPorter) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) { + ret := _m.Called(tx) + + if len(ret) == 0 { + panic("no return value specified for GetLastBlockNumberInStorage") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(dbtypes.Querier) (uint64, error)); ok { + return rf(tx) + } + if rf, ok := ret.Get(0).(func(dbtypes.Querier) uint64); ok { + r0 = rf(tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(dbtypes.Querier) error); ok { + r1 = rf(tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetLastBlockNumberInStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastBlockNumberInStorage' +type ReorgPorter_GetLastBlockNumberInStorage_Call struct { + *mock.Call +} + +// GetLastBlockNumberInStorage is a helper method to define mock.On call +// - tx dbtypes.Querier +func (_e *ReorgPorter_Expecter) GetLastBlockNumberInStorage(tx interface{}) *ReorgPorter_GetLastBlockNumberInStorage_Call { + return &ReorgPorter_GetLastBlockNumberInStorage_Call{Call: _e.mock.On("GetLastBlockNumberInStorage", tx)} +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) Run(run func(tx dbtypes.Querier)) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(dbtypes.Querier)) + }) + return _c +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) RunAndReturn(run func(dbtypes.Querier) (uint64, error)) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Return(run) + return _c +} + +// MoveReorgedBlocks provides a mock function with given fields: tx, reorgData +func (_m *ReorgPorter) MoveReorgedBlocks(tx dbtypes.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for MoveReorgedBlocks") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(dbtypes.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(dbtypes.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(dbtypes.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_MoveReorgedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveReorgedBlocks' +type ReorgPorter_MoveReorgedBlocks_Call struct { + *mock.Call +} + +// MoveReorgedBlocks is a helper method to define mock.On call +// - tx dbtypes.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *ReorgPorter_Expecter) MoveReorgedBlocks(tx interface{}, reorgData interface{}) *ReorgPorter_MoveReorgedBlocks_Call { + return &ReorgPorter_MoveReorgedBlocks_Call{Call: _e.mock.On("MoveReorgedBlocks", tx, reorgData)} +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) Run(run func(tx dbtypes.Querier, reorgData multidownloadertypes.ReorgData)) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(dbtypes.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) RunAndReturn(run func(dbtypes.Querier, multidownloadertypes.ReorgData) (uint64, error)) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Return(run) + return _c +} + +// NewTx provides a mock function with given fields: ctx +func (_m *ReorgPorter) NewTx(ctx context.Context) (dbtypes.Txer, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for NewTx") + } + + var r0 dbtypes.Txer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (dbtypes.Txer, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) dbtypes.Txer); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(dbtypes.Txer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_NewTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTx' +type ReorgPorter_NewTx_Call struct { + *mock.Call +} + +// NewTx is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReorgPorter_Expecter) NewTx(ctx interface{}) *ReorgPorter_NewTx_Call { + return &ReorgPorter_NewTx_Call{Call: _e.mock.On("NewTx", ctx)} +} + +func (_c *ReorgPorter_NewTx_Call) Run(run func(ctx context.Context)) *ReorgPorter_NewTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReorgPorter_NewTx_Call) Return(_a0 dbtypes.Txer, _a1 error) *ReorgPorter_NewTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_NewTx_Call) RunAndReturn(run func(context.Context) (dbtypes.Txer, error)) *ReorgPorter_NewTx_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgPorter creates a new instance of ReorgPorter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReorgPorter(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgPorter { + mock := &ReorgPorter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/reorg_port.go b/multidownloader/types/reorg_port.go new file mode 100644 index 000000000..d7df1bc68 --- /dev/null +++ b/multidownloader/types/reorg_port.go @@ -0,0 +1,37 @@ +package types + +import ( + "context" + + dbtypes "github.com/agglayer/aggkit/db/types" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgPorter interface { + NewTx(ctx context.Context) (dbtypes.Txer, error) + GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*CompareBlockHeaders, error) + GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) + // Return ChainID of the inserted reorg + MoveReorgedBlocks(tx dbtypes.Querier, reorgData ReorgData) (uint64, error) + GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) +} + +type CompareBlockHeaders struct { + BlockNumber uint64 + StorageHeader *aggkittypes.BlockHeader + IsFinalized FinalizedType + RpcHeader *aggkittypes.BlockHeader +} + +func (c *CompareBlockHeaders) ExistsRPCBlock() bool { + if c == nil { + return false + } + return c.RpcHeader != nil +} +func (c *CompareBlockHeaders) ExistsStorageBlock() bool { + if c == nil { + return false + } + return c.StorageHeader != nil +} From 3fb1121f1dc09fbd910cb00e86215ee3ee64fa5b Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 11:20:27 +0100 Subject: [PATCH 29/75] feat: add description to reorgdata in DB --- multidownloader/evm_multidownloader.go | 2 +- multidownloader/reorg_processor.go | 7 ++++--- multidownloader/storage/storage_reorg.go | 5 ++++- multidownloader/types/reorg_data.go | 6 ++++-- multidownloader/types/reorg_data_test.go | 6 ++++-- multidownloader/types/reorg_processor.go | 2 +- 6 files changed, 18 insertions(+), 10 deletions(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 4ec669700..52add9812 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -359,7 +359,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { } dh.log.Infof("Processing reorg at block number %d...", reorgErr.OffendingBlockNumber) - err = dh.reorgProcessor.ProcessReorg(runCtx, reorgErr.OffendingBlockNumber) + err = dh.reorgProcessor.ProcessReorg(runCtx, *reorgErr) if err != nil { dh.log.Warnf("Error running reorg multidownloader: %s", err.Error()) time.Sleep(1 * time.Second) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 7045f1b48..18ac04305 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -34,14 +34,14 @@ func NewReorgProcessor(log aggkitcommon.Logger, } } -// After detecting a reorg at offendingBlockNumber, +// After detecting a reorg at detectedReorgError.OffendingBlockNumber, // - find affected blocks // - store the reorg info in storage func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, - offendingBlockNumber uint64) error { + detectedReorgError mdtypes.DetectedReorgError) error { // We known that offendingBlockNumber is affected, so we go backwards until we find // the first unaffected block - currentBlockNumber := offendingBlockNumber + currentBlockNumber := detectedReorgError.OffendingBlockNumber tx, err := rm.port.NewTx(ctx) if err != nil { return fmt.Errorf("ProcessReorg: error starting new tx: %w", err) @@ -83,6 +83,7 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, NetworkLatestBlock: latestBlockNumberInRPC, NetworkFinalizedBlock: finalizedBlockNumberInRPC, NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: detectedReorgError.Error(), } chainID, err := rm.port.MoveReorgedBlocks(tx, reorgData) if err != nil { diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 9989a3a13..fea01ba06 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -22,6 +22,7 @@ type reorgRow struct { NetworkLatestBlock uint64 `meddler:"network_latest_block"` NetworkFinalizedBlock uint64 `meddler:"network_finalized_block"` NetworkFinalizedBlockName string `meddler:"network_finalized_block_name"` + Description string `meddler:"description"` } func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { @@ -34,6 +35,7 @@ func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { NetworkLatestBlock: reorgData.NetworkLatestBlock, NetworkFinalizedBlock: reorgData.NetworkFinalizedBlock, NetworkFinalizedBlockName: reorgData.NetworkFinalizedBlockName.String(), + Description: reorgData.Description, } } @@ -136,7 +138,7 @@ func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, var row reorgRow query := `SELECT chain_id, detected_at_block, reorged_from_block, reorged_to_block, - detected_timestamp, network_latest_block, network_finalized_block, network_finalized_block_name + detected_timestamp, network_latest_block, network_finalized_block, network_finalized_block_name, description FROM reorgs WHERE chain_id = ? LIMIT 1;` err := meddler.QueryRow(tx, &row, query, reorgedChainID) @@ -164,6 +166,7 @@ func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, NetworkLatestBlock: row.NetworkLatestBlock, NetworkFinalizedBlock: row.NetworkFinalizedBlock, NetworkFinalizedBlockName: *blockFinality, + Description: row.Description, } return reorgData, nil diff --git a/multidownloader/types/reorg_data.go b/multidownloader/types/reorg_data.go index dbfea44b7..54a3b7487 100644 --- a/multidownloader/types/reorg_data.go +++ b/multidownloader/types/reorg_data.go @@ -15,16 +15,18 @@ type ReorgData struct { NetworkLatestBlock uint64 NetworkFinalizedBlock uint64 NetworkFinalizedBlockName aggkittypes.BlockNumberFinality + Description string } func (r *ReorgData) String() string { return fmt.Sprintf("ReorgData{ChainID: %d, BlockRangeAffected: %s, DetectedAtBlock: %d, DetectedTimestamp: %d, "+ - "NetworkLatestBlock: %d, NetworkFinalizedBlock: %d (%s)}", + "NetworkLatestBlock: %d, NetworkFinalizedBlock: %d (%s), Description: %s}", r.ChainID, r.BlockRangeAffected.String(), r.DetectedAtBlock, r.DetectedTimestamp, r.NetworkLatestBlock, r.NetworkFinalizedBlock, - r.NetworkFinalizedBlockName.String()) + r.NetworkFinalizedBlockName.String(), + r.Description) } diff --git a/multidownloader/types/reorg_data_test.go b/multidownloader/types/reorg_data_test.go index 00c681bde..355a17a6f 100644 --- a/multidownloader/types/reorg_data_test.go +++ b/multidownloader/types/reorg_data_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestXxx(t *testing.T) { +func TestReorgData_String(t *testing.T) { reorgData := &ReorgData{ ChainID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), @@ -17,8 +17,10 @@ func TestXxx(t *testing.T) { NetworkLatestBlock: 300, NetworkFinalizedBlock: 240, NetworkFinalizedBlockName: aggkittypes.LatestBlock, + Description: "Test reorg description", } require.Equal(t, "ReorgData{ChainID: 1, BlockRangeAffected: From: 100, To: 200 (101), "+ - "DetectedAtBlock: 250, DetectedTimestamp: 1620000000, NetworkLatestBlock: 300, NetworkFinalizedBlock: 240 (LatestBlock)}", + "DetectedAtBlock: 250, DetectedTimestamp: 1620000000, NetworkLatestBlock: 300, NetworkFinalizedBlock: 240 (LatestBlock), "+ + "Description: Test reorg description}", reorgData.String()) } diff --git a/multidownloader/types/reorg_processor.go b/multidownloader/types/reorg_processor.go index 537815a72..f64e415e4 100644 --- a/multidownloader/types/reorg_processor.go +++ b/multidownloader/types/reorg_processor.go @@ -6,5 +6,5 @@ type ReorgProcessor interface { // ProcessReorg processes a detected reorg starting from the offending block number. // It identifies the range of blocks affected by the reorg and takes necessary actions // to handle the reorganization. - ProcessReorg(ctx context.Context, offendingBlockNumber uint64) error + ProcessReorg(ctx context.Context, detectedReorgError DetectedReorgError) error } From 377976cfdfe1557764b644953ce871aceecc60d2 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 11:34:45 +0100 Subject: [PATCH 30/75] feat: coverage --- multidownloader/reorg_processor_port_test.go | 60 ---- multidownloader/reorg_processor_test.go | 81 +++++- multidownloader/storage/storage_test.go | 271 +++++++++++++++++++ multidownloader/sync/runtimedata_test.go | 103 +++++++ multidownloader/types/reorg_port_test.go | 69 +++++ 5 files changed, 515 insertions(+), 69 deletions(-) create mode 100644 multidownloader/sync/runtimedata_test.go create mode 100644 multidownloader/types/reorg_port_test.go diff --git a/multidownloader/reorg_processor_port_test.go b/multidownloader/reorg_processor_port_test.go index 003ec8572..a41744ea4 100644 --- a/multidownloader/reorg_processor_port_test.go +++ b/multidownloader/reorg_processor_port_test.go @@ -17,66 +17,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestCompareBlockHeaders_ExistsRPCBlock(t *testing.T) { - t.Run("returns false when receiver is nil", func(t *testing.T) { - var c *mdtypes.CompareBlockHeaders - result := c.ExistsRPCBlock() - require.False(t, result) - }) - - t.Run("returns false when RpcHeader is nil", func(t *testing.T) { - c := &mdtypes.CompareBlockHeaders{ - BlockNumber: 100, - StorageHeader: &aggkittypes.BlockHeader{Number: 100}, - RpcHeader: nil, - } - result := c.ExistsRPCBlock() - require.False(t, result) - }) - - t.Run("returns true when RpcHeader is not nil", func(t *testing.T) { - c := &mdtypes.CompareBlockHeaders{ - BlockNumber: 100, - RpcHeader: &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - }, - } - result := c.ExistsRPCBlock() - require.True(t, result) - }) -} - -func TestCompareBlockHeaders_ExistsStorageBlock(t *testing.T) { - t.Run("returns false when receiver is nil", func(t *testing.T) { - var c *mdtypes.CompareBlockHeaders - result := c.ExistsStorageBlock() - require.False(t, result) - }) - - t.Run("returns false when StorageHeader is nil", func(t *testing.T) { - c := &mdtypes.CompareBlockHeaders{ - BlockNumber: 100, - StorageHeader: nil, - RpcHeader: &aggkittypes.BlockHeader{Number: 100}, - } - result := c.ExistsStorageBlock() - require.False(t, result) - }) - - t.Run("returns true when StorageHeader is not nil", func(t *testing.T) { - c := &mdtypes.CompareBlockHeaders{ - BlockNumber: 100, - StorageHeader: &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x5678"), - }, - } - result := c.ExistsStorageBlock() - require.True(t, result) - }) -} - func TestReorgPort_NewTx(t *testing.T) { t.Run("successfully creates new transaction", func(t *testing.T) { mockStorage := mdmocks.NewStorager(t) diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index 7c7232b25..5e63bcd26 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -294,10 +294,17 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() expectedErr := fmt.Errorf("transaction creation error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockPort.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error starting new tx") @@ -315,6 +322,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() expectedErr := fmt.Errorf("block search error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Debugf(mock.Anything).Once() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() @@ -322,7 +336,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { Return(nil, expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error finding first unaffected block") @@ -350,6 +364,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { latestBlockInRPC := uint64(115) finalizedBlockInRPC := uint64(100) chainID := uint64(1) + reorgErr := mdtypes.NewDetectedReorgError( + offendingBlockNumber, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything).Once() @@ -376,7 +397,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockTx.EXPECT().Commit().Return(nil).Once() - err := processor.ProcessReorg(ctx, offendingBlockNumber) + err := processor.ProcessReorg(ctx, *reorgErr) require.NoError(t, err) mockPort.AssertExpectations(t) @@ -394,6 +415,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("storage query error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Debugf(mock.Anything).Once() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() @@ -412,7 +440,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error getting last block number in storage") @@ -432,6 +460,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("move blocks error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() mockLogger.EXPECT().Debugf(mock.Anything).Once() @@ -454,7 +489,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error moving reorged blocks") @@ -474,6 +509,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("RPC error for latest") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Debugf(mock.Anything).Once() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() @@ -493,7 +535,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error getting latest block number in RPC") @@ -513,6 +555,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("RPC error for finalized") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Debugf(mock.Anything).Once() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() @@ -533,7 +582,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error getting finalized block number in RPC") @@ -558,6 +607,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("commit failed") chainID := uint64(1) + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() @@ -579,7 +635,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() mockTx.EXPECT().Commit().Return(expectedErr).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "cannot commit tx") @@ -633,6 +689,13 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { ctx := context.Background() rollbackErr := fmt.Errorf("rollback failed") originalErr := fmt.Errorf("original error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) mockLogger.EXPECT().Debugf(mock.Anything).Once() mockLogger.EXPECT().Errorf(mock.Anything, mock.Anything).Once() @@ -641,7 +704,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { Return(nil, originalErr).Once() mockTx.EXPECT().Rollback().Return(rollbackErr).Once() - err := processor.ProcessReorg(ctx, 100) + err := processor.ProcessReorg(ctx, *reorgErr) require.Error(t, err) require.Contains(t, err.Error(), "error finding first unaffected block") diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go index 7fae3f2ca..ce5b62659 100644 --- a/multidownloader/storage/storage_test.go +++ b/multidownloader/storage/storage_test.go @@ -172,6 +172,277 @@ func TestStorage_SaveEthLogsWithHeaders(t *testing.T) { require.Equal(t, logs[1], readLogs[1]) } +func TestStorage_LogQuery(t *testing.T) { + t.Run("returns empty response when no logs exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + query := mdrtypes.NewLogQuery(1000, 2000, []common.Address{exampleAddr1}) + + response, err := storage.LogQuery(nil, query) + + require.NoError(t, err) + require.Empty(t, response.Blocks) + require.Equal(t, query.BlockRange, response.ResponseRange) + }) + + t.Run("returns logs grouped by blocks with correct ordering", func(t *testing.T) { + storage := newStorageForTest(t, nil) + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + // Create block headers + blockHeaders := []*aggkittypes.BlockHeader{ + aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil), + aggkittypes.NewBlockHeader(1001, exampleTestHash[1], 1630000060, &exampleTestHash[0]), + aggkittypes.NewBlockHeader(1002, exampleTestHash[2], 1630000120, &exampleTestHash[1]), + } + + // Create logs - multiple logs per block and across different blocks + logs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 1000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{exampleTestHash[3]}, + Data: []byte{0x01}, + TxHash: exampleTestHash[5], + TxIndex: 0, + Index: 0, + }, + { + Address: exampleAddr1, + BlockNumber: 1000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{exampleTestHash[4]}, + Data: []byte{0x02}, + TxHash: exampleTestHash[5], + TxIndex: 1, + Index: 1, + }, + { + Address: exampleAddr2, + BlockNumber: 1001, + BlockHash: exampleTestHash[1], + BlockTimestamp: 1630000060, + Topics: []common.Hash{exampleTestHash[6]}, + Data: []byte{0x03}, + TxHash: exampleTestHash[7], + TxIndex: 0, + Index: 0, + }, + { + Address: exampleAddr1, + BlockNumber: 1002, + BlockHash: exampleTestHash[2], + BlockTimestamp: 1630000120, + Topics: []common.Hash{exampleTestHash[8]}, + Data: []byte{0x04}, + TxHash: exampleTestHash[9], + TxIndex: 0, + Index: 0, + }, + } + + err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + + // Query for logs from both addresses + query := mdrtypes.NewLogQuery(1000, 1002, []common.Address{exampleAddr1, exampleAddr2}) + response, err := storage.LogQuery(nil, query) + + require.NoError(t, err) + require.Equal(t, query.BlockRange, response.ResponseRange) + require.Len(t, response.Blocks, 3, "expected 3 blocks") + + // Verify first block (block 1000) - has 2 logs from exampleAddr1 + require.Equal(t, uint64(1000), response.Blocks[0].Header.Number) + require.Equal(t, exampleTestHash[0], response.Blocks[0].Header.Hash) + require.Equal(t, uint64(1630000000), response.Blocks[0].Header.Time) + require.True(t, response.Blocks[0].IsFinal) + require.Len(t, response.Blocks[0].Logs, 2) + require.Equal(t, exampleAddr1, response.Blocks[0].Logs[0].Address) + require.Equal(t, uint(0), response.Blocks[0].Logs[0].Index) + require.Equal(t, exampleAddr1, response.Blocks[0].Logs[1].Address) + require.Equal(t, uint(1), response.Blocks[0].Logs[1].Index) + + // Verify second block (block 1001) - has 1 log from exampleAddr2 + require.Equal(t, uint64(1001), response.Blocks[1].Header.Number) + require.Equal(t, exampleTestHash[1], response.Blocks[1].Header.Hash) + require.True(t, response.Blocks[1].IsFinal) + require.Len(t, response.Blocks[1].Logs, 1) + require.Equal(t, exampleAddr2, response.Blocks[1].Logs[0].Address) + + // Verify third block (block 1002) - has 1 log from exampleAddr1 + require.Equal(t, uint64(1002), response.Blocks[2].Header.Number) + require.Equal(t, exampleTestHash[2], response.Blocks[2].Header.Hash) + require.True(t, response.Blocks[2].IsFinal) + require.Len(t, response.Blocks[2].Logs, 1) + require.Equal(t, exampleAddr1, response.Blocks[2].Logs[0].Address) + }) + + t.Run("filters logs by single address", func(t *testing.T) { + storage := newStorageForTest(t, nil) + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + blockHeaders := []*aggkittypes.BlockHeader{ + aggkittypes.NewBlockHeader(2000, exampleTestHash[0], 1630001000, nil), + } + + logs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 2000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630001000, + Topics: []common.Hash{exampleTestHash[1]}, + Data: []byte{0xAA}, + Index: 0, + }, + { + Address: exampleAddr2, + BlockNumber: 2000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630001000, + Topics: []common.Hash{exampleTestHash[2]}, + Data: []byte{0xBB}, + Index: 1, + }, + } + + err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, false) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + + // Query only for exampleAddr1 + query := mdrtypes.NewLogQuery(2000, 2000, []common.Address{exampleAddr1}) + response, err := storage.LogQuery(nil, query) + + require.NoError(t, err) + require.Len(t, response.Blocks, 1) + require.Len(t, response.Blocks[0].Logs, 1) + require.Equal(t, exampleAddr1, response.Blocks[0].Logs[0].Address) + require.Equal(t, []byte{0xAA}, response.Blocks[0].Logs[0].Data) + require.False(t, response.Blocks[0].IsFinal, "expected block to not be final") + }) + + t.Run("respects block range boundaries", func(t *testing.T) { + storage := newStorageForTest(t, nil) + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + blockHeaders := []*aggkittypes.BlockHeader{ + aggkittypes.NewBlockHeader(3000, exampleTestHash[0], 1630002000, nil), + aggkittypes.NewBlockHeader(3001, exampleTestHash[1], 1630002060, &exampleTestHash[0]), + aggkittypes.NewBlockHeader(3002, exampleTestHash[2], 1630002120, &exampleTestHash[1]), + } + + logs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 3000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630002000, + Topics: []common.Hash{}, + Index: 0, + }, + { + Address: exampleAddr1, + BlockNumber: 3001, + BlockHash: exampleTestHash[1], + BlockTimestamp: 1630002060, + Topics: []common.Hash{}, + Index: 0, + }, + { + Address: exampleAddr1, + BlockNumber: 3002, + BlockHash: exampleTestHash[2], + BlockTimestamp: 1630002120, + Topics: []common.Hash{}, + Index: 0, + }, + } + + err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + + // Query for middle block only + query := mdrtypes.NewLogQuery(3001, 3001, []common.Address{exampleAddr1}) + response, err := storage.LogQuery(nil, query) + + require.NoError(t, err) + require.Len(t, response.Blocks, 1, "expected only 1 block in range") + require.Equal(t, uint64(3001), response.Blocks[0].Header.Number) + }) + + t.Run("preserves log field values correctly", func(t *testing.T) { + storage := newStorageForTest(t, nil) + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + parentHash := exampleTestHash[9] + blockHeaders := []*aggkittypes.BlockHeader{ + aggkittypes.NewBlockHeader(4000, exampleTestHash[0], 1630003000, &parentHash), + } + + expectedTopics := []common.Hash{exampleTestHash[1], exampleTestHash[2], exampleTestHash[3]} + expectedData := []byte{0xDE, 0xAD, 0xBE, 0xEF} + expectedTxHash := exampleTestHash[5] + + logs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 4000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630003000, + Topics: expectedTopics, + Data: expectedData, + TxHash: expectedTxHash, + TxIndex: 42, + Index: 7, + }, + } + + err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + + query := mdrtypes.NewLogQuery(4000, 4000, []common.Address{exampleAddr1}) + response, err := storage.LogQuery(nil, query) + + require.NoError(t, err) + require.Len(t, response.Blocks, 1) + require.Len(t, response.Blocks[0].Logs, 1) + + log := response.Blocks[0].Logs[0] + require.Equal(t, exampleAddr1, log.Address) + require.Equal(t, expectedTopics, log.Topics) + require.Equal(t, expectedData, log.Data) + require.Equal(t, expectedTxHash, log.TxHash) + require.Equal(t, uint(42), log.TxIndex) + require.Equal(t, uint(7), log.Index) + require.Equal(t, uint64(4000), log.BlockNumber) + require.Equal(t, uint64(1630003000), log.BlockTimestamp) + require.False(t, log.Removed) + + // Verify block header fields + header := response.Blocks[0].Header + require.Equal(t, uint64(4000), header.Number) + require.Equal(t, exampleTestHash[0], header.Hash) + require.Equal(t, uint64(1630003000), header.Time) + require.NotNil(t, header.ParentHash) + require.Equal(t, parentHash, *header.ParentHash) + }) +} + func TestStorage_UpdateIsFinal(t *testing.T) { storage := newStorageForTest(t, nil) block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) diff --git a/multidownloader/sync/runtimedata_test.go b/multidownloader/sync/runtimedata_test.go new file mode 100644 index 000000000..54d274f67 --- /dev/null +++ b/multidownloader/sync/runtimedata_test.go @@ -0,0 +1,103 @@ +package multidownloader + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestRuntimeData_String(t *testing.T) { + data := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + } + + expected := "ChainID: 1, Addresses: 0x1234567890abcdef1234567890abcdef12345678, 0xabcdefabcdefabcdefabcdefabcdefabcdefabcd, " + require.Equal(t, expected, data.String()) +} + +func TestRuntimeData_IsCompatible_Success(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + } + + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + } + + err := data1.IsCompatible(data2) + require.NoError(t, err) +} + +func TestRuntimeData_IsCompatible_ChainIDMismatch(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + } + + data2 := RuntimeData{ + ChainID: 2, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + } + + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "chain ID mismatch") +} + +func TestRuntimeData_IsCompatible_AddressLengthMismatch(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + } + + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + } + + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses len mismatch") +} + +func TestRuntimeData_IsCompatible_AddressMismatch(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + } + + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + } + + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses[0] mismatch") +} diff --git a/multidownloader/types/reorg_port_test.go b/multidownloader/types/reorg_port_test.go new file mode 100644 index 000000000..c28894779 --- /dev/null +++ b/multidownloader/types/reorg_port_test.go @@ -0,0 +1,69 @@ +package types + +import ( + "testing" + + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestCompareBlockHeaders_ExistsRPCBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *CompareBlockHeaders + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns false when RpcHeader is nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{Number: 100}, + RpcHeader: nil, + } + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns true when RpcHeader is not nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + } + result := c.ExistsRPCBlock() + require.True(t, result) + }) +} + +func TestCompareBlockHeaders_ExistsStorageBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *CompareBlockHeaders + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns false when StorageHeader is nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: nil, + RpcHeader: &aggkittypes.BlockHeader{Number: 100}, + } + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns true when StorageHeader is not nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + } + result := c.ExistsStorageBlock() + require.True(t, result) + }) +} From 0f4971389b82d4d6e993def368f4c3a1f3b99d86 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 11:52:32 +0100 Subject: [PATCH 31/75] feat: renamed download.go to downloader.go --- multidownloader/sync/{download.go => downloader.go} | 0 multidownloader/sync/{download_test.go => downloader_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename multidownloader/sync/{download.go => downloader.go} (100%) rename multidownloader/sync/{download_test.go => downloader_test.go} (100%) diff --git a/multidownloader/sync/download.go b/multidownloader/sync/downloader.go similarity index 100% rename from multidownloader/sync/download.go rename to multidownloader/sync/downloader.go diff --git a/multidownloader/sync/download_test.go b/multidownloader/sync/downloader_test.go similarity index 100% rename from multidownloader/sync/download_test.go rename to multidownloader/sync/downloader_test.go From d2cce54d2a337e1fb4b86647531f5801224c654c Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 11:57:35 +0100 Subject: [PATCH 32/75] feat: renamed Downloader to EVMDownloader --- l1infotreesync/l1infotreesync.go | 4 +- multidownloader/evm_multidownloader_test.go | 2 +- .../sync/{downloader.go => evmdownloader.go} | 42 ++++++++--------- ...wnloader_test.go => evmdownloader_test.go} | 46 +++++++++---------- 4 files changed, 47 insertions(+), 47 deletions(-) rename multidownloader/sync/{downloader.go => evmdownloader.go} (83%) rename multidownloader/sync/{downloader_test.go => evmdownloader_test.go} (98%) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index d21568dc3..b43ca5632 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -100,12 +100,12 @@ func NewMultidownloadBased( return nil, fmt.Errorf("failed to register l1infotreesync in multidownloader: %w", err) } logger := log.WithFields("syncer", syncerID) - // TODO: move the durations to config file (mdrsync.NewDownloader) + // TODO: move the durations to config file (mdrsync.NewEVMDownloader) logger.Infof("Creating L1 Info Tree Syncer with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", cfg.WaitForNewBlocksPeriod.String(), cfg.RetryAfterErrorPeriod.String(), ) - downloader := mdrsync.NewDownloader( + downloader := mdrsync.NewEVMDownloader( l1Multidownloader, logger, rh, diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 107a7f20a..a3656d899 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -113,7 +113,7 @@ func TestEVMMultidownloader(t *testing.T) { RetryAfterErrorPeriod: time.Second, MaxRetryAttemptsAfterError: 0, } - downloader := mdrsync.NewDownloader( + downloader := mdrsync.NewEVMDownloader( mdr, logger, rh, diff --git a/multidownloader/sync/downloader.go b/multidownloader/sync/evmdownloader.go similarity index 83% rename from multidownloader/sync/downloader.go rename to multidownloader/sync/evmdownloader.go index 3f3e90b48..3a2d90e6c 100644 --- a/multidownloader/sync/downloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -22,7 +22,7 @@ var ( ErrLogsNotAvailable = fmt.Errorf("logs not available") ) -type Downloader struct { +type EVMDownloader struct { mdr mdrsynctypes.MultidownloaderInterface logger aggkitcommon.Logger rh *sync.RetryHandler @@ -32,15 +32,15 @@ type Downloader struct { pullingPeriod time.Duration } -func NewDownloader( +func NewEVMDownloader( mdr mdrsynctypes.MultidownloaderInterface, logger aggkitcommon.Logger, rh *sync.RetryHandler, appender sync.LogAppenderMap, waitPeriodToCatchUpMaximumLogRange time.Duration, pullingPeriod time.Duration, -) *Downloader { - return &Downloader{ +) *EVMDownloader { + return &EVMDownloader{ mdr: mdr, logger: logger, rh: rh, @@ -50,11 +50,11 @@ func NewDownloader( } } -func (d *Downloader) Finality() aggkittypes.BlockNumberFinality { +func (d *EVMDownloader) Finality() aggkittypes.BlockNumberFinality { return d.mdr.Finality() } -func (d *Downloader) DownloadNextBlocks(ctx context.Context, +func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, lastBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) { @@ -86,14 +86,14 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, return true, nil }) if errors.Is(err, aggkitcommon.ErrTimeoutReached) { - return nil, fmt.Errorf("Downloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w", + return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w", maxLogQuery.String(), d.waitPeriodToCatchUpMaximumLogRange.String(), ErrLogsNotAvailable) } if err != nil { return nil, err } if !conditionMet { - return nil, fmt.Errorf("Downloader.DownloadNextBlocks: logs not available for query: %s. Err: %w", + return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s. Err: %w", maxLogQuery.String(), ErrLogsNotAvailable) } @@ -103,7 +103,7 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, return nil, err } if result == nil { - d.logger.Debugf("Downloader.DownloadNextBlocks: no logs found for blocks %s", maxLogQuery.BlockRange.String()) + d.logger.Debugf("EVMDownloader.DownloadNextBlocks: no logs found for blocks %s", maxLogQuery.BlockRange.String()) result = &mdrsynctypes.DownloadResult{ Data: nil, PercentComplete: percentComplete, @@ -112,13 +112,13 @@ func (d *Downloader) DownloadNextBlocks(ctx context.Context, return result, nil } -func (d *Downloader) ChainID(ctx context.Context) (uint64, error) { +func (d *EVMDownloader) ChainID(ctx context.Context) (uint64, error) { return d.mdr.ChainID(ctx) } // executeLogQuery executes the log query, checking for partial availability // if there are no logs available returns an error -func (d *Downloader) executeLogQuery(ctx context.Context, +func (d *EVMDownloader) executeLogQuery(ctx context.Context, fullLogQuery mdrtypes.LogQuery) (*mdrsynctypes.DownloadResult, error) { logQuery := fullLogQuery if !d.mdr.IsAvailable(fullLogQuery) { @@ -143,12 +143,12 @@ func (d *Downloader) executeLogQuery(ctx context.Context, err = d.addLastBlockIfNotIncluded(ctx, result, logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange) if err != nil { - return nil, fmt.Errorf("Downloader.executeLogQuery: adding last block: %w", err) + return nil, fmt.Errorf("EVMDownloader.executeLogQuery: adding last block: %w", err) } - d.logger.Infof("Downloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs) + d.logger.Infof("EVMDownloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs) return result, nil } -func (d *Downloader) addLastBlockIfNotIncluded(ctx context.Context, +func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, result *mdrsynctypes.DownloadResult, responseRange aggkitcommon.BlockRange, unsafeRange aggkitcommon.BlockRange) error { @@ -162,14 +162,14 @@ func (d *Downloader) addLastBlockIfNotIncluded(ctx context.Context, hdr, _, err := d.mdr.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber)) if err != nil { - d.logger.Errorf("Downloader: error getting block header for block number %d: %v", lastBlockNumber, err) + d.logger.Errorf("EVMDownloader: error getting block header for block number %d: %v", lastBlockNumber, err) return nil } if hdr == nil { // Check that we are not in the unsafe zone. Because in that case we can't fake the Hash and it's an error // because the block must in in storage if unsafeRange.ContainsBlockNumber(lastBlockNumber) { - err := fmt.Errorf("Downloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber) + err := fmt.Errorf("EVMDownloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber) d.logger.Error(err) return err } @@ -192,14 +192,14 @@ func (d *Downloader) addLastBlockIfNotIncluded(ctx context.Context, if hdr.ParentHash != nil { emptyBlock.ParentHash = *hdr.ParentHash } - d.logger.Debugf("Downloader.addLastBlockIfNotIncluded: to response %s adding empty block number %d / %s", + d.logger.Debugf("EVMDownloader.addLastBlockIfNotIncluded: to response %s adding empty block number %d / %s", responseRange.String(), lastBlockNumber, hdr.Hash.Hex()) result.Data = append(result.Data, emptyBlock) return nil } -func (d *Downloader) logQueryResponseToEVMBlocks( +func (d *EVMDownloader) logQueryResponseToEVMBlocks( ctx context.Context, response mdrtypes.LogQueryResponse) sync.EVMBlocks { blocks := make(sync.EVMBlocks, 0, len(response.Blocks)) for _, blockWithLogs := range response.Blocks { @@ -236,7 +236,7 @@ func (d *Downloader) logQueryResponseToEVMBlocks( return blocks } -func (d *Downloader) appendLog(ctx context.Context, block *sync.EVMBlock, log types.Log) { +func (d *EVMDownloader) appendLog(ctx context.Context, block *sync.EVMBlock, log types.Log) { appenderFn := d.appender[log.Topics[0]] if appenderFn == nil { // d.logger.Debugf("no appender function found for topic: %s", log.Topics[0].Hex()) @@ -256,7 +256,7 @@ func (d *Downloader) appendLog(ctx context.Context, block *sync.EVMBlock, log ty } // newMaxLogQuery creates a new LogQuery based on the syncerConfig and maxBlocks -func (d *Downloader) newMaxLogQuery(lastBlockHeader *aggkittypes.BlockHeader, +func (d *EVMDownloader) newMaxLogQuery(lastBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig) mdrtypes.LogQuery { var fromBlock uint64 @@ -270,7 +270,7 @@ func (d *Downloader) newMaxLogQuery(lastBlockHeader *aggkittypes.BlockHeader, return logQuery } -func (d *Downloader) checkReorgedBlock(ctx context.Context, +func (d *EVMDownloader) checkReorgedBlock(ctx context.Context, blockHeader *aggkittypes.BlockHeader) error { // Check Context cancellation if ctx.Err() != nil { diff --git a/multidownloader/sync/downloader_test.go b/multidownloader/sync/evmdownloader_test.go similarity index 98% rename from multidownloader/sync/downloader_test.go rename to multidownloader/sync/evmdownloader_test.go index 9a66954ff..af356ffa4 100644 --- a/multidownloader/sync/downloader_test.go +++ b/multidownloader/sync/evmdownloader_test.go @@ -46,7 +46,7 @@ func TestDownloadNextBlocks_Success(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -117,7 +117,7 @@ func TestDownloadNextBlocks_ContextCancellation(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -152,7 +152,7 @@ func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -206,7 +206,7 @@ func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -275,7 +275,7 @@ func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -355,7 +355,7 @@ func TestDownloadNextBlocks_TimeoutWaitingForLogs(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -402,7 +402,7 @@ func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -449,7 +449,7 @@ func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -509,7 +509,7 @@ func TestExecuteLogQuery_FullyAvailable(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -574,7 +574,7 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -635,7 +635,7 @@ func TestExecuteLogQuery_NotAvailable(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -665,7 +665,7 @@ func TestExecuteLogQuery_GetEthLogsError(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -693,7 +693,7 @@ func TestNewMaxLogQuery_WithLastBlockHeader(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: nil, logger: logger, rh: rh, @@ -726,7 +726,7 @@ func TestNewMaxLogQuery_WithoutLastBlockHeader(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: nil, logger: logger, rh: rh, @@ -756,7 +756,7 @@ func TestCheckReorgedBlock_NilBlockHeader(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -780,7 +780,7 @@ func TestCheckReorgedBlock_ValidBlock(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -810,7 +810,7 @@ func TestCheckReorgedBlock_InvalidBlock(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -852,7 +852,7 @@ func TestCheckReorgedBlock_ContextCancellation(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -881,7 +881,7 @@ func TestCheckReorgedBlock_CheckValidBlockError(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -912,7 +912,7 @@ func TestCheckReorgedBlock_GetReorgedDataError(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -944,7 +944,7 @@ func TestCheckReorgedBlock_NilReorgData(t *testing.T) { MaxRetryAttemptsAfterError: 3, } - download := &Downloader{ + download := &EVMDownloader{ mdr: mockMdr, logger: logger, rh: rh, @@ -984,7 +984,7 @@ func TestAppendLog_Success(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: nil, logger: logger, rh: rh, @@ -1033,7 +1033,7 @@ func TestAppendLog_RetryOnError(t *testing.T) { }, } - download := &Downloader{ + download := &EVMDownloader{ mdr: nil, logger: logger, rh: rh, From 988318a661c6937adabb976d9c43ff063e231eda Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 15:58:01 +0100 Subject: [PATCH 33/75] feat: unittest with custom syncer --- multidownloader/e2e_test.go | 319 ++++++++++++------ multidownloader/storage/storage_reorg.go | 10 +- multidownloader/storage/storage_reorg_test.go | 94 ++++++ 3 files changed, 320 insertions(+), 103 deletions(-) diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index be0dff31d..ce37ac4cb 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -3,7 +3,10 @@ package multidownloader import ( "context" "errors" + "fmt" "math/big" + "math/rand" + "sync" "testing" "time" @@ -11,9 +14,10 @@ import ( "github.com/agglayer/aggkit/etherman" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" + mdsync "github.com/agglayer/aggkit/multidownloader/sync" + aggkitsync "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/test/contracts/logemitter" aggkittypes "github.com/agglayer/aggkit/types" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -35,6 +39,140 @@ type mdrE2ESimulatedEnv struct { auth *bind.TransactOpts } +type PingEvent struct { + BlockPosition uint64 + From common.Address + Id uint64 + Message string +} + +type LogemitterEvent struct { + PingEvent *PingEvent +} + +func logemitterAppender(contract *logemitter.Logemitter) aggkitsync.LogAppenderMap { + appender := make(aggkitsync.LogAppenderMap) + appender[pingSignature] = func(b *aggkitsync.EVMBlock, l types.Log) error { + event, err := contract.ParsePing(l) + b.Events = append(b.Events, &LogemitterEvent{PingEvent: &PingEvent{ + BlockPosition: uint64(l.Index), + From: event.From, + Id: event.Id.Uint64(), + Message: event.Message, + }}) + return err + } + return appender +} + +type logemitterProcessor struct { + logger *log.Logger + mdr *EVMMultidownloader + mutex sync.Mutex + lastBlock *aggkittypes.BlockHeader + events map[uint64]*aggkitsync.Block +} + +func (p *logemitterProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.lastBlock, nil +} +func (p *logemitterProcessor) ProcessBlock(ctx context.Context, block aggkitsync.Block) error { + p.mutex.Lock() + defer p.mutex.Unlock() + p.lastBlock = &aggkittypes.BlockHeader{ + Number: block.Num, + Hash: block.Hash, + } + p.logger.Infof("Processed block number %d / %s with %d events", + block.Num, block.Hash.Hex(), len(block.Events)) + if p.events == nil { + p.events = make(map[uint64]*aggkitsync.Block) + } + p.events[block.Num] = &block + return nil +} +func (p *logemitterProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + p.mutex.Lock() + defer p.mutex.Unlock() + p.logger.Infof("Processing reorg from block number %d", firstReorgedBlock) + hdr, err := p.mdr.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(firstReorgedBlock-1)) + if err != nil { + return err + } + p.logger.Infof("New last block after reorg: %s", hdr.String()) + p.lastBlock = hdr + // remove reorged events from p.events + for blkNum := range p.events { + if blkNum >= firstReorgedBlock { + delete(p.events, blkNum) + } + } + return nil +} + +func (p *logemitterProcessor) lastPingEvent() *PingEvent { + p.mutex.Lock() + defer p.mutex.Unlock() + var lastEvent *PingEvent + var lastBlockNum uint64 + for blkNum, block := range p.events { + for _, ev := range block.Events { + logEv, ok := ev.(*LogemitterEvent) + if !ok { + continue + } + if logEv.PingEvent != nil { + if blkNum >= lastBlockNum { + lastBlockNum = blkNum + lastEvent = logEv.PingEvent + } + } + } + } + return lastEvent +} + +func newLogemitterSyncer(t *testing.T, mdr *EVMMultidownloader, + contract *logemitter.Logemitter, + syncerConfig aggkittypes.SyncerConfig) (*mdsync.EVMDriver, + *logemitterProcessor, *mdsync.EVMDownloader) { + t.Helper() + logger := log.WithFields("module", "sync_logemitter") + downloader := mdsync.NewEVMDownloader( + mdr, + logger, + &aggkitsync.RetryHandler{ + MaxRetryAttemptsAfterError: 5, + }, + logemitterAppender(contract), + 1*time.Minute, + 1*time.Second, + ) + + processor := &logemitterProcessor{ + logger: logger, + mdr: mdr, + } + + driver := mdsync.NewEVMDriver( + logger, + processor, + downloader, + syncerConfig, + 100, + &aggkitsync.RetryHandler{ + MaxRetryAttemptsAfterError: 5, + }, + nil, + ) + // TODO: Register syncer must be done by driver? + err := mdr.RegisterSyncer(syncerConfig) + require.NoError(t, err) + return driver, processor, downloader +} + func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { t.Helper() // Generate key + address @@ -64,15 +202,16 @@ func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { } } -func TestE2E(t *testing.T) { - if testing.Short() { - t.Skip("skipping E2E test in short mode") - } - // Simulated L1 - testData := buildL1Simulated(t) - - logger := log.WithFields("module", "mdr_e2e") +func newMultidownloader(t *testing.T, testData *mdrE2ESimulatedEnv) *EVMMultidownloader { + t.Helper() cfg := NewConfigDefault("e2e_test", t.TempDir()) + //logger := log.WithFields("module", "mdr_e2e_custom_syncer") + logger, _, err := log.NewLogger(log.Config{ + Level: "error", + Environment: "development", + Outputs: []string{"stdout"}, + }) + require.NoError(t, err) store, err := storage.NewMultidownloaderStorage(logger, storage.MultidownloaderStorageConfig{ DBPath: cfg.StoragePath, @@ -84,14 +223,13 @@ func TestE2E(t *testing.T) { require.NoError(t, err) cfg.BlockFinality = *simulatedFinalized - cfg.WaitPeriodToCheckCatchUp = configtypes.Duration{Duration: 1 * time.Millisecond} - cfg.PeriodToCheckReorgs = configtypes.Duration{Duration: 1 * time.Millisecond} - require.NoError(t, err) + cfg.WaitPeriodToCheckCatchUp = configtypes.Duration{Duration: 100 * time.Millisecond} + cfg.PeriodToCheckReorgs = configtypes.Duration{Duration: 500 * time.Millisecond} mdr, err := NewEVMMultidownloader( logger, cfg, - "mdr_e2e", + "mdr_e2e_custom_syncer", testData.ethClient, nil, // rpcClient store, @@ -100,112 +238,91 @@ func TestE2E(t *testing.T) { ) require.NoError(t, err) require.NotNil(t, mdr) - // Generate some logs - _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "hello world") - require.NoError(t, err) - testData.SimulatedL1.Commit() + return mdr +} - err = mdr.RegisterSyncer(aggkittypes.SyncerConfig{ - SyncerID: "log_emitter_e2e_test", +func TestE2E_CustomSyncer(t *testing.T) { + if testing.Short() { + t.Skip("skipping E2E test in short mode") + } + var err error + testData := buildL1Simulated(t) + mdr := newMultidownloader(t, testData) + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "log_emitter_e2e_test_custom_syncer", ContractAddresses: []common.Address{ testData.LogEmitterAddr, }, FromBlock: 0, ToBlock: aggkittypes.LatestBlock, - }) - require.NoError(t, err) - ctx := t.Context() + } + + driver, processor, _ := newLogemitterSyncer(t, mdr, testData.LogEmitterContract, syncerConfig) + ctx := context.TODO() err = mdr.Initialize(ctx) require.NoError(t, err) + // It's important, mdr must be started go func() { err := mdr.Start(ctx) if err != nil && !errors.Is(err, context.Canceled) { require.NoError(t, err) } }() - latestBlock, err := mdr.BlockNumber(ctx, aggkittypes.LatestBlock) - require.NoError(t, err) - logs, err := mdr.FilterLogs(ctx, ethereum.FilterQuery{ - Addresses: []common.Address{testData.LogEmitterAddr}, - FromBlock: big.NewInt(0), - ToBlock: big.NewInt(int64(latestBlock)), - }) - require.NoError(t, err) - emitterLogs := processEvents(t, testData.LogEmitterContract, logs) - require.Equal(t, 2, len(logs)) - require.Equal(t, testData.LogEmitterAddr, logs[0].Address) - require.Equal(t, logEmitterEvent{ - From: testData.auth.From, - Id: big.NewInt(123), - Message: "hello world", - }, emitterLogs[1]) - - testData.SimulatedL1.Commit() // Block 3 - _, err = testData.LogEmitterContract.EmitPing(testData.auth, big.NewInt(123), "block 4") - require.NoError(t, err) - testData.SimulatedL1.Commit() // Block 4 - logs, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ - Addresses: []common.Address{testData.LogEmitterAddr}, - FromBlock: big.NewInt(0), - ToBlock: big.NewInt(int64(latestBlock + 2)), - }) - require.NoError(t, err) - require.Equal(t, 3, len(logs)) - - showChainStatus(t, ctx, logger, testData.SimulatedL1) - blk4, err := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(4)) - require.NoError(t, err) - - // Forking at block 3 -> so block 4 will be reorged - // ---------- FORKING POINT ---------------------------------------- - forkAt(t, ctx, logger, testData.SimulatedL1, 3) - - // Now se have to create a longer chain to force reorg - testData.SimulatedL1.Commit() // reorg chain: Block 4 - testData.SimulatedL1.Commit() // reorg chain: Block 5 - showChainStatus(t, ctx, logger, testData.SimulatedL1) - _, err = mdr.FilterLogs(ctx, ethereum.FilterQuery{ - Addresses: []common.Address{testData.LogEmitterAddr}, - FromBlock: big.NewInt(0), - ToBlock: big.NewInt(int64(5)), - }) - require.NoError(t, err) - blkReorged4, err := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(4)) - require.NoError(t, err) - logger.Infof("Block 4 hash after reorg: %s", blkReorged4.Hash.Hex()) - require.NotEqual(t, blk4.Hash, blkReorged4.Hash, "block 4 hash should be different after reorg") - time.Sleep(1 * time.Second) - err = mdr.Stop(ctx) - require.NoError(t, err) - isValid, reorgChainID, err := mdr.CheckValidBlock(ctx, blk4.Number, blk4.Hash) - require.NoError(t, err) - require.False(t, isValid, "block 4 should not be valid after reorg") - require.Equal(t, uint64(1), reorgChainID, "reorgChainID should be 1") -} - -func forkAt(t *testing.T, ctx context.Context, logger *log.Logger, sim *simulated.Backend, blockNumber uint64) { - t.Helper() - blk, err := sim.Client().HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) - require.NoError(t, err) - require.NoError(t, err) - logger.Infof("Forking L1 at block %d (%s)... This will generate new block for reorg >%d", blockNumber, blk.Hash().Hex(), blockNumber) - - err = sim.Fork(blk.Hash()) - require.NoError(t, err) -} - -func showChainStatus(t *testing.T, ctx context.Context, logger *log.Logger, sim *simulated.Backend) { - t.Helper() - latestBlock, err := sim.Client().BlockNumber(ctx) + go func() { + driver.Sync(ctx) + }() - require.NoError(t, err) - logger.Infof("Current chain latest block: %d", latestBlock) - for i := uint64(0); i <= latestBlock; i++ { - blk, err := sim.Client().HeaderByNumber(ctx, big.NewInt(int64(i))) + for numReorgs := 0; numReorgs < 3; numReorgs++ { + var blocks []*types.Header + var lastBlock *types.Header + var logIndex int64 + for i := 0; i < 10; i++ { + logIndex++ + log.Infof("Emitting ping %d", logIndex) + _, err = testData.LogEmitterContract.EmitPing(testData.auth, + big.NewInt(logIndex), + fmt.Sprintf("iteration %d", logIndex)) + require.NoError(t, err) + testData.SimulatedL1.Commit() // Block 3 + hdr, err := testData.ethClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + if blocks == nil { + blocks = make([]*types.Header, 0) + } + if lastBlock == nil || (lastBlock.Number.Uint64() != hdr.Number.Uint64()) { + blocks = append(blocks, hdr) + lastBlock = hdr + } + } + // Catch up + for { + lastPing := processor.lastPingEvent() + log.Infof("Catching up: last ping id: %+v", lastPing) + if lastPing != nil && lastPing.Id == uint64(logIndex) { + break + } + time.Sleep(100 * time.Millisecond) + } + lastProcessedBlock, err := processor.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + // Pick a random index to fork (minimum 1 block must be refactored) + chooseBlockIndex := rand.Intn(len(blocks) - 2) + err = testData.SimulatedL1.Fork(blocks[chooseBlockIndex].Hash()) require.NoError(t, err) - logger.Infof(" Block %d: %s", i, blk.Hash().Hex()) + testData.SimulatedL1.Commit() // reorg chain: Block 4 + for { + currentBlock, err := processor.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + log.Infof("Catching up after reorg: previousLastBlock (%d) != currentLastBlock=%d", lastProcessedBlock.Number, currentBlock.Number) + if currentBlock.Number != lastProcessedBlock.Number { + break + } + time.Sleep(100 * time.Millisecond) + } + log.Infof("Finish reorg %d", numReorgs) } + log.Info("Finish tests") } type logEmitterEvent struct { diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index fea01ba06..92feb28b1 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -116,10 +116,16 @@ func (a *MultidownloaderStorage) GetBlockReorgedChainID(tx dbtypes.Querier, var chainIDRow struct { ChainID *uint64 `meddler:"chain_id"` } - query := `SELECT chain_id FROM blocks_reorged - WHERE block_number = ? AND block_hash = ? LIMIT 1;` + query := `SELECT br.chain_id FROM blocks_reorged br + INNER JOIN reorgs r ON br.chain_id = r.chain_id + WHERE br.block_number = ? AND br.block_hash = ? + ORDER BY r.reorged_from_block ASC + LIMIT 1;` err := tx.QueryRow(query, blockNumber, blockHash.Hex()).Scan(&chainIDRow.ChainID) if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, false, nil + } return 0, false, fmt.Errorf("GetBlockReorgedChainID: error querying blocks_reorged: %w", err) } if chainIDRow.ChainID == nil { diff --git a/multidownloader/storage/storage_reorg_test.go b/multidownloader/storage/storage_reorg_test.go index 265f9afd3..ed487b9a0 100644 --- a/multidownloader/storage/storage_reorg_test.go +++ b/multidownloader/storage/storage_reorg_test.go @@ -66,6 +66,100 @@ func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { } } +func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { + t.Run("returns chain_id with lowest reorged_from_block when block exists in multiple chains", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // First, populate some blocks that will be reorged + populateLogsAndBlocksForTest(t, storage, 1000, 50, 2) + + // Create first reorg with reorged_from_block=1010 + reorgData1 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1010, 1020), + DetectedAtBlock: 1025, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: "First reorg", + } + + tx1, err := storage.NewTx(t.Context()) + require.NoError(t, err) + chainID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + require.NoError(t, err) + require.Equal(t, uint64(1), chainID1) + err = tx1.Commit() + require.NoError(t, err) + + // Create second reorg with reorged_from_block=1005 (lower than first) + reorgData2 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1005, 1009), + DetectedAtBlock: 1030, + DetectedTimestamp: 1630004000, + NetworkLatestBlock: 2100, + NetworkFinalizedBlock: 2090, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: "Second reorg", + } + + tx2, err := storage.NewTx(t.Context()) + require.NoError(t, err) + chainID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + require.NoError(t, err) + require.Equal(t, uint64(2), chainID2) + err = tx2.Commit() + require.NoError(t, err) + + // The key test: insert the SAME block_number and block_hash into MULTIPLE chains + // This is the scenario the user wants to test - when a block exists in multiple reorg chains, + // the function should return the chain_id with the lowest reorged_from_block + testBlockNumber := uint64(2000) // Use a block number outside the reorg ranges + testBlockHash := exampleTestHash[7] + + tx3, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + // Insert the SAME block into chain 1 (reorged_from_block=1010) + _, err = tx3.Exec(`INSERT INTO blocks_reorged (chain_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, chainID1, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + require.NoError(t, err) + + // Insert the SAME block into chain 2 (reorged_from_block=1005, lower!) + _, err = tx3.Exec(`INSERT INTO blocks_reorged (chain_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, chainID2, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + require.NoError(t, err) + + err = tx3.Commit() + require.NoError(t, err) + + // Query for the block - should return chainID2 since it has the lowest reorged_from_block (1005 < 1010) + returnedChainID, found, err := storage.GetBlockReorgedChainID(nil, testBlockNumber, testBlockHash) + require.NoError(t, err) + require.True(t, found, "block should be found") + require.Equal(t, chainID2, returnedChainID, "should return chain_id with lowest reorged_from_block (chain 2 with reorged_from_block=1005)") + + // Verify the reorged_from_block values to confirm our expectation + reorgData1Retrieved, err := storage.GetReorgedDataByChainID(nil, chainID1) + require.NoError(t, err) + require.Equal(t, uint64(1010), reorgData1Retrieved.BlockRangeAffected.FromBlock) + + reorgData2Retrieved, err := storage.GetReorgedDataByChainID(nil, chainID2) + require.NoError(t, err) + require.Equal(t, uint64(1005), reorgData2Retrieved.BlockRangeAffected.FromBlock) + }) + + t.Run("returns false when block not found in any chain", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Query for non-existent block + chainID, found, err := storage.GetBlockReorgedChainID(nil, 9999, exampleTestHash[0]) + require.NoError(t, err) + require.False(t, found, "block should not be found") + require.Equal(t, uint64(0), chainID) + }) +} + func TestStorage_GetReorgedDataByChainID(t *testing.T) { t.Run("returns reorg data when found", func(t *testing.T) { storage := newStorageForTest(t, nil) From 0fb18f621912a4fb02fbd0c24559abbb90165f14 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 17:57:45 +0100 Subject: [PATCH 34/75] fix: percent of syncronization --- multidownloader/e2e_test.go | 29 +----------- .../evm_multidownloader_syncers.go | 8 ++-- multidownloader/sync/evmdownloader.go | 47 ++++++++++++++++--- multidownloader/sync/evmdownloader_test.go | 28 +++++++++-- multidownloader/sync/evmdriver.go | 3 ++ sync/evmtypes.go | 21 ++++++++- 6 files changed, 94 insertions(+), 42 deletions(-) diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index ce37ac4cb..35f5a3279 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -205,7 +205,7 @@ func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { func newMultidownloader(t *testing.T, testData *mdrE2ESimulatedEnv) *EVMMultidownloader { t.Helper() cfg := NewConfigDefault("e2e_test", t.TempDir()) - //logger := log.WithFields("module", "mdr_e2e_custom_syncer") + // This log logger will only log errors to avoid cluttering the test output logger, _, err := log.NewLogger(log.Config{ Level: "error", Environment: "development", @@ -324,30 +324,3 @@ func TestE2E_CustomSyncer(t *testing.T) { } log.Info("Finish tests") } - -type logEmitterEvent struct { - From common.Address - Id *big.Int - Message string -} - -func processEvents(t *testing.T, contract *logemitter.Logemitter, logs []types.Log) []logEmitterEvent { - t.Helper() - result := make([]logEmitterEvent, 0) - for _, lg := range logs { - if lg.Topics[0] == pingSignature { - event, err := contract.ParsePing(lg) - require.NoError(t, err) - log.Infof("Processed Ping event: From=%s, Id=%s, Message=%s", - event.From, event.Id, event.Message) - result = append(result, logEmitterEvent{ - From: event.From, - Id: event.Id, - Message: event.Message, - }) - } else { - t.Fatalf("Unknown event signature: %s", lg.Topics[0].Hex()) - } - } - return result -} diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index f997be27f..da6235eff 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -146,10 +146,12 @@ func (dh *EVMMultidownloader) LogQuery(ctx context.Context, result, err := dh.storage.LogQuery(nil, *availQuery) if err != nil { - // Calculate UnsafeRange - _, unsafePendingBlockRange := result.ResponseRange.SplitByBlockNumber(finalizedBlockNumber) - result.UnsafeRange = unsafePendingBlockRange + return mdrtypes.LogQueryResponse{}, fmt.Errorf("EVMMultidownloader.LogQuery: error executing log query %s: %w", + availQuery.String(), err) } + // Calculate UnsafeRange + _, unsafePendingBlockRange := result.ResponseRange.SplitByBlockNumber(finalizedBlockNumber) + result.UnsafeRange = unsafePendingBlockRange return result, err } diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index 3a2d90e6c..20164b726 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -66,6 +66,7 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, if err != nil { return nil, err } + maxLogQuery := d.newMaxLogQuery(lastBlockHeader, maxBlocks, syncerConfig) var result *mdrsynctypes.DownloadResult conditionMet, err := aggkitcommon.PollingWithTimeout(ctx, d.pullingPeriod, @@ -75,7 +76,7 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, if err != nil { return false, err } - result, err = d.executeLogQuery(ctx, maxLogQuery) + result, err = d.executeLogQuery(ctx, maxLogQuery, syncerConfig) if err != nil { // The only allowed error is ErrLogsNotAvailable if errors.Is(err, ErrLogsNotAvailable) { @@ -83,6 +84,7 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, } return false, err } + return true, nil }) if errors.Is(err, aggkitcommon.ErrTimeoutReached) { @@ -119,7 +121,7 @@ func (d *EVMDownloader) ChainID(ctx context.Context) (uint64, error) { // executeLogQuery executes the log query, checking for partial availability // if there are no logs available returns an error func (d *EVMDownloader) executeLogQuery(ctx context.Context, - fullLogQuery mdrtypes.LogQuery) (*mdrsynctypes.DownloadResult, error) { + fullLogQuery mdrtypes.LogQuery, syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) { logQuery := fullLogQuery if !d.mdr.IsAvailable(fullLogQuery) { isPartial, partialLogQuery := d.mdr.IsPartiallyAvailable(fullLogQuery) @@ -135,10 +137,13 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: cannot get logs: %w", err) } totalLogs := logQueryResponse.CountLogs() - + percentComplete, err := d.calculatePercentCompletation(ctx, syncerConfig, logQuery.BlockRange) + if err != nil { + d.logger.Warn(err.Error()) + } result := &mdrsynctypes.DownloadResult{ Data: d.logQueryResponseToEVMBlocks(ctx, logQueryResponse), - PercentComplete: 0.0, + PercentComplete: percentComplete, } err = d.addLastBlockIfNotIncluded(ctx, result, logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange) @@ -148,6 +153,34 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, d.logger.Infof("EVMDownloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs) return result, nil } + +func (d *EVMDownloader) getFullBlockRange(ctx context.Context, + syncerConfig aggkittypes.SyncerConfig) (*aggkitcommon.BlockRange, error) { + blockTo, err := d.mdr.HeaderByNumber(ctx, &syncerConfig.ToBlock) + if err != nil || blockTo == nil { + return nil, fmt.Errorf("EVMDownloader.getFullBlockRange: error getting 'to' block header: %w", err) + } + br := aggkitcommon.NewBlockRange(syncerConfig.FromBlock, blockTo.Number) + return &br, nil +} + +// Return the percent of completion of synchronization +func (d *EVMDownloader) calculatePercentCompletation(ctx context.Context, + syncerConfig aggkittypes.SyncerConfig, lastRange aggkitcommon.BlockRange) (float64, error) { + fullRange, err := d.getFullBlockRange(ctx, syncerConfig) + if err != nil { + return 0, fmt.Errorf("EVMDownloader.calculatePercentCompletation: error getting full block range: %w", err) + } + totalBlocks := fullRange.CountBlocks() + pendingRange := aggkitcommon.NewBlockRange(lastRange.ToBlock+1, fullRange.ToBlock) + if pendingRange.CountBlocks() == 0 { + return percentComplete, nil + } + blocksCompleted := totalBlocks - pendingRange.CountBlocks() + percent := (float64(blocksCompleted) / float64(totalBlocks)) * percentComplete + return percent, nil +} + func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, result *mdrsynctypes.DownloadResult, responseRange aggkitcommon.BlockRange, @@ -165,10 +198,11 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, d.logger.Errorf("EVMDownloader: error getting block header for block number %d: %v", lastBlockNumber, err) return nil } + isFinalizedBlock := !unsafeRange.ContainsBlockNumber(lastBlockNumber) if hdr == nil { // Check that we are not in the unsafe zone. Because in that case we can't fake the Hash and it's an error // because the block must in in storage - if unsafeRange.ContainsBlockNumber(lastBlockNumber) { + if !isFinalizedBlock { err := fmt.Errorf("EVMDownloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber) d.logger.Error(err) return err @@ -187,7 +221,8 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, Hash: hdr.Hash, Timestamp: hdr.Time, }, - Events: []interface{}{}, + IsFinalizedBlock: isFinalizedBlock, + Events: []interface{}{}, } if hdr.ParentHash != nil { emptyBlock.ParentHash = *hdr.ParentHash diff --git a/multidownloader/sync/evmdownloader_test.go b/multidownloader/sync/evmdownloader_test.go index af356ffa4..1de759c87 100644 --- a/multidownloader/sync/evmdownloader_test.go +++ b/multidownloader/sync/evmdownloader_test.go @@ -520,6 +520,11 @@ func TestExecuteLogQuery_FullyAvailable(t *testing.T) { logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{ Blocks: []mdrtypes.BlockWithLogs{ @@ -549,7 +554,7 @@ func TestExecuteLogQuery_FullyAvailable(t *testing.T) { Time: 2100, }, mdrtypes.Finalized, nil) - result, err := download.executeLogQuery(ctx, logQuery) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.NoError(t, err) require.NotNil(t, result) @@ -586,6 +591,11 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) partialQuery := mdrtypes.NewLogQuery(100, 105, []common.Address{common.HexToAddress("0x123")}) + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(true, &partialQuery) mockMdr.EXPECT().LogQuery(ctx, partialQuery).Return(mdrtypes.LogQueryResponse{ @@ -617,7 +627,7 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { Time: 2050, }, mdrtypes.Finalized, nil) - result, err := download.executeLogQuery(ctx, logQuery) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.NoError(t, err) require.NotNil(t, result) @@ -646,10 +656,15 @@ func TestExecuteLogQuery_NotAvailable(t *testing.T) { logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(false, nil) - result, err := download.executeLogQuery(ctx, logQuery) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.Error(t, err) require.Nil(t, result) @@ -676,10 +691,15 @@ func TestExecuteLogQuery_GetEthLogsError(t *testing.T) { logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{}, fmt.Errorf("database error")) - result, err := download.executeLogQuery(ctx, logQuery) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.Error(t, err) require.Nil(t, result) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index c7e8d01fa..b21fe426f 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -95,6 +95,9 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { if err = d.processBlocks(ctx, blocks); err != nil { return fmt.Errorf("EVMDriver: error processing blocks: %w", err) } + LastProcessedBlock := blocks.Data.LastBlock() + d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", + len(blocks.Data), blocks.PercentComplete, LastProcessedBlock.Brief()) return nil } diff --git a/sync/evmtypes.go b/sync/evmtypes.go index 739154f90..4e58d662f 100644 --- a/sync/evmtypes.go +++ b/sync/evmtypes.go @@ -1,6 +1,10 @@ package sync -import "github.com/ethereum/go-ethereum/common" +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) type EVMBlocks []*EVMBlock @@ -8,12 +12,27 @@ func (e EVMBlocks) Len() int { return len(e) } +func (e EVMBlocks) LastBlock() *EVMBlock { + if len(e) == 0 { + return nil + } + return e[len(e)-1] +} + type EVMBlock struct { EVMBlockHeader IsFinalizedBlock bool Events []interface{} } +func (e *EVMBlock) Brief() string { + if e == nil { + return "EVMBlock" + } + return fmt.Sprintf("EVMBlock{Num: %d, IsFinalizedBlock: %t, EventsCount: %d}", + e.Num, e.IsFinalizedBlock, len(e.Events)) +} + type EVMBlockHeader struct { Num uint64 Hash common.Hash From 10a8558fc59a0446a00776285b046dfa14f3fd07 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 3 Feb 2026 21:44:01 +0100 Subject: [PATCH 35/75] fix: ut --- multidownloader/sync/evmdriver.go | 8 +- multidownloader/sync/runtimedata_test.go | 520 ++++++++++++++++++++--- 2 files changed, 464 insertions(+), 64 deletions(-) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index b21fe426f..a8693648d 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -95,9 +95,11 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { if err = d.processBlocks(ctx, blocks); err != nil { return fmt.Errorf("EVMDriver: error processing blocks: %w", err) } - LastProcessedBlock := blocks.Data.LastBlock() - d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", - len(blocks.Data), blocks.PercentComplete, LastProcessedBlock.Brief()) + if blocks != nil { + LastProcessedBlock := blocks.Data.LastBlock() + d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", + len(blocks.Data), blocks.PercentComplete, LastProcessedBlock.Brief()) + } return nil } diff --git a/multidownloader/sync/runtimedata_test.go b/multidownloader/sync/runtimedata_test.go index 54d274f67..32fdbe6d5 100644 --- a/multidownloader/sync/runtimedata_test.go +++ b/multidownloader/sync/runtimedata_test.go @@ -8,96 +8,494 @@ import ( ) func TestRuntimeData_String(t *testing.T) { - data := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + tests := []struct { + name string + data RuntimeData + expected string + }{ + { + name: "empty addresses", + data: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + expected: "ChainID: 1, Addresses: ", + }, + { + name: "single address", + data: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + expected: "ChainID: 1, Addresses: 0x0000000000000000000000000000000000000123, ", + }, + { + name: "two addresses", + data: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + expected: "ChainID: 1, Addresses: 0x1234567890AbcdEF1234567890aBcdef12345678, 0xABcdEFABcdEFabcdEfAbCdefabcdeFABcDEFabCD, ", + }, + { + name: "multiple addresses", + data: RuntimeData{ + ChainID: 42, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + common.HexToAddress("0x789"), + }, + }, + expected: "ChainID: 42, Addresses: 0x0000000000000000000000000000000000000123, 0x0000000000000000000000000000000000000456, 0x0000000000000000000000000000000000000789, ", + }, + { + name: "zero chain ID", + data: RuntimeData{ + ChainID: 0, + Addresses: []common.Address{common.HexToAddress("0xabc")}, + }, + expected: "ChainID: 0, Addresses: 0x0000000000000000000000000000000000000aBc, ", }, } - expected := "ChainID: 1, Addresses: 0x1234567890abcdef1234567890abcdef12345678, 0xabcdefabcdefabcdefabcdefabcdefabcdefabcd, " - require.Equal(t, expected, data.String()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.data.String() + require.Equal(t, tt.expected, result) + }) + } } func TestRuntimeData_IsCompatible_Success(t *testing.T) { - data1 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + tests := []struct { + name string + data1 RuntimeData + data2 RuntimeData + }{ + { + name: "identical data with single address", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, }, - } - - data2 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + { + name: "identical data with two addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + }, + { + name: "identical data with multiple addresses", + data1: RuntimeData{ + ChainID: 42, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + common.HexToAddress("0x789"), + }, + }, + data2: RuntimeData{ + ChainID: 42, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + common.HexToAddress("0x789"), + }, + }, + }, + { + name: "both have empty addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + }, + { + name: "zero chain ID with matching data", + data1: RuntimeData{ + ChainID: 0, + Addresses: []common.Address{common.HexToAddress("0x789")}, + }, + data2: RuntimeData{ + ChainID: 0, + Addresses: []common.Address{common.HexToAddress("0x789")}, + }, }, } - err := data1.IsCompatible(data2) - require.NoError(t, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.data1.IsCompatible(tt.data2) + require.NoError(t, err) + }) + } } func TestRuntimeData_IsCompatible_ChainIDMismatch(t *testing.T) { - data1 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + tests := []struct { + name string + data1 RuntimeData + data2 RuntimeData + chainID1 uint64 + chainID2 uint64 + }{ + { + name: "different chain IDs with same address", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + data2: RuntimeData{ + ChainID: 2, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + chainID1: 1, + chainID2: 2, }, - } - - data2 := RuntimeData{ - ChainID: 2, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + { + name: "chain ID 0 vs 1", + data1: RuntimeData{ + ChainID: 0, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + chainID1: 0, + chainID2: 1, + }, + { + name: "large chain ID difference", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + data2: RuntimeData{ + ChainID: 999999, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + chainID1: 1, + chainID2: 999999, }, } - err := data1.IsCompatible(data2) - require.Error(t, err) - require.Contains(t, err.Error(), "chain ID mismatch") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.data1.IsCompatible(tt.data2) + require.Error(t, err) + require.Contains(t, err.Error(), "chain ID mismatch") + }) + } } -func TestRuntimeData_IsCompatible_AddressLengthMismatch(t *testing.T) { - data1 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), +func TestRuntimeData_IsCompatible_AddressesLenMismatch(t *testing.T) { + tests := []struct { + name string + data1 RuntimeData + data2 RuntimeData + }{ + { + name: "data1 has more addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, }, - } - - data2 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), - common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + { + name: "data2 has more addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + }, + { + name: "data1 empty, data2 has addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + }, + { + name: "data1 has addresses, data2 empty", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + }, + { + name: "large difference in address count", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x333"), + common.HexToAddress("0x444"), + common.HexToAddress("0x555"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + }, + }, }, } - err := data1.IsCompatible(data2) - require.Error(t, err) - require.Contains(t, err.Error(), "addresses len mismatch") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.data1.IsCompatible(tt.data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses len mismatch") + }) + } } func TestRuntimeData_IsCompatible_AddressMismatch(t *testing.T) { - data1 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + tests := []struct { + name string + data1 RuntimeData + data2 RuntimeData + index int + }{ + { + name: "single address mismatch", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + index: 0, + }, + { + name: "first address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x789"), + common.HexToAddress("0x456"), + }, + }, + index: 0, + }, + { + name: "second address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x789"), + }, + }, + index: 1, + }, + { + name: "middle address differs in longer list", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x333"), + common.HexToAddress("0x444"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x999"), + common.HexToAddress("0x444"), + }, + }, + index: 2, + }, + { + name: "last address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x333"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x999"), + }, + }, + index: 2, }, } - data2 := RuntimeData{ - ChainID: 1, - Addresses: []common.Address{ - common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), - }, + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.data1.IsCompatible(tt.data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses") + require.Contains(t, err.Error(), "mismatch") + }) } +} + +func TestRuntimeData_IsCompatible_ErrorPrecedence(t *testing.T) { + t.Run("chain ID mismatch takes precedence over address differences", func(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + } + data2 := RuntimeData{ + ChainID: 2, + Addresses: []common.Address{common.HexToAddress("0x456")}, + } + + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "chain ID mismatch") + }) + + t.Run("length mismatch checked before address comparison", func(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + } + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x456"), + common.HexToAddress("0x789"), + }, + } + + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses len mismatch") + }) +} + +func TestRuntimeData_IsCompatible_NilAddresses(t *testing.T) { + t.Run("both nil addresses", func(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: nil, + } + data2 := RuntimeData{ + ChainID: 1, + Addresses: nil, + } + + err := data1.IsCompatible(data2) + require.NoError(t, err) + }) + + t.Run("one nil, one empty", func(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: nil, + } + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + } + + err := data1.IsCompatible(data2) + require.NoError(t, err) + }) + + t.Run("nil vs non-empty", func(t *testing.T) { + data1 := RuntimeData{ + ChainID: 1, + Addresses: nil, + } + data2 := RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + } - err := data1.IsCompatible(data2) - require.Error(t, err) - require.Contains(t, err.Error(), "addresses[0] mismatch") + err := data1.IsCompatible(data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses len mismatch") + }) } From 786dcdfaf1ea9f3d35090160bb37ec580e9e0747 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 09:26:44 +0100 Subject: [PATCH 36/75] fix: ut --- multidownloader/sync/evmdownloader_test.go | 30 ++++++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/multidownloader/sync/evmdownloader_test.go b/multidownloader/sync/evmdownloader_test.go index 1de759c87..daf686ccb 100644 --- a/multidownloader/sync/evmdownloader_test.go +++ b/multidownloader/sync/evmdownloader_test.go @@ -95,7 +95,11 @@ func TestDownloadNextBlocks_Success(t *testing.T) { Hash: common.HexToHash("0xblock110"), Time: 1100, }, mdrtypes.Finalized, nil) - + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) require.NoError(t, err) @@ -249,7 +253,11 @@ func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { Hash: common.HexToHash("0xblock59"), Time: 1090, }, mdrtypes.Finalized, nil) - + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) result, err := download.DownloadNextBlocks(ctx, nil, 10, syncerConfig) require.NoError(t, err) @@ -333,7 +341,11 @@ func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { Hash: common.HexToHash("0xblock110"), Time: 1100, }, mdrtypes.Finalized, nil).Once() - + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) // Final checkReorgedBlock after PollingWithTimeout completes (line 101) mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() @@ -553,7 +565,11 @@ func TestExecuteLogQuery_FullyAvailable(t *testing.T) { Hash: common.HexToHash("0xblock110"), Time: 2100, }, mdrtypes.Finalized, nil) - + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.NoError(t, err) @@ -626,7 +642,11 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { Hash: common.HexToHash("0xblock105"), Time: 2050, }, mdrtypes.Finalized, nil) - + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) require.NoError(t, err) From 97d2a9585cd5bc52d9495c0f8f592de1448ac5e5 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 10:19:23 +0100 Subject: [PATCH 37/75] feat: implemented forced reorg in developerMode --- multidownloader/evm_multidownloader.go | 2 +- multidownloader/evm_multidownloader_debug.go | 2 +- multidownloader/reorg_processor.go | 36 +++++--- multidownloader/reorg_processor_port.go | 5 ++ multidownloader/reorg_processor_test.go | 89 ++++++++++++++++--- .../types/mocks/mock_reorg_porter.go | 45 ++++++++++ .../types/mocks/mock_reorg_processor.go | 23 ++--- multidownloader/types/reorg_data.go | 7 +- multidownloader/types/reorg_error.go | 2 + multidownloader/types/reorg_port.go | 1 + 10 files changed, 174 insertions(+), 38 deletions(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 52add9812..d17d5935b 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -94,7 +94,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, if reorgProcessor == nil { log.Infof("NewEVMMultidownloader: creating default ReorgProcessor for multidownloader (%s)", name) - reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB) + reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB, false) } var debug *EVMMultidownloaderDebug if cfg.DeveloperMode { diff --git a/multidownloader/evm_multidownloader_debug.go b/multidownloader/evm_multidownloader_debug.go index 4a4d7ff82..2102e12db 100644 --- a/multidownloader/evm_multidownloader_debug.go +++ b/multidownloader/evm_multidownloader_debug.go @@ -25,7 +25,7 @@ func (dh *EVMMultidownloaderDebug) ForceRorg(mismatchingBlockNumber uint64) { defer dh.mutexDebug.Unlock() dh.debugStepForcedReturnError = mdrtypes.NewDetectedReorgError( mismatchingBlockNumber, - mdrtypes.ReorgDetectionReason_BlockHashMismatch, + mdrtypes.ReorgDetectionReason_Forced, common.Hash{}, common.Hash{}, fmt.Sprintf("ForceRorg: forced reorg at block number %d", mismatchingBlockNumber), diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 18ac04305..7043d5f73 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -3,7 +3,6 @@ package multidownloader import ( "context" "fmt" - "time" aggkitcommon "github.com/agglayer/aggkit/common" dbtypes "github.com/agglayer/aggkit/db/types" @@ -12,15 +11,16 @@ import ( ) type ReorgProcessor struct { - log aggkitcommon.Logger - port mdtypes.ReorgPorter - funcNow func() uint64 + log aggkitcommon.Logger + port mdtypes.ReorgPorter + developerMode bool } func NewReorgProcessor(log aggkitcommon.Logger, ethClient aggkittypes.BaseEthereumClienter, rpcClient aggkittypes.RPCClienter, - storage mdtypes.Storager) *ReorgProcessor { + storage mdtypes.Storager, + developerMode bool) *ReorgProcessor { return &ReorgProcessor{ log: log, port: &ReorgPort{ @@ -28,9 +28,7 @@ func NewReorgProcessor(log aggkitcommon.Logger, rpcClient: rpcClient, storage: storage, }, - funcNow: func() uint64 { - return uint64(time.Now().Unix()) - }, + developerMode: developerMode, } } @@ -39,6 +37,7 @@ func NewReorgProcessor(log aggkitcommon.Logger, // - store the reorg info in storage func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, detectedReorgError mdtypes.DetectedReorgError) error { + var err error // We known that offendingBlockNumber is affected, so we go backwards until we find // the first unaffected block currentBlockNumber := detectedReorgError.OffendingBlockNumber @@ -55,11 +54,26 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, } } }() - firstUnaffectedBlock, err := rm.findFirstUnaffectedBlock(ctx, tx, currentBlockNumber-1) if err != nil { return fmt.Errorf("ProcessReorg: error finding first unaffected block: %w", err) } + if detectedReorgError.ReorgDetectionReason == mdtypes.ReorgDetectionReason_Forced { + if rm.developerMode { + rm.log.Warnf("ProcessReorg: executing a forcedReorg in block %d "+ + "It acts as missing blocks, so is going to delete blocks > %d."+ + "Overriding real unaffected block found %d."+ + "(forbidden in production! but developerMode is enabled))!!. ", + currentBlockNumber, currentBlockNumber, firstUnaffectedBlock) + firstUnaffectedBlock = currentBlockNumber - 1 + } else { + rm.log.Warnf("ProcessReorg: forced reorg at block %d but developerMode is disabled, "+ + "so is going to use the first unaffected block found %d", + currentBlockNumber, firstUnaffectedBlock) + return nil + } + } + lastBlockNumberInStorage, err := rm.port.GetLastBlockNumberInStorage(tx) if err != nil { return fmt.Errorf("ProcessReorg: error getting last block number in storage: %w", err) @@ -78,8 +92,8 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, // TODO: Add hash to blockNumbers reorgData := mdtypes.ReorgData{ BlockRangeAffected: aggkitcommon.NewBlockRange(firstUnaffectedBlock+1, lastBlockNumberInStorage), - DetectedAtBlock: lastBlockNumberInStorage, - DetectedTimestamp: rm.funcNow(), + DetectedAtBlock: detectedReorgError.OffendingBlockNumber, + DetectedTimestamp: rm.port.TimeNowUnix(), NetworkLatestBlock: latestBlockNumberInRPC, NetworkFinalizedBlock: finalizedBlockNumberInRPC, NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index a03aeb4eb..2da633620 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -3,6 +3,7 @@ package multidownloader import ( "context" "fmt" + "time" dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/etherman" @@ -59,3 +60,7 @@ func (r *ReorgPort) GetBlockNumberInRPC( } return blockNumber.Number, nil } + +func (r *ReorgPort) TimeNowUnix() uint64 { + return uint64(time.Now().Unix()) +} diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index 5e63bcd26..b257ac63d 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -5,8 +5,11 @@ import ( "fmt" "testing" + aggkitcommon "github.com/agglayer/aggkit/common" commonmocks "github.com/agglayer/aggkit/common/mocks" dbmocks "github.com/agglayer/aggkit/db/mocks" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader/types" mdtypes "github.com/agglayer/aggkit/multidownloader/types" mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkittypes "github.com/agglayer/aggkit/types" @@ -351,11 +354,8 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { processor := &ReorgProcessor{ log: mockLogger, port: mockPort, - funcNow: func() uint64 { - return nowValue - }, } - + mockPort.EXPECT().TimeNowUnix().Return(nowValue).Maybe() ctx := context.Background() matchingHash := common.HexToHash("0xabcd") offendingBlockNumber := uint64(105) @@ -452,11 +452,10 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockTx := dbmocks.NewTxer(t) processor := &ReorgProcessor{ - log: mockLogger, - port: mockPort, - funcNow: func() uint64 { return 1234567890 }, + log: mockLogger, + port: mockPort, } - + mockPort.EXPECT().TimeNowUnix().Return(1234567890).Maybe() ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("move blocks error") @@ -598,11 +597,8 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { processor := &ReorgProcessor{ log: mockLogger, port: mockPort, - funcNow: func() uint64 { - return nowValue - }, } - + mockPort.EXPECT().TimeNowUnix().Return(nowValue).Maybe() ctx := context.Background() matchingHash := common.HexToHash("0xabcd") expectedErr := fmt.Errorf("commit failed") @@ -711,3 +707,72 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.AssertExpectations(t) }) } + +func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { + logger := log.WithFields("module", "test") + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: logger, + port: mockPort, + developerMode: true, + } + + ctx := context.Background() + //rollbackErr := fmt.Errorf("rollback failed") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_Forced, + common.Hash{}, + common.Hash{}, + "test reorg", + ) + nowTimestamp := uint64(1234567890) + mockPort.EXPECT().TimeNowUnix().Return(nowTimestamp).Maybe() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&types.CompareBlockHeaders{ + BlockNumber: 99, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x5678"), + }, + }, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). + Return(&types.CompareBlockHeaders{ + BlockNumber: 98, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + expectedReorgData := mdtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 110), + DetectedAtBlock: 100, + DetectedTimestamp: nowTimestamp, + NetworkLatestBlock: 115, + NetworkFinalizedBlock: 100, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: reorgErr.Error(), + } + mockPort.EXPECT().MoveReorgedBlocks(mockTx, expectedReorgData).Return(uint64(1), nil).Once() + mockTx.EXPECT().Commit().Return(nil).Once() + err := processor.ProcessReorg(ctx, *reorgErr) + + require.NoError(t, err) + mockPort.AssertExpectations(t) + mockTx.AssertExpectations(t) + +} diff --git a/multidownloader/types/mocks/mock_reorg_porter.go b/multidownloader/types/mocks/mock_reorg_porter.go index e0e8db9c6..9cab54f05 100644 --- a/multidownloader/types/mocks/mock_reorg_porter.go +++ b/multidownloader/types/mocks/mock_reorg_porter.go @@ -314,6 +314,51 @@ func (_c *ReorgPorter_NewTx_Call) RunAndReturn(run func(context.Context) (dbtype return _c } +// TimeNowUnix provides a mock function with no fields +func (_m *ReorgPorter) TimeNowUnix() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeNowUnix") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// ReorgPorter_TimeNowUnix_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TimeNowUnix' +type ReorgPorter_TimeNowUnix_Call struct { + *mock.Call +} + +// TimeNowUnix is a helper method to define mock.On call +func (_e *ReorgPorter_Expecter) TimeNowUnix() *ReorgPorter_TimeNowUnix_Call { + return &ReorgPorter_TimeNowUnix_Call{Call: _e.mock.On("TimeNowUnix")} +} + +func (_c *ReorgPorter_TimeNowUnix_Call) Run(run func()) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ReorgPorter_TimeNowUnix_Call) Return(_a0 uint64) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgPorter_TimeNowUnix_Call) RunAndReturn(run func() uint64) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Return(run) + return _c +} + // NewReorgPorter creates a new instance of ReorgPorter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewReorgPorter(t interface { diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go index e9b869d3e..d20660d11 100644 --- a/multidownloader/types/mocks/mock_reorg_processor.go +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -5,6 +5,7 @@ package mocks import ( context "context" + types "github.com/agglayer/aggkit/multidownloader/types" mock "github.com/stretchr/testify/mock" ) @@ -21,17 +22,17 @@ func (_m *ReorgProcessor) EXPECT() *ReorgProcessor_Expecter { return &ReorgProcessor_Expecter{mock: &_m.Mock} } -// ProcessReorg provides a mock function with given fields: ctx, offendingBlockNumber -func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, offendingBlockNumber uint64) error { - ret := _m.Called(ctx, offendingBlockNumber) +// ProcessReorg provides a mock function with given fields: ctx, detectedReorgError +func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, detectedReorgError types.DetectedReorgError) error { + ret := _m.Called(ctx, detectedReorgError) if len(ret) == 0 { panic("no return value specified for ProcessReorg") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { - r0 = rf(ctx, offendingBlockNumber) + if rf, ok := ret.Get(0).(func(context.Context, types.DetectedReorgError) error); ok { + r0 = rf(ctx, detectedReorgError) } else { r0 = ret.Error(0) } @@ -46,14 +47,14 @@ type ReorgProcessor_ProcessReorg_Call struct { // ProcessReorg is a helper method to define mock.On call // - ctx context.Context -// - offendingBlockNumber uint64 -func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, offendingBlockNumber interface{}) *ReorgProcessor_ProcessReorg_Call { - return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, offendingBlockNumber)} +// - detectedReorgError types.DetectedReorgError +func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, detectedReorgError interface{}) *ReorgProcessor_ProcessReorg_Call { + return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, detectedReorgError)} } -func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, offendingBlockNumber uint64)) *ReorgProcessor_ProcessReorg_Call { +func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, detectedReorgError types.DetectedReorgError)) *ReorgProcessor_ProcessReorg_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64)) + run(args[0].(context.Context), args[1].(types.DetectedReorgError)) }) return _c } @@ -63,7 +64,7 @@ func (_c *ReorgProcessor_ProcessReorg_Call) Return(_a0 error) *ReorgProcessor_Pr return _c } -func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, uint64) error) *ReorgProcessor_ProcessReorg_Call { +func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, types.DetectedReorgError) error) *ReorgProcessor_ProcessReorg_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/types/reorg_data.go b/multidownloader/types/reorg_data.go index 54a3b7487..1e5801524 100644 --- a/multidownloader/types/reorg_data.go +++ b/multidownloader/types/reorg_data.go @@ -8,8 +8,11 @@ import ( ) type ReorgData struct { - ChainID uint64 - BlockRangeAffected aggkitcommon.BlockRange + // ChainID is the id of the roerged chain stored on DB (incremental ID) + ChainID uint64 + // BlockRangeAffected is the range of blocks affected by the reorg (from,to inclusive) + BlockRangeAffected aggkitcommon.BlockRange + // DetectedAtBlock is the block number where the reorg was detected DetectedAtBlock uint64 DetectedTimestamp uint64 NetworkLatestBlock uint64 diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go index fb5e5bdc5..1ecf9f25e 100644 --- a/multidownloader/types/reorg_error.go +++ b/multidownloader/types/reorg_error.go @@ -14,6 +14,8 @@ const ( ReorgDetectionReason_BlockHashMismatch ReorgDetectionReason = iota + 1 ReorgDetectionReason_ParentHashMismatch ReorgDetectionReason_MissingBlock + // Forced act as MissingBlock but without checking it is basically a debug mode + // to produce reorgs scenario (it must have enabled the develMode) ReorgDetectionReason_Forced ) diff --git a/multidownloader/types/reorg_port.go b/multidownloader/types/reorg_port.go index d7df1bc68..ce3bfdaa8 100644 --- a/multidownloader/types/reorg_port.go +++ b/multidownloader/types/reorg_port.go @@ -14,6 +14,7 @@ type ReorgPorter interface { // Return ChainID of the inserted reorg MoveReorgedBlocks(tx dbtypes.Querier, reorgData ReorgData) (uint64, error) GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) + TimeNowUnix() uint64 } type CompareBlockHeaders struct { From 5e20baebc8d6d76f0927b3b47f19605233d80523 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 10:27:30 +0100 Subject: [PATCH 38/75] feat: implemented forced reorg in developerMode and developerMode=false --- multidownloader/reorg_processor.go | 6 +-- multidownloader/reorg_processor_test.go | 63 ++++++++++++++++++++----- 2 files changed, 55 insertions(+), 14 deletions(-) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 7043d5f73..5432f994b 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -67,10 +67,10 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, currentBlockNumber, currentBlockNumber, firstUnaffectedBlock) firstUnaffectedBlock = currentBlockNumber - 1 } else { - rm.log.Warnf("ProcessReorg: forced reorg at block %d but developerMode is disabled, "+ - "so is going to use the first unaffected block found %d", + rm.log.Warnf("ProcessReorg: forced reorg at block %d with developerMode disabled, "+ + "using the first unaffected block found %d", currentBlockNumber, firstUnaffectedBlock) - return nil + // Continue with the reorg using the firstUnaffectedBlock found } } diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index b257ac63d..c76a2f5fe 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -709,6 +709,36 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { } func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { + testCases := []struct { + name string + developerMode bool + expectedReorgStartBlock uint64 + expectedReorgDescription string + }{ + { + name: "with developerMode enabled - reorgs from detected block", + developerMode: true, + expectedReorgStartBlock: 100, + expectedReorgDescription: "Reorgs from detected block (overriding first unaffected block)", + }, + { + name: "with developerMode disabled - reorgs from first unaffected block", + developerMode: false, + expectedReorgStartBlock: 99, + expectedReorgDescription: "Reorgs from first unaffected block + 1", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testForcedReorg(t, tc.developerMode, tc.expectedReorgStartBlock) + }) + } +} + +func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock uint64) { + t.Helper() + logger := log.WithFields("module", "test") mockPort := mdmocks.NewReorgPorter(t) mockTx := dbmocks.NewTxer(t) @@ -716,21 +746,28 @@ func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { processor := &ReorgProcessor{ log: logger, port: mockPort, - developerMode: true, + developerMode: developerMode, } ctx := context.Background() - //rollbackErr := fmt.Errorf("rollback failed") + detectedReorgBlock := uint64(100) reorgErr := mdtypes.NewDetectedReorgError( - 100, + detectedReorgBlock, mdtypes.ReorgDetectionReason_Forced, common.Hash{}, common.Hash{}, "test reorg", ) nowTimestamp := uint64(1234567890) + lastBlockInStorage := uint64(110) + latestBlockInRPC := uint64(115) + finalizedBlockInRPC := uint64(100) + + // Setup mock expectations mockPort.EXPECT().TimeNowUnix().Return(nowTimestamp).Maybe() mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // Mock block 99 - mismatch mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). Return(&types.CompareBlockHeaders{ BlockNumber: 99, @@ -743,6 +780,8 @@ func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { Hash: common.HexToHash("0x5678"), }, }, nil).Once() + + // Mock block 98 - match (first unaffected block) mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). Return(&types.CompareBlockHeaders{ BlockNumber: 98, @@ -755,24 +794,26 @@ func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { Hash: common.HexToHash("0x1234"), }, }, nil).Once() - mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() - mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() - mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(lastBlockInStorage, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(latestBlockInRPC, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(finalizedBlockInRPC, nil).Once() + expectedReorgData := mdtypes.ReorgData{ - BlockRangeAffected: aggkitcommon.NewBlockRange(100, 110), - DetectedAtBlock: 100, + BlockRangeAffected: aggkitcommon.NewBlockRange(expectedReorgStartBlock, lastBlockInStorage), + DetectedAtBlock: detectedReorgBlock, DetectedTimestamp: nowTimestamp, - NetworkLatestBlock: 115, - NetworkFinalizedBlock: 100, + NetworkLatestBlock: latestBlockInRPC, + NetworkFinalizedBlock: finalizedBlockInRPC, NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, Description: reorgErr.Error(), } mockPort.EXPECT().MoveReorgedBlocks(mockTx, expectedReorgData).Return(uint64(1), nil).Once() mockTx.EXPECT().Commit().Return(nil).Once() + err := processor.ProcessReorg(ctx, *reorgErr) require.NoError(t, err) mockPort.AssertExpectations(t) mockTx.AssertExpectations(t) - } From d97c9eb3cdb4b36fbe73c9be37e28b0b15160e94 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 10:28:08 +0100 Subject: [PATCH 39/75] feat: implemented forced reorg in developerMode and developerMode=false --- multidownloader/evm_multidownloader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index d17d5935b..c52cb5810 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -94,7 +94,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, if reorgProcessor == nil { log.Infof("NewEVMMultidownloader: creating default ReorgProcessor for multidownloader (%s)", name) - reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB, false) + reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB, cfg.DeveloperMode) } var debug *EVMMultidownloaderDebug if cfg.DeveloperMode { From 7f92757f26ae3183e124101a3e23bfe9d9ef1a3f Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 15:14:25 +0100 Subject: [PATCH 40/75] fix: lint --- multidownloader/reorg_processor_test.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index c76a2f5fe..b0174b7b6 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -9,7 +9,6 @@ import ( commonmocks "github.com/agglayer/aggkit/common/mocks" dbmocks "github.com/agglayer/aggkit/db/mocks" "github.com/agglayer/aggkit/log" - "github.com/agglayer/aggkit/multidownloader/types" mdtypes "github.com/agglayer/aggkit/multidownloader/types" mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkittypes "github.com/agglayer/aggkit/types" @@ -710,10 +709,10 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { testCases := []struct { - name string - developerMode bool - expectedReorgStartBlock uint64 - expectedReorgDescription string + name string + developerMode bool + expectedReorgStartBlock uint64 + expectedReorgDescription string }{ { name: "with developerMode enabled - reorgs from detected block", @@ -769,7 +768,7 @@ func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock u // Mock block 99 - mismatch mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). - Return(&types.CompareBlockHeaders{ + Return(&mdtypes.CompareBlockHeaders{ BlockNumber: 99, StorageHeader: &aggkittypes.BlockHeader{ Number: 99, @@ -783,7 +782,7 @@ func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock u // Mock block 98 - match (first unaffected block) mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). - Return(&types.CompareBlockHeaders{ + Return(&mdtypes.CompareBlockHeaders{ BlockNumber: 98, StorageHeader: &aggkittypes.BlockHeader{ Number: 98, From 47ab11cdcc93391b7d3d72f83eaaef3fc0b353a9 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 4 Feb 2026 15:26:08 +0100 Subject: [PATCH 41/75] feat: coverage --- multidownloader/evm_multidownloader.go | 2 - multidownloader/storage/storage_block.go | 2 +- multidownloader/storage/storage_block_test.go | 182 ++++++++++++++++++ multidownloader/storage/storage_sync_test.go | 55 ++++++ multidownloader/storage/storage_test.go | 173 +++++++++++++++++ .../types/set_sync_segment_test.go | 2 +- 6 files changed, 412 insertions(+), 4 deletions(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index c52cb5810..d7660425e 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -180,8 +180,6 @@ func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) er func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, blocks aggkittypes.ListBlockHeaders) error { - // TODO: optimize this to don't check all blocks - // TODO: Find the first block to reorg if blocks.Len() == 0 { dh.log.Debugf("detectReorgs: no blocks to check for reorgs") return nil diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index b4acf26ac..2a41d2306 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -56,7 +56,7 @@ func (b *Blocks) Len() int { return len(b.Headers) } -func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, //nolint:unparam +func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, header *aggkittypes.BlockHeader, isFinal bool) error { blockRows := map[uint64]*blockRow{ header.Number: newBlockRowFromAggkitBlock(header, isFinal), diff --git a/multidownloader/storage/storage_block_test.go b/multidownloader/storage/storage_block_test.go index 7083f66ff..4c985331c 100644 --- a/multidownloader/storage/storage_block_test.go +++ b/multidownloader/storage/storage_block_test.go @@ -1,6 +1,7 @@ package storage import ( + "context" "testing" mdtypes "github.com/agglayer/aggkit/multidownloader/types" @@ -48,3 +49,184 @@ func TestStorage_GetRangeBlockHeader(t *testing.T) { require.True(t, lowest.Empty(), "lowest BlockHeader mismatch") require.True(t, highest.Empty(), "highest BlockHeader mismatch") } + +func TestStorage_GetHighestBlockNumber(t *testing.T) { + t.Run("returns 0 when no blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + highestBlockNum, err := storage.GetHighestBlockNumber(nil) + + require.NoError(t, err) + require.Equal(t, uint64(0), highestBlockNum) + }) + + t.Run("returns highest block number when blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err := storage.saveAggkitBlock(nil, block1, true) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(nil, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(1500, exampleTestHash[2], 1630000500, nil) + err = storage.saveAggkitBlock(nil, block3, true) + require.NoError(t, err) + + highestBlockNum, err := storage.GetHighestBlockNumber(nil) + + require.NoError(t, err) + require.Equal(t, uint64(2000), highestBlockNum, "expected highest block number to be 2000") + }) +} + +func TestStorage_GetBlockHeadersNotFinalized(t *testing.T) { + t.Run("returns empty list when no non-finalized blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + blocks, err := storage.GetBlockHeadersNotFinalized(nil, nil) + + require.NoError(t, err) + require.Empty(t, blocks) + }) + + t.Run("returns all non-finalized blocks when maxBlock is nil", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert finalized blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err := storage.saveAggkitBlock(nil, block1, true) + require.NoError(t, err) + + // Insert non-finalized blocks + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(nil, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(nil, block3, false) + require.NoError(t, err) + + blocks, err := storage.GetBlockHeadersNotFinalized(nil, nil) + + require.NoError(t, err) + require.Len(t, blocks, 2, "expected 2 non-finalized blocks") + }) + + t.Run("returns non-finalized blocks up to maxBlock", func(t *testing.T) { + storage := newStorageForTest(t, nil) + ctx := context.TODO() + tx, err := storage.NewTx(ctx) + require.NoError(t, err) + defer func() { + _ = tx.Rollback() + }() + // Insert non-finalized blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err = storage.saveAggkitBlock(tx, block1, false) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(tx, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(tx, block3, false) + require.NoError(t, err) + + maxBlock := uint64(2500) + blocks, err := storage.GetBlockHeadersNotFinalized(tx, &maxBlock) + + require.NoError(t, err) + require.Len(t, blocks, 2, "expected 2 non-finalized blocks <= 2500") + // Verify that block 3000 is not included + for _, block := range blocks { + require.LessOrEqual(t, block.Number, maxBlock, "block number should be <= maxBlock") + } + }) +} + +func TestBlocks_Add(t *testing.T) { + blocks := NewBlocks() + require.True(t, blocks.IsEmpty()) + + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + + require.False(t, blocks.IsEmpty()) + require.Equal(t, 1, blocks.Len()) + require.Contains(t, blocks.Headers, header.Number) + require.True(t, blocks.AreFinal[header.Number]) +} + +func TestBlocks_Get(t *testing.T) { + t.Run("returns header and finality when exists", func(t *testing.T) { + blocks := NewBlocks() + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + + retrievedHeader, isFinal, err := blocks.Get(1000) + + require.NoError(t, err) + require.Equal(t, header, retrievedHeader) + require.True(t, isFinal) + }) + + t.Run("returns error when header not found", func(t *testing.T) { + blocks := NewBlocks() + + retrievedHeader, isFinal, err := blocks.Get(9999) + + require.Error(t, err) + require.Contains(t, err.Error(), "block header not found") + require.Nil(t, retrievedHeader) + require.False(t, isFinal) + }) +} + +func TestBlocks_ListHeaders(t *testing.T) { + blocks := NewBlocks() + + header1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + header2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + header3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + + blocks.Add(header1, true) + blocks.Add(header2, false) + blocks.Add(header3, true) + + headers := blocks.ListHeaders() + + require.Len(t, headers, 3) + // Verify all headers are present (order may vary since it's from a map) + headerNumbers := make(map[uint64]bool) + for _, h := range headers { + headerNumbers[h.Number] = true + } + require.True(t, headerNumbers[1000]) + require.True(t, headerNumbers[2000]) + require.True(t, headerNumbers[3000]) +} + +func TestBlocks_IsEmpty(t *testing.T) { + blocks := NewBlocks() + require.True(t, blocks.IsEmpty()) + + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + require.False(t, blocks.IsEmpty()) +} + +func TestBlocks_Len(t *testing.T) { + blocks := NewBlocks() + require.Equal(t, 0, blocks.Len()) + + blocks.Add(aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil), true) + require.Equal(t, 1, blocks.Len()) + + blocks.Add(aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil), false) + require.Equal(t, 2, blocks.Len()) +} diff --git a/multidownloader/storage/storage_sync_test.go b/multidownloader/storage/storage_sync_test.go index 3d9334cac..f233675ae 100644 --- a/multidownloader/storage/storage_sync_test.go +++ b/multidownloader/storage/storage_sync_test.go @@ -111,3 +111,58 @@ func TestStorage_UpdateSyncedStatus(t *testing.T) { require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange) require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock) } + +func TestSyncStatusRow_ToSyncSegment(t *testing.T) { + t.Run("converts row to sync segment successfully with finalized block", func(t *testing.T) { + row := syncStatusRow{ + Address: exampleAddr1, + TargetFromBlock: 1000, + TargetToBlock: "FinalizedBlock", + SyncedFromBlock: 1000, + SyncedToBlock: 2000, + SyncersIDs: "syncer1,syncer2", + } + + segment, err := row.ToSyncSegment() + + require.NoError(t, err) + require.Equal(t, exampleAddr1, segment.ContractAddr) + require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), segment.BlockRange) + require.Equal(t, aggkittypes.FinalizedBlock, segment.TargetToBlock) + }) + + t.Run("converts row to sync segment successfully with latest block", func(t *testing.T) { + row := syncStatusRow{ + Address: exampleAddr2, + TargetFromBlock: 500, + TargetToBlock: "LatestBlock", + SyncedFromBlock: 500, + SyncedToBlock: 1500, + SyncersIDs: "syncer3", + } + + segment, err := row.ToSyncSegment() + + require.NoError(t, err) + require.Equal(t, exampleAddr2, segment.ContractAddr) + require.Equal(t, aggkitcommon.NewBlockRange(500, 1500), segment.BlockRange) + require.Equal(t, aggkittypes.LatestBlock, segment.TargetToBlock) + }) + + t.Run("returns error for invalid target to block finality", func(t *testing.T) { + row := syncStatusRow{ + Address: exampleAddr1, + TargetFromBlock: 1000, + TargetToBlock: "invalid_finality", + SyncedFromBlock: 1000, + SyncedToBlock: 2000, + SyncersIDs: "syncer1", + } + + segment, err := row.ToSyncSegment() + + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing target to block finality") + require.Equal(t, mdrtypes.SyncSegment{}, segment) + }) +} diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go index ce5b62659..57274b26f 100644 --- a/multidownloader/storage/storage_test.go +++ b/multidownloader/storage/storage_test.go @@ -1,6 +1,7 @@ package storage import ( + "encoding/json" "path" "testing" @@ -556,3 +557,175 @@ func populateLogsAndBlocksForTest(t *testing.T, storage *MultidownloaderStorage, err := storage.SaveEthLogsWithHeaders(nil, blocks, logs, true) require.NoError(t, err, "cannot populate logs and blocks") } + +func TestNewLogRowFromEthLog(t *testing.T) { + ethLog := types.Log{ + Address: exampleAddr1, + BlockNumber: 1234, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{ + exampleTestHash[1], + exampleTestHash[2], + }, + Data: []byte{0xDE, 0xAD, 0xBE, 0xEF}, + TxHash: exampleTestHash[3], + TxIndex: 42, + Index: 7, + } + + row := NewLogRowFromEthLog(ethLog) + + require.NotNil(t, row) + require.Equal(t, ethLog.Address, row.Address) + require.Equal(t, ethLog.BlockNumber, row.BlockNumber) + require.Equal(t, ethLog.Data, row.Data) + require.Equal(t, ethLog.TxHash, row.TxHash) + require.Equal(t, ethLog.TxIndex, row.TxIndex) + require.Equal(t, ethLog.Index, row.Index) + + // Verify topics are correctly marshaled as JSON + var topics []common.Hash + err := json.Unmarshal([]byte(row.Topics), &topics) + require.NoError(t, err) + require.Equal(t, ethLog.Topics, topics) +} + +func TestNewLogRowsFromEthLogs(t *testing.T) { + ethLogs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 1000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{exampleTestHash[1]}, + Data: []byte{0x01}, + TxHash: exampleTestHash[2], + TxIndex: 10, + Index: 0, + }, + { + Address: exampleAddr2, + BlockNumber: 1001, + BlockHash: exampleTestHash[1], + BlockTimestamp: 1630000060, + Topics: []common.Hash{exampleTestHash[3], exampleTestHash[4]}, + Data: []byte{0x02, 0x03}, + TxHash: exampleTestHash[5], + TxIndex: 20, + Index: 1, + }, + } + + rows := NewLogRowsFromEthLogs(ethLogs) + + require.Len(t, rows, 2) + require.Equal(t, ethLogs[0].Address, rows[0].Address) + require.Equal(t, ethLogs[0].BlockNumber, rows[0].BlockNumber) + require.Equal(t, ethLogs[1].Address, rows[1].Address) + require.Equal(t, ethLogs[1].BlockNumber, rows[1].BlockNumber) +} + +func TestNewBlockRowFromEthLog(t *testing.T) { + ethLog := types.Log{ + Address: exampleAddr1, + BlockNumber: 5000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630002000, + Topics: []common.Hash{exampleTestHash[1]}, + Data: []byte{0x01}, + } + + row := NewBlockRowFromEthLog(ethLog, true) + + require.NotNil(t, row) + require.Equal(t, ethLog.BlockNumber, row.BlockNumber) + require.Equal(t, ethLog.BlockHash, row.BlockHash) + require.Equal(t, ethLog.BlockTimestamp, row.BlockTimestamp) + require.Nil(t, row.BlockParentHash) + require.True(t, row.IsFinal) + + rowNotFinal := NewBlockRowFromEthLog(ethLog, false) + require.False(t, rowNotFinal.IsFinal) +} + +func TestNewBlockRowFromAggkitBlock(t *testing.T) { + parentHash := exampleTestHash[0] + block := aggkittypes.NewBlockHeader(3000, exampleTestHash[1], 1630003000, &parentHash) + + row := newBlockRowFromAggkitBlock(block, true) + + require.NotNil(t, row) + require.Equal(t, block.Number, row.BlockNumber) + require.Equal(t, block.Hash, row.BlockHash) + require.Equal(t, block.Time, row.BlockTimestamp) + require.NotNil(t, row.BlockParentHash) + require.Equal(t, parentHash, *row.BlockParentHash) + require.True(t, row.IsFinal) +} + +func TestNewBlockRowsFromLogs(t *testing.T) { + logs := []types.Log{ + { + Address: exampleAddr1, + BlockNumber: 1000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{exampleTestHash[1]}, + Data: []byte{0x01}, + }, + { + Address: exampleAddr1, + BlockNumber: 1000, + BlockHash: exampleTestHash[0], + BlockTimestamp: 1630000000, + Topics: []common.Hash{exampleTestHash[2]}, + Data: []byte{0x02}, + }, + { + Address: exampleAddr2, + BlockNumber: 1001, + BlockHash: exampleTestHash[1], + BlockTimestamp: 1630000060, + Topics: []common.Hash{exampleTestHash[3]}, + Data: []byte{0x03}, + }, + } + + blockRows := NewBlockRowsFromLogs(logs, true) + + require.Len(t, blockRows, 2, "expected 2 unique blocks") + require.NotNil(t, blockRows[1000]) + require.Equal(t, uint64(1000), blockRows[1000].BlockNumber) + require.Equal(t, exampleTestHash[0], blockRows[1000].BlockHash) + require.True(t, blockRows[1000].IsFinal) + require.NotNil(t, blockRows[1001]) + require.Equal(t, uint64(1001), blockRows[1001].BlockNumber) + require.Equal(t, exampleTestHash[1], blockRows[1001].BlockHash) + require.True(t, blockRows[1001].IsFinal) +} + +func TestNewBlockRowsFromAggkitBlock(t *testing.T) { + parentHash1 := exampleTestHash[0] + parentHash2 := exampleTestHash[1] + blockHeaders := aggkittypes.ListBlockHeaders{ + aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, &parentHash1), + aggkittypes.NewBlockHeader(2001, exampleTestHash[2], 1630001060, &parentHash2), + } + + blockRows := NewBlockRowsFromAggkitBlock(blockHeaders, false) + + require.Len(t, blockRows, 2) + require.NotNil(t, blockRows[2000]) + require.Equal(t, uint64(2000), blockRows[2000].BlockNumber) + require.Equal(t, exampleTestHash[1], blockRows[2000].BlockHash) + require.NotNil(t, blockRows[2000].BlockParentHash) + require.Equal(t, parentHash1, *blockRows[2000].BlockParentHash) + require.False(t, blockRows[2000].IsFinal) + + require.NotNil(t, blockRows[2001]) + require.Equal(t, uint64(2001), blockRows[2001].BlockNumber) + require.NotNil(t, blockRows[2001].BlockParentHash) + require.Equal(t, parentHash2, *blockRows[2001].BlockParentHash) + require.False(t, blockRows[2001].IsFinal) +} diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index ac04732c9..7ac1199f7 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -571,7 +571,7 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Equal(t, uint64(100), res.BlockRange.ToBlock) }) - t.Run("fulfill totally a segment,set it as empty ", func(t *testing.T) { + t.Run("fulfill totally a segment,set it as empty", func(t *testing.T) { set := NewSetSyncSegment() addr := common.HexToAddress("0x123") segment := SyncSegment{ From 45fa1c3c0ab72ae1194aaceaa7611fc7e32fada9 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:09:06 +0100 Subject: [PATCH 42/75] fix: coverage --- multidownloader/storage/storage_block.go | 17 ++-- multidownloader/storage/storage_block_test.go | 90 ++++++++++++++++--- multidownloader/types/sync_segment.go | 3 +- 3 files changed, 90 insertions(+), 20 deletions(-) diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index 2a41d2306..016ffa15d 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -105,14 +105,17 @@ func (a *MultidownloaderStorage) GetHighestBlockNumber(tx dbtypes.Querier) (uint return 0, nil } -// GetRangeBlockHeader retrieves the highest block header stored in the database -// return lowest and highest block headers +// GetRangeBlockHeader retrieves the lowest and highest block headers stored in the database +// for the specified finality type. Returns lowest and highest block headers. func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, isFinal mdtypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + a.mutex.RLock() + defer a.mutex.RUnlock() + highestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks "+ - "WHERE is_final=? order by block_number DESC LIMIT 1", isFinal) + "WHERE is_final=? ORDER BY block_number DESC LIMIT 1", isFinal) if err != nil { - return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) } if highestBlock.IsEmpty() { return nil, nil, nil @@ -122,9 +125,9 @@ func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, } lowestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? "+ - "order by block_number DESC LIMIT 1", isFinal) + "ORDER BY block_number ASC LIMIT 1", isFinal) if err != nil { - return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) + return nil, nil, fmt.Errorf("GetRangeBlockHeader:lowest: %w", err) } if lowestBlock.IsEmpty() { return nil, nil, nil @@ -132,7 +135,7 @@ func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, if lowestBlock.Len() > 1 { return nil, nil, fmt.Errorf("GetRangeBlockHeader:lowest: more than one block returned (%d)", lowestBlock.Len()) } - return highestBlock.ListHeaders()[0], lowestBlock.ListHeaders()[0], nil + return lowestBlock.ListHeaders()[0], highestBlock.ListHeaders()[0], nil } func (a *MultidownloaderStorage) GetBlockHeaderByNumber(tx dbtypes.Querier, diff --git a/multidownloader/storage/storage_block_test.go b/multidownloader/storage/storage_block_test.go index 4c985331c..abdab5d13 100644 --- a/multidownloader/storage/storage_block_test.go +++ b/multidownloader/storage/storage_block_test.go @@ -34,20 +34,86 @@ func TestStorage_GetBlock(t *testing.T) { } func TestStorage_GetRangeBlockHeader(t *testing.T) { - storage := newStorageForTest(t, nil) - block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) - err := storage.saveAggkitBlock(nil, block, mdtypes.NotFinalized) - require.NoError(t, err, "cannot insert BlockHeader") + t.Run("returns same block when only one block exists", func(t *testing.T) { + storage := newStorageForTest(t, nil) + block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) + err := storage.saveAggkitBlock(nil, block, mdtypes.NotFinalized) + require.NoError(t, err, "cannot insert BlockHeader") + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.Equal(t, block, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block, highest, "highest BlockHeader mismatch") + }) + + t.Run("returns nil when no blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.Finalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.Nil(t, lowest, "expected nil lowest BlockHeader") + require.Nil(t, highest, "expected nil highest BlockHeader") + }) + + t.Run("returns correct lowest and highest when multiple blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple non-finalized blocks in non-sequential order + block1 := aggkittypes.NewBlockHeader(2000, exampleTestHash[0], 1630001000, nil) + err := storage.saveAggkitBlock(nil, block1, mdtypes.NotFinalized) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(1000, exampleTestHash[1], 1630000000, nil) + err = storage.saveAggkitBlock(nil, block2, mdtypes.NotFinalized) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(nil, block3, mdtypes.NotFinalized) + require.NoError(t, err) + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(1000), lowest.Number, "lowest should be block 1000") + require.Equal(t, uint64(3000), highest.Number, "highest should be block 3000") + require.Equal(t, block2, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block3, highest, "highest BlockHeader mismatch") + }) + + t.Run("filters by finality type correctly", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert finalized blocks + finalizedBlock1 := aggkittypes.NewBlockHeader(500, exampleTestHash[3], 1629999000, nil) + err := storage.saveAggkitBlock(nil, finalizedBlock1, mdtypes.Finalized) + require.NoError(t, err) - lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) - require.NoError(t, err, "cannot get range BlockHeader") - require.Equal(t, block, lowest, "lowest BlockHeader mismatch") - require.Equal(t, block, highest, "highest BlockHeader mismatch") + finalizedBlock2 := aggkittypes.NewBlockHeader(1500, exampleTestHash[4], 1630000500, nil) + err = storage.saveAggkitBlock(nil, finalizedBlock2, mdtypes.Finalized) + require.NoError(t, err) + + // Insert non-finalized blocks + notFinalizedBlock := aggkittypes.NewBlockHeader(2500, exampleTestHash[5], 1630001500, nil) + err = storage.saveAggkitBlock(nil, notFinalizedBlock, mdtypes.NotFinalized) + require.NoError(t, err) - lowest, highest, err = storage.GetRangeBlockHeader(nil, mdtypes.Finalized) - require.NoError(t, err, "cannot get range BlockHeader") - require.True(t, lowest.Empty(), "lowest BlockHeader mismatch") - require.True(t, highest.Empty(), "highest BlockHeader mismatch") + // Get finalized range + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.Finalized) + require.NoError(t, err) + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(500), lowest.Number, "lowest finalized should be block 500") + require.Equal(t, uint64(1500), highest.Number, "highest finalized should be block 1500") + + // Get non-finalized range + lowest, highest, err = storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err) + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(2500), lowest.Number, "should only return non-finalized block") + require.Equal(t, uint64(2500), highest.Number, "should only return non-finalized block") + }) } func TestStorage_GetHighestBlockNumber(t *testing.T) { diff --git a/multidownloader/types/sync_segment.go b/multidownloader/types/sync_segment.go index af99ff1a8..1233a4019 100644 --- a/multidownloader/types/sync_segment.go +++ b/multidownloader/types/sync_segment.go @@ -12,7 +12,7 @@ import ( // for representing segments to be synced type SyncSegment struct { ContractAddr common.Address - // If FromBlock is 0 means that is empty + // BlockRange can be empty BlockRange.IsEmpty() BlockRange aggkitcommon.BlockRange TargetToBlock aggkittypes.BlockNumberFinality } @@ -61,6 +61,7 @@ func (s *SyncSegment) UpdateToBlock(newToBlock uint64) { s.BlockRange.ToBlock = newToBlock } +// Empty sets the SyncSegment (fromBlock > toBlock) to indicate it is empty func (s *SyncSegment) Empty() { if s == nil { return From 1d1da5e2ad6fce9fcab76b7118ac8f4191988f40 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:04:07 +0100 Subject: [PATCH 43/75] feat: renamed reorgs.chain_id to reorg_id --- AGENTS.md | 1 + Makefile | 4 + multidownloader/evm_multidownloader_reorg.go | 16 ++-- .../evm_multidownloader_reorg_test.go | 72 +++++++-------- multidownloader/reorg_processor.go | 4 +- multidownloader/reorg_processor_port_test.go | 4 +- multidownloader/storage/migrations/0002.sql | 37 ++++---- multidownloader/storage/storage_reorg.go | 74 ++++++++-------- multidownloader/storage/storage_reorg_test.go | 88 +++++++++---------- multidownloader/sync/evmdownloader.go | 8 +- multidownloader/sync/evmdownloader_test.go | 20 ++--- .../mocks/mock_multidownloader_interface.go | 32 +++---- multidownloader/sync/types/multidownloader.go | 6 +- multidownloader/types/mocks/mock_storager.go | 54 ++++++------ multidownloader/types/reorg_data.go | 8 +- multidownloader/types/reorg_data_test.go | 4 +- multidownloader/types/reorg_error.go | 10 +-- multidownloader/types/reorg_error_test.go | 8 +- multidownloader/types/storager.go | 8 +- 19 files changed, 233 insertions(+), 225 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 18e0687fb..7048d54df 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -7,6 +7,7 @@ - `make test-unit` - Run all unit tests with coverage - `go test -v -run TestName ./path/to/package` - Run a single test - `go test -v -run TestName ./...` - Run single test across all packages +- `make generate-mocks` - Generate all mocks ## Code Style diff --git a/Makefile b/Makefile index 689f205f9..c214a9a27 100644 --- a/Makefile +++ b/Makefile @@ -117,6 +117,10 @@ vulncheck: ## Runs the vulnerability checker tool @echo "Running govulncheck on all packages..." @go list ./... | xargs -n1 govulncheck +.PHONY: generate-mocks +generate-mocks: ## Generates the mocks using mockery + @cd test && $(MAKE) generate-mocks + ## Help display. ## Pulls comments from beside commands and prints a nicely formatted ## display with the commands and their usage information. diff --git a/multidownloader/evm_multidownloader_reorg.go b/multidownloader/evm_multidownloader_reorg.go index abde85f35..32382e9e5 100644 --- a/multidownloader/evm_multidownloader_reorg.go +++ b/multidownloader/evm_multidownloader_reorg.go @@ -9,7 +9,7 @@ import ( ) // CheckValidBlock checks if the given blockNumber and blockHash are still valid -// returns: isValid bool, reorgChainID uint64, err error +// returns: isValid bool, reorgID uint64, err error func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber uint64, blockHash common.Hash) (bool, uint64, error) { // Check if is stored as valid block @@ -26,15 +26,15 @@ func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber u } // From this point is invalid or unknown // Check in blocks_reorged - chainID, found, err := dh.storage.GetBlockReorgedChainID(nil, blockNumber, blockHash) + reorgID, found, err := dh.storage.GetBlockReorgedReorgID(nil, blockNumber, blockHash) if err != nil { return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot check blocks_reorged for blockNumber=%d: %w", blockNumber, err) } if found { - dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (chainID=%d)", - blockNumber, blockHash.Hex(), chainID) - return false, chainID, nil + dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (reorgID=%d)", + blockNumber, blockHash.Hex(), reorgID) + return false, reorgID, nil } // Not found anywhere, consider invalid return false, 0, fmt.Errorf( @@ -42,7 +42,7 @@ func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber u blockNumber, blockHash.Hex()) } -func (dh *EVMMultidownloader) GetReorgedDataByChainID(ctx context.Context, - reorgChainID uint64) (*mdrtypes.ReorgData, error) { - return dh.storage.GetReorgedDataByChainID(nil, reorgChainID) +func (dh *EVMMultidownloader) GetReorgedDataByReorgID(ctx context.Context, + reorgID uint64) (*mdrtypes.ReorgData, error) { + return dh.storage.GetReorgedDataByReorgID(nil, reorgID) } diff --git a/multidownloader/evm_multidownloader_reorg_test.go b/multidownloader/evm_multidownloader_reorg_test.go index a3ead02b5..1f367ea8d 100644 --- a/multidownloader/evm_multidownloader_reorg_test.go +++ b/multidownloader/evm_multidownloader_reorg_test.go @@ -27,11 +27,11 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(storedBlock, mdrtypes.Finalized, nil).Once() - isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.NoError(t, err) require.True(t, isValid) - require.Equal(t, uint64(0), reorgChainID) + require.Equal(t, uint64(0), reorgID) }) t.Run("returns error when GetBlockHeaderByNumber fails", func(t *testing.T) { @@ -43,19 +43,19 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(nil, mdrtypes.NotFinalized, expectedErr).Once() - isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.Error(t, err) require.Contains(t, err.Error(), "cannot get BlockHeader") require.True(t, isValid) - require.Equal(t, uint64(0), reorgChainID) + require.Equal(t, uint64(0), reorgID) }) - t.Run("returns false with chainID when block found in blocks_reorged", func(t *testing.T) { + t.Run("returns false with reorgID when block found in blocks_reorged", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) blockNumber := uint64(100) blockHash := common.HexToHash("0x1234") - expectedChainID := uint64(42) + expectedReorgID := uint64(42) storedBlock := &aggkittypes.BlockHeader{ Number: blockNumber, @@ -64,14 +64,14 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(storedBlock, mdrtypes.Finalized, nil).Once() - testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). - Return(expectedChainID, true, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). + Return(expectedReorgID, true, nil).Once() - isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.NoError(t, err) require.False(t, isValid) - require.Equal(t, expectedChainID, reorgChainID) + require.Equal(t, expectedReorgID, reorgID) }) t.Run("returns false when block not stored and not in blocks_reorged", func(t *testing.T) { @@ -81,22 +81,22 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(nil, mdrtypes.NotFinalized, nil).Once() - testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). Return(uint64(0), false, nil).Once() - isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.Error(t, err) require.Contains(t, err.Error(), "not found in storage or blocks_reorged") require.False(t, isValid) - require.Equal(t, uint64(0), reorgChainID) + require.Equal(t, uint64(0), reorgID) }) - t.Run("returns false with chainID when stored block hash does not match", func(t *testing.T) { + t.Run("returns false with reorgID when stored block hash does not match", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) blockNumber := uint64(100) blockHash := common.HexToHash("0x1234") - reorgChainID := uint64(99) + expectedReorgID := uint64(99) storedBlock := &aggkittypes.BlockHeader{ Number: blockNumber, @@ -105,17 +105,17 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(storedBlock, mdrtypes.Finalized, nil).Once() - testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). - Return(reorgChainID, true, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). + Return(expectedReorgID, true, nil).Once() - isValid, chainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.NoError(t, err) require.False(t, isValid) - require.Equal(t, reorgChainID, chainID) + require.Equal(t, expectedReorgID, reorgID) }) - t.Run("returns error when GetBlockReorgedChainID fails", func(t *testing.T) { + t.Run("returns error when GetBlockReorgedReorgID fails", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) blockNumber := uint64(100) blockHash := common.HexToHash("0x1234") @@ -128,24 +128,24 @@ func TestEVMMultidownloader_CheckValidBlock(t *testing.T) { expectedErr := fmt.Errorf("reorg query error") testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). Return(storedBlock, mdrtypes.Finalized, nil).Once() - testData.mockStorage.EXPECT().GetBlockReorgedChainID(mock.Anything, blockNumber, blockHash). + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). Return(uint64(0), false, expectedErr).Once() - isValid, reorgChainID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) require.Error(t, err) require.Contains(t, err.Error(), "cannot check blocks_reorged") require.True(t, isValid) - require.Equal(t, uint64(0), reorgChainID) + require.Equal(t, uint64(0), reorgID) }) } -func TestEVMMultidownloader_GetReorgedDataByChainID(t *testing.T) { +func TestEVMMultidownloader_GetReorgedDataByReorgID(t *testing.T) { t.Run("returns reorg data successfully", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) - expectedChainID := uint64(42) + expectedReorgID := uint64(42) expectedReorgData := &mdrtypes.ReorgData{ - ChainID: expectedChainID, + ReorgID: expectedReorgID, BlockRangeAffected: aggkitcommon.BlockRange{ FromBlock: 100, ToBlock: 200, @@ -154,14 +154,14 @@ func TestEVMMultidownloader_GetReorgedDataByChainID(t *testing.T) { DetectedTimestamp: 1234567890, } - testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). Return(expectedReorgData, nil).Once() - result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID) require.NoError(t, err) require.NotNil(t, result) - require.Equal(t, expectedReorgData.ChainID, result.ChainID) + require.Equal(t, expectedReorgData.ReorgID, result.ReorgID) require.Equal(t, expectedReorgData.BlockRangeAffected, result.BlockRangeAffected) require.Equal(t, expectedReorgData.DetectedAtBlock, result.DetectedAtBlock) require.Equal(t, expectedReorgData.DetectedTimestamp, result.DetectedTimestamp) @@ -169,27 +169,27 @@ func TestEVMMultidownloader_GetReorgedDataByChainID(t *testing.T) { t.Run("returns error when storage query fails", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) - expectedChainID := uint64(42) + expectedReorgID := uint64(42) expectedErr := fmt.Errorf("database error") - testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). Return(nil, expectedErr).Once() - result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID) require.Error(t, err) require.Equal(t, expectedErr, err) require.Nil(t, result) }) - t.Run("returns nil when chainID not found", func(t *testing.T) { + t.Run("returns nil when reorgID not found", func(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) - expectedChainID := uint64(999) + expectedReorgID := uint64(999) - testData.mockStorage.EXPECT().GetReorgedDataByChainID(mock.Anything, expectedChainID). + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). Return(nil, nil).Once() - result, err := testData.mdr.GetReorgedDataByChainID(context.Background(), expectedChainID) + result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID) require.NoError(t, err) require.Nil(t, result) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 5432f994b..0eca7861f 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -99,11 +99,11 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, Description: detectedReorgError.Error(), } - chainID, err := rm.port.MoveReorgedBlocks(tx, reorgData) + reorgID, err := rm.port.MoveReorgedBlocks(tx, reorgData) if err != nil { return fmt.Errorf("ProcessReorg: error moving reorged blocks: %w", err) } - reorgData.ChainID = chainID + reorgData.ReorgID = reorgID committed = true if err := tx.Commit(); err != nil { return fmt.Errorf("ProcessReorg: cannot commit tx: %w", err) diff --git a/multidownloader/reorg_processor_port_test.go b/multidownloader/reorg_processor_port_test.go index a41744ea4..1abdf51be 100644 --- a/multidownloader/reorg_processor_port_test.go +++ b/multidownloader/reorg_processor_port_test.go @@ -231,7 +231,7 @@ func TestReorgPort_MoveReorgedBlocks(t *testing.T) { } reorgData := mdtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), } expectedAffectedRows := uint64(101) @@ -254,7 +254,7 @@ func TestReorgPort_MoveReorgedBlocks(t *testing.T) { } reorgData := mdtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), } expectedErr := fmt.Errorf("transaction failed") diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql index 7cfb0f2b0..5d8f9005b 100644 --- a/multidownloader/storage/migrations/0002.sql +++ b/multidownloader/storage/migrations/0002.sql @@ -4,36 +4,39 @@ DROP TABLE IF EXISTS blocks_reorged; DROP TABLE IF EXISTS reorgs; -- +migrate Up + + +CREATE TABLE blocks_reorged ( + reorg_id BIGINT NOT NULL REFERENCES reorgs(reorg_id), + block_number BIGINT NOT NULL, + block_hash TEXT NOT NULL, + block_timestamp INTEGER NOT NULL, + block_parent_hash TEXT NOT NULL, + PRIMARY KEY (reorg_id, block_number) +); + CREATE TABLE logs_reorged ( - chain_id BIGINT NOT NULL , - block_number BIGINT NOT NULL , - address TEXT NOT NULL, -- + reorg_id BIGINT NOT NULL, + block_number BIGINT NOT NULL, + address TEXT NOT NULL, -- topics TEXT NOT NULL, -- list of hashes in JSON - data BLOB, -- + data BLOB, -- tx_hash TEXT NOT NULL, tx_index INTEGER NOT NULL, - log_index INTEGER NOT NULL, -- β€œindex” is a reserved keyword - PRIMARY KEY (address, chain_id,block_number, log_index), - FOREIGN KEY (chain_id, block_number) REFERENCES blocks_reorged(chain_id, block_number) + log_index INTEGER NOT NULL, -- "index" is a reserved keyword + PRIMARY KEY (address, reorg_id, block_number, log_index), + FOREIGN KEY (reorg_id, block_number) REFERENCES blocks_reorged(reorg_id, block_number) ); CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); -CREATE TABLE blocks_reorged ( - chain_id BIGINT NOT NULL REFERENCES reorgs(chain_id), - block_number BIGINT NOT NULL, - block_hash TEXT NOT NULL, - block_timestamp INTEGER NOT NULL, - block_parent_hash TEXT NOT NULL, - PRIMARY KEY (chain_id, block_number) -); CREATE TABLE reorgs ( - chain_id BIGINT PRIMARY KEY, + reorg_id BIGINT PRIMARY KEY, detected_at_block BIGINT NOT NULL, reorged_from_block BIGINT NOT NULL, reorged_to_block BIGINT NOT NULL, - detected_timestamp INTEGER NOT NULL, + detected_timestamp INTEGER NOT NULL, network_latest_block INTEGER NOT NULL, -- which was the latest block in the detection moment network_finalized_block INTEGER NOT NULL, -- which was the finalized block in the detection moment network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 92feb28b1..4e8683005 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -14,7 +14,7 @@ import ( ) type reorgRow struct { - ChainID uint64 `meddler:"chain_id"` + ReorgID uint64 `meddler:"reorg_id"` DetectedAtBlock uint64 `meddler:"detected_at_block"` ReorgedFromBlock uint64 `meddler:"reorged_from_block"` ReorgedToBlock uint64 `meddler:"reorged_to_block"` @@ -27,7 +27,7 @@ type reorgRow struct { func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { return &reorgRow{ - ChainID: reorgData.ChainID, + ReorgID: reorgData.ReorgID, DetectedAtBlock: reorgData.DetectedAtBlock, ReorgedFromBlock: reorgData.BlockRangeAffected.FromBlock, ReorgedToBlock: reorgData.BlockRangeAffected.ToBlock, @@ -39,7 +39,7 @@ func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { } } -// returns ChainID of the inserted reorg +// returns ReorgID of the inserted reorg func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, reorgData mdrtypes.ReorgData) (uint64, error) { if tx == nil { @@ -48,24 +48,24 @@ func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtyp reorgRow := newReorgRowFromReorgData(reorgData) a.mutex.Lock() defer a.mutex.Unlock() - // Get Next ChainID from storage using rowid - lastChainID := struct { - ChainID *uint64 `meddler:"chain_id"` + // Get Next ReorgID from storage using rowid + lastReorgID := struct { + ReorgID *uint64 `meddler:"reorg_id"` }{} - err := meddler.QueryRow(tx, &lastChainID, "SELECT MAX(chain_id) as chain_id FROM reorgs") + err := meddler.QueryRow(tx, &lastReorgID, "SELECT MAX(reorg_id) as reorg_id FROM reorgs") if err != nil { - return 0, fmt.Errorf("InsertNewReorg: error getting last chain_id: %w", err) + return 0, fmt.Errorf("InsertNewReorg: error getting last reorg_id: %w", err) } - if lastChainID.ChainID == nil { - reorgRow.ChainID = 1 + if lastReorgID.ReorgID == nil { + reorgRow.ReorgID = 1 } else { - reorgRow.ChainID = *lastChainID.ChainID + 1 + reorgRow.ReorgID = *lastReorgID.ReorgID + 1 } if err := meddler.Insert(tx, "reorgs", reorgRow); err != nil { return 0, fmt.Errorf("InsertNewReorg: error inserting reorgs (%s): %w", reorgData.String(), err) } - if err := a.moveReorgedBlocksAndLogsNoMutex(tx, reorgRow.ChainID, + if err := a.moveReorgedBlocksAndLogsNoMutex(tx, reorgRow.ReorgID, reorgData.BlockRangeAffected); err != nil { return 0, fmt.Errorf("InsertNewReorg: error moving reorged blocks to block_reorged: %w", err) } @@ -74,18 +74,18 @@ func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtyp if err != nil { return 0, fmt.Errorf("InsertNewReorg: error adjusting sync_status for reorg: %w", err) } - return reorgRow.ChainID, nil + return reorgRow.ReorgID, nil } -func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Querier, chainID uint64, +func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Querier, reorgID uint64, blockRangeAffected aggkitcommon.BlockRange) error { - a.logger.Debugf("storage: moving blocks to blocks_reorged - chain_id: %d, range: %s", - chainID, blockRangeAffected.String()) - query := `INSERT INTO blocks_reorged (chain_id, block_number, block_hash,block_parent_hash, block_timestamp) + a.logger.Debugf("storage: moving blocks to blocks_reorged - reorg_id: %d, range: %s", + reorgID, blockRangeAffected.String()) + query := `INSERT INTO blocks_reorged (reorg_id, block_number, block_hash,block_parent_hash, block_timestamp) SELECT ?, block_number, block_hash, block_parent_hash, block_timestamp FROM blocks WHERE block_number >= ? AND block_number <= ?; - INSERT INTO logs_reorged (chain_id, block_number, address,topics, data, tx_hash, tx_index, log_index) + INSERT INTO logs_reorged (reorg_id, block_number, address,topics, data, tx_hash, tx_index, log_index) SELECT ?, block_number, address, topics, data, tx_hash, tx_index, log_index FROM logs WHERE block_number >= ? AND block_number <= ?; @@ -94,9 +94,9 @@ func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Quer DELETE FROM blocks WHERE block_number >= ? AND block_number <= ?;` _, err := tx.Exec(query, - chainID, + reorgID, blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, - chainID, + reorgID, blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, blockRangeAffected.FromBlock, blockRangeAffected.ToBlock) @@ -106,36 +106,36 @@ func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Quer return nil } -func (a *MultidownloaderStorage) GetBlockReorgedChainID(tx dbtypes.Querier, +func (a *MultidownloaderStorage) GetBlockReorgedReorgID(tx dbtypes.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { if tx == nil { tx = a.db } a.mutex.RLock() defer a.mutex.RUnlock() - var chainIDRow struct { - ChainID *uint64 `meddler:"chain_id"` + var reorgIDRow struct { + ReorgID *uint64 `meddler:"reorg_id"` } - query := `SELECT br.chain_id FROM blocks_reorged br - INNER JOIN reorgs r ON br.chain_id = r.chain_id + query := `SELECT br.reorg_id FROM blocks_reorged br + INNER JOIN reorgs r ON br.reorg_id = r.reorg_id WHERE br.block_number = ? AND br.block_hash = ? ORDER BY r.reorged_from_block ASC LIMIT 1;` - err := tx.QueryRow(query, blockNumber, blockHash.Hex()).Scan(&chainIDRow.ChainID) + err := tx.QueryRow(query, blockNumber, blockHash.Hex()).Scan(&reorgIDRow.ReorgID) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, false, nil } - return 0, false, fmt.Errorf("GetBlockReorgedChainID: error querying blocks_reorged: %w", err) + return 0, false, fmt.Errorf("GetBlockReorgedReorgID: error querying blocks_reorged: %w", err) } - if chainIDRow.ChainID == nil { + if reorgIDRow.ReorgID == nil { return 0, false, nil } - return *chainIDRow.ChainID, true, nil + return *reorgIDRow.ReorgID, true, nil } -func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, - reorgedChainID uint64) (*mdrtypes.ReorgData, error) { +func (a *MultidownloaderStorage) GetReorgedDataByReorgID(tx dbtypes.Querier, + reorgID uint64) (*mdrtypes.ReorgData, error) { if tx == nil { tx = a.db } @@ -143,26 +143,26 @@ func (a *MultidownloaderStorage) GetReorgedDataByChainID(tx dbtypes.Querier, defer a.mutex.RUnlock() var row reorgRow - query := `SELECT chain_id, detected_at_block, reorged_from_block, reorged_to_block, + query := `SELECT reorg_id, detected_at_block, reorged_from_block, reorged_to_block, detected_timestamp, network_latest_block, network_finalized_block, network_finalized_block_name, description - FROM reorgs WHERE chain_id = ? LIMIT 1;` + FROM reorgs WHERE reorg_id = ? LIMIT 1;` - err := meddler.QueryRow(tx, &row, query, reorgedChainID) + err := meddler.QueryRow(tx, &row, query, reorgID) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } - return nil, fmt.Errorf("GetReorgedDataByChainID: error querying reorgs table: %w", err) + return nil, fmt.Errorf("GetReorgedDataByReorgID: error querying reorgs table: %w", err) } // Convert string to BlockNumberFinality blockFinality, err := aggkittypes.NewBlockNumberFinality(row.NetworkFinalizedBlockName) if err != nil { - return nil, fmt.Errorf("GetReorgedDataByChainID: error parsing NetworkFinalizedBlockName: %w", err) + return nil, fmt.Errorf("GetReorgedDataByReorgID: error parsing NetworkFinalizedBlockName: %w", err) } reorgData := &mdrtypes.ReorgData{ - ChainID: row.ChainID, + ReorgID: row.ReorgID, BlockRangeAffected: aggkitcommon.BlockRange{ FromBlock: row.ReorgedFromBlock, ToBlock: row.ReorgedToBlock, diff --git a/multidownloader/storage/storage_reorg_test.go b/multidownloader/storage/storage_reorg_test.go index ed487b9a0..8f3308821 100644 --- a/multidownloader/storage/storage_reorg_test.go +++ b/multidownloader/storage/storage_reorg_test.go @@ -12,7 +12,7 @@ import ( func TestStorage_InsertNewReorg(t *testing.T) { storage := newStorageForTest(t, nil) reorgData := mdrtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(5000, 5010), DetectedAtBlock: 5020, DetectedTimestamp: 1630003000, @@ -22,17 +22,17 @@ func TestStorage_InsertNewReorg(t *testing.T) { } tx, err := storage.NewTx(t.Context()) require.NoError(t, err, "cannot start new transaction") - chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) require.NoError(t, err, "cannot insert new reorg") - require.Equal(t, uint64(1), chainID, "first chain ID must be 1") + require.Equal(t, uint64(1), reorgID, "first reorg ID must be 1") err = tx.Commit() require.NoError(t, err, "cannot commit transaction") tx, err = storage.NewTx(t.Context()) require.NoError(t, err, "cannot start new transaction") - chainID, err = storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + reorgID, err = storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) require.NoError(t, err, "cannot insert new reorg") - require.Equal(t, uint64(2), chainID, "second chain ID must be 2") + require.Equal(t, uint64(2), reorgID, "second reorg ID must be 2") err = tx.Commit() require.NoError(t, err, "cannot commit transaction") } @@ -43,7 +43,7 @@ func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { 5000, 20, 5) reorgData := mdrtypes.ReorgData{ - ChainID: 0, // will be set by InsertNewReorg + ReorgID: 0, // will be set by InsertNewReorg BlockRangeAffected: aggkitcommon.NewBlockRange(5005, 5015), DetectedAtBlock: 5020, DetectedTimestamp: 1630003000, @@ -53,9 +53,9 @@ func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { } tx, err := storage.NewTx(t.Context()) require.NoError(t, err, "cannot start new transaction") - chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) require.NoError(t, err, "cannot insert new reorg") - require.Equal(t, uint64(1), chainID, "first chain ID must be 1") + require.Equal(t, uint64(1), reorgID, "first reorg ID must be 1") err = tx.Commit() require.NoError(t, err, "cannot commit transaction") // Now check that blocks from 5005 to 5015 are in block_reorged @@ -66,8 +66,8 @@ func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { } } -func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { - t.Run("returns chain_id with lowest reorged_from_block when block exists in multiple chains", func(t *testing.T) { +func TestStorage_GetBlockReorgedReorgID_MultipleChains(t *testing.T) { + t.Run("returns reorg_id with lowest reorged_from_block when block exists in multiple chains", func(t *testing.T) { storage := newStorageForTest(t, nil) // First, populate some blocks that will be reorged @@ -86,9 +86,9 @@ func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { tx1, err := storage.NewTx(t.Context()) require.NoError(t, err) - chainID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + reorgID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) require.NoError(t, err) - require.Equal(t, uint64(1), chainID1) + require.Equal(t, uint64(1), reorgID1) err = tx1.Commit() require.NoError(t, err) @@ -105,15 +105,15 @@ func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { tx2, err := storage.NewTx(t.Context()) require.NoError(t, err) - chainID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + reorgID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) require.NoError(t, err) - require.Equal(t, uint64(2), chainID2) + require.Equal(t, uint64(2), reorgID2) err = tx2.Commit() require.NoError(t, err) // The key test: insert the SAME block_number and block_hash into MULTIPLE chains // This is the scenario the user wants to test - when a block exists in multiple reorg chains, - // the function should return the chain_id with the lowest reorged_from_block + // the function should return the reorg_id with the lowest reorged_from_block testBlockNumber := uint64(2000) // Use a block number outside the reorg ranges testBlockHash := exampleTestHash[7] @@ -121,30 +121,30 @@ func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { require.NoError(t, err) // Insert the SAME block into chain 1 (reorged_from_block=1010) - _, err = tx3.Exec(`INSERT INTO blocks_reorged (chain_id, block_number, block_hash, block_parent_hash, block_timestamp) - VALUES (?, ?, ?, ?, ?)`, chainID1, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + _, err = tx3.Exec(`INSERT INTO blocks_reorged (reorg_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, reorgID1, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) require.NoError(t, err) // Insert the SAME block into chain 2 (reorged_from_block=1005, lower!) - _, err = tx3.Exec(`INSERT INTO blocks_reorged (chain_id, block_number, block_hash, block_parent_hash, block_timestamp) - VALUES (?, ?, ?, ?, ?)`, chainID2, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + _, err = tx3.Exec(`INSERT INTO blocks_reorged (reorg_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, reorgID2, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) require.NoError(t, err) err = tx3.Commit() require.NoError(t, err) - // Query for the block - should return chainID2 since it has the lowest reorged_from_block (1005 < 1010) - returnedChainID, found, err := storage.GetBlockReorgedChainID(nil, testBlockNumber, testBlockHash) + // Query for the block - should return reorgID2 since it has the lowest reorged_from_block (1005 < 1010) + returnedReorgID, found, err := storage.GetBlockReorgedReorgID(nil, testBlockNumber, testBlockHash) require.NoError(t, err) require.True(t, found, "block should be found") - require.Equal(t, chainID2, returnedChainID, "should return chain_id with lowest reorged_from_block (chain 2 with reorged_from_block=1005)") + require.Equal(t, reorgID2, returnedReorgID, "should return reorg_id with lowest reorged_from_block (chain 2 with reorged_from_block=1005)") // Verify the reorged_from_block values to confirm our expectation - reorgData1Retrieved, err := storage.GetReorgedDataByChainID(nil, chainID1) + reorgData1Retrieved, err := storage.GetReorgedDataByReorgID(nil, reorgID1) require.NoError(t, err) require.Equal(t, uint64(1010), reorgData1Retrieved.BlockRangeAffected.FromBlock) - reorgData2Retrieved, err := storage.GetReorgedDataByChainID(nil, chainID2) + reorgData2Retrieved, err := storage.GetReorgedDataByReorgID(nil, reorgID2) require.NoError(t, err) require.Equal(t, uint64(1005), reorgData2Retrieved.BlockRangeAffected.FromBlock) }) @@ -153,20 +153,20 @@ func TestStorage_GetBlockReorgedChainID_MultipleChains(t *testing.T) { storage := newStorageForTest(t, nil) // Query for non-existent block - chainID, found, err := storage.GetBlockReorgedChainID(nil, 9999, exampleTestHash[0]) + reorgID, found, err := storage.GetBlockReorgedReorgID(nil, 9999, exampleTestHash[0]) require.NoError(t, err) require.False(t, found, "block should not be found") - require.Equal(t, uint64(0), chainID) + require.Equal(t, uint64(0), reorgID) }) } -func TestStorage_GetReorgedDataByChainID(t *testing.T) { +func TestStorage_GetReorgedDataByReorgID(t *testing.T) { t.Run("returns reorg data when found", func(t *testing.T) { storage := newStorageForTest(t, nil) // Insert a reorg expectedReorgData := mdrtypes.ReorgData{ - ChainID: 0, // will be set by InsertNewReorg + ReorgID: 0, // will be set by InsertNewReorg BlockRangeAffected: aggkitcommon.NewBlockRange(1000, 1010), DetectedAtBlock: 1020, DetectedTimestamp: 1630003000, @@ -177,17 +177,17 @@ func TestStorage_GetReorgedDataByChainID(t *testing.T) { tx, err := storage.NewTx(t.Context()) require.NoError(t, err) - chainID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, expectedReorgData) + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, expectedReorgData) require.NoError(t, err) - require.Equal(t, uint64(1), chainID) + require.Equal(t, uint64(1), reorgID) err = tx.Commit() require.NoError(t, err) // Retrieve the reorg data - reorgData, err := storage.GetReorgedDataByChainID(nil, chainID) + reorgData, err := storage.GetReorgedDataByReorgID(nil, reorgID) require.NoError(t, err) require.NotNil(t, reorgData, "reorg data should not be nil when found") - require.Equal(t, chainID, reorgData.ChainID) + require.Equal(t, reorgID, reorgData.ReorgID) require.Equal(t, expectedReorgData.BlockRangeAffected, reorgData.BlockRangeAffected) require.Equal(t, expectedReorgData.DetectedAtBlock, reorgData.DetectedAtBlock) require.Equal(t, expectedReorgData.DetectedTimestamp, reorgData.DetectedTimestamp) @@ -196,12 +196,12 @@ func TestStorage_GetReorgedDataByChainID(t *testing.T) { require.Equal(t, expectedReorgData.NetworkFinalizedBlockName, reorgData.NetworkFinalizedBlockName) }) - t.Run("returns nil when chainID not found", func(t *testing.T) { + t.Run("returns nil when reorgID not found", func(t *testing.T) { storage := newStorageForTest(t, nil) - // Try to retrieve a non-existent chainID - reorgData, err := storage.GetReorgedDataByChainID(nil, 999) - require.NoError(t, err, "should not return error when chainID not found") + // Try to retrieve a non-existent reorgID + reorgData, err := storage.GetReorgedDataByReorgID(nil, 999) + require.NoError(t, err, "should not return error when reorgID not found") require.Nil(t, reorgData, "reorg data should be nil when not found") }) @@ -229,33 +229,33 @@ func TestStorage_GetReorgedDataByChainID(t *testing.T) { tx1, err := storage.NewTx(t.Context()) require.NoError(t, err) - chainID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + reorgID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) require.NoError(t, err) - require.Equal(t, uint64(1), chainID1) + require.Equal(t, uint64(1), reorgID1) err = tx1.Commit() require.NoError(t, err) tx2, err := storage.NewTx(t.Context()) require.NoError(t, err) - chainID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + reorgID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) require.NoError(t, err) - require.Equal(t, uint64(2), chainID2) + require.Equal(t, uint64(2), reorgID2) err = tx2.Commit() require.NoError(t, err) // Retrieve first reorg - retrieved1, err := storage.GetReorgedDataByChainID(nil, chainID1) + retrieved1, err := storage.GetReorgedDataByReorgID(nil, reorgID1) require.NoError(t, err) require.NotNil(t, retrieved1) - require.Equal(t, chainID1, retrieved1.ChainID) + require.Equal(t, reorgID1, retrieved1.ReorgID) require.Equal(t, reorgData1.BlockRangeAffected, retrieved1.BlockRangeAffected) require.Equal(t, reorgData1.NetworkFinalizedBlockName, retrieved1.NetworkFinalizedBlockName) // Retrieve second reorg - retrieved2, err := storage.GetReorgedDataByChainID(nil, chainID2) + retrieved2, err := storage.GetReorgedDataByReorgID(nil, reorgID2) require.NoError(t, err) require.NotNil(t, retrieved2) - require.Equal(t, chainID2, retrieved2.ChainID) + require.Equal(t, reorgID2, retrieved2.ReorgID) require.Equal(t, reorgData2.BlockRangeAffected, retrieved2.BlockRangeAffected) require.Equal(t, reorgData2.NetworkFinalizedBlockName, retrieved2.NetworkFinalizedBlockName) }) diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index 20164b726..e8fddfa00 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -317,20 +317,20 @@ func (d *EVMDownloader) checkReorgedBlock(ctx context.Context, return nil } // Check blockHeader is not reorged - isValid, reorgChainID, err := d.mdr.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash) + isValid, reorgID, err := d.mdr.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash) if err != nil { return err } if !isValid { - reorgData, err := d.mdr.GetReorgedDataByChainID(ctx, reorgChainID) + reorgData, err := d.mdr.GetReorgedDataByReorgID(ctx, reorgID) if err != nil { return err } // TODO: if reorgData is nil?? can't happen if reorgData == nil { - return fmt.Errorf("reorg data not found for chain ID %d", reorgChainID) + return fmt.Errorf("reorg data not found for reorg ID %d", reorgID) } - return mdrtypes.NewReorgedError(reorgData.BlockRangeAffected, reorgChainID, + return mdrtypes.NewReorgedError(reorgData.BlockRangeAffected, reorgID, fmt.Sprintf("detected at block number %d", blockHeader.Number), ) } diff --git a/multidownloader/sync/evmdownloader_test.go b/multidownloader/sync/evmdownloader_test.go index daf686ccb..f4fb6f65d 100644 --- a/multidownloader/sync/evmdownloader_test.go +++ b/multidownloader/sync/evmdownloader_test.go @@ -176,14 +176,14 @@ func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { } reorgData := &mdrtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), DetectedAtBlock: 106, } // Setup mocks - reorg detected mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil) - mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil) result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) @@ -191,7 +191,7 @@ func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { require.Nil(t, result) require.True(t, mdrtypes.IsReorgedError(err)) reorgErr := mdrtypes.CastReorgedError(err) - require.Equal(t, uint64(1), reorgErr.ReorgedChainID) + require.Equal(t, uint64(1), reorgErr.ReorgID) } func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { @@ -481,7 +481,7 @@ func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { } reorgData := &mdrtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), DetectedAtBlock: 106, } @@ -496,7 +496,7 @@ func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { // Second iteration: reorg detected during checkReorgedBlock mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil).Once() - mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil).Once() + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil).Once() result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) @@ -865,20 +865,20 @@ func TestCheckReorgedBlock_InvalidBlock(t *testing.T) { } reorgData := &mdrtypes.ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), DetectedAtBlock: 106, } mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) - mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(reorgData, nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil) err := download.checkReorgedBlock(ctx, blockHeader) require.Error(t, err) require.True(t, mdrtypes.IsReorgedError(err)) reorgErr := mdrtypes.CastReorgedError(err) - require.Equal(t, uint64(1), reorgErr.ReorgedChainID) + require.Equal(t, uint64(1), reorgErr.ReorgID) } func TestCheckReorgedBlock_ContextCancellation(t *testing.T) { @@ -967,7 +967,7 @@ func TestCheckReorgedBlock_GetReorgedDataError(t *testing.T) { } mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) - mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(nil, fmt.Errorf("database error")) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(nil, fmt.Errorf("database error")) err := download.checkReorgedBlock(ctx, blockHeader) @@ -999,7 +999,7 @@ func TestCheckReorgedBlock_NilReorgData(t *testing.T) { } mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) - mockMdr.EXPECT().GetReorgedDataByChainID(ctx, uint64(1)).Return(nil, nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(nil, nil) err := download.checkReorgedBlock(ctx, blockHeader) diff --git a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go index 9b1977a36..efb26b2da 100644 --- a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go +++ b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go @@ -192,21 +192,21 @@ func (_c *MultidownloaderInterface_Finality_Call) RunAndReturn(run func() aggkit return _c } -// GetReorgedDataByChainID provides a mock function with given fields: ctx, reorgedChainID -func (_m *MultidownloaderInterface) GetReorgedDataByChainID(ctx context.Context, reorgedChainID uint64) (*multidownloadertypes.ReorgData, error) { - ret := _m.Called(ctx, reorgedChainID) +// GetReorgedDataByReorgID provides a mock function with given fields: ctx, reorgID +func (_m *MultidownloaderInterface) GetReorgedDataByReorgID(ctx context.Context, reorgID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(ctx, reorgID) if len(ret) == 0 { - panic("no return value specified for GetReorgedDataByChainID") + panic("no return value specified for GetReorgedDataByReorgID") } var r0 *multidownloadertypes.ReorgData var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)); ok { - return rf(ctx, reorgedChainID) + return rf(ctx, reorgID) } if rf, ok := ret.Get(0).(func(context.Context, uint64) *multidownloadertypes.ReorgData); ok { - r0 = rf(ctx, reorgedChainID) + r0 = rf(ctx, reorgID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*multidownloadertypes.ReorgData) @@ -214,7 +214,7 @@ func (_m *MultidownloaderInterface) GetReorgedDataByChainID(ctx context.Context, } if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = rf(ctx, reorgedChainID) + r1 = rf(ctx, reorgID) } else { r1 = ret.Error(1) } @@ -222,31 +222,31 @@ func (_m *MultidownloaderInterface) GetReorgedDataByChainID(ctx context.Context, return r0, r1 } -// MultidownloaderInterface_GetReorgedDataByChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByChainID' -type MultidownloaderInterface_GetReorgedDataByChainID_Call struct { +// MultidownloaderInterface_GetReorgedDataByReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByReorgID' +type MultidownloaderInterface_GetReorgedDataByReorgID_Call struct { *mock.Call } -// GetReorgedDataByChainID is a helper method to define mock.On call +// GetReorgedDataByReorgID is a helper method to define mock.On call // - ctx context.Context -// - reorgedChainID uint64 -func (_e *MultidownloaderInterface_Expecter) GetReorgedDataByChainID(ctx interface{}, reorgedChainID interface{}) *MultidownloaderInterface_GetReorgedDataByChainID_Call { - return &MultidownloaderInterface_GetReorgedDataByChainID_Call{Call: _e.mock.On("GetReorgedDataByChainID", ctx, reorgedChainID)} +// - reorgID uint64 +func (_e *MultidownloaderInterface_Expecter) GetReorgedDataByReorgID(ctx interface{}, reorgID interface{}) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { + return &MultidownloaderInterface_GetReorgedDataByReorgID_Call{Call: _e.mock.On("GetReorgedDataByReorgID", ctx, reorgID)} } -func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) Run(run func(ctx context.Context, reorgedChainID uint64)) *MultidownloaderInterface_GetReorgedDataByChainID_Call { +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) Run(run func(ctx context.Context, reorgID uint64)) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(uint64)) }) return _c } -func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *MultidownloaderInterface_GetReorgedDataByChainID_Call { +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MultidownloaderInterface_GetReorgedDataByChainID_Call) RunAndReturn(run func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)) *MultidownloaderInterface_GetReorgedDataByChainID_Call { +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) RunAndReturn(run func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/sync/types/multidownloader.go b/multidownloader/sync/types/multidownloader.go index da53931ef..6336c8e7b 100644 --- a/multidownloader/sync/types/multidownloader.go +++ b/multidownloader/sync/types/multidownloader.go @@ -10,11 +10,11 @@ import ( type MultidownloaderInterface interface { // CheckValidBlock checks if the given blockNumber and blockHash are still valid - // returns: isValid bool, reorgChainID uint64, err error + // returns: isValid bool, reorgID uint64, err error CheckValidBlock(ctx context.Context, blockNumber uint64, blockHash common.Hash) (bool, uint64, error) - // GetReorgedDataByChainID retrieves the reorged data by chain ID - GetReorgedDataByChainID(ctx context.Context, reorgedChainID uint64) (*mdrtypes.ReorgData, error) + // GetReorgedDataByReorgID retrieves the reorged data by reorg ID + GetReorgedDataByReorgID(ctx context.Context, reorgID uint64) (*mdrtypes.ReorgData, error) // IsAvailable checks if the logs for the given query are available IsAvailable(query mdrtypes.LogQuery) bool // IsPartiallyAvailable checks if the logs for the given query are partially available diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index a1ab40653..dbe8c6bb4 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -155,12 +155,12 @@ func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types return _c } -// GetBlockReorgedChainID provides a mock function with given fields: tx, blockNumber, blockHash -func (_m *Storager) GetBlockReorgedChainID(tx types.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { +// GetBlockReorgedReorgID provides a mock function with given fields: tx, blockNumber, blockHash +func (_m *Storager) GetBlockReorgedReorgID(tx types.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { ret := _m.Called(tx, blockNumber, blockHash) if len(ret) == 0 { - panic("no return value specified for GetBlockReorgedChainID") + panic("no return value specified for GetBlockReorgedReorgID") } var r0 uint64 @@ -190,32 +190,32 @@ func (_m *Storager) GetBlockReorgedChainID(tx types.Querier, blockNumber uint64, return r0, r1, r2 } -// Storager_GetBlockReorgedChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockReorgedChainID' -type Storager_GetBlockReorgedChainID_Call struct { +// Storager_GetBlockReorgedReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockReorgedReorgID' +type Storager_GetBlockReorgedReorgID_Call struct { *mock.Call } -// GetBlockReorgedChainID is a helper method to define mock.On call +// GetBlockReorgedReorgID is a helper method to define mock.On call // - tx types.Querier // - blockNumber uint64 // - blockHash common.Hash -func (_e *Storager_Expecter) GetBlockReorgedChainID(tx interface{}, blockNumber interface{}, blockHash interface{}) *Storager_GetBlockReorgedChainID_Call { - return &Storager_GetBlockReorgedChainID_Call{Call: _e.mock.On("GetBlockReorgedChainID", tx, blockNumber, blockHash)} +func (_e *Storager_Expecter) GetBlockReorgedReorgID(tx interface{}, blockNumber interface{}, blockHash interface{}) *Storager_GetBlockReorgedReorgID_Call { + return &Storager_GetBlockReorgedReorgID_Call{Call: _e.mock.On("GetBlockReorgedReorgID", tx, blockNumber, blockHash)} } -func (_c *Storager_GetBlockReorgedChainID_Call) Run(run func(tx types.Querier, blockNumber uint64, blockHash common.Hash)) *Storager_GetBlockReorgedChainID_Call { +func (_c *Storager_GetBlockReorgedReorgID_Call) Run(run func(tx types.Querier, blockNumber uint64, blockHash common.Hash)) *Storager_GetBlockReorgedReorgID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(types.Querier), args[1].(uint64), args[2].(common.Hash)) }) return _c } -func (_c *Storager_GetBlockReorgedChainID_Call) Return(_a0 uint64, _a1 bool, _a2 error) *Storager_GetBlockReorgedChainID_Call { +func (_c *Storager_GetBlockReorgedReorgID_Call) Return(_a0 uint64, _a1 bool, _a2 error) *Storager_GetBlockReorgedReorgID_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Storager_GetBlockReorgedChainID_Call) RunAndReturn(run func(types.Querier, uint64, common.Hash) (uint64, bool, error)) *Storager_GetBlockReorgedChainID_Call { +func (_c *Storager_GetBlockReorgedReorgID_Call) RunAndReturn(run func(types.Querier, uint64, common.Hash) (uint64, bool, error)) *Storager_GetBlockReorgedReorgID_Call { _c.Call.Return(run) return _c } @@ -403,21 +403,21 @@ func (_c *Storager_GetRangeBlockHeader_Call) RunAndReturn(run func(types.Querier return _c } -// GetReorgedDataByChainID provides a mock function with given fields: tx, reorgedChainID -func (_m *Storager) GetReorgedDataByChainID(tx types.Querier, reorgedChainID uint64) (*multidownloadertypes.ReorgData, error) { - ret := _m.Called(tx, reorgedChainID) +// GetReorgedDataByReorgID provides a mock function with given fields: tx, reorgID +func (_m *Storager) GetReorgedDataByReorgID(tx types.Querier, reorgID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(tx, reorgID) if len(ret) == 0 { - panic("no return value specified for GetReorgedDataByChainID") + panic("no return value specified for GetReorgedDataByReorgID") } var r0 *multidownloadertypes.ReorgData var r1 error if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)); ok { - return rf(tx, reorgedChainID) + return rf(tx, reorgID) } if rf, ok := ret.Get(0).(func(types.Querier, uint64) *multidownloadertypes.ReorgData); ok { - r0 = rf(tx, reorgedChainID) + r0 = rf(tx, reorgID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*multidownloadertypes.ReorgData) @@ -425,7 +425,7 @@ func (_m *Storager) GetReorgedDataByChainID(tx types.Querier, reorgedChainID uin } if rf, ok := ret.Get(1).(func(types.Querier, uint64) error); ok { - r1 = rf(tx, reorgedChainID) + r1 = rf(tx, reorgID) } else { r1 = ret.Error(1) } @@ -433,31 +433,31 @@ func (_m *Storager) GetReorgedDataByChainID(tx types.Querier, reorgedChainID uin return r0, r1 } -// Storager_GetReorgedDataByChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByChainID' -type Storager_GetReorgedDataByChainID_Call struct { +// Storager_GetReorgedDataByReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByReorgID' +type Storager_GetReorgedDataByReorgID_Call struct { *mock.Call } -// GetReorgedDataByChainID is a helper method to define mock.On call +// GetReorgedDataByReorgID is a helper method to define mock.On call // - tx types.Querier -// - reorgedChainID uint64 -func (_e *Storager_Expecter) GetReorgedDataByChainID(tx interface{}, reorgedChainID interface{}) *Storager_GetReorgedDataByChainID_Call { - return &Storager_GetReorgedDataByChainID_Call{Call: _e.mock.On("GetReorgedDataByChainID", tx, reorgedChainID)} +// - reorgID uint64 +func (_e *Storager_Expecter) GetReorgedDataByReorgID(tx interface{}, reorgID interface{}) *Storager_GetReorgedDataByReorgID_Call { + return &Storager_GetReorgedDataByReorgID_Call{Call: _e.mock.On("GetReorgedDataByReorgID", tx, reorgID)} } -func (_c *Storager_GetReorgedDataByChainID_Call) Run(run func(tx types.Querier, reorgedChainID uint64)) *Storager_GetReorgedDataByChainID_Call { +func (_c *Storager_GetReorgedDataByReorgID_Call) Run(run func(tx types.Querier, reorgID uint64)) *Storager_GetReorgedDataByReorgID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(types.Querier), args[1].(uint64)) }) return _c } -func (_c *Storager_GetReorgedDataByChainID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *Storager_GetReorgedDataByChainID_Call { +func (_c *Storager_GetReorgedDataByReorgID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *Storager_GetReorgedDataByReorgID_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Storager_GetReorgedDataByChainID_Call) RunAndReturn(run func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)) *Storager_GetReorgedDataByChainID_Call { +func (_c *Storager_GetReorgedDataByReorgID_Call) RunAndReturn(run func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)) *Storager_GetReorgedDataByReorgID_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/types/reorg_data.go b/multidownloader/types/reorg_data.go index 1e5801524..0bc059534 100644 --- a/multidownloader/types/reorg_data.go +++ b/multidownloader/types/reorg_data.go @@ -8,8 +8,8 @@ import ( ) type ReorgData struct { - // ChainID is the id of the roerged chain stored on DB (incremental ID) - ChainID uint64 + // ReorgID is the unique identifier for the reorg stored in DB (incremental ID) + ReorgID uint64 // BlockRangeAffected is the range of blocks affected by the reorg (from,to inclusive) BlockRangeAffected aggkitcommon.BlockRange // DetectedAtBlock is the block number where the reorg was detected @@ -22,9 +22,9 @@ type ReorgData struct { } func (r *ReorgData) String() string { - return fmt.Sprintf("ReorgData{ChainID: %d, BlockRangeAffected: %s, DetectedAtBlock: %d, DetectedTimestamp: %d, "+ + return fmt.Sprintf("ReorgData{ReorgID: %d, BlockRangeAffected: %s, DetectedAtBlock: %d, DetectedTimestamp: %d, "+ "NetworkLatestBlock: %d, NetworkFinalizedBlock: %d (%s), Description: %s}", - r.ChainID, + r.ReorgID, r.BlockRangeAffected.String(), r.DetectedAtBlock, r.DetectedTimestamp, diff --git a/multidownloader/types/reorg_data_test.go b/multidownloader/types/reorg_data_test.go index 355a17a6f..e7682fdfa 100644 --- a/multidownloader/types/reorg_data_test.go +++ b/multidownloader/types/reorg_data_test.go @@ -10,7 +10,7 @@ import ( func TestReorgData_String(t *testing.T) { reorgData := &ReorgData{ - ChainID: 1, + ReorgID: 1, BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), DetectedAtBlock: 250, DetectedTimestamp: 1620000000, @@ -19,7 +19,7 @@ func TestReorgData_String(t *testing.T) { NetworkFinalizedBlockName: aggkittypes.LatestBlock, Description: "Test reorg description", } - require.Equal(t, "ReorgData{ChainID: 1, BlockRangeAffected: From: 100, To: 200 (101), "+ + require.Equal(t, "ReorgData{ReorgID: 1, BlockRangeAffected: From: 100, To: 200 (101), "+ "DetectedAtBlock: 250, DetectedTimestamp: 1620000000, NetworkLatestBlock: 300, NetworkFinalizedBlock: 240 (LatestBlock), "+ "Description: Test reorg description}", reorgData.String()) diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go index 1ecf9f25e..7bb47f37e 100644 --- a/multidownloader/types/reorg_error.go +++ b/multidownloader/types/reorg_error.go @@ -93,22 +93,22 @@ func CastDetectedReorgError(err error) *DetectedReorgError { type ReorgedError struct { Message string BlockRangeReorged aggkitcommon.BlockRange - ReorgedChainID uint64 + ReorgID uint64 } func NewReorgedError(blockRangeReorged aggkitcommon.BlockRange, - reorgedChainID uint64, + reorgID uint64, msg string) *ReorgedError { return &ReorgedError{ Message: msg, BlockRangeReorged: blockRangeReorged, - ReorgedChainID: reorgedChainID, + ReorgID: reorgID, } } func (e *ReorgedError) Error() string { - return fmt.Sprintf("reorgedError: chainID=%d blockRangeReorged=%s: %s", - e.ReorgedChainID, e.BlockRangeReorged.String(), e.Message) + return fmt.Sprintf("reorgedError: reorgID=%d blockRangeReorged=%s: %s", + e.ReorgID, e.BlockRangeReorged.String(), e.Message) } // IsReorgedError checks if an error is a ReorgedError diff --git a/multidownloader/types/reorg_error_test.go b/multidownloader/types/reorg_error_test.go index a739dc4fc..6e95bf22e 100644 --- a/multidownloader/types/reorg_error_test.go +++ b/multidownloader/types/reorg_error_test.go @@ -207,19 +207,19 @@ func TestNewReorgedError(t *testing.T) { require.NotNil(t, err) require.Equal(t, blockRange, err.BlockRangeReorged) - require.Equal(t, chainID, err.ReorgedChainID) + require.Equal(t, chainID, err.ReorgID) require.Equal(t, testReorgMsg, err.Message) } func TestReorgedError_Error(t *testing.T) { blockRange := aggkitcommon.NewBlockRange(100, 200) - chainID := uint64(1) + reorgID := uint64(1) msg := "test message" - err := NewReorgedError(blockRange, chainID, testReorgMsg) + err := NewReorgedError(blockRange, reorgID, testReorgMsg) result := err.Error() - expected := fmt.Sprintf("reorgedError: chainID=%d blockRangeReorged=%s: %s", chainID, blockRange.String(), msg) + expected := fmt.Sprintf("reorgedError: reorgID=%d blockRangeReorged=%s: %s", reorgID, blockRange.String(), msg) require.Equal(t, expected, result) } diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go index 10702986f..3d9a56bc5 100644 --- a/multidownloader/types/storager.go +++ b/multidownloader/types/storager.go @@ -38,12 +38,12 @@ type Storager interface { highest *aggkittypes.BlockHeader, err error) // GetHighestBlockNumber returns the highest block number stored in db GetHighestBlockNumber(tx dbtypes.Querier) (uint64, error) - // GetReorgedChainID returns the chainID of the reorged block if exists + // GetBlockReorgedReorgID returns the reorgID of the reorged block if exists // second return value indicates if the block is reorged - GetBlockReorgedChainID(tx dbtypes.Querier, + GetBlockReorgedReorgID(tx dbtypes.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) - GetReorgedDataByChainID(tx dbtypes.Querier, - reorgedChainID uint64) (*ReorgData, error) + GetReorgedDataByReorgID(tx dbtypes.Querier, + reorgID uint64) (*ReorgData, error) } type StoragerForReorg interface { From 0a5eb07e40bfb37a86dc365544482806ad30a517 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:19:03 +0100 Subject: [PATCH 44/75] fix: Claude comments --- multidownloader/storage/migrations/0002.sql | 27 +++++++++------------ multidownloader/storage/storage_reorg.go | 2 +- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql index 5d8f9005b..c6796ac8e 100644 --- a/multidownloader/storage/migrations/0002.sql +++ b/multidownloader/storage/migrations/0002.sql @@ -4,7 +4,17 @@ DROP TABLE IF EXISTS blocks_reorged; DROP TABLE IF EXISTS reorgs; -- +migrate Up - +CREATE TABLE reorgs ( + reorg_id BIGINT PRIMARY KEY, + detected_at_block BIGINT NOT NULL, + reorged_from_block BIGINT NOT NULL, + reorged_to_block BIGINT NOT NULL, + detected_timestamp INTEGER NOT NULL, + network_latest_block INTEGER NOT NULL, -- which was the latest block in the detection moment + network_finalized_block INTEGER NOT NULL, -- which was the finalized block in the detection moment + network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) + description TEXT -- extra information, can be null +); CREATE TABLE blocks_reorged ( reorg_id BIGINT NOT NULL REFERENCES reorgs(reorg_id), @@ -28,17 +38,4 @@ CREATE TABLE logs_reorged ( FOREIGN KEY (reorg_id, block_number) REFERENCES blocks_reorged(reorg_id, block_number) ); -CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); - - -CREATE TABLE reorgs ( - reorg_id BIGINT PRIMARY KEY, - detected_at_block BIGINT NOT NULL, - reorged_from_block BIGINT NOT NULL, - reorged_to_block BIGINT NOT NULL, - detected_timestamp INTEGER NOT NULL, - network_latest_block INTEGER NOT NULL, -- which was the latest block in the detection moment - network_finalized_block INTEGER NOT NULL, -- which was the finalized block in the detection moment - network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) - description TEXT -- extra information, can be null -); \ No newline at end of file +CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); \ No newline at end of file diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 4e8683005..5139c1e8c 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -43,7 +43,7 @@ func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, reorgData mdrtypes.ReorgData) (uint64, error) { if tx == nil { - return 0, fmt.Errorf("InsertNewReorg: require a tx because it done multiples operations") + return 0, fmt.Errorf("InsertNewReorg: requires a tx because it performs multiple operations that need to be atomic") } reorgRow := newReorgRowFromReorgData(reorgData) a.mutex.Lock() From 8e37d8c67e28f11cdbd452075e019f47652ed091 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:38:50 +0100 Subject: [PATCH 45/75] feat: increase coverage --- multidownloader/evm_multidownloader_test.go | 405 +++++++++++++++++--- 1 file changed, 352 insertions(+), 53 deletions(-) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index a3656d899..a17128eaf 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -13,6 +13,7 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" "github.com/agglayer/aggkit/db" + dbmocks "github.com/agglayer/aggkit/db/mocks" "github.com/agglayer/aggkit/etherman" mockethermantypes "github.com/agglayer/aggkit/etherman/types/mocks" "github.com/agglayer/aggkit/log" @@ -54,7 +55,7 @@ func (tp *testProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) er return nil } -func TestEVMMultidownloader(t *testing.T) { +func TestEVMMultidownloaderExploratory(t *testing.T) { t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") cfgLog := log.Config{ @@ -161,48 +162,9 @@ func TestEVMMultidownloader(t *testing.T) { wg.Wait() } -func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { - t.Skip("it's a exploratory test for batch requests - requires external dependencies") - /* Commented out to avoid import cycles - l1url := os.Getenv("L1URL") - ethClient, err := rpc.DialContext(t.Context(), l1url) - require.NoError(t, err) - var blockNumber string - var chainID string - - var latestBlock aggkittypes.BlockHeader - batch := []rpc.BatchElem{ - { - Method: "eth_blockNumber", - Args: []interface{}{}, - Result: &blockNumber, - }, - { - Method: "eth_chainId", - Args: []interface{}{}, - Result: &chainID, - }, - { - Method: "eth_getBlockByNumber", - Args: []interface{}{ - "0x37", // nΓΊmero de bloque en formato hex o palabra clave - false, // incluir transacciones completas - }, - Result: &latestBlock, - }, - } - - err = ethClient.BatchCallContext(t.Context(), batch) - require.NoError(t, err) - - log.Infof("blockNumber: %s, chainID: %s", blockNumber, chainID) - log.Infof("latestBlock: %+v", latestBlock) - */ -} - -func TestDownloaderParellelvsBatch(t *testing.T) { +func TestPerformanceDownloaderParallelvsBatch(t *testing.T) { t.Skip("it's a benchmarking test - requires external dependencies") - /* Commented out to avoid import cycles + l1url := os.Getenv("L1URL") ethClient, err := ethclient.Dial(l1url) require.NoError(t, err) @@ -233,24 +195,24 @@ func TestDownloaderParellelvsBatch(t *testing.T) { require.Equal(t, len(headersParallel), len(headersBatch)) for _, blockNumber := range blockNumbersSlice { - headerP := getBlockHeader(blockNumber, headersParallel) - headerB := getBlockHeader(blockNumber, headersBatch) + headerP := getBlockHeader(t, blockNumber, headersParallel) + headerB := getBlockHeader(t, blockNumber, headersBatch) require.NotNil(t, headerP) require.NotNil(t, headerB) require.Equal(t, headerP.Hash, headerB.Hash) } - */ } // getBlockHeader is only used in skipped tests -// func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { -// for _, h := range headers { -// if h.Number == bn { -// return h -// } -// } -// return nil -// } +func getBlockHeader(t *testing.T, bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { + t.Helper() + for _, h := range headers { + if h.Number == bn { + return h + } + } + return nil +} func TestEVMMultidownloader_NewEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "evm_multidownloader_test") @@ -604,3 +566,340 @@ func TestEVMMultidownloader_StartStop(t *testing.T) { wg.Wait() }) } + +func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { + t.Run("successful move from unsafe to safe", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + mockTx.EXPECT().Commit().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + + // Create Ethereum headers that will be returned by RPC + header195 := ðtypes.Header{ + Number: big.NewInt(195), + ParentHash: common.HexToHash("0x194"), + Time: 1234567890, + } + header196 := ðtypes.Header{ + Number: big.NewInt(196), + ParentHash: common.HexToHash("0x195"), + Time: 1234567891, + } + + // Mock unsafe blocks with the same hashes that will be calculated from the Ethereum headers + unsafeBlocks := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()}, + &aggkittypes.BlockHeader{Number: 196, Hash: header196.Hash()}, + } + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(unsafeBlocks, nil).Once() + + // Mock RPC block headers retrieval for reorg detection + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once() + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(196)).Return(header196, nil).Once() + + // Mock update to finalized + data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195, 196}).Return(nil).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.NoError(t, err) + }) + + t.Run("no unsafe blocks to move", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // Mock no unsafe blocks + emptyBlocks := aggkittypes.ListBlockHeaders{} + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(emptyBlocks, nil).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.NoError(t, err) + }) + + t.Run("error getting finalized block number", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number error + expectedErr := fmt.Errorf("finalized block error") + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(uint64(0), expectedErr).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get finalized block number") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("error creating transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction creation error + expectedErr := fmt.Errorf("tx creation error") + data.mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("error getting unsafe blocks", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // Mock error getting unsafe blocks + expectedErr := fmt.Errorf("get blocks error") + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(nil, expectedErr).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get unsafe block bases") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("reorg detected during move", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + + // Mock unsafe blocks with a specific hash + storageHash := common.HexToHash("0x195") + unsafeBlocks := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 195, Hash: storageHash}, + } + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(unsafeBlocks, nil).Once() + + // Mock RPC returns header with different hash (reorg detected) + headerDifferent := ðtypes.Header{ + Number: big.NewInt(195), + ParentHash: common.HexToHash("0xDIFFERENT"), + Time: 9999999, + } + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(headerDifferent, nil).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "error detecting reorgs") + // Check it's a reorg error + reorgErr := mdrtypes.CastDetectedReorgError(err) + require.NotNil(t, reorgErr) + }) + + t.Run("error updating blocks to finalized", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + + // Create Ethereum header + header195 := ðtypes.Header{ + Number: big.NewInt(195), + ParentHash: common.HexToHash("0x194"), + Time: 1234567890, + } + + // Mock unsafe blocks with matching hash + unsafeBlocks := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()}, + } + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(unsafeBlocks, nil).Once() + + // Mock RPC block headers (no reorg) + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once() + + // Mock update error + expectedErr := fmt.Errorf("update error") + data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(expectedErr).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot update is_final for block bases") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("error committing transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + expectedErr := fmt.Errorf("commit error") + mockTx.EXPECT().Commit().Return(expectedErr).Once() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + + // Create Ethereum header + header195 := ðtypes.Header{ + Number: big.NewInt(195), + ParentHash: common.HexToHash("0x194"), + Time: 1234567890, + } + + // Mock unsafe blocks with matching hash + unsafeBlocks := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()}, + } + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(unsafeBlocks, nil).Once() + + // Mock RPC block headers (no reorg) + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once() + + // Mock update success + data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(nil).Once() + + err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + require.Contains(t, err.Error(), expectedErr.Error()) + }) +} + +func TestEVMMultidownloader_StartStep(t *testing.T) { + t.Run("error in MoveUnsafeToSafeIfPossible", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock updateTargetBlockNumber success (no pending blocks to update) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible to fail + expectedErr := fmt.Errorf("move unsafe error") + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, expectedErr).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + }) + + t.Run("error in checkReorgsUnsafeZone", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock updateTargetBlockNumber success + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible success (no unsafe blocks) + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock checkReorgsUnsafeZone to fail + expectedErr := fmt.Errorf("check reorgs error") + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything). + Return(nil, expectedErr).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "check reorgs error") + }) + + t.Run("no pending blocks - waits for new blocks", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Create a context with cancel to avoid waiting forever + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Mock updateTargetBlockNumber success + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible success + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock checkReorgsUnsafeZone success (no unsafe blocks) + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock WaitForNewLatestBlocks - GetBlockHeaderByNumber will fail + data.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, mock.Anything). + Return(nil, mdrtypes.NotFinalized, fmt.Errorf("no blocks yet")).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get block header") + }) +} From d7f6d1024ee8aad3af83c12dac294196fad27177 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 5 Feb 2026 20:15:26 +0100 Subject: [PATCH 46/75] feat: increase coverage --- multidownloader/evm_multidownloader_test.go | 480 ++++++++++++++++++++ 1 file changed, 480 insertions(+) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index a17128eaf..b9cbf68c1 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -903,3 +903,483 @@ func TestEVMMultidownloader_StartStep(t *testing.T) { require.Contains(t, err.Error(), "cannot get block header") }) } + +func TestGetBlockNumbers(t *testing.T) { + t.Run("empty logs", func(t *testing.T) { + logs := []ethtypes.Log{} + result := getBlockNumbers(logs) + require.Empty(t, result) + }) + + t.Run("single log", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 1) + require.Equal(t, uint64(100), result[0]) + }) + + t.Run("multiple logs with unique block numbers", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + {BlockNumber: 101}, + {BlockNumber: 102}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 3) + require.Contains(t, result, uint64(100)) + require.Contains(t, result, uint64(101)) + require.Contains(t, result, uint64(102)) + }) + + t.Run("multiple logs with duplicate block numbers", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + {BlockNumber: 100}, + {BlockNumber: 101}, + {BlockNumber: 101}, + {BlockNumber: 102}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 3) + require.Contains(t, result, uint64(100)) + require.Contains(t, result, uint64(101)) + require.Contains(t, result, uint64(102)) + }) +} + +func TestGetContracts(t *testing.T) { + t.Run("empty log queries", func(t *testing.T) { + queries := []mdrtypes.LogQuery{} + result := getContracts(queries) + require.Empty(t, result) + }) + + t.Run("single query with one address", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1}}, + } + result := getContracts(queries) + require.Len(t, result, 1) + require.Contains(t, result, addr1) + }) + + t.Run("multiple queries with unique addresses", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1}}, + {Addrs: []common.Address{addr2}}, + } + result := getContracts(queries) + require.Len(t, result, 2) + require.Contains(t, result, addr1) + require.Contains(t, result, addr2) + }) + + t.Run("multiple queries with duplicate addresses", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1, addr2}}, + {Addrs: []common.Address{addr1}}, + {Addrs: []common.Address{addr2}}, + } + result := getContracts(queries) + require.Len(t, result, 2) + require.Contains(t, result, addr1) + require.Contains(t, result, addr2) + }) +} + +func TestEVMMultidownloader_CheckIntegrityNewLogsBlockHeaders(t *testing.T) { + t.Run("empty logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + logs := []ethtypes.Log{} + headers := aggkittypes.ListBlockHeaders{} + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.NoError(t, err) + }) + + t.Run("matching logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + hash101 := common.HexToHash("0x101") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + {BlockNumber: 101, BlockHash: hash101}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: hash100}, + &aggkittypes.BlockHeader{Number: 101, Hash: hash101}, + } + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.NoError(t, err) + }) + + t.Run("log with missing block header", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + {BlockNumber: 101, BlockHash: common.HexToHash("0x101")}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: hash100}, + } + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.Error(t, err) + require.Contains(t, err.Error(), "block header for log block number 101 not found") + }) + + t.Run("log with mismatched hash", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + differentHash := common.HexToHash("0xDIFFERENT") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: differentHash}, + } + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.Error(t, err) + require.Contains(t, err.Error(), "does not match block header hash") + }) +} + +func TestEVMMultidownloader_IsPartiallyAvailable(t *testing.T) { + t.Run("basic functionality", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + data.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(200), nil).Maybe() + + err := data.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{ + common.HexToAddress("0x1"), + }, + FromBlock: 100, + ToBlock: aggkittypes.FinalizedBlock, + }) + require.NoError(t, err) + + err = data.mdr.Initialize(context.Background()) + require.NoError(t, err) + + // Query for blocks that are not yet synced + query := mdrtypes.LogQuery{ + BlockRange: aggkitcommon.NewBlockRange(100, 200), + Addrs: []common.Address{common.HexToAddress("0x1")}, + } + + // The function should not panic and return valid values + isPartial, partialQuery := data.mdr.IsPartiallyAvailable(query) + // Since nothing is synced yet, it might be partially available or not available + // We just verify it doesn't panic and returns consistent values + if isPartial { + require.NotNil(t, partialQuery) + } else { + require.Nil(t, partialQuery) + } + }) +} + +func TestEVMMultidownloader_GetLatestBlockNumber(t *testing.T) { + t.Run("success", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedBlockNumber := uint64(12345) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock). + Return(expectedBlockNumber, nil).Once() + + blockNumber, err := data.mdr.GetLatestBlockNumber(ctx) + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, blockNumber) + }) + + t.Run("error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedErr := fmt.Errorf("block number error") + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock). + Return(uint64(0), expectedErr).Once() + + blockNumber, err := data.mdr.GetLatestBlockNumber(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get latest block") + require.Equal(t, uint64(0), blockNumber) + }) +} + +func TestEVMMultidownloader_ShowStatistics(t *testing.T) { + t.Run("show statistics", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + // This should not panic + data.mdr.ShowStatistics(1) + data.mdr.ShowStatistics(10) + }) +} + +// mockDataError is a mock implementation of ethrpc.DataError for testing +type mockDataError struct { + msg string + data any +} + +func (e *mockDataError) Error() string { + return e.msg +} + +func (e *mockDataError) ErrorCode() int { + return -32000 +} + +func (e *mockDataError) ErrorData() any { + return e.data +} + +func Test_ethGetExtendedError(t *testing.T) { + t.Run("nil error returns empty string", func(t *testing.T) { + result := ethGetExtendedError(nil) + require.Equal(t, "", result) + }) + + t.Run("non-DataError returns empty string", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := ethGetExtendedError(err) + require.Equal(t, "", result) + }) + + t.Run("DataError returns formatted error data", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results", + } + result := ethGetExtendedError(dataErr) + require.Equal(t, "json_data: Query returned more than 20000 results", result) + }) +} + +func Test_isEthClientErrorTooManyResults(t *testing.T) { + t.Run("nil error returns false", func(t *testing.T) { + result := isEthClientErrorTooManyResults(nil) + require.False(t, result) + }) + + t.Run("regular error returns false", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := isEthClientErrorTooManyResults(err) + require.False(t, result) + }) + + t.Run("error with 'Response size exceeded' returns true", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Response size exceeded maximum limit", + } + result := isEthClientErrorTooManyResults(dataErr) + require.True(t, result) + }) + + t.Run("error with 'Query returned more than' returns true", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. Try with this block range [0x852c16, 0x853273].", + } + result := isEthClientErrorTooManyResults(dataErr) + require.True(t, result) + }) +} + +func Test_extractSuggestedBlockRangeFromError(t *testing.T) { + t.Run("nil error returns nil", func(t *testing.T) { + result := extractSuggestedBlockRangeFromError(nil) + require.Nil(t, result) + }) + + t.Run("non-too-many-results error returns nil", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := extractSuggestedBlockRangeFromError(err) + require.Nil(t, result) + }) + + t.Run("error with valid block range returns BlockRange", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. Try with this block range [0x852c16, 0x853273].", + } + result := extractSuggestedBlockRangeFromError(dataErr) + require.NotNil(t, result) + require.Equal(t, uint64(0x852c16), result.FromBlock) + require.Equal(t, uint64(0x853273), result.ToBlock) + }) + + t.Run("error with invalid block range returns nil", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. Try with different range.", + } + result := extractSuggestedBlockRangeFromError(dataErr) + require.Nil(t, result) + }) +} + +func TestEVMMultidownloader_storeData(t *testing.T) { + t.Run("successful store", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + logs := []ethtypes.Log{{Address: common.HexToAddress("0x123")}} + blocks := aggkittypes.ListBlockHeaders{{Number: 100, Hash: common.HexToHash("0xabc")}} + updatedSegments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(100, 200), + aggkittypes.BlockNumberFinality{}, + false, + ), + } + + mockTx := dbmocks.NewTxer(t) + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, blocks, logs, true).Return(nil).Once() + data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, updatedSegments).Return(nil).Once() + mockTx.EXPECT().Commit().Return(nil).Once() + + err := data.mdr.storeData(ctx, logs, blocks, updatedSegments, true) + require.NoError(t, err) + }) + + t.Run("error creating transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedErr := fmt.Errorf("tx creation error") + data.mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := data.mdr.storeData(ctx, nil, nil, nil, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + }) + + t.Run("error saving logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + logs := []ethtypes.Log{{Address: common.HexToAddress("0x123")}} + blocks := aggkittypes.ListBlockHeaders{{Number: 100}} + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("save error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, blocks, logs, true).Return(expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := data.mdr.storeData(ctx, logs, blocks, nil, true) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot save eth logs") + }) + + t.Run("error updating synced status", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + updatedSegments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(100, 200), + aggkittypes.BlockNumberFinality{}, + false, + ), + } + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("update error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, mock.Anything, mock.Anything, false).Return(nil).Once() + data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, updatedSegments).Return(expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := data.mdr.storeData(ctx, nil, nil, updatedSegments, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot update synced segments") + }) + + t.Run("error committing transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("commit error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, mock.Anything, mock.Anything, false).Return(nil).Once() + data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, mock.Anything).Return(nil).Once() + mockTx.EXPECT().Commit().Return(expectedErr).Once() + + err := data.mdr.storeData(ctx, nil, nil, nil, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + }) +} + +func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { + t.Run("successful state creation", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + + // Mock GetCurrentBlockNumber for UpdateTargetBlockToNumber + data.mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(1000), nil).Maybe() + + // Mock storage response + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(0, 100), + aggkittypes.BlockNumberFinality{}, + false, + )) + data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). + Return(storageSyncSegments, nil).Once() + + state, err := data.mdr.newStateFromStorage() + require.NoError(t, err) + require.NotNil(t, state) + }) + + t.Run("error getting synced block ranges from storage", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + + // Mock GetCurrentBlockNumber for UpdateTargetBlockToNumber + data.mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(1000), nil).Maybe() + + // Mock storage to return error + expectedErr := fmt.Errorf("storage error") + emptySegments := mdrtypes.NewSetSyncSegment() + data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). + Return(emptySegments, expectedErr).Once() + + state, err := data.mdr.newStateFromStorage() + require.Error(t, err) + require.Nil(t, state) + require.Contains(t, err.Error(), "cannot get synced block ranges from storage") + }) +} From 80ea8b6859dfe90b30fc9cd4a12984eff293f6c4 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:37:45 +0100 Subject: [PATCH 47/75] fix: coverage --- .../evm_multidownloader_debug_test.go | 24 +++ sync/evmtypes_test.go | 168 ++++++++++++++++++ 2 files changed, 192 insertions(+) create mode 100644 multidownloader/evm_multidownloader_debug_test.go create mode 100644 sync/evmtypes_test.go diff --git a/multidownloader/evm_multidownloader_debug_test.go b/multidownloader/evm_multidownloader_debug_test.go new file mode 100644 index 000000000..8813a401d --- /dev/null +++ b/multidownloader/evm_multidownloader_debug_test.go @@ -0,0 +1,24 @@ +package multidownloader + +import "testing" + +func TestEVMMultidownloaderDebug(t *testing.T) { + sut := NewEVMMultidownloaderDebug() + + sut.ForceRorg(123) + err := sut.GetInjectedStartStepError() + if err == nil { + t.Fatalf("Expected error to be injected, got nil") + } + expectedMsg := "ForceRorg: forced reorg at block number 123" + if err.Error() != expectedMsg { + t.Fatalf("Expected error message '%s', got '%s'", expectedMsg, err.Error()) + } + + // After getting the error once, it should be cleared + err = sut.GetInjectedStartStepError() + if err != nil { + t.Fatalf("Expected error to be cleared after retrieval, got '%s'", err.Error()) + } + +} diff --git a/sync/evmtypes_test.go b/sync/evmtypes_test.go new file mode 100644 index 000000000..5ab12afe4 --- /dev/null +++ b/sync/evmtypes_test.go @@ -0,0 +1,168 @@ +package sync + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestEVMBlocks_Len(t *testing.T) { + t.Run("empty blocks", func(t *testing.T) { + blocks := EVMBlocks{} + require.Equal(t, 0, blocks.Len()) + }) + + t.Run("single block", func(t *testing.T) { + blocks := EVMBlocks{ + {EVMBlockHeader: EVMBlockHeader{Num: 1}}, + } + require.Equal(t, 1, blocks.Len()) + }) + + t.Run("multiple blocks", func(t *testing.T) { + blocks := EVMBlocks{ + {EVMBlockHeader: EVMBlockHeader{Num: 1}}, + {EVMBlockHeader: EVMBlockHeader{Num: 2}}, + {EVMBlockHeader: EVMBlockHeader{Num: 3}}, + } + require.Equal(t, 3, blocks.Len()) + }) + + t.Run("nil blocks slice", func(t *testing.T) { + var blocks EVMBlocks + require.Equal(t, 0, blocks.Len()) + }) +} + +func TestEVMBlocks_LastBlock(t *testing.T) { + t.Run("empty blocks returns nil", func(t *testing.T) { + blocks := EVMBlocks{} + result := blocks.LastBlock() + require.Nil(t, result) + }) + + t.Run("nil blocks returns nil", func(t *testing.T) { + var blocks EVMBlocks + result := blocks.LastBlock() + require.Nil(t, result) + }) + + t.Run("single block returns that block", func(t *testing.T) { + expectedBlock := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 100, + Hash: common.HexToHash("0x123"), + }, + IsFinalizedBlock: true, + } + blocks := EVMBlocks{expectedBlock} + result := blocks.LastBlock() + require.NotNil(t, result) + require.Equal(t, expectedBlock, result) + require.Equal(t, uint64(100), result.Num) + require.Equal(t, common.HexToHash("0x123"), result.Hash) + require.True(t, result.IsFinalizedBlock) + }) + + t.Run("multiple blocks returns last block", func(t *testing.T) { + firstBlock := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{Num: 1}, + } + secondBlock := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{Num: 2}, + } + lastBlock := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 3, + Hash: common.HexToHash("0xLAST"), + ParentHash: common.HexToHash("0xPARENT"), + Timestamp: 1234567890, + }, + IsFinalizedBlock: false, + Events: []any{"event1", "event2"}, + } + blocks := EVMBlocks{firstBlock, secondBlock, lastBlock} + result := blocks.LastBlock() + require.NotNil(t, result) + require.Equal(t, lastBlock, result) + require.Equal(t, uint64(3), result.Num) + require.Equal(t, common.HexToHash("0xLAST"), result.Hash) + require.Equal(t, common.HexToHash("0xPARENT"), result.ParentHash) + require.Equal(t, uint64(1234567890), result.Timestamp) + require.False(t, result.IsFinalizedBlock) + require.Len(t, result.Events, 2) + }) +} + +func TestEVMBlock_Brief(t *testing.T) { + t.Run("nil block returns special string", func(t *testing.T) { + var block *EVMBlock + result := block.Brief() + require.Equal(t, "EVMBlock", result) + }) + + t.Run("block with no events", func(t *testing.T) { + block := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 100, + }, + IsFinalizedBlock: true, + Events: []any{}, + } + result := block.Brief() + require.Equal(t, "EVMBlock{Num: 100, IsFinalizedBlock: true, EventsCount: 0}", result) + }) + + t.Run("block with events and finalized", func(t *testing.T) { + block := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 12345, + }, + IsFinalizedBlock: true, + Events: []any{"event1", "event2", "event3"}, + } + result := block.Brief() + require.Equal(t, "EVMBlock{Num: 12345, IsFinalizedBlock: true, EventsCount: 3}", result) + }) + + t.Run("block not finalized with single event", func(t *testing.T) { + block := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 999, + }, + IsFinalizedBlock: false, + Events: []any{"single_event"}, + } + result := block.Brief() + require.Equal(t, "EVMBlock{Num: 999, IsFinalizedBlock: false, EventsCount: 1}", result) + }) + + t.Run("block with nil events", func(t *testing.T) { + block := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 50, + }, + IsFinalizedBlock: false, + Events: nil, + } + result := block.Brief() + require.Equal(t, "EVMBlock{Num: 50, IsFinalizedBlock: false, EventsCount: 0}", result) + }) + + t.Run("block with complete header information", func(t *testing.T) { + block := &EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 777, + Hash: common.HexToHash("0xABC"), + ParentHash: common.HexToHash("0xDEF"), + Timestamp: 1640000000, + }, + IsFinalizedBlock: true, + Events: []any{"ev1", "ev2", "ev3", "ev4", "ev5"}, + } + result := block.Brief() + // Brief only includes Num, IsFinalizedBlock, and EventsCount + require.Equal(t, "EVMBlock{Num: 777, IsFinalizedBlock: true, EventsCount: 5}", result) + }) +} From 953f1b7a2641cb6c57d321f6cd70141e2160f692 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 12:15:38 +0100 Subject: [PATCH 48/75] feat: speed up l1infotree. Tested in sepolia from 17m to 12m --- l1infotreesync/processor.go | 81 +++++- multidownloader/e2e_test.go | 22 +- multidownloader/evm_multidownloader.go | 256 +++++++++--------- .../evm_multidownloader_debug_test.go | 1 - multidownloader/evm_multidownloader_test.go | 31 ++- multidownloader/sync/evmdriver.go | 22 +- .../types/mocks/mock_processor_interface.go | 34 +-- multidownloader/sync/types/processor.go | 14 +- multidownloader/types/syncer_config.go | 31 ++- types/list_block_header.go | 34 ++- types/list_block_header_test.go | 86 ++++++ 11 files changed, 419 insertions(+), 193 deletions(-) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c6f504784..6028096c1 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -12,6 +12,7 @@ import ( dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/l1infotreesync/migrations" "github.com/agglayer/aggkit/log" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree" treetypes "github.com/agglayer/aggkit/tree/types" @@ -347,6 +348,59 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } return nil } +func (p *processor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + if p.isHalted() { + p.log.Errorf("processor is halted due to: %s", p.haltedReason) + return sync.ErrInconsistentState + } + return p.processBlocksSameTx(ctx, blocks) +} + +// processBlocksSameTx processes the blocks in the same transaction, so if any block fails to +// be processed, all the blocks will be rolled back. This is important to keep the integrity of the data, +// specially for the L1 Info tree that relies on the correct order of the leaves +// Note: Maybe could be problems if it rollback with memory data? +func (p *processor) processBlocksSameTx(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + tx, err := db.NewTx(ctx, p.db) + if err != nil { + return err + } + shouldRollback := true + defer func() { + if shouldRollback { + p.log.Debugf("rolling back block processing for blocks") + if errRllbck := tx.Rollback(); errRllbck != nil { + p.log.Errorf("error while rolling back tx %v", errRllbck) + } + } + }() + + for _, block := range blocks.Data { + syncBlock := sync.Block{ + Num: block.Num, + Hash: block.Hash, + Events: block.Events, + } + if err := p.processBlock(tx, syncBlock); err != nil { + return fmt.Errorf("processing block %d: %w", block.Num, err) + } + logFunc := p.log.Debugf + if len(block.Events) > 0 { + logFunc = p.log.Infof + } + logFunc("block %d processed with %d events", block.Num, len(block.Events)) + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("err: %w", err) + } + shouldRollback = false + log.Infof("processed %d blocks, percent %.2f%% complete. LastBlock: %d", + len(blocks.Data), blocks.PercentComplete, blocks.Data[len(blocks.Data)-1].Num) + return nil +} // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree // and updates the last processed block (can be called without events for that purpose) @@ -371,7 +425,24 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } } }() + err = p.processBlock(tx, block) + if err != nil { + return fmt.Errorf("processing block %d: %w", block.Num, err) + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("err: %w", err) + } + shouldRollback = false + logFunc := p.log.Debugf + if len(block.Events) > 0 { + logFunc = p.log.Infof + } + logFunc("block %d processed with %d events", block.Num, len(block.Events)) + return nil +} + +func (p *processor) processBlock(tx dbtypes.Txer, block sync.Block) error { if _, err := tx.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, block.Num, block.Hash.String()); err != nil { return fmt.Errorf("insert Block. err: %w", err) } @@ -469,16 +540,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } } } - - if err := tx.Commit(); err != nil { - return fmt.Errorf("err: %w", err) - } - shouldRollback = false - logFunc := p.log.Debugf - if len(block.Events) > 0 { - logFunc = p.log.Infof - } - logFunc("block %d processed with %d events", block.Num, len(block.Events)) return nil } diff --git a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go index 35f5a3279..975513cdb 100644 --- a/multidownloader/e2e_test.go +++ b/multidownloader/e2e_test.go @@ -15,6 +15,7 @@ import ( "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" mdsync "github.com/agglayer/aggkit/multidownloader/sync" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" aggkitsync "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/test/contracts/logemitter" aggkittypes "github.com/agglayer/aggkit/types" @@ -70,7 +71,7 @@ type logemitterProcessor struct { mdr *EVMMultidownloader mutex sync.Mutex lastBlock *aggkittypes.BlockHeader - events map[uint64]*aggkitsync.Block + events map[uint64]*aggkitsync.EVMBlock } func (p *logemitterProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { @@ -78,7 +79,20 @@ func (p *logemitterProcessor) GetLastProcessedBlockHeader(ctx context.Context) ( defer p.mutex.Unlock() return p.lastBlock, nil } -func (p *logemitterProcessor) ProcessBlock(ctx context.Context, block aggkitsync.Block) error { + +func (tp *logemitterProcessor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + for _, block := range blocks.Data { + if err := tp.ProcessBlock(ctx, block); err != nil { + return err + } + } + return nil +} + +func (p *logemitterProcessor) ProcessBlock(ctx context.Context, block *aggkitsync.EVMBlock) error { p.mutex.Lock() defer p.mutex.Unlock() p.lastBlock = &aggkittypes.BlockHeader{ @@ -88,9 +102,9 @@ func (p *logemitterProcessor) ProcessBlock(ctx context.Context, block aggkitsync p.logger.Infof("Processed block number %d / %s with %d events", block.Num, block.Hash.Hex(), len(block.Events)) if p.events == nil { - p.events = make(map[uint64]*aggkitsync.Block) + p.events = make(map[uint64]*aggkitsync.EVMBlock) } - p.events[block.Num] = &block + p.events[block.Num] = block return nil } func (p *logemitterProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index d7660425e..04e7f35fa 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -117,105 +117,6 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, }, nil } -func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) error { - dh.mutex.Lock() - defer dh.mutex.Unlock() - - if dh.isInitializedNoMutex() { - return fmt.Errorf("registerSyncer: cannot add new syncer config after initialization") - } - - dh.syncersConfig.Add(data) - return nil -} - -func (dh *EVMMultidownloader) MoveUnsafeToSafeIfPossible(ctx context.Context) error { - dh.mutex.Lock() - defer dh.mutex.Unlock() - - finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) - if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get finalized block number: %w", err) - } - - committed := false - tx, err := dh.storage.NewTx(ctx) - if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot create new tx: %w", err) - } - defer func() { - if !committed { - dh.log.Debugf("MoveUnsafeToSafeIfPossible: rolling back tx") - if err := tx.Rollback(); err != nil { - dh.log.Errorf("MoveUnsafeToSafeIfPossible: error rolling back tx: %v", err) - } - } - }() - - blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, &finalizedBlockNumber) - if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err) - } - if blocks.Len() == 0 { - dh.log.Debugf("MoveUnsafeToSafeIfPossible: no unsafe blocks to move to safe") - return nil - } - dh.log.Infof("MoveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+ - "unsafe blocks to finalize=%d", finalizedBlockNumber, len(blocks)) - err = dh.detectReorgs(ctx, blocks) - if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: error detecting reorgs: %w", err) - } - err = dh.storage.UpdateBlockToFinalized(tx, blocks.BlockNumbers()) - if err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err) - } - committed = true - if err := tx.Commit(); err != nil { - return fmt.Errorf("MoveUnsafeToSafeIfPossible: cannot commit tx: %w", err) - } - - return nil -} - -func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, - blocks aggkittypes.ListBlockHeaders) error { - if blocks.Len() == 0 { - dh.log.Debugf("detectReorgs: no blocks to check for reorgs") - return nil - } - blocksNumber := blocks.BlockNumbers() - currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, - blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) - if err != nil { - return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err) - } - // check blocks vs currentBlockHeaders. Must match by number and hash - storageBlocks := blocks.ToMap() - rpcBlocks := currentBlockHeaders.ToMap() - for _, number := range blocksNumber { - rpcBlock, exists := rpcBlocks[number] - if !exists { - return mdrtypes.NewDetectedReorgError(number, - mdrtypes.ReorgDetectionReason_MissingBlock, - common.Hash{}, common.Hash{}, - fmt.Sprintf("detectReorgs: block number %d not found in RPC", number)) - } - storageBlock, exists := storageBlocks[number] - if !exists { - return fmt.Errorf("detectReorgs: block number %d not found in storage", number) - } - if storageBlock.Hash != rpcBlock.Hash { - return mdrtypes.NewDetectedReorgError(storageBlock.Number, - mdrtypes.ReorgDetectionReason_BlockHashMismatch, - storageBlock.Hash, rpcBlock.Hash, - fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", - number, storageBlock.Hash.String(), rpcBlock.Hash.String())) - } - } - return nil -} - func (dh *EVMMultidownloader) GetRPCServices() []jRPC.Service { logger := log.WithFields("module", "multidownloader-rpc-"+dh.name) return []jRPC.Service{ @@ -225,24 +126,18 @@ func (dh *EVMMultidownloader) GetRPCServices() []jRPC.Service { }, } } -func (dh *EVMMultidownloader) CheckDatabase(ctx context.Context) error { - chainID, err := dh.ChainID(ctx) - if err != nil { - return fmt.Errorf("Initialize: cannot get chainID: %w", err) - } - compatibilityStorageChecker := compatibility.NewCompatibilityCheck( - true, - func(ctx context.Context) (storage.DBRuntimeData, error) { - return storage.DBRuntimeData{NetworkID: chainID, - DataVersion: storage.DataVersionCurrent}, nil - }, - compatibility.NewKeyValueToCompatibilityStorage[storage.DBRuntimeData](dh.storage, "multidownloader-"+dh.name), - ) - err = compatibilityStorageChecker.Check(ctx, nil) - if err != nil { - return fmt.Errorf("Initialize: compatibility check failed: %w", err) +// RegisterSyncer registers a new syncer config to the multidownloader. +// it must be called before initialization or Start +func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + + if dh.isInitializedNoMutex() { + return fmt.Errorf("registerSyncer: cannot add new syncer config after initialization") } + + dh.syncersConfig.Add(data) return nil } @@ -254,13 +149,13 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { if dh.isInitializedNoMutex() { return fmt.Errorf("initialize: already initialized") } - dh.log.Infof("Initializing multidownloader...") + dh.log.Debugf("Initializing multidownloader...") // Check DB compatibility - err := dh.CheckDatabase(ctx) + err := dh.checkDatabaseContentsCompatibility(ctx) if err != nil { return err } - dh.log.Infof("Saving syncer configs to storage...") + dh.log.Debugf("Saving syncer configs to storage...") // Save syncer configs to storage; it overrides previous ones but keeps // the synced segments err = dh.storage.UpsertSyncerConfigs(nil, dh.syncersConfig.ContractConfigs()) @@ -273,10 +168,13 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { } // What is pending to download? dh.state = newState - dh.log.Infof("Initialization completed. state: %s", - dh.state.String()) + dh.log.Infof("Initialization completed.configs: %s state: %s", + dh.syncersConfig.Brief(), dh.state.String()) return nil } + +// newStateFromStorage creates a new State based on data on storage and the current syncer configs. +// It is used on initialization and after reorgs to recreate the state of pending and synced segments func (dh *EVMMultidownloader) newStateFromStorage() (*State, error) { syncSegments, err := dh.syncersConfig.SyncSegments() if err != nil { @@ -432,7 +330,7 @@ func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { } // There are unsafe blocks that can be moved to safe and checked? - if err = dh.MoveUnsafeToSafeIfPossible(ctx); err != nil { + if err = dh.moveUnsafeToSafeIfPossible(ctx); err != nil { return err } // Check possible reorgs in unsafe zone @@ -994,3 +892,119 @@ func (dh *EVMMultidownloader) requestLogsSingleTry(ctx context.Context, func (dh *EVMMultidownloader) ShowStatistics(iteration int) { dh.statistics.Show(dh.log.Infof, iteration) } + +// checkDatabaseContentsCompatibility checks that the data already in database +// match the data in config/RPC (e.g: contract addresses, chainID, etc) +func (dh *EVMMultidownloader) checkDatabaseContentsCompatibility(ctx context.Context) error { + chainID, err := dh.ChainID(ctx) + if err != nil { + return fmt.Errorf("Initialize: cannot get chainID: %w", err) + } + compatibilityStorageChecker := compatibility.NewCompatibilityCheck( + true, + func(ctx context.Context) (storage.DBRuntimeData, error) { + return storage.DBRuntimeData{NetworkID: chainID, + DataVersion: storage.DataVersionCurrent}, nil + }, + compatibility.NewKeyValueToCompatibilityStorage[storage.DBRuntimeData](dh.storage, "multidownloader-"+dh.name), + ) + + err = compatibilityStorageChecker.Check(ctx, nil) + if err != nil { + return fmt.Errorf("Initialize: compatibility check failed: %w", err) + } + return nil +} + +// moveUnsafeToSafeIfPossible it's used at start or when finalize block change +// moving the unsafe blocks to safe zone checking that the block is not reorged +// If there are any missmatch it returns an DetectedReorgError +func (dh *EVMMultidownloader) moveUnsafeToSafeIfPossible(ctx context.Context) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot get finalized block number: %w", err) + } + + committed := false + tx, err := dh.storage.NewTx(ctx) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot create new tx: %w", err) + } + defer func() { + if !committed { + dh.log.Debugf("moveUnsafeToSafeIfPossible: rolling back tx") + if err := tx.Rollback(); err != nil { + dh.log.Errorf("moveUnsafeToSafeIfPossible: error rolling back tx: %v", err) + } + } + }() + + blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, &finalizedBlockNumber) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err) + } + if blocks.Len() == 0 { + dh.log.Debugf("moveUnsafeToSafeIfPossible: no unsafe blocks to move to safe") + return nil + } + + err = dh.detectReorgs(ctx, blocks) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: error detecting reorgs: %w", err) + } + err = dh.storage.UpdateBlockToFinalized(tx, blocks.BlockNumbers()) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err) + } + dh.log.Infof("moveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+ + "block moved to safe zone: %s (len=%d)", finalizedBlockNumber, blocks.BlockRange().String(), blocks.Len()) + committed = true + if err := tx.Commit(); err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot commit tx: %w", err) + } + + return nil +} + +// detectReorgs check \param blocks that match RPC +// if not return an DetectedReorgError +func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, + blocks aggkittypes.ListBlockHeaders) error { + if blocks.Len() == 0 { + dh.log.Debugf("detectReorgs: no blocks to check for reorgs") + return nil + } + blocksNumber := blocks.BlockNumbers() + currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) + if err != nil { + return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err) + } + // check blocks vs currentBlockHeaders. Must match by number and hash + storageBlocks := blocks.ToMap() + rpcBlocks := currentBlockHeaders.ToMap() + for _, number := range blocksNumber { + rpcBlock, exists := rpcBlocks[number] + if !exists { + return mdrtypes.NewDetectedReorgError(number, + mdrtypes.ReorgDetectionReason_MissingBlock, + common.Hash{}, common.Hash{}, + fmt.Sprintf("detectReorgs: block number %d not found in RPC", number)) + } + storageBlock, exists := storageBlocks[number] + if !exists { + return fmt.Errorf("detectReorgs: block number %d not found in storage", number) + } + if storageBlock.Hash != rpcBlock.Hash { + return mdrtypes.NewDetectedReorgError(storageBlock.Number, + mdrtypes.ReorgDetectionReason_BlockHashMismatch, + storageBlock.Hash, rpcBlock.Hash, + fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s", + number, storageBlock.Hash.String(), rpcBlock.Hash.String())) + } + } + return nil +} diff --git a/multidownloader/evm_multidownloader_debug_test.go b/multidownloader/evm_multidownloader_debug_test.go index 8813a401d..5a080ecce 100644 --- a/multidownloader/evm_multidownloader_debug_test.go +++ b/multidownloader/evm_multidownloader_debug_test.go @@ -20,5 +20,4 @@ func TestEVMMultidownloaderDebug(t *testing.T) { if err != nil { t.Fatalf("Expected error to be cleared after retrieval, got '%s'", err.Error()) } - } diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index b9cbf68c1..1587ae456 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -19,6 +19,7 @@ import ( "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" mdrsync "github.com/agglayer/aggkit/multidownloader/sync" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkitsync "github.com/agglayer/aggkit/sync" @@ -42,7 +43,19 @@ type testProcessor struct { func (tp *testProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { return tp.lastBlock, nil } -func (tp *testProcessor) ProcessBlock(ctx context.Context, block aggkitsync.Block) error { + +func (tp *testProcessor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + for _, block := range blocks.Data { + if err := tp.ProcessBlock(ctx, block); err != nil { + return err + } + } + return nil +} +func (tp *testProcessor) ProcessBlock(ctx context.Context, block *aggkitsync.EVMBlock) error { log.Infof("PROCESSOR: Processing block number %d", block.Num) tp.lastBlock = &aggkittypes.BlockHeader{ Number: block.Num, @@ -611,7 +624,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { // Mock update to finalized data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195, 196}).Return(nil).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.NoError(t, err) }) @@ -635,7 +648,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). Return(emptyBlocks, nil).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.NoError(t, err) }) @@ -649,7 +662,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). Return(uint64(0), expectedErr).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "cannot get finalized block number") require.Contains(t, err.Error(), expectedErr.Error()) @@ -669,7 +682,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { expectedErr := fmt.Errorf("tx creation error") data.mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "cannot create new tx") require.Contains(t, err.Error(), expectedErr.Error()) @@ -695,7 +708,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). Return(nil, expectedErr).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "cannot get unsafe block bases") require.Contains(t, err.Error(), expectedErr.Error()) @@ -732,7 +745,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { } data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(headerDifferent, nil).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "error detecting reorgs") // Check it's a reorg error @@ -776,7 +789,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { expectedErr := fmt.Errorf("update error") data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(expectedErr).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "cannot update is_final for block bases") require.Contains(t, err.Error(), expectedErr.Error()) @@ -819,7 +832,7 @@ func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) { // Mock update success data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(nil).Once() - err := data.mdr.MoveUnsafeToSafeIfPossible(ctx) + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) require.Error(t, err) require.Contains(t, err.Error(), "cannot commit tx") require.Contains(t, err.Error(), expectedErr.Error()) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index a8693648d..0d3513f01 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -103,27 +103,13 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { return nil } -func (d *EVMDriver) processBlocks(ctx context.Context, b *mdrsynctypes.DownloadResult) error { - if b == nil || len(b.Data) == 0 { +func (d *EVMDriver) processBlocks(ctx context.Context, data *mdrsynctypes.DownloadResult) error { + if data == nil || len(data.Data) == 0 { return nil } - for _, block := range b.Data { - err := d.processBlock(ctx, block) - if err != nil { - return err - } - } - return nil -} -func (d *EVMDriver) processBlock(ctx context.Context, b *sync.EVMBlock) error { - return d.withRetry(ctx, "processBlock", func() error { - block := sync.Block{ - Num: b.Num, - Hash: b.Hash, - Events: b.Events, - } - return d.processor.ProcessBlock(ctx, block) + return d.withRetry(ctx, "processBlocks", func() error { + return d.processor.ProcessBlocks(ctx, data) }) } diff --git a/multidownloader/sync/types/mocks/mock_processor_interface.go b/multidownloader/sync/types/mocks/mock_processor_interface.go index fafd625b7..28840b6d2 100644 --- a/multidownloader/sync/types/mocks/mock_processor_interface.go +++ b/multidownloader/sync/types/mocks/mock_processor_interface.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - sync "github.com/agglayer/aggkit/sync" + synctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mock "github.com/stretchr/testify/mock" types "github.com/agglayer/aggkit/types" @@ -82,17 +82,17 @@ func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) RunAndReturn(run return _c } -// ProcessBlock provides a mock function with given fields: ctx, block -func (_m *ProcessorInterface) ProcessBlock(ctx context.Context, block sync.Block) error { - ret := _m.Called(ctx, block) +// ProcessBlocks provides a mock function with given fields: ctx, blocks +func (_m *ProcessorInterface) ProcessBlocks(ctx context.Context, blocks *synctypes.DownloadResult) error { + ret := _m.Called(ctx, blocks) if len(ret) == 0 { - panic("no return value specified for ProcessBlock") + panic("no return value specified for ProcessBlocks") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, sync.Block) error); ok { - r0 = rf(ctx, block) + if rf, ok := ret.Get(0).(func(context.Context, *synctypes.DownloadResult) error); ok { + r0 = rf(ctx, blocks) } else { r0 = ret.Error(0) } @@ -100,31 +100,31 @@ func (_m *ProcessorInterface) ProcessBlock(ctx context.Context, block sync.Block return r0 } -// ProcessorInterface_ProcessBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlock' -type ProcessorInterface_ProcessBlock_Call struct { +// ProcessorInterface_ProcessBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlocks' +type ProcessorInterface_ProcessBlocks_Call struct { *mock.Call } -// ProcessBlock is a helper method to define mock.On call +// ProcessBlocks is a helper method to define mock.On call // - ctx context.Context -// - block sync.Block -func (_e *ProcessorInterface_Expecter) ProcessBlock(ctx interface{}, block interface{}) *ProcessorInterface_ProcessBlock_Call { - return &ProcessorInterface_ProcessBlock_Call{Call: _e.mock.On("ProcessBlock", ctx, block)} +// - blocks *synctypes.DownloadResult +func (_e *ProcessorInterface_Expecter) ProcessBlocks(ctx interface{}, blocks interface{}) *ProcessorInterface_ProcessBlocks_Call { + return &ProcessorInterface_ProcessBlocks_Call{Call: _e.mock.On("ProcessBlocks", ctx, blocks)} } -func (_c *ProcessorInterface_ProcessBlock_Call) Run(run func(ctx context.Context, block sync.Block)) *ProcessorInterface_ProcessBlock_Call { +func (_c *ProcessorInterface_ProcessBlocks_Call) Run(run func(ctx context.Context, blocks *synctypes.DownloadResult)) *ProcessorInterface_ProcessBlocks_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(sync.Block)) + run(args[0].(context.Context), args[1].(*synctypes.DownloadResult)) }) return _c } -func (_c *ProcessorInterface_ProcessBlock_Call) Return(_a0 error) *ProcessorInterface_ProcessBlock_Call { +func (_c *ProcessorInterface_ProcessBlocks_Call) Return(_a0 error) *ProcessorInterface_ProcessBlocks_Call { _c.Call.Return(_a0) return _c } -func (_c *ProcessorInterface_ProcessBlock_Call) RunAndReturn(run func(context.Context, sync.Block) error) *ProcessorInterface_ProcessBlock_Call { +func (_c *ProcessorInterface_ProcessBlocks_Call) RunAndReturn(run func(context.Context, *synctypes.DownloadResult) error) *ProcessorInterface_ProcessBlocks_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/sync/types/processor.go b/multidownloader/sync/types/processor.go index 1249da739..273cbc787 100644 --- a/multidownloader/sync/types/processor.go +++ b/multidownloader/sync/types/processor.go @@ -3,12 +3,22 @@ package types import ( "context" - "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" ) type ProcessorInterface interface { + // GetLastProcessedBlockHeader it must return the last processed block header. + // or nil if no block has been processed yet. + // It is used to determine from which block number the downloader should start. GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) - ProcessBlock(ctx context.Context, block sync.Block) error + // ProcessBlocks processes the block. It is called for all blocks that are downloaded and + // must be processed. + // NOTE: legacy syncer use ProcessBlock for each block but it's slower because + // can't take advantage of batch processing. ProcessBlocks is called with batches of blocks + // and it is more efficient. + // It is the responsibility of the syncer to process them in batch or one by one. + ProcessBlocks(ctx context.Context, blocks *DownloadResult) error + // Reorg is called when a reorg is detected. Must execute a syncer reorg if apply + // it's possible that the reorged blocks doesn't affect to this syncer Reorg(ctx context.Context, firstReorgedBlock uint64) error } diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index 78647d205..d60de7ae9 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "sort" aggkitcommon "github.com/agglayer/aggkit/common" @@ -59,7 +60,17 @@ func NewSetSyncerConfig() SetSyncerConfig { filters: make(map[SyncerID]aggkittypes.SyncerConfig), } } - +func (f *SetSyncerConfig) Brief() string { + if f == nil || f.filters == nil { + return "SetSyncerConfig{filters: }" + } + result := "SetSyncerConfig{" + for syncerID, filter := range f.filters { + result += fmt.Sprintf("(%s -> [%d - %s])", syncerID, filter.FromBlock, filter.ToBlock.String()) + } + result += "}" + return result +} func (f *SetSyncerConfig) Add(filter aggkittypes.SyncerConfig) { if f.filters == nil { f.filters = make(map[SyncerID]aggkittypes.SyncerConfig) @@ -119,15 +130,6 @@ func (f *SetSyncerConfig) ContractConfigs() []ContractConfig { return convertContractMapToSlice(contractMap) } -// convertContractMapToSlice converts map to slice -func convertContractMapToSlice(contractMap map[common.Address]*ContractConfig) []ContractConfig { - contractConfigs := make([]ContractConfig, 0, len(contractMap)) - for _, cc := range contractMap { - contractConfigs = append(contractConfigs, *cc) - } - return contractConfigs -} - // SyncSegments groups the SetSyncerConfig into segments per contract address and blockRange func (f *SetSyncerConfig) SyncSegments() (*SetSyncSegment, error) { segments := NewSetSyncSegment() @@ -147,3 +149,12 @@ func (f *SetSyncerConfig) SyncSegments() (*SetSyncSegment, error) { } return &segments, nil } + +// convertContractMapToSlice converts map to slice +func convertContractMapToSlice(contractMap map[common.Address]*ContractConfig) []ContractConfig { + contractConfigs := make([]ContractConfig, 0, len(contractMap)) + for _, cc := range contractMap { + contractConfigs = append(contractConfigs, *cc) + } + return contractConfigs +} diff --git a/types/list_block_header.go b/types/list_block_header.go index 1070b00dc..c5978325b 100644 --- a/types/list_block_header.go +++ b/types/list_block_header.go @@ -1,6 +1,10 @@ package types -import "sort" +import ( + "sort" + + aggkitcommon "github.com/agglayer/aggkit/common" +) type ListBlockHeaders []*BlockHeader @@ -39,3 +43,31 @@ func (lbs ListBlockHeaders) BlockNumbers() []uint64 { }) return result } + +func (lbs ListBlockHeaders) BlockRange() aggkitcommon.BlockRange { + if len(lbs) == 0 { + return aggkitcommon.BlockRange{} + } + var minBlock, maxBlock uint64 + initialized := false + for _, header := range lbs { + if header != nil { + if !initialized { + minBlock = header.Number + maxBlock = header.Number + initialized = true + } else { + if header.Number < minBlock { + minBlock = header.Number + } + if header.Number > maxBlock { + maxBlock = header.Number + } + } + } + } + if !initialized { + return aggkitcommon.BlockRange{} + } + return aggkitcommon.NewBlockRange(minBlock, maxBlock) +} diff --git a/types/list_block_header_test.go b/types/list_block_header_test.go index 7b84dd41e..70f149f46 100644 --- a/types/list_block_header_test.go +++ b/types/list_block_header_test.go @@ -3,6 +3,7 @@ package types import ( "testing" + aggkitcommon "github.com/agglayer/aggkit/common" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -165,3 +166,88 @@ func TestListBlockHeaders_BlockNumbers(t *testing.T) { require.Equal(t, []uint64{1, 2, 2}, result) }) } + +func TestListBlockHeaders_BlockRange(t *testing.T) { + t.Run("returns empty block range for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.BlockRange() + + require.Equal(t, aggkitcommon.BlockRange{}, result) + }) + + t.Run("returns correct range for single header", func(t *testing.T) { + header := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + list := ListBlockHeaders{header} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(5, 5) + require.Equal(t, expected, result) + }) + + t.Run("returns correct range for multiple headers in order", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(1, 3) + require.Equal(t, expected, result) + }) + + t.Run("returns correct range for multiple headers out of order", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(8, common.HexToHash("0x08"), 8000, nil) + header4 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3, header4} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(1, 8) + require.Equal(t, expected, result) + }) + + t.Run("skips nil headers when calculating range", func(t *testing.T) { + header1 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + header2 := NewBlockHeader(10, common.HexToHash("0x0a"), 10000, nil) + + list := ListBlockHeaders{nil, header1, nil, header2, nil} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(3, 10) + require.Equal(t, expected, result) + }) + + t.Run("returns empty range for list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.BlockRange() + + require.Equal(t, aggkitcommon.BlockRange{}, result) + }) + + t.Run("handles non-consecutive block numbers", func(t *testing.T) { + header1 := NewBlockHeader(100, common.HexToHash("0x64"), 100000, nil) + header2 := NewBlockHeader(500, common.HexToHash("0x01f4"), 500000, nil) + header3 := NewBlockHeader(250, common.HexToHash("0xfa"), 250000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(100, 500) + require.Equal(t, expected, result) + }) + + t.Run("handles duplicate block numbers", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(5, common.HexToHash("0x05b"), 5001, nil) + header3 := NewBlockHeader(10, common.HexToHash("0x0a"), 10000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(5, 10) + require.Equal(t, expected, result) + }) +} From 04442902824abc26538fabcd2a6fcd19e47d7fe4 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 14:01:04 +0100 Subject: [PATCH 49/75] fix: ut --- multidownloader/evm_multidownloader_debug_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/multidownloader/evm_multidownloader_debug_test.go b/multidownloader/evm_multidownloader_debug_test.go index 5a080ecce..c3ed1e4f2 100644 --- a/multidownloader/evm_multidownloader_debug_test.go +++ b/multidownloader/evm_multidownloader_debug_test.go @@ -1,6 +1,10 @@ package multidownloader -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestEVMMultidownloaderDebug(t *testing.T) { sut := NewEVMMultidownloaderDebug() @@ -11,13 +15,9 @@ func TestEVMMultidownloaderDebug(t *testing.T) { t.Fatalf("Expected error to be injected, got nil") } expectedMsg := "ForceRorg: forced reorg at block number 123" - if err.Error() != expectedMsg { - t.Fatalf("Expected error message '%s', got '%s'", expectedMsg, err.Error()) - } + require.ErrorContains(t, err, expectedMsg) // After getting the error once, it should be cleared err = sut.GetInjectedStartStepError() - if err != nil { - t.Fatalf("Expected error to be cleared after retrieval, got '%s'", err.Error()) - } + require.NoError(t, err, "Expected error to be cleared after retrieval") } From c91d154bdf9187ac310c86bb4e515890e9e3e1db Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 14:55:07 +0100 Subject: [PATCH 50/75] fix: ut --- .../evm_multidownloader_syncers.go | 3 +- .../evm_multidownloader_syncers_test.go | 261 ++++++++++++++++++ multidownloader/sync/evmdownloader.go | 20 +- 3 files changed, 270 insertions(+), 14 deletions(-) diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index da6235eff..80983f9d6 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -100,7 +100,7 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context, return blockHeader, nil } -// HeaderByNumber gets the block header for the given block number from storage or ethClient +// HeaderByNumber gets the block header for the given block number from storage func (dh *EVMMultidownloader) StorageHeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error) { if number == nil { @@ -143,7 +143,6 @@ func (dh *EVMMultidownloader) LogQuery(ctx context.Context, err) } // Calculate UnsafeRange - result, err := dh.storage.LogQuery(nil, *availQuery) if err != nil { return mdrtypes.LogQueryResponse{}, fmt.Errorf("EVMMultidownloader.LogQuery: error executing log query %s: %w", diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go index 0987ee993..73a5ac03d 100644 --- a/multidownloader/evm_multidownloader_syncers_test.go +++ b/multidownloader/evm_multidownloader_syncers_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + aggkitcommon "github.com/agglayer/aggkit/common" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum" @@ -238,3 +239,263 @@ func TestEVMMultidownloader_EthClient(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) require.Equal(t, testData.mockEthClient, testData.mdr.EthClient()) } + +func TestEVMMultidownloader_LogQuery(t *testing.T) { + t.Run("success case with unsafe range calculation", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a log query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + + // Mark the query as synced in state + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber (via GetCurrentBlockNumber) + finalizedBlock := uint64(150) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(finalizedBlock, nil) + + // Mock storage.LogQuery to return a response + expectedResponse := mdrtypes.LogQueryResponse{ + ResponseRange: aggkitcommon.NewBlockRange(100, 200), + } + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). + Return(expectedResponse, nil) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.NoError(t, err) + require.Equal(t, aggkitcommon.NewBlockRange(100, 200), result.ResponseRange) + // UnsafeRange should be the range after finalized block + require.Equal(t, aggkitcommon.NewBlockRange(151, 200), result.UnsafeRange) + }) + + t.Run("logs not synced returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a query that is NOT synced + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + + // Test - state.IsPartiallyAvailable will return false because we didn't call OnNewSyncedLogQuery + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "logs not synced for query") + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("GetFinalizedBlockNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create and sync a query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber to fail + expectedErr := errors.New("finalized block error") + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(uint64(0), expectedErr) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get finalized block number") + require.ErrorIs(t, err, expectedErr) + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("storage.LogQuery error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create and sync a query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(uint64(150), nil) + + // Mock storage.LogQuery to fail + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). + Return(mdrtypes.LogQueryResponse{}, errStorageExample) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "error executing log query") + require.ErrorIs(t, err, errStorageExample) + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("empty unsafe range when all blocks are finalized", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a log query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber - finalized is beyond the query range + finalizedBlock := uint64(250) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(finalizedBlock, nil) + + // Mock storage.LogQuery + expectedResponse := mdrtypes.LogQueryResponse{ + ResponseRange: aggkitcommon.NewBlockRange(100, 200), + } + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). + Return(expectedResponse, nil) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.NoError(t, err) + require.Equal(t, aggkitcommon.NewBlockRange(100, 200), result.ResponseRange) + // UnsafeRange should be empty since all blocks are finalized + require.True(t, result.UnsafeRange.IsEmpty()) + }) +} + +func TestEVMMultidownloader_StorageHeaderByNumber(t *testing.T) { + t.Run("block found in storage with finalized=true", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 123, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, true, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(123)) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.True(t, finalized) + }) + + t.Run("block found in storage with finalized=false", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 456, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, false, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(456)) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.False(t, finalized) + }) + + t.Run("nil block number defaults to LatestBlock", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 999, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, true, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), nil) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.True(t, finalized) + }) + + t.Run("block not found in storage returns nil", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(789), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(789)). + Return(nil, false, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(789)) + + // Assertions + require.NoError(t, err) + require.Nil(t, result) + require.False(t, finalized) + }) + + t.Run("GetCurrentBlockNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + expectedErr := errors.New("block number resolution error") + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), expectedErr) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), &aggkittypes.FinalizedBlock) + + // Assertions + require.Nil(t, result) + require.False(t, finalized) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get block number for finality") + require.ErrorIs(t, err, expectedErr) + }) + + t.Run("GetBlockHeaderByNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(555), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(555)). + Return(nil, false, errStorageExample) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(555)) + + // Assertions + require.Nil(t, result) + require.False(t, finalized) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get BlockHeader number=555") + require.ErrorIs(t, err, errStorageExample) + }) +} diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index e8fddfa00..debc74569 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -15,7 +15,7 @@ import ( ) const ( - percentComplete = 100.0 + percentTotallyCompleted = 100.0 ) var ( @@ -98,19 +98,15 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s. Err: %w", maxLogQuery.String(), ErrLogsNotAvailable) } - - // TODO: Add extra empty block is is in unsafe zone + if result == nil { + return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: executeLogQuery return result=nil. Range: %s", maxLogQuery.BlockRange.String()) + } + // Before returning we check again that lastBlockHeader is not reorged err = d.checkReorgedBlock(ctx, lastBlockHeader) if err != nil { return nil, err } - if result == nil { - d.logger.Debugf("EVMDownloader.DownloadNextBlocks: no logs found for blocks %s", maxLogQuery.BlockRange.String()) - result = &mdrsynctypes.DownloadResult{ - Data: nil, - PercentComplete: percentComplete, - } - } + return result, nil } @@ -174,10 +170,10 @@ func (d *EVMDownloader) calculatePercentCompletation(ctx context.Context, totalBlocks := fullRange.CountBlocks() pendingRange := aggkitcommon.NewBlockRange(lastRange.ToBlock+1, fullRange.ToBlock) if pendingRange.CountBlocks() == 0 { - return percentComplete, nil + return percentTotallyCompleted, nil } blocksCompleted := totalBlocks - pendingRange.CountBlocks() - percent := (float64(blocksCompleted) / float64(totalBlocks)) * percentComplete + percent := (float64(blocksCompleted) / float64(totalBlocks)) * percentTotallyCompleted return percent, nil } From 4b1a4bc23e73f072a5600d07054fe487d0b973cb Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 15:54:00 +0100 Subject: [PATCH 51/75] fix: ut --- multidownloader/sync/evmdownloader.go | 3 ++- multidownloader/types/syncer_config.go | 4 +-- multidownloader/types/syncer_config_test.go | 27 +++++++++++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index debc74569..b9fb3abc5 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -99,7 +99,8 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, maxLogQuery.String(), ErrLogsNotAvailable) } if result == nil { - return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: executeLogQuery return result=nil. Range: %s", maxLogQuery.BlockRange.String()) + return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: executeLogQuery "+ + "return result=nil. Range: %s", maxLogQuery.BlockRange.String()) } // Before returning we check again that lastBlockHeader is not reorged err = d.checkReorgedBlock(ctx, lastBlockHeader) diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index d60de7ae9..b96bfbfb7 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -62,11 +62,11 @@ func NewSetSyncerConfig() SetSyncerConfig { } func (f *SetSyncerConfig) Brief() string { if f == nil || f.filters == nil { - return "SetSyncerConfig{filters: }" + return "SetSyncerConfig{}" } result := "SetSyncerConfig{" for syncerID, filter := range f.filters { - result += fmt.Sprintf("(%s -> [%d - %s])", syncerID, filter.FromBlock, filter.ToBlock.String()) + result += fmt.Sprintf("(%s -> [%d - %s]) ", syncerID, filter.FromBlock, filter.ToBlock.String()) } result += "}" return result diff --git a/multidownloader/types/syncer_config_test.go b/multidownloader/types/syncer_config_test.go index 314c26cfc..317577af0 100644 --- a/multidownloader/types/syncer_config_test.go +++ b/multidownloader/types/syncer_config_test.go @@ -212,3 +212,30 @@ func TestContractConfig_Update_Combined(t *testing.T) { require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) require.Equal(t, []SyncerID{"syncer1", "syncer2"}, cc.Syncers) } + +func TestContractConfig_Update_Brief(t *testing.T) { + t.Run("brief with valid config", func(t *testing.T) { + sut := NewSetSyncerConfig() + sut.Add(aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 10, + ToBlock: aggkittypes.FinalizedBlock, + }) + sut.Add(aggkittypes.SyncerConfig{ + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, + }) + + expected := "SetSyncerConfig{(syncer1 -> [10 - FinalizedBlock]) (syncer2 -> [5 - LatestBlock])}" + require.Equal(t, expected, sut.Brief()) + }) + + t.Run("brief with nil config", func(t *testing.T) { + var cc *SetSyncerConfig + expected := "SetSyncerConfig{}" + require.Equal(t, expected, cc.Brief()) + }) +} From a919d614853407d8ca3f7402a2a506d5cc5a2efa Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 16:17:38 +0100 Subject: [PATCH 52/75] fix: ut --- .../block_notifier_manager_test.go | 4 ++++ .../evm_multidownloader_rpc_test.go | 22 +++++++++++++++++++ multidownloader/types/syncer_config.go | 2 +- multidownloader/types/syncer_config_test.go | 2 +- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/etherman/block_notifier/block_notifier_manager_test.go b/etherman/block_notifier/block_notifier_manager_test.go index 345eb2c7a..6a191ebc7 100644 --- a/etherman/block_notifier/block_notifier_manager_test.go +++ b/etherman/block_notifier/block_notifier_manager_test.go @@ -41,4 +41,8 @@ func TestBlockNotifierManager_GetCurrentBlockNumber(t *testing.T) { currentBlockNumber, err := sut.GetCurrentBlockNumber(t.Context(), aggkittypes.LatestBlock) require.NoError(t, err) require.Equal(t, uint64(1234), currentBlockNumber) + + bn, err := sut.GetCurrentBlockNumber(t.Context(), *aggkittypes.NewBlockNumber(123)) + require.NoError(t, err) + require.Equal(t, uint64(123), bn) } diff --git a/multidownloader/evm_multidownloader_rpc_test.go b/multidownloader/evm_multidownloader_rpc_test.go index 66baae50b..a03fce8ec 100644 --- a/multidownloader/evm_multidownloader_rpc_test.go +++ b/multidownloader/evm_multidownloader_rpc_test.go @@ -35,3 +35,25 @@ func TestEVMMultidownloaderRPC_Status(t *testing.T) { require.Contains(t, fmt.Sprintf("%+v", result), "Status") } + +func TestEVMMultidownloaderRPC_Reorg(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, false) + t.Run("returns error if debug is not enabled", func(t *testing.T) { + sut := EVMMultidownloaderRPC{ + logger: log.WithFields("module", "test"), + downloader: testData.mdr, + } + _, err := sut.Reorg(123) + require.Error(t, err) + require.Contains(t, err.Error(), "debug is not enabled") + }) + t.Run("calls ForceReorg on downloader when debug is enabled", func(t *testing.T) { + testData.mdr.debug = &EVMMultidownloaderDebug{} + sut := EVMMultidownloaderRPC{ + logger: log.WithFields("module", "test"), + downloader: testData.mdr, + } + _, err := sut.Reorg(123) + require.NoError(t, err) + }) +} diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index b96bfbfb7..0bb2f6b61 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -64,7 +64,7 @@ func (f *SetSyncerConfig) Brief() string { if f == nil || f.filters == nil { return "SetSyncerConfig{}" } - result := "SetSyncerConfig{" + result := "SetSyncerConfig{ " for syncerID, filter := range f.filters { result += fmt.Sprintf("(%s -> [%d - %s]) ", syncerID, filter.FromBlock, filter.ToBlock.String()) } diff --git a/multidownloader/types/syncer_config_test.go b/multidownloader/types/syncer_config_test.go index 317577af0..3e094b194 100644 --- a/multidownloader/types/syncer_config_test.go +++ b/multidownloader/types/syncer_config_test.go @@ -229,7 +229,7 @@ func TestContractConfig_Update_Brief(t *testing.T) { ToBlock: aggkittypes.LatestBlock, }) - expected := "SetSyncerConfig{(syncer1 -> [10 - FinalizedBlock]) (syncer2 -> [5 - LatestBlock])}" + expected := "SetSyncerConfig{ (syncer1 -> [10 - FinalizedBlock]) (syncer2 -> [5 - LatestBlock]) }" require.Equal(t, expected, sut.Brief()) }) From ae81496185cbe727782bc549b0f0e6847fade965 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 16:55:10 +0100 Subject: [PATCH 53/75] fix: ut --- multidownloader/evm_multidownloader.go | 15 +++++++++++ multidownloader/evm_multidownloader_debug.go | 4 +-- .../evm_multidownloader_debug_test.go | 4 +-- multidownloader/evm_multidownloader_rpc.go | 2 +- multidownloader/evm_multidownloader_test.go | 27 ++++++++++++++----- 5 files changed, 41 insertions(+), 11 deletions(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 04e7f35fa..0b6eee638 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -193,7 +193,13 @@ func (dh *EVMMultidownloader) newStateFromStorage() (*State, error) { return NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) } +const infiniteLoops = -1 + func (dh *EVMMultidownloader) Start(ctx context.Context) error { + return dh.startNumLoops(ctx, infiniteLoops) +} + +func (dh *EVMMultidownloader) startNumLoops(ctx context.Context, numLoopsToExecute int) error { dh.mutex.Lock() if dh.isRunning { dh.mutex.Unlock() @@ -225,7 +231,13 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { } dh.statistics.StartSyncing() + numLoops := 0 for { + // This is for debug, when reach the number of loops it returns to allow testing + if numLoops == numLoopsToExecute { + return nil + } + numLoops++ // check if context is done if runCtx.Err() != nil { dh.log.Infof("EVMMultidownloader.Start: context done, exiting...") @@ -250,6 +262,7 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { dh.mutex.Lock() // check if context is done during reorg processing if runCtx.Err() != nil { + dh.mutex.Unlock() dh.log.Infof("EVMMultidownloader.Start: context done during reorg processing, exiting...") return runCtx.Err() } @@ -257,12 +270,14 @@ func (dh *EVMMultidownloader) Start(ctx context.Context) error { dh.log.Infof("Processing reorg at block number %d...", reorgErr.OffendingBlockNumber) err = dh.reorgProcessor.ProcessReorg(runCtx, *reorgErr) if err != nil { + dh.mutex.Unlock() dh.log.Warnf("Error running reorg multidownloader: %s", err.Error()) time.Sleep(1 * time.Second) continue } newState, err := dh.newStateFromStorage() if err != nil { + dh.mutex.Unlock() dh.log.Warnf("Error recreating state after reorg processing: %s", err.Error()) time.Sleep(1 * time.Second) continue diff --git a/multidownloader/evm_multidownloader_debug.go b/multidownloader/evm_multidownloader_debug.go index 2102e12db..daf48aea3 100644 --- a/multidownloader/evm_multidownloader_debug.go +++ b/multidownloader/evm_multidownloader_debug.go @@ -17,7 +17,7 @@ func NewEVMMultidownloaderDebug() *EVMMultidownloaderDebug { return &EVMMultidownloaderDebug{} } -func (dh *EVMMultidownloaderDebug) ForceRorg(mismatchingBlockNumber uint64) { +func (dh *EVMMultidownloaderDebug) ForceReorg(mismatchingBlockNumber uint64) { if dh == nil { return } @@ -28,7 +28,7 @@ func (dh *EVMMultidownloaderDebug) ForceRorg(mismatchingBlockNumber uint64) { mdrtypes.ReorgDetectionReason_Forced, common.Hash{}, common.Hash{}, - fmt.Sprintf("ForceRorg: forced reorg at block number %d", mismatchingBlockNumber), + fmt.Sprintf("ForceReorg: forced reorg at block number %d", mismatchingBlockNumber), ) } diff --git a/multidownloader/evm_multidownloader_debug_test.go b/multidownloader/evm_multidownloader_debug_test.go index c3ed1e4f2..20f4b3831 100644 --- a/multidownloader/evm_multidownloader_debug_test.go +++ b/multidownloader/evm_multidownloader_debug_test.go @@ -9,12 +9,12 @@ import ( func TestEVMMultidownloaderDebug(t *testing.T) { sut := NewEVMMultidownloaderDebug() - sut.ForceRorg(123) + sut.ForceReorg(123) err := sut.GetInjectedStartStepError() if err == nil { t.Fatalf("Expected error to be injected, got nil") } - expectedMsg := "ForceRorg: forced reorg at block number 123" + expectedMsg := "ForceReorg: forced reorg at block number 123" require.ErrorContains(t, err, expectedMsg) // After getting the error once, it should be cleared diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index 9b62bd148..356960547 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -59,7 +59,7 @@ func (b *EVMMultidownloaderRPC) Reorg(mismatchingBlockNumber uint64) (interface{ return nil, rpc.NewRPCError(rpc.DefaultErrorCode, "EVMMultidownloaderRPC.ForceReorg: debug is not enabled") } - b.downloader.debug.ForceRorg(mismatchingBlockNumber) + b.downloader.debug.ForceReorg(mismatchingBlockNumber) return struct { Message string `json:"message"` }{ diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 1587ae456..00f66b1c9 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -21,6 +21,7 @@ import ( mdrsync "github.com/agglayer/aggkit/multidownloader/sync" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + "github.com/agglayer/aggkit/multidownloader/types/mocks" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkitsync "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" @@ -394,11 +395,21 @@ func TestEVMMultidownloader_Start(t *testing.T) { require.False(t, testData.mdr.IsInitialized()) }) - // Note: Testing the full Start() loop with auto-initialization is complex because Start() - // has an infinite loop and requires extensive mocking. The key behavior is tested above: - // - If not initialized, Start() calls Initialize() - // - If Initialize() fails, Start() returns the error - // For integration testing of the full Start() flow, see e2e_test.go + t.Run("Start() and reorg", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, false) + testData.mdr.debug = &EVMMultidownloaderDebug{} // Enable debug to test that reorgs are checked even in debug mode + // Fake initialization + testData.mdr.state = NewEmptyState() + ctx := context.Background() + testData.mdr.debug.ForceReorg(1234) + + testData.mockReorgProcessor.EXPECT().ProcessReorg(mock.Anything, mock.Anything).Return(nil).Once() + // It starts, execute 1 loop that do a reorg and then return + err := testData.mdr.startNumLoops(ctx, 1) + // Should return no error + require.NoError(t, err) + }) + } type testDataEVMMultidownloader struct { @@ -408,6 +419,7 @@ type testDataEVMMultidownloader struct { mockStorage *mockmdrtypes.Storager usedStorage mdrtypes.Storager mockBlockNotifierManager *mockethermantypes.BlockNotifierManager + mockReorgProcessor *mocks.ReorgProcessor } func (td *testDataEVMMultidownloader) FakeInitialized(t *testing.T) { @@ -441,6 +453,7 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM } ethClient := mocktypes.NewBaseEthereumClienter(t) mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + mockReorgProcessor := mocks.NewReorgProcessor(t) var mockDB *mockmdrtypes.Storager var realDB *storage.MultidownloaderStorage var useDB mdrtypes.Storager @@ -455,7 +468,8 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM require.NoError(t, err) useDB = realDB } - mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, useDB, mockBlockNotifierManager, nil) + mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, + useDB, mockBlockNotifierManager, mockReorgProcessor) require.NoError(t, err) return &testDataEVMMultidownloader{ mockEthClient: ethClient, @@ -464,6 +478,7 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM mockStorage: mockDB, usedStorage: useDB, mockBlockNotifierManager: mockBlockNotifierManager, + mockReorgProcessor: mockReorgProcessor, } } From e0a2b9fdefd5e6ebd3fbe529e74fa276641ceb40 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:44:14 +0100 Subject: [PATCH 54/75] feat: enable by default L1Multidownloader --- config/default.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/default.go b/config/default.go index 1be88340e..afab53832 100644 --- a/config/default.go +++ b/config/default.go @@ -337,7 +337,7 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}" MaxAttempts = "{{AggSender.AgglayerClient.GRPC.Retry.MaxAttempts}}" [L1Multidownloader] - Enabled = false + Enabled = true DeveloperMode = false StoragePath = "{{PathRWData}}/l1_multidownloader.sqlite" BlockChunkSize = 10000 From 863813f039ec8a429b800ade561f0720ebc6b6db Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 18:21:09 +0100 Subject: [PATCH 55/75] feat: fixed run.go --- cmd/run.go | 38 ++++++++++----------- l1infotreesync/e2e_test.go | 6 ++-- l1infotreesync/l1infotreesync.go | 5 +-- multidownloader/evm_multidownloader_test.go | 6 ++-- test/helpers/e2e.go | 2 +- 5 files changed, 28 insertions(+), 29 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 6addc051a..e81aeb3ce 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -99,7 +99,7 @@ func start(cliCtx *cli.Context) error { } }() var rpcServices []jRPC.Service - l1MdrEthAdapter, l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(l1Client, cfg.L1Multidownloader) + l1MultiDownloader, l1mdServices, err := runL1MultiDownloaderIfNeeded(l1Client, cfg.L1Multidownloader) if err != nil { return fmt.Errorf("failed to create L1MultiDownloader: %w", err) } @@ -120,7 +120,7 @@ func start(cliCtx *cli.Context) error { var backfillWg sync.WaitGroup l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(ctx, components, *cfg, reorgDetectorL1, - l1Client, l1MdrEthAdapter, l1MultiDownloader) + l1Client, l1MultiDownloader) if l1InfoTreeSync != nil { rpcServices = append(rpcServices, l1InfoTreeSync.GetRPCServices()...) } @@ -232,12 +232,12 @@ func start(cliCtx *cli.Context) error { if cfg.Profiling.ProfilingEnabled { go pprof.StartProfilingHTTPServer(ctx, cfg.Profiling) } - if l1MdrEthAdapter != nil { + if l1MultiDownloader != nil { log.Info("starting L1 MultiDownloader...") go func() { - err := l1MdrEthAdapter.Start(ctx) + err := l1MultiDownloader.Start(ctx) if err != nil { - log.Error("l1MultiDownloader stopped: %w", err) + log.Fatal("l1MultiDownloader stopped: %w", err) } }() } @@ -516,8 +516,7 @@ func runL1InfoTreeSyncerIfNeeded( components []string, cfg config.Config, reorgDetectorL1 aggkitsync.ReorgDetector, - _ aggkittypes.BaseEthereumClienter, - l1EthClient aggkittypes.MultiDownloaderLegacy, + l1EthClient aggkittypes.BaseEthereumClienter, l1MultiDownloader *multidownloader.EVMMultidownloader, ) *l1infotreesync.L1InfoTreeSync { if !isNeeded([]string{ @@ -537,10 +536,12 @@ func runL1InfoTreeSyncerIfNeeded( l1infotreesync.FlagNone, ) } else { - l1InfoTreeSync, err = l1infotreesync.New( + log.Info("L1 Info Tree Syncer using legacy sync implementation") + l1Client := aggkitsync.NewAdapterEthClientToMultidownloader(l1EthClient) + l1InfoTreeSync, err = l1infotreesync.NewLegacy( ctx, cfg.L1InfoTreeSync, - l1EthClient, + l1Client, reorgDetectorL1, l1infotreesync.FlagNone, ) @@ -623,16 +624,15 @@ func runReorgDetectorL1IfNeeded( func runL1MultiDownloaderIfNeeded( l1Client aggkittypes.EthClienter, cfg multidownloader.Config, -) (aggkittypes.MultiDownloaderLegacy, - *multidownloader.EVMMultidownloader, []jRPC.Service, error) { +) (*multidownloader.EVMMultidownloader, []jRPC.Service, error) { // The requirements are the same as L1Client if l1Client == nil { - return nil, nil, nil, nil + return nil, nil, nil } // If it's disable It creates a direct eth client if !cfg.Enabled { - log.Warnf("L1 MultiDownloader is disabled, using legacy EthClient") - return aggkitsync.NewAdapterEthClientToMultidownloader(l1Client), nil, nil, nil + log.Warnf("L1 MultiDownloader is disabled, don't creating the service.") + return nil, nil, nil } logger := log.WithFields("module", "L1MultiDownloader") @@ -642,15 +642,15 @@ func runL1MultiDownloaderIfNeeded( "l1", l1Client, // ethClient l1Client, // rpcClient - nil, // storage - nil, // blockNotifierManager - nil, // reorgProcessor + nil, // storage (created inside the multidownloader if nil) + nil, // blockNotifierManager (created inside the multidownloader if nil) + nil, // reorgProcessor (created inside the multidownloader if nil) ) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err) + return nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err) } rpcServices := downloader.GetRPCServices() - return downloader, downloader, rpcServices, nil + return downloader, rpcServices, nil } func runReorgDetectorL2IfNeeded( diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index e1d7a3b83..6527f7c8a 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -121,7 +121,7 @@ func TestE2E(t *testing.T) { mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, mockReorgDetector, + syncer, err = l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, mockReorgDetector, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) } @@ -243,7 +243,7 @@ func TestWithReorgs(t *testing.T) { require.NoError(t, err) require.NoError(t, rd.Start(ctx)) multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - syncer, err = l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + syncer, err = l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) } go syncer.Start(ctx) @@ -443,7 +443,7 @@ func TestStressAndReorgs(t *testing.T) { RequireStorageContentCompatibility: true, WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond * 100), } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + syncer, err := l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index b43ca5632..349581e45 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -143,8 +143,8 @@ func NewMultidownloadBased( }, nil } -// New creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree -func New( +// NewLegacy creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree +func NewLegacy( ctx context.Context, cfg Config, l1Client aggkittypes.MultiDownloaderLegacy, @@ -155,6 +155,7 @@ func New( if err != nil { return nil, err } + // TODO: get the initialBlock from L1 to simplify config lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) if err != nil { diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 00f66b1c9..960e1ee24 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -21,7 +21,6 @@ import ( mdrsync "github.com/agglayer/aggkit/multidownloader/sync" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" - "github.com/agglayer/aggkit/multidownloader/types/mocks" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" aggkitsync "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" @@ -409,7 +408,6 @@ func TestEVMMultidownloader_Start(t *testing.T) { // Should return no error require.NoError(t, err) }) - } type testDataEVMMultidownloader struct { @@ -419,7 +417,7 @@ type testDataEVMMultidownloader struct { mockStorage *mockmdrtypes.Storager usedStorage mdrtypes.Storager mockBlockNotifierManager *mockethermantypes.BlockNotifierManager - mockReorgProcessor *mocks.ReorgProcessor + mockReorgProcessor *mockmdrtypes.ReorgProcessor } func (td *testDataEVMMultidownloader) FakeInitialized(t *testing.T) { @@ -453,7 +451,7 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM } ethClient := mocktypes.NewBaseEthereumClienter(t) mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - mockReorgProcessor := mocks.NewReorgProcessor(t) + mockReorgProcessor := mockmdrtypes.NewReorgProcessor(t) var mockDB *mockmdrtypes.Storager var realDB *storage.MultidownloaderStorage var useDB mdrtypes.Storager diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 00b9221d9..69d241202 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -175,7 +175,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { } else { multidownloaderClient = aggkitsync.NewAdapterEthClientToMultidownloader(l1EthClient) } - l1InfoTreeSync, err := l1infotreesync.New( + l1InfoTreeSync, err := l1infotreesync.NewLegacy( ctx, l1InfoTreeSyncCfg, multidownloaderClient, From c89ca09788545048208e9f768661381763904c81 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 18:43:59 +0100 Subject: [PATCH 56/75] fix: ut --- multidownloader/types/syncer_config.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index 0bb2f6b61..ed1d76402 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -65,7 +65,14 @@ func (f *SetSyncerConfig) Brief() string { return "SetSyncerConfig{}" } result := "SetSyncerConfig{ " - for syncerID, filter := range f.filters { + // Sort syncer IDs to ensure deterministic output + syncerIDs := make([]string, 0, len(f.filters)) + for syncerID := range f.filters { + syncerIDs = append(syncerIDs, syncerID) + } + sort.Strings(syncerIDs) + for _, syncerID := range syncerIDs { + filter := f.filters[syncerID] result += fmt.Sprintf("(%s -> [%d - %s]) ", syncerID, filter.FromBlock, filter.ToBlock.String()) } result += "}" From 7e77c32e3aaef997e810e461d7a14cb290948354 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 18:49:46 +0100 Subject: [PATCH 57/75] fix: determistic order for several functions that works over maps --- multidownloader/evm_multidownloader.go | 5 +++++ multidownloader/types/syncer_config.go | 4 ++++ sync/evmdownloader.go | 5 +++++ 3 files changed, 14 insertions(+) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 0b6eee638..7f0bb12a6 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "regexp" + "sort" "strconv" "strings" "sync" @@ -563,6 +564,10 @@ func getContracts(logQueries []mdrtypes.LogQuery) []common.Address { for addr := range addressMap { addresses = append(addresses, addr) } + // Sort addresses to ensure deterministic output + sort.Slice(addresses, func(i, j int) bool { + return addresses[i].Hex() < addresses[j].Hex() + }) return addresses } diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index ed1d76402..7c8749207 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -163,5 +163,9 @@ func convertContractMapToSlice(contractMap map[common.Address]*ContractConfig) [ for _, cc := range contractMap { contractConfigs = append(contractConfigs, *cc) } + // Sort by address to ensure deterministic output + sort.Slice(contractConfigs, func(i, j int) bool { + return contractConfigs[i].Address.Hex() < contractConfigs[j].Address.Hex() + }) return contractConfigs } diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 45991b12e..cfad11382 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "slices" + "sort" "strings" "time" @@ -45,6 +46,10 @@ func (m LogAppenderMap) GetTopics() []common.Hash { for topic := range m { topics = append(topics, topic) } + // Sort topics to ensure deterministic output + sort.Slice(topics, func(i, j int) bool { + return topics[i].Hex() < topics[j].Hex() + }) return topics } From 738f76797daf9628bc3952065b0b0ce58efe0fe9 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 6 Feb 2026 21:12:23 +0100 Subject: [PATCH 58/75] fix: ut --- config/config_test.go | 1 + multidownloader/config.go | 3 ++- multidownloader/config_test.go | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index dee8249f9..d67865213 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -77,6 +77,7 @@ func TestLoadDefaultConfig(t *testing.T) { require.Equal(t, multidownloader.NewConfigDefault("l1", ""), cfg.L1Multidownloader) cfgL2Multidownloader := multidownloader.NewConfigDefault("l2", "") cfgL2Multidownloader.BlockFinality = aggkittypes.LatestBlock + cfgL2Multidownloader.Enabled = false require.Equal(t, cfgL2Multidownloader, cfg.L2Multidownloader) } diff --git a/multidownloader/config.go b/multidownloader/config.go index 1eec5c1c0..ac4347e30 100644 --- a/multidownloader/config.go +++ b/multidownloader/config.go @@ -54,13 +54,14 @@ func NewConfigDefault(name string, basePathDB string) Config { } dbPath := path.Join(basePathDB, fmt.Sprintf("%s_multidownloader.sqlite", name)) return Config{ - Enabled: false, + Enabled: true, StoragePath: dbPath, BlockChunkSize: defaultBlockChunkSize, MaxParallelBlockHeaderRetrieval: defaultMaxParallelBlockHeaderRetrieval, BlockFinality: aggkittypes.FinalizedBlock, WaitPeriodToCheckCatchUp: types.NewDuration(defaultWaitPeriodToCheckCatchUp), PeriodToCheckReorgs: types.NewDuration(defaultPeriodToCheckReorgs), + DeveloperMode: false, } } diff --git a/multidownloader/config_test.go b/multidownloader/config_test.go index a501a73b8..32564e8e3 100644 --- a/multidownloader/config_test.go +++ b/multidownloader/config_test.go @@ -10,7 +10,7 @@ import ( func TestNewConfigDefault(t *testing.T) { cfg := NewConfigDefault("l1", "/tmp/aggkit/") - require.Equal(t, false, cfg.Enabled) + require.Equal(t, true, cfg.Enabled) require.Equal(t, "/tmp/aggkit/l1_multidownloader.sqlite", cfg.StoragePath) require.Equal(t, uint32(defaultBlockChunkSize), cfg.BlockChunkSize, "BlockChunkSize should be 10000") require.Equal(t, defaultMaxParallelBlockHeaderRetrieval, cfg.MaxParallelBlockHeaderRetrieval, "MaxParallelBlockHeaderRetrieval should be 30") @@ -18,7 +18,7 @@ func TestNewConfigDefault(t *testing.T) { require.Equal(t, types.NewDuration(defaultWaitPeriodToCheckCatchUp), cfg.WaitPeriodToCheckCatchUp, "WaitPeriodToCheckCatchUp should be 10 seconds") require.Equal(t, types.NewDuration(defaultPeriodToCheckReorgs), cfg.PeriodToCheckReorgs, "PeriodToCheckReorgs should be 5 seconds") - require.False(t, cfg.Enabled, "Enabled should be false by default") + require.True(t, cfg.Enabled, "Enabled should be true by default") } func TestNewConfigDefault_ValidatesCorrectly(t *testing.T) { From e5ce4fdd6c54f7425471207a8de6dc2ecaa8a048 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Sat, 7 Feb 2026 10:32:24 +0100 Subject: [PATCH 59/75] fix: launch dependecy --- cmd/run.go | 28 +++++++++++++++++----------- l1infotreesync/l1infotreesync.go | 3 ++- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index e81aeb3ce..5959f4491 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -159,6 +159,23 @@ func start(cliCtx *cli.Context) error { go b.Start(ctx) log.Info("Bridge service started") } + if l1MultiDownloader != nil { + log.Info("starting L1 MultiDownloader...") + err = l1MultiDownloader.Initialize(ctx) + if err != nil { + log.Fatal("failed to initialize L1 MultiDownloader: ", err) + } + go func() { + err := l1MultiDownloader.Start(ctx) + if err != nil { + log.Fatal("l1MultiDownloader stopped: %w", err) + } + }() + } + if l1InfoTreeSync != nil { + log.Info("starting L1 Info Tree Syncer...") + go l1InfoTreeSync.Start(ctx) + } for _, component := range components { switch component { @@ -232,15 +249,6 @@ func start(cliCtx *cli.Context) error { if cfg.Profiling.ProfilingEnabled { go pprof.StartProfilingHTTPServer(ctx, cfg.Profiling) } - if l1MultiDownloader != nil { - log.Info("starting L1 MultiDownloader...") - go func() { - err := l1MultiDownloader.Start(ctx) - if err != nil { - log.Fatal("l1MultiDownloader stopped: %w", err) - } - }() - } waitSignal([]context.CancelFunc{cancel}, &backfillWg) @@ -549,8 +557,6 @@ func runL1InfoTreeSyncerIfNeeded( if err != nil { log.Fatal(err) } - go l1InfoTreeSync.Start(ctx) - return l1InfoTreeSync } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 349581e45..e37be4c9d 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -101,7 +101,7 @@ func NewMultidownloadBased( } logger := log.WithFields("syncer", syncerID) // TODO: move the durations to config file (mdrsync.NewEVMDownloader) - logger.Infof("Creating L1 Info Tree Syncer with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", + logger.Infof("Creating L1InfoTreeSync with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", cfg.WaitForNewBlocksPeriod.String(), cfg.RetryAfterErrorPeriod.String(), ) @@ -151,6 +151,7 @@ func NewLegacy( reorgDetector sync.ReorgDetector, flags CreationFlags, ) (*L1InfoTreeSync, error) { + processor, err := newProcessor(cfg.DBPath) if err != nil { return nil, err From d637ecd4df97b6dd46544a93a54d8625b89240f1 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Sat, 7 Feb 2026 11:00:53 +0100 Subject: [PATCH 60/75] fix: nil pointer --- multidownloader/evm_multidownloader.go | 6 ++++++ scripts/local_config_helper | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 7f0bb12a6..56f241f41 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -391,6 +391,12 @@ func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error return fmt.Errorf("WaitForNewLatestBlocks: cannot get block header for latest synced block %d: %w", latestSyncedBlockNumber, err) } + // If the block is finalized and have no events it's not in DB + if lastBlockHeader == nil { + lastBlockHeader = &aggkittypes.BlockHeader{ + Number: latestSyncedBlockNumber, + } + } dh.log.Infof("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) _, err = dh.waitForNewBlocks(ctx, lastSyncedBlockTag, lastBlockHeader, finalized) return err diff --git a/scripts/local_config_helper b/scripts/local_config_helper index c3a80f046..c77ce6a1c 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -343,13 +343,14 @@ function main(){ check_requirements create_dest_folder - common_aggsender_committee_override_urls + #common_aggsender_committee_override_urls download_kurtosis_artifacts export_ports_from_kurtosis export_values_of_aggkit_config $DEST/config.toml export_forced_values # Generate config file + echo "go run scripts/run_template.go $ORIG_TEMPLATE_FILE >$DEST_TEMPLATE_FILE" go run scripts/run_template.go $ORIG_TEMPLATE_FILE >$DEST_TEMPLATE_FILE ok_or_fatal "Error generating template" From ca6527e8c018d67f2ad87191c5fb2a55f66aea81 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 10:10:46 +0100 Subject: [PATCH 61/75] feat: add completion percentage tracking to l1infotreesync RPC status Implement completion percentage reporting in l1infotreesync RPC status endpoint to provide sync progress visibility. - Add GetCompletionPercentage() to DriverInterface and L1InfoTreeSync - Add completionPercentage field to StatusInfo RPC response - Rename PercentComplete to CompletionPercentage across multidownloader for consistency - Legacy syncer returns nil for completion percentage (not supported) - Update local_config_helper script with --skip-committee-override option --- l1infotreesync/l1infotreesync.go | 5 +++ l1infotreesync/l1infotreesync_rpc.go | 7 +++- l1infotreesync/l1infotreesync_test.go | 14 +++++++ l1infotreesync/mock_driver_interface.go | 47 ++++++++++++++++++++++ l1infotreesync/mock_l1_info_tree_syncer.go | 47 ++++++++++++++++++++++ l1infotreesync/processor.go | 2 +- multidownloader/sync/evmdownloader.go | 4 +- multidownloader/sync/evmdriver.go | 44 +++++++++++++++++--- multidownloader/sync/types/downloader.go | 6 +-- scripts/local_config_helper | 19 ++++++--- sync/evmdriver.go | 5 +++ sync/evmdriver_test.go | 5 +++ 12 files changed, 186 insertions(+), 19 deletions(-) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index e37be4c9d..81714d498 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -40,6 +40,7 @@ var ( type DriverInterface interface { Sync(ctx context.Context) + GetCompletionPercentage() *float64 } type DownloaderInterface interface { @@ -245,6 +246,10 @@ func (d *L1InfoTreeSync) Finality() aggkittypes.BlockNumberFinality { return d.downloader.Finality() } +func (d *L1InfoTreeSync) GetCompletionPercentage() *float64 { + return d.driver.GetCompletionPercentage() +} + // GetRPCServices returns the list of services that the RPC provider exposes func (a *L1InfoTreeSync) GetRPCServices() []jRPC.Service { logger := log.WithFields("module", "l1infotreesync-rpc") diff --git a/l1infotreesync/l1infotreesync_rpc.go b/l1infotreesync/l1infotreesync_rpc.go index c092ac478..34054b614 100644 --- a/l1infotreesync/l1infotreesync_rpc.go +++ b/l1infotreesync/l1infotreesync_rpc.go @@ -10,12 +10,14 @@ import ( ) type StatusInfo struct { - Status string `json:"status"` + Status string `json:"status"` + CompletionPercentage *float64 `json:"completionPercentage,omitempty"` } type L1InfoTreeSyncer interface { GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) GetLatestL1InfoLeaf(ctx context.Context) (*L1InfoTreeLeaf, error) GetInfoByRoot(ger common.Hash) (*L1InfoTreeLeaf, error) + GetCompletionPercentage() *float64 } // L1InfoTreeSyncRPC is the RPC interface for the L1InfoTreeSync @@ -39,7 +41,8 @@ func NewL1InfoTreeSyncRPC( // -d '{"method":"l1infotreesync_status", "params":[], "id":1}' func (b *L1InfoTreeSyncRPC) Status() (interface{}, rpc.Error) { info := StatusInfo{ - Status: "running", + Status: "running", + CompletionPercentage: b.l1InfoTreeSyncer.GetCompletionPercentage(), } return info, nil } diff --git a/l1infotreesync/l1infotreesync_test.go b/l1infotreesync/l1infotreesync_test.go index 69332dbf4..870916d7f 100644 --- a/l1infotreesync/l1infotreesync_test.go +++ b/l1infotreesync/l1infotreesync_test.go @@ -319,3 +319,17 @@ func TestFinality(t *testing.T) { } require.Equal(t, aggkittypes.LatestBlock, s.Finality()) } + +func TestL1InfoTreeSync_GetCompletionPercentage(t *testing.T) { + mockEVMDriver := NewDriverInterfaceMock(t) + s := L1InfoTreeSync{ + driver: mockEVMDriver, + } + mockEVMDriver.EXPECT().GetCompletionPercentage().Return(nil).Once() + + require.Nil(t, s.GetCompletionPercentage(), "expected GetCompletionPercentage to return nil for legacy syncer") + percent := float64(10.0) + mockEVMDriver.EXPECT().GetCompletionPercentage().Return(&percent).Once() + require.Equal(t, &percent, s.GetCompletionPercentage()) + +} diff --git a/l1infotreesync/mock_driver_interface.go b/l1infotreesync/mock_driver_interface.go index d5f5b4a33..d8f4a5a0c 100644 --- a/l1infotreesync/mock_driver_interface.go +++ b/l1infotreesync/mock_driver_interface.go @@ -21,6 +21,53 @@ func (_m *DriverInterfaceMock) EXPECT() *DriverInterfaceMock_Expecter { return &DriverInterfaceMock_Expecter{mock: &_m.Mock} } +// GetCompletionPercentage provides a mock function with no fields +func (_m *DriverInterfaceMock) GetCompletionPercentage() *float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCompletionPercentage") + } + + var r0 *float64 + if rf, ok := ret.Get(0).(func() *float64); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*float64) + } + } + + return r0 +} + +// DriverInterfaceMock_GetCompletionPercentage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCompletionPercentage' +type DriverInterfaceMock_GetCompletionPercentage_Call struct { + *mock.Call +} + +// GetCompletionPercentage is a helper method to define mock.On call +func (_e *DriverInterfaceMock_Expecter) GetCompletionPercentage() *DriverInterfaceMock_GetCompletionPercentage_Call { + return &DriverInterfaceMock_GetCompletionPercentage_Call{Call: _e.mock.On("GetCompletionPercentage")} +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) Run(run func()) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) Return(_a0 *float64) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) RunAndReturn(run func() *float64) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Return(run) + return _c +} + // Sync provides a mock function with given fields: ctx func (_m *DriverInterfaceMock) Sync(ctx context.Context) { _m.Called(ctx) diff --git a/l1infotreesync/mock_l1_info_tree_syncer.go b/l1infotreesync/mock_l1_info_tree_syncer.go index fa1759b7f..e026414cf 100644 --- a/l1infotreesync/mock_l1_info_tree_syncer.go +++ b/l1infotreesync/mock_l1_info_tree_syncer.go @@ -23,6 +23,53 @@ func (_m *L1InfoTreeSyncerMock) EXPECT() *L1InfoTreeSyncerMock_Expecter { return &L1InfoTreeSyncerMock_Expecter{mock: &_m.Mock} } +// GetCompletionPercentage provides a mock function with no fields +func (_m *L1InfoTreeSyncerMock) GetCompletionPercentage() *float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCompletionPercentage") + } + + var r0 *float64 + if rf, ok := ret.Get(0).(func() *float64); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*float64) + } + } + + return r0 +} + +// L1InfoTreeSyncerMock_GetCompletionPercentage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCompletionPercentage' +type L1InfoTreeSyncerMock_GetCompletionPercentage_Call struct { + *mock.Call +} + +// GetCompletionPercentage is a helper method to define mock.On call +func (_e *L1InfoTreeSyncerMock_Expecter) GetCompletionPercentage() *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + return &L1InfoTreeSyncerMock_GetCompletionPercentage_Call{Call: _e.mock.On("GetCompletionPercentage")} +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) Run(run func()) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) Return(_a0 *float64) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) RunAndReturn(run func() *float64) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Return(run) + return _c +} + // GetInfoByGlobalExitRoot provides a mock function with given fields: ger func (_m *L1InfoTreeSyncerMock) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { ret := _m.Called(ger) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 6028096c1..2578251bf 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -398,7 +398,7 @@ func (p *processor) processBlocksSameTx(ctx context.Context, blocks *mdrsynctype } shouldRollback = false log.Infof("processed %d blocks, percent %.2f%% complete. LastBlock: %d", - len(blocks.Data), blocks.PercentComplete, blocks.Data[len(blocks.Data)-1].Num) + len(blocks.Data), blocks.CompletionPercentage, blocks.Data[len(blocks.Data)-1].Num) return nil } diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index b9fb3abc5..32abd502b 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -139,8 +139,8 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, d.logger.Warn(err.Error()) } result := &mdrsynctypes.DownloadResult{ - Data: d.logQueryResponseToEVMBlocks(ctx, logQueryResponse), - PercentComplete: percentComplete, + Data: d.logQueryResponseToEVMBlocks(ctx, logQueryResponse), + CompletionPercentage: percentComplete, } err = d.addLastBlockIfNotIncluded(ctx, result, logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 0d3513f01..1a410256f 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" + "sync" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db/compatibility" mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" - "github.com/agglayer/aggkit/sync" + aggkitsync "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" ) @@ -17,10 +18,18 @@ type EVMDriver struct { processor mdrsynctypes.ProcessorInterface downloader mdrsynctypes.DownloaderInterface syncerConfig aggkittypes.SyncerConfig - rh *sync.RetryHandler + rh *aggkitsync.RetryHandler logger aggkitcommon.Logger compatibilityChecker compatibility.CompatibilityChecker - syncBlockChunkSize uint64 + // This mutext protect to: + // - syncBlockChunkSize, because it can be updated dynamically by the user and read by the sync loop + // - completionPercentage, because it can be updated by the downloader and read by the sync loop and by the API server + mutex sync.Mutex + syncBlockChunkSize uint64 + //It's the percentage of completion of the download, it can be used to estimate the progress of the sync + // can be nil is there are no information yet + // 0 -> 0%, 100, -> 100% + completionPercentage *float64 } func NewEVMDriver( @@ -29,7 +38,7 @@ func NewEVMDriver( downloader mdrsynctypes.DownloaderInterface, syncerConfig aggkittypes.SyncerConfig, syncBlockChunkSize uint64, - rh *sync.RetryHandler, + rh *aggkitsync.RetryHandler, compatibilityChecker compatibility.CompatibilityChecker, ) *EVMDriver { return &EVMDriver{ @@ -98,7 +107,7 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { if blocks != nil { LastProcessedBlock := blocks.Data.LastBlock() d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s", - len(blocks.Data), blocks.PercentComplete, LastProcessedBlock.Brief()) + len(blocks.Data), blocks.CompletionPercentage, LastProcessedBlock.Brief()) } return nil } @@ -108,9 +117,14 @@ func (d *EVMDriver) processBlocks(ctx context.Context, data *mdrsynctypes.Downlo return nil } - return d.withRetry(ctx, "processBlocks", func() error { + err := d.withRetry(ctx, "processBlocks", func() error { return d.processor.ProcessBlocks(ctx, data) }) + // If no error update percentage + if err == nil { + d.setCompletionPercentage(data.CompletionPercentage) + } + return err } func (d *EVMDriver) handleReorg(ctx context.Context, err *mdrtypes.ReorgedError) error { @@ -140,3 +154,21 @@ func (d *EVMDriver) withRetry(ctx context.Context, opName string, fn func() erro } } } + +func (d *EVMDriver) GetCompletionPercentage() *float64 { + d.mutex.Lock() + defer d.mutex.Unlock() + // This is done to copy the value avoid passing internal + // pointer. + if d.completionPercentage == nil { + return nil + } + percent := *d.completionPercentage + return &percent +} + +func (d *EVMDriver) setCompletionPercentage(percent float64) { + d.mutex.Lock() + defer d.mutex.Unlock() + d.completionPercentage = &percent +} diff --git a/multidownloader/sync/types/downloader.go b/multidownloader/sync/types/downloader.go index fd1b1096a..818839c30 100644 --- a/multidownloader/sync/types/downloader.go +++ b/multidownloader/sync/types/downloader.go @@ -9,9 +9,9 @@ import ( type DownloadResult struct { Data sync.EVMBlocks - // PercentComplete indicates the percent of completion of the download + // CompletionPercentage indicates the percent of completion of the download // 0 -> 0%, 100 -> 100% - PercentComplete float64 + CompletionPercentage float64 } type DownloaderInterface interface { @@ -26,7 +26,7 @@ type DownloaderInterface interface { // - DownloadResult: the result of the download, containing the blocks and the percent complete // DownloadResult is never nil // DownloadResult.Data could be nil if no blocks were downloaded - // DownloadResult.PercentComplete indicates the percent of completion of the download + // DownloadResult.CompletionPercentage indicates the percent of completion of the download // 0 -> 0%, 100 -> 100% // - error: if any error occurred during the download // special error: errors.Is(err, ErrLogsNotAvailable) indicates that it works diff --git a/scripts/local_config_helper b/scripts/local_config_helper index c77ce6a1c..befa7e0ea 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -312,8 +312,9 @@ function parse_command_line_args() { -h | --help) echo "Usage: $0" echo " -h: help" - echo " -e: kurtosis enclave name (default $KURTOSIS_ENCLAVE)" - echo " -p: expose same ports as service in kurtosis (RPC and REST)" + echo " -e: kurtosis enclave name (default $KURTOSIS_ENCLAVE) (--enclave)" + echo " -p: expose same ports as service in kurtosis (RPC and REST) (--expose-ports) " + echo " -s: skip setting up aggsender committee override URLs (--skip-comittee-override)" exit 0 ;; -e | --enclave) @@ -325,6 +326,10 @@ function parse_command_line_args() { export EXPOSE_PORTS=0 shift ;; + -s | --skip-comittee-override) + export SKIP_COMMITTEE_OVERRIDE=1 + shift + ;; -*) echo "Invalid Option: $1" 1>&2 exit 1 @@ -342,15 +347,19 @@ function main(){ parse_command_line_args $* check_requirements create_dest_folder - - #common_aggsender_committee_override_urls + if [ -z $SKIP_COMMITTEE_OVERRIDE ]; then + log_debug "Setting up aggsender_committee_override_urls with validators in kurtosis enclave" + common_aggsender_committee_override_urls + else + log_debug "skipping setup of aggsender_committee_override_urls with validators in kurtosis enclave, setting it to empty" + export aggsender_committee_override_urls="{}" + fi download_kurtosis_artifacts export_ports_from_kurtosis export_values_of_aggkit_config $DEST/config.toml export_forced_values # Generate config file - echo "go run scripts/run_template.go $ORIG_TEMPLATE_FILE >$DEST_TEMPLATE_FILE" go run scripts/run_template.go $ORIG_TEMPLATE_FILE >$DEST_TEMPLATE_FILE ok_or_fatal "Error generating template" diff --git a/sync/evmdriver.go b/sync/evmdriver.go index d0e984f0f..3352f5386 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -125,6 +125,11 @@ func (d *EVMDriver) SubscribeToNewBlocks(subscriberName string) <-chan Block { return d.blockSubscriber.Subscribe(subscriberName) } +// Legacy syncer doesn't support completion percentage, so we return nil here. +func (d *EVMDriver) GetCompletionPercentage() *float64 { + return nil +} + func (d *EVMDriver) Sync(ctx context.Context) { reset: var ( diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index af43a98bd..2b278f32a 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -435,3 +435,8 @@ func TestEVMDriver_Sync(t *testing.T) { }) } } + +func TestEVMDriver_GetCompletionPercentage(t *testing.T) { + sut := &EVMDriver{} + require.Nil(t, sut.GetCompletionPercentage(), "expected GetCompletionPercentage to return nil for legacy syncer") +} From b8ff53177365708facf8fb4be19bcdbe02f3057f Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 10:48:48 +0100 Subject: [PATCH 62/75] fix: ut and lint --- cmd/run.go | 2 +- l1infotreesync/l1infotreesync.go | 1 - l1infotreesync/l1infotreesync_rpc_test.go | 19 ++++++++++++++----- l1infotreesync/l1infotreesync_test.go | 1 - multidownloader/sync/evmdriver.go | 2 +- 5 files changed, 16 insertions(+), 9 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 5959f4491..ba716d3ce 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -163,6 +163,7 @@ func start(cliCtx *cli.Context) error { log.Info("starting L1 MultiDownloader...") err = l1MultiDownloader.Initialize(ctx) if err != nil { + //nolint:gocritic log.Fatal("failed to initialize L1 MultiDownloader: ", err) } go func() { @@ -194,7 +195,6 @@ func start(cliCtx *cli.Context) error { committeeQuerier, ) if err != nil { - //nolint:gocritic log.Fatal(err) } rpcServices = append(rpcServices, aggsender.GetRPCServices()...) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 81714d498..97b87438a 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -152,7 +152,6 @@ func NewLegacy( reorgDetector sync.ReorgDetector, flags CreationFlags, ) (*L1InfoTreeSync, error) { - processor, err := newProcessor(cfg.DBPath) if err != nil { return nil, err diff --git a/l1infotreesync/l1infotreesync_rpc_test.go b/l1infotreesync/l1infotreesync_rpc_test.go index a5509aeee..41e8ae4f6 100644 --- a/l1infotreesync/l1infotreesync_rpc_test.go +++ b/l1infotreesync/l1infotreesync_rpc_test.go @@ -13,17 +13,26 @@ import ( var testHash = common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") func TestL1InfoTreeSyncRPC_Status(t *testing.T) { - rpc := NewL1InfoTreeSyncRPC( - log.WithFields("modules", "test"), - nil, - ) - + mockSyncer := NewL1InfoTreeSyncerMock(t) + rpc := NewL1InfoTreeSyncRPC(log.WithFields("modules", "test"), mockSyncer) + mockSyncer.EXPECT().GetCompletionPercentage().Return(nil).Once() result, err := rpc.Status() require.Nil(t, err, "expected no error from Status") statusInfo, ok := result.(StatusInfo) require.True(t, ok, "expected result to be of type StatusInfo") assert.Equal(t, "running", statusInfo.Status, "status should be 'running'") + require.Nil(t, statusInfo.CompletionPercentage, "expected CompletionPercentage to be nil") + + percent := float64(20.0) + mockSyncer.EXPECT().GetCompletionPercentage().Return(&percent).Once() + result, err = rpc.Status() + require.NoError(t, err) + statusInfo, ok = result.(StatusInfo) + require.True(t, ok, "expected result to be of type StatusInfo") + assert.Equal(t, "running", statusInfo.Status, "status should be 'running'") + require.NotNil(t, statusInfo.CompletionPercentage, "expected CompletionPercentage to not be nil") + assert.Equal(t, percent, *statusInfo.CompletionPercentage, "expected CompletionPercentage to match the mock value") } func TestL1InfoTreeSyncRPC_GetInfoByGlobalExitRoot_NilParam_Success(t *testing.T) { diff --git a/l1infotreesync/l1infotreesync_test.go b/l1infotreesync/l1infotreesync_test.go index 870916d7f..96ab5a0c4 100644 --- a/l1infotreesync/l1infotreesync_test.go +++ b/l1infotreesync/l1infotreesync_test.go @@ -331,5 +331,4 @@ func TestL1InfoTreeSync_GetCompletionPercentage(t *testing.T) { percent := float64(10.0) mockEVMDriver.EXPECT().GetCompletionPercentage().Return(&percent).Once() require.Equal(t, &percent, s.GetCompletionPercentage()) - } diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 1a410256f..63173a54a 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -26,7 +26,7 @@ type EVMDriver struct { // - completionPercentage, because it can be updated by the downloader and read by the sync loop and by the API server mutex sync.Mutex syncBlockChunkSize uint64 - //It's the percentage of completion of the download, it can be used to estimate the progress of the sync + // It's the percentage of completion of the download, it can be used to estimate the progress of the sync // can be nil is there are no information yet // 0 -> 0%, 100, -> 100% completionPercentage *float64 From 9e887daeec33bfc944220ea2dd717ca71711e89d Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 11:31:14 +0100 Subject: [PATCH 63/75] fix: copilot comments --- l1infotreesync/processor.go | 11 ++++++++++- multidownloader/reorg_processor.go | 13 ++++++++----- multidownloader/reorg_processor_port.go | 3 ++- multidownloader/reorg_processor_test.go | 16 +++++++++++++++- multidownloader/sync/evmdriver.go | 3 +++ scripts/local_config_helper | 6 +++--- 6 files changed, 41 insertions(+), 11 deletions(-) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 2578251bf..01d863520 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -262,7 +262,16 @@ func (p *processor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittyp if errors.Is(err, sql.ErrNoRows) { return nil, nil } - hdr := aggkittypes.NewBlockHeader(lastProcessedBlockNum, common.HexToHash(*hash), 0, nil) + if err != nil { + return nil, err + } + var blockHash common.Hash + if hash == nil { + blockHash = common.Hash{} // zero hash if no hash is available + } else { + blockHash = common.HexToHash(*hash) + } + hdr := aggkittypes.NewBlockHeader(lastProcessedBlockNum, blockHash, 0, nil) return hdr, err } diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 0eca7861f..7335c3f8a 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -40,7 +40,10 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, var err error // We known that offendingBlockNumber is affected, so we go backwards until we find // the first unaffected block - currentBlockNumber := detectedReorgError.OffendingBlockNumber + offendingBlockNumber := detectedReorgError.OffendingBlockNumber + if offendingBlockNumber == 0 { + return fmt.Errorf("ProcessReorg: reorg detected at block 0, this should never happen, check the reorg detection logic") + } tx, err := rm.port.NewTx(ctx) if err != nil { return fmt.Errorf("ProcessReorg: error starting new tx: %w", err) @@ -54,7 +57,7 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, } } }() - firstUnaffectedBlock, err := rm.findFirstUnaffectedBlock(ctx, tx, currentBlockNumber-1) + firstUnaffectedBlock, err := rm.findFirstUnaffectedBlock(ctx, tx, offendingBlockNumber-1) if err != nil { return fmt.Errorf("ProcessReorg: error finding first unaffected block: %w", err) } @@ -64,12 +67,12 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, "It acts as missing blocks, so is going to delete blocks > %d."+ "Overriding real unaffected block found %d."+ "(forbidden in production! but developerMode is enabled))!!. ", - currentBlockNumber, currentBlockNumber, firstUnaffectedBlock) - firstUnaffectedBlock = currentBlockNumber - 1 + offendingBlockNumber, offendingBlockNumber, firstUnaffectedBlock) + firstUnaffectedBlock = offendingBlockNumber - 1 } else { rm.log.Warnf("ProcessReorg: forced reorg at block %d with developerMode disabled, "+ "using the first unaffected block found %d", - currentBlockNumber, firstUnaffectedBlock) + offendingBlockNumber, firstUnaffectedBlock) // Continue with the reorg using the firstUnaffectedBlock found } } diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go index 2da633620..0bc492949 100644 --- a/multidownloader/reorg_processor_port.go +++ b/multidownloader/reorg_processor_port.go @@ -40,12 +40,13 @@ func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querie } func (r *ReorgPort) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) { - highestBlock, err := r.storage.GetHighestBlockNumber(nil) + highestBlock, err := r.storage.GetHighestBlockNumber(tx) if err != nil { return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block from storage: %w", err) } return highestBlock, nil } + func (r *ReorgPort) MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) { return r.storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) } diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index b0174b7b6..50910a720 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -285,7 +285,21 @@ func TestReorgProcessor_FindFirstUnaffectedBlock(t *testing.T) { func TestReorgProcessor_ProcessReorg(t *testing.T) { mockLogger := commonmocks.NewLogger(t) - + t.Run("can't reorg if the offending block is genesis (0)", func(t *testing.T) { + processor := &ReorgProcessor{ + log: log.WithFields("module", "test"), + port: nil, + } + ctx := context.Background() + reorgErr := mdtypes.NewDetectedReorgError( + 0, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg at genesis") + err := processor.ProcessReorg(ctx, *reorgErr) + require.Error(t, err) + }) t.Run("returns error when NewTx fails", func(t *testing.T) { mockPort := mdmocks.NewReorgPorter(t) diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index 63173a54a..ce69362c9 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -99,7 +99,10 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { case errors.Is(err, ErrLogsNotAvailable): d.logger.Debug("EVMDriver: no logs available yet, waiting to retry") return nil + default: + return fmt.Errorf("EVMDriver: error downloading blocks: %w", err) } + } if err = d.processBlocks(ctx, blocks); err != nil { return fmt.Errorf("EVMDriver: error processing blocks: %w", err) diff --git a/scripts/local_config_helper b/scripts/local_config_helper index befa7e0ea..5b0c200b4 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -314,7 +314,7 @@ function parse_command_line_args() { echo " -h: help" echo " -e: kurtosis enclave name (default $KURTOSIS_ENCLAVE) (--enclave)" echo " -p: expose same ports as service in kurtosis (RPC and REST) (--expose-ports) " - echo " -s: skip setting up aggsender committee override URLs (--skip-comittee-override)" + echo " -s: skip setting up aggsender committee override URLs (--skip-committee-override)" exit 0 ;; -e | --enclave) @@ -326,7 +326,7 @@ function parse_command_line_args() { export EXPOSE_PORTS=0 shift ;; - -s | --skip-comittee-override) + -s | --skip-committee-override) export SKIP_COMMITTEE_OVERRIDE=1 shift ;; @@ -347,7 +347,7 @@ function main(){ parse_command_line_args $* check_requirements create_dest_folder - if [ -z $SKIP_COMMITTEE_OVERRIDE ]; then + if [ -z "$SKIP_COMMITTEE_OVERRIDE" ]; then log_debug "Setting up aggsender_committee_override_urls with validators in kurtosis enclave" common_aggsender_committee_override_urls else From cc3128e6f92320cf3263f72870384e6533e9f022 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 11:33:40 +0100 Subject: [PATCH 64/75] Update common/polling_with_timeout.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- common/polling_with_timeout.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/common/polling_with_timeout.go b/common/polling_with_timeout.go index 043ed7cb1..09e516e43 100644 --- a/common/polling_with_timeout.go +++ b/common/polling_with_timeout.go @@ -20,31 +20,28 @@ func PollingWithTimeout( timeoutTimer := time.NewTimer(timeoutPeriod) defer timeoutTimer.Stop() + pollingTicker := time.NewTicker(pollingPeriod) + defer pollingTicker.Stop() + for { - pollingTimer := time.NewTimer(pollingPeriod) conditionMet, err := checkCondition() if err != nil { return false, err } if conditionMet { - pollingTimer.Stop() return true, nil } select { - case <-pollingTimer.C: - pollingTimer.Stop() + case <-pollingTicker.C: // Loop continues to check condition case <-timeoutTimer.C: - pollingTimer.Stop() return false, fmt.Errorf("pollingWithTimeout: condition not met after waiting %s: %w", timeoutPeriod.String(), ErrTimeoutReached) case <-ctx.Done(): - pollingTimer.Stop() return false, fmt.Errorf("pollingWithTimeout: "+ "context done while waiting for condition to be met: %w", ctx.Err()) } } - return false, nil } From 50fb76e1f6dae944ff129a71542e96913f3220f4 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 11:47:59 +0100 Subject: [PATCH 65/75] fix: copilot comments --- multidownloader/evm_multidownloader_rpc.go | 4 ++++ multidownloader/evm_multidownloader_rpc_test.go | 7 +++++++ multidownloader/evm_multidownloader_syncers.go | 3 +++ multidownloader/reorg_processor.go | 3 ++- multidownloader/sync/evmdriver.go | 1 - 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index 356960547..ec9b7c094 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -26,6 +26,10 @@ func NewEVMMultidownloaderRPC( // curl -X POST http://localhost:5576/ "Content-Type: application/json" \ // -d '{"method":"multidownloader-l1_status", "params":[], "id":1}' func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) { + if !b.downloader.IsInitialized() { + return nil, rpc.NewRPCError(rpc.DefaultErrorCode, + "EVMMultidownloaderRPC.Status: multidownloader not initialized") + } finalizedBlockNumber, err := b.downloader.GetFinalizedBlockNumber(context.Background()) if err != nil { return nil, rpc.NewRPCError(rpc.DefaultErrorCode, diff --git a/multidownloader/evm_multidownloader_rpc_test.go b/multidownloader/evm_multidownloader_rpc_test.go index a03fce8ec..64f7bb956 100644 --- a/multidownloader/evm_multidownloader_rpc_test.go +++ b/multidownloader/evm_multidownloader_rpc_test.go @@ -36,6 +36,13 @@ func TestEVMMultidownloaderRPC_Status(t *testing.T) { require.Contains(t, fmt.Sprintf("%+v", result), "Status") } +func TestEVMMultidownloaderRPC_Status_NotInitialized(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, false) + sut := NewEVMMultidownloaderRPC(log.WithFields("module", "test"), testData.mdr) + _, err := sut.Status() + require.ErrorContains(t, err, "multidownloader not initialized") +} + func TestEVMMultidownloaderRPC_Reorg(t *testing.T) { testData := newEVMMultidownloaderTestData(t, false) t.Run("returns error if debug is not enabled", func(t *testing.T) { diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go index 80983f9d6..3ce22bdf6 100644 --- a/multidownloader/evm_multidownloader_syncers.go +++ b/multidownloader/evm_multidownloader_syncers.go @@ -128,6 +128,9 @@ func (dh *EVMMultidownloader) EthClient() aggkittypes.BaseEthereumClienter { func (dh *EVMMultidownloader) LogQuery(ctx context.Context, query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) { + if !dh.IsInitialized() { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("EVMMultidownloader.LogQuery: multidownloader not initialized") + } dh.mutex.Lock() defer dh.mutex.Unlock() isAval, availQuery := dh.state.IsPartiallyAvailable(query) diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 7335c3f8a..0ccf7ad09 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -42,7 +42,8 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, // the first unaffected block offendingBlockNumber := detectedReorgError.OffendingBlockNumber if offendingBlockNumber == 0 { - return fmt.Errorf("ProcessReorg: reorg detected at block 0, this should never happen, check the reorg detection logic") + return fmt.Errorf("ProcessReorg: reorg detected at block 0, " + + "this should never happen, check the reorg detection logic") } tx, err := rm.port.NewTx(ctx) if err != nil { diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go index ce69362c9..b20b2d286 100644 --- a/multidownloader/sync/evmdriver.go +++ b/multidownloader/sync/evmdriver.go @@ -102,7 +102,6 @@ func (d *EVMDriver) syncStep(ctx context.Context) error { default: return fmt.Errorf("EVMDriver: error downloading blocks: %w", err) } - } if err = d.processBlocks(ctx, blocks); err != nil { return fmt.Errorf("EVMDriver: error processing blocks: %w", err) From 0a2df73b0fa7faf2dea2c4dfb946fd89dbee8e4a Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 11:53:02 +0100 Subject: [PATCH 66/75] fix: copilot comments --- cmd/run.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index ba716d3ce..207225b14 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -164,12 +164,12 @@ func start(cliCtx *cli.Context) error { err = l1MultiDownloader.Initialize(ctx) if err != nil { //nolint:gocritic - log.Fatal("failed to initialize L1 MultiDownloader: ", err) + log.Fatalf("failed to initialize L1 MultiDownloader: %v", err) } go func() { err := l1MultiDownloader.Start(ctx) if err != nil { - log.Fatal("l1MultiDownloader stopped: %w", err) + log.Fatalf("l1MultiDownloader stopped: %v", err) } }() } @@ -195,7 +195,7 @@ func start(cliCtx *cli.Context) error { committeeQuerier, ) if err != nil { - log.Fatal(err) + log.Fatalf("failed to create AggSender: %v", err) } rpcServices = append(rpcServices, aggsender.GetRPCServices()...) From a24d1faa12c99491bbcb047f98bf587e0099c468 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:27:19 +0100 Subject: [PATCH 67/75] fix: coverage --- multidownloader/sync/evmdriver_test.go | 32 ++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/multidownloader/sync/evmdriver_test.go b/multidownloader/sync/evmdriver_test.go index 7b296820b..908b8a016 100644 --- a/multidownloader/sync/evmdriver_test.go +++ b/multidownloader/sync/evmdriver_test.go @@ -2,12 +2,14 @@ package multidownloader import ( "errors" + "fmt" "testing" "time" aggkitcommon "github.com/agglayer/aggkit/common" compatibilityMocks "github.com/agglayer/aggkit/db/compatibility/mocks" "github.com/agglayer/aggkit/log" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" "github.com/agglayer/aggkit/multidownloader/sync/types/mocks" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" "github.com/agglayer/aggkit/sync" @@ -34,7 +36,7 @@ func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *ev syncerConfig := aggkittypes.SyncerConfig{} logger := log.WithFields("module", "test") rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: time.Second, + RetryAfterErrorPeriod: time.Millisecond * 10, MaxRetryAttemptsAfterError: 0, } if compatibilityCheckExpectations { @@ -61,7 +63,7 @@ func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *ev } } -func TestNewEVMDriver(t *testing.T) { +func TestNewEVMDriver_SyncStep(t *testing.T) { t.Run("fail compatibility check", func(t *testing.T) { testData := newEVMDriverTestData(t, false) expectedErr := errors.New("compatibility check failed") @@ -111,3 +113,29 @@ func TestNewEVMDriver(t *testing.T) { require.NoError(t, err) }) } + +func TestNewEVMDriver_ProcessBlocks(t *testing.T) { + t.Run("xxx", func(t *testing.T) { + testData := newEVMDriverTestData(t, true) + ctx := t.Context() + testData.driver.rh.MaxRetryAttemptsAfterError = 2 + data := &mdrsynctypes.DownloadResult{ + Data: []*sync.EVMBlock{ + { // sync.EVMBlock + EVMBlockHeader: sync.EVMBlockHeader{ + Num: 10, + }, + }, + }, + CompletionPercentage: 50, + } + errProcessBlock := fmt.Errorf("error processing blocks") + testData.mockProcessor.EXPECT(). + ProcessBlocks(mock.Anything, data).Return(errProcessBlock).Once() + testData.mockProcessor.EXPECT(). + ProcessBlocks(mock.Anything, data).Return(nil).Once() + err := testData.driver.processBlocks(ctx, data) + require.NoError(t, err) + require.Equal(t, data.CompletionPercentage, *testData.driver.GetCompletionPercentage()) + }) +} From 8c08958f0ca2f224d8ac23bacee76fc92d85292a Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 10 Feb 2026 11:01:59 +0100 Subject: [PATCH 68/75] feat: add completion percentage tracking to multidownloader Adds per-contract and global completion percentage tracking to monitor sync progress: - Implements CompletionPercentage() method in State for per-contract progress calculation - Extends RPC Status endpoint with completionPercentage and completionPercentageDetailed fields - Adds GetContracts() helper to SetSyncSegment for contract iteration - Improves Add() logic to handle empty BlockRange initialization correctly - Adds documentation comments to all exported State functions - Adds test coverage for merging segments from empty BlockRange Co-Authored-By: Claude Sonnet 4.5 --- multidownloader/evm_multidownloader_rpc.go | 31 +++++++---- multidownloader/state.go | 53 +++++++++++++++++++ multidownloader/types/set_sync_segment.go | 20 ++++++- .../types/set_sync_segment_test.go | 33 ++++++++++++ 4 files changed, 125 insertions(+), 12 deletions(-) diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go index ec9b7c094..ba67016c4 100644 --- a/multidownloader/evm_multidownloader_rpc.go +++ b/multidownloader/evm_multidownloader_rpc.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/ethereum/go-ethereum/common" ) type EVMMultidownloaderRPC struct { @@ -42,18 +43,28 @@ func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) { } b.downloader.mutex.Lock() defer b.downloader.mutex.Unlock() - + completationPercentage := b.downloader.state.CompletionPercentage() + minPercent := 100.0 + for _, percent := range completationPercentage { + if percent < minPercent { + minPercent = percent + } + } info := struct { - Status string `json:"status"` - State string `json:"state,omitempty"` - Pending string `json:"pending,omitempty"` - FinalizedBlockNumber uint64 `json:"finalizedBlockNumber,omitempty"` - LatestBlockNumber uint64 `json:"latestBlockNumber,omitempty"` + Status string `json:"status"` + State string `json:"state,omitempty"` + Pending string `json:"pending,omitempty"` + FinalizedBlockNumber uint64 `json:"finalizedBlockNumber,omitempty"` + LatestBlockNumber uint64 `json:"latestBlockNumber,omitempty"` + CompletionPercentage float64 `json:"completionPercentage,omitempty"` + CompletionPercentageDetailed map[common.Address]float64 `json:"completionPercentageDetailed,omitempty"` }{ - Status: "running", - State: b.downloader.state.String(), - FinalizedBlockNumber: finalizedBlockNumber, - LatestBlockNumber: latestBlockNumber, + Status: "running", + State: b.downloader.state.String(), + FinalizedBlockNumber: finalizedBlockNumber, + LatestBlockNumber: latestBlockNumber, + CompletionPercentage: minPercent, + CompletionPercentageDetailed: completationPercentage, } return info, nil } diff --git a/multidownloader/state.go b/multidownloader/state.go index 2bf132272..3fd3513d2 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -6,11 +6,14 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/etherman/types" + "github.com/agglayer/aggkit/log" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) +// State represents the current state of the multidownloader, +// it contains the segments that are already synced and the segments that are pending to be synced type State struct { // These are the segments that we have already synced // when a syncer does a `FilterLogs`, it is used to check what is already synced @@ -19,6 +22,7 @@ type State struct { Pending mdrtypes.SetSyncSegment } +// NewEmptyState creates a new State with empty synced and pending segments func NewEmptyState() *State { return &State{ Synced: mdrtypes.NewSetSyncSegment(), @@ -26,6 +30,7 @@ func NewEmptyState() *State { } } +// NewState creates a new State with the given synced and pending segments func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) *State { return &State{ Synced: *synced, @@ -33,6 +38,8 @@ func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) } } +// NewStateFromStorageSyncedBlocks creates a new State from the given storage +// synced blocks and total to sync blocks func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment, totalToSync mdrtypes.SetSyncSegment) (*State, error) { err := totalToSync.SubtractSegments(&storageSynced) @@ -58,38 +65,51 @@ func (s *State) Clone() *State { Pending: *clonedPending, } } + +// String returns a string representation of the State func (s *State) String() string { return "State{Synced: " + s.Synced.String() + ", Pending: " + s.Pending.String() + "}" } +// UpdateTargetBlockToNumber updates the target block number for the pending segments +// for that use the blockNotifier func (s *State) UpdateTargetBlockToNumber(ctx context.Context, blockNotifier types.BlockNotifierManager) error { return s.Pending.UpdateTargetBlockToNumber(ctx, blockNotifier) } +// GetHighestBlockNumberPendingToSync returns the highest block number that is pending to be synced func (s *State) GetHighestBlockNumberPendingToSync() (uint64, aggkittypes.BlockNumberFinality) { return s.Pending.GetHighestBlockNumber() } +// IsAvailable checks if the given LogQuery is fully available in the synced segments func (s *State) IsAvailable(query mdrtypes.LogQuery) bool { return s.Synced.IsAvailable(query) } +// IsPartiallyAvailable checks if the given LogQuery is partially available in the synced segments func (s *State) IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) { return s.Synced.IsPartiallyAvailable(query) } +// GetTotalPendingBlockRange returns the total block range that is pending to be synced func (s *State) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { return s.Pending.GetTotalPendingBlockRange() } +// GetAddressesToSyncForBlockNumber returns the list of addresses that have pending segments +// for the given block number func (s *State) GetAddressesToSyncForBlockNumber(blockNumber uint64) []common.Address { return s.Pending.GetAddressesForBlock(blockNumber) } + +// IsSyncFinished returns true if there are no more segments pending to be synced func (s *State) IsSyncFinished() bool { return s.Pending.Finished() } +// TotalBlocksPendingToSync returns the total number of blocks that are pending to be synced func (s *State) TotalBlocksPendingToSync() uint64 { return s.Pending.TotalBlocks() } @@ -128,10 +148,43 @@ func (s *State) OnNewSyncedLogQuery(logQuery *mdrtypes.LogQuery) error { return nil } +// SyncedSegmentsByContract returns the list of synced segments for the given contract addresses func (s *State) SyncedSegmentsByContract(addrs []common.Address) []mdrtypes.SyncSegment { return s.Synced.SegmentsByContract(addrs) } +// NextQueryToSync returns the next LogQuery to sync based on the pending segments and the given chunk size func (s *State) NextQueryToSync(syncBlockChunkSize uint32, maxBlockNumber uint64) (*mdrtypes.LogQuery, error) { return s.Pending.NextQuery(syncBlockChunkSize, maxBlockNumber) } + +func (s *State) CompletionPercentage() map[common.Address]float64 { + if s == nil { + return nil + } + result := make(map[common.Address]float64) + contracts := s.Synced.GetContracts() + for _, contract := range contracts { + synced, existsSynced := s.Synced.GetByContract(contract) + if !existsSynced { + continue + } + pending, existsPending := s.Pending.GetByContract(contract) + if !existsPending { + result[contract] = 100.0 + continue + } + + syncedBlocks := synced.BlockRange.CountBlocks() + pendingBlocks := pending.BlockRange.CountBlocks() + totalBlocks := syncedBlocks + pendingBlocks + log.Infof("CompletionPercentage for contract %s: syncedBlocks=%d, pendingBlocks=%d, totalBlocks=%d", + contract.Hex(), syncedBlocks, pendingBlocks, totalBlocks) + if totalBlocks == 0 { + result[contract] = 100.0 + } else { + result[contract] = (float64(syncedBlocks) / float64(totalBlocks)) * 100.0 + } + } + return result +} diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 909df74e9..eb1bf3293 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -60,11 +60,17 @@ func (s *SetSyncSegment) Add(segment SyncSegment) { return } // Merge syncers - s.UpdateBlockRange(¤t, current.BlockRange.Extend(segment.BlockRange)) + var newBlockRange aggkitcommon.BlockRange + if current.BlockRange.IsEmpty() { + newBlockRange = segment.BlockRange + } else { + newBlockRange = current.BlockRange.Extend(segment.BlockRange) + } + s.UpdateBlockRange(¤t, newBlockRange) } // GetByContract returns the SyncSegment for the given contract address - +// it returns true if it exists, otherwise it returns false func (s *SetSyncSegment) GetByContract(addr common.Address) (SyncSegment, bool) { if s == nil { return SyncSegment{}, false @@ -409,3 +415,13 @@ func (s *SetSyncSegment) SegmentsByContract(addrs []common.Address) []SyncSegmen } return result } + +// GetContracts returns the list of contract addresses +// in the SetSyncSegment +func (s *SetSyncSegment) GetContracts() []common.Address { + contracts := make([]common.Address, 0, len(s.segments)) + for _, segment := range s.segments { + contracts = append(contracts, segment.ContractAddr) + } + return contracts +} diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 7ac1199f7..5816e499e 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -62,6 +62,39 @@ func TestSetSyncSegment_Add(t *testing.T) { require.Equal(t, uint64(1), res.BlockRange.FromBlock) require.Equal(t, uint64(15), res.BlockRange.ToBlock) }) + + t.Run("merge from aggkitcommon.BlockRangeZero", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment1 := SyncSegment{ + ContractAddr: addr, + // That means no sync + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + res, exists := set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(5), res.BlockRange.FromBlock) + require.Equal(t, uint64(15), res.BlockRange.ToBlock) + segment3 := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(2, 5), + TargetToBlock: aggkittypes.LatestBlock, + } + + set.Add(segment3) + res, exists = set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(2), res.BlockRange.FromBlock) + require.Equal(t, uint64(15), res.BlockRange.ToBlock) + }) } func TestSetSyncSegment_GetByContract(t *testing.T) { From 02efdcb33317a0426a2802d5e769d70fdcdbd2ae Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 10 Feb 2026 11:04:04 +0100 Subject: [PATCH 69/75] fix: lint --- multidownloader/state.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/multidownloader/state.go b/multidownloader/state.go index 3fd3513d2..0d1f62de8 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -12,6 +12,8 @@ import ( "github.com/ethereum/go-ethereum/common" ) +const maxPercent = 100.0 + // State represents the current state of the multidownloader, // it contains the segments that are already synced and the segments that are pending to be synced type State struct { @@ -171,7 +173,7 @@ func (s *State) CompletionPercentage() map[common.Address]float64 { } pending, existsPending := s.Pending.GetByContract(contract) if !existsPending { - result[contract] = 100.0 + result[contract] = maxPercent continue } @@ -181,9 +183,9 @@ func (s *State) CompletionPercentage() map[common.Address]float64 { log.Infof("CompletionPercentage for contract %s: syncedBlocks=%d, pendingBlocks=%d, totalBlocks=%d", contract.Hex(), syncedBlocks, pendingBlocks, totalBlocks) if totalBlocks == 0 { - result[contract] = 100.0 + result[contract] = maxPercent } else { - result[contract] = (float64(syncedBlocks) / float64(totalBlocks)) * 100.0 + result[contract] = (float64(syncedBlocks) / float64(totalBlocks)) * maxPercent } } return result From cf44260a7569bd702fe9f90f8b477e69e708ec6f Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 11 Feb 2026 10:55:43 +0100 Subject: [PATCH 70/75] fix: ut --- multidownloader/state_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go index fd1edbfeb..09f9ab7f8 100644 --- a/multidownloader/state_test.go +++ b/multidownloader/state_test.go @@ -41,7 +41,7 @@ func TestStateInitial(t *testing.T) { pendingSegments := state.SyncedSegmentsByContract([]common.Address{addr1}) require.Equal(t, 1, len(pendingSegments)) require.Equal(t, addr1, pendingSegments[0].ContractAddr) - require.Equal(t, aggkitcommon.NewBlockRange(0, 456), pendingSegments[0].BlockRange) + require.Equal(t, aggkitcommon.NewBlockRange(1, 456), pendingSegments[0].BlockRange) require.Equal(t, aggkittypes.FinalizedBlock, pendingSegments[0].TargetToBlock) } From f4f2eea8be3754de59a9e4bee5e1ca17adc7f182 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 13 Feb 2026 14:43:32 +0100 Subject: [PATCH 71/75] - deprecated UpdateTargetBlockToNumber in favour of ExtendPendingRange - Better BlockRange empty case (add a flag to set it), in that way we support range{0,0} - Multidownloader take care to avoid using range {0,0} because is the way of set an empty range in DB --- common/block_range.go | 88 +++++- common/block_range_test.go | 86 ++++-- l1infotreesync/e2e_test.go | 14 +- multidownloader/evm_multidownloader.go | 42 ++- multidownloader/evm_multidownloader_test.go | 6 +- multidownloader/reorg_processor.go | 12 +- multidownloader/reorg_processor_test.go | 22 +- multidownloader/state.go | 44 ++- multidownloader/state_test.go | 285 +++++++++++++++--- multidownloader/storage/storage_reorg.go | 8 +- multidownloader/storage/storage_sync.go | 23 +- multidownloader/storage/storage_sync_test.go | 30 +- multidownloader/sync/evmdownloader.go | 28 +- multidownloader/sync/evmdownloader_test.go | 46 +-- multidownloader/types/log_query.go | 27 +- multidownloader/types/log_query_test.go | 27 ++ .../types/mocks/mock_reorg_processor.go | 26 +- multidownloader/types/reorg_processor.go | 14 +- multidownloader/types/set_sync_segment.go | 45 ++- .../types/set_sync_segment_test.go | 95 +++--- multidownloader/types/sync_segment.go | 30 +- multidownloader/types/sync_segment_test.go | 168 +++++++++++ multidownloader/types/syncer_config.go | 35 ++- types/list_block_header.go | 4 +- 24 files changed, 921 insertions(+), 284 deletions(-) create mode 100644 multidownloader/types/sync_segment_test.go diff --git a/common/block_range.go b/common/block_range.go index 82b1da4d0..302acb305 100644 --- a/common/block_range.go +++ b/common/block_range.go @@ -13,20 +13,24 @@ var ( type BlockRange struct { FromBlock uint64 ToBlock uint64 + // isNotEmpty have a negation because creating a BlockRange{} a bool field + // is set to false by default, so the natural name 'IsEmpty' would produce a false value by default + isNotEmpty bool } // NewBlockRange creates and returns a new BlockRange with the specified fromBlock and toBlock values. func NewBlockRange(fromBlock, toBlock uint64) BlockRange { return BlockRange{ - FromBlock: fromBlock, - ToBlock: toBlock, + FromBlock: fromBlock, + ToBlock: toBlock, + isNotEmpty: true, } } // CountBlocks returns the total number of blocks in the BlockRange, inclusive of both FromBlock and ToBlock. // If both FromBlock and ToBlock are zero, or if FromBlock is greater than ToBlock, it returns 0. func (b BlockRange) CountBlocks() uint64 { - if b.FromBlock == 0 && b.ToBlock == 0 { + if b.IsEmpty() { return 0 } if b.FromBlock > b.ToBlock { @@ -36,13 +40,17 @@ func (b BlockRange) CountBlocks() uint64 { } // IsEmpty returns true if the BlockRange contains no blocks. +// the invalid case of FromBlock > ToBlock is also considered empty func (b BlockRange) IsEmpty() bool { - return b.CountBlocks() == 0 + return !b.isNotEmpty || b.FromBlock > b.ToBlock } // String returns a string representation of the BlockRange in the format // "From: , To: ". func (b BlockRange) String() string { + if b.IsEmpty() { + return "Empty" + } return fmt.Sprintf("From: %d, To: %d (%d)", b.FromBlock, b.ToBlock, b.CountBlocks()) } @@ -52,6 +60,9 @@ func (b BlockRange) String() string { // strictly between b and other. The direction of the gap depends on the relative positions // of the two ranges. func (b BlockRange) Gap(other BlockRange) BlockRange { + if b.IsEmpty() || other.IsEmpty() { + return BlockRangeZero + } // If they overlap or touch, return empty if b.ToBlock >= getBlockMinusOne(other.FromBlock) && other.ToBlock >= getBlockMinusOne(b.FromBlock) { @@ -59,23 +70,35 @@ func (b BlockRange) Gap(other BlockRange) BlockRange { } if b.ToBlock < other.FromBlock { - return BlockRange{ - FromBlock: b.ToBlock + 1, - ToBlock: other.FromBlock - 1, - } + return NewBlockRange( + b.ToBlock+1, + other.FromBlock-1, + ) } - return BlockRange{ - FromBlock: other.ToBlock + 1, - ToBlock: getBlockMinusOne(b.FromBlock), - } + return NewBlockRange( + other.ToBlock+1, + getBlockMinusOne(b.FromBlock), + ) } // Greater returns true if the receiver BlockRange (b) is strictly greater than the other BlockRange (other). // [ 10 - 50 ] > [ 1 - 9 ] = true // [ 10 - 50 ] > [ 5 - 15 ] = false (overlap) // [ 10 - 50 ] > [ 51 - 100 ] = false (not greater) +// empty > [0 - 0] = false +// [0 - 0] > empty = true +// empty > empty = false func (b BlockRange) Greater(other BlockRange) bool { + if b.IsEmpty() && other.IsEmpty() { + return false + } + if b.IsEmpty() { + return false + } + if other.IsEmpty() { + return true + } return b.FromBlock > other.ToBlock } @@ -89,13 +112,29 @@ func getBlockMinusOne(fromBlock uint64) uint64 { // IsNextContigousBlock checks if 'next' BlockRange is exactly the next contiguous block // so the way to use this is: previousBlockRange.IsNextContigousBlock(nextBlockRange) func (b BlockRange) IsNextContigousBlock(next BlockRange) bool { + if b.IsEmpty() || next.IsEmpty() { + return false + } return b.ToBlock+1 == next.FromBlock } // Merge merges two BlockRanges and returns a slice of BlockRanges. // If the two BlockRanges overlap, it returns a single BlockRange that encompasses both. // If they do not overlap, it returns both BlockRanges in sorted order. +// If some of them is empty is ignored: +// [ 10 - 50 ] Merge [ 1 - 9 ] = [ 1 - 50 ] +// [ 10 - 50 ] Merge [ 5 - 75 ] = [ 5 - 75 ] +// [ 10 - 50 ] Merge [ 70 - 100 ] = [ 10 - 50 ], [ 70 - 100 ] +// empty Merge [ 1 - 10 ] = [ 1 - 10 ] +// [ 1 - 10 ] Merge empty = [ 1 - 10 ] +// empty Merge empty = empty func (b BlockRange) Merge(other BlockRange) []BlockRange { + if b.IsEmpty() { + return []BlockRange{other} + } + if other.IsEmpty() { + return []BlockRange{b} + } if b.Overlaps(other) { // If overlaps, just extend it return []BlockRange{b.Extend(other)} @@ -109,6 +148,12 @@ func (b BlockRange) Merge(other BlockRange) []BlockRange { // Extend merges two BlockRanges into one encompassing BlockRange. func (b BlockRange) Extend(other BlockRange) BlockRange { + if b.IsEmpty() { + return other + } + if other.IsEmpty() { + return b + } return NewBlockRange( min(b.FromBlock, other.FromBlock), max(b.ToBlock, other.ToBlock), @@ -123,6 +168,7 @@ func (b BlockRange) Extend(other BlockRange) BlockRange { // (C---A---B---D) -> [] func (b BlockRange) Subtract(other BlockRange) []BlockRange { result := []BlockRange{} + // This cover the case that b is empty or other is empty if !b.Overlaps(other) { return []BlockRange{b} } @@ -136,29 +182,45 @@ func (b BlockRange) Subtract(other BlockRange) []BlockRange { } func (b BlockRange) Cap(maxBlockNumber uint64) BlockRange { + if b.IsEmpty() { + return BlockRangeZero + } if b.FromBlock > maxBlockNumber { return BlockRangeZero } return NewBlockRange(b.FromBlock, min(b.ToBlock, maxBlockNumber)) } func (b BlockRange) Contains(other BlockRange) bool { + if b.IsEmpty() { + return false + } return b.FromBlock <= other.FromBlock && b.ToBlock >= other.ToBlock } // ContainsBlockNumber returns true if the given block number is within the BlockRange (inclusive). func (b BlockRange) ContainsBlockNumber(number uint64) bool { + if b.IsEmpty() { + return false + } return b.FromBlock <= number && number <= b.ToBlock } func (b BlockRange) Overlaps(other BlockRange) bool { + if b.IsEmpty() || other.IsEmpty() { + return false + } return b.FromBlock <= other.ToBlock && other.FromBlock <= b.ToBlock } func (b BlockRange) Equal(other BlockRange) bool { - return b.FromBlock == other.FromBlock && b.ToBlock == other.ToBlock + if b.IsEmpty() && other.IsEmpty() { + return true + } + return b.FromBlock == other.FromBlock && b.ToBlock == other.ToBlock && b.IsEmpty() == other.IsEmpty() } func (b BlockRange) Intersect(other BlockRange) BlockRange { + // If either range is empty or they don't overlap, return an empty range if !b.Overlaps(other) { return BlockRangeZero } diff --git a/common/block_range_test.go b/common/block_range_test.go index b230507f2..f147f36c8 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -31,25 +31,25 @@ func TestBlockRange_Gap(t *testing.T) { name: "a and b overlap", a: NewBlockRange(5, 15), b: NewBlockRange(10, 20), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "a and b touch at edge", a: NewBlockRange(1, 5), b: NewBlockRange(6, 10), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "b and a touch at edge", a: NewBlockRange(6, 10), b: NewBlockRange(1, 5), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "identical ranges", a: NewBlockRange(5, 10), b: NewBlockRange(5, 10), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "a after b with no overlap and gap of 1", @@ -65,39 +65,39 @@ func TestBlockRange_Gap(t *testing.T) { }, { name: "empty a", - a: NewBlockRange(0, 0), + a: BlockRangeZero, b: NewBlockRange(10, 15), - expected: NewBlockRange(1, 9), + expected: BlockRangeZero, }, { name: "empty b", a: NewBlockRange(10, 15), - b: NewBlockRange(0, 0), - expected: NewBlockRange(1, 9), + b: BlockRangeZero, + expected: BlockRangeZero, }, { name: "both empty", - a: NewBlockRange(0, 0), - b: NewBlockRange(0, 0), - expected: NewBlockRange(0, 0), + a: BlockRangeZero, + b: BlockRangeZero, + expected: BlockRangeZero, }, { name: "b before a with no gap", a: NewBlockRange(5, 10), b: NewBlockRange(1, 4), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "invalid a", a: NewBlockRange(10, 5), // from > to b: NewBlockRange(1, 15), - expected: NewBlockRange(0, 0), // should return empty range + expected: BlockRangeZero, // should return empty range }, { name: "invalid b", a: NewBlockRange(1, 15), b: NewBlockRange(10, 5), // from > to - expected: NewBlockRange(0, 0), // should return empty range + expected: BlockRangeZero, // should return empty range }, { name: "start verification case", @@ -105,6 +105,18 @@ func TestBlockRange_Gap(t *testing.T) { b: NewBlockRange(10, 10), expected: NewBlockRange(6, 9), }, + { + name: "{0,0} a", + a: NewBlockRange(0, 0), + b: NewBlockRange(10, 15), + expected: NewBlockRange(1, 9), + }, + { + name: "{0,0} b", + a: NewBlockRange(10, 15), + b: NewBlockRange(0, 0), + expected: NewBlockRange(1, 9), + }, } for _, tt := range tests { @@ -124,8 +136,18 @@ func TestBlockRange_IsEmpty(t *testing.T) { expected bool }{ { - name: "empty zero value", + name: "{0,0} not empty", br: NewBlockRange(0, 0), + expected: false, + }, + { + name: "BlockRangeZero isempty", + br: BlockRangeZero, + expected: true, + }, + { + name: "BlockRange{} isempty", + br: BlockRange{}, expected: true, }, { @@ -207,32 +229,32 @@ func TestBlockRange_Greater(t *testing.T) { }, { name: "empty a, non-empty b", - a: NewBlockRange(0, 0), + a: BlockRangeZero, b: NewBlockRange(1, 10), expected: false, }, { name: "non-empty a, empty b", a: NewBlockRange(5, 10), - b: NewBlockRange(0, 0), + b: BlockRangeZero, expected: true, }, { name: "both empty", - a: NewBlockRange(0, 0), - b: NewBlockRange(0, 0), + a: BlockRangeZero, + b: BlockRangeZero, expected: false, }, { - name: "invalid a (from > to)", - a: NewBlockRange(10, 5), - b: NewBlockRange(1, 4), - expected: true, + name: "{0,0} > {0,1} = false", + a: NewBlockRange(0, 0), + b: NewBlockRange(0, 1), + expected: false, }, { - name: "invalid b (from > to)", - a: NewBlockRange(5, 10), - b: NewBlockRange(10, 5), + name: "{0,1} > {0,0} = false (overlaps!)", + a: NewBlockRange(0, 1), + b: NewBlockRange(0, 0), expected: false, }, } @@ -406,15 +428,15 @@ func TestBlockRange_Subtract(t *testing.T) { } func TestBlockRange_Intersect(t *testing.T) { bn := NewBlockRange(10, 50) - require.Equal(t, BlockRange{10, 15}, bn.Intersect(NewBlockRange(5, 15))) - require.Equal(t, BlockRange{30, 40}, bn.Intersect(NewBlockRange(30, 40))) + require.Equal(t, NewBlockRange(10, 15), bn.Intersect(NewBlockRange(5, 15))) + require.Equal(t, NewBlockRange(30, 40), bn.Intersect(NewBlockRange(30, 40))) require.Equal(t, BlockRangeZero, bn.Intersect(NewBlockRange(51, 60))) } func TestBlockRange_Cap(t *testing.T) { bn := NewBlockRange(10, 50) - require.Equal(t, BlockRange{10, 40}, bn.Cap(40)) - require.Equal(t, BlockRange{10, 50}, bn.Cap(60)) + require.Equal(t, NewBlockRange(10, 40), bn.Cap(40)) + require.Equal(t, NewBlockRange(10, 50), bn.Cap(60)) require.Equal(t, BlockRangeZero, bn.Cap(5)) } @@ -557,7 +579,9 @@ func TestBlockRange_ListBlockNumbers(t *testing.T) { bn2 := NewBlockRange(3, 5) require.Equal(t, []uint64{3, 4, 5}, bn2.ListBlockNumbers()) bn3 := NewBlockRange(0, 0) - require.Equal(t, []uint64{}, bn3.ListBlockNumbers()) + require.Equal(t, []uint64{0}, bn3.ListBlockNumbers()) + bn4 := BlockRangeZero + require.Equal(t, []uint64{}, bn4.ListBlockNumbers()) } func TestBlockRange_SplitByBlockNumber(t *testing.T) { diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 6527f7c8a..5189caa1a 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -173,7 +173,7 @@ func TestWithReorgs(t *testing.T) { useMultidownloaderForTest: false, }, { - name: "with new multidownloader", + name: "with multidownloader", useMultidownloaderForTest: true, }, } @@ -214,8 +214,8 @@ func TestWithReorgs(t *testing.T) { finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") require.NoError(t, err) cfgMD.BlockFinality = *finality - cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 100) - cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 100) + cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 1) + cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 1) evmMultidownloader, err = multidownloader.NewEVMMultidownloader( log.WithFields("module", "multidownloader"), cfgMD, @@ -319,7 +319,9 @@ func TestWithReorgs(t *testing.T) { expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) // TODO: Remove ths sleep - time.Sleep(time.Second * 1) // wait for syncer to process the reorg + if !tt.useMultidownloaderForTest { + time.Sleep(time.Second * 1) // wait for syncer to process the reorg + } checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) @@ -339,7 +341,9 @@ func TestWithReorgs(t *testing.T) { // wait for syncer to process the reorg helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 // TODO: Remove ths sleep - time.Sleep(time.Second * 1) + if !tt.useMultidownloaderForTest { + time.Sleep(time.Second * 1) + } // create some events and update the trees updateL1InfoTreeAndRollupExitTree(2, 1) helpers.CommitBlocks(t, client, 1, time.Millisecond*100) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 56f241f41..b4746acc2 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -163,7 +163,7 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { if err != nil { return err } - newState, err := dh.newStateFromStorage() + newState, err := dh.newStateFromStorage(ctx) if err != nil { return fmt.Errorf("Initialize: error creating new state from storage: %w", err) } @@ -173,19 +173,32 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { dh.syncersConfig.Brief(), dh.state.String()) return nil } +func (dh *EVMMultidownloader) mapBlockTagToBlockNumber( + ctx context.Context) (map[aggkittypes.BlockNumberFinality]uint64, error) { + tags := dh.syncersConfig.GetTargetToBlockTags() + resultMap := make(map[aggkittypes.BlockNumberFinality]uint64) + for _, tag := range tags { + blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, tag) + if err != nil { + return nil, fmt.Errorf("mapBlockTagToBlockNumber: cannot get block number for finality %s: %w", tag.String(), err) + } + resultMap[tag] = blockNumber + } + return resultMap, nil +} // newStateFromStorage creates a new State based on data on storage and the current syncer configs. // It is used on initialization and after reorgs to recreate the state of pending and synced segments -func (dh *EVMMultidownloader) newStateFromStorage() (*State, error) { - syncSegments, err := dh.syncersConfig.SyncSegments() +func (dh *EVMMultidownloader) newStateFromStorage(ctx context.Context) (*State, error) { + mapBlocks, err := dh.mapBlockTagToBlockNumber(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("newStateFromStorage: cannot map block tags to block numbers: %w", err) } - // Update TargetToBlock from name to real block numbers - err = syncSegments.UpdateTargetBlockToNumber(context.Background(), dh.blockNotifierManager) + syncSegments, err := dh.syncersConfig.SyncSegments(mapBlocks) if err != nil { - return nil, fmt.Errorf("newStateFromStorage: cannot update TargetToBlock in sync segments: %w", err) + return nil, err } + // Get synced segments from storage storageSyncSegments, err := dh.storage.GetSyncedBlockRangePerContract(nil) if err != nil { @@ -269,14 +282,14 @@ func (dh *EVMMultidownloader) startNumLoops(ctx context.Context, numLoopsToExecu } dh.log.Infof("Processing reorg at block number %d...", reorgErr.OffendingBlockNumber) - err = dh.reorgProcessor.ProcessReorg(runCtx, *reorgErr) + err = dh.reorgProcessor.ProcessReorg(runCtx, *reorgErr, dh.cfg.BlockFinality) if err != nil { dh.mutex.Unlock() dh.log.Warnf("Error running reorg multidownloader: %s", err.Error()) time.Sleep(1 * time.Second) continue } - newState, err := dh.newStateFromStorage() + newState, err := dh.newStateFromStorage(ctx) if err != nil { dh.mutex.Unlock() dh.log.Warnf("Error recreating state after reorg processing: %s", err.Error()) @@ -326,7 +339,11 @@ func (dh *EVMMultidownloader) Stop(ctx context.Context) error { func (dh *EVMMultidownloader) updateTargetBlockNumber(ctx context.Context) error { dh.mutex.Lock() defer dh.mutex.Unlock() - return dh.state.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) + mapBlocks, err := dh.mapBlockTagToBlockNumber(ctx) + if err != nil { + return fmt.Errorf("updateTargetBlockNumber: cannot map block tags to block numbers: %w", err) + } + return dh.state.ExtendPendingRange(mapBlocks, &dh.syncersConfig) } func (dh *EVMMultidownloader) checkReorgsUnsafeZone(ctx context.Context) error { @@ -543,7 +560,7 @@ func (dh *EVMMultidownloader) getUnsafeLogQueries(blockHeaders []*aggkittypes.Bl return logQueries } -func (dh *EVMMultidownloader) newState(queries []mdrtypes.LogQuery) (*State, error) { +func (dh *EVMMultidownloader) newStateAftersLogQueries(queries []mdrtypes.LogQuery) (*State, error) { dh.mutex.Lock() state := dh.state.Clone() dh.mutex.Unlock() @@ -620,7 +637,7 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { if err = dh.checkIntegrityNewLogsBlockHeaders(logs, blockHeaders); err != nil { return false, err } - newState, err := dh.newState(logQueries) + newState, err := dh.newStateAftersLogQueries(logQueries) if err != nil { return false, fmt.Errorf("Unsafe/Step: failed to create new state: %w", err) } @@ -837,6 +854,7 @@ func (dh *EVMMultidownloader) getNextQuery(ctx context.Context, chunk uint32, sa return logQueryData, nil } +// TODO: Do this requests in parallel func (dh *EVMMultidownloader) requestMultiplesLogs( ctx context.Context, queries []mdrtypes.LogQuery) ([]types.Log, error) { diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 960e1ee24..d4f11ffce 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -402,7 +402,7 @@ func TestEVMMultidownloader_Start(t *testing.T) { ctx := context.Background() testData.mdr.debug.ForceReorg(1234) - testData.mockReorgProcessor.EXPECT().ProcessReorg(mock.Anything, mock.Anything).Return(nil).Once() + testData.mockReorgProcessor.EXPECT().ProcessReorg(mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() // It starts, execute 1 loop that do a reorg and then return err := testData.mdr.startNumLoops(ctx, 1) // Should return no error @@ -1384,7 +1384,7 @@ func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). Return(storageSyncSegments, nil).Once() - state, err := data.mdr.newStateFromStorage() + state, err := data.mdr.newStateFromStorage(t.Context()) require.NoError(t, err) require.NotNil(t, state) }) @@ -1403,7 +1403,7 @@ func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). Return(emptySegments, expectedErr).Once() - state, err := data.mdr.newStateFromStorage() + state, err := data.mdr.newStateFromStorage(t.Context()) require.Error(t, err) require.Nil(t, state) require.Contains(t, err.Error(), "cannot get synced block ranges from storage") diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go index 0ccf7ad09..4adf65e60 100644 --- a/multidownloader/reorg_processor.go +++ b/multidownloader/reorg_processor.go @@ -35,8 +35,13 @@ func NewReorgProcessor(log aggkitcommon.Logger, // After detecting a reorg at detectedReorgError.OffendingBlockNumber, // - find affected blocks // - store the reorg info in storage +// params: +// - detectedReorgError: the error returned by the reorg detection logic, containing the +// offending block number and the reason for the reorg detection +// - finalizedBlockTag: the block tag to consider as finalized (typically finalizedBlock) func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, - detectedReorgError mdtypes.DetectedReorgError) error { + detectedReorgError mdtypes.DetectedReorgError, + finalizedBlockTag aggkittypes.BlockNumberFinality) error { var err error // We known that offendingBlockNumber is affected, so we go backwards until we find // the first unaffected block @@ -86,8 +91,7 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, if err != nil { return fmt.Errorf("ProcessReorg: error getting latest block number in RPC: %w", err) } - - finalizedBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock) + finalizedBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, finalizedBlockTag) if err != nil { return fmt.Errorf("ProcessReorg: error getting finalized block number in RPC: %w", err) } @@ -100,7 +104,7 @@ func (rm *ReorgProcessor) ProcessReorg(ctx context.Context, DetectedTimestamp: rm.port.TimeNowUnix(), NetworkLatestBlock: latestBlockNumberInRPC, NetworkFinalizedBlock: finalizedBlockNumberInRPC, - NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + NetworkFinalizedBlockName: finalizedBlockTag, Description: detectedReorgError.Error(), } reorgID, err := rm.port.MoveReorgedBlocks(tx, reorgData) diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index 50910a720..b15222ebe 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -297,7 +297,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { common.HexToHash("0x1234"), common.HexToHash("0x5678"), "test reorg at genesis") - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) }) t.Run("returns error when NewTx fails", func(t *testing.T) { @@ -320,7 +320,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error starting new tx") @@ -352,7 +352,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { Return(nil, expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error finding first unaffected block") @@ -410,7 +410,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockTx.EXPECT().Commit().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.NoError(t, err) mockPort.AssertExpectations(t) @@ -453,7 +453,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error getting last block number in storage") @@ -501,7 +501,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error moving reorged blocks") @@ -547,7 +547,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error getting latest block number in RPC") @@ -594,7 +594,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(0), expectedErr).Once() mockTx.EXPECT().Rollback().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error getting finalized block number in RPC") @@ -644,7 +644,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() mockTx.EXPECT().Commit().Return(expectedErr).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "cannot commit tx") @@ -713,7 +713,7 @@ func TestReorgProcessor_ProcessReorg(t *testing.T) { Return(nil, originalErr).Once() mockTx.EXPECT().Rollback().Return(rollbackErr).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.Error(t, err) require.Contains(t, err.Error(), "error finding first unaffected block") @@ -824,7 +824,7 @@ func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock u mockPort.EXPECT().MoveReorgedBlocks(mockTx, expectedReorgData).Return(uint64(1), nil).Once() mockTx.EXPECT().Commit().Return(nil).Once() - err := processor.ProcessReorg(ctx, *reorgErr) + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) require.NoError(t, err) mockPort.AssertExpectations(t) diff --git a/multidownloader/state.go b/multidownloader/state.go index 0d1f62de8..81d860b8c 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -1,11 +1,9 @@ package multidownloader import ( - "context" "fmt" aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/etherman/types" "github.com/agglayer/aggkit/log" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" @@ -33,7 +31,8 @@ func NewEmptyState() *State { } // NewState creates a new State with the given synced and pending segments -func NewState(synced *mdrtypes.SetSyncSegment, pending *mdrtypes.SetSyncSegment) *State { +func NewState(synced *mdrtypes.SetSyncSegment, + pending *mdrtypes.SetSyncSegment) *State { return &State{ Synced: *synced, Pending: *pending, @@ -74,10 +73,38 @@ func (s *State) String() string { ", Pending: " + s.Pending.String() + "}" } -// UpdateTargetBlockToNumber updates the target block number for the pending segments -// for that use the blockNotifier -func (s *State) UpdateTargetBlockToNumber(ctx context.Context, blockNotifier types.BlockNotifierManager) error { - return s.Pending.UpdateTargetBlockToNumber(ctx, blockNotifier) +func (s *State) ExtendPendingRange( + mapBlocks map[aggkittypes.BlockNumberFinality]uint64, + syncersConfig *mdrtypes.SetSyncerConfig) error { + // It extend pending segments with this new block numbers, + // maybe pending segment IsEmpty() then you need to get latest + // block from synced segments. + newSyncSegments, err := syncersConfig.SyncSegments(mapBlocks) + if err != nil { + return fmt.Errorf("ExtendPendingRange: error creating sync segments from syncers config: %w", err) + } + for _, segment := range newSyncSegments.GetSegments() { + // If it's empty nothing to do + if segment.BlockRange.IsEmpty() { + continue + } + + synced, ok := s.Synced.GetByContract(segment.ContractAddr) + if !ok { + return fmt.Errorf("ExtendPendingRange: error getting synced segment for contract %s", segment.ContractAddr.Hex()) + } + // Subtract already synced blocks from pending segment + subs := segment.BlockRange.Subtract(synced.BlockRange) + if len(subs) == 0 { + continue + } + // We assume that there is only one segment after subtraction, if there are more it means + // that there are non contiguous blocks which is unexpected + segment.BlockRange = subs[0] + // Extend pending segment with new block range + s.Pending.Add(segment) + } + return nil } // GetHighestBlockNumberPendingToSync returns the highest block number that is pending to be synced @@ -125,6 +152,9 @@ func (s *State) OnNewSyncedLogQuery(logQuery *mdrtypes.LogQuery) error { if logQuery == nil { return fmt.Errorf("OnNewSyncedLogQuery: logQuery is nil") } + if logQuery.IsEmpty() { + return fmt.Errorf("OnNewSyncedLogQuery: logQuery is empty") + } // Clone both sets to ensure atomicity // If either operation fails, the original state remains unchanged diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go index 09f9ab7f8..857f1e7c7 100644 --- a/multidownloader/state_test.go +++ b/multidownloader/state_test.go @@ -4,7 +4,7 @@ import ( "testing" aggkitcommon "github.com/agglayer/aggkit/common" - mdtypes "github.com/agglayer/aggkit/multidownloader/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -13,18 +13,18 @@ import ( func TestStateInitial(t *testing.T) { addr1 := common.HexToAddress("0x10") addr2 := common.HexToAddress("0x20") - storageData := mdtypes.NewSetSyncSegment() - storageData.Add(mdtypes.NewSyncSegment(addr1, + storageData := mdrtypes.NewSetSyncSegment() + storageData.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.BlockRangeZero, aggkittypes.FinalizedBlock, false)) - storageData.Add(mdtypes.NewSyncSegment(addr2, + storageData.Add(mdrtypes.NewSyncSegment(addr2, aggkitcommon.BlockRangeZero, aggkittypes.LatestBlock, false)) - configData := mdtypes.NewSetSyncSegment() - segment1 := mdtypes.NewSyncSegment(addr1, + configData := mdrtypes.NewSetSyncSegment() + segment1 := mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(0, 1000), aggkittypes.FinalizedBlock, false) - segment2 := mdtypes.NewSyncSegment(addr2, + segment2 := mdrtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(0, 2000), aggkittypes.LatestBlock, false) configData.Add(segment1) @@ -33,22 +33,22 @@ func TestStateInitial(t *testing.T) { state, err := NewStateFromStorageSyncedBlocks(storageData, configData) require.NoError(t, err) require.NotNil(t, state) - logQuery := mdtypes.NewLogQuery( - 1, 456, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery( + 0, 456, []common.Address{addr1}) err = state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) pendingSegments := state.SyncedSegmentsByContract([]common.Address{addr1}) require.Equal(t, 1, len(pendingSegments)) require.Equal(t, addr1, pendingSegments[0].ContractAddr) - require.Equal(t, aggkitcommon.NewBlockRange(1, 456), pendingSegments[0].BlockRange) + require.Equal(t, aggkitcommon.NewBlockRange(0, 456), pendingSegments[0].BlockRange) require.Equal(t, aggkittypes.FinalizedBlock, pendingSegments[0].TargetToBlock) } func TestState_OnNewSyncedLogQuery(t *testing.T) { t.Run("nil state", func(t *testing.T) { var state *State - logQuery := mdtypes.NewLogQuery(1, 10, []common.Address{common.HexToAddress("0x1")}) + logQuery := mdrtypes.NewLogQuery(1, 10, []common.Address{common.HexToAddress("0x1")}) err := state.OnNewSyncedLogQuery(&logQuery) require.Error(t, err) require.Contains(t, err.Error(), "state is nil") @@ -64,14 +64,14 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { t.Run("successful sync", func(t *testing.T) { addr1 := common.HexToAddress("0x100") - syncedSet := mdtypes.NewSetSyncSegment() - syncedSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 100), aggkittypes.FinalizedBlock, false)) - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 200), aggkittypes.LatestBlock, false)) @@ -87,7 +87,7 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { require.Equal(t, uint64(100), pendingBefore) // Sync blocks 101-150 - logQuery := mdtypes.NewLogQuery(101, 150, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(101, 150, []common.Address{addr1}) err := state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) @@ -104,14 +104,14 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { t.Run("transactional behavior - state unchanged on error", func(t *testing.T) { addr1 := common.HexToAddress("0x100") - syncedSet := mdtypes.NewSetSyncSegment() - syncedSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 100), aggkittypes.FinalizedBlock, false)) - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 1000), aggkittypes.LatestBlock, false)) @@ -125,7 +125,7 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { // Try to sync a range in the middle (500-600) which would split the pending segment // This should fail with "cannot split segment" error - logQuery := mdtypes.NewLogQuery(500, 600, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(500, 600, []common.Address{addr1}) err := state.OnNewSyncedLogQuery(&logQuery) // Should fail because it would split the segment into two parts @@ -144,9 +144,9 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { t.Run("multiple consecutive syncs", func(t *testing.T) { addr1 := common.HexToAddress("0x100") - syncedSet := mdtypes.NewSetSyncSegment() - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 1000), aggkittypes.LatestBlock, false)) @@ -164,7 +164,7 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { } for i, chunk := range chunks { - logQuery := mdtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) err := state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err, "chunk %d should succeed", i) @@ -185,9 +185,9 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { addr1 := common.HexToAddress("0x100") // Start with empty synced and full pending - syncedSet := mdtypes.NewSetSyncSegment() - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 300), aggkittypes.LatestBlock, false)) @@ -209,7 +209,7 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { } for i, chunk := range chunks { - logQuery := mdtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) err := state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err, "chunk %d should succeed", i) @@ -241,14 +241,14 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { addr1 := common.HexToAddress("0x100") // Start with some already synced - syncedSet := mdtypes.NewSetSyncSegment() - syncedSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 50), aggkittypes.FinalizedBlock, false)) - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(51, 100), aggkittypes.LatestBlock, false)) @@ -260,7 +260,7 @@ func TestState_OnNewSyncedLogQuery(t *testing.T) { require.Equal(t, uint64(50), state.TotalBlocksPendingToSync()) // Sync remaining blocks in one go - logQuery := mdtypes.NewLogQuery(51, 100, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(51, 100, []common.Address{addr1}) err := state.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) @@ -286,14 +286,14 @@ func TestState_Clone(t *testing.T) { // Create original state with synced and pending segments addr1 := common.HexToAddress("0x100") - syncedSet := mdtypes.NewSetSyncSegment() - syncedSet.Add(mdtypes.NewSyncSegment(addr1, + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(1, 100), aggkittypes.FinalizedBlock, false)) - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 200), aggkittypes.LatestBlock, false)) @@ -314,7 +314,7 @@ func TestState_Clone(t *testing.T) { require.Equal(t, originalSyncedBefore[0].BlockRange, clonedSyncedBefore[0].BlockRange) // Modify the original by syncing more blocks - logQuery := mdtypes.NewLogQuery(101, 150, []common.Address{addr1}) + logQuery := mdrtypes.NewLogQuery(101, 150, []common.Address{addr1}) err := original.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) @@ -347,14 +347,14 @@ func TestState_Clone(t *testing.T) { addr2 := common.HexToAddress("0x2") addr3 := common.HexToAddress("0x3") - syncedSet := mdtypes.NewSetSyncSegment() - syncedSet.Add(mdtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(0, 100), aggkittypes.FinalizedBlock, false)) - syncedSet.Add(mdtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(0, 200), aggkittypes.FinalizedBlock, false)) + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(0, 100), aggkittypes.FinalizedBlock, false)) + syncedSet.Add(mdrtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(0, 200), aggkittypes.FinalizedBlock, false)) - pendingSet := mdtypes.NewSetSyncSegment() - pendingSet.Add(mdtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 500), aggkittypes.LatestBlock, false)) - pendingSet.Add(mdtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(201, 600), aggkittypes.LatestBlock, false)) - pendingSet.Add(mdtypes.NewSyncSegment(addr3, aggkitcommon.NewBlockRange(0, 1000), aggkittypes.LatestBlock, false)) + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 500), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdrtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(201, 600), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdrtypes.NewSyncSegment(addr3, aggkitcommon.NewBlockRange(0, 1000), aggkittypes.LatestBlock, false)) original := NewState(&syncedSet, &pendingSet) cloned := original.Clone() @@ -365,7 +365,7 @@ func TestState_Clone(t *testing.T) { require.Equal(t, originalPendingBefore, clonedPendingBefore) // Modify original - sync blocks at the end of addr3 range to avoid splitting - logQuery := mdtypes.NewLogQuery(901, 1000, []common.Address{addr3}) + logQuery := mdrtypes.NewLogQuery(901, 1000, []common.Address{addr3}) err := original.OnNewSyncedLogQuery(&logQuery) require.NoError(t, err) @@ -379,3 +379,194 @@ func TestState_Clone(t *testing.T) { "cloned state should be independent from original after modification") }) } + +func TestStateInitial_case_startBlock0(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 0, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 256, + } + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + br := sut.GetTotalPendingBlockRange() + require.NotNil(t, br) + require.Equal(t, "From: 0, To: 256 (257)", br.String()) + nextRequest, err := sut.NextQueryToSync(20, 250) + require.NoError(t, err) + require.Equal(t, "From: 0, To: 19 (20)", nextRequest.BlockRange.String()) + // after: synced: {0-19}, pending: {20-256} + err = sut.OnNewSyncedLogQuery(nextRequest) + require.NoError(t, err) + br = sut.GetTotalPendingBlockRange() + require.Equal(t, "From: 20, To: 256 (237)", br.String()) + require.True(t, sut.IsAvailable(*nextRequest)) + // nextRequest = {20-400} + nextRequest.BlockRange = aggkitcommon.NewBlockRange(10, 400) + require.False(t, sut.IsAvailable(*nextRequest)) + partial, subRequest := sut.IsPartiallyAvailable(*nextRequest) + require.True(t, partial) + require.Equal(t, "From: 10, To: 19 (10)", subRequest.BlockRange.String()) +} + +func TestStateInitial_case_startBlock1(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 256, + } + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + br := sut.GetTotalPendingBlockRange() + require.NotNil(t, br) + require.Equal(t, "From: 1, To: 256 (256)", br.String()) +} +func TestStateInitial_ExtendPendingRange(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 200, + } + + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + pendingSync := sut.GetTotalPendingBlockRange() + require.NotNil(t, pendingSync) + + // Sync first batch 1-200 + err = sut.OnNewSyncedLogQuery(&mdrtypes.LogQuery{ + BlockRange: aggkitcommon.NewBlockRange(1, 200), + Addrs: []common.Address{common.HexToAddress("0x10")}, + }) + require.NoError(t, err) + require.True(t, sut.IsSyncFinished()) + + // Now extend the range to block 350 + mapBlocks[aggkittypes.FinalizedBlock] = 350 + err = sut.ExtendPendingRange(mapBlocks, &configs) + require.NoError(t, err) + pendingBlockRange := sut.GetTotalPendingBlockRange() + require.NotNil(t, pendingBlockRange) + require.Equal(t, "From: 201, To: 350 (150)", pendingBlockRange.String()) +} + +func TestState_AfterFullySync(t *testing.T) { + // Setup: Create a state with a segment to sync from block 1 to 100 + addr := common.HexToAddress("0x123124543423") + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 1, + ToBlock: aggkittypes.LatestBlock, + } + configs.Add(cfg) + + // Initial target block is 100 + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.LatestBlock: 100, + } + + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + + // Start with empty storage (nothing synced yet) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + addr, + aggkitcommon.BlockRangeZero, + aggkittypes.LatestBlock, + true)) + + state, err := NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) + require.NoError(t, err) + + // Verify initial pending state + require.False(t, state.IsSyncFinished(), "should not be finished initially") + require.Equal(t, uint64(100), state.TotalBlocksPendingToSync(), "should have 100 blocks pending") + + // Sync all blocks 1-100 + logQuery := mdrtypes.NewLogQuery(1, 100, []common.Address{addr}) + err = state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify sync is complete + require.True(t, state.IsSyncFinished(), "should be finished after syncing all blocks") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 blocks pending") + + // Verify synced range + syncedSegments := state.SyncedSegmentsByContract([]common.Address{addr}) + require.Equal(t, 1, len(syncedSegments)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedSegments[0].BlockRange) + + // Now simulate that the chain has progressed to block 150 + // This is analogous to UpdateTargetBlockToNumber in the SetSyncSegment test + mapBlocks[aggkittypes.LatestBlock] = 150 + err = state.ExtendPendingRange(mapBlocks, &configs) + require.NoError(t, err) + + // Verify that new blocks are now pending (101-150) + require.False(t, state.IsSyncFinished(), "should not be finished after extending range") + require.Equal(t, uint64(50), state.TotalBlocksPendingToSync(), "should have 50 new blocks pending") + + // Verify pending range + pendingBlockRange := state.GetTotalPendingBlockRange() + require.NotNil(t, pendingBlockRange) + require.Equal(t, "From: 101, To: 150 (50)", pendingBlockRange.String()) + + // Verify synced segments remain unchanged + syncedSegments = state.SyncedSegmentsByContract([]common.Address{addr}) + require.Equal(t, 1, len(syncedSegments)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedSegments[0].BlockRange, + "synced range should remain unchanged at 1-100") +} diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go index 5139c1e8c..e5584550a 100644 --- a/multidownloader/storage/storage_reorg.go +++ b/multidownloader/storage/storage_reorg.go @@ -163,10 +163,10 @@ func (a *MultidownloaderStorage) GetReorgedDataByReorgID(tx dbtypes.Querier, reorgData := &mdrtypes.ReorgData{ ReorgID: row.ReorgID, - BlockRangeAffected: aggkitcommon.BlockRange{ - FromBlock: row.ReorgedFromBlock, - ToBlock: row.ReorgedToBlock, - }, + BlockRangeAffected: aggkitcommon.NewBlockRange( + row.ReorgedFromBlock, + row.ReorgedToBlock, + ), DetectedAtBlock: row.DetectedAtBlock, DetectedTimestamp: row.DetectedTimestamp, NetworkLatestBlock: row.NetworkLatestBlock, diff --git a/multidownloader/storage/storage_sync.go b/multidownloader/storage/storage_sync.go index 29fb45e1f..2f2d13cdc 100644 --- a/multidownloader/storage/storage_sync.go +++ b/multidownloader/storage/storage_sync.go @@ -26,10 +26,19 @@ func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { return mdrtypes.SyncSegment{}, fmt.Errorf("ToSyncSegment: error parsing target to block finality (%s): %w", r.TargetToBlock, err) } + var blockRange aggkitcommon.BlockRange + + if r.SyncedFromBlock == 0 && r.SyncedToBlock == 0 { + // We use value {0,0} to represent empty range in the database, but in the code + // we want to use the IsEmpty() method of BlockRange + blockRange = aggkitcommon.BlockRangeZero + } else { + blockRange = aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock) + } return mdrtypes.SyncSegment{ ContractAddr: r.Address, TargetToBlock: *targetToBlock, - BlockRange: aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock), + BlockRange: blockRange, }, nil } @@ -70,8 +79,16 @@ func (a *MultidownloaderStorage) UpdateSyncedStatus(tx dbtypes.Querier, a.mutex.Lock() defer a.mutex.Unlock() for _, segment := range segments { - result, err := tx.Exec(query, segment.BlockRange.FromBlock, - segment.BlockRange.ToBlock, segment.ContractAddr.Hex()) + if !segment.IsValid() { + return fmt.Errorf("UpdateSyncedStatus: invalid segment %s", segment.String()) + } + br := segment.BlockRange + if br.IsEmpty() { + // We use value {0,0} to represent empty range in the database + br = aggkitcommon.BlockRangeZero + } + result, err := tx.Exec(query, br.FromBlock, + br.ToBlock, segment.ContractAddr.Hex()) if err != nil { return fmt.Errorf("error updating %s sync status: %w", segment.String(), err) } diff --git a/multidownloader/storage/storage_sync_test.go b/multidownloader/storage/storage_sync_test.go index f233675ae..9c03da795 100644 --- a/multidownloader/storage/storage_sync_test.go +++ b/multidownloader/storage/storage_sync_test.go @@ -51,9 +51,11 @@ func TestStorage_UpsertSyncerConfigs(t *testing.T) { syncSegments, err := storage.GetSyncedBlockRangePerContract(nil) require.NoError(t, err) - require.Equal(t, 2, len(syncSegments.GetAddressesForBlockRange( + require.Equal(t, 0, len(syncSegments.GetAddressesForBlockRange( aggkitcommon.NewBlockRange(0, 10000), - ))) + )), + "There are no synced segments for the given block range", + ) seg1, exists := syncSegments.GetByContract(exampleAddr1) require.True(t, exists) require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) @@ -110,6 +112,16 @@ func TestStorage_UpdateSyncedStatus(t *testing.T) { require.True(t, exists) require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange) require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock) + + invalidSyncSegment := mdrtypes.NewSyncSegment( + exampleAddr1, + aggkitcommon.NewBlockRange(0, 0), + aggkittypes.FinalizedBlock, + true, + ) + err = storage.UpdateSyncedStatus(nil, []mdrtypes.SyncSegment{invalidSyncSegment}) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid segment") } func TestSyncStatusRow_ToSyncSegment(t *testing.T) { @@ -165,4 +177,18 @@ func TestSyncStatusRow_ToSyncSegment(t *testing.T) { require.Contains(t, err.Error(), "error parsing target to block finality") require.Equal(t, mdrtypes.SyncSegment{}, segment) }) + t.Run("empty range", func(t *testing.T) { + row := syncStatusRow{ + Address: exampleAddr1, + TargetFromBlock: 1000, + TargetToBlock: "FinalizedBlock", + SyncedFromBlock: 0, + SyncedToBlock: 0, + SyncersIDs: "syncer1", + } + + segment, err := row.ToSyncSegment() + require.NoError(t, err) + require.Equal(t, true, segment.BlockRange.IsEmpty()) + }) } diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go index 32abd502b..0718fff38 100644 --- a/multidownloader/sync/evmdownloader.go +++ b/multidownloader/sync/evmdownloader.go @@ -23,10 +23,10 @@ var ( ) type EVMDownloader struct { - mdr mdrsynctypes.MultidownloaderInterface - logger aggkitcommon.Logger - rh *sync.RetryHandler - appender sync.LogAppenderMap + multidownloader mdrsynctypes.MultidownloaderInterface + logger aggkitcommon.Logger + rh *sync.RetryHandler + appender sync.LogAppenderMap // Maximum duration to wait to catch up the maximum request waitPeriodToCatchUpMaximumLogRange time.Duration pullingPeriod time.Duration @@ -41,7 +41,7 @@ func NewEVMDownloader( pullingPeriod time.Duration, ) *EVMDownloader { return &EVMDownloader{ - mdr: mdr, + multidownloader: mdr, logger: logger, rh: rh, appender: appender, @@ -51,7 +51,7 @@ func NewEVMDownloader( } func (d *EVMDownloader) Finality() aggkittypes.BlockNumberFinality { - return d.mdr.Finality() + return d.multidownloader.Finality() } func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, @@ -112,7 +112,7 @@ func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context, } func (d *EVMDownloader) ChainID(ctx context.Context) (uint64, error) { - return d.mdr.ChainID(ctx) + return d.multidownloader.ChainID(ctx) } // executeLogQuery executes the log query, checking for partial availability @@ -120,8 +120,8 @@ func (d *EVMDownloader) ChainID(ctx context.Context) (uint64, error) { func (d *EVMDownloader) executeLogQuery(ctx context.Context, fullLogQuery mdrtypes.LogQuery, syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) { logQuery := fullLogQuery - if !d.mdr.IsAvailable(fullLogQuery) { - isPartial, partialLogQuery := d.mdr.IsPartiallyAvailable(fullLogQuery) + if !d.multidownloader.IsAvailable(fullLogQuery) { + isPartial, partialLogQuery := d.multidownloader.IsPartiallyAvailable(fullLogQuery) if !isPartial { return nil, fmt.Errorf("DownloadNextBlocks: logs not available for query: %s. Err: %w", fullLogQuery.String(), ErrLogsNotAvailable) @@ -129,7 +129,7 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, logQuery = *partialLogQuery } - logQueryResponse, err := d.mdr.LogQuery(ctx, logQuery) + logQueryResponse, err := d.multidownloader.LogQuery(ctx, logQuery) if err != nil { return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: cannot get logs: %w", err) } @@ -153,7 +153,7 @@ func (d *EVMDownloader) executeLogQuery(ctx context.Context, func (d *EVMDownloader) getFullBlockRange(ctx context.Context, syncerConfig aggkittypes.SyncerConfig) (*aggkitcommon.BlockRange, error) { - blockTo, err := d.mdr.HeaderByNumber(ctx, &syncerConfig.ToBlock) + blockTo, err := d.multidownloader.HeaderByNumber(ctx, &syncerConfig.ToBlock) if err != nil || blockTo == nil { return nil, fmt.Errorf("EVMDownloader.getFullBlockRange: error getting 'to' block header: %w", err) } @@ -190,7 +190,7 @@ func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context, } } - hdr, _, err := d.mdr.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber)) + hdr, _, err := d.multidownloader.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber)) if err != nil { d.logger.Errorf("EVMDownloader: error getting block header for block number %d: %v", lastBlockNumber, err) return nil @@ -314,12 +314,12 @@ func (d *EVMDownloader) checkReorgedBlock(ctx context.Context, return nil } // Check blockHeader is not reorged - isValid, reorgID, err := d.mdr.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash) + isValid, reorgID, err := d.multidownloader.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash) if err != nil { return err } if !isValid { - reorgData, err := d.mdr.GetReorgedDataByReorgID(ctx, reorgID) + reorgData, err := d.multidownloader.GetReorgedDataByReorgID(ctx, reorgID) if err != nil { return err } diff --git a/multidownloader/sync/evmdownloader_test.go b/multidownloader/sync/evmdownloader_test.go index f4fb6f65d..5f636c81a 100644 --- a/multidownloader/sync/evmdownloader_test.go +++ b/multidownloader/sync/evmdownloader_test.go @@ -47,7 +47,7 @@ func TestDownloadNextBlocks_Success(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: appender, @@ -122,7 +122,7 @@ func TestDownloadNextBlocks_ContextCancellation(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -157,7 +157,7 @@ func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -211,7 +211,7 @@ func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: appender, @@ -284,7 +284,7 @@ func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: appender, @@ -368,7 +368,7 @@ func TestDownloadNextBlocks_TimeoutWaitingForLogs(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -415,7 +415,7 @@ func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -462,7 +462,7 @@ func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -522,7 +522,7 @@ func TestExecuteLogQuery_FullyAvailable(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: appender, @@ -596,7 +596,7 @@ func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: appender, @@ -666,7 +666,7 @@ func TestExecuteLogQuery_NotAvailable(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -701,7 +701,7 @@ func TestExecuteLogQuery_GetEthLogsError(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -734,7 +734,7 @@ func TestNewMaxLogQuery_WithLastBlockHeader(t *testing.T) { } download := &EVMDownloader{ - mdr: nil, + multidownloader: nil, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -767,7 +767,7 @@ func TestNewMaxLogQuery_WithoutLastBlockHeader(t *testing.T) { } download := &EVMDownloader{ - mdr: nil, + multidownloader: nil, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -797,7 +797,7 @@ func TestCheckReorgedBlock_NilBlockHeader(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -821,7 +821,7 @@ func TestCheckReorgedBlock_ValidBlock(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -851,7 +851,7 @@ func TestCheckReorgedBlock_InvalidBlock(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -893,7 +893,7 @@ func TestCheckReorgedBlock_ContextCancellation(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -922,7 +922,7 @@ func TestCheckReorgedBlock_CheckValidBlockError(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -953,7 +953,7 @@ func TestCheckReorgedBlock_GetReorgedDataError(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -985,7 +985,7 @@ func TestCheckReorgedBlock_NilReorgData(t *testing.T) { } download := &EVMDownloader{ - mdr: mockMdr, + multidownloader: mockMdr, logger: logger, rh: rh, appender: sync.LogAppenderMap{}, @@ -1025,7 +1025,7 @@ func TestAppendLog_Success(t *testing.T) { } download := &EVMDownloader{ - mdr: nil, + multidownloader: nil, logger: logger, rh: rh, appender: appender, @@ -1074,7 +1074,7 @@ func TestAppendLog_RetryOnError(t *testing.T) { } download := &EVMDownloader{ - mdr: nil, + multidownloader: nil, logger: logger, rh: rh, appender: appender, diff --git a/multidownloader/types/log_query.go b/multidownloader/types/log_query.go index d2cc6bc4b..fa0c798e7 100644 --- a/multidownloader/types/log_query.go +++ b/multidownloader/types/log_query.go @@ -57,7 +57,7 @@ func (l *LogQuery) String() string { if l == nil { return "LogQuery: " } - if l.BlockHash != nil { + if l.IsBlockHashQuery() { bn := " (?)" if !l.BlockRange.IsEmpty() { bn = fmt.Sprintf(" (%d)", l.BlockRange.FromBlock) @@ -66,6 +66,12 @@ func (l *LogQuery) String() string { } return fmt.Sprintf("LogQuery: addrs=%v, blockRange=%s", l.Addrs, l.BlockRange.String()) } +func (l *LogQuery) IsBlockHashQuery() bool { + return l != nil && l.BlockHash != nil +} +func (l *LogQuery) IsBlockRangeQuery() bool { + return l != nil && l.BlockHash == nil +} // ToRPCFilterQuery converts the LogQuery to an Ethereum FilterQuery func (l *LogQuery) ToRPCFilterQuery() ethereum.FilterQuery { @@ -81,3 +87,22 @@ func (l *LogQuery) ToRPCFilterQuery() ethereum.FilterQuery { ToBlock: new(big.Int).SetUint64(l.BlockRange.ToBlock), } } +func (l *LogQuery) IsEmpty() bool { + return l == nil || len(l.Addrs) == 0 && l.BlockRange.IsEmpty() && + l.BlockHash == nil +} + +func (l *LogQuery) IsValid() bool { + if l == nil { + return true + } + if l.BlockHash != nil { + return true + } + // We use value {0,0} to represent empty range in DB, so it's forbidden + // to use the BlockRange(0,0) for multidownloader + if !l.BlockRange.IsEmpty() && l.BlockRange.FromBlock == 0 && l.BlockRange.ToBlock == 0 { + return false + } + return true +} diff --git a/multidownloader/types/log_query_test.go b/multidownloader/types/log_query_test.go index 56bedf50e..5899231d0 100644 --- a/multidownloader/types/log_query_test.go +++ b/multidownloader/types/log_query_test.go @@ -83,3 +83,30 @@ func TestLogQuery_BlockHash(t *testing.T) { require.Equal(t, "LogQuery: addrs=[0x0000000000000000000000000000000000000123], blockHash=0x0000000000000000000000000000000000000000000000000000000000000abc (1234)", lq.String()) } +func TestLogQuery_IsEmpty(t *testing.T) { + var lq *LogQuery + require.True(t, lq.IsEmpty()) + + lq = &LogQuery{} + require.True(t, lq.IsEmpty()) + + lq.BlockRange = aggkitcommon.NewBlockRange(1, 10) + require.False(t, lq.IsEmpty()) + + lq.BlockRange = aggkitcommon.BlockRangeZero + require.True(t, lq.IsEmpty()) + + lq.BlockHash = new(common.Hash) + require.False(t, lq.IsEmpty()) +} + +func TestLogQuery_IsValid(t *testing.T) { + var lq *LogQuery + require.True(t, lq.IsValid()) + lq = &LogQuery{} + require.True(t, lq.IsValid(), "blockRange is {0,0} bu is empty") + lq.BlockRange = aggkitcommon.NewBlockRange(0, 0) + require.False(t, lq.IsValid()) + lq.BlockHash = new(common.Hash) + require.True(t, lq.IsValid(), "bn={0,0} but it use blockHash") +} diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go index d20660d11..f6be0bc37 100644 --- a/multidownloader/types/mocks/mock_reorg_processor.go +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -5,8 +5,11 @@ package mocks import ( context "context" - types "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/multidownloader/types" ) // ReorgProcessor is an autogenerated mock type for the ReorgProcessor type @@ -22,17 +25,17 @@ func (_m *ReorgProcessor) EXPECT() *ReorgProcessor_Expecter { return &ReorgProcessor_Expecter{mock: &_m.Mock} } -// ProcessReorg provides a mock function with given fields: ctx, detectedReorgError -func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, detectedReorgError types.DetectedReorgError) error { - ret := _m.Called(ctx, detectedReorgError) +// ProcessReorg provides a mock function with given fields: ctx, detectedReorgError, finalizedBlockTag +func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, detectedReorgError types.DetectedReorgError, finalizedBlockTag aggkittypes.BlockNumberFinality) error { + ret := _m.Called(ctx, detectedReorgError, finalizedBlockTag) if len(ret) == 0 { panic("no return value specified for ProcessReorg") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.DetectedReorgError) error); ok { - r0 = rf(ctx, detectedReorgError) + if rf, ok := ret.Get(0).(func(context.Context, types.DetectedReorgError, aggkittypes.BlockNumberFinality) error); ok { + r0 = rf(ctx, detectedReorgError, finalizedBlockTag) } else { r0 = ret.Error(0) } @@ -48,13 +51,14 @@ type ReorgProcessor_ProcessReorg_Call struct { // ProcessReorg is a helper method to define mock.On call // - ctx context.Context // - detectedReorgError types.DetectedReorgError -func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, detectedReorgError interface{}) *ReorgProcessor_ProcessReorg_Call { - return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, detectedReorgError)} +// - finalizedBlockTag aggkittypes.BlockNumberFinality +func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, detectedReorgError interface{}, finalizedBlockTag interface{}) *ReorgProcessor_ProcessReorg_Call { + return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, detectedReorgError, finalizedBlockTag)} } -func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, detectedReorgError types.DetectedReorgError)) *ReorgProcessor_ProcessReorg_Call { +func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, detectedReorgError types.DetectedReorgError, finalizedBlockTag aggkittypes.BlockNumberFinality)) *ReorgProcessor_ProcessReorg_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.DetectedReorgError)) + run(args[0].(context.Context), args[1].(types.DetectedReorgError), args[2].(aggkittypes.BlockNumberFinality)) }) return _c } @@ -64,7 +68,7 @@ func (_c *ReorgProcessor_ProcessReorg_Call) Return(_a0 error) *ReorgProcessor_Pr return _c } -func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, types.DetectedReorgError) error) *ReorgProcessor_ProcessReorg_Call { +func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, types.DetectedReorgError, aggkittypes.BlockNumberFinality) error) *ReorgProcessor_ProcessReorg_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/types/reorg_processor.go b/multidownloader/types/reorg_processor.go index f64e415e4..29d079509 100644 --- a/multidownloader/types/reorg_processor.go +++ b/multidownloader/types/reorg_processor.go @@ -1,10 +1,20 @@ package types -import "context" +import ( + "context" + + aggkittypes "github.com/agglayer/aggkit/types" +) type ReorgProcessor interface { // ProcessReorg processes a detected reorg starting from the offending block number. // It identifies the range of blocks affected by the reorg and takes necessary actions // to handle the reorganization. - ProcessReorg(ctx context.Context, detectedReorgError DetectedReorgError) error + // input paramaeters: + // - ctx: the context for managing cancellation and timeouts + // - detectedReorgError: the error returned by the reorg detection logic, containing + // the offending block number and the reason for the reorg detection + // - finalizedBlockTag: the block tag to consider as finalized (typically finalizedBlock) + ProcessReorg(ctx context.Context, detectedReorgError DetectedReorgError, + finalizedBlockTag aggkittypes.BlockNumberFinality) error } diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index eb1bf3293..7a5433829 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -1,12 +1,10 @@ package types import ( - "context" "fmt" "strings" aggkitcommon "github.com/agglayer/aggkit/common" - ethermantypes "github.com/agglayer/aggkit/etherman/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) @@ -37,7 +35,7 @@ func NewSetSyncSegment() SetSyncSegment { } // NewSetSyncSegmentFromLogQuery creates a new SetSyncSegment from a LogQuery -func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) SetSyncSegment { +func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) (SetSyncSegment, error) { set := NewSetSyncSegment() for _, addr := range logQuery.Addrs { segment := SyncSegment{ @@ -46,7 +44,7 @@ func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) SetSyncSegment { } set.Add(segment) } - return set + return set, nil } // Add adds a new SyncSegment to the SetSyncSegment, merging block ranges @@ -92,7 +90,7 @@ func (f *SetSyncSegment) SubtractSegments(segments *SetSyncSegment) error { newSegments := f.Clone() for _, segment := range segments.segments { previousSegment, exists := newSegments.GetByContract(segment.ContractAddr) - if exists { + if exists && !previousSegment.IsEmpty() { brs := previousSegment.BlockRange.Subtract(segment.BlockRange) switch len(brs) { case 0: @@ -115,7 +113,10 @@ func (f *SetSyncSegment) SubtractLogQuery(logQuery *LogQuery) error { if logQuery == nil { return nil } - newSegments := NewSetSyncSegmentFromLogQuery(logQuery) + newSegments, err := NewSetSyncSegmentFromLogQuery(logQuery) + if err != nil { + return err + } return f.SubtractSegments(&newSegments) } func isIncluded(ranges []aggkitcommon.BlockRange, br aggkitcommon.BlockRange) bool { @@ -154,21 +155,27 @@ func (f *SetSyncSegment) TotalBlocks() uint64 { return total } -// UpdateTargetBlockToNumber updates the ToBlock to real blockNumber -func (f *SetSyncSegment) UpdateTargetBlockToNumber(ctx context.Context, - blockNotifierGetter ethermantypes.BlockNotifierManager) error { +// GetTargetToBlockTags returns the list of TargetToBlock tags in the +// SetSyncSegment witout duplicates +func (f *SetSyncSegment) GetTargetToBlockTags() []aggkittypes.BlockNumberFinality { if f == nil { return nil } + result := make([]aggkittypes.BlockNumberFinality, 0, len(f.segments)) for _, segment := range f.segments { - currentBlock, err := blockNotifierGetter.GetCurrentBlockNumber(ctx, segment.TargetToBlock) - if err != nil { - return fmt.Errorf("setSyncSegment.UpdateToBlock: error getting BlockNotifier for finality=%s: %w", - segment.TargetToBlock.String(), err) + // if it's already in list don't add it again + exists := false + for _, existing := range result { + if existing == segment.TargetToBlock { + exists = true + break + } + } + if !exists { + result = append(result, segment.TargetToBlock) } - segment.UpdateToBlock(currentBlock) } - return nil + return result } // IsAvailable checks if the required LogQuery data is already synced @@ -425,3 +432,11 @@ func (s *SetSyncSegment) GetContracts() []common.Address { } return contracts } + +func (s *SetSyncSegment) GetSegments() []SyncSegment { + res := make([]SyncSegment, 0, len(s.segments)) + for _, segment := range s.segments { + res = append(res, *segment) + } + return res +} diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 5816e499e..45eb3c7c4 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -4,10 +4,8 @@ import ( "testing" aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/etherman/types/mocks" aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -150,6 +148,32 @@ func TestSetSyncSegment_Subtract(t *testing.T) { result := set1.SubtractSegments(&set2) require.NotNil(t, result) }) + + t.Run("subtract from empty BlockRange", func(t *testing.T) { + set1 := NewSetSyncSegment() + set2 := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + set1.Add(SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + }) + set2.Add(SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 20), + }) + emptySetStr := set1.String() + Set2Str := set2.String() + // {empty} - {0-20} = {empty} + + err := set1.SubtractSegments(&set2) + require.NoError(t, err) + require.Equal(t, emptySetStr, set1.String()) + + // {0-20} - {empty} = {0-20} + err = set2.SubtractSegments(&set1) + require.NoError(t, err) + require.Equal(t, Set2Str, set2.String()) + }) } func TestSetSyncSegment_TotalBlocks(t *testing.T) { @@ -186,29 +210,7 @@ func TestSetSyncSegment_TotalBlocks(t *testing.T) { require.Equal(t, uint64(20), set.TotalBlocks()) }) } -func TestSetSyncSegment_UpdateTargetBlockToNumber(t *testing.T) { - t.Run("nil receiver", func(t *testing.T) { - var set *SetSyncSegment - err := set.UpdateTargetBlockToNumber(t.Context(), nil) - require.NoError(t, err) - }) - - t.Run("update target block", func(t *testing.T) { - set := NewSetSyncSegment() - finality := aggkittypes.LatestBlock - segment := SyncSegment{ - ContractAddr: common.HexToAddress("0x123"), - BlockRange: aggkitcommon.NewBlockRange(1, 10), - TargetToBlock: finality, - } - set.Add(segment) - mockBlockNotifierManager := mocks.NewBlockNotifierManager(t) - mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, finality).Return(uint64(150), nil).Once() - err := set.UpdateTargetBlockToNumber(t.Context(), mockBlockNotifierManager) - require.NoError(t, err) - }) -} func TestSetSyncSegment_IsAvailable(t *testing.T) { t.Run("nil receiver", func(t *testing.T) { var set *SetSyncSegment @@ -643,41 +645,20 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Error(t, err) }) } -func TestSetSyncSegment_AfterFullySync(t *testing.T) { - set := NewSetSyncSegment() - addr := common.HexToAddress("0x123124543423") - segment := SyncSegment{ - ContractAddr: addr, - BlockRange: aggkitcommon.NewBlockRange(1, 100), - TargetToBlock: aggkittypes.LatestBlock, - } - set.Add(segment) - - logQuery := &LogQuery{ - Addrs: []common.Address{addr}, - BlockRange: aggkitcommon.NewBlockRange(1, 100), - } - - err := set.SubtractLogQuery(logQuery) - require.NoError(t, err) - // The segment is empty so is not returned by GetByContract - segment, exists := set.GetByContract(addr) - require.True(t, exists) - require.True(t, segment.IsEmpty()) - require.True(t, set.Finished()) - require.Equal(t, uint64(0), set.TotalBlocks()) - - mockBlockManager := mocks.NewBlockNotifierManager(t) - mockBlockManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock).Return(uint64(150), nil).Once() - err = set.UpdateTargetBlockToNumber(t.Context(), mockBlockManager) - require.NoError(t, err) - require.Equal(t, uint64(50), set.TotalBlocks()) - segment, exists = set.GetByContract(addr) - require.True(t, exists) - require.Equal(t, "From: 101, To: 150 (50)", segment.BlockRange.String()) -} func TestSetSyncSegment_GetTotalPendingBlockRange_WithEmptySegments(t *testing.T) { + t.Run("a segment with empty range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment) + br := set.GetTotalPendingBlockRange() + require.Nil(t, br) + }) t.Run("single empty segment returns nil", func(t *testing.T) { set := NewSetSyncSegment() addr := common.HexToAddress("0x123") diff --git a/multidownloader/types/sync_segment.go b/multidownloader/types/sync_segment.go index 1233a4019..d37c73f87 100644 --- a/multidownloader/types/sync_segment.go +++ b/multidownloader/types/sync_segment.go @@ -53,31 +53,35 @@ func (s *SyncSegment) Clone() *SyncSegment { } } -// UpdateToBlock updates the ToBlock of the SyncSegment -func (s *SyncSegment) UpdateToBlock(newToBlock uint64) { - if s == nil { - return - } - s.BlockRange.ToBlock = newToBlock -} - // Empty sets the SyncSegment (fromBlock > toBlock) to indicate it is empty func (s *SyncSegment) Empty() { if s == nil { return } // Set FromBlock greater than ToBlock to indicate empty segment - s.BlockRange = aggkitcommon.NewBlockRange( - s.BlockRange.ToBlock+1, - 0, - ) + s.BlockRange = aggkitcommon.BlockRangeZero } func (s *SyncSegment) IsEmpty() bool { if s == nil { return true } - return s.BlockRange.FromBlock > s.BlockRange.ToBlock + return s.BlockRange.IsEmpty() +} + +// There are special values like BlockRange(0,0) +// that we want to consider invalid for multidownloader, +// so we need this method to check the validity of the SyncSegment +func (s *SyncSegment) IsValid() bool { + if s.IsEmpty() { + return true + } + // We use value {0,0} to represent empty range in DB, so it's forbidden + // to use the BlockRange(0,0) for multidownloader + if !s.BlockRange.IsEmpty() && s.BlockRange.FromBlock == 0 && s.BlockRange.ToBlock == 0 { + return false + } + return true } // Equal checks if two SyncSegments are equal diff --git a/multidownloader/types/sync_segment_test.go b/multidownloader/types/sync_segment_test.go new file mode 100644 index 000000000..0aefa2faa --- /dev/null +++ b/multidownloader/types/sync_segment_test.go @@ -0,0 +1,168 @@ +package types + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestSyncSegment_IsValid(t *testing.T) { + addr := common.HexToAddress("0x123") + + tests := []struct { + name string + segment *SyncSegment + expected bool + reason string + }{ + { + name: "nil segment is valid", + segment: nil, + expected: true, + reason: "nil segment is considered empty, so it's valid", + }, + { + name: "empty segment with BlockRangeZero is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "empty BlockRange is valid", + }, + { + name: "segment with invalid range (from > to) is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "invalid range is considered empty, so it's valid", + }, + { + name: "segment with {0,0} non-empty range is INVALID", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 0), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: false, + reason: "{0,0} is reserved for DB empty representation, forbidden in multidownloader", + }, + { + name: "segment with valid range {1,10} is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "normal valid range", + }, + { + name: "segment with valid range {0,5} is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "range starting at 0 is valid as long as it's not {0,0}", + }, + { + name: "segment with single block {5,5} is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "single block range is valid", + }, + { + name: "segment with large range is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1000, 999999), + TargetToBlock: aggkittypes.LatestBlock, + }, + expected: true, + reason: "large ranges are valid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.segment.IsValid() + require.Equal(t, tt.expected, got, + "IsValid() for %s: expected %v, got %v. Reason: %s", + tt.name, tt.expected, got, tt.reason) + }) + } +} + +func TestSyncSegment_IsEmpty(t *testing.T) { + addr := common.HexToAddress("0x123") + + tests := []struct { + name string + segment *SyncSegment + expected bool + }{ + { + name: "nil segment is empty", + segment: nil, + expected: true, + }, + { + name: "segment with BlockRangeZero is empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + }, + { + name: "segment with invalid range (from > to) is empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + }, + { + name: "segment with {0,0} is not empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 0), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: false, + }, + { + name: "segment with valid range {1,10} is not empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.segment.IsEmpty() + require.Equal(t, tt.expected, got, + "IsEmpty() for %s: expected %v, got %v", + tt.name, tt.expected, got) + }) + } +} diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index 7c8749207..24f086d8c 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -138,17 +138,21 @@ func (f *SetSyncerConfig) ContractConfigs() []ContractConfig { } // SyncSegments groups the SetSyncerConfig into segments per contract address and blockRange -func (f *SetSyncerConfig) SyncSegments() (*SetSyncSegment, error) { +func (f *SetSyncerConfig) SyncSegments( + blockNumbers map[aggkittypes.BlockNumberFinality]uint64) (*SetSyncSegment, error) { segments := NewSetSyncSegment() // Trivial implementation; it needs to be improved to group by // contract address and block range for _, filter := range f.filters { // TODO: instead of calling RPC use block_notifier_values for _, addr := range filter.ContractAddresses { + toBlock, ok := blockNumbers[filter.ToBlock] + if !ok { + return nil, fmt.Errorf("SyncSegments: block number for finality %s not found", filter.ToBlock.String()) + } segment := SyncSegment{ - ContractAddr: addr, - // Initially set ToBlock as 0; it will be updated later - BlockRange: aggkitcommon.NewBlockRange(filter.FromBlock, 0), + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(filter.FromBlock, toBlock), TargetToBlock: filter.ToBlock, } segments.Add(segment) @@ -157,6 +161,29 @@ func (f *SetSyncerConfig) SyncSegments() (*SetSyncSegment, error) { return &segments, nil } +// GetTargetToBlockTags returns the list of TargetToBlock tags in the +// SetSyncSegment witout duplicates +func (f *SetSyncerConfig) GetTargetToBlockTags() []aggkittypes.BlockNumberFinality { + if f == nil { + return nil + } + result := make([]aggkittypes.BlockNumberFinality, 0, len(f.filters)) + for _, segment := range f.filters { + // if it's already in list don't add it again + exists := false + for _, existing := range result { + if existing == segment.ToBlock { + exists = true + break + } + } + if !exists { + result = append(result, segment.ToBlock) + } + } + return result +} + // convertContractMapToSlice converts map to slice func convertContractMapToSlice(contractMap map[common.Address]*ContractConfig) []ContractConfig { contractConfigs := make([]ContractConfig, 0, len(contractMap)) diff --git a/types/list_block_header.go b/types/list_block_header.go index c5978325b..244764c68 100644 --- a/types/list_block_header.go +++ b/types/list_block_header.go @@ -46,7 +46,7 @@ func (lbs ListBlockHeaders) BlockNumbers() []uint64 { func (lbs ListBlockHeaders) BlockRange() aggkitcommon.BlockRange { if len(lbs) == 0 { - return aggkitcommon.BlockRange{} + return aggkitcommon.BlockRangeZero } var minBlock, maxBlock uint64 initialized := false @@ -67,7 +67,7 @@ func (lbs ListBlockHeaders) BlockRange() aggkitcommon.BlockRange { } } if !initialized { - return aggkitcommon.BlockRange{} + return aggkitcommon.BlockRangeZero } return aggkitcommon.NewBlockRange(minBlock, maxBlock) } From c846002ae9836772c91fbb278e5080112dc59474 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:33:50 +0100 Subject: [PATCH 72/75] - RetrieveBlockHeaders returns individual errors, fix UT --- etherman/batch_requests.go | 197 +++++++--- etherman/batch_requests_test.go | 351 ++++++++++-------- etherman/errors.go | 4 + l1infotreesync/e2e_test.go | 16 +- multidownloader/evm_multidownloader.go | 51 ++- multidownloader/evm_multidownloader_test.go | 21 +- multidownloader/reorg_processor_test.go | 52 +++ multidownloader/state.go | 5 +- multidownloader/state_test.go | 2 +- multidownloader/types/set_sync_segment.go | 5 +- .../types/set_sync_segment_test.go | 4 +- 11 files changed, 482 insertions(+), 226 deletions(-) diff --git a/etherman/batch_requests.go b/etherman/batch_requests.go index 497feccf4..a71821a7e 100644 --- a/etherman/batch_requests.go +++ b/etherman/batch_requests.go @@ -3,7 +3,9 @@ package etherman import ( "context" "fmt" + "maps" "math/big" + "sort" "sync" aggkitcommon "github.com/agglayer/aggkit/common" @@ -27,6 +29,9 @@ func (b *blockRawEth) String() string { } func (b *blockRawEth) ToBlockHeader() (*aggkittypes.BlockHeader, error) { + if b.Number == "" && b.Hash == "" { + return nil, fmt.Errorf("blockRawEth.ToBlockHeader: empty: %w", ErrNotFound) + } number, err := aggkitcommon.ParseUint64HexOrDecimal(b.Number) if err != nil { return nil, fmt.Errorf("blockRawEth.ToBlockHeader: parsing block number %s: %w", b.Number, err) @@ -49,67 +54,163 @@ func (b *blockRawEth) ToBlockHeader() (*aggkittypes.BlockHeader, error) { // https://www.alchemy.com/docs/reference/batch-requests const batchRequestLimitHTTP = 1000 +// BlockHeadersResult contiene los resultados de la recuperaciΓ³n de block headers, +// separando los exitosos de los fallidos +type BlockHeadersResult struct { + // Headers contiene los block headers recuperados exitosamente, mapeados por block number + Headers map[uint64]*aggkittypes.BlockHeader + + // Errors contiene los errores de recuperaciΓ³n, mapeados por block number + Errors map[uint64]error +} + +// NewBlockHeadersResult crea un nuevo BlockHeadersResult +func NewBlockHeadersResult() *BlockHeadersResult { + return &BlockHeadersResult{ + Headers: make(map[uint64]*aggkittypes.BlockHeader), + Errors: make(map[uint64]error), + } +} + +// Success retorna true si todos los bloques se recuperaron exitosamente +func (r *BlockHeadersResult) Success() bool { + return len(r.Errors) == 0 +} + +// PartialSuccess retorna true si al menos un bloque se recuperΓ³ exitosamente +func (r *BlockHeadersResult) PartialSuccess() bool { + return len(r.Headers) > 0 +} + +// GetOrderedHeaders retorna los headers en el orden de blockNumbers solicitados, +// solo para los bloques que se recuperaron exitosamente +func (r *BlockHeadersResult) GetOrderedHeaders(blockNumbers []uint64) []*aggkittypes.BlockHeader { + result := make([]*aggkittypes.BlockHeader, 0, len(r.Headers)) + for _, bn := range blockNumbers { + if header, ok := r.Headers[bn]; ok { + result = append(result, header) + } + } + return result +} + +// AddHeader aΓ±ade un header exitoso al resultado +func (r *BlockHeadersResult) AddHeader(blockNumber uint64, header *aggkittypes.BlockHeader) { + r.Headers[blockNumber] = header +} + +// AddError aΓ±ade un error para un block number especΓ­fico +func (r *BlockHeadersResult) AddError(blockNumber uint64, err error) { + r.Errors[blockNumber] = err +} + +// Merge combina otro BlockHeadersResult en este +func (r *BlockHeadersResult) Merge(other *BlockHeadersResult) { + maps.Copy(r.Headers, other.Headers) + maps.Copy(r.Errors, other.Errors) +} + +func (r *BlockHeadersResult) AreAllErrorsNotFound() bool { + for _, err := range r.Errors { + if !IsErrNotFound(err) { + return false + } + } + return true +} + +// ListBlocksNumberNotFound returns the list of not found block numbers in the result ordered by block number +func (r *BlockHeadersResult) ListBlocksNumberNotFound() []uint64 { + var notFoundBlocks []uint64 + for bn, err := range r.Errors { + if IsErrNotFound(err) { + notFoundBlocks = append(notFoundBlocks, bn) + } + } + sort.Slice(notFoundBlocks, func(i, j int) bool { + return notFoundBlocks[i] < notFoundBlocks[j] + }) + return notFoundBlocks +} + +// ComposeError returns a single error summarizing the errors in the result, or nil if there are no errors +func (r *BlockHeadersResult) ComposeError() error { + if len(r.Errors) == 0 { + return nil + } + errResult := fmt.Errorf("RetrieveBlockHeaders errors") + errBlockNumbers := r.ListBlocksNumberNotFound() + for _, bn := range errBlockNumbers { + errResult = fmt.Errorf("%w\nBlock %d: %w", errResult, bn, r.Errors[bn]) + } + return errResult +} + // RetrieveBlockHeaders retrieves block headers for the given block numbers using batch requests -// if rpcClient is provided +// if rpcClient is provided. Returns a BlockHeadersResult with successful headers and individual errors. +// The returned error is only for catastrophic failures (context cancelled, etc.) func RetrieveBlockHeaders(ctx context.Context, log aggkitcommon.Logger, ethClient aggkittypes.BaseEthereumClienter, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, - maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { + maxConcurrency int) (*BlockHeadersResult, error) { if rpcClient != nil { return RetrieveBlockHeadersBatch(ctx, log, rpcClient, blockNumbers, maxConcurrency) } return RetrieveBlockHeadersLegacy(ctx, log, ethClient, blockNumbers, maxConcurrency) } -// RetrieveBlockHeaders retrieves block headers for the given block numbers using batch requests -// with concurrency control +// RetrieveBlockHeadersBatch retrieves block headers for the given block numbers using batch requests +// with concurrency control. Returns a BlockHeadersResult with successful headers and individual errors. func RetrieveBlockHeadersBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, - maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { + maxConcurrency int) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatch(ctx, log, rpcClient, blocks) }, blockNumbers, batchRequestLimitHTTP, maxConcurrency) } // RetrieveBlockHeadersLegacy retrieves block headers for the given block numbers using individual requests // this is used in simulated environments where batch requests are not supported +// Returns a BlockHeadersResult with successful headers and individual errors for failed blocks func RetrieveBlockHeadersLegacy(ctx context.Context, log aggkitcommon.Logger, ethClient aggkittypes.BaseEthereumClienter, blockNumbers []uint64, - maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + maxConcurrency int) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { - result := aggkittypes.NewListBlockHeaders(len(blocks)) - for i, blockNumber := range blocks { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { + result := NewBlockHeadersResult() + for _, blockNumber := range blocks { header, err := ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) if err != nil { - return nil, fmt.Errorf("RetrieveBlockHeadersLegacy: cannot get block header for block %d: %w", - blockNumber, err) + result.AddError(blockNumber, fmt.Errorf("cannot get block header: %w", err)) + continue } - result[i] = aggkittypes.NewBlockHeaderFromEthHeader(header) + result.AddHeader(blockNumber, aggkittypes.NewBlockHeaderFromEthHeader(header)) } return result, nil }, blockNumbers, 1, maxConcurrency) } // retrieveBlockHeadersInBatch retrieves block headers for the given block numbers using batch requests +// Returns a BlockHeadersResult with successful headers and individual errors for failed blocks func retrieveBlockHeadersInBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, -) (aggkittypes.ListBlockHeaders, error) { +) (*BlockHeadersResult, error) { + result := NewBlockHeadersResult() if len(blockNumbers) == 0 { - return aggkittypes.NewListBlockHeadersEmpty(0), nil + return result, nil } headers := make([]*blockRawEth, len(blockNumbers)) timeTracker := aggkitcommon.NewTimeTracker() @@ -120,7 +221,7 @@ func retrieveBlockHeadersInBatch(ctx context.Context, bn := fmt.Sprintf("0x%x", blockNumber) batch = append(batch, rpc.BatchElem{ Method: "eth_getBlockByNumber", - Args: []interface{}{bn, false}, + Args: []any{bn, false}, Result: headers[idx], }) } @@ -128,58 +229,66 @@ func retrieveBlockHeadersInBatch(ctx context.Context, err := rpcClient.BatchCallContext(ctx, batch) timeTracker.Stop() if err != nil { + // Catastrophic error: the whole batch call failed return nil, fmt.Errorf("retrieveRPCBlockHeadersInBatch(%d): BatchCallContext error: %w", len(blockNumbers), err) } + + // Process each element individually, collecting successes and failures for i, elem := range batch { + blockNumber := blockNumbers[i] if elem.Error != nil { - return nil, fmt.Errorf("retrieveRPCBlockHeadersInBatch(%d): batch element %d (%v) error: %w", len(blockNumbers), i, - elem.Args, - elem.Error) + result.AddError(blockNumber, fmt.Errorf("batch element error: %w", elem.Error)) + continue + } + // Try to convert the raw block to BlockHeader + bh, err := headers[i].ToBlockHeader() + if err != nil { + result.AddError(blockNumber, fmt.Errorf("converting block: %w", err)) + continue } + result.AddHeader(blockNumber, bh) } - log.Debugf("retrieveRPCBlockHeadersInBatch: Retrieved block headers for blocks %d in %s (elapsed)", - len(blockNumbers), timeTracker.Duration().String()) - return convertSliceBlockRawEth(headers) + + log.Debugf("retrieveRPCBlockHeadersInBatch: Retrieved %d/%d block headers in %s (elapsed)", + len(result.Headers), len(blockNumbers), timeTracker.Duration().String()) + return result, nil } // retrieveBlockHeadersInBatchParallel split request into chuncks and execute it in parallel +// Returns a BlockHeadersResult with all successful headers and individual errors func retrieveBlockHeadersInBatchParallel( ctx context.Context, logger aggkitcommon.Logger, - funcRetrieval func(context.Context, []uint64) (aggkittypes.ListBlockHeaders, error), + funcRetrieval func(context.Context, []uint64) (*BlockHeadersResult, error), blockNumbers []uint64, - chunckSize, maxConcurrency int) (aggkittypes.ListBlockHeaders, error) { + chunckSize, maxConcurrency int) (*BlockHeadersResult, error) { var mu sync.Mutex g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrency) chuncks := splitBlockNumbersIntoChunks(blockNumbers, chunckSize) - results := make(map[uint64]*aggkittypes.BlockHeader, len(blockNumbers)) + finalResult := NewBlockHeadersResult() + for _, chunck := range chuncks { g.Go(func() error { - headers, err := funcRetrieval(ctx, chunck) + chunkResult, err := funcRetrieval(ctx, chunck) if err != nil { - return fmt.Errorf("RetrieveBlockHeadersInBatchParallel: %w", err) + // Catastrophic error in this chunk (e.g., context cancelled) + return fmt.Errorf("RetrieveBlockHeadersInBatchParallel: %w", err) } mu.Lock() defer mu.Unlock() - for _, header := range headers { - results[header.Number] = header - } + finalResult.Merge(chunkResult) return nil }) } if err := g.Wait(); err != nil { + // Catastrophic error occurred return nil, err } - // convert map to sorted slice by block number - finalResults := make([]*aggkittypes.BlockHeader, len(blockNumbers)) - for idx, bn := range blockNumbers { - finalResults[idx] = results[bn] - } - logger.Debugf("retrieveRPCBlockHeadersInParallel: Retrieved block headers for blocks %d", - len(blockNumbers)) - return finalResults, nil + logger.Debugf("retrieveRPCBlockHeadersInParallel: Retrieved %d/%d block headers", + len(finalResult.Headers), len(blockNumbers)) + return finalResult, nil } func splitBlockNumbersIntoChunks(blockNumbers []uint64, chunkSize int) [][]uint64 { @@ -199,15 +308,3 @@ func splitBlockNumbersIntoChunks(blockNumbers []uint64, chunkSize int) [][]uint6 } return chunks } - -func convertSliceBlockRawEth(blocks []*blockRawEth) ([]*aggkittypes.BlockHeader, error) { - result := make([]*aggkittypes.BlockHeader, 0, len(blocks)) - for idx, blockRawEth := range blocks { - bh, err := blockRawEth.ToBlockHeader() - if err != nil { - return nil, fmt.Errorf("convert: converting block number %d (%s): %w", idx, blocks[idx].String(), err) - } - result = append(result, bh) - } - return result, nil -} diff --git a/etherman/batch_requests_test.go b/etherman/batch_requests_test.go index 0f6b9d5a6..a35af0424 100644 --- a/etherman/batch_requests_test.go +++ b/etherman/batch_requests_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "math/big" + "os" "testing" "github.com/agglayer/aggkit/log" @@ -11,135 +12,49 @@ import ( mockaggkittypes "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func TestConvertMapBlockRawEth(t *testing.T) { - tests := []struct { - name string - blocks []*blockRawEth - expected []*aggkittypes.BlockHeader - expectedError bool - }{ - { - name: "empty map", - blocks: []*blockRawEth{}, - expected: []*aggkittypes.BlockHeader{}, - expectedError: false, - }, - { - name: "single valid block", - blocks: []*blockRawEth{ - { - Number: "0x7b", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "0x5f5e100", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: []*aggkittypes.BlockHeader{ - { - Number: 123, - Hash: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), - Time: 100000000, - ParentHash: func() *common.Hash { - h := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") - return &h - }(), - }, - }, - expectedError: false, - }, - { - name: "multiple valid blocks", - blocks: []*blockRawEth{ - { - Number: "0x64", - Hash: "0x1111111111111111111111111111111111111111111111111111111111111111", - Timestamp: "0x1000", - ParentHash: "0x2222222222222222222222222222222222222222222222222222222222222222", - }, - { - Number: "0xc8", - Hash: "0x3333333333333333333333333333333333333333333333333333333333333333", - Timestamp: "0x2000", - ParentHash: "0x4444444444444444444444444444444444444444444444444444444444444444", - }, - }, - expected: []*aggkittypes.BlockHeader{ - { - Number: 100, - Hash: common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), - Time: 4096, - ParentHash: func() *common.Hash { - h := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") - return &h - }(), - }, - { - Number: 200, - Hash: common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), - Time: 8192, - ParentHash: func() *common.Hash { - h := common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") - return &h - }(), - }, - }, - expectedError: false, - }, - { - name: "invalid block number format", - blocks: []*blockRawEth{ - { - Number: "invalid", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "0x5f5e100", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: nil, - expectedError: true, - }, - { - name: "invalid timestamp format", - blocks: []*blockRawEth{ - { - Number: "0x7b", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "invalid", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := convertSliceBlockRawEth(tt.blocks) +func TestRetrieveBlockHeadersBatchExploratory(t *testing.T) { + t.Skip("This test is for exploratory purposes to check the behavior of batch requests" + + " It requires a real RPC endpoint because simulated doesn't support batch calls") + ctx := t.Context() + logger := log.WithFields("modules", "test") + // Get L1URL from environment variable + l1url := os.Getenv("L1URL") + ethClient, err := ethclient.Dial(l1url) + require.NoError(t, err) + latestBlockNumber, err := ethClient.BlockNumber(ctx) + require.NoError(t, err) + log.Infof("Latest block number: %d", latestBlockNumber) + rpcClient, err := rpc.DialContext(ctx, l1url) + require.NoError(t, err) + requestedBlockNumbers := []uint64{latestBlockNumber - 10, latestBlockNumber, latestBlockNumber + 10} - if tt.expectedError { - require.Error(t, err) - assert.Contains(t, err.Error(), "convert: converting block number") - } else { - require.NoError(t, err) - assert.Equal(t, len(tt.expected), len(result)) - for i, expectedHeader := range tt.expected { - actualHeader := result[i] - assert.Equal(t, expectedHeader.Number, actualHeader.Number) - assert.Equal(t, expectedHeader.Hash, actualHeader.Hash) - assert.Equal(t, expectedHeader.Time, actualHeader.Time) - assert.Equal(t, expectedHeader.ParentHash, actualHeader.ParentHash) - } - } - }) + res, err := RetrieveBlockHeadersBatch(ctx, logger, + rpcClient, + requestedBlockNumbers, 10) + require.NoError(t, err) + require.False(t, res.Success()) + require.True(t, res.PartialSuccess()) + require.Equal(t, 2, len(res.Headers)) + for _, number := range requestedBlockNumbers { + err, ok := res.Errors[number] + if ok { + isNotFound := IsErrNotFound(err) + require.True(t, isNotFound, "Expected error for block %d to be not found, got: %s", number, err.Error()) + log.Infof("Error retrieving block header for block %d: %s", number, err.Error()) + continue + } + require.NotNil(t, res.Headers[number]) + log.Infof(" Retrieved block header for block %d: hash %s", number, res.Headers[number].Hash.Hex()) } } + func TestRetrieveBlockHeaders(t *testing.T) { ctx := t.Context() logger := log.WithFields("test", "test") @@ -170,7 +85,8 @@ func TestRetrieveBlockHeaders(t *testing.T) { result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, mockRPCClient, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) t.Run("uses legacy when rpcClient is nil", func(t *testing.T) { @@ -185,7 +101,8 @@ func TestRetrieveBlockHeaders(t *testing.T) { result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) t.Run("propagates error from batch method", func(t *testing.T) { @@ -197,13 +114,17 @@ func TestRetrieveBlockHeaders(t *testing.T) { require.Contains(t, err.Error(), "batch error") }) - t.Run("propagates error from legacy method", func(t *testing.T) { + t.Run("collects errors from legacy method", func(t *testing.T) { mockEthClient := mockaggkittypes.NewBaseEthereumClienter(t) - mockEthClient.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, errors.New("legacy error")).Maybe() - _, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) + mockEthClient.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, errors.New("legacy error")).Times(len(blockNumbers)) + result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) - require.Error(t, err) - require.Contains(t, err.Error(), "legacy error") + require.NoError(t, err) // No catastrophic error + require.False(t, result.Success()) + require.Equal(t, len(blockNumbers), len(result.Errors)) + for _, blockErr := range result.Errors { + require.Contains(t, blockErr.Error(), "legacy error") + } }) } func TestRetrieveBlockHeadersLegacy(t *testing.T) { @@ -229,7 +150,8 @@ func TestRetrieveBlockHeadersLegacy(t *testing.T) { result, err := RetrieveBlockHeadersLegacy(ctx, logger, mockEthClient, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) } @@ -247,35 +169,28 @@ func TestRetrieveBlockHeadersInBatchParallel(t *testing.T) { result, err := retrieveBlockHeadersInBatchParallel( ctx, logger, - func(ctx context.Context, blocks []uint64) (aggkittypes.ListBlockHeaders, error) { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { t.Logf("Retrieving blocks in batch: %v", blocks) - headers := make([]*aggkittypes.BlockHeader, len(blocks)) - for i, bn := range blocks { - headers[i] = &aggkittypes.BlockHeader{ + result := NewBlockHeadersResult() + for _, bn := range blocks { + result.AddHeader(bn, &aggkittypes.BlockHeader{ Number: bn, - } + }) } - return headers, nil + return result, nil }, blockNumbers, 2, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) for _, bn := range blockNumbers { - header := getBlockHeader(bn, result) + header, exists := result.Headers[bn] + require.True(t, exists) require.NotNil(t, header) assert.Equal(t, bn, header.Number) } } -func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { - for _, h := range headers { - if h.Number == bn { - return h - } - } - return nil -} - func TestSplitBlockNumbersIntoChunks(t *testing.T) { tests := []struct { name string @@ -335,3 +250,149 @@ func TestSplitBlockNumbersIntoChunks(t *testing.T) { }) } } + +func TestBlockHeadersResult_AreAllErrorsNotFound(t *testing.T) { + tests := []struct { + name string + errors map[uint64]error + expected bool + }{ + { + name: "no errors", + errors: map[uint64]error{}, + expected: true, + }, + { + name: "all errors are ErrNotFound", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: ErrNotFound, + 300: ErrNotFound, + }, + expected: true, + }, + { + name: "all errors have exact 'not found' message", + errors: map[uint64]error{ + 100: errors.New("not found"), + 200: errors.New("not found"), + }, + expected: true, + }, + { + name: "mixed - some ErrNotFound, some other errors", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: errors.New("connection timeout"), + 300: ErrNotFound, + }, + expected: false, + }, + { + name: "errors with 'not found' in message but not exact match", + errors: map[uint64]error{ + 100: errors.New("batch element error: not found"), + 200: errors.New("converting block: not found"), + }, + expected: false, // IsErrNotFound requires exact "not found" message + }, + { + name: "no not found errors", + errors: map[uint64]error{ + 100: errors.New("connection error"), + 200: errors.New("timeout"), + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := &BlockHeadersResult{ + Headers: make(map[uint64]*aggkittypes.BlockHeader), + Errors: tt.errors, + } + assert.Equal(t, tt.expected, result.AreAllErrorsNotFound()) + }) + } +} + +func TestBlockHeadersResult_ListBlocksNumberNotFound(t *testing.T) { + tests := []struct { + name string + errors map[uint64]error + expected []uint64 + }{ + { + name: "no errors", + errors: map[uint64]error{}, + expected: nil, + }, + { + name: "all errors are ErrNotFound", + errors: map[uint64]error{ + 300: ErrNotFound, + 100: ErrNotFound, + 200: ErrNotFound, + }, + expected: []uint64{100, 200, 300}, // Should be sorted + }, + { + name: "all errors have exact 'not found' message", + errors: map[uint64]error{ + 300: errors.New("not found"), + 100: errors.New("not found"), + }, + expected: []uint64{100, 300}, // Should be sorted + }, + { + name: "mixed errors - some not found, some other", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: errors.New("connection timeout"), + 300: ErrNotFound, + 150: errors.New("other error"), + 250: errors.New("not found"), + }, + expected: []uint64{100, 250, 300}, // Only not found, sorted + }, + { + name: "no not found errors", + errors: map[uint64]error{ + 100: errors.New("connection error"), + 200: errors.New("timeout"), + }, + expected: nil, + }, + { + name: "errors containing 'not found' but not exact match", + errors: map[uint64]error{ + 500: errors.New("batch element error: not found"), + 100: errors.New("converting block: not found"), + 300: errors.New("some other error"), + }, + expected: nil, // IsErrNotFound requires exact "not found" message + }, + { + name: "mixed exact and non-exact not found", + errors: map[uint64]error{ + 100: ErrNotFound, // Exact match + 200: errors.New("not found"), // Exact message + 300: errors.New("batch element error: not found"), // Not exact + 400: errors.New("timeout"), // Other error + }, + expected: []uint64{100, 200}, // Only exact matches + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := &BlockHeadersResult{ + Headers: make(map[uint64]*aggkittypes.BlockHeader), + Errors: tt.errors, + } + got := result.ListBlocksNumberNotFound() + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/etherman/errors.go b/etherman/errors.go index 7ce4f9161..a87dd0dd5 100644 --- a/etherman/errors.go +++ b/etherman/errors.go @@ -67,5 +67,9 @@ func IsErrNotFound(err error) bool { if err.Error() == ErrNotFound.Error() { return true } + // If error contains "not found" (case sensitive) is an ErrNotFound + if strings.Contains(err.Error(), "not found") { + return true + } return false } diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 5189caa1a..f706b8905 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -92,7 +92,7 @@ func TestE2E(t *testing.T) { if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true - finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-2") require.NoError(t, err) cfgMD.BlockFinality = *finality evmMultidownloader, err = multidownloader.NewEVMMultidownloader( @@ -211,7 +211,7 @@ func TestWithReorgs(t *testing.T) { if tt.useMultidownloaderForTest { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true - finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-15") + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-2") require.NoError(t, err) cfgMD.BlockFinality = *finality cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 1) @@ -341,9 +341,9 @@ func TestWithReorgs(t *testing.T) { // wait for syncer to process the reorg helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 // TODO: Remove ths sleep - if !tt.useMultidownloaderForTest { - time.Sleep(time.Second * 1) - } + + time.Sleep(time.Second * 1) + // create some events and update the trees updateL1InfoTreeAndRollupExitTree(2, 1) helpers.CommitBlocks(t, client, 1, time.Millisecond*100) @@ -370,6 +370,12 @@ func checkBlocks(t *testing.T, ctx context.Context, rawClient simulated.Client, log.Warn("checkBlocks: multidownloader is nil, skipping block check") return } + rpcLatest, err := rawClient.BlockNumber(ctx) + require.NoError(t, err) + mdrLatest, err := mdr.HeaderByNumber(ctx, nil) + require.NoError(t, err) + log.Infof("checkBlocks: from %d to %d, raw latest: %d, mdr latest: %d", fromBlock, toBlock, rpcLatest, mdrLatest.Number) + for i := fromBlock; i <= toBlock; i++ { block, errRaw := rawClient.BlockByNumber(ctx, big.NewInt(int64(i))) blockMDR, errMDR := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(i)) diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index b4746acc2..e60919952 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -623,11 +623,23 @@ func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { } blocks := pendingBlockRange.ListBlockNumbers() // TODO: Check that the blocks are all inside unsafe range - blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocks, dh.cfg.MaxParallelBlockHeaderRetrieval) if err != nil { return false, fmt.Errorf("Unsafe/Step: failed to retrieve %s block headers: %w", pendingBlockRange.String(), err) } + // Check for partial failures + if !blockHeadersResult.Success() { + for blockNum, blockErr := range blockHeadersResult.Errors { + dh.log.Errorf("Unsafe/Step: failed to retrieve block %d: %v", blockNum, blockErr) + } + if !blockHeadersResult.PartialSuccess() { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve any block headers for %s", pendingBlockRange.String()) + } + dh.log.Warnf("Unsafe/Step: partial success retrieving block headers: %d/%d succeeded", + len(blockHeadersResult.Headers), len(blocks)) + } + blockHeaders := blockHeadersResult.GetOrderedHeaders(blocks) dh.log.Debugf("Unsafe/Step: querying logs for %s", pendingBlockRange.String()) logQueries := dh.getUnsafeLogQueries(blockHeaders) logs, err := dh.requestMultiplesLogs(ctx, logQueries) @@ -685,11 +697,23 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { logQueryData.BlockRange.String(), logQueryData.Addrs) blocks := getBlockNumbers(logs) dh.log.Debugf("Safe/Step: querying blockHeaders for %d blocks", len(blocks)) - blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocks, dh.cfg.MaxParallelBlockHeaderRetrieval) if err != nil { return false, fmt.Errorf("Safe/Step: failed to retrieve %d block headers: %w", len(blocks), err) } + // Check for partial failures + if !blockHeadersResult.Success() { + for blockNum, blockErr := range blockHeadersResult.Errors { + dh.log.Errorf("Safe/Step: failed to retrieve block %d: %v", blockNum, blockErr) + } + if !blockHeadersResult.PartialSuccess() { + return false, fmt.Errorf("Safe/Step: failed to retrieve any block headers") + } + dh.log.Warnf("Safe/Step: partial success retrieving block headers: %d/%d succeeded", + len(blockHeadersResult.Headers), len(blocks)) + } + blockHeaders := blockHeadersResult.GetOrderedHeaders(blocks) // Calculate new state (not set in memory until commit is successful) dh.mutex.Lock() @@ -847,7 +871,7 @@ func (dh *EVMMultidownloader) getNextQuery(ctx context.Context, chunk uint32, sa } else { maxBlock = 0 } - logQueryData, err := dh.state.NextQueryToSync(chunk, maxBlock) + logQueryData, err := dh.state.NextQueryToSync(chunk, maxBlock, true) if err != nil { return nil, fmt.Errorf("getNextQuery: cannot get NextQuery: %w", err) } @@ -1022,14 +1046,31 @@ func (dh *EVMMultidownloader) detectReorgs(ctx context.Context, return nil } blocksNumber := blocks.BlockNumbers() - currentBlockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + currentBlockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval) if err != nil { return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err) } + // Check for any failures in retrieving block headers + if !currentBlockHeadersResult.Success() { + for blockNum, blockErr := range currentBlockHeadersResult.Errors { + dh.log.Errorf("detectReorgs: failed to retrieve block %d: %v", blockNum, blockErr) + } + if currentBlockHeadersResult.AreAllErrorsNotFound() { + return mdrtypes.NewDetectedReorgError( + currentBlockHeadersResult.ListBlocksNumberNotFound()[0], + mdrtypes.ReorgDetectionReason_MissingBlock, + common.Hash{}, common.Hash{}, + fmt.Sprintf("detectReorgs: reorg detected at block number %d: block not found in RPC", + currentBlockHeadersResult.ListBlocksNumberNotFound()[0])) + } + return fmt.Errorf("detectReorgs: failed to retrieve some block headers for blocks: %w", + currentBlockHeadersResult.ComposeError()) + } + // check blocks vs currentBlockHeaders. Must match by number and hash storageBlocks := blocks.ToMap() - rpcBlocks := currentBlockHeaders.ToMap() + rpcBlocks := currentBlockHeadersResult.Headers for _, number := range blocksNumber { rpcBlock, exists := rpcBlocks[number] if !exists { diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index d4f11ffce..4ef5e4a1e 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -197,36 +197,29 @@ func TestPerformanceDownloaderParallelvsBatch(t *testing.T) { start := time.Now() headersBatch, err := etherman.RetrieveBlockHeaders(t.Context(), logger, nil, ethRPCClient, blockNumbersMap, 10) require.NoError(t, err) + require.True(t, headersBatch.Success()) durationBatch := time.Since(start) log.Infof("BatchMode took %s", durationBatch.String()) start = time.Now() headersParallel, err := etherman.RetrieveBlockHeaders(t.Context(), logger, ethClientWrapped, nil, blockNumbersMap, 20) require.NoError(t, err) + require.True(t, headersParallel.Success()) durationParallel := time.Since(start) log.Infof("Parallel RPC took %s", durationParallel.String()) - require.Equal(t, len(headersParallel), len(headersBatch)) + require.Equal(t, len(headersParallel.Headers), len(headersBatch.Headers)) for _, blockNumber := range blockNumbersSlice { - headerP := getBlockHeader(t, blockNumber, headersParallel) - headerB := getBlockHeader(t, blockNumber, headersBatch) + headerP, existsP := headersParallel.Headers[blockNumber] + headerB, existsB := headersBatch.Headers[blockNumber] + require.True(t, existsP) + require.True(t, existsB) require.NotNil(t, headerP) require.NotNil(t, headerB) require.Equal(t, headerP.Hash, headerB.Hash) } } -// getBlockHeader is only used in skipped tests -func getBlockHeader(t *testing.T, bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { - t.Helper() - for _, h := range headers { - if h.Number == bn { - return h - } - } - return nil -} - func TestEVMMultidownloader_NewEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "evm_multidownloader_test") cfg := NewConfigDefault("test.sqlite", t.TempDir()) diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go index b15222ebe..4d967ba7a 100644 --- a/multidownloader/reorg_processor_test.go +++ b/multidownloader/reorg_processor_test.go @@ -749,6 +749,58 @@ func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { } } +func TestReorgProcessor_ReorgMissingBlock(t *testing.T) { + logger := log.WithFields("module", "test") + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: logger, + port: mockPort, + developerMode: false, + } + ctx := context.Background() + detectedReorgBlock := uint64(100) + reorgErr := mdtypes.NewDetectedReorgError( + detectedReorgBlock, + mdtypes.ReorgDetectionReason_MissingBlock, + common.Hash{}, + common.Hash{}, + "test reorg", + ) + nowTimestamp := uint64(1234567890) + mockPort.EXPECT().TimeNowUnix().Return(nowTimestamp).Maybe() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 99, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: nil, // Missing block in RPC will cause GetBlockStorageAndRPC to return nil for RpcHeader + }, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 98, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(98), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(90), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(1), nil).Once() + mockTx.EXPECT().Commit().Return(nil).Once() + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + require.NoError(t, err) +} + func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock uint64) { t.Helper() diff --git a/multidownloader/state.go b/multidownloader/state.go index 81d860b8c..586456bef 100644 --- a/multidownloader/state.go +++ b/multidownloader/state.go @@ -186,8 +186,9 @@ func (s *State) SyncedSegmentsByContract(addrs []common.Address) []mdrtypes.Sync } // NextQueryToSync returns the next LogQuery to sync based on the pending segments and the given chunk size -func (s *State) NextQueryToSync(syncBlockChunkSize uint32, maxBlockNumber uint64) (*mdrtypes.LogQuery, error) { - return s.Pending.NextQuery(syncBlockChunkSize, maxBlockNumber) +func (s *State) NextQueryToSync(syncBlockChunkSize uint32, + maxBlockNumber uint64, applyMaxBlockNumber bool) (*mdrtypes.LogQuery, error) { + return s.Pending.NextQuery(syncBlockChunkSize, maxBlockNumber, applyMaxBlockNumber) } func (s *State) CompletionPercentage() map[common.Address]float64 { diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go index 857f1e7c7..fd9556213 100644 --- a/multidownloader/state_test.go +++ b/multidownloader/state_test.go @@ -408,7 +408,7 @@ func TestStateInitial_case_startBlock0(t *testing.T) { br := sut.GetTotalPendingBlockRange() require.NotNil(t, br) require.Equal(t, "From: 0, To: 256 (257)", br.String()) - nextRequest, err := sut.NextQueryToSync(20, 250) + nextRequest, err := sut.NextQueryToSync(20, 250, true) require.NoError(t, err) require.Equal(t, "From: 0, To: 19 (20)", nextRequest.BlockRange.String()) // after: synced: {0-19}, pending: {20-256} diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 7a5433829..7f669fcba 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -248,7 +248,8 @@ func (f *SetSyncSegment) IsPartiallyAvailable(query LogQuery) (bool, *LogQuery) // NextQuery generates the next LogQuery to sync based on the lowest FromBlock pending // to synchronize -func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uint64) (*LogQuery, error) { +func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, + maxBlockNumber uint64, applyMaxBlockNumber bool) (*LogQuery, error) { if f == nil || len(f.segments) == 0 { return nil, ErrFinished } @@ -260,7 +261,7 @@ func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uin lowestSegment.BlockRange.FromBlock, lowestSegment.BlockRange.FromBlock+uint64(syncBlockChunkSize)-1, )) - if maxBlockNumber > 0 { + if applyMaxBlockNumber { br = br.Cap(maxBlockNumber) } if br.IsEmpty() { diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 45eb3c7c4..6b1345b84 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -443,12 +443,12 @@ func TestSetSyncSegment_IsPartiallyAvailable(t *testing.T) { func TestSetSyncSegment_NextQuery(t *testing.T) { t.Run("nil or empty segments", func(t *testing.T) { var set *SetSyncSegment - query, err := set.NextQuery(100, 0) + query, err := set.NextQuery(100, 0, false) require.Nil(t, query) require.Equal(t, ErrFinished, err) emptySet := NewSetSyncSegment() - query, err = emptySet.NextQuery(100, 0) + query, err = emptySet.NextQuery(100, 0, false) require.Nil(t, query) require.Equal(t, ErrFinished, err) }) From 0504098b05a8c57decc5c98f3cff95f2eba6b892 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:49:40 +0100 Subject: [PATCH 73/75] fix PR comments --- etherman/batch_requests_test.go | 12 ++++++------ etherman/block_notifier/block_notifier_polling.go | 2 +- l1infotreesync/processor.go | 3 ++- multidownloader/types/log_query_test.go | 2 +- multidownloader/types/reorg_processor.go | 2 +- multidownloader/types/syncer_config_test.go | 5 +++-- 6 files changed, 14 insertions(+), 12 deletions(-) diff --git a/etherman/batch_requests_test.go b/etherman/batch_requests_test.go index a35af0424..156fc8ea5 100644 --- a/etherman/batch_requests_test.go +++ b/etherman/batch_requests_test.go @@ -294,7 +294,7 @@ func TestBlockHeadersResult_AreAllErrorsNotFound(t *testing.T) { 100: errors.New("batch element error: not found"), 200: errors.New("converting block: not found"), }, - expected: false, // IsErrNotFound requires exact "not found" message + expected: true, // IsErrNotFound requires exact "not found" message }, { name: "no not found errors", @@ -365,23 +365,23 @@ func TestBlockHeadersResult_ListBlocksNumberNotFound(t *testing.T) { expected: nil, }, { - name: "errors containing 'not found' but not exact match", + name: "errors containing no 'not found'", errors: map[uint64]error{ - 500: errors.New("batch element error: not found"), - 100: errors.New("converting block: not found"), + 500: errors.New("batch element error"), + 100: errors.New("converting block"), 300: errors.New("some other error"), }, expected: nil, // IsErrNotFound requires exact "not found" message }, { - name: "mixed exact and non-exact not found", + name: "mixed not found with others", errors: map[uint64]error{ 100: ErrNotFound, // Exact match 200: errors.New("not found"), // Exact message 300: errors.New("batch element error: not found"), // Not exact 400: errors.New("timeout"), // Other error }, - expected: []uint64{100, 200}, // Only exact matches + expected: []uint64{100, 200, 300}, // Only "not found" }, } diff --git a/etherman/block_notifier/block_notifier_polling.go b/etherman/block_notifier/block_notifier_polling.go index b31798a06..3ee813201 100644 --- a/etherman/block_notifier/block_notifier_polling.go +++ b/etherman/block_notifier/block_notifier_polling.go @@ -162,7 +162,7 @@ func (b *BlockNotifierPolling) step(ctx context.Context, BlockFinalityType: b.config.BlockFinalityType, } if previousState.lastBlockSeen > currentBlock { - b.logger.Infof("Block number decreased [finality:%s]: %d -> %d", + b.logger.Warnf("Block number decreased [finality:%s]: %d -> %d", b.config.BlockFinalityType.String(), previousState.lastBlockSeen, currentBlock) // It start from scratch because something fails in calculation of block period newState := previousState.initialBlock(currentBlock) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 01d863520..269557c37 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -253,7 +253,8 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { return p.getLastProcessedBlockWithTx(p.db) } -// GetLastProcessedBlock returns the last processed block +// GetLastProcessedBlockHeader returns the last processed block header +// this function is used by multidownloader func (p *processor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { var lastProcessedBlockNum uint64 var hash *string diff --git a/multidownloader/types/log_query_test.go b/multidownloader/types/log_query_test.go index 5899231d0..30fa339ee 100644 --- a/multidownloader/types/log_query_test.go +++ b/multidownloader/types/log_query_test.go @@ -104,7 +104,7 @@ func TestLogQuery_IsValid(t *testing.T) { var lq *LogQuery require.True(t, lq.IsValid()) lq = &LogQuery{} - require.True(t, lq.IsValid(), "blockRange is {0,0} bu is empty") + require.True(t, lq.IsValid(), "blockRange is {0,0} but is empty") lq.BlockRange = aggkitcommon.NewBlockRange(0, 0) require.False(t, lq.IsValid()) lq.BlockHash = new(common.Hash) diff --git a/multidownloader/types/reorg_processor.go b/multidownloader/types/reorg_processor.go index 29d079509..86f27bc90 100644 --- a/multidownloader/types/reorg_processor.go +++ b/multidownloader/types/reorg_processor.go @@ -10,7 +10,7 @@ type ReorgProcessor interface { // ProcessReorg processes a detected reorg starting from the offending block number. // It identifies the range of blocks affected by the reorg and takes necessary actions // to handle the reorganization. - // input paramaeters: + // input parameters: // - ctx: the context for managing cancellation and timeouts // - detectedReorgError: the error returned by the reorg detection logic, containing // the offending block number and the reason for the reorg detection diff --git a/multidownloader/types/syncer_config_test.go b/multidownloader/types/syncer_config_test.go index 3e094b194..d2eb24a9e 100644 --- a/multidownloader/types/syncer_config_test.go +++ b/multidownloader/types/syncer_config_test.go @@ -1,6 +1,7 @@ package types import ( + "strings" "testing" aggkittypes "github.com/agglayer/aggkit/types" @@ -230,12 +231,12 @@ func TestContractConfig_Update_Brief(t *testing.T) { }) expected := "SetSyncerConfig{ (syncer1 -> [10 - FinalizedBlock]) (syncer2 -> [5 - LatestBlock]) }" - require.Equal(t, expected, sut.Brief()) + require.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(sut.Brief())) }) t.Run("brief with nil config", func(t *testing.T) { var cc *SetSyncerConfig expected := "SetSyncerConfig{}" - require.Equal(t, expected, cc.Brief()) + require.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(cc.Brief())) }) } From e47b6e8aa54bc3641cfcef58d06ef35703e24ddf Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 13 Feb 2026 21:43:35 +0100 Subject: [PATCH 74/75] fix: coverage --- multidownloader/evm_multidownloader_test.go | 460 +++++++++++ .../types/set_sync_segment_test.go | 761 ++++++++++++++++++ 2 files changed, 1221 insertions(+) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 4ef5e4a1e..150c478ab 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -1402,3 +1402,463 @@ func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { require.Contains(t, err.Error(), "cannot get synced block ranges from storage") }) } + +func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { + t.Run("context cancelled", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + // Execute + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + require.Equal(t, context.Canceled, err) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("finalized - new block arrives", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: first call returns same block, second call returns new block + callCount := 0 + mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + RunAndReturn(func(ctx context.Context, blockTag aggkittypes.BlockNumberFinality) (uint64, error) { + callCount++ + if callCount == 1 { + return 100, nil // Same block + } + return 101, nil // New block + }) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.FinalizedBlock, lastBlockHeader, mdrtypes.Finalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("finalized - error getting current block number", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + expectedErr := fmt.Errorf("RPC error") + mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), expectedErr).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.FinalizedBlock, lastBlockHeader, mdrtypes.Finalized) + + // Assert + require.Error(t, err) + require.Contains(t, err.Error(), "WaitForNewBlocks: cannot get current block number") + require.Contains(t, err.Error(), "RPC error") + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - new block arrives", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: first call returns same block, second call returns new block + callCount := 0 + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, blockTag *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { + callCount++ + if callCount == 1 { + return &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), // Same hash + }, nil + } + return &aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + }, nil + }) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - error getting current header", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + expectedErr := fmt.Errorf("RPC error") + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(nil, expectedErr).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + require.Contains(t, err.Error(), "WaitForNewBlocks: cannot get current block header") + require.Contains(t, err.Error(), "RPC error") + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - block hash mismatch at same block", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: return same block number but different hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), // Different hash! + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_BlockHashMismatch, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Hash, reorgErr.OldHash) + require.Equal(t, common.HexToHash("0x5678"), reorgErr.NewHash) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - parent hash mismatch at next block", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + wrongParentHash := common.HexToHash("0x9999") + // Mock: return next block (101) with wrong parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + ParentHash: &wrongParentHash, // Wrong parent hash! + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_ParentHashMismatch, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Hash, reorgErr.OldHash) + require.Equal(t, wrongParentHash, reorgErr.NewHash) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - current block less than last block", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: return lower block number (reorg happened) + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 95, // Lower than last synced block! + Hash: common.HexToHash("0x5678"), + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_MissingBlock, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - same block number with same hash - no reorg", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: first returns same block with same hash, second returns new block + callCount := 0 + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, blockTag *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { + callCount++ + if callCount == 1 { + return &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), // Same hash - no reorg + }, nil + } + return &aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + }, nil + }) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - next block with correct parent hash - no reorg", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + correctParentHash := common.HexToHash("0x1234") + // Mock: return next block with correct parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + ParentHash: &correctParentHash, // Correct parent hash + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - next block without parent hash - no parent check", func(t *testing.T) { + // Setup + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + // Mock: return next block without parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + ParentHash: nil, // No parent hash to check + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) +} diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 6b1345b84..fc03feb95 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -731,3 +731,764 @@ func TestSetSyncSegment_GetTotalPendingBlockRange_WithEmptySegments(t *testing.T require.Equal(t, uint64(150), totalRange.ToBlock) }) } + +func TestNewSetSyncSegmentFromLogQuery(t *testing.T) { + t.Run("create from valid log query", func(t *testing.T) { + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + logQuery := &LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + + set, err := NewSetSyncSegmentFromLogQuery(logQuery) + require.NoError(t, err) + require.Len(t, set.segments, 2) + + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.Equal(t, uint64(10), seg1.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg1.BlockRange.ToBlock) + + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.Equal(t, uint64(10), seg2.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg2.BlockRange.ToBlock) + }) +} + +func TestSetSyncSegment_GetTargetToBlockTags(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var set *SetSyncSegment + result := set.GetTargetToBlockTags() + require.Nil(t, result) + }) + + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + result := set.GetTargetToBlockTags() + require.Empty(t, result) + }) + + t.Run("single segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.FinalizedBlock, + } + set.Add(segment) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 1) + require.Equal(t, aggkittypes.FinalizedBlock, result[0]) + }) + + t.Run("multiple segments with same tag", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 1) + require.Equal(t, aggkittypes.LatestBlock, result[0]) + }) + + t.Run("multiple segments with different tags", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.FinalizedBlock, + } + segment3 := SyncSegment{ + ContractAddr: common.HexToAddress("0x333"), + BlockRange: aggkitcommon.NewBlockRange(10, 20), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 2) + require.Contains(t, result, aggkittypes.LatestBlock) + require.Contains(t, result, aggkittypes.FinalizedBlock) + }) +} + +func TestSetSyncSegment_GetHighestBlockNumber(t *testing.T) { + t.Run("nil or empty set", func(t *testing.T) { + var set *SetSyncSegment + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(0), highest) + require.Equal(t, aggkittypes.LatestBlock, finality) + + emptySet := NewSetSyncSegment() + highest, finality = emptySet.GetHighestBlockNumber() + require.Equal(t, uint64(0), highest) + require.Equal(t, aggkittypes.LatestBlock, finality) + }) + + t.Run("single segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.FinalizedBlock, + } + set.Add(segment) + + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(100), highest) + require.Equal(t, aggkittypes.FinalizedBlock, finality) + }) + + t.Run("multiple segments", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 50), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(10, 200), + TargetToBlock: aggkittypes.FinalizedBlock, + } + segment3 := SyncSegment{ + ContractAddr: common.HexToAddress("0x333"), + BlockRange: aggkitcommon.NewBlockRange(100, 150), + TargetToBlock: aggkittypes.SafeBlock, + } + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(200), highest) + require.Equal(t, aggkittypes.FinalizedBlock, finality) + }) +} + +func TestSetSyncSegment_GetAddressesForBlock(t *testing.T) { + t.Run("single block within range", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + set.Add(segment1) + set.Add(segment2) + + addresses := set.GetAddressesForBlock(75) + require.Len(t, addresses, 2) + require.Contains(t, addresses, addr1) + require.Contains(t, addresses, addr2) + }) + + t.Run("block outside all ranges", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + + addresses := set.GetAddressesForBlock(200) + require.Empty(t, addresses) + }) + + t.Run("block at range boundary", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 20), + } + set.Add(segment) + + // Test at FromBlock + addresses := set.GetAddressesForBlock(10) + require.Len(t, addresses, 1) + require.Contains(t, addresses, addr) + + // Test at ToBlock + addresses = set.GetAddressesForBlock(20) + require.Len(t, addresses, 1) + require.Contains(t, addresses, addr) + }) +} + +func TestSetSyncSegment_Empty(t *testing.T) { + t.Run("empty existing segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + // Get the segment reference + seg, exists := set.GetByContract(addr) + require.True(t, exists) + require.False(t, seg.IsEmpty()) + + // Empty it + set.Empty(&seg) + + // Verify it's empty + updatedSeg, exists := set.GetByContract(addr) + require.True(t, exists) + require.True(t, updatedSeg.IsEmpty()) + }) + + t.Run("empty non-existent segment does nothing", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + + // Try to empty a segment that's not in the set + set.Empty(&segment) + // Should not panic + + // Verify set is still empty + require.Len(t, set.segments, 0) + }) +} + +func TestSetSyncSegment_Remove_Complete(t *testing.T) { + t.Run("remove existing segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + set.Add(segment2) + require.Len(t, set.segments, 2) + + // Remove first segment + set.Remove(&segment1) + require.Len(t, set.segments, 1) + + // Verify addr1 is gone + _, exists := set.GetByContract(addr1) + require.False(t, exists) + + // Verify addr2 still exists + _, exists = set.GetByContract(addr2) + require.True(t, exists) + }) + + t.Run("remove non-existent segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + + // Try to remove segment that's not in set + set.Remove(&segment2) + require.Len(t, set.segments, 1) + + // Verify addr1 still exists + _, exists := set.GetByContract(addr1) + require.True(t, exists) + }) +} + +func TestSetSyncSegment_AddLogQuery(t *testing.T) { + t.Run("nil set or query", func(t *testing.T) { + var set *SetSyncSegment + require.NoError(t, set.AddLogQuery(nil)) + + validSet := NewSetSyncSegment() + require.NoError(t, validSet.AddLogQuery(nil)) + }) + + t.Run("add log query to empty set", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + logQuery := &LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + + err := set.AddLogQuery(logQuery) + require.NoError(t, err) + require.Len(t, set.segments, 2) + + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.Equal(t, uint64(10), seg1.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg1.BlockRange.ToBlock) + + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.Equal(t, uint64(10), seg2.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg2.BlockRange.ToBlock) + }) + + t.Run("add log query with overlapping ranges", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + // Add initial segment + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + set.Add(segment) + + // Add log query with overlapping range + logQuery := &LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(40, 100), + } + + err := set.AddLogQuery(logQuery) + require.NoError(t, err) + + // Should merge the ranges + seg, exists := set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(1), seg.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg.BlockRange.ToBlock) + }) +} + +func TestSetSyncSegment_SegmentsByContract(t *testing.T) { + t.Run("get segments for addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + addr3 := common.HexToAddress("0x333") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + segment3 := SyncSegment{ + ContractAddr: addr3, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + // Get segments for addr1 and addr2 + result := set.SegmentsByContract([]common.Address{addr1, addr2}) + require.Len(t, result, 2) + require.Equal(t, addr1, result[0].ContractAddr) + require.Equal(t, addr2, result[1].ContractAddr) + }) + + t.Run("get segments for non-existent addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + + // Try to get segment for addr2 which doesn't exist + result := set.SegmentsByContract([]common.Address{addr2}) + require.Empty(t, result) + }) + + t.Run("empty address list", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + result := set.SegmentsByContract([]common.Address{}) + require.Empty(t, result) + }) +} + +func TestSetSyncSegment_GetContracts(t *testing.T) { + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + contracts := set.GetContracts() + require.Empty(t, contracts) + }) + + t.Run("get all contracts", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + addr3 := common.HexToAddress("0x333") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + segment3 := SyncSegment{ + ContractAddr: addr3, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + contracts := set.GetContracts() + require.Len(t, contracts, 3) + require.Contains(t, contracts, addr1) + require.Contains(t, contracts, addr2) + require.Contains(t, contracts, addr3) + }) +} + +func TestSetSyncSegment_GetSegments(t *testing.T) { + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + segments := set.GetSegments() + require.Empty(t, segments) + }) + + t.Run("get all segments", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + set.Add(segment2) + + segments := set.GetSegments() + require.Len(t, segments, 2) + require.Equal(t, addr1, segments[0].ContractAddr) + require.Equal(t, addr2, segments[1].ContractAddr) + }) +} + +func TestSetSyncSegment_IsAvailable_PositiveCases(t *testing.T) { + t.Run("query fully available for single address", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.True(t, result) + }) + + t.Run("query fully available for multiple addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.True(t, result) + }) + + t.Run("query not available - one address missing coverage", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 30), // Doesn't cover full range + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.False(t, result) + }) +} + +func TestSetSyncSegment_NextQuery_PositiveCases(t *testing.T) { + t.Run("generate next query without maxBlock limit", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 1000), + } + set.Add(segment) + + query, err := set.NextQuery(100, 0, false) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(1), query.BlockRange.FromBlock) + require.Equal(t, uint64(100), query.BlockRange.ToBlock) + require.Len(t, query.Addrs, 1) + require.Contains(t, query.Addrs, addr) + }) + + t.Run("generate next query with maxBlock limit applied", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 1000), + } + set.Add(segment) + + query, err := set.NextQuery(100, 50, true) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(1), query.BlockRange.FromBlock) + require.Equal(t, uint64(50), query.BlockRange.ToBlock) + }) + + t.Run("generate next query with multiple addresses in same range", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + set.Add(segment1) + set.Add(segment2) + + query, err := set.NextQuery(50, 0, false) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(10), query.BlockRange.FromBlock) + require.Equal(t, uint64(59), query.BlockRange.ToBlock) + require.Len(t, query.Addrs, 2) + }) + + t.Run("maxBlock limit results in empty range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + set.Add(segment) + + // Max block is below the segment range + query, err := set.NextQuery(100, 50, true) + require.Error(t, err) + require.Equal(t, ErrFinished, err) + require.Nil(t, query) + }) + + t.Run("returns ErrFinished when lowest segment is empty", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + // Add an empty segment + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + } + set.Add(segment) + + query, err := set.NextQuery(100, 0, false) + require.Error(t, err) + require.Equal(t, ErrFinished, err) + require.Nil(t, query) + }) +} + +func TestSetSyncSegment_SubtractLogQuery_EdgeCases(t *testing.T) { + t.Run("error creating segment from log query", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + // Log query with empty addresses should still work + logQuery := &LogQuery{ + Addrs: []common.Address{}, + BlockRange: aggkitcommon.NewBlockRange(10, 20), + } + + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + }) +} + +func TestSetSyncSegment_GetTotalPendingBlockRange_EdgeCases(t *testing.T) { + t.Run("nil set returns nil", func(t *testing.T) { + var set *SetSyncSegment + result := set.GetTotalPendingBlockRange() + require.Nil(t, result) + }) + + t.Run("set with single non-empty segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + set.Add(segment) + + result := set.GetTotalPendingBlockRange() + require.NotNil(t, result) + require.Equal(t, uint64(10), result.FromBlock) + require.Equal(t, uint64(50), result.ToBlock) + }) + + t.Run("set with non-overlapping segments", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + set.Add(segment1) + set.Add(segment2) + + result := set.GetTotalPendingBlockRange() + require.NotNil(t, result) + require.Equal(t, uint64(10), result.FromBlock) + require.Equal(t, uint64(200), result.ToBlock) + }) +} + +func TestSetSyncSegment_IsPartiallyAvailable_EdgeCases(t *testing.T) { + t.Run("segment exactly matches query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(10), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + }) +} From 09571a6dc7bd4bfa9684b5ed46a7d15aa4c1a3d1 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 13 Feb 2026 22:05:39 +0100 Subject: [PATCH 75/75] fix: lint --- multidownloader/evm_multidownloader_test.go | 253 ++++---------------- 1 file changed, 40 insertions(+), 213 deletions(-) diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index 150c478ab..041d8632d 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -1403,26 +1403,33 @@ func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { }) } +// setupWaitForNewBlocksTest creates common test fixtures +func setupWaitForNewBlocksTest(t *testing.T) (*EVMMultidownloader, *aggkittypes.BlockHeader, *mocktypes.BaseEthereumClienter, *mockethermantypes.BlockNotifierManager) { + t.Helper() + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + return mdr, lastBlockHeader, mockEthClient, mockBlockNotifierManager +} + func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { t.Run("context cancelled", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, _, _ := setupWaitForNewBlocksTest(t) ctx, cancel := context.WithCancel(context.Background()) cancel() // Cancel immediately @@ -1437,24 +1444,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("finalized - new block arrives", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, _, mockBlockNotifierManager := setupWaitForNewBlocksTest(t) // Mock: first call returns same block, second call returns new block callCount := 0 @@ -1479,24 +1469,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("finalized - error getting current block number", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, _, mockBlockNotifierManager := setupWaitForNewBlocksTest(t) expectedErr := fmt.Errorf("RPC error") mockBlockNotifierManager.EXPECT(). @@ -1516,42 +1489,15 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - new block arrives", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } - - // Mock: first call returns same block, second call returns new block - callCount := 0 + // Mock: return new block immediately mockEthClient.EXPECT(). CustomHeaderByNumber(mock.Anything, mock.Anything). - RunAndReturn(func(ctx context.Context, blockTag *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { - callCount++ - if callCount == 1 { - return &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), // Same hash - }, nil - } - return &aggkittypes.BlockHeader{ - Number: 101, - Hash: common.HexToHash("0x5678"), - }, nil - }) + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + }, nil).Once() // Execute ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) @@ -1564,24 +1510,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - error getting current header", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) expectedErr := fmt.Errorf("RPC error") mockEthClient.EXPECT(). @@ -1601,24 +1530,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - reorg detected - block hash mismatch at same block", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) // Mock: return same block number but different hash mockEthClient.EXPECT(). @@ -1646,24 +1558,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - reorg detected - parent hash mismatch at next block", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) wrongParentHash := common.HexToHash("0x9999") // Mock: return next block (101) with wrong parent hash @@ -1693,24 +1588,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - reorg detected - current block less than last block", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) // Mock: return lower block number (reorg happened) mockEthClient.EXPECT(). @@ -1736,24 +1614,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - same block number with same hash - no reorg", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) // Mock: first returns same block with same hash, second returns new block callCount := 0 @@ -1784,24 +1645,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - next block with correct parent hash - no reorg", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) correctParentHash := common.HexToHash("0x1234") // Mock: return next block with correct parent hash @@ -1824,24 +1668,7 @@ func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { }) t.Run("not finalized - next block without parent hash - no parent check", func(t *testing.T) { - // Setup - mockEthClient := mocktypes.NewBaseEthereumClienter(t) - mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) - logger := log.WithFields("test", "waitForNewBlocks") - - mdr := &EVMMultidownloader{ - log: logger, - ethClient: mockEthClient, - blockNotifierManager: mockBlockNotifierManager, - cfg: Config{ - PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, - }, - } - - lastBlockHeader := &aggkittypes.BlockHeader{ - Number: 100, - Hash: common.HexToHash("0x1234"), - } + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) // Mock: return next block without parent hash mockEthClient.EXPECT().