diff --git a/.mockery.yaml b/.mockery.yaml index 7d5ff522d..36fa370a5 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -162,3 +162,7 @@ packages: config: dir: "{{ .InterfaceDir }}/mocks" all: true + github.com/agglayer/aggkit/multidownloader/sync/types: + config: + dir: "{{ .InterfaceDir }}/mocks" + all: true diff --git a/AGENTS.md b/AGENTS.md index 18e0687fb..7048d54df 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -7,6 +7,7 @@ - `make test-unit` - Run all unit tests with coverage - `go test -v -run TestName ./path/to/package` - Run a single test - `go test -v -run TestName ./...` - Run single test across all packages +- `make generate-mocks` - Generate all mocks ## Code Style diff --git a/Makefile b/Makefile index 689f205f9..c214a9a27 100644 --- a/Makefile +++ b/Makefile @@ -117,6 +117,10 @@ vulncheck: ## Runs the vulnerability checker tool @echo "Running govulncheck on all packages..." @go list ./... | xargs -n1 govulncheck +.PHONY: generate-mocks +generate-mocks: ## Generates the mocks using mockery + @cd test && $(MAKE) generate-mocks + ## Help display. ## Pulls comments from beside commands and prints a nicely formatted ## display with the commands and their usage information. diff --git a/aggsender/mocks/mock_agg_sende_storage_maintenancer.go b/aggsender/mocks/mock_agg_sende_storage_maintenancer.go new file mode 100644 index 000000000..dd69b5c13 --- /dev/null +++ b/aggsender/mocks/mock_agg_sende_storage_maintenancer.go @@ -0,0 +1,177 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + types "github.com/agglayer/aggkit/db/types" + mock "github.com/stretchr/testify/mock" +) + +// AggSendeStorageMaintenancer is an autogenerated mock type for the AggSendeStorageMaintenancer type +type AggSendeStorageMaintenancer struct { + mock.Mock +} + +type AggSendeStorageMaintenancer_Expecter struct { + mock *mock.Mock +} + +func (_m *AggSendeStorageMaintenancer) EXPECT() *AggSendeStorageMaintenancer_Expecter { + return &AggSendeStorageMaintenancer_Expecter{mock: &_m.Mock} +} + +// DeleteCertificate provides a mock function with given fields: tx, height, mustDelete +func (_m *AggSendeStorageMaintenancer) DeleteCertificate(tx types.Querier, height uint64, mustDelete bool) error { + ret := _m.Called(tx, height, mustDelete) + + if len(ret) == 0 { + panic("no return value specified for DeleteCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64, bool) error); ok { + r0 = rf(tx, height, mustDelete) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' +type AggSendeStorageMaintenancer_DeleteCertificate_Call struct { + *mock.Call +} + +// DeleteCertificate is a helper method to define mock.On call +// - tx types.Querier +// - height uint64 +// - mustDelete bool +func (_e *AggSendeStorageMaintenancer_Expecter) DeleteCertificate(tx interface{}, height interface{}, mustDelete interface{}) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + return &AggSendeStorageMaintenancer_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", tx, height, mustDelete)} +} + +func (_c *AggSendeStorageMaintenancer_DeleteCertificate_Call) Run(run func(tx types.Querier, height uint64, mustDelete bool)) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64), args[2].(bool)) + }) + return _c +} + +func (_c 
*AggSendeStorageMaintenancer_DeleteCertificate_Call) Return(_a0 error) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteCertificate_Call) RunAndReturn(run func(types.Querier, uint64, bool) error) *AggSendeStorageMaintenancer_DeleteCertificate_Call { + _c.Call.Return(run) + return _c +} + +// DeleteOldCertificates provides a mock function with given fields: tx, olderThanHeight +func (_m *AggSendeStorageMaintenancer) DeleteOldCertificates(tx types.Querier, olderThanHeight uint64) error { + ret := _m.Called(tx, olderThanHeight) + + if len(ret) == 0 { + panic("no return value specified for DeleteOldCertificates") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) error); ok { + r0 = rf(tx, olderThanHeight) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_DeleteOldCertificates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOldCertificates' +type AggSendeStorageMaintenancer_DeleteOldCertificates_Call struct { + *mock.Call +} + +// DeleteOldCertificates is a helper method to define mock.On call +// - tx types.Querier +// - olderThanHeight uint64 +func (_e *AggSendeStorageMaintenancer_Expecter) DeleteOldCertificates(tx interface{}, olderThanHeight interface{}) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + return &AggSendeStorageMaintenancer_DeleteOldCertificates_Call{Call: _e.mock.On("DeleteOldCertificates", tx, olderThanHeight)} +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) Run(run func(tx types.Querier, olderThanHeight uint64)) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) Return(_a0 error) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_DeleteOldCertificates_Call) RunAndReturn(run func(types.Querier, uint64) error) *AggSendeStorageMaintenancer_DeleteOldCertificates_Call { + _c.Call.Return(run) + return _c +} + +// MoveCertificateToHistory provides a mock function with given fields: tx, height +func (_m *AggSendeStorageMaintenancer) MoveCertificateToHistory(tx types.Querier, height uint64) error { + ret := _m.Called(tx, height) + + if len(ret) == 0 { + panic("no return value specified for MoveCertificateToHistory") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) error); ok { + r0 = rf(tx, height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSendeStorageMaintenancer_MoveCertificateToHistory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveCertificateToHistory' +type AggSendeStorageMaintenancer_MoveCertificateToHistory_Call struct { + *mock.Call +} + +// MoveCertificateToHistory is a helper method to define mock.On call +// - tx types.Querier +// - height uint64 +func (_e *AggSendeStorageMaintenancer_Expecter) MoveCertificateToHistory(tx interface{}, height interface{}) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + return &AggSendeStorageMaintenancer_MoveCertificateToHistory_Call{Call: _e.mock.On("MoveCertificateToHistory", tx, height)} +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) Run(run func(tx types.Querier, height uint64)) 
*AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) Return(_a0 error) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call) RunAndReturn(run func(types.Querier, uint64) error) *AggSendeStorageMaintenancer_MoveCertificateToHistory_Call { + _c.Call.Return(run) + return _c +} + +// NewAggSendeStorageMaintenancer creates a new instance of AggSendeStorageMaintenancer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggSendeStorageMaintenancer(t interface { + mock.TestingT + Cleanup(func()) +}) *AggSendeStorageMaintenancer { + mock := &AggSendeStorageMaintenancer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_emit_log_func.go b/aggsender/mocks/mock_emit_log_func.go new file mode 100644 index 000000000..6322e76eb --- /dev/null +++ b/aggsender/mocks/mock_emit_log_func.go @@ -0,0 +1,76 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// EmitLogFunc is an autogenerated mock type for the EmitLogFunc type +type EmitLogFunc struct { + mock.Mock +} + +type EmitLogFunc_Expecter struct { + mock *mock.Mock +} + +func (_m *EmitLogFunc) EXPECT() *EmitLogFunc_Expecter { + return &EmitLogFunc_Expecter{mock: &_m.Mock} +} + +// Execute provides a mock function with given fields: template, args +func (_m *EmitLogFunc) Execute(template string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, template) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// EmitLogFunc_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' +type EmitLogFunc_Execute_Call struct { + *mock.Call +} + +// Execute is a helper method to define mock.On call +// - template string +// - args ...interface{} +func (_e *EmitLogFunc_Expecter) Execute(template interface{}, args ...interface{}) *EmitLogFunc_Execute_Call { + return &EmitLogFunc_Execute_Call{Call: _e.mock.On("Execute", + append([]interface{}{template}, args...)...)} +} + +func (_c *EmitLogFunc_Execute_Call) Run(run func(template string, args ...interface{})) *EmitLogFunc_Execute_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *EmitLogFunc_Execute_Call) Return() *EmitLogFunc_Execute_Call { + _c.Call.Return() + return _c +} + +func (_c *EmitLogFunc_Execute_Call) RunAndReturn(run func(string, ...interface{})) *EmitLogFunc_Execute_Call { + _c.Run(run) + return _c +} + +// NewEmitLogFunc creates a new instance of EmitLogFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEmitLogFunc(t interface { + mock.TestingT + Cleanup(func()) +}) *EmitLogFunc { + mock := &EmitLogFunc{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go b/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go new file mode 100644 index 000000000..d25673da3 --- /dev/null +++ b/aggsender/mocks/mock_l1_info_tree_root_by_leaf_querier.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// L1InfoTreeRootByLeafQuerier is an autogenerated mock type for the L1InfoTreeRootByLeafQuerier type +type L1InfoTreeRootByLeafQuerier struct { + mock.Mock +} + +type L1InfoTreeRootByLeafQuerier_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreeRootByLeafQuerier) EXPECT() *L1InfoTreeRootByLeafQuerier_Expecter { + return &L1InfoTreeRootByLeafQuerier_Expecter{mock: &_m.Mock} +} + +// NewL1InfoTreeRootByLeafQuerier creates a new instance of L1InfoTreeRootByLeafQuerier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreeRootByLeafQuerier(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreeRootByLeafQuerier { + mock := &L1InfoTreeRootByLeafQuerier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/cmd/run.go b/cmd/run.go index dffa2d6fe..207225b14 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -119,7 +119,8 @@ func start(cliCtx *cli.Context) error { // Create WaitGroup for backfill goroutines synchronization var backfillWg sync.WaitGroup - l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(ctx, components, *cfg, reorgDetectorL1, l1Client, l1MultiDownloader) + l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(ctx, components, *cfg, reorgDetectorL1, + l1Client, l1MultiDownloader) if l1InfoTreeSync != nil { rpcServices = append(rpcServices, l1InfoTreeSync.GetRPCServices()...) } @@ -158,6 +159,24 @@ func start(cliCtx *cli.Context) error { go b.Start(ctx) log.Info("Bridge service started") } + if l1MultiDownloader != nil { + log.Info("starting L1 MultiDownloader...") + err = l1MultiDownloader.Initialize(ctx) + if err != nil { + //nolint:gocritic + log.Fatalf("failed to initialize L1 MultiDownloader: %v", err) + } + go func() { + err := l1MultiDownloader.Start(ctx) + if err != nil { + log.Fatalf("l1MultiDownloader stopped: %v", err) + } + }() + } + if l1InfoTreeSync != nil { + log.Info("starting L1 Info Tree Syncer...") + go l1InfoTreeSync.Start(ctx) + } for _, component := range components { switch component { @@ -176,8 +195,7 @@ func start(cliCtx *cli.Context) error { committeeQuerier, ) if err != nil { - //nolint:gocritic - log.Fatal(err) + log.Fatalf("failed to create AggSender: %v", err) } rpcServices = append(rpcServices, aggsender.GetRPCServices()...) 
@@ -231,15 +249,6 @@ func start(cliCtx *cli.Context) error {
 	if cfg.Profiling.ProfilingEnabled {
 		go pprof.StartProfilingHTTPServer(ctx, cfg.Profiling)
 	}
-	if l1MultiDownloader != nil {
-		log.Info("starting L1 MultiDownloader...")
-		go func() {
-			err := l1MultiDownloader.Start(ctx)
-			if err != nil {
-				log.Error("l1MultiDownloader stopped: %w", err)
-			}
-		}()
-	}
 
 	waitSignal([]context.CancelFunc{cancel}, &backfillWg)
 
@@ -515,8 +524,8 @@ func runL1InfoTreeSyncerIfNeeded(
 	components []string,
 	cfg config.Config,
 	reorgDetectorL1 aggkitsync.ReorgDetector,
-	_ aggkittypes.BaseEthereumClienter,
-	l1MultiDownloader aggkittypes.MultiDownloader,
+	l1EthClient aggkittypes.BaseEthereumClienter,
+	l1MultiDownloader *multidownloader.EVMMultidownloader,
 ) *l1infotreesync.L1InfoTreeSync {
 	if !isNeeded([]string{
 		aggkitcommon.AGGORACLE, aggkitcommon.AGGSENDER, aggkitcommon.AGGSENDERVALIDATOR,
@@ -524,18 +533,30 @@
 		aggkitcommon.L2GERSYNC, aggkitcommon.AGGCHAINPROOFGEN}, components) {
 		return nil
 	}
-	l1InfoTreeSync, err := l1infotreesync.New(
-		ctx,
-		cfg.L1InfoTreeSync,
-		l1MultiDownloader,
-		reorgDetectorL1,
-		l1infotreesync.FlagNone,
-	)
+	var l1InfoTreeSync *l1infotreesync.L1InfoTreeSync
+	var err error
+	if l1MultiDownloader != nil {
+		log.Info("L1 Info Tree Syncer using MultiDownloader based implementation")
+		l1InfoTreeSync, err = l1infotreesync.NewMultidownloadBased(
+			ctx,
+			cfg.L1InfoTreeSync,
+			l1MultiDownloader,
+			l1infotreesync.FlagNone,
+		)
+	} else {
+		log.Info("L1 Info Tree Syncer using legacy sync implementation")
+		l1Client := aggkitsync.NewAdapterEthClientToMultidownloader(l1EthClient)
+		l1InfoTreeSync, err = l1infotreesync.NewLegacy(
+			ctx,
+			cfg.L1InfoTreeSync,
+			l1Client,
+			reorgDetectorL1,
+			l1infotreesync.FlagNone,
+		)
+	}
 	if err != nil {
 		log.Fatal(err)
 	}
-	go l1InfoTreeSync.Start(ctx)
-
 	return l1InfoTreeSync
 }
 
@@ -609,14 +630,15 @@ func runReorgDetectorL1IfNeeded(
 func runL1MultiDownloaderIfNeeded(
 	l1Client aggkittypes.EthClienter,
 	cfg multidownloader.Config,
-) (aggkittypes.MultiDownloader, []jRPC.Service, error) {
+) (*multidownloader.EVMMultidownloader, []jRPC.Service, error) {
 	// The requirements are the same as L1Client
 	if l1Client == nil {
 		return nil, nil, nil
 	}
-	// If it's disable It creates a direct eth client
+	// If it's disabled, the service is not created
 	if !cfg.Enabled {
-		return aggkitsync.NewAdapterEthClientToMultidownloader(l1Client), nil, nil
+		log.Warnf("L1 MultiDownloader is disabled, not creating the service")
+		return nil, nil, nil
 	}
 	logger := log.WithFields("module", "L1MultiDownloader")
@@ -626,8 +648,9 @@ func runL1MultiDownloaderIfNeeded(
 		"l1",
 		l1Client, // ethClient
 		l1Client, // rpcClient
-		nil,      // storage
-		nil,      // blockNotifierManager
+		nil,      // storage (created inside the multidownloader if nil)
+		nil,      // blockNotifierManager (created inside the multidownloader if nil)
+		nil,      // reorgProcessor (created inside the multidownloader if nil)
 	)
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to create L1 MultiDownloader: %w", err)
diff --git a/common/block_range.go b/common/block_range.go
index 9c486df88..302acb305 100644
--- a/common/block_range.go
+++ b/common/block_range.go
@@ -13,20 +13,24 @@ var (
 type BlockRange struct {
 	FromBlock uint64
 	ToBlock   uint64
+	// isNotEmpty is negated because the zero value BlockRange{} sets bool fields to false,
+	// so the natural name 'IsEmpty' would report "not empty" by default
+	isNotEmpty bool
 }
 
 // NewBlockRange creates and returns a new BlockRange with the specified fromBlock and toBlock values.
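+// For example (mirroring the unit tests): the zero value BlockRange{} and BlockRangeZero
+// are empty, while NewBlockRange(0, 0) is a valid single-block range containing block 0.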
func NewBlockRange(fromBlock, toBlock uint64) BlockRange { return BlockRange{ - FromBlock: fromBlock, - ToBlock: toBlock, + FromBlock: fromBlock, + ToBlock: toBlock, + isNotEmpty: true, } } // CountBlocks returns the total number of blocks in the BlockRange, inclusive of both FromBlock and ToBlock. // If both FromBlock and ToBlock are zero, or if FromBlock is greater than ToBlock, it returns 0. func (b BlockRange) CountBlocks() uint64 { - if b.FromBlock == 0 && b.ToBlock == 0 { + if b.IsEmpty() { return 0 } if b.FromBlock > b.ToBlock { @@ -36,13 +40,17 @@ func (b BlockRange) CountBlocks() uint64 { } // IsEmpty returns true if the BlockRange contains no blocks. +// the invalid case of FromBlock > ToBlock is also considered empty func (b BlockRange) IsEmpty() bool { - return b.CountBlocks() == 0 + return !b.isNotEmpty || b.FromBlock > b.ToBlock } // String returns a string representation of the BlockRange in the format // "From: , To: ". func (b BlockRange) String() string { + if b.IsEmpty() { + return "Empty" + } return fmt.Sprintf("From: %d, To: %d (%d)", b.FromBlock, b.ToBlock, b.CountBlocks()) } @@ -52,6 +60,9 @@ func (b BlockRange) String() string { // strictly between b and other. The direction of the gap depends on the relative positions // of the two ranges. func (b BlockRange) Gap(other BlockRange) BlockRange { + if b.IsEmpty() || other.IsEmpty() { + return BlockRangeZero + } // If they overlap or touch, return empty if b.ToBlock >= getBlockMinusOne(other.FromBlock) && other.ToBlock >= getBlockMinusOne(b.FromBlock) { @@ -59,23 +70,35 @@ func (b BlockRange) Gap(other BlockRange) BlockRange { } if b.ToBlock < other.FromBlock { - return BlockRange{ - FromBlock: b.ToBlock + 1, - ToBlock: other.FromBlock - 1, - } + return NewBlockRange( + b.ToBlock+1, + other.FromBlock-1, + ) } - return BlockRange{ - FromBlock: other.ToBlock + 1, - ToBlock: getBlockMinusOne(b.FromBlock), - } + return NewBlockRange( + other.ToBlock+1, + getBlockMinusOne(b.FromBlock), + ) } // Greater returns true if the receiver BlockRange (b) is strictly greater than the other BlockRange (other). // [ 10 - 50 ] > [ 1 - 9 ] = true // [ 10 - 50 ] > [ 5 - 15 ] = false (overlap) // [ 10 - 50 ] > [ 51 - 100 ] = false (not greater) +// empty > [0 - 0] = false +// [0 - 0] > empty = true +// empty > empty = false func (b BlockRange) Greater(other BlockRange) bool { + if b.IsEmpty() && other.IsEmpty() { + return false + } + if b.IsEmpty() { + return false + } + if other.IsEmpty() { + return true + } return b.FromBlock > other.ToBlock } @@ -89,13 +112,29 @@ func getBlockMinusOne(fromBlock uint64) uint64 { // IsNextContigousBlock checks if 'next' BlockRange is exactly the next contiguous block // so the way to use this is: previousBlockRange.IsNextContigousBlock(nextBlockRange) func (b BlockRange) IsNextContigousBlock(next BlockRange) bool { + if b.IsEmpty() || next.IsEmpty() { + return false + } return b.ToBlock+1 == next.FromBlock } // Merge merges two BlockRanges and returns a slice of BlockRanges. // If the two BlockRanges overlap, it returns a single BlockRange that encompasses both. // If they do not overlap, it returns both BlockRanges in sorted order. 
+// If either of them is empty, it is ignored:
+// [ 10 - 50 ] Merge [ 1 - 9 ] = [ 1 - 50 ]
+// [ 10 - 50 ] Merge [ 5 - 75 ] = [ 5 - 75 ]
+// [ 10 - 50 ] Merge [ 70 - 100 ] = [ 10 - 50 ], [ 70 - 100 ]
+// empty Merge [ 1 - 10 ] = [ 1 - 10 ]
+// [ 1 - 10 ] Merge empty = [ 1 - 10 ]
+// empty Merge empty = empty
 func (b BlockRange) Merge(other BlockRange) []BlockRange {
+	if b.IsEmpty() {
+		return []BlockRange{other}
+	}
+	if other.IsEmpty() {
+		return []BlockRange{b}
+	}
 	if b.Overlaps(other) {
 		// If overlaps, just extend it
 		return []BlockRange{b.Extend(other)}
@@ -109,6 +148,12 @@ func (b BlockRange) Merge(other BlockRange) []BlockRange {
 
 // Extend merges two BlockRanges into one encompassing BlockRange.
 func (b BlockRange) Extend(other BlockRange) BlockRange {
+	if b.IsEmpty() {
+		return other
+	}
+	if other.IsEmpty() {
+		return b
+	}
 	return NewBlockRange(
 		min(b.FromBlock, other.FromBlock),
 		max(b.ToBlock, other.ToBlock),
@@ -123,6 +168,7 @@ func (b BlockRange) Extend(other BlockRange) BlockRange {
 // (C---A---B---D) -> []
 func (b BlockRange) Subtract(other BlockRange) []BlockRange {
 	result := []BlockRange{}
+	// This also covers the case where b or other is empty
 	if !b.Overlaps(other) {
 		return []BlockRange{b}
 	}
@@ -136,24 +182,45 @@ func (b BlockRange) Subtract(other BlockRange) []BlockRange {
 }
 
 func (b BlockRange) Cap(maxBlockNumber uint64) BlockRange {
+	if b.IsEmpty() {
+		return BlockRangeZero
+	}
 	if b.FromBlock > maxBlockNumber {
 		return BlockRangeZero
 	}
 	return NewBlockRange(b.FromBlock, min(b.ToBlock, maxBlockNumber))
 }
 
 func (b BlockRange) Contains(other BlockRange) bool {
+	if b.IsEmpty() {
+		return false
+	}
 	return b.FromBlock <= other.FromBlock && b.ToBlock >= other.ToBlock
 }
 
+// ContainsBlockNumber returns true if the given block number is within the BlockRange (inclusive).
+func (b BlockRange) ContainsBlockNumber(number uint64) bool { + if b.IsEmpty() { + return false + } + return b.FromBlock <= number && number <= b.ToBlock +} + func (b BlockRange) Overlaps(other BlockRange) bool { + if b.IsEmpty() || other.IsEmpty() { + return false + } return b.FromBlock <= other.ToBlock && other.FromBlock <= b.ToBlock } func (b BlockRange) Equal(other BlockRange) bool { - return b.FromBlock == other.FromBlock && b.ToBlock == other.ToBlock + if b.IsEmpty() && other.IsEmpty() { + return true + } + return b.FromBlock == other.FromBlock && b.ToBlock == other.ToBlock && b.IsEmpty() == other.IsEmpty() } func (b BlockRange) Intersect(other BlockRange) BlockRange { + // If either range is empty or they don't overlap, return an empty range if !b.Overlaps(other) { return BlockRangeZero } @@ -191,3 +258,41 @@ func ChunkedRangeQuery[T any]( return all, nil } + +func (b BlockRange) ListBlockNumbers() []uint64 { + if b.IsEmpty() { + return []uint64{} + } + blockNumbers := make([]uint64, 0, b.CountBlocks()) + for i := b.FromBlock; i <= b.ToBlock; i++ { + blockNumbers = append(blockNumbers, i) + } + return blockNumbers +} + +// SplitByBlockNumber splits a BlockRange into two parts at the given block number +// The first range includes blocks from FromBlock to blockNumber (inclusive) +// The second range includes blocks from blockNumber+1 to ToBlock (inclusive) +// If blockNumber is outside the range, one of the returned ranges will be empty +func (b BlockRange) SplitByBlockNumber(blockNumber uint64) (BlockRange, BlockRange) { + // If the original range is empty, return two empty ranges + if b.IsEmpty() { + return BlockRangeZero, BlockRangeZero + } + + // If blockNumber is before FromBlock, first range is empty + if blockNumber < b.FromBlock { + return BlockRangeZero, b + } + + // If blockNumber is at or after ToBlock, second range is empty + if blockNumber >= b.ToBlock { + return b, BlockRangeZero + } + + // Split in the middle + first := NewBlockRange(b.FromBlock, blockNumber) + second := NewBlockRange(blockNumber+1, b.ToBlock) + + return first, second +} diff --git a/common/block_range_test.go b/common/block_range_test.go index be22e95d4..f147f36c8 100644 --- a/common/block_range_test.go +++ b/common/block_range_test.go @@ -31,25 +31,25 @@ func TestBlockRange_Gap(t *testing.T) { name: "a and b overlap", a: NewBlockRange(5, 15), b: NewBlockRange(10, 20), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "a and b touch at edge", a: NewBlockRange(1, 5), b: NewBlockRange(6, 10), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "b and a touch at edge", a: NewBlockRange(6, 10), b: NewBlockRange(1, 5), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "identical ranges", a: NewBlockRange(5, 10), b: NewBlockRange(5, 10), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "a after b with no overlap and gap of 1", @@ -65,39 +65,39 @@ func TestBlockRange_Gap(t *testing.T) { }, { name: "empty a", - a: NewBlockRange(0, 0), + a: BlockRangeZero, b: NewBlockRange(10, 15), - expected: NewBlockRange(1, 9), + expected: BlockRangeZero, }, { name: "empty b", a: NewBlockRange(10, 15), - b: NewBlockRange(0, 0), - expected: NewBlockRange(1, 9), + b: BlockRangeZero, + expected: BlockRangeZero, }, { name: "both empty", - a: NewBlockRange(0, 0), - b: NewBlockRange(0, 0), - expected: NewBlockRange(0, 0), + a: BlockRangeZero, + b: BlockRangeZero, + expected: BlockRangeZero, }, { name: "b before a with no gap", a: 
NewBlockRange(5, 10), b: NewBlockRange(1, 4), - expected: NewBlockRange(0, 0), + expected: BlockRangeZero, }, { name: "invalid a", a: NewBlockRange(10, 5), // from > to b: NewBlockRange(1, 15), - expected: NewBlockRange(0, 0), // should return empty range + expected: BlockRangeZero, // should return empty range }, { name: "invalid b", a: NewBlockRange(1, 15), b: NewBlockRange(10, 5), // from > to - expected: NewBlockRange(0, 0), // should return empty range + expected: BlockRangeZero, // should return empty range }, { name: "start verification case", @@ -105,6 +105,18 @@ func TestBlockRange_Gap(t *testing.T) { b: NewBlockRange(10, 10), expected: NewBlockRange(6, 9), }, + { + name: "{0,0} a", + a: NewBlockRange(0, 0), + b: NewBlockRange(10, 15), + expected: NewBlockRange(1, 9), + }, + { + name: "{0,0} b", + a: NewBlockRange(10, 15), + b: NewBlockRange(0, 0), + expected: NewBlockRange(1, 9), + }, } for _, tt := range tests { @@ -124,8 +136,18 @@ func TestBlockRange_IsEmpty(t *testing.T) { expected bool }{ { - name: "empty zero value", + name: "{0,0} not empty", br: NewBlockRange(0, 0), + expected: false, + }, + { + name: "BlockRangeZero isempty", + br: BlockRangeZero, + expected: true, + }, + { + name: "BlockRange{} isempty", + br: BlockRange{}, expected: true, }, { @@ -207,32 +229,32 @@ func TestBlockRange_Greater(t *testing.T) { }, { name: "empty a, non-empty b", - a: NewBlockRange(0, 0), + a: BlockRangeZero, b: NewBlockRange(1, 10), expected: false, }, { name: "non-empty a, empty b", a: NewBlockRange(5, 10), - b: NewBlockRange(0, 0), + b: BlockRangeZero, expected: true, }, { name: "both empty", - a: NewBlockRange(0, 0), - b: NewBlockRange(0, 0), + a: BlockRangeZero, + b: BlockRangeZero, expected: false, }, { - name: "invalid a (from > to)", - a: NewBlockRange(10, 5), - b: NewBlockRange(1, 4), - expected: true, + name: "{0,0} > {0,1} = false", + a: NewBlockRange(0, 0), + b: NewBlockRange(0, 1), + expected: false, }, { - name: "invalid b (from > to)", - a: NewBlockRange(5, 10), - b: NewBlockRange(10, 5), + name: "{0,1} > {0,0} = false (overlaps!)", + a: NewBlockRange(0, 1), + b: NewBlockRange(0, 0), expected: false, }, } @@ -316,6 +338,85 @@ func TestBlockRange_Contains(t *testing.T) { } } +func TestBlockRange_ContainsBlockNumber(t *testing.T) { + tests := []struct { + name string + blockRange BlockRange + blockNumber uint64 + expected bool + }{ + { + name: "block in the middle of range", + blockRange: NewBlockRange(10, 20), + blockNumber: 15, + expected: true, + }, + { + name: "block at FromBlock boundary", + blockRange: NewBlockRange(10, 20), + blockNumber: 10, + expected: true, + }, + { + name: "block at ToBlock boundary", + blockRange: NewBlockRange(10, 20), + blockNumber: 20, + expected: true, + }, + { + name: "block before range", + blockRange: NewBlockRange(10, 20), + blockNumber: 5, + expected: false, + }, + { + name: "block after range", + blockRange: NewBlockRange(10, 20), + blockNumber: 25, + expected: false, + }, + { + name: "single block range contains itself", + blockRange: NewBlockRange(15, 15), + blockNumber: 15, + expected: true, + }, + { + name: "single block range does not contain other", + blockRange: NewBlockRange(15, 15), + blockNumber: 16, + expected: false, + }, + { + name: "empty range does not contain block", + blockRange: NewBlockRange(0, 0), + blockNumber: 5, + expected: false, + }, + { + name: "empty range with block 0", + blockRange: NewBlockRange(0, 0), + blockNumber: 0, + expected: true, + }, + { + name: "invalid range (from > to) does not contain", + 
blockRange: NewBlockRange(20, 10), + blockNumber: 15, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.blockRange.ContainsBlockNumber(tt.blockNumber) + require.Equal(t, tt.expected, got, + "ContainsBlockNumber() for %s: expected %v, got %v", + tt.name, tt.expected, got) + }) + } +} + func TestBlockRange_Subtract(t *testing.T) { bn := NewBlockRange(10, 50) require.Equal(t, []BlockRange{NewBlockRange(10, 19), NewBlockRange(31, 50)}, bn.Subtract(NewBlockRange(20, 30))) @@ -327,15 +428,15 @@ func TestBlockRange_Subtract(t *testing.T) { } func TestBlockRange_Intersect(t *testing.T) { bn := NewBlockRange(10, 50) - require.Equal(t, BlockRange{10, 15}, bn.Intersect(NewBlockRange(5, 15))) - require.Equal(t, BlockRange{30, 40}, bn.Intersect(NewBlockRange(30, 40))) + require.Equal(t, NewBlockRange(10, 15), bn.Intersect(NewBlockRange(5, 15))) + require.Equal(t, NewBlockRange(30, 40), bn.Intersect(NewBlockRange(30, 40))) require.Equal(t, BlockRangeZero, bn.Intersect(NewBlockRange(51, 60))) } func TestBlockRange_Cap(t *testing.T) { bn := NewBlockRange(10, 50) - require.Equal(t, BlockRange{10, 40}, bn.Cap(40)) - require.Equal(t, BlockRange{10, 50}, bn.Cap(60)) + require.Equal(t, NewBlockRange(10, 40), bn.Cap(40)) + require.Equal(t, NewBlockRange(10, 50), bn.Cap(60)) require.Equal(t, BlockRangeZero, bn.Cap(5)) } @@ -471,3 +572,164 @@ func TestChunkedRangeQuery_EmptyRange(t *testing.T) { require.NoError(t, err) require.Equal(t, empty, result) } + +func TestBlockRange_ListBlockNumbers(t *testing.T) { + bn1 := NewBlockRange(1, 1) + require.Equal(t, []uint64{1}, bn1.ListBlockNumbers()) + bn2 := NewBlockRange(3, 5) + require.Equal(t, []uint64{3, 4, 5}, bn2.ListBlockNumbers()) + bn3 := NewBlockRange(0, 0) + require.Equal(t, []uint64{0}, bn3.ListBlockNumbers()) + bn4 := BlockRangeZero + require.Equal(t, []uint64{}, bn4.ListBlockNumbers()) +} + +func TestBlockRange_SplitByBlockNumber(t *testing.T) { + tests := []struct { + name string + blockRange BlockRange + splitBlock uint64 + expectedFirst BlockRange + expectedSecond BlockRange + descriptionFirst string + descriptionSecond string + }{ + { + name: "split in the middle", + blockRange: NewBlockRange(100, 200), + splitBlock: 150, + expectedFirst: NewBlockRange(100, 150), + expectedSecond: NewBlockRange(151, 200), + descriptionFirst: "first half includes split block", + descriptionSecond: "second half starts after split block", + }, + { + name: "split at FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 100, + expectedFirst: NewBlockRange(100, 100), + expectedSecond: NewBlockRange(101, 200), + descriptionFirst: "first range is single block", + descriptionSecond: "second range is rest of blocks", + }, + { + name: "split at ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 200, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", + descriptionSecond: "second range is empty", + }, + { + name: "split before FromBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 50, + expectedFirst: BlockRangeZero, + expectedSecond: NewBlockRange(100, 200), + descriptionFirst: "first range is empty", + descriptionSecond: "second range is entire original range", + }, + { + name: "split after ToBlock", + blockRange: NewBlockRange(100, 200), + splitBlock: 250, + expectedFirst: NewBlockRange(100, 200), + expectedSecond: BlockRangeZero, + descriptionFirst: "first range is entire range", + descriptionSecond: "second range 
is empty",
+		},
+		{
+			name:              "split single block range at that block",
+			blockRange:        NewBlockRange(100, 100),
+			splitBlock:        100,
+			expectedFirst:     NewBlockRange(100, 100),
+			expectedSecond:    BlockRangeZero,
+			descriptionFirst:  "first range is the single block",
+			descriptionSecond: "second range is empty",
+		},
+		{
+			name:              "split single block range before",
+			blockRange:        NewBlockRange(100, 100),
+			splitBlock:        50,
+			expectedFirst:     BlockRangeZero,
+			expectedSecond:    NewBlockRange(100, 100),
+			descriptionFirst:  "first range is empty",
+			descriptionSecond: "second range is the single block",
+		},
+		{
+			name:              "split single block range after",
+			blockRange:        NewBlockRange(100, 100),
+			splitBlock:        150,
+			expectedFirst:     NewBlockRange(100, 100),
+			expectedSecond:    BlockRangeZero,
+			descriptionFirst:  "first range is the single block",
+			descriptionSecond: "second range is empty",
+		},
+		{
+			name:              "split empty range",
+			blockRange:        BlockRangeZero,
+			splitBlock:        100,
+			expectedFirst:     BlockRangeZero,
+			expectedSecond:    BlockRangeZero,
+			descriptionFirst:  "first range is empty",
+			descriptionSecond: "second range is empty",
+		},
+		{
+			name:              "split two block range at first",
+			blockRange:        NewBlockRange(100, 101),
+			splitBlock:        100,
+			expectedFirst:     NewBlockRange(100, 100),
+			expectedSecond:    NewBlockRange(101, 101),
+			descriptionFirst:  "first range is first block",
+			descriptionSecond: "second range is second block",
+		},
+		{
+			name:              "split two block range at second",
+			blockRange:        NewBlockRange(100, 101),
+			splitBlock:        101,
+			expectedFirst:     NewBlockRange(100, 101),
+			expectedSecond:    BlockRangeZero,
+			descriptionFirst:  "first range is both blocks",
+			descriptionSecond: "second range is empty",
+		},
+		{
+			name:              "split at ToBlock minus 1",
+			blockRange:        NewBlockRange(100, 200),
+			splitBlock:        199,
+			expectedFirst:     NewBlockRange(100, 199),
+			expectedSecond:    NewBlockRange(200, 200),
+			descriptionFirst:  "first range is all but last block",
+			descriptionSecond: "second range is last block only",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			first, second := tt.blockRange.SplitByBlockNumber(tt.splitBlock)
+
+			require.Equal(t, tt.expectedFirst, first,
+				"SplitByBlockNumber() first range for %s: expected %v, got %v (%s)",
+				tt.name, tt.expectedFirst, first, tt.descriptionFirst)
+
+			require.Equal(t, tt.expectedSecond, second,
+				"SplitByBlockNumber() second range for %s: expected %v, got %v (%s)",
+				tt.name, tt.expectedSecond, second, tt.descriptionSecond)
+
+			// Verify that the split is valid
+			if !first.IsEmpty() && !second.IsEmpty() {
+				// Verify there's no gap between ranges
+				require.Equal(t, first.ToBlock+1, second.FromBlock,
+					"There should be no gap between first and second ranges")
+			}
+
+			// Verify that combined ranges equal original
+			if !first.IsEmpty() && !second.IsEmpty() {
+				require.Equal(t, tt.blockRange.FromBlock, first.FromBlock,
+					"First range should start at original FromBlock")
+				require.Equal(t, tt.blockRange.ToBlock, second.ToBlock,
+					"Second range should end at original ToBlock")
+			}
+		})
+	}
+}
diff --git a/common/polling_with_timeout.go b/common/polling_with_timeout.go
new file mode 100644
index 000000000..09e516e43
--- /dev/null
+++ b/common/polling_with_timeout.go
@@ -0,0 +1,47 @@
+package common
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+var (
+	ErrTimeoutReached = fmt.Errorf("timeout reached")
+)
+
+// PollingWithTimeout executes 'checkCondition' every pollingPeriod, until either the condition is met,
+// the timeoutPeriod is reached, or the context is done.
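+// A usage sketch (isReady is an illustrative placeholder, not part of this package):
+//
+//	ready, err := PollingWithTimeout(ctx, time.Second, time.Minute, func() (bool, error) {
+//		return isReady(), nil
+//	})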
+// It returns true if the condition is met, false if timeout is reached, or an error. +func PollingWithTimeout( + ctx context.Context, + pollingPeriod, timeoutPeriod time.Duration, + checkCondition func() (bool, error)) (bool, error) { + timeoutTimer := time.NewTimer(timeoutPeriod) + defer timeoutTimer.Stop() + + pollingTicker := time.NewTicker(pollingPeriod) + defer pollingTicker.Stop() + + for { + conditionMet, err := checkCondition() + if err != nil { + return false, err + } + if conditionMet { + return true, nil + } + select { + case <-pollingTicker.C: + // Loop continues to check condition + + case <-timeoutTimer.C: + return false, fmt.Errorf("pollingWithTimeout: condition not met after waiting %s: %w", + timeoutPeriod.String(), ErrTimeoutReached) + case <-ctx.Done(): + return false, fmt.Errorf("pollingWithTimeout: "+ + "context done while waiting for condition to be met: %w", + ctx.Err()) + } + } +} diff --git a/common/polling_with_timeout_test.go b/common/polling_with_timeout_test.go new file mode 100644 index 000000000..43f68cedf --- /dev/null +++ b/common/polling_with_timeout_test.go @@ -0,0 +1,207 @@ +package common + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPollingWithTimeout(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + pollingPeriod time.Duration + timeoutPeriod time.Duration + setupCheckFunction func() func() (bool, error) + setupContext func() context.Context + expectedResult bool + expectedError error + expectedErrorMsg string + }{ + { + name: "condition met immediately", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return true, nil + } + }, + setupContext: context.Background, + expectedResult: true, + expectedError: nil, + }, + { + name: "condition met after several attempts", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 200 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + attempts := 0 + return func() (bool, error) { + attempts++ + if attempts >= 3 { + return true, nil + } + return false, nil + } + }, + setupContext: context.Background, + expectedResult: true, + expectedError: nil, + }, + { + name: "timeout reached", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 50 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return false, nil + } + }, + setupContext: context.Background, + expectedResult: false, + expectedError: ErrTimeoutReached, + expectedErrorMsg: "pollingWithTimeout: condition not met after waiting", + }, + { + name: "context cancelled", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 500 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + return func() (bool, error) { + return false, nil + } + }, + setupContext: func() context.Context { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Millisecond) + // Don't cancel here, let the test run and timeout naturally + _ = cancel + return ctx + }, + expectedResult: false, + expectedError: context.DeadlineExceeded, + expectedErrorMsg: "context done while waiting for condition to be met", + }, + { + name: "check function returns error", + pollingPeriod: 10 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + testErr := errors.New("check function error") + return func() (bool, error) { + return 
false, testErr + } + }, + setupContext: context.Background, + expectedResult: false, + expectedErrorMsg: "check function error", + }, + { + name: "condition met on last attempt before timeout", + pollingPeriod: 20 * time.Millisecond, + timeoutPeriod: 100 * time.Millisecond, + setupCheckFunction: func() func() (bool, error) { + attempts := 0 + return func() (bool, error) { + attempts++ + // Meet condition after ~80ms (4 attempts * 20ms) + if attempts >= 4 { + return true, nil + } + return false, nil + } + }, + setupContext: context.Background, + expectedResult: true, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := tt.setupContext() + checkFunc := tt.setupCheckFunction() + + result, err := PollingWithTimeout(ctx, tt.pollingPeriod, tt.timeoutPeriod, checkFunc) + + require.Equal(t, tt.expectedResult, result) + + if tt.expectedError != nil { + require.Error(t, err) + require.ErrorIs(t, err, tt.expectedError) + } + + if tt.expectedErrorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedErrorMsg) + } + + if tt.expectedError == nil && tt.expectedErrorMsg == "" { + require.NoError(t, err) + } + }) + } +} + +func TestPollingWithTimeout_Timing(t *testing.T) { + t.Parallel() + + t.Run("respects polling period", func(t *testing.T) { + t.Parallel() + + pollingPeriod := 50 * time.Millisecond + timeoutPeriod := 500 * time.Millisecond + attempts := 0 + start := time.Now() + + checkFunc := func() (bool, error) { + attempts++ + if attempts >= 3 { + return true, nil + } + return false, nil + } + + result, err := PollingWithTimeout(context.Background(), pollingPeriod, timeoutPeriod, checkFunc) + + elapsed := time.Since(start) + + require.NoError(t, err) + require.True(t, result) + require.Equal(t, 3, attempts) + // Should take at least 2 polling periods (between attempt 1 and 3) + require.GreaterOrEqual(t, elapsed, 2*pollingPeriod) + // But not more than timeout + require.Less(t, elapsed, timeoutPeriod) + }) + + t.Run("timeout is enforced", func(t *testing.T) { + t.Parallel() + + pollingPeriod := 20 * time.Millisecond + timeoutPeriod := 100 * time.Millisecond + start := time.Now() + + checkFunc := func() (bool, error) { + return false, nil + } + + result, err := PollingWithTimeout(context.Background(), pollingPeriod, timeoutPeriod, checkFunc) + + elapsed := time.Since(start) + + require.Error(t, err) + require.False(t, result) + require.ErrorIs(t, err, ErrTimeoutReached) + // Should take approximately the timeout period + require.GreaterOrEqual(t, elapsed, timeoutPeriod) + // Allow some margin for timing variance (20ms) + require.Less(t, elapsed, timeoutPeriod+20*time.Millisecond) + }) +} diff --git a/common/time_tracker.go b/common/time_tracker.go index 4775d3ced..286be69dd 100644 --- a/common/time_tracker.go +++ b/common/time_tracker.go @@ -18,7 +18,7 @@ type TimeTracker struct { func (t *TimeTracker) String() string { return "TimeTracker{times=" + strconv.Itoa(int(t.times)) + - "lastDuration=" + t.lastDuration.String() + + ", lastDuration=" + t.lastDuration.String() + ", accumulated=" + t.accumulated.String() + "}" } diff --git a/config/config_test.go b/config/config_test.go index dee8249f9..d67865213 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -77,6 +77,7 @@ func TestLoadDefaultConfig(t *testing.T) { require.Equal(t, multidownloader.NewConfigDefault("l1", ""), cfg.L1Multidownloader) cfgL2Multidownloader := multidownloader.NewConfigDefault("l2", "") 
	cfgL2Multidownloader.BlockFinality = aggkittypes.LatestBlock
+	cfgL2Multidownloader.Enabled = false
 	require.Equal(t, cfgL2Multidownloader, cfg.L2Multidownloader)
 }
 
diff --git a/config/default.go b/config/default.go
index b30268731..afab53832 100644
--- a/config/default.go
+++ b/config/default.go
@@ -337,18 +337,22 @@ BlockFinalityForL1InfoTree = "{{AggSender.BlockFinalityForL1InfoTree}}"
 	MaxAttempts = "{{AggSender.AgglayerClient.GRPC.Retry.MaxAttempts}}"
 
 [L1Multidownloader]
-Enabled = false
+Enabled = true
+DeveloperMode = false
 StoragePath = "{{PathRWData}}/l1_multidownloader.sqlite"
 BlockChunkSize = 10000
 MaxParallelBlockHeaderRetrieval = 30
 BlockFinality = "FinalizedBlock"
 WaitPeriodToCheckCatchUp = "10s"
+PeriodToCheckReorgs = "5s"
 
 [L2Multidownloader]
 Enabled = false
+DeveloperMode = false
 StoragePath = "{{PathRWData}}/l2_multidownloader.sqlite"
 BlockChunkSize = 10000
 MaxParallelBlockHeaderRetrieval = 30
 BlockFinality = "LatestBlock"
 WaitPeriodToCheckCatchUp = "10s"
+PeriodToCheckReorgs = "5s"
 `
diff --git a/etherman/batch_requests.go b/etherman/batch_requests.go
index d46033475..a71821a7e 100644
--- a/etherman/batch_requests.go
+++ b/etherman/batch_requests.go
@@ -3,7 +3,9 @@ package etherman
 import (
 	"context"
 	"fmt"
+	"maps"
 	"math/big"
+	"sort"
 	"sync"
 
 	aggkitcommon "github.com/agglayer/aggkit/common"
@@ -27,6 +29,9 @@ func (b *blockRawEth) String() string {
 }
 
 func (b *blockRawEth) ToBlockHeader() (*aggkittypes.BlockHeader, error) {
+	if b.Number == "" && b.Hash == "" {
+		return nil, fmt.Errorf("blockRawEth.ToBlockHeader: empty: %w", ErrNotFound)
+	}
 	number, err := aggkitcommon.ParseUint64HexOrDecimal(b.Number)
 	if err != nil {
 		return nil, fmt.Errorf("blockRawEth.ToBlockHeader: parsing block number %s: %w", b.Number, err)
@@ -49,67 +54,163 @@
 // https://www.alchemy.com/docs/reference/batch-requests
 const batchRequestLimitHTTP = 1000
 
+// BlockHeadersResult holds the results of block header retrieval,
+// separating successful retrievals from failed ones
+type BlockHeadersResult struct {
+	// Headers holds the successfully retrieved block headers, keyed by block number
+	Headers map[uint64]*aggkittypes.BlockHeader
+
+	// Errors holds the retrieval errors, keyed by block number
+	Errors map[uint64]error
+}
+
+// NewBlockHeadersResult creates a new BlockHeadersResult
+func NewBlockHeadersResult() *BlockHeadersResult {
+	return &BlockHeadersResult{
+		Headers: make(map[uint64]*aggkittypes.BlockHeader),
+		Errors:  make(map[uint64]error),
+	}
+}
+
+// Success returns true if all blocks were retrieved successfully
+func (r *BlockHeadersResult) Success() bool {
+	return len(r.Errors) == 0
+}
+
+// PartialSuccess returns true if at least one block was retrieved successfully
+func (r *BlockHeadersResult) PartialSuccess() bool {
+	return len(r.Headers) > 0
+}
+
+// GetOrderedHeaders returns the headers in the order of the requested blockNumbers,
+// only for the blocks that were retrieved successfully
+func (r *BlockHeadersResult) GetOrderedHeaders(blockNumbers []uint64) []*aggkittypes.BlockHeader {
+	result := make([]*aggkittypes.BlockHeader, 0, len(r.Headers))
+	for _, bn := range blockNumbers {
+		if header, ok := r.Headers[bn]; ok {
+			result = append(result, header)
+		}
+	}
+	return result
+}
+
+// AddHeader adds a successfully retrieved header to the result
+func (r *BlockHeadersResult) AddHeader(blockNumber uint64, header *aggkittypes.BlockHeader) {
+	r.Headers[blockNumber] = header
+}
+
+// AddError adds an error for a specific block number
+func (r *BlockHeadersResult) AddError(blockNumber uint64, err error) {
+	r.Errors[blockNumber] = err
+}
+
+// Merge combines another BlockHeadersResult into this one
+func (r *BlockHeadersResult) Merge(other *BlockHeadersResult) {
+	maps.Copy(r.Headers, other.Headers)
+	maps.Copy(r.Errors, other.Errors)
+}
+
+// AreAllErrorsNotFound returns true if every error in the result is a 'not found' error
+func (r *BlockHeadersResult) AreAllErrorsNotFound() bool {
+	for _, err := range r.Errors {
+		if !IsErrNotFound(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// ListBlocksNumberNotFound returns the list of not found block numbers in the result ordered by block number
+func (r *BlockHeadersResult) ListBlocksNumberNotFound() []uint64 {
+	var notFoundBlocks []uint64
+	for bn, err := range r.Errors {
+		if IsErrNotFound(err) {
+			notFoundBlocks = append(notFoundBlocks, bn)
+		}
+	}
+	sort.Slice(notFoundBlocks, func(i, j int) bool {
+		return notFoundBlocks[i] < notFoundBlocks[j]
+	})
+	return notFoundBlocks
+}
+
+// ComposeError returns a single error summarizing the errors in the result, or nil if there are no errors
+func (r *BlockHeadersResult) ComposeError() error {
+	if len(r.Errors) == 0 {
+		return nil
+	}
+	errResult := fmt.Errorf("RetrieveBlockHeaders errors")
+	errBlockNumbers := r.ListBlocksNumberNotFound()
+	for _, bn := range errBlockNumbers {
+		errResult = fmt.Errorf("%w\nBlock %d: %w", errResult, bn, r.Errors[bn])
+	}
+	return errResult
+}
+
 // RetrieveBlockHeaders retrieves block headers for the given block numbers using batch requests
-// if rpcClient is provided
+// if rpcClient is provided. Returns a BlockHeadersResult with successful headers and individual errors.
+// The returned error is only for catastrophic failures (context cancelled, etc.)
 func RetrieveBlockHeaders(ctx context.Context,
 	log aggkitcommon.Logger,
 	ethClient aggkittypes.BaseEthereumClienter,
 	rpcClient aggkittypes.RPCClienter,
 	blockNumbers []uint64,
-	maxConcurrency int) ([]*aggkittypes.BlockHeader, error) {
+	maxConcurrency int) (*BlockHeadersResult, error) {
 	if rpcClient != nil {
 		return RetrieveBlockHeadersBatch(ctx, log, rpcClient, blockNumbers, maxConcurrency)
 	}
 	return RetrieveBlockHeadersLegacy(ctx, log, ethClient, blockNumbers, maxConcurrency)
 }
 
-// RetrieveBlockHeaders retrieves block headers for the given block numbers using batch requests
-// with concurrency control
+// RetrieveBlockHeadersBatch retrieves block headers for the given block numbers using batch requests
+// with concurrency control. Returns a BlockHeadersResult with successful headers and individual errors.
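+// A caller-side sketch (illustrative only): check the catastrophic error first,
+// then inspect the per-block outcome:
+//
+//	res, err := RetrieveBlockHeadersBatch(ctx, log, rpcClient, blockNumbers, maxConcurrency)
+//	if err == nil && !res.Success() {
+//		log.Debugf("block headers not found: %v", res.ListBlocksNumberNotFound())
+//	}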
func RetrieveBlockHeadersBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, - maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + maxConcurrency int) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatch(ctx, log, rpcClient, blocks) }, blockNumbers, batchRequestLimitHTTP, maxConcurrency) } // RetrieveBlockHeadersLegacy retrieves block headers for the given block numbers using individual requests // this is used in simulated environments where batch requests are not supported +// Returns a BlockHeadersResult with successful headers and individual errors for failed blocks func RetrieveBlockHeadersLegacy(ctx context.Context, log aggkitcommon.Logger, ethClient aggkittypes.BaseEthereumClienter, blockNumbers []uint64, - maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + maxConcurrency int) (*BlockHeadersResult, error) { return retrieveBlockHeadersInBatchParallel( ctx, log, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { - result := make([]*aggkittypes.BlockHeader, len(blocks)) - for i, blockNumber := range blocks { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { + result := NewBlockHeadersResult() + for _, blockNumber := range blocks { header, err := ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) if err != nil { - return nil, fmt.Errorf("RetrieveBlockHeadersLegacy: cannot get block header for block %d: %w", - blockNumber, err) + result.AddError(blockNumber, fmt.Errorf("cannot get block header: %w", err)) + continue } - result[i] = aggkittypes.NewBlockHeaderFromEthHeader(header) + result.AddHeader(blockNumber, aggkittypes.NewBlockHeaderFromEthHeader(header)) } return result, nil }, blockNumbers, 1, maxConcurrency) } // retrieveBlockHeadersInBatch retrieves block headers for the given block numbers using batch requests +// Returns a BlockHeadersResult with successful headers and individual errors for failed blocks func retrieveBlockHeadersInBatch(ctx context.Context, log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, blockNumbers []uint64, -) ([]*aggkittypes.BlockHeader, error) { +) (*BlockHeadersResult, error) { + result := NewBlockHeadersResult() if len(blockNumbers) == 0 { - return make([]*aggkittypes.BlockHeader, 0), nil + return result, nil } headers := make([]*blockRawEth, len(blockNumbers)) timeTracker := aggkitcommon.NewTimeTracker() @@ -120,7 +221,7 @@ func retrieveBlockHeadersInBatch(ctx context.Context, bn := fmt.Sprintf("0x%x", blockNumber) batch = append(batch, rpc.BatchElem{ Method: "eth_getBlockByNumber", - Args: []interface{}{bn, false}, + Args: []any{bn, false}, Result: headers[idx], }) } @@ -128,58 +229,66 @@ func retrieveBlockHeadersInBatch(ctx context.Context, err := rpcClient.BatchCallContext(ctx, batch) timeTracker.Stop() if err != nil { + // Catastrophic error: the whole batch call failed return nil, fmt.Errorf("retrieveRPCBlockHeadersInBatch(%d): BatchCallContext error: %w", len(blockNumbers), err) } + + // Process each element individually, collecting successes and failures for i, elem := range batch { + blockNumber := blockNumbers[i] if elem.Error != nil { - return nil, fmt.Errorf("retrieveRPCBlockHeadersInBatch(%d): batch element %d (%v) error: %w", len(blockNumbers), i, - elem.Args, - 
elem.Error) + result.AddError(blockNumber, fmt.Errorf("batch element error: %w", elem.Error)) + continue + } + // Try to convert the raw block to BlockHeader + bh, err := headers[i].ToBlockHeader() + if err != nil { + result.AddError(blockNumber, fmt.Errorf("converting block: %w", err)) + continue } + result.AddHeader(blockNumber, bh) } - log.Debugf("retrieveRPCBlockHeadersInBatch: Retrieved block headers for blocks %d in %s (elapsed)", - len(blockNumbers), timeTracker.Duration().String()) - return convertSliceBlockRawEth(headers) + + log.Debugf("retrieveRPCBlockHeadersInBatch: Retrieved %d/%d block headers in %s (elapsed)", + len(result.Headers), len(blockNumbers), timeTracker.Duration().String()) + return result, nil } // retrieveBlockHeadersInBatchParallel split request into chuncks and execute it in parallel +// Returns a BlockHeadersResult with all successful headers and individual errors func retrieveBlockHeadersInBatchParallel( ctx context.Context, logger aggkitcommon.Logger, - funcRetrieval func(context.Context, []uint64) ([]*aggkittypes.BlockHeader, error), + funcRetrieval func(context.Context, []uint64) (*BlockHeadersResult, error), blockNumbers []uint64, - chunckSize, maxConcurrency int) ([]*aggkittypes.BlockHeader, error) { + chunckSize, maxConcurrency int) (*BlockHeadersResult, error) { var mu sync.Mutex g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrency) chuncks := splitBlockNumbersIntoChunks(blockNumbers, chunckSize) - results := make(map[uint64]*aggkittypes.BlockHeader, len(blockNumbers)) + finalResult := NewBlockHeadersResult() + for _, chunck := range chuncks { g.Go(func() error { - headers, err := funcRetrieval(ctx, chunck) + chunkResult, err := funcRetrieval(ctx, chunck) if err != nil { - return fmt.Errorf("RetrieveBlockHeadersInBatchParallel: %w", err) + // Catastrophic error in this chunk (e.g., context cancelled) + return fmt.Errorf("RetrieveBlockHeadersInBatchParallel: %w", err) } mu.Lock() defer mu.Unlock() - for _, header := range headers { - results[header.Number] = header - } + finalResult.Merge(chunkResult) return nil }) } if err := g.Wait(); err != nil { + // Catastrophic error occurred return nil, err } - // convert map to sorted slice by block number - finalResults := make([]*aggkittypes.BlockHeader, len(blockNumbers)) - for idx, bn := range blockNumbers { - finalResults[idx] = results[bn] - } - logger.Debugf("retrieveRPCBlockHeadersInParallel: Retrieved block headers for blocks %d", - len(blockNumbers)) - return finalResults, nil + logger.Debugf("retrieveRPCBlockHeadersInParallel: Retrieved %d/%d block headers", + len(finalResult.Headers), len(blockNumbers)) + return finalResult, nil } func splitBlockNumbersIntoChunks(blockNumbers []uint64, chunkSize int) [][]uint64 { @@ -199,15 +308,3 @@ func splitBlockNumbersIntoChunks(blockNumbers []uint64, chunkSize int) [][]uint6 } return chunks } - -func convertSliceBlockRawEth(blocks []*blockRawEth) ([]*aggkittypes.BlockHeader, error) { - result := make([]*aggkittypes.BlockHeader, 0, len(blocks)) - for idx, blockRawEth := range blocks { - bh, err := blockRawEth.ToBlockHeader() - if err != nil { - return nil, fmt.Errorf("convert: converting block number %d (%s): %w", idx, blocks[idx].String(), err) - } - result = append(result, bh) - } - return result, nil -} diff --git a/etherman/batch_requests_test.go b/etherman/batch_requests_test.go index d7b37179c..156fc8ea5 100644 --- a/etherman/batch_requests_test.go +++ b/etherman/batch_requests_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "math/big" + "os" 
"testing" "github.com/agglayer/aggkit/log" @@ -11,135 +12,49 @@ import ( mockaggkittypes "github.com/agglayer/aggkit/types/mocks" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func TestConvertMapBlockRawEth(t *testing.T) { - tests := []struct { - name string - blocks []*blockRawEth - expected []*aggkittypes.BlockHeader - expectedError bool - }{ - { - name: "empty map", - blocks: []*blockRawEth{}, - expected: []*aggkittypes.BlockHeader{}, - expectedError: false, - }, - { - name: "single valid block", - blocks: []*blockRawEth{ - { - Number: "0x7b", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "0x5f5e100", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: []*aggkittypes.BlockHeader{ - { - Number: 123, - Hash: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), - Time: 100000000, - ParentHash: func() *common.Hash { - h := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") - return &h - }(), - }, - }, - expectedError: false, - }, - { - name: "multiple valid blocks", - blocks: []*blockRawEth{ - { - Number: "0x64", - Hash: "0x1111111111111111111111111111111111111111111111111111111111111111", - Timestamp: "0x1000", - ParentHash: "0x2222222222222222222222222222222222222222222222222222222222222222", - }, - { - Number: "0xc8", - Hash: "0x3333333333333333333333333333333333333333333333333333333333333333", - Timestamp: "0x2000", - ParentHash: "0x4444444444444444444444444444444444444444444444444444444444444444", - }, - }, - expected: []*aggkittypes.BlockHeader{ - { - Number: 100, - Hash: common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), - Time: 4096, - ParentHash: func() *common.Hash { - h := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") - return &h - }(), - }, - { - Number: 200, - Hash: common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), - Time: 8192, - ParentHash: func() *common.Hash { - h := common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") - return &h - }(), - }, - }, - expectedError: false, - }, - { - name: "invalid block number format", - blocks: []*blockRawEth{ - { - Number: "invalid", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "0x5f5e100", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: nil, - expectedError: true, - }, - { - name: "invalid timestamp format", - blocks: []*blockRawEth{ - { - Number: "0x7b", - Hash: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Timestamp: "invalid", - ParentHash: "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", - }, - }, - expected: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := convertSliceBlockRawEth(tt.blocks) +func TestRetrieveBlockHeadersBatchExploratory(t *testing.T) { + t.Skip("This test is for exploratory purposes to check the behavior of batch requests" + + " It requires a real RPC endpoint because simulated doesn't support batch calls") + ctx := t.Context() + logger := 
log.WithFields("modules", "test") + // Get L1URL from environment variable + l1url := os.Getenv("L1URL") + ethClient, err := ethclient.Dial(l1url) + require.NoError(t, err) + latestBlockNumber, err := ethClient.BlockNumber(ctx) + require.NoError(t, err) + log.Infof("Latest block number: %d", latestBlockNumber) + rpcClient, err := rpc.DialContext(ctx, l1url) + require.NoError(t, err) + requestedBlockNumbers := []uint64{latestBlockNumber - 10, latestBlockNumber, latestBlockNumber + 10} - if tt.expectedError { - require.Error(t, err) - assert.Contains(t, err.Error(), "convert: converting block number") - } else { - require.NoError(t, err) - assert.Equal(t, len(tt.expected), len(result)) - for i, expectedHeader := range tt.expected { - actualHeader := result[i] - assert.Equal(t, expectedHeader.Number, actualHeader.Number) - assert.Equal(t, expectedHeader.Hash, actualHeader.Hash) - assert.Equal(t, expectedHeader.Time, actualHeader.Time) - assert.Equal(t, expectedHeader.ParentHash, actualHeader.ParentHash) - } - } - }) + res, err := RetrieveBlockHeadersBatch(ctx, logger, + rpcClient, + requestedBlockNumbers, 10) + require.NoError(t, err) + require.False(t, res.Success()) + require.True(t, res.PartialSuccess()) + require.Equal(t, 2, len(res.Headers)) + for _, number := range requestedBlockNumbers { + err, ok := res.Errors[number] + if ok { + isNotFound := IsErrNotFound(err) + require.True(t, isNotFound, "Expected error for block %d to be not found, got: %s", number, err.Error()) + log.Infof("Error retrieving block header for block %d: %s", number, err.Error()) + continue + } + require.NotNil(t, res.Headers[number]) + log.Infof(" Retrieved block header for block %d: hash %s", number, res.Headers[number].Hash.Hex()) } } + func TestRetrieveBlockHeaders(t *testing.T) { ctx := t.Context() logger := log.WithFields("test", "test") @@ -170,7 +85,8 @@ func TestRetrieveBlockHeaders(t *testing.T) { result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, mockRPCClient, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) t.Run("uses legacy when rpcClient is nil", func(t *testing.T) { @@ -185,7 +101,8 @@ func TestRetrieveBlockHeaders(t *testing.T) { result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) t.Run("propagates error from batch method", func(t *testing.T) { @@ -197,13 +114,17 @@ func TestRetrieveBlockHeaders(t *testing.T) { require.Contains(t, err.Error(), "batch error") }) - t.Run("propagates error from legacy method", func(t *testing.T) { + t.Run("collects errors from legacy method", func(t *testing.T) { mockEthClient := mockaggkittypes.NewBaseEthereumClienter(t) - mockEthClient.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, errors.New("legacy error")).Maybe() - _, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) + mockEthClient.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, errors.New("legacy error")).Times(len(blockNumbers)) + result, err := RetrieveBlockHeaders(ctx, logger, mockEthClient, nil, blockNumbers, maxConcurrency) - require.Error(t, err) - require.Contains(t, err.Error(), "legacy error") + require.NoError(t, err) // No catastrophic error + 
require.False(t, result.Success()) + require.Equal(t, len(blockNumbers), len(result.Errors)) + for _, blockErr := range result.Errors { + require.Contains(t, blockErr.Error(), "legacy error") + } }) } func TestRetrieveBlockHeadersLegacy(t *testing.T) { @@ -229,7 +150,8 @@ func TestRetrieveBlockHeadersLegacy(t *testing.T) { result, err := RetrieveBlockHeadersLegacy(ctx, logger, mockEthClient, blockNumbers, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) }) } @@ -247,35 +169,28 @@ func TestRetrieveBlockHeadersInBatchParallel(t *testing.T) { result, err := retrieveBlockHeadersInBatchParallel( ctx, logger, - func(ctx context.Context, blocks []uint64) ([]*aggkittypes.BlockHeader, error) { + func(ctx context.Context, blocks []uint64) (*BlockHeadersResult, error) { t.Logf("Retrieving blocks in batch: %v", blocks) - headers := make([]*aggkittypes.BlockHeader, len(blocks)) - for i, bn := range blocks { - headers[i] = &aggkittypes.BlockHeader{ + result := NewBlockHeadersResult() + for _, bn := range blocks { + result.AddHeader(bn, &aggkittypes.BlockHeader{ Number: bn, - } + }) } - return headers, nil + return result, nil }, blockNumbers, 2, maxConcurrency) require.NoError(t, err) - assert.Equal(t, len(blockNumbers), len(result)) + require.True(t, result.Success()) + assert.Equal(t, len(blockNumbers), len(result.Headers)) for _, bn := range blockNumbers { - header := getBlockHeader(bn, result) + header, exists := result.Headers[bn] + require.True(t, exists) require.NotNil(t, header) assert.Equal(t, bn, header.Number) } } -func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { - for _, h := range headers { - if h.Number == bn { - return h - } - } - return nil -} - func TestSplitBlockNumbersIntoChunks(t *testing.T) { tests := []struct { name string @@ -335,3 +250,149 @@ func TestSplitBlockNumbersIntoChunks(t *testing.T) { }) } } + +func TestBlockHeadersResult_AreAllErrorsNotFound(t *testing.T) { + tests := []struct { + name string + errors map[uint64]error + expected bool + }{ + { + name: "no errors", + errors: map[uint64]error{}, + expected: true, + }, + { + name: "all errors are ErrNotFound", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: ErrNotFound, + 300: ErrNotFound, + }, + expected: true, + }, + { + name: "all errors have exact 'not found' message", + errors: map[uint64]error{ + 100: errors.New("not found"), + 200: errors.New("not found"), + }, + expected: true, + }, + { + name: "mixed - some ErrNotFound, some other errors", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: errors.New("connection timeout"), + 300: ErrNotFound, + }, + expected: false, + }, + { + name: "errors with 'not found' in message but not exact match", + errors: map[uint64]error{ + 100: errors.New("batch element error: not found"), + 200: errors.New("converting block: not found"), + }, + expected: true, // IsErrNotFound also matches errors whose message contains "not found" + }, + { + name: "no not found errors", + errors: map[uint64]error{ + 100: errors.New("connection error"), + 200: errors.New("timeout"), + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := &BlockHeadersResult{ + Headers: make(map[uint64]*aggkittypes.BlockHeader), + Errors: tt.errors, + } + assert.Equal(t, tt.expected, result.AreAllErrorsNotFound()) + }) + } +} + +func TestBlockHeadersResult_ListBlocksNumberNotFound(t *testing.T) 
{ + tests := []struct { + name string + errors map[uint64]error + expected []uint64 + }{ + { + name: "no errors", + errors: map[uint64]error{}, + expected: nil, + }, + { + name: "all errors are ErrNotFound", + errors: map[uint64]error{ + 300: ErrNotFound, + 100: ErrNotFound, + 200: ErrNotFound, + }, + expected: []uint64{100, 200, 300}, // Should be sorted + }, + { + name: "all errors have exact 'not found' message", + errors: map[uint64]error{ + 300: errors.New("not found"), + 100: errors.New("not found"), + }, + expected: []uint64{100, 300}, // Should be sorted + }, + { + name: "mixed errors - some not found, some other", + errors: map[uint64]error{ + 100: ErrNotFound, + 200: errors.New("connection timeout"), + 300: ErrNotFound, + 150: errors.New("other error"), + 250: errors.New("not found"), + }, + expected: []uint64{100, 250, 300}, // Only not found, sorted + }, + { + name: "no not found errors", + errors: map[uint64]error{ + 100: errors.New("connection error"), + 200: errors.New("timeout"), + }, + expected: nil, + }, + { + name: "errors containing no 'not found'", + errors: map[uint64]error{ + 500: errors.New("batch element error"), + 100: errors.New("converting block"), + 300: errors.New("some other error"), + }, + expected: nil, // none of these messages contain "not found" + }, + { + name: "mixed not found with others", + errors: map[uint64]error{ + 100: ErrNotFound, // Exact match + 200: errors.New("not found"), // Exact message + 300: errors.New("batch element error: not found"), // Contains "not found", so it counts + 400: errors.New("timeout"), // Other error + }, + expected: []uint64{100, 200, 300}, // All errors matching "not found", sorted + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := &BlockHeadersResult{ + Headers: make(map[uint64]*aggkittypes.BlockHeader), + Errors: tt.errors, + } + got := result.ListBlocksNumberNotFound() + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/etherman/block_notifier/block_notifier_manager.go b/etherman/block_notifier/block_notifier_manager.go index a6212a114..3d4fa28e2 100644 --- a/etherman/block_notifier/block_notifier_manager.go +++ b/etherman/block_notifier/block_notifier_manager.go @@ -57,6 +57,9 @@ func (bnm *BlockNotifierManager) GetBlockNotifier(ctx context.Context, } func (bnm *BlockNotifierManager) GetCurrentBlockNumber(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) { + if blockFinality.IsConstant() { + return blockFinality.Specific, nil + } bn, err := bnm.GetBlockNotifier(ctx, blockFinality) if err != nil { return 0, err diff --git a/etherman/block_notifier/block_notifier_manager_test.go b/etherman/block_notifier/block_notifier_manager_test.go index 345eb2c7a..6a191ebc7 100644 --- a/etherman/block_notifier/block_notifier_manager_test.go +++ b/etherman/block_notifier/block_notifier_manager_test.go @@ -41,4 +41,8 @@ func TestBlockNotifierManager_GetCurrentBlockNumber(t *testing.T) { currentBlockNumber, err := sut.GetCurrentBlockNumber(t.Context(), aggkittypes.LatestBlock) require.NoError(t, err) require.Equal(t, uint64(1234), currentBlockNumber) + + bn, err := sut.GetCurrentBlockNumber(t.Context(), *aggkittypes.NewBlockNumber(123)) + require.NoError(t, err) + require.Equal(t, uint64(123), bn) } diff --git a/etherman/block_notifier/block_notifier_polling.go b/etherman/block_notifier/block_notifier_polling.go index 51ed45dd8..3ee813201 100644 --- a/etherman/block_notifier/block_notifier_polling.go +++ b/etherman/block_notifier/block_notifier_polling.go @@ -141,12 +141,13 @@ func (b 
*BlockNotifierPolling) getGlobalStatus() *blockNotifierPollingInternalSt func (b *BlockNotifierPolling) step(ctx context.Context, previousState *blockNotifierPollingInternalStatus) (time.Duration, *blockNotifierPollingInternalStatus, *ethmantypes.EventNewBlock) { - currentBlock, err := b.blockFinality.BlockNumber(ctx, b.ethClient) + hdr, err := b.ethClient.CustomHeaderByNumber(ctx, &b.blockFinality) if err != nil { b.logger.Errorf("Failed to get block number %s: %v", b.blockFinality.String(), err) newState := previousState.clear() return b.nextBlockRequestDelay(nil, err), newState, nil } + currentBlock := hdr.Number if previousState == nil { newState := previousState.initialBlock(currentBlock) return b.nextBlockRequestDelay(previousState, nil), newState, nil @@ -170,7 +171,7 @@ func (b *BlockNotifierPolling) step(ctx context.Context, if currentBlock-previousState.lastBlockSeen != 1 { if !b.config.BlockFinalityType.IsSafe() && !b.config.BlockFinalityType.IsFinalized() { - b.logger.Warnf("Missed block(s) [finality:%s]: %d -> %d", + b.logger.Infof("Missed block(s) [finality:%s]: %d -> %d", b.config.BlockFinalityType.String(), previousState.lastBlockSeen, currentBlock) } diff --git a/etherman/block_notifier/block_notifier_polling_test.go b/etherman/block_notifier/block_notifier_polling_test.go index 343f82f7b..71fe7be42 100644 --- a/etherman/block_notifier/block_notifier_polling_test.go +++ b/etherman/block_notifier/block_notifier_polling_test.go @@ -21,7 +21,7 @@ import ( ) func TestExploratoryBlockNotifierPolling(t *testing.T) { - t.Skip() + t.Skip("is an exploratory test that requires an external RPC") urlRPCL1 := os.Getenv("L1URL") fmt.Println("URL=", urlRPCL1) cfg := ðermanconfig.RPCClientConfig{ @@ -117,7 +117,8 @@ func TestBlockNotifierPollingStep(t *testing.T) { }, mockLoggerFn: func() aggkitcommon.Logger { mockLogger := commonmocks.NewLogger(t) - mockLogger.EXPECT().Warnf("Missed block(s) [finality:%s]: %d -> %d", aggkittypes.LatestBlock.String(), uint64(100), uint64(105)).Once() + mockLogger.EXPECT().Infof("Missed block(s) [finality:%s]: %d -> %d", aggkittypes.LatestBlock.String(), uint64(100), uint64(105)).Once() + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything).Maybe() return mockLogger }, headerByNumberError: false, diff --git a/etherman/default_eth_client.go b/etherman/default_eth_client.go index 78820f313..0947e604e 100644 --- a/etherman/default_eth_client.go +++ b/etherman/default_eth_client.go @@ -101,7 +101,19 @@ func (c *DefaultEthClient) CustomHeaderByNumber(ctx context.Context, if err != nil { return nil, err } + result, err := c.internalHeaderByNumber(ctx, numberBigInt) + if err != nil { + return nil, err + } + + result.RequestedBlock = number + return result, nil +} + +func (c *DefaultEthClient) internalHeaderByNumber(ctx context.Context, + numberBigInt *big.Int) (*aggkittypes.BlockHeader, error) { var result *aggkittypes.BlockHeader + var err error if c.HashFromJSON { result, err = c.rpcGetBlockByNumber(ctx, numberBigInt) if err != nil { @@ -114,8 +126,6 @@ func (c *DefaultEthClient) CustomHeaderByNumber(ctx context.Context, } result = aggkittypes.NewBlockHeaderFromEthHeader(ethHeader) } - - result.RequestedBlock = number return result, nil } @@ -126,7 +136,7 @@ func (c *DefaultEthClient) resolveBlockNumber(ctx context.Context, return number.ToBigInt(), nil } // Resolve the base block number - hdr, err := c.rpcGetBlockByNumber(ctx, number.ToBigInt()) + hdr, err := c.internalHeaderByNumber(ctx, number.ToBigInt()) if err != nil { return nil, err } @@ -144,7 
+154,10 @@ func (c *DefaultEthClient) rpcGetBlockByNumber(ctx context.Context, number *big. var rawEthHeader *blockRawEth err := c.CallContext(ctx, &rawEthHeader, "eth_getBlockByNumber", blockArg, false) if err != nil { - return nil, fmt.Errorf("rpcGetBlockByNumber: %w", err) + return nil, fmt.Errorf("rpcGetBlockByNumber: CallContext error: %w", err) + } + if rawEthHeader == nil { + return nil, fmt.Errorf("rpcGetBlockByNumber: not found: %s", blockArg) } return rawEthHeader.ToBlockHeader() } diff --git a/etherman/default_eth_client_test.go b/etherman/default_eth_client_test.go index 04b991b4b..3ced7164f 100644 --- a/etherman/default_eth_client_test.go +++ b/etherman/default_eth_client_test.go @@ -37,67 +37,123 @@ func TestDefaultEthClientExploratory(t *testing.T) { fmt.Printf("header: %+v\n", header) } -func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { +// testBlockWithOffsetHelper is a helper function for testing block tag resolution with offsets +func testBlockWithOffsetHelper( + t *testing.T, + ctx context.Context, + blockTag string, + blockNumFinality string, + firstBlockNum uint64, + firstBlockHash string, + secondBlockNum uint64, + secondBlockHash string, +) { + t.Helper() mockEthClient := mocks.NewEthereumClienter(t) mockRPCClient := mocks.NewRPCClienter(t) - client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) - bnFinalized5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/5") + client.HashFromJSON = true + + bn, err := aggkittypes.NewBlockNumberFinality(blockNumFinality) require.NoError(t, err) - ctx := t.Context() - blockRaw95 := &blockRawEth{ - Number: "0x5f", // 95 in hex - Hash: "0xabc123", + + firstBlock := &blockRawEth{ + Number: fmt.Sprintf("0x%x", firstBlockNum), + Hash: firstBlockHash, Timestamp: "1234", } - blockRaw100 := &blockRawEth{ - Number: "0x64", // 100 in hex - Hash: "0xabc123", - Timestamp: "1234", + secondBlock := &blockRawEth{ + Number: fmt.Sprintf("0x%x", secondBlockNum), + Hash: secondBlockHash, + Timestamp: "1235", } - t.Run("FinalizedBlock with offset", func(t *testing.T) { - client.HashFromJSON = true - // Setup mock for rpcGetBlockByNumber - // Call to resolve finalized block - mockRPCClient. - EXPECT(). - CallContext( - ctx, - mock.Anything, - "eth_getBlockByNumber", - "finalized", - false, - ). - Return(nil). - Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { - rawEth, ok := result.(**blockRawEth) - require.True(t, ok) - *rawEth = blockRaw95 - }).Once() + // First call to resolve block tag + mockRPCClient. + EXPECT(). + CallContext(ctx, mock.Anything, "eth_getBlockByNumber", blockTag, false). + Return(nil). + Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { + rawEth, ok := result.(**blockRawEth) + require.True(t, ok) + *rawEth = firstBlock + }).Once() - mockRPCClient. - EXPECT(). - CallContext(ctx, mock.Anything, "eth_getBlockByNumber", "0x64", false). - Return(nil). 
- Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { - rawEth, ok := result.(**blockRawEth) - require.True(t, ok) - *rawEth = blockRaw100 - }).Once() - // Call CustomHeaderByNumber - header, err := client.CustomHeaderByNumber(ctx, bnFinalized5) - require.NoError(t, err) - require.NotNil(t, header) - require.Equal(t, uint64(100), header.Number) - require.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000abc123", header.Hash.Hex()) - require.Equal(t, bnFinalized5, header.RequestedBlock) + // Second call to get the final block after offset + mockRPCClient. + EXPECT(). + CallContext(ctx, mock.Anything, "eth_getBlockByNumber", fmt.Sprintf("0x%x", secondBlockNum), false). + Return(nil). + Run(func(ctx context.Context, result interface{}, method string, args ...interface{}) { + rawEth, ok := result.(**blockRawEth) + require.True(t, ok) + *rawEth = secondBlock + }).Once() + + header, err := client.CustomHeaderByNumber(ctx, bn) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, secondBlockNum, header.Number) + require.Equal(t, fmt.Sprintf("0x%064s", secondBlockHash[2:]), header.Hash.Hex()) + require.Equal(t, bn, header.RequestedBlock) +} + +// testBlockWithOffsetHelperGeth is a helper function for testing block tag resolution with offsets using geth client +func testBlockWithOffsetHelperGeth( + t *testing.T, + ctx context.Context, + blockNumFinality string, + firstCallArg *big.Int, + firstBlockNum uint64, + secondBlockNum uint64, +) { + t.Helper() + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = false + + bn, err := aggkittypes.NewBlockNumberFinality(blockNumFinality) + require.NoError(t, err) + + mockEthClient.EXPECT(). + HeaderByNumber(ctx, firstCallArg). + Return(&types.Header{ + Number: big.NewInt(int64(firstBlockNum)), + }, nil).Once() + + mockEthClient.EXPECT(). + HeaderByNumber(ctx, big.NewInt(int64(secondBlockNum))). + Return(&types.Header{ + Number: big.NewInt(int64(secondBlockNum)), + }, nil).Once() + + header, err := client.CustomHeaderByNumber(ctx, bn) + require.NoError(t, err) + require.NotNil(t, header) + require.Equal(t, secondBlockNum, header.Number) + require.Equal(t, bn, header.RequestedBlock) +} + +func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { + ctx := context.Background() + + t.Run("FinalizedBlock with offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "finalized", "FinalizedBlock/5", 95, "0xabc123", 100, "0xabc123") }) t.Run("Latest block", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) client.HashFromJSON = true - ctx := t.Context() + + blockRaw95 := &blockRawEth{ + Number: "0x5f", // 95 in hex + Hash: "0xabc123", + Timestamp: "1234", + } mockRPCClient. EXPECT(). @@ -115,15 +171,27 @@ func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { }) t.Run("failed to find blockNumber for tag block", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) + client.HashFromJSON = true + + bnFinalized5, err := aggkittypes.NewBlockNumberFinality("FinalizedBlock/5") + require.NoError(t, err) + mockRPCClient. EXPECT().CallContext(ctx, mock.Anything, "eth_getBlockByNumber", "finalized", false). 
Return(fmt.Errorf("rpc error")) - _, err := client.CustomHeaderByNumber(ctx, bnFinalized5) + _, err = client.CustomHeaderByNumber(ctx, bnFinalized5) require.Error(t, err) }) t.Run("use HashFromJSON=false (geth call)", func(t *testing.T) { + mockEthClient := mocks.NewEthereumClienter(t) + mockRPCClient := mocks.NewRPCClienter(t) + client := NewDefaultEthClient(mockEthClient, mockRPCClient, nil) client.HashFromJSON = false + mockEthClient.EXPECT(). HeaderByNumber(ctx, (*big.Int)(nil)). Return(&types.Header{ @@ -133,4 +201,32 @@ func TestDefaultEthClient_CustomHeaderByNumber(t *testing.T) { require.NoError(t, err) require.NotNil(t, header) }) + + t.Run("LatestBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "latest", "LatestBlock/-10", 100, "0xdef456", 90, "0xabc789") + }) + + t.Run("FinalizedBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "finalized", "FinalizedBlock/-5", 100, "0xfed123", 95, "0xabc456") + }) + + t.Run("SafeBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "safe", "SafeBlock/-3", 50, "0x123abc", 47, "0x456def") + }) + + t.Run("PendingBlock with negative offset", func(t *testing.T) { + testBlockWithOffsetHelper(t, ctx, "pending", "PendingBlock/-2", 101, "0x789abc", 99, "0xdef123") + }) + + t.Run("LatestBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + testBlockWithOffsetHelperGeth(t, ctx, "LatestBlock/-10", nil, 100, 90) + }) + + t.Run("FinalizedBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + testBlockWithOffsetHelperGeth(t, ctx, "FinalizedBlock/-5", big.NewInt(-3), 100, 95) + }) + + t.Run("SafeBlock with negative offset (HashFromJSON=false)", func(t *testing.T) { + testBlockWithOffsetHelperGeth(t, ctx, "SafeBlock/-3", big.NewInt(-4), 50, 47) + }) } diff --git a/etherman/errors.go b/etherman/errors.go index a2d748e7b..a87dd0dd5 100644 --- a/etherman/errors.go +++ b/etherman/errors.go @@ -56,3 +56,20 @@ func TryParseError(err error) (error, bool) { return parsedError, exists } + +func IsErrNotFound(err error) bool { + if err == nil { + return false + } + if errors.Is(err, ErrNotFound) { + return true + } + if err.Error() == ErrNotFound.Error() { + return true + } + // An error whose message contains "not found" (case-sensitive) is treated as ErrNotFound + if strings.Contains(err.Error(), "not found") { + return true + } + return false +} diff --git a/etherman/errors_test.go b/etherman/errors_test.go index 91ca6c500..249d32828 100644 --- a/etherman/errors_test.go +++ b/etherman/errors_test.go @@ -1,10 +1,12 @@ package etherman import ( + "errors" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTryParseWithExactMatch(t *testing.T) { @@ -35,3 +37,39 @@ func TestTryParseWithNonExistingErr(t *testing.T) { assert.Nil(t, actualErr) assert.False(t, ok) } + +func TestIsErrNotFound(t *testing.T) { + t.Run("returns false when error is nil", func(t *testing.T) { + result := IsErrNotFound(nil) + require.False(t, result) + }) + + t.Run("returns true when error is ErrNotFound", func(t *testing.T) { + result := IsErrNotFound(ErrNotFound) + require.True(t, result) + }) + + t.Run("returns true when error is wrapped with ErrNotFound", func(t *testing.T) { + wrappedErr := fmt.Errorf("some context: %w", ErrNotFound) + result := IsErrNotFound(wrappedErr) + require.True(t, result) + }) + + t.Run("returns true when error has same message as ErrNotFound", func(t *testing.T) { + sameMessageErr := errors.New("not 
found") + result := IsErrNotFound(sameMessageErr) + require.True(t, result) + }) + + t.Run("returns false when error is different", func(t *testing.T) { + differentErr := errors.New("some other error") + result := IsErrNotFound(differentErr) + require.False(t, result) + }) + + t.Run("returns false when error message is different", func(t *testing.T) { + differentErr := ErrMissingTrieNode + result := IsErrNotFound(differentErr) + require.False(t, result) + }) +} diff --git a/etherman/mocks/mock_dial_func.go b/etherman/mocks/mock_dial_func.go new file mode 100644 index 000000000..dd5fae541 --- /dev/null +++ b/etherman/mocks/mock_dial_func.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// DialFunc is an autogenerated mock type for the DialFunc type +type DialFunc struct { + mock.Mock +} + +type DialFunc_Expecter struct { + mock *mock.Mock +} + +func (_m *DialFunc) EXPECT() *DialFunc_Expecter { + return &DialFunc_Expecter{mock: &_m.Mock} +} + +// Execute provides a mock function with given fields: url +func (_m *DialFunc) Execute(url string) (types.BaseEthereumClienter, error) { + ret := _m.Called(url) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 types.BaseEthereumClienter + var r1 error + if rf, ok := ret.Get(0).(func(string) (types.BaseEthereumClienter, error)); ok { + return rf(url) + } + if rf, ok := ret.Get(0).(func(string) types.BaseEthereumClienter); ok { + r0 = rf(url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.BaseEthereumClienter) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(url) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DialFunc_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' +type DialFunc_Execute_Call struct { + *mock.Call +} + +// Execute is a helper method to define mock.On call +// - url string +func (_e *DialFunc_Expecter) Execute(url interface{}) *DialFunc_Execute_Call { + return &DialFunc_Execute_Call{Call: _e.mock.On("Execute", url)} +} + +func (_c *DialFunc_Execute_Call) Run(run func(url string)) *DialFunc_Execute_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *DialFunc_Execute_Call) Return(_a0 types.BaseEthereumClienter, _a1 error) *DialFunc_Execute_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DialFunc_Execute_Call) RunAndReturn(run func(string) (types.BaseEthereumClienter, error)) *DialFunc_Execute_Call { + _c.Call.Return(run) + return _c +} + +// NewDialFunc creates a new instance of DialFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDialFunc(t interface { + mock.TestingT + Cleanup(func()) +}) *DialFunc { + mock := &DialFunc{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index d9621346e..f706b8905 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -30,7 +30,7 @@ import ( "github.com/stretchr/testify/require" ) -const useMultidownloaderForTests = false +const useMultidownloaderForTests = true func newSimulatedClient(t *testing.T) ( *simulated.Backend, @@ -73,19 +73,29 @@ func TestE2E(t *testing.T) { ctx := t.Context() dbPath := path.Join(t.TempDir(), "l1infotreesyncTestE2E.sqlite") - mockReorgDetector := mocks.NewReorgDetectorMock(t) - mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(&reorgdetector.Subscription{}, nil) - mockReorgDetector.EXPECT().AddBlockToTrack(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(aggkittypes.FinalizedBlock).Once() - mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) - client, auth, gerAddr, verifyAddr, gerSc, _ := newSimulatedClient(t) - var multidownloaderClient aggkittypes.MultiDownloader + cfg := l1infotreesync.Config{ + DBPath: dbPath, + InitialBlock: 0, + SyncBlockChunkSize: 10, + BlockFinality: aggkittypes.LatestBlock, + GlobalExitRootAddr: gerAddr, + RollupManagerAddr: verifyAddr, + RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), + MaxRetryAttemptsAfterError: 25, + RequireStorageContentCompatibility: true, + WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + } + var syncer *l1infotreesync.L1InfoTreeSync var err error + var evmMultidownloader *multidownloader.EVMMultidownloader if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true - multidownloaderClient, err = multidownloader.NewEVMMultidownloader( + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-2") + require.NoError(t, err) + cfgMD.BlockFinality = *finality + evmMultidownloader, err = multidownloader.NewEVMMultidownloader( + log.WithFields("module", "multidownloader"), + cfgMD, + "testMD", @@ -93,27 +103,28 @@ func TestE2E(t *testing.T) { nil, // rpcClient nil, nil, + nil, // reorgProcessor will be created internally ) require.NoError(t, err) + syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go func() { + // This always returns an error because the context is cancelled at the end of the test + err := evmMultidownloader.Start(ctx) + log.Infof("Multidownloader exited with error: %v", err) + }() } else { - multidownloaderClient = sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) - } - - cfg := l1infotreesync.Config{ - DBPath: dbPath, - InitialBlock: 0, - SyncBlockChunkSize: 10, - BlockFinality: aggkittypes.LatestBlock, - GlobalExitRootAddr: gerAddr, - RollupManagerAddr: verifyAddr, - RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), - MaxRetryAttemptsAfterError: 25, - RequireStorageContentCompatibility: true, - WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + mockReorgDetector := mocks.NewReorgDetectorMock(t) + mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(&reorgdetector.Subscription{}, nil) + 
mockReorgDetector.EXPECT().AddBlockToTrack(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReorgDetector.EXPECT().GetFinalizedBlockType().Return(aggkittypes.FinalizedBlock).Once() + mockReorgDetector.EXPECT().GetTrackedBlockByBlockNumber(mock.Anything, mock.Anything).Return(&reorgdetector.Header{}, nil) + + multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + syncer, err = l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, mockReorgDetector, + l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, mockReorgDetector, - l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) go syncer.Start(ctx) @@ -148,150 +159,243 @@ func TestE2E(t *testing.T) { require.NoError(t, err) require.Equal(t, common.Hash(expectedGER), latestGER) } + log.Infof("TestE2E finished successfully") } func TestWithReorgs(t *testing.T) { - ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync.sqlite") - dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg.sqlite") - - client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) - - rdConfig := reorgdetector.Config{ - DBPath: dbPathReorg, - CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), - FinalizedBlock: aggkittypes.FinalizedBlock, - } - rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - var multidownloaderClient aggkittypes.MultiDownloader - if useMultidownloaderForTests { - cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) - cfgMD.Enabled = true - multidownloaderClient, err = multidownloader.NewEVMMultidownloader( - log.WithFields("module", "multidownloader"), - cfgMD, - "testMD", - etherman.NewDefaultEthClient(client.Client(), nil, nil), - nil, // rpcClient - nil, - nil, - ) - require.NoError(t, err) - } else { - multidownloaderClient = sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + t.Parallel() + tests := []struct { + name string + useMultidownloaderForTest bool + }{ + { + name: "with legacy reorgdetector", + useMultidownloaderForTest: false, + }, + { + name: "with multidownloader", + useMultidownloaderForTest: true, + }, } - cfg := l1infotreesync.Config{ - DBPath: dbPathSyncer, - InitialBlock: 0, - SyncBlockChunkSize: 10, - BlockFinality: aggkittypes.LatestBlock, - GlobalExitRootAddr: gerAddr, - RollupManagerAddr: verifyAddr, - RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), - MaxRetryAttemptsAfterError: 25, - RequireStorageContentCompatibility: true, - WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), - } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - go syncer.Start(ctx) - - // Commit block 6 - header, err := client.Client().HeaderByHash(ctx, client.Commit()) - require.NoError(t, err) - reorgFrom := header.Hash() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() - // Commit block 7 - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - - updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { - // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, 
common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) + ctx := context.Background() + suffix := "legacy" + if tt.useMultidownloaderForTest { + suffix = "multidownloader" + } + dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync_"+suffix+".sqlite") + dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg_"+suffix+".sqlite") + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + + cfg := l1infotreesync.Config{ + DBPath: dbPathSyncer, + InitialBlock: 0, + SyncBlockChunkSize: 10, + BlockFinality: aggkittypes.LatestBlock, + GlobalExitRootAddr: gerAddr, + RollupManagerAddr: verifyAddr, + RetryAfterErrorPeriod: cfgtypes.NewDuration(time.Millisecond * 100), + MaxRetryAttemptsAfterError: 25, + RequireStorageContentCompatibility: true, + WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), + } - // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) + var syncer *l1infotreesync.L1InfoTreeSync + var err error + var evmMultidownloader *multidownloader.EVMMultidownloader + if tt.useMultidownloaderForTest { + cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) + cfgMD.Enabled = true + finality, err := aggkittypes.NewBlockNumberFinality("latestBlock/-2") + require.NoError(t, err) + cfgMD.BlockFinality = *finality + cfgMD.WaitPeriodToCheckCatchUp = cfgtypes.NewDuration(time.Millisecond * 1) + cfgMD.PeriodToCheckReorgs = cfgtypes.NewDuration(time.Millisecond * 1) + evmMultidownloader, err = multidownloader.NewEVMMultidownloader( + log.WithFields("module", "multidownloader"), + cfgMD, + "testMD", + etherman.NewDefaultEthClient(client.Client(), nil, nil), + nil, // rpcClient + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally + ) + require.NoError(t, err) + syncer, err = l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go func() { + // Start returns once the test context is cancelled; only log the outcome here, + // since require must not be called from a non-test goroutine + err := evmMultidownloader.Start(ctx) + log.Infof("Multidownloader exited with error: %v", err) + }() + } else { + rdConfig := reorgdetector.Config{ + DBPath: dbPathReorg, + CheckReorgsInterval: cfgtypes.NewDuration(time.Millisecond * 100), + FinalizedBlock: aggkittypes.FinalizedBlock, + } + rd, err := reorgdetector.New(etherman.NewDefaultEthClient(client.Client(), nil, nil), rdConfig, reorgdetector.L1) + require.NoError(t, err) + require.NoError(t, rd.Start(ctx)) + multidownloaderClient := sync.NewAdapterEthClientToMultidownloader(etherman.NewDefaultEthClient(client.Client(), nil, nil)) + syncer, err = l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + } + go syncer.Start(ctx) + + // Commit block 6 + header, err := client.Client().HeaderByHash(ctx, client.Commit()) + require.NoError(t, err) + reorgFrom := header.Hash() + + // Commit block 7 + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = 
verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } - // Update Rollup Exit Tree - newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) - require.NoError(t, err) + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) + + // Commit block 8 that contains the transaction that updates the trees + helpers.CommitBlocks(t, client, 1, time.Millisecond*500) + + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Assert L1 Info tree root + expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) + + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + + // Forking from block 6 + // Note: reorged trx will be added to pending transactions + // and will be committed when the forked block is committed + err = client.Fork(reorgFrom) + require.NoError(t, err) + + blockNum, err := client.Client().BlockNumber(ctx) + log.Infof("Current block number after fork: %d", blockNum) + require.NoError(t, err) + require.Equal(t, header.Number.Uint64(), blockNum) + + // Commit 5 blocks after the fork + helpers.CommitBlocks(t, client, 5, time.Millisecond*100) + + // Assert rollup exit root after committing new blocks on the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + // TODO: Remove this sleep + if !tt.useMultidownloaderForTest { + time.Sleep(time.Second * 1) // wait for syncer to process the reorg + } + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + + lastProcessedBlock, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + log.Infof("Last processed block after reorg: %d", lastProcessedBlock) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + showLeafs(t, ctx, syncer, "Before second fork: ") + + // Forking from block 6 again + log.Infof("Forking again from block 6: %s", reorgFrom.Hex()) + err = client.Fork(reorgFrom) + require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + // wait for syncer to process the reorg + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 + // TODO: Remove this sleep + + time.Sleep(time.Second * 1) + + // create some events 
and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + helpers.CommitBlocks(t, client, 1, time.Millisecond*100) + + // Make sure syncer is up to date + helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + showLeafs(t, ctx, syncer, "After second fork: ") + checkBlocks(t, ctx, client.Client(), evmMultidownloader, 0, 10) + + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + }) } +} - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(1, 1) - - // Commit block 8 that contains the transaction that updates the trees - helpers.CommitBlocks(t, client, 1, time.Millisecond*500) - - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) - - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - - // Assert L1 Info tree root - expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) - require.NoError(t, err) - - require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) - - // Forking from block 6 - // Note: reorged trx will be added to pending transactions - // and will be committed when the forked block is committed - err = client.Fork(reorgFrom) - require.NoError(t, err) - - blockNum, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - require.Equal(t, header.Number.Uint64(), blockNum) - - // Commit block 7, 8, 9 after the fork - helpers.CommitBlocks(t, client, 5, time.Millisecond*100) - - // Assert rollup exit root after committing new blocks on the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) +func checkBlocks(t *testing.T, ctx context.Context, rawClient simulated.Client, mdr *multidownloader.EVMMultidownloader, fromBlock, toBlock uint64) { + t.Helper() + if mdr == nil { + log.Warn("checkBlocks: multidownloader is nil, skipping block check") + return + } + rpcLatest, err := rawClient.BlockNumber(ctx) require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + mdrLatest, err := mdr.HeaderByNumber(ctx, nil) require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - - // Forking from block 6 again - err = client.Fork(reorgFrom) - require.NoError(t, err) - time.Sleep(time.Millisecond * 500) - - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) // Commit block 7 - - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(2, 1) - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) - - // Make sure syncer is up to date - helpers.WaitForSyncerToCatchUp(ctx, t, syncer, client) + log.Infof("checkBlocks: from 
%d to %d, raw latest: %d, mdr latest: %d", fromBlock, toBlock, rpcLatest, mdrLatest.Number) + + for i := fromBlock; i <= toBlock; i++ { + block, errRaw := rawClient.BlockByNumber(ctx, big.NewInt(int64(i))) + blockMDR, errMDR := mdr.HeaderByNumber(ctx, aggkittypes.NewBlockNumber(i)) + require.Equal(t, errRaw == nil, errMDR == nil, "block number %d: errRaw=%v, errMDR=%v blockMDR=%s", i, errRaw, errMDR, blockMDR.String()) + if errRaw == nil && errMDR == nil { + require.Equal(t, block.Hash(), blockMDR.Hash, "block number %d", i) + } + } +} - // Assert rollup exit root after the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) +func showLeafs(t *testing.T, ctx context.Context, syncer *l1infotreesync.L1InfoTreeSync, prefix string) { + t.Helper() + for i := 0; i < 6; i++ { + leaf, err := syncer.GetInfoByIndex(ctx, uint32(i)) + if err != nil { + log.Infof(prefix+"Leaf %d: error: %s", i, err.Error()) + } else { + log.Infof(prefix+"Leaf %d: %+v", i, leaf) + } + } } func TestStressAndReorgs(t *testing.T) { @@ -318,7 +422,7 @@ func TestStressAndReorgs(t *testing.T) { require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy if useMultidownloaderForTests { cfgMD := multidownloader.NewConfigDefault("l1", t.TempDir()) cfgMD.Enabled = true @@ -328,8 +432,9 @@ func TestStressAndReorgs(t *testing.T) { "testMD", etherman.NewDefaultEthClient(client.Client(), nil, nil), nil, // rpcClient - nil, - nil, + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { @@ -348,7 +453,7 @@ func TestStressAndReorgs(t *testing.T) { RequireStorageContentCompatibility: true, WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond * 100), } - syncer, err := l1infotreesync.New(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) + syncer, err := l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, rd, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index cc905cbd5..97b87438a 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -7,9 +7,12 @@ import ( "math/big" jRPC "github.com/0xPolygon/cdk-rpc/rpc" + aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/compatibility" "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader" + mdrsync "github.com/agglayer/aggkit/multidownloader/sync" "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree" "github.com/agglayer/aggkit/tree/types" @@ -35,12 +38,23 @@ var ( ErrNotFound = errors.New("l1infotreesync: not found") ) +type DriverInterface interface { + Sync(ctx context.Context) + GetCompletionPercentage() *float64 +} + +type DownloaderInterface interface { + Finality() aggkittypes.BlockNumberFinality +} + type L1InfoTreeSync struct { processor *processor - driver *sync.EVMDriver - downloader *sync.EVMDownloader + driver DriverInterface + downloader DownloaderInterface } +type RuntimeData = mdrsync.RuntimeData + 
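+// Editorial sketch, not part of this PR: how the two construction paths defined below 
+// are expected to be wired, mirroring the e2e tests in this diff (error handling elided). 
+// The multidownloader-based path needs the shared downloader started alongside the 
+// syncer, while the legacy path needs a reorg detector instead: 
+// 
+//	syncer, _ := l1infotreesync.NewMultidownloadBased(ctx, cfg, evmMultidownloader, flags) 
+//	go evmMultidownloader.Start(ctx) 
+//	go syncer.Start(ctx) 
+// 
+//	legacySyncer, _ := l1infotreesync.NewLegacy(ctx, cfg, multidownloaderClient, reorgDetector, flags) 
+//	go legacySyncer.Start(ctx) 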
func NewReadOnly( ctx context.Context, dbPath string, @@ -56,10 +70,85 @@ func NewReadOnly( } // New creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree -func New( +func NewMultidownloadBased( ctx context.Context, cfg Config, - l1Client aggkittypes.MultiDownloader, + l1Multidownloader *multidownloader.EVMMultidownloader, + flags CreationFlags, +) (*L1InfoTreeSync, error) { + processor, err := newProcessor(cfg.DBPath) + if err != nil { + return nil, err + } + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: cfg.RetryAfterErrorPeriod.Duration, + MaxRetryAttemptsAfterError: cfg.MaxRetryAttemptsAfterError, + } + + appender, err := buildAppender(l1Multidownloader.EthClient(), cfg.GlobalExitRootAddr, cfg.RollupManagerAddr, flags) + if err != nil { + return nil, err + } + addressesToQuery := []common.Address{cfg.GlobalExitRootAddr, cfg.RollupManagerAddr} + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "l1infotreesync", + ContractAddresses: addressesToQuery, + FromBlock: cfg.InitialBlock, + ToBlock: cfg.BlockFinality, + } + err = l1Multidownloader.RegisterSyncer(syncerConfig) + if err != nil { + return nil, fmt.Errorf("failed to register l1infotreesync in multidownloader: %w", err) + } + logger := log.WithFields("syncer", syncerID) + // TODO: move the durations to config file (mdrsync.NewEVMDownloader) + logger.Infof("Creating L1InfoTreeSync with WaitForNewBlocksPeriod: %s, RetryAfterErrorPeriod: %s", + cfg.WaitForNewBlocksPeriod.String(), + cfg.RetryAfterErrorPeriod.String(), + ) + downloader := mdrsync.NewEVMDownloader( + l1Multidownloader, + logger, + rh, + appender, + cfg.RetryAfterErrorPeriod.Duration, + cfg.WaitForNewBlocksPeriod.Duration, + ) + + compatibilityChecker := compatibility.NewCompatibilityCheck( + cfg.RequireStorageContentCompatibility, + func(ctx context.Context) (RuntimeData, error) { + chainID, err := downloader.ChainID(ctx) + if err != nil { + return RuntimeData{}, err + } + return RuntimeData{ + ChainID: chainID, + Addresses: addressesToQuery, + }, nil + }, + compatibility.NewKeyValueToCompatibilityStorage[RuntimeData]( + db.NewKeyValueStorage(processor.getDB()), + aggkitcommon.L1INFOTREESYNC, + )) + + driver := mdrsync.NewEVMDriver(logger, processor, downloader, syncerConfig, + cfg.SyncBlockChunkSize, rh, compatibilityChecker) + if err != nil { + return nil, err + } + return &L1InfoTreeSync{ + processor: processor, + driver: driver, + downloader: downloader, + }, nil +} + +// NewLegacy creates a L1 Info tree syncer that syncs the L1 info tree and the rollup exit tree +func NewLegacy( + ctx context.Context, + cfg Config, + l1Client aggkittypes.MultiDownloaderLegacy, reorgDetector sync.ReorgDetector, flags CreationFlags, ) (*L1InfoTreeSync, error) { @@ -67,6 +156,7 @@ func New( if err != nil { return nil, err } + // TODO: get the initialBlock from L1 to simplify config lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) if err != nil { @@ -100,10 +190,10 @@ func New( addressesToQuery := []common.Address{cfg.GlobalExitRootAddr, cfg.RollupManagerAddr} err = l1Client.RegisterSyncer( aggkittypes.SyncerConfig{ - SyncerID: "l1infotreesync", - ContractsAddr: addressesToQuery, - FromBlock: cfg.InitialBlock, - ToBlock: cfg.BlockFinality, + SyncerID: "l1infotreesync", + ContractAddresses: addressesToQuery, + FromBlock: cfg.InitialBlock, + ToBlock: cfg.BlockFinality, }, ) if err != nil { @@ -132,7 +222,10 @@ func New( compatibilityChecker := compatibility.NewCompatibilityCheck( cfg.RequireStorageContentCompatibility, 
downloader.RuntimeData, - processor) + compatibility.NewKeyValueToCompatibilityStorage[sync.RuntimeData]( + db.NewKeyValueStorage(processor.getDB()), + aggkitcommon.L1INFOTREESYNC, + )) driver, err := sync.NewEVMDriver(reorgDetector, processor, downloader, syncerID, downloadBufferSize, rh, compatibilityChecker) @@ -152,6 +245,10 @@ func (d *L1InfoTreeSync) Finality() aggkittypes.BlockNumberFinality { return d.downloader.Finality() } +func (d *L1InfoTreeSync) GetCompletionPercentage() *float64 { + return d.driver.GetCompletionPercentage() +} + // GetRPCServices returns the list of services that the RPC provider exposes func (a *L1InfoTreeSync) GetRPCServices() []jRPC.Service { logger := log.WithFields("module", "l1infotreesync-rpc") diff --git a/l1infotreesync/l1infotreesync_rpc.go b/l1infotreesync/l1infotreesync_rpc.go index c092ac478..34054b614 100644 --- a/l1infotreesync/l1infotreesync_rpc.go +++ b/l1infotreesync/l1infotreesync_rpc.go @@ -10,12 +10,14 @@ import ( ) type StatusInfo struct { - Status string `json:"status"` + Status string `json:"status"` + CompletionPercentage *float64 `json:"completionPercentage,omitempty"` } type L1InfoTreeSyncer interface { GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) GetLatestL1InfoLeaf(ctx context.Context) (*L1InfoTreeLeaf, error) GetInfoByRoot(ger common.Hash) (*L1InfoTreeLeaf, error) + GetCompletionPercentage() *float64 } // L1InfoTreeSyncRPC is the RPC interface for the L1InfoTreeSync @@ -39,7 +41,8 @@ func NewL1InfoTreeSyncRPC( // -d '{"method":"l1infotreesync_status", "params":[], "id":1}' func (b *L1InfoTreeSyncRPC) Status() (interface{}, rpc.Error) { info := StatusInfo{ - Status: "running", + Status: "running", + CompletionPercentage: b.l1InfoTreeSyncer.GetCompletionPercentage(), } return info, nil } diff --git a/l1infotreesync/l1infotreesync_rpc_test.go b/l1infotreesync/l1infotreesync_rpc_test.go index a5509aeee..41e8ae4f6 100644 --- a/l1infotreesync/l1infotreesync_rpc_test.go +++ b/l1infotreesync/l1infotreesync_rpc_test.go @@ -13,17 +13,26 @@ import ( var testHash = common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") func TestL1InfoTreeSyncRPC_Status(t *testing.T) { - rpc := NewL1InfoTreeSyncRPC( - log.WithFields("modules", "test"), - nil, - ) - + mockSyncer := NewL1InfoTreeSyncerMock(t) + rpc := NewL1InfoTreeSyncRPC(log.WithFields("modules", "test"), mockSyncer) + mockSyncer.EXPECT().GetCompletionPercentage().Return(nil).Once() result, err := rpc.Status() require.Nil(t, err, "expected no error from Status") statusInfo, ok := result.(StatusInfo) require.True(t, ok, "expected result to be of type StatusInfo") assert.Equal(t, "running", statusInfo.Status, "status should be 'running'") + require.Nil(t, statusInfo.CompletionPercentage, "expected CompletionPercentage to be nil") + + percent := float64(20.0) + mockSyncer.EXPECT().GetCompletionPercentage().Return(&percent).Once() + result, err = rpc.Status() + require.NoError(t, err) + statusInfo, ok = result.(StatusInfo) + require.True(t, ok, "expected result to be of type StatusInfo") + assert.Equal(t, "running", statusInfo.Status, "status should be 'running'") + require.NotNil(t, statusInfo.CompletionPercentage, "expected CompletionPercentage to not be nil") + assert.Equal(t, percent, *statusInfo.CompletionPercentage, "expected CompletionPercentage to match the mock value") } func TestL1InfoTreeSyncRPC_GetInfoByGlobalExitRoot_NilParam_Success(t *testing.T) { diff --git a/l1infotreesync/l1infotreesync_test.go 
b/l1infotreesync/l1infotreesync_test.go index 69332dbf4..96ab5a0c4 100644 --- a/l1infotreesync/l1infotreesync_test.go +++ b/l1infotreesync/l1infotreesync_test.go @@ -319,3 +319,16 @@ func TestFinality(t *testing.T) { } require.Equal(t, aggkittypes.LatestBlock, s.Finality()) } + +func TestL1InfoTreeSync_GetCompletionPercentage(t *testing.T) { + mockEVMDriver := NewDriverInterfaceMock(t) + s := L1InfoTreeSync{ + driver: mockEVMDriver, + } + mockEVMDriver.EXPECT().GetCompletionPercentage().Return(nil).Once() + + require.Nil(t, s.GetCompletionPercentage(), "expected GetCompletionPercentage to return nil for legacy syncer") + percent := float64(10.0) + mockEVMDriver.EXPECT().GetCompletionPercentage().Return(&percent).Once() + require.Equal(t, &percent, s.GetCompletionPercentage()) +} diff --git a/l1infotreesync/mock_downloader_interface.go b/l1infotreesync/mock_downloader_interface.go new file mode 100644 index 000000000..dc68fb63a --- /dev/null +++ b/l1infotreesync/mock_downloader_interface.go @@ -0,0 +1,80 @@ +// Code generated by mockery. DO NOT EDIT. + +package l1infotreesync + +import ( + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// DownloaderInterfaceMock is an autogenerated mock type for the DownloaderInterface type +type DownloaderInterfaceMock struct { + mock.Mock +} + +type DownloaderInterfaceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *DownloaderInterfaceMock) EXPECT() *DownloaderInterfaceMock_Expecter { + return &DownloaderInterfaceMock_Expecter{mock: &_m.Mock} +} + +// Finality provides a mock function with no fields +func (_m *DownloaderInterfaceMock) Finality() types.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Finality") + } + + var r0 types.BlockNumberFinality + if rf, ok := ret.Get(0).(func() types.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.BlockNumberFinality) + } + + return r0 +} + +// DownloaderInterfaceMock_Finality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finality' +type DownloaderInterfaceMock_Finality_Call struct { + *mock.Call +} + +// Finality is a helper method to define mock.On call +func (_e *DownloaderInterfaceMock_Expecter) Finality() *DownloaderInterfaceMock_Finality_Call { + return &DownloaderInterfaceMock_Finality_Call{Call: _e.mock.On("Finality")} +} + +func (_c *DownloaderInterfaceMock_Finality_Call) Run(run func()) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DownloaderInterfaceMock_Finality_Call) Return(_a0 types.BlockNumberFinality) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DownloaderInterfaceMock_Finality_Call) RunAndReturn(run func() types.BlockNumberFinality) *DownloaderInterfaceMock_Finality_Call { + _c.Call.Return(run) + return _c +} + +// NewDownloaderInterfaceMock creates a new instance of DownloaderInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
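+// 
+// Editorial usage sketch, not part of the generated file: this mock can stand in for 
+// the downloader when unit-testing L1InfoTreeSync, along the lines of TestFinality in 
+// this diff (aggkittypes is the alias used by the test files): 
+// 
+//	m := NewDownloaderInterfaceMock(t) 
+//	m.EXPECT().Finality().Return(aggkittypes.LatestBlock).Once() 
+//	s := L1InfoTreeSync{downloader: m} 
+//	require.Equal(t, aggkittypes.LatestBlock, s.Finality()) 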
+func NewDownloaderInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DownloaderInterfaceMock { + mock := &DownloaderInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/mock_driver_interface.go b/l1infotreesync/mock_driver_interface.go new file mode 100644 index 000000000..d8f4a5a0c --- /dev/null +++ b/l1infotreesync/mock_driver_interface.go @@ -0,0 +1,116 @@ +// Code generated by mockery. DO NOT EDIT. + +package l1infotreesync + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// DriverInterfaceMock is an autogenerated mock type for the DriverInterface type +type DriverInterfaceMock struct { + mock.Mock +} + +type DriverInterfaceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *DriverInterfaceMock) EXPECT() *DriverInterfaceMock_Expecter { + return &DriverInterfaceMock_Expecter{mock: &_m.Mock} +} + +// GetCompletionPercentage provides a mock function with no fields +func (_m *DriverInterfaceMock) GetCompletionPercentage() *float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCompletionPercentage") + } + + var r0 *float64 + if rf, ok := ret.Get(0).(func() *float64); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*float64) + } + } + + return r0 +} + +// DriverInterfaceMock_GetCompletionPercentage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCompletionPercentage' +type DriverInterfaceMock_GetCompletionPercentage_Call struct { + *mock.Call +} + +// GetCompletionPercentage is a helper method to define mock.On call +func (_e *DriverInterfaceMock_Expecter) GetCompletionPercentage() *DriverInterfaceMock_GetCompletionPercentage_Call { + return &DriverInterfaceMock_GetCompletionPercentage_Call{Call: _e.mock.On("GetCompletionPercentage")} +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) Run(run func()) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) Return(_a0 *float64) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DriverInterfaceMock_GetCompletionPercentage_Call) RunAndReturn(run func() *float64) *DriverInterfaceMock_GetCompletionPercentage_Call { + _c.Call.Return(run) + return _c +} + +// Sync provides a mock function with given fields: ctx +func (_m *DriverInterfaceMock) Sync(ctx context.Context) { + _m.Called(ctx) +} + +// DriverInterfaceMock_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' +type DriverInterfaceMock_Sync_Call struct { + *mock.Call +} + +// Sync is a helper method to define mock.On call +// - ctx context.Context +func (_e *DriverInterfaceMock_Expecter) Sync(ctx interface{}) *DriverInterfaceMock_Sync_Call { + return &DriverInterfaceMock_Sync_Call{Call: _e.mock.On("Sync", ctx)} +} + +func (_c *DriverInterfaceMock_Sync_Call) Run(run func(ctx context.Context)) *DriverInterfaceMock_Sync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DriverInterfaceMock_Sync_Call) Return() *DriverInterfaceMock_Sync_Call { + _c.Call.Return() + return _c +} + +func (_c *DriverInterfaceMock_Sync_Call) RunAndReturn(run func(context.Context)) *DriverInterfaceMock_Sync_Call { + _c.Run(run) + return _c +} + +// NewDriverInterfaceMock 
creates a new instance of DriverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDriverInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DriverInterfaceMock { + mock := &DriverInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/mock_l1_info_tree_syncer.go b/l1infotreesync/mock_l1_info_tree_syncer.go index fa1759b7f..e026414cf 100644 --- a/l1infotreesync/mock_l1_info_tree_syncer.go +++ b/l1infotreesync/mock_l1_info_tree_syncer.go @@ -23,6 +23,53 @@ func (_m *L1InfoTreeSyncerMock) EXPECT() *L1InfoTreeSyncerMock_Expecter { return &L1InfoTreeSyncerMock_Expecter{mock: &_m.Mock} } +// GetCompletionPercentage provides a mock function with no fields +func (_m *L1InfoTreeSyncerMock) GetCompletionPercentage() *float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCompletionPercentage") + } + + var r0 *float64 + if rf, ok := ret.Get(0).(func() *float64); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*float64) + } + } + + return r0 +} + +// L1InfoTreeSyncerMock_GetCompletionPercentage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCompletionPercentage' +type L1InfoTreeSyncerMock_GetCompletionPercentage_Call struct { + *mock.Call +} + +// GetCompletionPercentage is a helper method to define mock.On call +func (_e *L1InfoTreeSyncerMock_Expecter) GetCompletionPercentage() *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + return &L1InfoTreeSyncerMock_GetCompletionPercentage_Call{Call: _e.mock.On("GetCompletionPercentage")} +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) Run(run func()) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) Return(_a0 *float64) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetCompletionPercentage_Call) RunAndReturn(run func() *float64) *L1InfoTreeSyncerMock_GetCompletionPercentage_Call { + _c.Call.Return(run) + return _c +} + // GetInfoByGlobalExitRoot provides a mock function with given fields: ger func (_m *L1InfoTreeSyncerMock) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { ret := _m.Called(ger) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c0481add9..269557c37 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -9,13 +9,14 @@ import ( aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" - "github.com/agglayer/aggkit/db/compatibility" dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/l1infotreesync/migrations" "github.com/agglayer/aggkit/log" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree" treetypes "github.com/agglayer/aggkit/tree/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/russross/meddler" @@ -34,7 +35,6 @@ type processor struct { halted bool haltedReason string log *log.Logger - compatibility.CompatibilityDataStorager[sync.RuntimeData] } // UpdateL1InfoTree 
representation of the UpdateL1InfoTree event @@ -152,13 +152,13 @@ func newProcessor(dbPath string) (*processor, error) { l1InfoTree: tree.NewAppendOnlyTree(database, migrations.L1InfoTreePrefix), rollupExitTree: tree.NewUpdatableTree(database, migrations.RollupExitTreePrefix), log: log.WithFields("processor", "l1infotreesync"), - CompatibilityDataStorager: compatibility.NewKeyValueToCompatibilityStorage[sync.RuntimeData]( - db.NewKeyValueStorage(database), - aggkitcommon.L1INFOTREESYNC, - ), }, nil } +func (p *processor) getDB() *sql.DB { + return p.db + } + // GetLatestL1InfoLeafUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned func (p *processor) GetLatestL1InfoLeafUntilBlock(ctx context.Context, blockNum *uint64) (*L1InfoTreeLeaf, error) { @@ -253,6 +253,29 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { return p.getLastProcessedBlockWithTx(p.db) } +// GetLastProcessedBlockHeader returns the last processed block header. +// This function is used by the multidownloader. +func (p *processor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + var lastProcessedBlockNum uint64 + var hash *string + row := p.db.QueryRow("SELECT num, hash FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlockNum, &hash) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + var blockHash common.Hash + if hash == nil { + blockHash = common.Hash{} // zero hash if no hash is available + } else { + blockHash = common.HexToHash(*hash) + } + hdr := aggkittypes.NewBlockHeader(lastProcessedBlockNum, blockHash, 0, nil) + return hdr, nil +} + func (p *processor) getLastProcessedBlockWithTx(tx dbtypes.Querier) (uint64, error) { var lastProcessedBlockNum uint64 @@ -335,6 +358,59 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } return nil } +func (p *processor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + if p.isHalted() { + p.log.Errorf("processor is halted due to: %s", p.haltedReason) + return sync.ErrInconsistentState + } + return p.processBlocksSameTx(ctx, blocks) +} + +// processBlocksSameTx processes the blocks in the same transaction, so if any block fails to + be processed, all the blocks are rolled back. This is important to keep the integrity of the data, +// especially for the L1 Info tree, which relies on the correct order of the leaves. +// Note: could a rollback be a problem if in-memory data has already been updated?
+func (p *processor) processBlocksSameTx(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + tx, err := db.NewTx(ctx, p.db) + if err != nil { + return err + } + shouldRollback := true + defer func() { + if shouldRollback { + p.log.Debugf("rolling back block processing for blocks") + if errRllbck := tx.Rollback(); errRllbck != nil { + p.log.Errorf("error while rolling back tx %v", errRllbck) + } + } + }() + + for _, block := range blocks.Data { + syncBlock := sync.Block{ + Num: block.Num, + Hash: block.Hash, + Events: block.Events, + } + if err := p.processBlock(tx, syncBlock); err != nil { + return fmt.Errorf("processing block %d: %w", block.Num, err) + } + logFunc := p.log.Debugf + if len(block.Events) > 0 { + logFunc = p.log.Infof + } + logFunc("block %d processed with %d events", block.Num, len(block.Events)) + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("err: %w", err) + } + shouldRollback = false + p.log.Infof("processed %d blocks, percent %.2f%% complete. LastBlock: %d", + len(blocks.Data), blocks.CompletionPercentage, blocks.Data[len(blocks.Data)-1].Num) + return nil +}
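The batch commit above follows a standard all-or-nothing pattern. A minimal, self-contained sketch of that pattern, assuming a plain database/sql handle and a hypothetical processOne callback (names here are illustrative, not the aggkit API):

package sketch

import (
	"context"
	"database/sql"
	"fmt"
)

// Block is a toy stand-in for the real sync.Block type.
type Block struct{ Num uint64 }

// processBatchSameTx applies every block inside one transaction; the deferred
// rollback discards the whole batch if any single block fails.
func processBatchSameTx(ctx context.Context, db *sql.DB, blocks []Block,
	processOne func(tx *sql.Tx, b Block) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	committed := false
	defer func() {
		if !committed {
			_ = tx.Rollback() // all-or-nothing: undo every block on failure
		}
	}()
	for _, b := range blocks {
		if err := processOne(tx, b); err != nil {
			return fmt.Errorf("processing block %d: %w", b.Num, err)
		}
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	committed = true
	return nil
}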
err: %w", err) } @@ -457,16 +550,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } } } - - if err := tx.Commit(); err != nil { - return fmt.Errorf("err: %w", err) - } - shouldRollback = false - logFunc := p.log.Debugf - if len(block.Events) > 0 { - logFunc = p.log.Infof - } - logFunc("block %d processed with %d events", block.Num, len(block.Events)) return nil } diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 0163de852..e676090a1 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -544,3 +544,60 @@ func TestCalculateGER(t *testing.T) { }) } } + +func TestGetLastProcessedBlockHeader(t *testing.T) { + t.Parallel() + ctx := t.Context() + + t.Run("returns nil when no blocks are processed", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_empty.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.Nil(t, hdr) + }) + + t.Run("returns last processed block when single block exists", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_single.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + expectedHash := common.HexToHash("0xabc123") + expectedNum := uint64(1) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, expectedNum, expectedHash.String()) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.NotNil(t, hdr) + require.Equal(t, expectedNum, hdr.Number) + require.Equal(t, expectedHash, hdr.Hash) + }) + + t.Run("returns last processed block when multiple blocks exist", func(t *testing.T) { + t.Parallel() + dbPath := path.Join(t.TempDir(), "TestGetLastProcessedBlockHeader_multiple.sqlite") + p, err := newProcessor(dbPath) + require.NoError(t, err) + + // Insert multiple blocks + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, 1, common.HexToHash("0x1").String()) + require.NoError(t, err) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, 2, common.HexToHash("0x2").String()) + require.NoError(t, err) + expectedHash := common.HexToHash("0x3") + expectedNum := uint64(3) + _, err = p.db.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, expectedNum, expectedHash.String()) + require.NoError(t, err) + + hdr, err := p.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + require.NotNil(t, hdr) + require.Equal(t, expectedNum, hdr.Number) + require.Equal(t, expectedHash, hdr.Hash) + }) +} diff --git a/multidownloader/config.go b/multidownloader/config.go index 8e9a13847..ac4347e30 100644 --- a/multidownloader/config.go +++ b/multidownloader/config.go @@ -34,12 +34,18 @@ type Config struct { BlockFinality aggkittypes.BlockNumberFinality // WaitPeriodToCheckCatchUp is the duration to wait before checking again if logs are not yet available WaitPeriodToCheckCatchUp types.Duration + // PeriodToCheckReorgs is the duration to wait before checking for reorgs + // If is 0 reorgs are checked only when a new block appears + PeriodToCheckReorgs types.Duration + // DeveloperMode enables developer mode features like forcing reorgs + DeveloperMode bool } const ( defaultBlockChunkSize = 10000 defaultMaxParallelBlockHeaderRetrieval = 30 defaultWaitPeriodToCheckCatchUp = time.Second * 10 + defaultPeriodToCheckReorgs = time.Second * 5 ) func NewConfigDefault(name 
string, basePathDB string) Config { @@ -48,12 +54,14 @@ func NewConfigDefault(name string, basePathDB string) Config { } dbPath := path.Join(basePathDB, fmt.Sprintf("%s_multidownloader.sqlite", name)) return Config{ - Enabled: false, + Enabled: true, StoragePath: dbPath, BlockChunkSize: defaultBlockChunkSize, MaxParallelBlockHeaderRetrieval: defaultMaxParallelBlockHeaderRetrieval, BlockFinality: aggkittypes.FinalizedBlock, WaitPeriodToCheckCatchUp: types.NewDuration(defaultWaitPeriodToCheckCatchUp), + PeriodToCheckReorgs: types.NewDuration(defaultPeriodToCheckReorgs), + DeveloperMode: false, } } @@ -75,10 +83,12 @@ func (cfg *Config) Validate() error { func (cfg *Config) String() string { return fmt.Sprintf("MultidownloaderConfig{Enabled:%t, BlockChunkSize:%d, "+ - "MaxParallelBlockHeaderRetrieval:%d, BlockFinality:%s, WaitPeriodToCheckCatchUp:%s}", + "MaxParallelBlockHeaderRetrieval:%d, BlockFinality:%s, WaitPeriodToCheckCatchUp:%s, "+ + "PeriodToCheckReorgs:%s}", cfg.Enabled, cfg.BlockChunkSize, cfg.MaxParallelBlockHeaderRetrieval, cfg.BlockFinality.String(), - cfg.WaitPeriodToCheckCatchUp.String()) + cfg.WaitPeriodToCheckCatchUp.String(), + cfg.PeriodToCheckReorgs.String()) } diff --git a/multidownloader/config_test.go b/multidownloader/config_test.go index 2a628857a..32564e8e3 100644 --- a/multidownloader/config_test.go +++ b/multidownloader/config_test.go @@ -2,7 +2,6 @@ package multidownloader import ( "testing" - "time" "github.com/agglayer/aggkit/config/types" aggkittypes "github.com/agglayer/aggkit/types" @@ -11,13 +10,15 @@ import ( func TestNewConfigDefault(t *testing.T) { cfg := NewConfigDefault("l1", "/tmp/aggkit/") - require.Equal(t, false, cfg.Enabled) + require.Equal(t, true, cfg.Enabled) require.Equal(t, "/tmp/aggkit/l1_multidownloader.sqlite", cfg.StoragePath) - require.Equal(t, uint32(10000), cfg.BlockChunkSize, "BlockChunkSize should be 10000") - require.Equal(t, 30, cfg.MaxParallelBlockHeaderRetrieval, "MaxParallelBlockHeaderRetrieval should be 30") + require.Equal(t, uint32(defaultBlockChunkSize), cfg.BlockChunkSize, "BlockChunkSize should be 10000") + require.Equal(t, defaultMaxParallelBlockHeaderRetrieval, cfg.MaxParallelBlockHeaderRetrieval, "MaxParallelBlockHeaderRetrieval should be 30") require.Equal(t, aggkittypes.FinalizedBlock, cfg.BlockFinality, "BlockFinality should be FinalizedBlock") - require.Equal(t, types.NewDuration(time.Second*10), cfg.WaitPeriodToCheckCatchUp, "WaitPeriodToCheckCatchUp should be 10 seconds") - require.False(t, cfg.Enabled, "Enabled should be false by default") + require.Equal(t, types.NewDuration(defaultWaitPeriodToCheckCatchUp), cfg.WaitPeriodToCheckCatchUp, "WaitPeriodToCheckCatchUp should be 10 seconds") + require.Equal(t, types.NewDuration(defaultPeriodToCheckReorgs), cfg.PeriodToCheckReorgs, "PeriodToCheckReorgs should be 5 seconds") + + require.True(t, cfg.Enabled, "Enabled should be true by default") } func TestNewConfigDefault_ValidatesCorrectly(t *testing.T) { @@ -102,5 +103,6 @@ func TestConfig_String(t *testing.T) { require.Contains(t, str, "MaxParallelBlockHeaderRetrieval", "String() should contain MaxParallelBlockHeaderRetrieval") require.Contains(t, str, "BlockFinality", "String() should contain BlockFinality") require.Contains(t, str, "WaitPeriodToCheckCatchUp", "String() should contain WaitPeriodToCheckCatchUp") + require.Contains(t, str, "PeriodToCheckReorgs", "String() should contain PeriodToCheckReorgs") require.Contains(t, str, "Enabled", "String() should contain Enabled") } diff --git 
a/multidownloader/e2e_test.go b/multidownloader/e2e_test.go new file mode 100644 index 000000000..975513cdb --- /dev/null +++ b/multidownloader/e2e_test.go @@ -0,0 +1,340 @@ +package multidownloader + +import ( + "context" + "errors" + "fmt" + "math/big" + "math/rand" + "sync" + "testing" + "time" + + configtypes "github.com/agglayer/aggkit/config/types" + "github.com/agglayer/aggkit/etherman" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/multidownloader/storage" + mdsync "github.com/agglayer/aggkit/multidownloader/sync" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + aggkitsync "github.com/agglayer/aggkit/sync" + "github.com/agglayer/aggkit/test/contracts/logemitter" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +var ( + pingSignature = crypto.Keccak256Hash([]byte("Ping(address,uint256,string)")) +) + +type mdrE2ESimulatedEnv struct { + SimulatedL1 *simulated.Backend + LogEmitterAddr common.Address + LogEmitterContract *logemitter.Logemitter + ethClient *etherman.DefaultEthClient + auth *bind.TransactOpts +} + +type PingEvent struct { + BlockPosition uint64 + From common.Address + Id uint64 + Message string +} + +type LogemitterEvent struct { + PingEvent *PingEvent +} + +func logemitterAppender(contract *logemitter.Logemitter) aggkitsync.LogAppenderMap { + appender := make(aggkitsync.LogAppenderMap) + appender[pingSignature] = func(b *aggkitsync.EVMBlock, l types.Log) error { + event, err := contract.ParsePing(l) + if err != nil { + return err + } + b.Events = append(b.Events, &LogemitterEvent{PingEvent: &PingEvent{ + BlockPosition: uint64(l.Index), + From: event.From, + Id: event.Id.Uint64(), + Message: event.Message, + }}) + return nil + } + return appender +} + +type logemitterProcessor struct { + logger *log.Logger + mdr *EVMMultidownloader + mutex sync.Mutex + lastBlock *aggkittypes.BlockHeader + events map[uint64]*aggkitsync.EVMBlock +} + +func (p *logemitterProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.lastBlock, nil +} + +func (p *logemitterProcessor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + for _, block := range blocks.Data { + if err := p.ProcessBlock(ctx, block); err != nil { + return err + } + } + return nil +} + +func (p *logemitterProcessor) ProcessBlock(ctx context.Context, block *aggkitsync.EVMBlock) error { + p.mutex.Lock() + defer p.mutex.Unlock() + p.lastBlock = &aggkittypes.BlockHeader{ + Number: block.Num, + Hash: block.Hash, + } + p.logger.Infof("Processed block number %d / %s with %d events", + block.Num, block.Hash.Hex(), len(block.Events)) + if p.events == nil { + p.events = make(map[uint64]*aggkitsync.EVMBlock) + } + p.events[block.Num] = block + return nil +}
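Taken together, the methods of logemitterProcessor are the contract the multidownloader sync driver appears to expect from a processor. A sketch of that method set as an interface, inferred from this test rather than taken from an authoritative aggkit definition:

package sketch

import (
	"context"

	mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types"
	aggkittypes "github.com/agglayer/aggkit/types"
)

// blockProcessor mirrors the methods the driver calls on logemitterProcessor.
type blockProcessor interface {
	// GetLastProcessedBlockHeader reports the resume point (nil if nothing processed yet).
	GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error)
	// ProcessBlocks applies a batch of downloaded blocks with decoded events.
	ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error
	// Reorg discards all state from firstReorgedBlock onwards.
	Reorg(ctx context.Context, firstReorgedBlock uint64) error
}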
p.logger.Infof("New last block after reorg: %s", hdr.String()) + p.lastBlock = hdr + // remove reorged events from p.events + for blkNum := range p.events { + if blkNum >= firstReorgedBlock { + delete(p.events, blkNum) + } + } + return nil +} + +func (p *logemitterProcessor) lastPingEvent() *PingEvent { + p.mutex.Lock() + defer p.mutex.Unlock() + var lastEvent *PingEvent + var lastBlockNum uint64 + for blkNum, block := range p.events { + for _, ev := range block.Events { + logEv, ok := ev.(*LogemitterEvent) + if !ok { + continue + } + if logEv.PingEvent != nil { + if blkNum >= lastBlockNum { + lastBlockNum = blkNum + lastEvent = logEv.PingEvent + } + } + } + } + return lastEvent +} + +func newLogemitterSyncer(t *testing.T, mdr *EVMMultidownloader, + contract *logemitter.Logemitter, + syncerConfig aggkittypes.SyncerConfig) (*mdsync.EVMDriver, + *logemitterProcessor, *mdsync.EVMDownloader) { + t.Helper() + logger := log.WithFields("module", "sync_logemitter") + downloader := mdsync.NewEVMDownloader( + mdr, + logger, + &aggkitsync.RetryHandler{ + MaxRetryAttemptsAfterError: 5, + }, + logemitterAppender(contract), + 1*time.Minute, + 1*time.Second, + ) + + processor := &logemitterProcessor{ + logger: logger, + mdr: mdr, + } + + driver := mdsync.NewEVMDriver( + logger, + processor, + downloader, + syncerConfig, + 100, + &aggkitsync.RetryHandler{ + MaxRetryAttemptsAfterError: 5, + }, + nil, + ) + // TODO: Register syncer must be done by driver? + err := mdr.RegisterSyncer(syncerConfig) + require.NoError(t, err) + return driver, processor, downloader +} + +func buildL1Simulated(t *testing.T) *mdrE2ESimulatedEnv { + t.Helper() + // Generate key + address + key, err := crypto.GenerateKey() + require.NoError(t, err) + from := crypto.PubkeyToAddress(key.PublicKey) + // Genesis + alloc := types.GenesisAlloc{ + from: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}, // 100 ETH + } + envL1 := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(10000000)) + chainID := big.NewInt(1337) + auth, err := bind.NewKeyedTransactorWithChainID(key, chainID) + require.NoError(t, err) + logEmitterAddr, _, logEmitterContract, err := logemitter.DeployLogemitter(auth, envL1.Client(), "msg") + require.NoError(t, err) + require.NotEqual(t, logEmitterAddr, nil) + require.NotNil(t, logEmitterContract) + + envL1.Commit() + return &mdrE2ESimulatedEnv{ + SimulatedL1: envL1, + LogEmitterAddr: logEmitterAddr, + LogEmitterContract: logEmitterContract, + ethClient: etherman.NewDefaultEthClient(envL1.Client(), nil, nil), + auth: auth, + } +} + +func newMultidownloader(t *testing.T, testData *mdrE2ESimulatedEnv) *EVMMultidownloader { + t.Helper() + cfg := NewConfigDefault("e2e_test", t.TempDir()) + // This log logger will only log errors to avoid cluttering the test output + logger, _, err := log.NewLogger(log.Config{ + Level: "error", + Environment: "development", + Outputs: []string{"stdout"}, + }) + require.NoError(t, err) + store, err := storage.NewMultidownloaderStorage(logger, + storage.MultidownloaderStorageConfig{ + DBPath: cfg.StoragePath, + }) + require.NoError(t, err) + simulatedFinalized, err := aggkittypes.NewBlockNumberFinality("LatestBlock/-5") + require.NoError(t, err) + _, err = testData.ethClient.CustomHeaderByNumber(t.Context(), simulatedFinalized) + require.NoError(t, err) + + cfg.BlockFinality = *simulatedFinalized + cfg.WaitPeriodToCheckCatchUp = configtypes.Duration{Duration: 100 * time.Millisecond} + cfg.PeriodToCheckReorgs = configtypes.Duration{Duration: 500 * time.Millisecond} + 
+func TestE2E_CustomSyncer(t *testing.T) { + if testing.Short() { + t.Skip("skipping E2E test in short mode") + } + var err error + testData := buildL1Simulated(t) + mdr := newMultidownloader(t, testData) + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "log_emitter_e2e_test_custom_syncer", + ContractAddresses: []common.Address{ + testData.LogEmitterAddr, + }, + FromBlock: 0, + ToBlock: aggkittypes.LatestBlock, + } + + driver, processor, _ := newLogemitterSyncer(t, mdr, testData.LogEmitterContract, syncerConfig) + ctx := context.TODO() + err = mdr.Initialize(ctx) + require.NoError(t, err) + + // Important: mdr must be started + go func() { + err := mdr.Start(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } + }() + go func() { + driver.Sync(ctx) + }() + + for numReorgs := 0; numReorgs < 3; numReorgs++ { + var blocks []*types.Header + var lastBlock *types.Header + var logIndex int64 + for i := 0; i < 10; i++ { + logIndex++ + log.Infof("Emitting ping %d", logIndex) + _, err = testData.LogEmitterContract.EmitPing(testData.auth, + big.NewInt(logIndex), + fmt.Sprintf("iteration %d", logIndex)) + require.NoError(t, err) + testData.SimulatedL1.Commit() // mine a block + hdr, err := testData.ethClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + if blocks == nil { + blocks = make([]*types.Header, 0) + } + if lastBlock == nil || (lastBlock.Number.Uint64() != hdr.Number.Uint64()) { + blocks = append(blocks, hdr) + lastBlock = hdr + } + } + // Catch up + for { + lastPing := processor.lastPingEvent() + log.Infof("Catching up: last ping id: %+v", lastPing) + if lastPing != nil && lastPing.Id == uint64(logIndex) { + break + } + time.Sleep(100 * time.Millisecond) + } + lastProcessedBlock, err := processor.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + // Pick a random index to fork (at least 1 block must be reorged) + chooseBlockIndex := rand.Intn(len(blocks) - 2) + err = testData.SimulatedL1.Fork(blocks[chooseBlockIndex].Hash()) + require.NoError(t, err) + testData.SimulatedL1.Commit() // mine a block on the reorged chain + for { + currentBlock, err := processor.GetLastProcessedBlockHeader(ctx) + require.NoError(t, err) + log.Infof("Catching up after reorg: previousLastBlock (%d) != currentLastBlock=%d", lastProcessedBlock.Number, currentBlock.Number) + if currentBlock.Number != lastProcessedBlock.Number { + break + } + time.Sleep(100 * time.Millisecond) + } + log.Infof("Finish reorg %d", numReorgs) + } + log.Info("Finish tests") +}
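The reorg loop above drives forks through go-ethereum's simulated backend: Fork rewinds the chain to a previous block and subsequent Commit calls mine a competing branch. A condensed sketch of that mechanic, as a hypothetical helper built on the same simulated API used by the test:

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

// forceReorg rewinds the simulated chain to the given parent block and mines
// depth empty blocks on the new branch so it overtakes the old one.
func forceReorg(backend *simulated.Backend, parent common.Hash, depth int) error {
	if err := backend.Fork(parent); err != nil {
		return err
	}
	for i := 0; i < depth; i++ {
		backend.Commit() // each Commit mines one block on the forked branch
	}
	return nil
}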
diff --git a/multidownloader/evm_multidownloader.go b/multidownloader/evm_multidownloader.go index 653cbffac..e60919952 100644 --- a/multidownloader/evm_multidownloader.go +++ b/multidownloader/evm_multidownloader.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" "regexp" + "sort" "strconv" "strings" "sync" + "time" jRPC "github.com/0xPolygon/cdk-rpc/rpc" aggkitcommon "github.com/agglayer/aggkit/common" @@ -19,12 +21,14 @@ import ( "github.com/agglayer/aggkit/multidownloader/storage" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ethrpc "github.com/ethereum/go-ethereum/rpc" ) const ( - safeMode = true + safeMode = mdrtypes.Finalized + unsafeMode = mdrtypes.NotFinalized chunkSizeReductionFactor = 10 minChunkSize = 1 ) @@ -38,18 +42,23 @@ type EVMMultidownloader struct { blockNotifierManager ethermantypes.BlockNotifierManager name string syncersConfig mdrtypes.SetSyncerConfig + reorgProcessor mdrtypes.ReorgProcessor - mutex sync.Mutex - isInitialized bool - // These are the segments that we need to sync - pendingSync *mdrtypes.SetSyncSegment - // These are the segments that we have already synced - // when a syncer does a `FilterLogs`, it is used to check what is already synced - syncedSegments mdrtypes.SetSyncSegment - statistics *Statistics + mutex sync.Mutex + state *State // current state of synced and pending segments; nil means not initialized + statistics *Statistics + + // Control fields for Start/Stop + stopRequested bool + isRunning bool + wg sync.WaitGroup + cancel context.CancelFunc + + // Debug fields + debug *EVMMultidownloaderDebug } -var _ aggkittypes.MultiDownloader = (*EVMMultidownloader)(nil) +var _ aggkittypes.MultiDownloaderLegacy = (*EVMMultidownloader)(nil) // NewEVMMultidownloader creates a new EVM multidownloader instance with proper validation func NewEVMMultidownloader(log aggkitcommon.Logger, @@ -59,6 +68,7 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, rpcClient aggkittypes.RPCClienter, storageDB mdrtypes.Storager, blockNotifierManager ethermantypes.BlockNotifierManager, + reorgProcessor mdrtypes.ReorgProcessor, ) (*EVMMultidownloader, error) { if blockNotifierManager == nil { blockNotifierManager = ethermanblocknotifier.NewBlockNotifierManager(log, @@ -83,6 +93,16 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, } } + if reorgProcessor == nil { + log.Infof("NewEVMMultidownloader: creating default ReorgProcessor for multidownloader (%s)", name) + reorgProcessor = NewReorgProcessor(log, ethClient, rpcClient, storageDB, cfg.DeveloperMode) + } + var debug *EVMMultidownloaderDebug + if cfg.DeveloperMode { + log.Warnf("NewEVMMultidownloader: enabling debug mode for multidownloader (%s)", name) + debug = NewEVMMultidownloaderDebug() + } + return &EVMMultidownloader{ log: log, ethClient: ethClient, @@ -93,34 +113,11 @@ func NewEVMMultidownloader(log aggkitcommon.Logger, syncersConfig: mdrtypes.NewSetSyncerConfig(), statistics: NewStatistics(), name: name, + reorgProcessor: reorgProcessor, + debug: debug, }, nil } -func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) error { - dh.mutex.Lock() - defer dh.mutex.Unlock() - - if dh.isInitialized { - return fmt.Errorf("registerSyncer: cannot add new syncer config after initialization") - } - dh.syncersConfig.Add(data) - return nil -} - -func (dh *EVMMultidownloader) Start(ctx context.Context) error { - err := dh.Initialize(ctx) - if err != nil { - return err - } - - err = dh.sync(ctx, dh.StepSafe, "safe") - if err != nil { - return err - } - - return nil -} - func (dh *EVMMultidownloader) GetRPCServices() []jRPC.Service { logger := log.WithFields("module", "multidownloader-rpc-"+dh.name) return []jRPC.Service{ { ... }, } } -func (dh *EVMMultidownloader) CheckDatabase(ctx context.Context) error { - chainID, err := dh.ChainID(ctx) - if err != nil { - return fmt.Errorf("Initialize: cannot get chainID: %w", err) - } - compatibilityStorageChecker := compatibility.NewCompatibilityCheck( - true, - func(ctx context.Context) (storage.DBRuntimeData, error) { - return storage.DBRuntimeData{NetworkID: chainID, - DataVersion: storage.DataVersionCurrent}, nil - }, -
compatibility.NewKeyValueToCompatibilityStorage[storage.DBRuntimeData](dh.storage, "multidownloader-"+dh.name), - ) - err = compatibilityStorageChecker.Check(ctx, nil) - if err != nil { - return fmt.Errorf("Initialize: compatibility check failed: %w", err) +// RegisterSyncer registers a new syncer config to the multidownloader. +// It must be called before initialization or Start. +func (dh *EVMMultidownloader) RegisterSyncer(data aggkittypes.SyncerConfig) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + + if dh.isInitializedNoMutex() { + return fmt.Errorf("registerSyncer: cannot add new syncer config after initialization") } + + dh.syncersConfig.Add(data) return nil } @@ -156,73 +147,361 @@ func (dh *EVMMultidownloader) Initialize(ctx context.Context) error { dh.mutex.Lock() defer dh.mutex.Unlock() - if dh.isInitialized { + if dh.isInitializedNoMutex() { return fmt.Errorf("initialize: already initialized") } + dh.log.Debugf("Initializing multidownloader...") // Check DB compatibility - err := dh.CheckDatabase(ctx) + err := dh.checkDatabaseContentsCompatibility(ctx) if err != nil { return err } + dh.log.Debugf("Saving syncer configs to storage...") // Save syncer configs to storage; it overrides previous ones but keeps // the synced segments err = dh.storage.UpsertSyncerConfigs(nil, dh.syncersConfig.ContractConfigs()) if err != nil { return err } - // Get synced segments per contract - syncSegments, err := dh.syncersConfig.SyncSegments() + newState, err := dh.newStateFromStorage(ctx) if err != nil { - return err + return fmt.Errorf("Initialize: error creating new state from storage: %w", err) } - // Update TargetToBlock from name to real block numbers - err = syncSegments.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) + // What is pending to download? + dh.state = newState + dh.log.Infof("Initialization completed. configs: %s state: %s", + dh.syncersConfig.Brief(), dh.state.String()) + return nil +} +func (dh *EVMMultidownloader) mapBlockTagToBlockNumber( + ctx context.Context) (map[aggkittypes.BlockNumberFinality]uint64, error) { + tags := dh.syncersConfig.GetTargetToBlockTags() + resultMap := make(map[aggkittypes.BlockNumberFinality]uint64) + for _, tag := range tags { + blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, tag) + if err != nil { + return nil, fmt.Errorf("mapBlockTagToBlockNumber: cannot get block number for finality %s: %w", tag.String(), err) + } + resultMap[tag] = blockNumber + } + return resultMap, nil +} + +// newStateFromStorage creates a new State based on the data in storage and the current syncer configs. +// It is used on initialization and after reorgs to recreate the state of pending and synced segments. +func (dh *EVMMultidownloader) newStateFromStorage(ctx context.Context) (*State, error) { + mapBlocks, err := dh.mapBlockTagToBlockNumber(ctx) if err != nil { - return fmt.Errorf("Initialize: cannot update TargetToBlock in sync segments: %w", err) + return nil, fmt.Errorf("newStateFromStorage: cannot map block tags to block numbers: %w", err) } + syncSegments, err := dh.syncersConfig.SyncSegments(mapBlocks) + if err != nil { + return nil, err + } + // Get synced segments from storage storageSyncSegments, err := dh.storage.GetSyncedBlockRangePerContract(nil) if err != nil { - return err + return nil, fmt.Errorf("newStateFromStorage: cannot get synced block ranges from storage: %w", err) } - // What is pending to download? - dh.pendingSync = syncSegments.Clone() - err = dh.pendingSync.SubtractSegments(&storageSyncSegments) + return NewStateFromStorageSyncedBlocks(storageSyncSegments, *syncSegments) }
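newStateFromStorage boils down to set arithmetic on block ranges: the pending set is the configured target ranges minus what storage already marks as synced. A toy, single-range illustration of that subtraction (the real SyncSegment types carry more data; min/max are the Go 1.21 builtins):

package sketch

// rangeSub returns the parts of the inclusive target range [from, to] that are
// not covered by the synced range [sFrom, sTo]; this is the one-dimensional
// core of "pending = target - synced". Assumes from <= to and sFrom <= sTo.
func rangeSub(from, to, sFrom, sTo uint64) [][2]uint64 {
	var pending [][2]uint64
	if sFrom > from {
		pending = append(pending, [2]uint64{from, min(sFrom-1, to)}) // gap below the synced range
	}
	if sTo < to {
		pending = append(pending, [2]uint64{max(sTo+1, from), to}) // gap above the synced range
	}
	return pending
}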
+ +const infiniteLoops = -1 + +func (dh *EVMMultidownloader) Start(ctx context.Context) error { + return dh.startNumLoops(ctx, infiniteLoops) +} + +func (dh *EVMMultidownloader) startNumLoops(ctx context.Context, numLoopsToExecute int) error { + dh.mutex.Lock() + if dh.isRunning { + dh.mutex.Unlock() + return fmt.Errorf("Start: multidownloader is already running") + } + // Create a cancelable context for this run + runCtx, cancel := context.WithCancel(ctx) + dh.cancel = cancel + dh.isRunning = true + dh.stopRequested = false + dh.wg.Add(1) + dh.mutex.Unlock() + + defer func() { + dh.mutex.Lock() + dh.isRunning = false + dh.stopRequested = false + dh.cancel = nil + dh.mutex.Unlock() + dh.wg.Done() + }() + + if !dh.IsInitialized() { + dh.log.Infof("EVMMultidownloader.Start: multidownloader not initialized, initializing...") + err := dh.Initialize(runCtx) + if err != nil { + return err + } + } + + dh.statistics.StartSyncing() + numLoops := 0 + for { + // This is for debugging: when it reaches the given number of loops it returns, to allow testing + if numLoops == numLoopsToExecute { + return nil + } + numLoops++ + // check if context is done + if runCtx.Err() != nil { + dh.log.Infof("EVMMultidownloader.Start: context done, exiting...") + return runCtx.Err() + } + err := dh.debug.GetInjectedStartStepError() + if err != nil { + dh.log.Warnf("EVMMultidownloader.Start: debug forced error set: %s", + err.Error()) + } else { + err = dh.StartStep(runCtx) + } + if err != nil { + reorgErr := mdrtypes.CastDetectedReorgError(err) + if reorgErr == nil { + dh.log.Warnf("Error running multidownloader: %s ", err.Error()) + time.Sleep(time.Millisecond) // Brief pause before retry + continue + } + dh.log.Warnf("Reorg detected: %s", reorgErr.Error()) + for { + dh.mutex.Lock() + // check if context is done during reorg processing + if runCtx.Err() != nil { + dh.mutex.Unlock() + dh.log.Infof("EVMMultidownloader.Start: context done during reorg processing, exiting...") + return runCtx.Err() + } + + dh.log.Infof("Processing reorg at block number %d...", reorgErr.OffendingBlockNumber) + err = dh.reorgProcessor.ProcessReorg(runCtx, *reorgErr, dh.cfg.BlockFinality) + if err != nil { + dh.mutex.Unlock() + dh.log.Warnf("Error running reorg multidownloader: %s", err.Error()) + time.Sleep(1 * time.Second) + continue + } + newState, err := dh.newStateFromStorage(runCtx) + if err != nil { + dh.mutex.Unlock() + dh.log.Warnf("Error recreating state after reorg processing: %s", err.Error()) + time.Sleep(1 * time.Second) + continue + } + dh.state = newState + dh.mutex.Unlock() + break + } + } + } +} + +// Stop gracefully stops the multidownloader if it's running +func (dh *EVMMultidownloader) Stop(ctx context.Context) error { + dh.mutex.Lock() + if !dh.isRunning { + dh.mutex.Unlock() + return fmt.Errorf("Stop: multidownloader is not running") + } + cancel := dh.cancel + dh.mutex.Unlock() + + dh.log.Infof("Stop: stopping multidownloader...") + + // Cancel the running context + if cancel != nil { + cancel() + } + + // Wait for the goroutine to finish with context timeout + done := make(chan struct{}) + go func() { + dh.wg.Wait() + close(done) + }() + + select { + case <-done: + dh.log.Infof("Stop: multidownloader stopped successfully") + return nil + case <-ctx.Done(): + return fmt.Errorf("Stop: timeout waiting for multidownloader to stop: %w", ctx.Err()) + } +}
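Start blocks until its context ends, so a caller is expected to run it in a goroutine and shut it down through Stop with a bounded timeout. A usage sketch under that assumption (the runner interface just narrows the type for the example):

package sketch

import (
	"context"
	"errors"
	"log"
	"time"
)

// runner is the subset of the multidownloader API this example needs.
type runner interface {
	Start(ctx context.Context) error
	Stop(ctx context.Context) error
}

// runWithShutdown starts the downloader in the background and stops it with a
// 10-second grace period once the caller's context is cancelled.
func runWithShutdown(ctx context.Context, mdr runner) error {
	go func() {
		if err := mdr.Start(ctx); err != nil && !errors.Is(err, context.Canceled) {
			log.Printf("multidownloader stopped with error: %v", err)
		}
	}()
	<-ctx.Done() // wait until the caller cancels
	stopCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return mdr.Stop(stopCtx)
}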
+func (dh *EVMMultidownloader) updateTargetBlockNumber(ctx context.Context) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + mapBlocks, err := dh.mapBlockTagToBlockNumber(ctx) if err != nil { - return fmt.Errorf("Initialize: cannot calculate pendingSync: %w", err) + return fmt.Errorf("updateTargetBlockNumber: cannot map block tags to block numbers: %w", err) } - dh.syncedSegments = storageSyncSegments - dh.isInitialized = true - return nil + return dh.state.ExtendPendingRange(mapBlocks, &dh.syncersConfig) } -// sync is an internal function that executes the given stepFunc until it returns done=true or error -func (dh *EVMMultidownloader) sync(ctx context.Context, - stepFunc func(ctx context.Context) (bool, error), name string) error { - dh.statistics.StartSyncing() +func (dh *EVMMultidownloader) checkReorgsUnsafeZone(ctx context.Context) error { + blockInUnsafeZone, err := dh.storage.GetBlockHeadersNotFinalized(nil, nil) + if err != nil { + return fmt.Errorf("checkReorgsUnsafeZone: cannot get unsafe blocks: %w", err) + } + return dh.detectReorgs(ctx, blockInUnsafeZone) +} + +func (dh *EVMMultidownloader) StartStep(ctx context.Context) error { + var err error + // Update ToBlock in pending segments to be able to calculate if finished + err = dh.updateTargetBlockNumber(ctx) + if err != nil { + return fmt.Errorf("cannot update ToBlock: %w", err) + } - iteration := 0 - dh.log.Infof("🚀🚀🚀🚀🚀🚀 start syncing %s ...", name) - // Execute steps until done or error - for done, err := stepFunc(ctx); !done; done, err = stepFunc(ctx) { + // Are there unsafe blocks that can be moved to safe and checked? + if err = dh.moveUnsafeToSafeIfPossible(ctx); err != nil { + return err + } + // Check possible reorgs in unsafe zone + if err = dh.checkReorgsUnsafeZone(ctx); err != nil { + return err + } + + // Get the pending blocks to sync + pendingBlockRange := dh.getTotalPendingBlockRange() + if pendingBlockRange != nil { + dh.log.Debugf("StartStep: pendingBlockRange=%s", pendingBlockRange.String()) + // Split into safe and unsafe + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) if err != nil { - dh.log.Warnf("🐞🐞🐞🐞🐞 sync %s fails after %d iterations. err: %w", - name, iteration, err) + return fmt.Errorf("StartStep: cannot get finalized block number: %w", err) + } + safePendingBlockRange, unsafePendingBlockRange := pendingBlockRange.SplitByBlockNumber(finalizedBlockNumber) + if !safePendingBlockRange.IsEmpty() { + dh.log.Infof("🛡️ StartStep: Safe sync for pending range %s", safePendingBlockRange.String()) + _, err = dh.StepSafe(ctx) return err } - if ctx.Err() != nil { - dh.log.Infof("🐞🐞🐞🐞🐞 sync %s fails after %d iterations. err: %w", - name, iteration, ctx.Err()) - return ctx.Err() + if !unsafePendingBlockRange.IsEmpty() { + dh.log.Infof("😈 StartStep: Unsafe sync for pending range %s", unsafePendingBlockRange.String()) + _, err = dh.StepUnsafe(ctx) + return err } - iteration++ + } else { + dh.log.Debugf("StartStep: no pending blocks to sync") + } + dh.log.Infof("⏳StartStep: waiting for a new block...") + if err = dh.WaitForNewLatestBlocks(ctx); err != nil { + return err } - dh.log.Infof("🎉🎉🎉🎉🎉 sync %s completed after %d iterations.", name, iteration) - dh.statistics.FinishSyncing() - dh.ShowStatistics(iteration) return nil }
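StartStep splits whatever is pending at the finalized height: ranges at or below it are synced in bulk (safe), ranges above it must be fetched per block hash because they can still be reorged (unsafe). A sketch of that split over an inclusive range, with illustrative types and the Go 1.21 min/max builtins:

package sketch

// blockRange is an inclusive [From, To] interval of block numbers.
type blockRange struct{ From, To uint64 }

// splitByFinalized divides pending into the part that can be synced safely
// (<= finalized) and the part that is still reorgable (> finalized).
// A nil result means the corresponding part is empty.
func splitByFinalized(pending blockRange, finalized uint64) (safe, unsafePart *blockRange) {
	if pending.From <= finalized {
		safe = &blockRange{From: pending.From, To: min(pending.To, finalized)}
	}
	if pending.To > finalized {
		unsafePart = &blockRange{From: max(pending.From, finalized+1), To: pending.To}
	}
	return safe, unsafePart
}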
err: %w", - name, iteration, ctx.Err()) - return ctx.Err() + if !unsafePendingBlockRange.IsEmpty() { + dh.log.Infof("😈 StartStep: Unsafe sync for pending range %s", unsafePendingBlockRange.String()) + _, err = dh.StepUnsafe(ctx) + return err } - iteration++ + } else { + dh.log.Debugf("StartStep: no pending blocks to sync") + } + dh.log.Infof("⏳StartStep: waiting new block...") + if err = dh.WaitForNewLatestBlocks(ctx); err != nil { + return err } - dh.log.Infof("🎉🎉🎉🎉🎉 sync %s completed after %d iterations.", name, iteration) - dh.statistics.FinishSyncing() - dh.ShowStatistics(iteration) return nil } +func (dh *EVMMultidownloader) WaitForNewLatestBlocks(ctx context.Context) error { + latestSyncedBlockNumber, lastSyncedBlockTag := dh.state.GetHighestBlockNumberPendingToSync() + lastBlockHeader, finalized, err := dh.storage.GetBlockHeaderByNumber(nil, latestSyncedBlockNumber) + if err != nil { + return fmt.Errorf("WaitForNewLatestBlocks: cannot get block header for latest synced block %d: %w", + latestSyncedBlockNumber, err) + } + // If the block is finalized and have no events it's not in DB + if lastBlockHeader == nil { + lastBlockHeader = &aggkittypes.BlockHeader{ + Number: latestSyncedBlockNumber, + } + } + dh.log.Infof("waiting new block (%s>%d)...", lastSyncedBlockTag.String(), latestSyncedBlockNumber) + _, err = dh.waitForNewBlocks(ctx, lastSyncedBlockTag, lastBlockHeader, finalized) + return err +} + +func (dh *EVMMultidownloader) waitForNewBlocks(ctx context.Context, + blockTag aggkittypes.BlockNumberFinality, + lastBlockHeader *aggkittypes.BlockHeader, + finalized mdrtypes.FinalizedType) (uint64, error) { + // TODO: This var dh.cfg.PeriodToCheckReorgs.Duration is the best choice? + ticker := time.NewTicker(dh.cfg.PeriodToCheckReorgs.Duration) + defer ticker.Stop() + dh.log.Debugf("waitForNewBlocks: waiting for new blocks %s after %d. 
Check each %s...", + blockTag.String(), + lastBlockHeader.Number, + dh.cfg.PeriodToCheckReorgs.String()) + for { + select { + case <-ctx.Done(): + dh.log.Info("context cancelled") + return lastBlockHeader.Number, ctx.Err() + case <-ticker.C: + var currentBlock uint64 + var err error + if finalized == mdrtypes.NotFinalized { + // Check reorg + currentHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, &blockTag) + if err != nil { + return lastBlockHeader.Number, fmt.Errorf("WaitForNewBlocks: cannot get current block header: %w", err) + } + dh.log.Debugf("waitForNewBlocks: tag:%s currentHeader.Number=%d, lastBlockHeader.Number=%d checking Hash", + blockTag.String(), currentHeader.Number, lastBlockHeader.Number) + if currentHeader.Number == lastBlockHeader.Number { + if currentHeader.Hash != lastBlockHeader.Hash { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_BlockHashMismatch, + lastBlockHeader.Hash, + currentHeader.Hash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: stored hash %s != current hash %s", + lastBlockHeader.Number, + lastBlockHeader.Hash.String(), + currentHeader.Hash.String())) + } + } + if currentHeader.Number == lastBlockHeader.Number+1 && currentHeader.ParentHash != nil { + if *currentHeader.ParentHash != lastBlockHeader.Hash { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_ParentHashMismatch, + lastBlockHeader.Hash, + *currentHeader.ParentHash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: "+ + "stored hash %s != parent hash %s of new block %d", + lastBlockHeader.Number, + lastBlockHeader.Hash.String(), + currentHeader.ParentHash.String(), + currentHeader.Number)) + } + } + if currentHeader.Number < lastBlockHeader.Number { + return lastBlockHeader.Number, mdrtypes.NewDetectedReorgError( + lastBlockHeader.Number, + mdrtypes.ReorgDetectionReason_MissingBlock, + lastBlockHeader.Hash, + currentHeader.Hash, + fmt.Sprintf("WaitForNewBlocks: reorg detected at block number %d: "+ + "current block number %d < last synced block number %d", + lastBlockHeader.Number, + currentHeader.Number, + lastBlockHeader.Number)) + } + currentBlock = currentHeader.Number + } else { + currentBlock, err = dh.blockNotifierManager.GetCurrentBlockNumber(ctx, blockTag) + if err != nil { + return lastBlockHeader.Number, fmt.Errorf("WaitForNewBlocks: cannot get current block number: %w", err) + } + } + if currentBlock > lastBlockHeader.Number { + dh.log.Debugf("waitForNewBlocks: Find new block %d > lastBlockHeader.Number %d", + currentBlock, lastBlockHeader.Number) + return currentBlock, nil + } + } + } +} + func getBlockNumbers(logs []types.Log) []uint64 { blockNumbers := make(map[uint64]struct{}) result := make([]uint64, 0) @@ -235,11 +514,168 @@ func getBlockNumbers(logs []types.Log) []uint64 { } return result } +func (dh *EVMMultidownloader) IsInitialized() bool { + dh.mutex.Lock() + defer dh.mutex.Unlock() + return dh.state != nil +} + +func (dh *EVMMultidownloader) isInitializedNoMutex() bool { + return dh.state != nil +} func (dh *EVMMultidownloader) IsAvailable(query mdrtypes.LogQuery) bool { dh.mutex.Lock() defer dh.mutex.Unlock() - return dh.syncedSegments.IsAvailable(query) + return dh.state.IsAvailable(query) +} + +// Check if the given log query is partially available +func (dh *EVMMultidownloader) IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) { + dh.mutex.Lock() + defer 
dh.mutex.Unlock() + return dh.state.IsPartiallyAvailable(query) +} + +// getTotalPendingBlockRange returns the full pending block range without taking +// addresses into consideration +func (dh *EVMMultidownloader) getTotalPendingBlockRange() *aggkitcommon.BlockRange { + dh.mutex.Lock() + defer dh.mutex.Unlock() + br := dh.state.GetTotalPendingBlockRange() + return br +} + +func (dh *EVMMultidownloader) getUnsafeLogQueries(blockHeaders []*aggkittypes.BlockHeader) []mdrtypes.LogQuery { + dh.mutex.Lock() + defer dh.mutex.Unlock() + logQueries := make([]mdrtypes.LogQuery, 0, len(blockHeaders)) + for _, bh := range blockHeaders { + logQueries = append(logQueries, mdrtypes.NewLogQueryBlockHash( + bh.Number, + bh.Hash, + dh.state.GetAddressesToSyncForBlockNumber(bh.Number), + )) + } + return logQueries +} + +func (dh *EVMMultidownloader) newStateAfterLogQueries(queries []mdrtypes.LogQuery) (*State, error) { + dh.mutex.Lock() + state := dh.state.Clone() + dh.mutex.Unlock() + for _, logQueryData := range queries { + err := state.Synced.AddLogQuery(&logQueryData) + if err != nil { + return nil, fmt.Errorf("newStateAfterLogQueries: cannot extend synced segments: %w", err) + } + err = state.Pending.SubtractLogQuery(&logQueryData) + if err != nil { + return nil, fmt.Errorf("newStateAfterLogQueries: cannot subtract log query from pending segments: %w", err) + } + } + return state, nil +} +func getContracts(logQueries []mdrtypes.LogQuery) []common.Address { + addressMap := make(map[common.Address]struct{}) + for _, lq := range logQueries { + for _, addr := range lq.Addrs { + addressMap[addr] = struct{}{} + } + } + addresses := make([]common.Address, 0, len(addressMap)) + for addr := range addressMap { + addresses = append(addresses, addr) + } + // Sort addresses to ensure deterministic output + sort.Slice(addresses, func(i, j int) bool { + return addresses[i].Hex() < addresses[j].Hex() + }) + return addresses +} + +func (dh *EVMMultidownloader) checkIntegrityNewLogsBlockHeaders(logs []types.Log, + blockHeaders aggkittypes.ListBlockHeaders) error { + blockMap := blockHeaders.ToMap() + for _, lg := range logs { + bh, exists := blockMap[lg.BlockNumber] + if !exists { + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: "+ + "block header for log block number %d not found", lg.BlockNumber) + } + if bh.Hash != lg.BlockHash { + return fmt.Errorf("checkIntegrityNewLogsBlockHeaders: "+ + "log block hash %s does not match block header hash %s for block number %d", + lg.BlockHash.String(), bh.Hash.String(), lg.BlockNumber) + } + } + return nil +}
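newStateAfterLogQueries maintains the invariant that a completed query moves atomically from the pending set to the synced set, on a clone that is only installed after storage commits. A schematic sketch of that bookkeeping (toy types, not the aggkit segment implementation):

package sketch

import "fmt"

// logQuery is a toy stand-in for mdrtypes.LogQuery.
type logQuery struct{ From, To uint64 }

// segmentSet is the minimal behaviour needed from the synced/pending sets.
type segmentSet interface {
	AddLogQuery(q *logQuery) error
	SubtractLogQuery(q *logQuery) error
}

// applyQueries records each completed query: add to synced, remove from
// pending. On error the caller throws the cloned state away, so the live
// state is never half-updated.
func applyQueries(synced, pending segmentSet, queries []logQuery) error {
	for i := range queries {
		q := &queries[i]
		if err := synced.AddLogQuery(q); err != nil {
			return fmt.Errorf("extend synced: %w", err)
		}
		if err := pending.SubtractLogQuery(q); err != nil {
			return fmt.Errorf("shrink pending: %w", err)
		}
	}
	return nil
}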
+ +func (dh *EVMMultidownloader) StepUnsafe(ctx context.Context) (bool, error) { + if err := ctx.Err(); err != nil { + return false, err + } + pendingBlockRange := dh.getTotalPendingBlockRange() + if pendingBlockRange == nil { + dh.log.Debugf("StepUnsafe: no pending blocks to sync") + return false, nil + } + blocks := pendingBlockRange.ListBlockNumbers() + // TODO: Check that the blocks are all inside unsafe range + blockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blocks, dh.cfg.MaxParallelBlockHeaderRetrieval) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve %s block headers: %w", pendingBlockRange.String(), err) + } + // Check for partial failures + if !blockHeadersResult.Success() { + for blockNum, blockErr := range blockHeadersResult.Errors { + dh.log.Errorf("Unsafe/Step: failed to retrieve block %d: %v", blockNum, blockErr) + } + if !blockHeadersResult.PartialSuccess() { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve any block headers for %s", pendingBlockRange.String()) + } + dh.log.Warnf("Unsafe/Step: partial success retrieving block headers: %d/%d succeeded", + len(blockHeadersResult.Headers), len(blocks)) + } + blockHeaders := blockHeadersResult.GetOrderedHeaders(blocks) + dh.log.Debugf("Unsafe/Step: querying logs for %s", pendingBlockRange.String()) + logQueries := dh.getUnsafeLogQueries(blockHeaders) + logs, err := dh.requestMultiplesLogs(ctx, logQueries) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to retrieve logs for %s: %w", pendingBlockRange.String(), err) + } + if err = dh.checkIntegrityNewLogsBlockHeaders(logs, blockHeaders); err != nil { + return false, err + } + newState, err := dh.newStateAfterLogQueries(logQueries) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: failed to create new state: %w", err) + } + updatedSegments := newState.Synced.SegmentsByContract(getContracts(logQueries)) + // Store data in storage + dh.log.Debugf("Unsafe/Step: storing data for %s", pendingBlockRange.String()) + err = dh.storeData(ctx, logs, blockHeaders, + updatedSegments, unsafeMode) + if err != nil { + return false, fmt.Errorf("Unsafe/Step: cannot store data: %w", err) + } + + dh.mutex.Lock() + defer dh.mutex.Unlock() + dh.log.Debugf("Unsafe/Step: updating state in memory %s", pendingBlockRange.String()) + dh.state = newState + finished := dh.state.IsSyncFinished() + totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() + dh.log.Infof("Unsafe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", + dh.statistics.ElapsedSyncing().String(), + pendingBlockRange.String(), + len(logs), + len(blockHeaders), + totalBlocksPendingToSync, + dh.statistics.ETA(totalBlocksPendingToSync)) + return finished, nil } // StepSafe performs a safe step syncing logs and block headers from historical data @@ -261,56 +697,60 @@ func (dh *EVMMultidownloader) StepSafe(ctx context.Context) (bool, error) { logQueryData.BlockRange.String(), logQueryData.Addrs) blocks := getBlockNumbers(logs) dh.log.Debugf("Safe/Step: querying blockHeaders for %d blocks", len(blocks)) - blockHeaders, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, + blockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient, blocks, dh.cfg.MaxParallelBlockHeaderRetrieval) if err != nil { return false, fmt.Errorf("Safe/Step: failed to retrieve %d block headers: %w", len(blocks), err) } + // Check for partial failures + if !blockHeadersResult.Success() { + for blockNum, blockErr := range blockHeadersResult.Errors { + dh.log.Errorf("Safe/Step: failed to retrieve block %d: %v", blockNum, blockErr) + } + if !blockHeadersResult.PartialSuccess() { + return false, fmt.Errorf("Safe/Step: failed to retrieve any block headers") + } + dh.log.Warnf("Safe/Step: partial success retrieving block headers: %d/%d succeeded", + len(blockHeadersResult.Headers), len(blocks)) + } + blockHeaders := blockHeadersResult.GetOrderedHeaders(blocks) // Calculate new state (not set in memory until commit is successful) dh.mutex.Lock() - newSyncedSegments := dh.syncedSegments.Clone() - newPendingSegments := dh.pendingSync.Clone() + newState := dh.state.Clone() dh.mutex.Unlock() // Update synced segments - err = newSyncedSegments.AddLogQuery(logQueryData) - if err != nil { - return false, fmt.Errorf("Safe/Step: cannot extend synced segments: %w", err) - } - // from pending blocks remove current query - err =
newPendingSegments.SubtractLogQuery(logQueryData) + err = newState.OnNewSyncedLogQuery(logQueryData) if err != nil { - return false, fmt.Errorf("Safe/Step: cannot subtract log query from pending segments: %w", err) - } - // Update ToBlock in pending segments to be able to calculate if finished - err = newPendingSegments.UpdateTargetBlockToNumber(ctx, dh.blockNotifierManager) - if err != nil { - return false, fmt.Errorf("Safe/Step: cannot update ToBlock in pendingSync: %w", err) + return false, fmt.Errorf("Safe/Step: fails OnNewSyncedLogQuery(%s): %w", + logQueryData.String(), err) } + // Store data in storage err = dh.storeData(ctx, logs, blockHeaders, - newSyncedSegments.SegmentsByContract(logQueryData.Addrs), true) + newState.SyncedSegmentsByContract(logQueryData.Addrs), true) if err != nil { return false, fmt.Errorf("Safe/Step: cannot store data: %w", err) } // Update in-memory synced segments (after valid commit) dh.mutex.Lock() defer dh.mutex.Unlock() - dh.syncedSegments = *newSyncedSegments - dh.pendingSync = newPendingSegments - finished := dh.pendingSync.Finished() + dh.state = newState + finished := dh.state.IsSyncFinished() + totalBlocksPendingToSync := dh.state.TotalBlocksPendingToSync() dh.log.Infof("Safe/Step: elapsed=%s finished br=%s logs=%d blocksHeaders=%d pendingBlocks=%d ETA=%s ", dh.statistics.ElapsedSyncing().String(), logQueryData.BlockRange.String(), len(logs), len(blockHeaders), - dh.pendingSync.TotalBlocks(), - dh.statistics.ETA(dh.pendingSync.TotalBlocks())) + totalBlocksPendingToSync, + dh.statistics.ETA(totalBlocksPendingToSync)) return finished, nil } func (dh *EVMMultidownloader) storeData( ctx context.Context, - logs []types.Log, blocks []*aggkittypes.BlockHeader, + logs []types.Log, + blocks []*aggkittypes.BlockHeader, updatedSegments []mdrtypes.SyncSegment, isFinal bool) error { var err error @@ -321,31 +761,31 @@ func (dh *EVMMultidownloader) storeData( }() tx, err := dh.storage.NewTx(ctx) if err != nil { - return fmt.Errorf("Safe/Step: cannot create new tx: %w", err) + return fmt.Errorf("storeData: cannot create new tx: %w", err) } defer func() { if !committed { - dh.log.Debugf("Safe/Step: rolling back tx") + dh.log.Debugf("storeData: rolling back tx") if err := tx.Rollback(); err != nil { - dh.log.Errorf("Safe/Step: error rolling back tx: %v", err) + dh.log.Errorf("storeData: error rolling back tx: %v", err) } } }() // Save logs and block headers err = dh.storage.SaveEthLogsWithHeaders(tx, blocks, logs, isFinal) if err != nil { - return fmt.Errorf("Safe/Step: cannot save eth logs: %w", err) + return fmt.Errorf("storeData: cannot save eth logs: %w", err) } // Update synced segments in storage err = dh.storage.UpdateSyncedStatus(tx, updatedSegments) if err != nil { - return fmt.Errorf("Safe/Step: cannot update synced segments +%v in storage: %w", + return fmt.Errorf("storeData: cannot update synced segments +%v in storage: %w", updatedSegments, err) } committed = true if err = tx.Commit(); err != nil { - return fmt.Errorf("Safe/Step: cannot commit tx: %w", err) + return fmt.Errorf("storeData: cannot commit tx: %w", err) } return nil } @@ -400,7 +840,16 @@ func extractSuggestedBlockRangeFromErrorMsg(msg string) *aggkitcommon.BlockRange return nil } -func (dh *EVMMultidownloader) getFinalizedBlockNumber(ctx context.Context) (uint64, error) { +func (dh *EVMMultidownloader) GetLatestBlockNumber(ctx context.Context) (uint64, error) { + bn, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock) + if err != nil { + return 0, 
fmt.Errorf("GetLatestBlockNumber: cannot get latest block (%s): %w", + aggkittypes.LatestBlock.String(), err) + } + return bn, nil +} + +func (dh *EVMMultidownloader) GetFinalizedBlockNumber(ctx context.Context) (uint64, error) { bn, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, dh.cfg.BlockFinality) if err != nil { return 0, fmt.Errorf("Safe/Step: cannot get finalized block (%s): %w", @@ -415,20 +864,42 @@ func (dh *EVMMultidownloader) getNextQuery(ctx context.Context, chunk uint32, sa var err error var maxBlock uint64 if safe { - maxBlock, err = dh.getFinalizedBlockNumber(ctx) + maxBlock, err = dh.GetFinalizedBlockNumber(ctx) if err != nil { return nil, fmt.Errorf("getNextQuery: cannot get finalized block number: %w", err) } } else { maxBlock = 0 } - logQueryData, err := dh.pendingSync.NextQuery(chunk, maxBlock) + logQueryData, err := dh.state.NextQueryToSync(chunk, maxBlock, true) if err != nil { return nil, fmt.Errorf("getNextQuery: cannot get NextQuery: %w", err) } return logQueryData, nil } +// TODO: Do this requests in parallel +func (dh *EVMMultidownloader) requestMultiplesLogs( + ctx context.Context, + queries []mdrtypes.LogQuery) ([]types.Log, error) { + var allLogs []types.Log + for _, query := range queries { + dh.log.Debugf("request: querying logs for blockHash=%s", query.String()) + if err := ctx.Err(); err != nil { + return nil, fmt.Errorf("requestMultiplesLogs: context error: %w", err) + } + logs, err := dh.requestLogsSingleTry(ctx, &query) + if err != nil { + return nil, fmt.Errorf("requestMultiplesLogs: ethClient.FilterLogs(%v) failed: %w", + query.String(), err) + } + dh.log.Debugf("request: successfully queried logs for blockHash=%s: returned %d logs", + query.String(), len(logs)) + allLogs = append(allLogs, logs...) + } + return allLogs, nil +} + func (dh *EVMMultidownloader) requestLogs( ctx context.Context) ([]types.Log, *mdrtypes.LogQuery, error) { currentSyncBlockChunkSize := dh.cfg.BlockChunkSize @@ -489,3 +960,136 @@ func (dh *EVMMultidownloader) requestLogsSingleTry(ctx context.Context, func (dh *EVMMultidownloader) ShowStatistics(iteration int) { dh.statistics.Show(dh.log.Infof, iteration) } + +// checkDatabaseContentsCompatibility checks that the data already in database +// match the data in config/RPC (e.g: contract addresses, chainID, etc) +func (dh *EVMMultidownloader) checkDatabaseContentsCompatibility(ctx context.Context) error { + chainID, err := dh.ChainID(ctx) + if err != nil { + return fmt.Errorf("Initialize: cannot get chainID: %w", err) + } + compatibilityStorageChecker := compatibility.NewCompatibilityCheck( + true, + func(ctx context.Context) (storage.DBRuntimeData, error) { + return storage.DBRuntimeData{NetworkID: chainID, + DataVersion: storage.DataVersionCurrent}, nil + }, + compatibility.NewKeyValueToCompatibilityStorage[storage.DBRuntimeData](dh.storage, "multidownloader-"+dh.name), + ) + + err = compatibilityStorageChecker.Check(ctx, nil) + if err != nil { + return fmt.Errorf("Initialize: compatibility check failed: %w", err) + } + return nil +} + +// moveUnsafeToSafeIfPossible it's used at start or when finalize block change +// moving the unsafe blocks to safe zone checking that the block is not reorged +// If there are any missmatch it returns an DetectedReorgError +func (dh *EVMMultidownloader) moveUnsafeToSafeIfPossible(ctx context.Context) error { + dh.mutex.Lock() + defer dh.mutex.Unlock() + + finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx) + if err != nil { + return fmt.Errorf("moveUnsafeToSafeIfPossible: 
+	committed := false
+	tx, err := dh.storage.NewTx(ctx)
+	if err != nil {
+		return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot create new tx: %w", err)
+	}
+	defer func() {
+		if !committed {
+			dh.log.Debugf("moveUnsafeToSafeIfPossible: rolling back tx")
+			if err := tx.Rollback(); err != nil {
+				dh.log.Errorf("moveUnsafeToSafeIfPossible: error rolling back tx: %v", err)
+			}
+		}
+	}()
+
+	blocks, err := dh.storage.GetBlockHeadersNotFinalized(tx, &finalizedBlockNumber)
+	if err != nil {
+		return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot get unsafe block bases: %w", err)
+	}
+	if blocks.Len() == 0 {
+		dh.log.Debugf("moveUnsafeToSafeIfPossible: no unsafe blocks to move to safe")
+		return nil
+	}
+
+	err = dh.detectReorgs(ctx, blocks)
+	if err != nil {
+		return fmt.Errorf("moveUnsafeToSafeIfPossible: error detecting reorgs: %w", err)
+	}
+	err = dh.storage.UpdateBlockToFinalized(tx, blocks.BlockNumbers())
+	if err != nil {
+		return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot update is_final for block bases: %w", err)
+	}
+	dh.log.Infof("moveUnsafeToSafeIfPossible: finalizedBlockNumber=%d, "+
+		"blocks moved to safe zone: %s (len=%d)", finalizedBlockNumber, blocks.BlockRange().String(), blocks.Len())
+	committed = true
+	if err := tx.Commit(); err != nil {
+		return fmt.Errorf("moveUnsafeToSafeIfPossible: cannot commit tx: %w", err)
+	}
+
+	return nil
+}
+
+// detectReorgs checks that the given blocks still match the RPC node;
+// if not, it returns a DetectedReorgError.
+func (dh *EVMMultidownloader) detectReorgs(ctx context.Context,
+	blocks aggkittypes.ListBlockHeaders) error {
+	if blocks.Len() == 0 {
+		dh.log.Debugf("detectReorgs: no blocks to check for reorgs")
+		return nil
+	}
+	blocksNumber := blocks.BlockNumbers()
+	currentBlockHeadersResult, err := etherman.RetrieveBlockHeaders(ctx, dh.log, dh.ethClient, dh.rpcClient,
+		blocksNumber, dh.cfg.MaxParallelBlockHeaderRetrieval)
+	if err != nil {
+		return fmt.Errorf("detectReorgs: cannot retrieve block headers: %w", err)
+	}
+	// Check for any failures in retrieving block headers
+	if !currentBlockHeadersResult.Success() {
+		for blockNum, blockErr := range currentBlockHeadersResult.Errors {
+			dh.log.Errorf("detectReorgs: failed to retrieve block %d: %v", blockNum, blockErr)
+		}
+		if currentBlockHeadersResult.AreAllErrorsNotFound() {
+			return mdrtypes.NewDetectedReorgError(
+				currentBlockHeadersResult.ListBlocksNumberNotFound()[0],
+				mdrtypes.ReorgDetectionReason_MissingBlock,
+				common.Hash{}, common.Hash{},
+				fmt.Sprintf("detectReorgs: reorg detected at block number %d: block not found in RPC",
+					currentBlockHeadersResult.ListBlocksNumberNotFound()[0]))
+		}
+		return fmt.Errorf("detectReorgs: failed to retrieve some block headers for blocks: %w",
+			currentBlockHeadersResult.ComposeError())
+	}

+	// check blocks vs currentBlockHeaders. Must match by number and hash
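+	// Three possible outcomes: block missing in RPC -> DetectedReorgError
+	// (MissingBlock); block missing in storage -> plain error (internal
+	// inconsistency); hash mismatch -> DetectedReorgError (BlockHashMismatch).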
+	storageBlocks := blocks.ToMap()
+	rpcBlocks := currentBlockHeadersResult.Headers
+	for _, number := range blocksNumber {
+		rpcBlock, exists := rpcBlocks[number]
+		if !exists {
+			return mdrtypes.NewDetectedReorgError(number,
+				mdrtypes.ReorgDetectionReason_MissingBlock,
+				common.Hash{}, common.Hash{},
+				fmt.Sprintf("detectReorgs: block number %d not found in RPC", number))
+		}
+		storageBlock, exists := storageBlocks[number]
+		if !exists {
+			return fmt.Errorf("detectReorgs: block number %d not found in storage", number)
+		}
+		if storageBlock.Hash != rpcBlock.Hash {
+			return mdrtypes.NewDetectedReorgError(storageBlock.Number,
+				mdrtypes.ReorgDetectionReason_BlockHashMismatch,
+				storageBlock.Hash, rpcBlock.Hash,
+				fmt.Sprintf("detectReorgs: reorg detected at block number %d: storage hash %s != rpc hash %s",
+					number, storageBlock.Hash.String(), rpcBlock.Hash.String()))
+		}
+	}
+	return nil
+}
diff --git a/multidownloader/evm_multidownloader_debug.go b/multidownloader/evm_multidownloader_debug.go
new file mode 100644
index 000000000..daf48aea3
--- /dev/null
+++ b/multidownloader/evm_multidownloader_debug.go
@@ -0,0 +1,47 @@
+package multidownloader
+
+import (
+	"fmt"
+	"sync"
+
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type EVMMultidownloaderDebug struct {
+	mutexDebug                 sync.Mutex
+	debugStepForcedReturnError error
+}
+
+func NewEVMMultidownloaderDebug() *EVMMultidownloaderDebug {
+	return &EVMMultidownloaderDebug{}
+}
+
+// ForceReorg injects a DetectedReorgError that the next step will return (debug only).
+func (dh *EVMMultidownloaderDebug) ForceReorg(mismatchingBlockNumber uint64) {
+	if dh == nil {
+		return
+	}
+	dh.mutexDebug.Lock()
+	defer dh.mutexDebug.Unlock()
+	dh.debugStepForcedReturnError = mdrtypes.NewDetectedReorgError(
+		mismatchingBlockNumber,
+		mdrtypes.ReorgDetectionReason_Forced,
+		common.Hash{},
+		common.Hash{},
+		fmt.Sprintf("ForceReorg: forced reorg at block number %d", mismatchingBlockNumber),
+	)
+}
+
+// GetInjectedStartStepError returns the injected error once and then clears it.
+func (dh *EVMMultidownloaderDebug) GetInjectedStartStepError() error {
+	if dh == nil {
+		return nil
+	}
+	dh.mutexDebug.Lock()
+	defer dh.mutexDebug.Unlock()
+	if dh.debugStepForcedReturnError != nil {
+		err := dh.debugStepForcedReturnError
+		dh.debugStepForcedReturnError = nil
+		return err
+	}
+	return nil
+}
diff --git a/multidownloader/evm_multidownloader_debug_test.go b/multidownloader/evm_multidownloader_debug_test.go
new file mode 100644
index 000000000..20f4b3831
--- /dev/null
+++ b/multidownloader/evm_multidownloader_debug_test.go
@@ -0,0 +1,23 @@
+package multidownloader
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestEVMMultidownloaderDebug(t *testing.T) {
+	sut := NewEVMMultidownloaderDebug()
+
+	sut.ForceReorg(123)
+	err := sut.GetInjectedStartStepError()
+	if err == nil {
+		t.Fatalf("Expected error to be injected, got nil")
+	}
+	expectedMsg := "ForceReorg: forced reorg at block number 123"
+	require.ErrorContains(t, err, expectedMsg)
+
+	// After getting the error once, it should be cleared
+	err = sut.GetInjectedStartStepError()
+	require.NoError(t, err, "Expected error to be cleared after retrieval")
+}
diff --git a/multidownloader/evm_multidownloader_reorg.go b/multidownloader/evm_multidownloader_reorg.go
new file mode 100644
index 000000000..32382e9e5
--- /dev/null
+++ b/multidownloader/evm_multidownloader_reorg.go
@@ -0,0 +1,48 @@
+package multidownloader
+
+import (
+	"context"
+	"fmt"
+
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
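+// Illustrative usage sketch (an assumption, not part of the tested API
+// surface): a consumer reacting to a possibly reorged block might combine
+// the two helpers below like this:
+//
+//	valid, reorgID, err := mdr.CheckValidBlock(ctx, num, hash)
+//	if err == nil && !valid && reorgID != 0 {
+//		data, _ := mdr.GetReorgedDataByReorgID(ctx, reorgID)
+//		// data.BlockRangeAffected is the range that must be re-synced
+//	}
+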
+// CheckValidBlock checks if the given blockNumber and blockHash are still valid.
+// It returns: isValid bool, reorgID uint64, err error.
+func (dh *EVMMultidownloader) CheckValidBlock(ctx context.Context, blockNumber uint64,
+	blockHash common.Hash) (bool, uint64, error) {
+	// Check whether it is stored as a valid block
+	storedBlock, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber)
+	if err != nil {
+		return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot get BlockHeader number=%d: %w",
+			blockNumber, err)
+	}
+	if storedBlock != nil {
+		// Is valid?
+		if storedBlock.Hash == blockHash {
+			return true, 0, nil
+		}
+	}
+	// From this point on, the block is either invalid or unknown
+	// Check in blocks_reorged
+	reorgID, found, err := dh.storage.GetBlockReorgedReorgID(nil, blockNumber, blockHash)
+	if err != nil {
+		return true, 0, fmt.Errorf("EVMMultidownloader.CheckValidBlock: cannot check blocks_reorged for blockNumber=%d: %w",
+			blockNumber, err)
+	}
+	if found {
+		dh.log.Infof("EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s found in blocks_reorged (reorgID=%d)",
+			blockNumber, blockHash.Hex(), reorgID)
+		return false, reorgID, nil
+	}
+	// Not found anywhere, consider invalid
+	return false, 0, fmt.Errorf(
+		"EVMMultidownloader.CheckValidBlock: blockNumber=%d, blockHash=%s not found in storage or blocks_reorged",
+		blockNumber, blockHash.Hex())
+}
+
+// GetReorgedDataByReorgID returns the stored reorg data for the given reorgID.
+func (dh *EVMMultidownloader) GetReorgedDataByReorgID(ctx context.Context,
+	reorgID uint64) (*mdrtypes.ReorgData, error) {
+	return dh.storage.GetReorgedDataByReorgID(nil, reorgID)
+}
diff --git a/multidownloader/evm_multidownloader_reorg_test.go b/multidownloader/evm_multidownloader_reorg_test.go
new file mode 100644
index 000000000..1f367ea8d
--- /dev/null
+++ b/multidownloader/evm_multidownloader_reorg_test.go
@@ -0,0 +1,197 @@
+package multidownloader
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEVMMultidownloader_CheckValidBlock(t *testing.T) {
+	t.Run("returns true when block is found and hash matches", func(t *testing.T) {
+		testData := newEVMMultidownloaderTestData(t, true)
+		blockNumber := uint64(100)
+		blockHash := common.HexToHash("0x1234")
+
+		storedBlock := &aggkittypes.BlockHeader{
+			Number: blockNumber,
+			Hash:   blockHash,
+		}
+
+		testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber).
+			Return(storedBlock, mdrtypes.Finalized, nil).Once()
+
+		isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash)
+
+		require.NoError(t, err)
+		require.True(t, isValid)
+		require.Equal(t, uint64(0), reorgID)
+	})
+
+	t.Run("returns error when GetBlockHeaderByNumber fails", func(t *testing.T) {
+		testData := newEVMMultidownloaderTestData(t, true)
+		blockNumber := uint64(100)
+		blockHash := common.HexToHash("0x1234")
+
+		expectedErr := fmt.Errorf("database error")
+		testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber).
+ Return(nil, mdrtypes.NotFinalized, expectedErr).Once() + + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get BlockHeader") + require.True(t, isValid) + require.Equal(t, uint64(0), reorgID) + }) + + t.Run("returns false with reorgID when block found in blocks_reorged", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + expectedReorgID := uint64(42) + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x5678"), // Different hash + } + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). + Return(expectedReorgID, true, nil).Once() + + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.NoError(t, err) + require.False(t, isValid) + require.Equal(t, expectedReorgID, reorgID) + }) + + t.Run("returns false when block not stored and not in blocks_reorged", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(nil, mdrtypes.NotFinalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). + Return(uint64(0), false, nil).Once() + + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "not found in storage or blocks_reorged") + require.False(t, isValid) + require.Equal(t, uint64(0), reorgID) + }) + + t.Run("returns false with reorgID when stored block hash does not match", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + expectedReorgID := uint64(99) + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0xabcd"), // Different hash + } + + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). + Return(expectedReorgID, true, nil).Once() + + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.NoError(t, err) + require.False(t, isValid) + require.Equal(t, expectedReorgID, reorgID) + }) + + t.Run("returns error when GetBlockReorgedReorgID fails", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + blockNumber := uint64(100) + blockHash := common.HexToHash("0x1234") + + storedBlock := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x5678"), // Different hash + } + + expectedErr := fmt.Errorf("reorg query error") + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, blockNumber). + Return(storedBlock, mdrtypes.Finalized, nil).Once() + testData.mockStorage.EXPECT().GetBlockReorgedReorgID(mock.Anything, blockNumber, blockHash). 
+ Return(uint64(0), false, expectedErr).Once() + + isValid, reorgID, err := testData.mdr.CheckValidBlock(context.Background(), blockNumber, blockHash) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot check blocks_reorged") + require.True(t, isValid) + require.Equal(t, uint64(0), reorgID) + }) +} + +func TestEVMMultidownloader_GetReorgedDataByReorgID(t *testing.T) { + t.Run("returns reorg data successfully", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedReorgID := uint64(42) + expectedReorgData := &mdrtypes.ReorgData{ + ReorgID: expectedReorgID, + BlockRangeAffected: aggkitcommon.BlockRange{ + FromBlock: 100, + ToBlock: 200, + }, + DetectedAtBlock: 250, + DetectedTimestamp: 1234567890, + } + + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). + Return(expectedReorgData, nil).Once() + + result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, expectedReorgData.ReorgID, result.ReorgID) + require.Equal(t, expectedReorgData.BlockRangeAffected, result.BlockRangeAffected) + require.Equal(t, expectedReorgData.DetectedAtBlock, result.DetectedAtBlock) + require.Equal(t, expectedReorgData.DetectedTimestamp, result.DetectedTimestamp) + }) + + t.Run("returns error when storage query fails", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedReorgID := uint64(42) + expectedErr := fmt.Errorf("database error") + + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). + Return(nil, expectedErr).Once() + + result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Nil(t, result) + }) + + t.Run("returns nil when reorgID not found", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + expectedReorgID := uint64(999) + + testData.mockStorage.EXPECT().GetReorgedDataByReorgID(mock.Anything, expectedReorgID). 
+			Return(nil, nil).Once()
+
+		result, err := testData.mdr.GetReorgedDataByReorgID(context.Background(), expectedReorgID)
+
+		require.NoError(t, err)
+		require.Nil(t, result)
+	})
+}
diff --git a/multidownloader/evm_multidownloader_rpc.go b/multidownloader/evm_multidownloader_rpc.go
index 5753bd23f..ba67016c4 100644
--- a/multidownloader/evm_multidownloader_rpc.go
+++ b/multidownloader/evm_multidownloader_rpc.go
@@ -1,8 +1,11 @@
 package multidownloader

 import (
+	"context"
+
 	"github.com/0xPolygon/cdk-rpc/rpc"
 	aggkitcommon "github.com/agglayer/aggkit/common"
+	"github.com/ethereum/go-ethereum/common"
 )

 type EVMMultidownloaderRPC struct {
@@ -22,12 +25,59 @@ func NewEVMMultidownloaderRPC(
 // Status returns the status of the L1InfoTreeSync component
 // curl -X POST http://localhost:5576/ "Content-Type: application/json" \
-// -d '{"method":"l1infotreesync_status", "params":[], "id":1}'
+// -d '{"method":"multidownloader-l1_status", "params":[], "id":1}'
 func (b *EVMMultidownloaderRPC) Status() (interface{}, rpc.Error) {
+	if !b.downloader.IsInitialized() {
+		return nil, rpc.NewRPCError(rpc.DefaultErrorCode,
+			"EVMMultidownloaderRPC.Status: multidownloader not initialized")
+	}
+	finalizedBlockNumber, err := b.downloader.GetFinalizedBlockNumber(context.Background())
+	if err != nil {
+		return nil, rpc.NewRPCError(rpc.DefaultErrorCode,
+			"EVMMultidownloaderRPC.Status: getting finalized block number: %v", err)
+	}
+	latestBlockNumber, err := b.downloader.GetLatestBlockNumber(context.Background())
+	if err != nil {
+		return nil, rpc.NewRPCError(rpc.DefaultErrorCode,
+			"EVMMultidownloaderRPC.Status: getting latest block number: %v", err)
+	}
+	b.downloader.mutex.Lock()
+	defer b.downloader.mutex.Unlock()
+	completionPercentage := b.downloader.state.CompletionPercentage()
+	minPercent := 100.0
+	for _, percent := range completionPercentage {
+		if percent < minPercent {
+			minPercent = percent
+		}
+	}
 	info := struct {
-		Status string `json:"status"`
+		Status                       string                     `json:"status"`
+		State                        string                     `json:"state,omitempty"`
+		Pending                      string                     `json:"pending,omitempty"`
+		FinalizedBlockNumber         uint64                     `json:"finalizedBlockNumber,omitempty"`
+		LatestBlockNumber            uint64                     `json:"latestBlockNumber,omitempty"`
+		CompletionPercentage         float64                    `json:"completionPercentage,omitempty"`
+		CompletionPercentageDetailed map[common.Address]float64 `json:"completionPercentageDetailed,omitempty"`
 	}{
-		Status: "running",
+		Status:                       "running",
+		State:                        b.downloader.state.String(),
+		FinalizedBlockNumber:         finalizedBlockNumber,
+		LatestBlockNumber:            latestBlockNumber,
+		CompletionPercentage:         minPercent,
+		CompletionPercentageDetailed: completionPercentage,
 	}
 	return info, nil
 }
+
+// Reorg forces a reorg at the given block number (only when debug is enabled).
+func (b *EVMMultidownloaderRPC) Reorg(mismatchingBlockNumber uint64) (interface{}, rpc.Error) {
+	if b.downloader.debug == nil {
+		return nil, rpc.NewRPCError(rpc.DefaultErrorCode,
+			"EVMMultidownloaderRPC.Reorg: debug is not enabled")
+	}
+	b.downloader.debug.ForceReorg(mismatchingBlockNumber)
+	return struct {
+		Message string `json:"message"`
+	}{
+		Message: "Reorg forced successfully",
+	}, nil
+}
diff --git a/multidownloader/evm_multidownloader_rpc_test.go b/multidownloader/evm_multidownloader_rpc_test.go
index a84983731..64f7bb956 100644
--- a/multidownloader/evm_multidownloader_rpc_test.go
+++ b/multidownloader/evm_multidownloader_rpc_test.go
@@ -1,9 +1,11 @@
 package multidownloader

 import (
+	"fmt"
 	"testing"

 	"github.com/agglayer/aggkit/log"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )

@@ -20,17 +22,45 @@ func TestNewEVMMultidownloaderRPC(t *testing.T) {

 func TestEVMMultidownloaderRPC_Status(t *testing.T) {
 	logger := log.WithFields("module", "test")
-	downloader := &EVMMultidownloader{}
-	rpcService := NewEVMMultidownloaderRPC(logger, downloader)
+	testData := newEVMMultidownloaderTestData(t, false)
+	testData.mdr.state = NewEmptyState()
+	testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything,
+		mock.Anything).Return(uint64(100), nil)
+	rpcService := NewEVMMultidownloaderRPC(logger, testData.mdr)

 	result, err := rpcService.Status()

 	require.Nil(t, err)
 	require.NotNil(t, result)
-	statusInfo, ok := result.(struct {
-		Status string `json:"status"`
+	require.Contains(t, fmt.Sprintf("%+v", result), "Status")
+}
+
+func TestEVMMultidownloaderRPC_Status_NotInitialized(t *testing.T) {
+	testData := newEVMMultidownloaderTestData(t, false)
+	sut := NewEVMMultidownloaderRPC(log.WithFields("module", "test"), testData.mdr)
+	_, err := sut.Status()
+	require.ErrorContains(t, err, "multidownloader not initialized")
+}
+
+func TestEVMMultidownloaderRPC_Reorg(t *testing.T) {
+	testData := newEVMMultidownloaderTestData(t, false)
+	t.Run("returns error if debug is not enabled", func(t *testing.T) {
+		sut := EVMMultidownloaderRPC{
+			logger:     log.WithFields("module", "test"),
+			downloader: testData.mdr,
+		}
+		_, err := sut.Reorg(123)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "debug is not enabled")
+	})
+	t.Run("calls ForceReorg on downloader when debug is enabled", func(t *testing.T) {
+		testData.mdr.debug = &EVMMultidownloaderDebug{}
+		sut := EVMMultidownloaderRPC{
+			logger:     log.WithFields("module", "test"),
+			downloader: testData.mdr,
+		}
+		_, err := sut.Reorg(123)
+		require.NoError(t, err)
 	})
-	require.True(t, ok)
-	require.Equal(t, "running", statusInfo.Status)
 }
diff --git a/multidownloader/evm_multidownloader_syncers.go b/multidownloader/evm_multidownloader_syncers.go
index 76349cf49..3ce22bdf6 100644
--- a/multidownloader/evm_multidownloader_syncers.go
+++ b/multidownloader/evm_multidownloader_syncers.go
@@ -12,6 +12,8 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )

+// debugSyncerInterface enables the verbose per-call logging of the syncer-facing interface.
+const debugSyncerInterface = false
+
 // ChainID gets the chain ID directly from ethClient
 func (dh *EVMMultidownloader) ChainID(ctx context.Context) (uint64, error) {
 	chainID, err := dh.ethClient.ChainID(ctx)
@@ -27,30 +29,21 @@ func (dh *EVMMultidownloader) BlockNumber(ctx context.Context,
 	return dh.blockNotifierManager.GetCurrentBlockNumber(ctx, finality)
 }

-// BlockHeader gets the block header for the given finality type
-func (dh *EVMMultidownloader) BlockHeader(ctx context.Context,
-	finality aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) {
-	number, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, finality)
-	if err != nil {
-		return nil, fmt.Errorf("EVMMultidownloader.BlockHeader: cannot get block number for finality=%s: %w",
-			finality.String(), err)
-	}
-	header, err := dh.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(number))
-	if err != nil {
-		return nil, fmt.Errorf("EVMMultidownloader.BlockHeader: cannot get header for block number=%d: %w",
-			number, err)
-	}
-	return header, nil
-}
-
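+// Illustrative sketch (an assumption, mirroring the unit tests): callers pass
+// the standard ethereum.FilterQuery shape, e.g.
+//
+//	q := ethereum.FilterQuery{
+//		FromBlock: big.NewInt(100),
+//		ToBlock:   big.NewInt(200),
+//		Addresses: []common.Address{addr},
+//	}
+//	logs, err := dh.FilterLogs(ctx, q)
+//
+// The call blocks (polling every cfg.WaitPeriodToCheckCatchUp) until the
+// requested range has been synced into storage.
+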
 // FilterLogs filters the logs. It gets them from storage or waits until they are available
 func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
-	dh.log.Debugf("EVMMultidownloader.FilterLogs: received query: %+v", query)
-	defer dh.log.Debugf("EVMMultidownloader.FilterLogs: finished query: %+v", query)
+	if !dh.IsInitialized() {
+		return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: multidownloader not initialized")
+	}
+	if debugSyncerInterface {
+		dh.log.Debugf("EVMMultidownloader.FilterLogs: received query: %+v", query)
+		defer dh.log.Debugf("EVMMultidownloader.FilterLogs: finished query: %+v", query)
+	}
 	logQuery := mdrtypes.NewLogQueryFromEthereumFilter(query)
 	for !dh.IsAvailable(logQuery) {
-		dh.log.Infof("EVMMultidownloader.FilterLogs: waiting %s for logs to be available: %s",
-			dh.cfg.WaitPeriodToCheckCatchUp.String(), logQuery.String())
+		if debugSyncerInterface {
+			dh.log.Debugf("EVMMultidownloader.FilterLogs: waiting %s for logs to be available: %s",
+				dh.cfg.WaitPeriodToCheckCatchUp.String(), logQuery.String())
+		}
 		select {
 		case <-time.After(dh.cfg.WaitPeriodToCheckCatchUp.Duration):
 		case <-ctx.Done():
@@ -63,21 +56,29 @@ func (dh *EVMMultidownloader) FilterLogs(ctx context.Context, query ethereum.Fil
 	if err != nil {
 		return nil, fmt.Errorf("EVMMultidownloader.FilterLogs: cannot get logs: %w", err)
 	}
-	dh.log.Debugf("EVMMultidownloader.FilterLogs(%d - %d): len(logs)= %d", query.FromBlock, query.ToBlock, len(logs))
-
+	if debugSyncerInterface {
+		dh.log.Debugf("EVMMultidownloader.FilterLogs(%d - %d): len(logs)= %d", query.FromBlock, query.ToBlock, len(logs))
+	}
 	return logs, nil
 }

 // HeaderByNumber gets the block header for the given block number from storage or ethClient
 func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context,
 	number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) {
-	dh.log.Debugf("EVMMultidownloader.HeaderByNumber: received number: %s", number.String())
-	defer dh.log.Debugf("EVMMultidownloader.HeaderByNumber: finished number: %s", number.String())
-	if !number.IsConstant() {
-		return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: only numeric blockNumbers are supported (got=%s)",
-			number.String())
+	if debugSyncerInterface {
+		dh.log.Debugf("EVMMultidownloader.HeaderByNumber: received number: %s", number.String())
+		defer dh.log.Debugf("EVMMultidownloader.HeaderByNumber: finished number: %s", number.String())
 	}
-	blockNumber := number.Specific
+	if number == nil {
+		number = &aggkittypes.LatestBlock
+	}
+	// Resolve blockNumber
+	blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, *number)
+	if err != nil {
+		return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: cannot get block number for finality=%s: %w",
+			number.String(), err)
+	}
+	// Is this block in storage?
 	block, _, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber)
 	if err != nil {
 		return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: cannot get BlockHeader number=%s: %w",
@@ -86,10 +87,12 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context,
 	if block != nil {
 		return block, nil
 	}
-	// This is a fallback mechanism in case the block is not found in storage (it must be in storage!)
-	dh.log.Debugf("EVMMultidownloader.HeaderByNumber: block number=%s not found in storage, fetching from ethClient",
-		number.String())
-	blockHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, number)
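+	// Fallback: the block should normally already be in storage; fetch it
+	// from the RPC node as a last resort.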
+	if debugSyncerInterface {
+		dh.log.Debugf("EVMMultidownloader.HeaderByNumber: block number=%s not found in storage, fetching from ethClient",
+			number.String())
+	}
+	// Get from ethClient
+	blockHeader, err := dh.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber))
 	if err != nil {
 		return nil, fmt.Errorf("EVMMultidownloader.HeaderByNumber: ethClient.HeaderByNumber(%s) failed. Err: %w",
 			number.String(), err)
@@ -97,7 +100,63 @@ func (dh *EVMMultidownloader) HeaderByNumber(ctx context.Context,
 	return blockHeader, nil
 }

+// StorageHeaderByNumber gets the block header for the given block number from storage
+func (dh *EVMMultidownloader) StorageHeaderByNumber(ctx context.Context,
+	number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error) {
+	if number == nil {
+		number = &aggkittypes.LatestBlock
+	}
+	// Resolve blockNumber
+	blockNumber, err := dh.blockNotifierManager.GetCurrentBlockNumber(ctx, *number)
+	if err != nil {
+		return nil, false, fmt.Errorf("EVMMultidownloader.StorageHeaderByNumber: cannot get block number for finality=%s: %w",
+			number.String(), err)
+	}
+	// Is this block in storage?
+	block, finalized, err := dh.storage.GetBlockHeaderByNumber(nil, blockNumber)
+	if err != nil {
+		return nil, false, fmt.Errorf("EVMMultidownloader.StorageHeaderByNumber: cannot get BlockHeader number=%s: %w",
+			number.String(), err)
+	}
+	return block, finalized, nil
+}
+
 // EthClient returns the underlying eth client
 func (dh *EVMMultidownloader) EthClient() aggkittypes.BaseEthereumClienter {
 	return dh.ethClient
 }
+
+// LogQuery serves the query from storage for the part of the requested range
+// that has already been synced.
+func (dh *EVMMultidownloader) LogQuery(ctx context.Context,
+	query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) {
+	if !dh.IsInitialized() {
+		return mdrtypes.LogQueryResponse{}, fmt.Errorf("EVMMultidownloader.LogQuery: multidownloader not initialized")
+	}
+	dh.mutex.Lock()
+	defer dh.mutex.Unlock()
+	isAvail, availQuery := dh.state.IsPartiallyAvailable(query)
+	if !isAvail {
+		return mdrtypes.LogQueryResponse{},
+			fmt.Errorf("EVMMultidownloader.LogQuery: logs not synced for query: %s",
+				query.String())
+	}
+	finalizedBlockNumber, err := dh.GetFinalizedBlockNumber(ctx)
+	if err != nil {
+		return mdrtypes.LogQueryResponse{},
+			fmt.Errorf("EVMMultidownloader.LogQuery: cannot get finalized block number: %w",
+				err)
+	}
+	// Execute the available part of the query against storage
+	result, err := dh.storage.LogQuery(nil, *availQuery)
+	if err != nil {
+		return mdrtypes.LogQueryResponse{}, fmt.Errorf("EVMMultidownloader.LogQuery: error executing log query %s: %w",
+			availQuery.String(), err)
+	}
+	// Calculate UnsafeRange
+	_, unsafePendingBlockRange := result.ResponseRange.SplitByBlockNumber(finalizedBlockNumber)
+	result.UnsafeRange = unsafePendingBlockRange
+	return result, nil
+}
+
+// Finality returns the block finality used to consider blocks safe.
+func (dh *EVMMultidownloader) Finality() aggkittypes.BlockNumberFinality {
+	return dh.cfg.BlockFinality
+}
diff --git a/multidownloader/evm_multidownloader_syncers_test.go b/multidownloader/evm_multidownloader_syncers_test.go
index e8b5a453b..73a5ac03d 100644
--- a/multidownloader/evm_multidownloader_syncers_test.go
+++ b/multidownloader/evm_multidownloader_syncers_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"

+	aggkitcommon "github.com/agglayer/aggkit/common"
 	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
 	aggkittypes "github.com/agglayer/aggkit/types"
"github.com/ethereum/go-ethereum" @@ -62,11 +63,13 @@ func TestEVMMultidownloader_BlockHeader(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock). Return(uint64(123456), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123456)). + Return(nil, false, nil) // Block not found in storage, will fetch from ethClient testData.mockEthClient.EXPECT().CustomHeaderByNumber(mock.Anything, aggkittypes.NewBlockNumber(123456)). Return(&aggkittypes.BlockHeader{ Number: 123456, }, nil) - header, err := testData.mdr.BlockHeader(t.Context(), aggkittypes.LatestBlock) + header, err := testData.mdr.HeaderByNumber(t.Context(), &aggkittypes.LatestBlock) require.NoError(t, err) require.Equal(t, uint64(123456), header.Number) } @@ -75,6 +78,9 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { t.Run("negative block number returns error", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + // FinalizedBlock is not a numeric finality, so GetCurrentBlockNumber will fail + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), errors.New("only numeric block finalities are supported")) // Test result, err := testData.mdr.HeaderByNumber(context.Background(), &aggkittypes.FinalizedBlock) @@ -88,6 +94,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { t.Run("storage error returns error", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). Return(nil, false, errStorageExample) @@ -108,10 +116,12 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { expectedBlock := &aggkittypes.BlockHeader{ Number: 123, } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). Return(expectedBlock, false, nil) - // Test + // Test result, err := testData.mdr.HeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(123)) // Assertions @@ -123,6 +133,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). Return(nil, false, nil) // Block not found in storage @@ -144,6 +156,8 @@ func TestEVMMultidownloader_HeaderByNumber(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(123), nil) testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(123)). 
Return(nil, false, nil) // Block not found in storage @@ -167,7 +181,7 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { t.Run("FilterLogs context canceled waiting to catch up", func(t *testing.T) { // Setup testData := newEVMMultidownloaderTestData(t, true) - + testData.FakeInitialized(t) query := ethereum.FilterQuery{ Addresses: []common.Address{addr1}, FromBlock: big.NewInt(100), @@ -189,12 +203,15 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ - SyncerID: "test_syncer", - ContractsAddr: []common.Address{addr1}, - FromBlock: 100, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "test_syncer", + ContractAddresses: []common.Address{addr1}, + FromBlock: 100, + ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) + testData.MockInitialize(t, 1) + err = testData.mdr.Initialize(t.Context()) + require.NoError(t, err) query := ethereum.FilterQuery{ Addresses: []common.Address{addr1}, @@ -203,7 +220,7 @@ func TestEVMMultidownloader_FilterLogs(t *testing.T) { } mdQuery := mdrtypes.NewLogQueryFromEthereumFilter(query) // It updated the syncedSegments with the new one to be available - err = testData.mdr.syncedSegments.AddLogQuery(&mdQuery) + err = testData.mdr.state.OnNewSyncedLogQuery(&mdQuery) require.NoError(t, err) testData.mockStorage.EXPECT().GetEthLogs(mock.Anything, mock.Anything). Return(nil, errStorageExample) @@ -222,3 +239,263 @@ func TestEVMMultidownloader_EthClient(t *testing.T) { testData := newEVMMultidownloaderTestData(t, true) require.Equal(t, testData.mockEthClient, testData.mdr.EthClient()) } + +func TestEVMMultidownloader_LogQuery(t *testing.T) { + t.Run("success case with unsafe range calculation", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a log query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + + // Mark the query as synced in state + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber (via GetCurrentBlockNumber) + finalizedBlock := uint64(150) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(finalizedBlock, nil) + + // Mock storage.LogQuery to return a response + expectedResponse := mdrtypes.LogQueryResponse{ + ResponseRange: aggkitcommon.NewBlockRange(100, 200), + } + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). 
+ Return(expectedResponse, nil) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.NoError(t, err) + require.Equal(t, aggkitcommon.NewBlockRange(100, 200), result.ResponseRange) + // UnsafeRange should be the range after finalized block + require.Equal(t, aggkitcommon.NewBlockRange(151, 200), result.UnsafeRange) + }) + + t.Run("logs not synced returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a query that is NOT synced + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + + // Test - state.IsPartiallyAvailable will return false because we didn't call OnNewSyncedLogQuery + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "logs not synced for query") + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("GetFinalizedBlockNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create and sync a query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber to fail + expectedErr := errors.New("finalized block error") + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(uint64(0), expectedErr) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get finalized block number") + require.ErrorIs(t, err, expectedErr) + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("storage.LogQuery error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create and sync a query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). + Return(uint64(150), nil) + + // Mock storage.LogQuery to fail + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). + Return(mdrtypes.LogQueryResponse{}, errStorageExample) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.Error(t, err) + require.Contains(t, err.Error(), "error executing log query") + require.ErrorIs(t, err, errStorageExample) + require.Equal(t, mdrtypes.LogQueryResponse{}, result) + }) + + t.Run("empty unsafe range when all blocks are finalized", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + testData.FakeInitialized(t) + + // Create a log query + query := mdrtypes.NewLogQuery(100, 200, []common.Address{addr1}) + err := testData.mdr.state.OnNewSyncedLogQuery(&query) + require.NoError(t, err) + + // Mock GetFinalizedBlockNumber - finalized is beyond the query range + finalizedBlock := uint64(250) + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, testData.mdr.cfg.BlockFinality). 
+ Return(finalizedBlock, nil) + + // Mock storage.LogQuery + expectedResponse := mdrtypes.LogQueryResponse{ + ResponseRange: aggkitcommon.NewBlockRange(100, 200), + } + testData.mockStorage.EXPECT().LogQuery(mock.Anything, query). + Return(expectedResponse, nil) + + // Test + result, err := testData.mdr.LogQuery(context.Background(), query) + + // Assertions + require.NoError(t, err) + require.Equal(t, aggkitcommon.NewBlockRange(100, 200), result.ResponseRange) + // UnsafeRange should be empty since all blocks are finalized + require.True(t, result.UnsafeRange.IsEmpty()) + }) +} + +func TestEVMMultidownloader_StorageHeaderByNumber(t *testing.T) { + t.Run("block found in storage with finalized=true", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 123, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, true, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(123)) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.True(t, finalized) + }) + + t.Run("block found in storage with finalized=false", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 456, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, false, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(456)) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.False(t, finalized) + }) + + t.Run("nil block number defaults to LatestBlock", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + expectedBlock := &aggkittypes.BlockHeader{ + Number: 999, + } + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.LatestBlock). + Return(expectedBlock.Number, nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, expectedBlock.Number). + Return(expectedBlock, true, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), nil) + + // Assertions + require.NoError(t, err) + require.Equal(t, expectedBlock, result) + require.True(t, finalized) + }) + + t.Run("block not found in storage returns nil", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(789), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(789)). 
+ Return(nil, false, nil) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(789)) + + // Assertions + require.NoError(t, err) + require.Nil(t, result) + require.False(t, finalized) + }) + + t.Run("GetCurrentBlockNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + expectedErr := errors.New("block number resolution error") + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), expectedErr) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), &aggkittypes.FinalizedBlock) + + // Assertions + require.Nil(t, result) + require.False(t, finalized) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get block number for finality") + require.ErrorIs(t, err, expectedErr) + }) + + t.Run("GetBlockHeaderByNumber error returns error", func(t *testing.T) { + // Setup + testData := newEVMMultidownloaderTestData(t, true) + + testData.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(555), nil) + testData.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, uint64(555)). + Return(nil, false, errStorageExample) + + // Test + result, finalized, err := testData.mdr.StorageHeaderByNumber(context.Background(), aggkittypes.NewBlockNumber(555)) + + // Assertions + require.Nil(t, result) + require.False(t, finalized) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get BlockHeader number=555") + require.ErrorIs(t, err, errStorageExample) + }) +} diff --git a/multidownloader/evm_multidownloader_test.go b/multidownloader/evm_multidownloader_test.go index e3849a163..041d8632d 100644 --- a/multidownloader/evm_multidownloader_test.go +++ b/multidownloader/evm_multidownloader_test.go @@ -3,21 +3,25 @@ package multidownloader import ( "context" "fmt" + "math/big" "os" "sync" + "sync/atomic" "testing" "time" aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/config/types" + "github.com/agglayer/aggkit/db" + dbmocks "github.com/agglayer/aggkit/db/mocks" "github.com/agglayer/aggkit/etherman" mockethermantypes "github.com/agglayer/aggkit/etherman/types/mocks" - "github.com/agglayer/aggkit/l1infotreesync" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/multidownloader/storage" + mdrsync "github.com/agglayer/aggkit/multidownloader/sync" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" mdrtypes "github.com/agglayer/aggkit/multidownloader/types" mockmdrtypes "github.com/agglayer/aggkit/multidownloader/types/mocks" - "github.com/agglayer/aggkit/reorgdetector" aggkitsync "github.com/agglayer/aggkit/sync" aggkittypes "github.com/agglayer/aggkit/types" mocktypes "github.com/agglayer/aggkit/types/mocks" @@ -29,11 +33,44 @@ import ( "github.com/stretchr/testify/require" ) -const runL1InfoTree = true -const l1InfoTreeUseMultidownloader = true +const storagePath = "../tmp/ut/" +const runASyncer = true + +type testProcessor struct { + lastBlock *aggkittypes.BlockHeader +} + +func (tp *testProcessor) GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error) { + return tp.lastBlock, nil +} + +func (tp *testProcessor) ProcessBlocks(ctx context.Context, blocks *mdrsynctypes.DownloadResult) error { + if blocks == nil || len(blocks.Data) == 0 { + return nil + } + for _, block := range blocks.Data { + if err := 
tp.ProcessBlock(ctx, block); err != nil { + return err + } + } + return nil +} +func (tp *testProcessor) ProcessBlock(ctx context.Context, block *aggkitsync.EVMBlock) error { + log.Infof("PROCESSOR: Processing block number %d", block.Num) + tp.lastBlock = &aggkittypes.BlockHeader{ + Number: block.Num, + Hash: block.Hash, + } + return nil +} +func (tp *testProcessor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + log.Infof("PROCESSOR: Reorg from block number %d", firstReorgedBlock) + return nil +} + +func TestEVMMultidownloaderExploratory(t *testing.T) { + t.Skip("code to test/debug not real unittest - requires external dependencies (l1infotreesync causes import cycle)") -func TestEVMMultidownloader(t *testing.T) { - t.Skip("code to test/debug not real unittest") cfgLog := log.Config{ Environment: "development", Level: "info", @@ -54,7 +91,7 @@ func TestEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "test") db, err := storage.NewMultidownloaderStorage(logger, storage.MultidownloaderStorageConfig{ - DBPath: "/tmp/mdr_test.sqlite", + DBPath: storagePath + "mdr_test.sqlite", }) require.NoError(t, err) cfg := Config{ @@ -62,15 +99,17 @@ func TestEVMMultidownloader(t *testing.T) { MaxParallelBlockHeaderRetrieval: 50, BlockFinality: aggkittypes.FinalizedBlock, WaitPeriodToCheckCatchUp: types.NewDuration(time.Second), + PeriodToCheckReorgs: types.NewDuration(time.Second * 10), } + mdr, err := NewEVMMultidownloader(logger, cfg, "l1", ethClient, ethRPCClient, - db, nil) + db, nil, nil) require.NoError(t, err) require.NotNil(t, mdr) err = mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "test_syncer", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), // GERManager common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), // RollupManager }, @@ -78,43 +117,36 @@ func TestEVMMultidownloader(t *testing.T) { ToBlock: aggkittypes.LatestBlock, }) require.NoError(t, err) + ctx := context.TODO() - var l1infotree *l1infotreesync.L1InfoTreeSync - if runL1InfoTree == true { - var multidownloader aggkittypes.MultiDownloader - var dbPath string - if l1InfoTreeUseMultidownloader { - multidownloader = mdr - dbPath = "/tmp/l1infotree_md.sqlite" - } else { - multidownloader = aggkitsync.NewAdapterEthClientToMultidownloader(ethClient) - dbPath = "/tmp/l1infotree_eth.sqlite" - } - reorgDetector, err := reorgdetector.New(ethClient, reorgdetector.Config{ - DBPath: "/tmp/l1_reorgdetector.sqlite", - CheckReorgsInterval: types.NewDuration(time.Second * 10), - FinalizedBlock: aggkittypes.FinalizedBlock, - }, reorgdetector.L1) - require.NoError(t, err) - l1infotree, err = l1infotreesync.New( - ctx, - l1infotreesync.Config{ - DBPath: dbPath, - InitialBlock: 5157574, - GlobalExitRootAddr: common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), - RollupManagerAddr: common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), - SyncBlockChunkSize: 6500, - WaitForNewBlocksPeriod: types.Duration{ - Duration: 5 * time.Second, - }, - BlockFinality: aggkittypes.FinalizedBlock, - }, - multidownloader, - reorgDetector, - l1infotreesync.FlagStopOnFinalizedBlockReached, + var syncer *mdrsync.EVMDriver + if runASyncer == true { + logger := log.WithFields("syncer", "test") + rh := &aggkitsync.RetryHandler{ + RetryAfterErrorPeriod: time.Second, + MaxRetryAttemptsAfterError: 0, + } + downloader := mdrsync.NewEVMDownloader( + mdr, + logger, + rh, + nil, // appender, + time.Second, + 
time.Second, ) - require.NoError(t, err) + syncerConfig := aggkittypes.SyncerConfig{ + SyncerID: "l1infotree_syncer_test", + ContractAddresses: []common.Address{ + common.HexToAddress("0x2968d6d736178f8fe7393cc33c87f29d9c287e78"), // GlobalExitRootAddr + common.HexToAddress("0xe2ef6215adc132df6913c8dd16487abf118d1764"), // RollupManager + }, + FromBlock: 5157574, + ToBlock: aggkittypes.LatestBlock, + } + processor := &testProcessor{} + syncer = mdrsync.NewEVMDriver(logger, processor, downloader, syncerConfig, + 100, rh, nil) } var wg sync.WaitGroup @@ -134,8 +166,8 @@ func TestEVMMultidownloader(t *testing.T) { defer wg.Done() timer := aggkitcommon.TimeTracker{} timer.Start() - if l1infotree != nil { - l1infotree.Start(t.Context()) + if syncer != nil { + syncer.Sync(t.Context()) } timer.Stop() log.Infof("L1InfoTree sync finished in %s", timer.String()) @@ -143,45 +175,9 @@ func TestEVMMultidownloader(t *testing.T) { wg.Wait() } -func TestEVMMultidownloaderExploratoryBatchRequests(t *testing.T) { - t.Skip("it's a exploratory test for batch requests") - l1url := os.Getenv("L1URL") - ethClient, err := rpc.DialContext(t.Context(), l1url) - require.NoError(t, err) - var blockNumber string - var chainID string - - var latestBlock aggkittypes.BlockHeader - batch := []rpc.BatchElem{ - { - Method: "eth_blockNumber", - Args: []interface{}{}, - Result: &blockNumber, - }, - { - Method: "eth_chainId", - Args: []interface{}{}, - Result: &chainID, - }, - { - Method: "eth_getBlockByNumber", - Args: []interface{}{ - "0x37", // número de bloque en formato hex o palabra clave - false, // incluir transacciones completas - }, - Result: &latestBlock, - }, - } - - err = ethClient.BatchCallContext(t.Context(), batch) - require.NoError(t, err) - - log.Infof("blockNumber: %s, chainID: %s", blockNumber, chainID) - log.Infof("latestBlock: %+v", latestBlock) -} +func TestPerformanceDownloaderParallelvsBatch(t *testing.T) { + t.Skip("it's a benchmarking test - requires external dependencies") -func TestDownloaderParellelvsBatch(t *testing.T) { - t.Skip("it's a benchmarking test") l1url := os.Getenv("L1URL") ethClient, err := ethclient.Dial(l1url) require.NoError(t, err) @@ -201,38 +197,33 @@ func TestDownloaderParellelvsBatch(t *testing.T) { start := time.Now() headersBatch, err := etherman.RetrieveBlockHeaders(t.Context(), logger, nil, ethRPCClient, blockNumbersMap, 10) require.NoError(t, err) + require.True(t, headersBatch.Success()) durationBatch := time.Since(start) log.Infof("BatchMode took %s", durationBatch.String()) start = time.Now() headersParallel, err := etherman.RetrieveBlockHeaders(t.Context(), logger, ethClientWrapped, nil, blockNumbersMap, 20) require.NoError(t, err) + require.True(t, headersParallel.Success()) durationParallel := time.Since(start) log.Infof("Parallel RPC took %s", durationParallel.String()) - require.Equal(t, len(headersParallel), len(headersBatch)) + require.Equal(t, len(headersParallel.Headers), len(headersBatch.Headers)) for _, blockNumber := range blockNumbersSlice { - headerP := getBlockHeader(blockNumber, headersParallel) - headerB := getBlockHeader(blockNumber, headersBatch) + headerP, existsP := headersParallel.Headers[blockNumber] + headerB, existsB := headersBatch.Headers[blockNumber] + require.True(t, existsP) + require.True(t, existsB) require.NotNil(t, headerP) require.NotNil(t, headerB) require.Equal(t, headerP.Hash, headerB.Hash) } } -func getBlockHeader(bn uint64, headers []*aggkittypes.BlockHeader) *aggkittypes.BlockHeader { - for _, h := range headers { - if h.Number 
== bn { - return h - } - } - return nil -} - func TestEVMMultidownloader_NewEVMMultidownloader(t *testing.T) { logger := log.WithFields("test", "evm_multidownloader_test") cfg := NewConfigDefault("test.sqlite", t.TempDir()) - sut, err := NewEVMMultidownloader(logger, cfg, "test", nil, nil, nil, nil) + sut, err := NewEVMMultidownloader(logger, cfg, "test", nil, nil, nil, nil, nil) require.NoError(t, err) require.NotNil(t, sut) require.NotNil(t, sut.blockNotifierManager) @@ -251,7 +242,7 @@ func TestEVMMultidownloader_RegisterSyncer(t *testing.T) { testData := newEVMMultidownloaderTestData(t, false) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "syncer1", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x1"), }, FromBlock: 100, @@ -306,7 +297,7 @@ func TestEVMMultidownloader_GetRPCServices(t *testing.T) { require.NoError(t, err) customName := "custom-name" - mdr, err := NewEVMMultidownloader(logger, cfg, customName, ethClient, nil, db, nil) + mdr, err := NewEVMMultidownloader(logger, cfg, customName, ethClient, nil, db, nil, nil) require.NoError(t, err) services := mdr.GetRPCServices() @@ -349,7 +340,7 @@ func TestEVMMultidownloader_StepSafe(t *testing.T) { testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil) err := testData.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ SyncerID: "syncer1", - ContractsAddr: []common.Address{ + ContractAddresses: []common.Address{ common.HexToAddress("0x1"), }, FromBlock: 100, @@ -366,58 +357,51 @@ func TestEVMMultidownloader_StepSafe(t *testing.T) { require.NoError(t, err) require.True(t, finished) - err = testData.mdr.sync(t.Context(), testData.mdr.StepSafe, "safe") - require.NoError(t, err) - require.True(t, finished) - ctx, cancel := context.WithCancel(context.TODO()) cancel() _, err = testData.mdr.StepSafe(ctx) require.ErrorIs(t, err, context.Canceled) } -func TestEVMMultidownloader_sync(t *testing.T) { - testData := newEVMMultidownloaderTestData(t, false) - t.Run("context canceled", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - err := testData.mdr.sync(ctx, func(ctx context.Context) (bool, error) { - return false, nil - }, "test_sync") - require.ErrorIs(t, err, context.Canceled) - }) - t.Run("sync func returns an error", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - returnedErr := fmt.Errorf("sync function error") - err := testData.mdr.sync(ctx, func(ctx context.Context) (bool, error) { - return false, returnedErr - }, "test_sync") - require.ErrorIs(t, err, returnedErr) - }) - - t.Run("sync func finished no errors", func(t *testing.T) { - err := testData.mdr.sync(t.Context(), func(ctx context.Context) (bool, error) { - return true, nil - }, "test_sync") - require.NoError(t, err) +func TestEVMMultidownloader_Start(t *testing.T) { + t.Run("initialization error is returned", func(t *testing.T) { + testData := newEVMMultidownloaderTestData(t, true) + + // Verify not initialized + require.False(t, testData.mdr.IsInitialized()) + + // Mock ChainID to fail + expectedErr := fmt.Errorf("chain ID error") + testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(nil, expectedErr).Once() + + ctx := context.Background() + + // Start should try to initialize and return the error + err := testData.mdr.Start(ctx) + + // Should return the initialization error + require.Error(t, err) + require.Contains(t, err.Error(), "chain ID error") + + // Verify it was not initialized + require.False(t, 
testData.mdr.IsInitialized())
+	})
-}
 
-/*
-func TestEVMMultidownloader_Start(t *testing.T) {
-	testData := newEVMMultidownloaderTestData(t)
-	testData.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil).Maybe()
-	err := testData.mdr.Initialize(t.Context())
-	require.NoError(t, err)
+	t.Run("Start() and reorg", func(t *testing.T) {
+		testData := newEVMMultidownloaderTestData(t, false)
+		testData.mdr.debug = &EVMMultidownloaderDebug{} // Enable debug to test that reorgs are checked even in debug mode
+		// Fake initialization
+		testData.mdr.state = NewEmptyState()
+		ctx := context.Background()
+		testData.mdr.debug.ForceReorg(1234)
 
-	start := time.Now()
-	err = testData.mdr.Start(t.Context())
-	duration := time.Since(start)
-	log.Infof("Multidownloader Start took %s", duration.String())
-	require.NoError(t, err)
+		testData.mockReorgProcessor.EXPECT().ProcessReorg(mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+		// It starts, executes one loop that performs the forced reorg, and then returns
+		err := testData.mdr.startNumLoops(ctx, 1)
+		// Should return no error
+		require.NoError(t, err)
+	})
 }
-*/
 
 type testDataEVMMultidownloader struct {
 	mockEthClient            *mocktypes.BaseEthereumClienter
@@ -426,6 +410,27 @@ type testDataEVMMultidownloader struct {
 	mockStorage              *mockmdrtypes.Storager
 	usedStorage              mdrtypes.Storager
 	mockBlockNotifierManager *mockethermantypes.BlockNotifierManager
+	mockReorgProcessor       *mockmdrtypes.ReorgProcessor
+}
+
+func (td *testDataEVMMultidownloader) FakeInitialized(t *testing.T) {
+	t.Helper()
+	td.mdr.state = NewEmptyState()
+}
+
+func (td *testDataEVMMultidownloader) MockInitialize(t *testing.T, chainID uint64) {
+	t.Helper()
+	chainIDBig := big.NewInt(0).SetUint64(chainID)
+	td.mockEthClient.EXPECT().ChainID(mock.Anything).Return(chainIDBig, nil).Maybe()
+	if td.mockStorage != nil {
+		td.mockStorage.EXPECT().GetValue(mock.Anything, mock.Anything, mock.Anything).Return("", db.ErrNotFound).Maybe()
+		td.mockStorage.EXPECT().InsertValue(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+		td.mockStorage.EXPECT().UpsertSyncerConfigs(mock.Anything, mock.Anything).Return(nil).Maybe()
+		td.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything).Return(mdrtypes.NewSetSyncSegment(), nil).Maybe()
+	}
+	if td.mockBlockNotifierManager != nil {
+		td.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything).Return(uint64(200), nil).Maybe()
+	}
 }
 
 func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMMultidownloader {
@@ -439,6 +444,7 @@
 	}
 	ethClient := mocktypes.NewBaseEthereumClienter(t)
 	mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t)
+	mockReorgProcessor := mockmdrtypes.NewReorgProcessor(t)
 	var mockDB *mockmdrtypes.Storager
 	var realDB *storage.MultidownloaderStorage
 	var useDB mdrtypes.Storager
@@ -453,8 +459,8 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool) *testDataEVMM
 		require.NoError(t, err)
 		useDB = realDB
 	}
-	// TODO: Add mock for ethRPCClient if needed
-	mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil, useDB, mockBlockNotifierManager)
+	mdr, err := NewEVMMultidownloader(logger, cfg, "test", ethClient, nil,
+		useDB, mockBlockNotifierManager, mockReorgProcessor)
 	require.NoError(t, err)
 	return &testDataEVMMultidownloader{
 		mockEthClient:            ethClient,
@@ -463,5 +469,1223 @@ func newEVMMultidownloaderTestData(t *testing.T, mockStorage bool)
*testDataEVMM mockStorage: mockDB, usedStorage: useDB, mockBlockNotifierManager: mockBlockNotifierManager, + mockReorgProcessor: mockReorgProcessor, } } + +func TestEVMMultidownloader_StartStop(t *testing.T) { + t.Run("Stop without Start returns error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + err := data.mdr.Stop(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "not running") + }) + + t.Run("Start and Stop successfully", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks for Start loop + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("stop test")).Maybe() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe() + + // Start in background + ctx := context.Background() + var startErr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + startErr = data.mdr.Start(ctx) + }() + + // Give it time to start and run a few iterations + time.Sleep(50 * time.Millisecond) + + // Stop should succeed + stopCtx := context.Background() + err := data.mdr.Stop(stopCtx) + require.NoError(t, err) + + // Wait for Start to finish + wg.Wait() + // Start should return context.Canceled (clean shutdown via context cancellation) + require.ErrorIs(t, startErr, context.Canceled) + }) + + t.Run("Start twice returns error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("stop test")).Maybe() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe() + + // Start first time + ctx := context.Background() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + _ = data.mdr.Start(ctx) + }() + + // Give it time to start + time.Sleep(50 * time.Millisecond) + + // Try to start again - should fail + err := data.mdr.Start(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "already running") + + // Cleanup + _ = data.mdr.Stop(ctx) + wg.Wait() + }) + + t.Run("Stop waits for Start to complete", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Setup mocks + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). 
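For context, the StartStop subtests in this hunk pin down the run-loop contract: Start blocks until its context is cancelled, a second Start fails with "already running", Stop without a prior Start fails with "not running", and a clean shutdown surfaces as context.Canceled from Start. A caller-side sketch of that contract, assuming the usual context/errors/fmt imports (illustrative wiring, not the actual implementation):

// Sketch: how a caller is expected to drive Start/Stop, per the tests below.
func runMultidownloaderSketch(ctx context.Context, mdr *EVMMultidownloader) error {
	errCh := make(chan error, 1)
	go func() { errCh <- mdr.Start(ctx) }() // Start blocks until the loop is cancelled
	<-ctx.Done()                            // wait for a shutdown signal
	if err := mdr.Stop(context.Background()); err != nil {
		return fmt.Errorf("stop: %w", err) // e.g. "not running" if Start never ran
	}
	// Per the tests, a clean shutdown surfaces as context.Canceled from Start.
	if err := <-errCh; err != nil && !errors.Is(err, context.Canceled) {
		return err
	}
	return nil
}
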
+			Return(uint64(100), nil).Maybe()
+		data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, fmt.Errorf("mock error")).Maybe()
+		data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything).Return(nil, nil).Maybe()
+
+		// Start in background
+		ctx := context.Background()
+		var startCompleted atomic.Bool
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			_ = data.mdr.Start(ctx)
+			startCompleted.Store(true)
+		}()
+
+		// Give it time to start
+		time.Sleep(50 * time.Millisecond)
+
+		// Stop and verify it waits
+		stopStartTime := time.Now()
+		stopCtx := context.Background()
+		err := data.mdr.Stop(stopCtx)
+		stopDuration := time.Since(stopStartTime)
+
+		require.NoError(t, err)
+		require.True(t, startCompleted.Load(), "Start should have completed before Stop returns")
+		require.Greater(t, stopDuration, time.Duration(0), "Stop should take some time waiting for Start")
+
+		wg.Wait()
+	})
+}
+
+func TestEVMMultidownloader_MoveUnsafeToSafeIfPossible(t *testing.T) {
+	t.Run("successful move from unsafe to safe", func(t *testing.T) {
+		data := newEVMMultidownloaderTestData(t, true)
+		data.FakeInitialized(t)
+		ctx := context.Background()
+
+		// Mock finalized block number
+		finalizedBlockNumber := uint64(200)
+		data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality).
+			Return(finalizedBlockNumber, nil).Once()
+
+		// Mock transaction
+		mockTx := dbmocks.NewTxer(t)
+		mockTx.EXPECT().Rollback().Return(nil).Maybe()
+		mockTx.EXPECT().Commit().Return(nil).Once()
+		data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once()
+
+		// Create Ethereum headers that will be returned by RPC
+		header195 := &ethtypes.Header{
+			Number:     big.NewInt(195),
+			ParentHash: common.HexToHash("0x194"),
+			Time:       1234567890,
+		}
+		header196 := &ethtypes.Header{
+			Number:     big.NewInt(196),
+			ParentHash: common.HexToHash("0x195"),
+			Time:       1234567891,
+		}
+
+		// Mock unsafe blocks with the same hashes that will be calculated from the Ethereum headers
+		unsafeBlocks := aggkittypes.ListBlockHeaders{
+			&aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()},
+			&aggkittypes.BlockHeader{Number: 196, Hash: header196.Hash()},
+		}
+		data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber).
+			Return(unsafeBlocks, nil).Once()
+
+		// Mock RPC block headers retrieval for reorg detection
+		data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once()
+		data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(196)).Return(header196, nil).Once()
+
+		// Mock update to finalized
+		data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195, 196}).Return(nil).Once()
+
+		err := data.mdr.moveUnsafeToSafeIfPossible(ctx)
+		require.NoError(t, err)
+	})
+
+	t.Run("no unsafe blocks to move", func(t *testing.T) {
+		data := newEVMMultidownloaderTestData(t, true)
+		data.FakeInitialized(t)
+		ctx := context.Background()
+
+		// Mock finalized block number
+		finalizedBlockNumber := uint64(200)
+		data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality).
+			Return(finalizedBlockNumber, nil).Once()
+
+		// Mock transaction
+		mockTx := dbmocks.NewTxer(t)
+		mockTx.EXPECT().Rollback().Return(nil).Maybe()
+		data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once()
+
+		// Mock no unsafe blocks
+		emptyBlocks := aggkittypes.ListBlockHeaders{}
+		data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber).
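A note on the fixture above: the stored hashes are built with header195.Hash()/header196.Hash() rather than hard-coded, because the move-to-safe path recomputes each header hash from the RPC response and compares it with what storage holds. A compressed illustration, reusing the test file's import aliases (values hypothetical):

// header.Hash() is the keccak hash of the RLP-encoded header, so a stored
// entry derived from the same header always satisfies the reorg check.
func hashFixtureSketch() bool {
	h := &ethtypes.Header{Number: big.NewInt(195), ParentHash: common.HexToHash("0x194")}
	stored := &aggkittypes.BlockHeader{Number: 195, Hash: h.Hash()}
	return stored.Hash == h.Hash() // true -> no reorg, block can be promoted to safe
}
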
+ Return(emptyBlocks, nil).Once() + + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) + require.NoError(t, err) + }) + + t.Run("error getting finalized block number", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number error + expectedErr := fmt.Errorf("finalized block error") + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(uint64(0), expectedErr).Once() + + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get finalized block number") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("error creating transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction creation error + expectedErr := fmt.Errorf("tx creation error") + data.mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("error getting unsafe blocks", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // Mock error getting unsafe blocks + expectedErr := fmt.Errorf("get blocks error") + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). + Return(nil, expectedErr).Once() + + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get unsafe block bases") + require.Contains(t, err.Error(), expectedErr.Error()) + }) + + t.Run("reorg detected during move", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock finalized block number + finalizedBlockNumber := uint64(200) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality). + Return(finalizedBlockNumber, nil).Once() + + // Mock transaction + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Once() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + + // Mock unsafe blocks with a specific hash + storageHash := common.HexToHash("0x195") + unsafeBlocks := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 195, Hash: storageHash}, + } + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber). 
+			Return(unsafeBlocks, nil).Once()
+
+		// Mock RPC returns header with different hash (reorg detected)
+		headerDifferent := &ethtypes.Header{
+			Number:     big.NewInt(195),
+			ParentHash: common.HexToHash("0xDIFFERENT"),
+			Time:       9999999,
+		}
+		data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(headerDifferent, nil).Once()
+
+		err := data.mdr.moveUnsafeToSafeIfPossible(ctx)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "error detecting reorgs")
+		// Check it's a reorg error
+		reorgErr := mdrtypes.CastDetectedReorgError(err)
+		require.NotNil(t, reorgErr)
+	})
+
+	t.Run("error updating blocks to finalized", func(t *testing.T) {
+		data := newEVMMultidownloaderTestData(t, true)
+		data.FakeInitialized(t)
+		ctx := context.Background()
+
+		// Mock finalized block number
+		finalizedBlockNumber := uint64(200)
+		data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality).
+			Return(finalizedBlockNumber, nil).Once()
+
+		// Mock transaction
+		mockTx := dbmocks.NewTxer(t)
+		mockTx.EXPECT().Rollback().Return(nil).Once()
+		data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once()
+
+		// Create Ethereum header
+		header195 := &ethtypes.Header{
+			Number:     big.NewInt(195),
+			ParentHash: common.HexToHash("0x194"),
+			Time:       1234567890,
+		}
+
+		// Mock unsafe blocks with matching hash
+		unsafeBlocks := aggkittypes.ListBlockHeaders{
+			&aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()},
+		}
+		data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber).
+			Return(unsafeBlocks, nil).Once()
+
+		// Mock RPC block headers (no reorg)
+		data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once()
+
+		// Mock update error
+		expectedErr := fmt.Errorf("update error")
+		data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(expectedErr).Once()
+
+		err := data.mdr.moveUnsafeToSafeIfPossible(ctx)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "cannot update is_final for block bases")
+		require.Contains(t, err.Error(), expectedErr.Error())
+	})
+
+	t.Run("error committing transaction", func(t *testing.T) {
+		data := newEVMMultidownloaderTestData(t, true)
+		data.FakeInitialized(t)
+		ctx := context.Background()
+
+		// Mock finalized block number
+		finalizedBlockNumber := uint64(200)
+		data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, data.mdr.cfg.BlockFinality).
+			Return(finalizedBlockNumber, nil).Once()
+
+		// Mock transaction
+		mockTx := dbmocks.NewTxer(t)
+		mockTx.EXPECT().Rollback().Return(nil).Maybe()
+		expectedErr := fmt.Errorf("commit error")
+		mockTx.EXPECT().Commit().Return(expectedErr).Once()
+		data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once()
+
+		// Create Ethereum header
+		header195 := &ethtypes.Header{
+			Number:     big.NewInt(195),
+			ParentHash: common.HexToHash("0x194"),
+			Time:       1234567890,
+		}
+
+		// Mock unsafe blocks with matching hash
+		unsafeBlocks := aggkittypes.ListBlockHeaders{
+			&aggkittypes.BlockHeader{Number: 195, Hash: header195.Hash()},
+		}
+		data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, &finalizedBlockNumber).
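The failure subtests around this point hinge on one comparison: the hash stored for a height versus the hash of the header re-fetched from the RPC. A sketch of that check, using the DetectedReorgError fields the waitForNewBlocks tests later assert on (the function body is an assumption, not the real code):

// Sketch: per-height reorg check as the tests above exercise it.
func detectReorgAtHeightSketch(stored *aggkittypes.BlockHeader, rpc *ethtypes.Header) error {
	if stored.Hash == rpc.Hash() {
		return nil // storage and chain agree at this height
	}
	return &mdrtypes.DetectedReorgError{
		ReorgDetectionReason: mdrtypes.ReorgDetectionReason_BlockHashMismatch,
		OffendingBlockNumber: stored.Number,
		OldHash:              stored.Hash,
		NewHash:              rpc.Hash(),
	}
}
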
+ Return(unsafeBlocks, nil).Once() + + // Mock RPC block headers (no reorg) + data.mockEthClient.EXPECT().HeaderByNumber(mock.Anything, big.NewInt(195)).Return(header195, nil).Once() + + // Mock update success + data.mockStorage.EXPECT().UpdateBlockToFinalized(mockTx, []uint64{195}).Return(nil).Once() + + err := data.mdr.moveUnsafeToSafeIfPossible(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + require.Contains(t, err.Error(), expectedErr.Error()) + }) +} + +func TestEVMMultidownloader_StartStep(t *testing.T) { + t.Run("error in MoveUnsafeToSafeIfPossible", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock updateTargetBlockNumber success (no pending blocks to update) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible to fail + expectedErr := fmt.Errorf("move unsafe error") + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(nil, expectedErr).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + }) + + t.Run("error in checkReorgsUnsafeZone", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + ctx := context.Background() + + // Mock updateTargetBlockNumber success + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible success (no unsafe blocks) + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock checkReorgsUnsafeZone to fail + expectedErr := fmt.Errorf("check reorgs error") + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything). + Return(nil, expectedErr).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "check reorgs error") + }) + + t.Run("no pending blocks - waits for new blocks", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + data.FakeInitialized(t) + + // Create a context with cancel to avoid waiting forever + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Mock updateTargetBlockNumber success + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(100), nil).Maybe() + + // Mock MoveUnsafeToSafeIfPossible success + mockTx := dbmocks.NewTxer(t) + mockTx.EXPECT().Rollback().Return(nil).Maybe() + data.mockStorage.EXPECT().NewTx(mock.Anything).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mockTx, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock checkReorgsUnsafeZone success (no unsafe blocks) + data.mockStorage.EXPECT().GetBlockHeadersNotFinalized(mock.Anything, mock.Anything). + Return(aggkittypes.ListBlockHeaders{}, nil).Once() + + // Mock WaitForNewLatestBlocks - GetBlockHeaderByNumber will fail + data.mockStorage.EXPECT().GetBlockHeaderByNumber(mock.Anything, mock.Anything). 
+ Return(nil, mdrtypes.NotFinalized, fmt.Errorf("no blocks yet")).Once() + + err := data.mdr.StartStep(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get block header") + }) +} + +func TestGetBlockNumbers(t *testing.T) { + t.Run("empty logs", func(t *testing.T) { + logs := []ethtypes.Log{} + result := getBlockNumbers(logs) + require.Empty(t, result) + }) + + t.Run("single log", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 1) + require.Equal(t, uint64(100), result[0]) + }) + + t.Run("multiple logs with unique block numbers", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + {BlockNumber: 101}, + {BlockNumber: 102}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 3) + require.Contains(t, result, uint64(100)) + require.Contains(t, result, uint64(101)) + require.Contains(t, result, uint64(102)) + }) + + t.Run("multiple logs with duplicate block numbers", func(t *testing.T) { + logs := []ethtypes.Log{ + {BlockNumber: 100}, + {BlockNumber: 100}, + {BlockNumber: 101}, + {BlockNumber: 101}, + {BlockNumber: 102}, + } + result := getBlockNumbers(logs) + require.Len(t, result, 3) + require.Contains(t, result, uint64(100)) + require.Contains(t, result, uint64(101)) + require.Contains(t, result, uint64(102)) + }) +} + +func TestGetContracts(t *testing.T) { + t.Run("empty log queries", func(t *testing.T) { + queries := []mdrtypes.LogQuery{} + result := getContracts(queries) + require.Empty(t, result) + }) + + t.Run("single query with one address", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1}}, + } + result := getContracts(queries) + require.Len(t, result, 1) + require.Contains(t, result, addr1) + }) + + t.Run("multiple queries with unique addresses", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1}}, + {Addrs: []common.Address{addr2}}, + } + result := getContracts(queries) + require.Len(t, result, 2) + require.Contains(t, result, addr1) + require.Contains(t, result, addr2) + }) + + t.Run("multiple queries with duplicate addresses", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + queries := []mdrtypes.LogQuery{ + {Addrs: []common.Address{addr1, addr2}}, + {Addrs: []common.Address{addr1}}, + {Addrs: []common.Address{addr2}}, + } + result := getContracts(queries) + require.Len(t, result, 2) + require.Contains(t, result, addr1) + require.Contains(t, result, addr2) + }) +} + +func TestEVMMultidownloader_CheckIntegrityNewLogsBlockHeaders(t *testing.T) { + t.Run("empty logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + logs := []ethtypes.Log{} + headers := aggkittypes.ListBlockHeaders{} + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.NoError(t, err) + }) + + t.Run("matching logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + hash101 := common.HexToHash("0x101") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + {BlockNumber: 101, BlockHash: hash101}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: hash100}, + &aggkittypes.BlockHeader{Number: 101, Hash: hash101}, + } + + err := 
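TestGetBlockNumbers and TestGetContracts above only pin set semantics (length plus membership, no ordering), which is why they use require.Contains rather than asserting slice equality. A map-based deduplication consistent with those assertions (a sketch, not necessarily the real helper):

// Sketch: order-preserving dedup of log block numbers, as the tests imply.
func getBlockNumbersSketch(logs []ethtypes.Log) []uint64 {
	seen := make(map[uint64]struct{}, len(logs))
	out := make([]uint64, 0, len(logs))
	for _, l := range logs {
		if _, ok := seen[l.BlockNumber]; !ok {
			seen[l.BlockNumber] = struct{}{}
			out = append(out, l.BlockNumber)
		}
	}
	return out
}

getContracts admits the same shape over LogQuery.Addrs, keyed by common.Address instead of uint64.
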
data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.NoError(t, err) + }) + + t.Run("log with missing block header", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + {BlockNumber: 101, BlockHash: common.HexToHash("0x101")}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: hash100}, + } + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.Error(t, err) + require.Contains(t, err.Error(), "block header for log block number 101 not found") + }) + + t.Run("log with mismatched hash", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + + hash100 := common.HexToHash("0x100") + differentHash := common.HexToHash("0xDIFFERENT") + + logs := []ethtypes.Log{ + {BlockNumber: 100, BlockHash: hash100}, + } + headers := aggkittypes.ListBlockHeaders{ + &aggkittypes.BlockHeader{Number: 100, Hash: differentHash}, + } + + err := data.mdr.checkIntegrityNewLogsBlockHeaders(logs, headers) + require.Error(t, err) + require.Contains(t, err.Error(), "does not match block header hash") + }) +} + +func TestEVMMultidownloader_IsPartiallyAvailable(t *testing.T) { + t.Run("basic functionality", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + data.mockEthClient.EXPECT().ChainID(mock.Anything).Return(common.Big1, nil) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(200), nil).Maybe() + + err := data.mdr.RegisterSyncer(aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{ + common.HexToAddress("0x1"), + }, + FromBlock: 100, + ToBlock: aggkittypes.FinalizedBlock, + }) + require.NoError(t, err) + + err = data.mdr.Initialize(context.Background()) + require.NoError(t, err) + + // Query for blocks that are not yet synced + query := mdrtypes.LogQuery{ + BlockRange: aggkitcommon.NewBlockRange(100, 200), + Addrs: []common.Address{common.HexToAddress("0x1")}, + } + + // The function should not panic and return valid values + isPartial, partialQuery := data.mdr.IsPartiallyAvailable(query) + // Since nothing is synced yet, it might be partially available or not available + // We just verify it doesn't panic and returns consistent values + if isPartial { + require.NotNil(t, partialQuery) + } else { + require.Nil(t, partialQuery) + } + }) +} + +func TestEVMMultidownloader_GetLatestBlockNumber(t *testing.T) { + t.Run("success", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedBlockNumber := uint64(12345) + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock). + Return(expectedBlockNumber, nil).Once() + + blockNumber, err := data.mdr.GetLatestBlockNumber(ctx) + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, blockNumber) + }) + + t.Run("error", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedErr := fmt.Errorf("block number error") + data.mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(ctx, aggkittypes.LatestBlock). 
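The IsPartiallyAvailable assertions above fix the method's contract: it either reports a usable sub-query (true plus a non-nil query) or nothing at all (false plus nil). A caller-side usage sketch (function and variable names invented for illustration):

// Sketch: how a caller would act on IsPartiallyAvailable's contract.
func serveQuerySketch(mdr *EVMMultidownloader, q mdrtypes.LogQuery) {
	if ok, sub := mdr.IsPartiallyAvailable(q); ok {
		// sub describes the locally synced portion of q; the caller would
		// serve sub from storage and fetch the remaining range via RPC.
		_ = sub
	}
	// ok == false guarantees sub == nil: nothing can be served locally.
}
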
+ Return(uint64(0), expectedErr).Once() + + blockNumber, err := data.mdr.GetLatestBlockNumber(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get latest block") + require.Equal(t, uint64(0), blockNumber) + }) +} + +func TestEVMMultidownloader_ShowStatistics(t *testing.T) { + t.Run("show statistics", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, false) + // This should not panic + data.mdr.ShowStatistics(1) + data.mdr.ShowStatistics(10) + }) +} + +// mockDataError is a mock implementation of ethrpc.DataError for testing +type mockDataError struct { + msg string + data any +} + +func (e *mockDataError) Error() string { + return e.msg +} + +func (e *mockDataError) ErrorCode() int { + return -32000 +} + +func (e *mockDataError) ErrorData() any { + return e.data +} + +func Test_ethGetExtendedError(t *testing.T) { + t.Run("nil error returns empty string", func(t *testing.T) { + result := ethGetExtendedError(nil) + require.Equal(t, "", result) + }) + + t.Run("non-DataError returns empty string", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := ethGetExtendedError(err) + require.Equal(t, "", result) + }) + + t.Run("DataError returns formatted error data", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results", + } + result := ethGetExtendedError(dataErr) + require.Equal(t, "json_data: Query returned more than 20000 results", result) + }) +} + +func Test_isEthClientErrorTooManyResults(t *testing.T) { + t.Run("nil error returns false", func(t *testing.T) { + result := isEthClientErrorTooManyResults(nil) + require.False(t, result) + }) + + t.Run("regular error returns false", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := isEthClientErrorTooManyResults(err) + require.False(t, result) + }) + + t.Run("error with 'Response size exceeded' returns true", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Response size exceeded maximum limit", + } + result := isEthClientErrorTooManyResults(dataErr) + require.True(t, result) + }) + + t.Run("error with 'Query returned more than' returns true", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. Try with this block range [0x852c16, 0x853273].", + } + result := isEthClientErrorTooManyResults(dataErr) + require.True(t, result) + }) +} + +func Test_extractSuggestedBlockRangeFromError(t *testing.T) { + t.Run("nil error returns nil", func(t *testing.T) { + result := extractSuggestedBlockRangeFromError(nil) + require.Nil(t, result) + }) + + t.Run("non-too-many-results error returns nil", func(t *testing.T) { + err := fmt.Errorf("regular error") + result := extractSuggestedBlockRangeFromError(err) + require.Nil(t, result) + }) + + t.Run("error with valid block range returns BlockRange", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. Try with this block range [0x852c16, 0x853273].", + } + result := extractSuggestedBlockRangeFromError(dataErr) + require.NotNil(t, result) + require.Equal(t, uint64(0x852c16), result.FromBlock) + require.Equal(t, uint64(0x853273), result.ToBlock) + }) + + t.Run("error with invalid block range returns nil", func(t *testing.T) { + dataErr := &mockDataError{ + msg: "query error", + data: "Query returned more than 20000 results. 
Try with different range.", + } + result := extractSuggestedBlockRangeFromError(dataErr) + require.Nil(t, result) + }) +} + +func TestEVMMultidownloader_storeData(t *testing.T) { + t.Run("successful store", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + logs := []ethtypes.Log{{Address: common.HexToAddress("0x123")}} + blocks := aggkittypes.ListBlockHeaders{{Number: 100, Hash: common.HexToHash("0xabc")}} + updatedSegments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(100, 200), + aggkittypes.BlockNumberFinality{}, + false, + ), + } + + mockTx := dbmocks.NewTxer(t) + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, blocks, logs, true).Return(nil).Once() + data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, updatedSegments).Return(nil).Once() + mockTx.EXPECT().Commit().Return(nil).Once() + + err := data.mdr.storeData(ctx, logs, blocks, updatedSegments, true) + require.NoError(t, err) + }) + + t.Run("error creating transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + expectedErr := fmt.Errorf("tx creation error") + data.mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := data.mdr.storeData(ctx, nil, nil, nil, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create new tx") + }) + + t.Run("error saving logs and headers", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + logs := []ethtypes.Log{{Address: common.HexToAddress("0x123")}} + blocks := aggkittypes.ListBlockHeaders{{Number: 100}} + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("save error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, blocks, logs, true).Return(expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := data.mdr.storeData(ctx, logs, blocks, nil, true) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot save eth logs") + }) + + t.Run("error updating synced status", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + updatedSegments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(100, 200), + aggkittypes.BlockNumberFinality{}, + false, + ), + } + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("update error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, mock.Anything, mock.Anything, false).Return(nil).Once() + data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, updatedSegments).Return(expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := data.mdr.storeData(ctx, nil, nil, updatedSegments, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot update synced segments") + }) + + t.Run("error committing transaction", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + ctx := context.Background() + + mockTx := dbmocks.NewTxer(t) + expectedErr := fmt.Errorf("commit error") + data.mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + data.mockStorage.EXPECT().SaveEthLogsWithHeaders(mockTx, mock.Anything, mock.Anything, false).Return(nil).Once() + 
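The storeData subtests in this range pin a strict transaction discipline: one tx per batch, rollback on the first failing write, commit only after both writes succeed, and each failure wrapped with a distinct message. A sketch reconstructed purely from those mock expectations (the real method may differ in details):

// Sketch of storeData's tx discipline as the surrounding tests pin it down.
func storeDataSketch(ctx context.Context, s mdrtypes.Storager,
	logs []ethtypes.Log, blocks aggkittypes.ListBlockHeaders,
	segments []mdrtypes.SyncSegment, finalized bool) error {
	tx, err := s.NewTx(ctx)
	if err != nil {
		return fmt.Errorf("cannot create new tx: %w", err)
	}
	if err := s.SaveEthLogsWithHeaders(tx, blocks, logs, finalized); err != nil {
		_ = tx.Rollback()
		return fmt.Errorf("cannot save eth logs: %w", err)
	}
	if err := s.UpdateSyncedStatus(tx, segments); err != nil {
		_ = tx.Rollback()
		return fmt.Errorf("cannot update synced segments: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("cannot commit tx: %w", err)
	}
	return nil
}
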
data.mockStorage.EXPECT().UpdateSyncedStatus(mockTx, mock.Anything).Return(nil).Once() + mockTx.EXPECT().Commit().Return(expectedErr).Once() + + err := data.mdr.storeData(ctx, nil, nil, nil, false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + }) +} + +func TestEVMMultidownloader_newStateFromStorage(t *testing.T) { + t.Run("successful state creation", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + + // Mock GetCurrentBlockNumber for UpdateTargetBlockToNumber + data.mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(1000), nil).Maybe() + + // Mock storage response + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x123"), + aggkitcommon.NewBlockRange(0, 100), + aggkittypes.BlockNumberFinality{}, + false, + )) + data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). + Return(storageSyncSegments, nil).Once() + + state, err := data.mdr.newStateFromStorage(t.Context()) + require.NoError(t, err) + require.NotNil(t, state) + }) + + t.Run("error getting synced block ranges from storage", func(t *testing.T) { + data := newEVMMultidownloaderTestData(t, true) + + // Mock GetCurrentBlockNumber for UpdateTargetBlockToNumber + data.mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, mock.Anything). + Return(uint64(1000), nil).Maybe() + + // Mock storage to return error + expectedErr := fmt.Errorf("storage error") + emptySegments := mdrtypes.NewSetSyncSegment() + data.mockStorage.EXPECT().GetSyncedBlockRangePerContract(mock.Anything). + Return(emptySegments, expectedErr).Once() + + state, err := data.mdr.newStateFromStorage(t.Context()) + require.Error(t, err) + require.Nil(t, state) + require.Contains(t, err.Error(), "cannot get synced block ranges from storage") + }) +} + +// setupWaitForNewBlocksTest creates common test fixtures +func setupWaitForNewBlocksTest(t *testing.T) (*EVMMultidownloader, *aggkittypes.BlockHeader, *mocktypes.BaseEthereumClienter, *mockethermantypes.BlockNotifierManager) { + t.Helper() + mockEthClient := mocktypes.NewBaseEthereumClienter(t) + mockBlockNotifierManager := mockethermantypes.NewBlockNotifierManager(t) + logger := log.WithFields("test", "waitForNewBlocks") + + mdr := &EVMMultidownloader{ + log: logger, + ethClient: mockEthClient, + blockNotifierManager: mockBlockNotifierManager, + cfg: Config{ + PeriodToCheckReorgs: types.Duration{Duration: 10 * time.Millisecond}, + }, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + } + + return mdr, lastBlockHeader, mockEthClient, mockBlockNotifierManager +} + +func TestEVMMultidownloader_waitForNewBlocks(t *testing.T) { + t.Run("context cancelled", func(t *testing.T) { + mdr, lastBlockHeader, _, _ := setupWaitForNewBlocksTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + // Execute + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + require.Equal(t, context.Canceled, err) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("finalized - new block arrives", func(t *testing.T) { + mdr, lastBlockHeader, _, mockBlockNotifierManager := setupWaitForNewBlocksTest(t) + + // Mock: first call returns same block, second call returns new block + callCount := 0 + 
mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + RunAndReturn(func(ctx context.Context, blockTag aggkittypes.BlockNumberFinality) (uint64, error) { + callCount++ + if callCount == 1 { + return 100, nil // Same block + } + return 101, nil // New block + }) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.FinalizedBlock, lastBlockHeader, mdrtypes.Finalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("finalized - error getting current block number", func(t *testing.T) { + mdr, lastBlockHeader, _, mockBlockNotifierManager := setupWaitForNewBlocksTest(t) + + expectedErr := fmt.Errorf("RPC error") + mockBlockNotifierManager.EXPECT(). + GetCurrentBlockNumber(mock.Anything, aggkittypes.FinalizedBlock). + Return(uint64(0), expectedErr).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.FinalizedBlock, lastBlockHeader, mdrtypes.Finalized) + + // Assert + require.Error(t, err) + require.Contains(t, err.Error(), "WaitForNewBlocks: cannot get current block number") + require.Contains(t, err.Error(), "RPC error") + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - new block arrives", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + // Mock: return new block immediately + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - error getting current header", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + expectedErr := fmt.Errorf("RPC error") + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(nil, expectedErr).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + require.Contains(t, err.Error(), "WaitForNewBlocks: cannot get current block header") + require.Contains(t, err.Error(), "RPC error") + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - block hash mismatch at same block", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + // Mock: return same block number but different hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), // Different hash! 
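The reorg subtests that follow enumerate three detection reasons for the not-finalized path. A condensed sketch of that classification (the ReorgDetectionReason type name is inferred from the constants the assertions use; the real method folds this into its polling loop):

// Sketch: the three reorg signals the following subtests assert on.
func classifyReorgSketch(last, cur *aggkittypes.BlockHeader) (reason mdrtypes.ReorgDetectionReason, reorg bool) {
	switch {
	case cur.Number == last.Number && cur.Hash != last.Hash:
		return mdrtypes.ReorgDetectionReason_BlockHashMismatch, true
	case cur.Number == last.Number+1 && cur.ParentHash != nil && *cur.ParentHash != last.Hash:
		return mdrtypes.ReorgDetectionReason_ParentHashMismatch, true
	case cur.Number < last.Number:
		return mdrtypes.ReorgDetectionReason_MissingBlock, true
	default:
		return reason, false // same hash at same height, or a clean next block
	}
}
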
+ }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_BlockHashMismatch, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Hash, reorgErr.OldHash) + require.Equal(t, common.HexToHash("0x5678"), reorgErr.NewHash) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - parent hash mismatch at next block", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + wrongParentHash := common.HexToHash("0x9999") + // Mock: return next block (101) with wrong parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + ParentHash: &wrongParentHash, // Wrong parent hash! + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_ParentHashMismatch, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Hash, reorgErr.OldHash) + require.Equal(t, wrongParentHash, reorgErr.NewHash) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - reorg detected - current block less than last block", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + // Mock: return lower block number (reorg happened) + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 95, // Lower than last synced block! + Hash: common.HexToHash("0x5678"), + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.Error(t, err) + var reorgErr *mdrtypes.DetectedReorgError + require.True(t, mdrtypes.IsDetectedReorgError(err)) + require.ErrorAs(t, err, &reorgErr) + require.Equal(t, mdrtypes.ReorgDetectionReason_MissingBlock, reorgErr.ReorgDetectionReason) + require.Equal(t, lastBlockHeader.Number, reorgErr.OffendingBlockNumber) + require.Equal(t, lastBlockHeader.Number, blockNumber) + }) + + t.Run("not finalized - same block number with same hash - no reorg", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + // Mock: first returns same block with same hash, second returns new block + callCount := 0 + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). 
+ RunAndReturn(func(ctx context.Context, blockTag *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { + callCount++ + if callCount == 1 { + return &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), // Same hash - no reorg + }, nil + } + return &aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + }, nil + }) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - next block with correct parent hash - no reorg", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + correctParentHash := common.HexToHash("0x1234") + // Mock: return next block with correct parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). + Return(&aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x5678"), + ParentHash: &correctParentHash, // Correct parent hash + }, nil).Once() + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized) + + // Assert + require.NoError(t, err) + require.Equal(t, uint64(101), blockNumber) + }) + + t.Run("not finalized - next block without parent hash - no parent check", func(t *testing.T) { + mdr, lastBlockHeader, mockEthClient, _ := setupWaitForNewBlocksTest(t) + + // Mock: return next block without parent hash + mockEthClient.EXPECT(). + CustomHeaderByNumber(mock.Anything, mock.Anything). 
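Taken together, the subtests around this point also pin the polling shape of waitForNewBlocks for the not-finalized path: poll every PeriodToCheckReorgs, return the new height once it advances, propagate ctx.Err() together with the last known height on cancellation (with no RPC call if the context is already dead), and wrap RPC failures with the "WaitForNewBlocks:" prefix. A sketch under those constraints (assumed shape, reorg classification omitted):

// Sketch: polling loop consistent with the mocks and assertions above.
func waitForNewBlocksSketch(ctx context.Context, mdr *EVMMultidownloader,
	tag aggkittypes.BlockNumberFinality, last *aggkittypes.BlockHeader) (uint64, error) {
	ticker := time.NewTicker(mdr.cfg.PeriodToCheckReorgs.Duration)
	defer ticker.Stop()
	for {
		if err := ctx.Err(); err != nil {
			return last.Number, err // cancelled: report the last known height
		}
		cur, err := mdr.ethClient.CustomHeaderByNumber(ctx, &tag)
		if err != nil {
			return last.Number, fmt.Errorf("WaitForNewBlocks: cannot get current block header: %w", err)
		}
		if cur.Number > last.Number {
			return cur.Number, nil // reorg checks on cur omitted in this sketch
		}
		select {
		case <-ctx.Done():
			return last.Number, ctx.Err()
		case <-ticker.C: // poll again after PeriodToCheckReorgs
		}
	}
}
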
+			Return(&aggkittypes.BlockHeader{
+				Number:     101,
+				Hash:       common.HexToHash("0x5678"),
+				ParentHash: nil, // No parent hash to check
+			}, nil).Once()
+
+		// Execute
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+		defer cancel()
+		blockNumber, err := mdr.waitForNewBlocks(ctx, aggkittypes.LatestBlock, lastBlockHeader, mdrtypes.NotFinalized)
+
+		// Assert
+		require.NoError(t, err)
+		require.Equal(t, uint64(101), blockNumber)
+	})
+}
diff --git a/multidownloader/reorg_processor.go b/multidownloader/reorg_processor.go
new file mode 100644
index 000000000..4adf65e60
--- /dev/null
+++ b/multidownloader/reorg_processor.go
@@ -0,0 +1,173 @@
+package multidownloader
+
+import (
+	"context"
+	"fmt"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	dbtypes "github.com/agglayer/aggkit/db/types"
+	mdtypes "github.com/agglayer/aggkit/multidownloader/types"
+	aggkittypes "github.com/agglayer/aggkit/types"
+)
+
+type ReorgProcessor struct {
+	log           aggkitcommon.Logger
+	port          mdtypes.ReorgPorter
+	developerMode bool
+}
+
+func NewReorgProcessor(log aggkitcommon.Logger,
+	ethClient aggkittypes.BaseEthereumClienter,
+	rpcClient aggkittypes.RPCClienter,
+	storage mdtypes.Storager,
+	developerMode bool) *ReorgProcessor {
+	return &ReorgProcessor{
+		log: log,
+		port: &ReorgPort{
+			ethClient: ethClient,
+			rpcClient: rpcClient,
+			storage:   storage,
+		},
+		developerMode: developerMode,
+	}
+}
+
+// ProcessReorg handles a reorg detected at detectedReorgError.OffendingBlockNumber:
+// - it finds the affected blocks
+// - it stores the reorg info in storage
+// params:
+// - detectedReorgError: the error returned by the reorg detection logic, containing the
+// offending block number and the reason for the reorg detection
+// - finalizedBlockTag: the block tag to consider as finalized (typically finalizedBlock)
+func (rm *ReorgProcessor) ProcessReorg(ctx context.Context,
+	detectedReorgError mdtypes.DetectedReorgError,
+	finalizedBlockTag aggkittypes.BlockNumberFinality) error {
+	var err error
+	// We know that offendingBlockNumber is affected, so we go backwards until we find
+	// the first unaffected block
+	offendingBlockNumber := detectedReorgError.OffendingBlockNumber
+	if offendingBlockNumber == 0 {
+		return fmt.Errorf("ProcessReorg: reorg detected at block 0, " +
+			"this should never happen, check the reorg detection logic")
+	}
+	tx, err := rm.port.NewTx(ctx)
+	if err != nil {
+		return fmt.Errorf("ProcessReorg: error starting new tx: %w", err)
+	}
+	committed := false
+	defer func() {
+		if !committed {
+			rm.log.Debugf("ProcessReorg: rolling back tx")
+			if err := tx.Rollback(); err != nil {
+				rm.log.Errorf("ProcessReorg: error rolling back tx: %v", err)
+			}
+		}
+	}()
+	firstUnaffectedBlock, err := rm.findFirstUnaffectedBlock(ctx, tx, offendingBlockNumber-1)
+	if err != nil {
+		return fmt.Errorf("ProcessReorg: error finding first unaffected block: %w", err)
+	}
+	if detectedReorgError.ReorgDetectionReason == mdtypes.ReorgDetectionReason_Forced {
+		if rm.developerMode {
+			rm.log.Warnf("ProcessReorg: executing a forced reorg at block %d. "+
+				"It is treated as missing blocks, so blocks > %d are going to be deleted, "+
+				"overriding the real first unaffected block found (%d). "+
+				"This is forbidden in production, but developerMode is enabled
", + offendingBlockNumber, offendingBlockNumber, firstUnaffectedBlock) + firstUnaffectedBlock = offendingBlockNumber - 1 + } else { + rm.log.Warnf("ProcessReorg: forced reorg at block %d with developerMode disabled, "+ + "using the first unaffected block found %d", + offendingBlockNumber, firstUnaffectedBlock) + // Continue with the reorg using the firstUnaffectedBlock found + } + } + + lastBlockNumberInStorage, err := rm.port.GetLastBlockNumberInStorage(tx) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting last block number in storage: %w", err) + } + latestBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting latest block number in RPC: %w", err) + } + finalizedBlockNumberInRPC, err := rm.port.GetBlockNumberInRPC(ctx, finalizedBlockTag) + if err != nil { + return fmt.Errorf("ProcessReorg: error getting finalized block number in RPC: %w", err) + } + rm.log.Infof("ProcessReorg: reorg detected from block %d to block %d", + firstUnaffectedBlock+1, lastBlockNumberInStorage) + // TODO: Add hash to blockNumbers + reorgData := mdtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(firstUnaffectedBlock+1, lastBlockNumberInStorage), + DetectedAtBlock: detectedReorgError.OffendingBlockNumber, + DetectedTimestamp: rm.port.TimeNowUnix(), + NetworkLatestBlock: latestBlockNumberInRPC, + NetworkFinalizedBlock: finalizedBlockNumberInRPC, + NetworkFinalizedBlockName: finalizedBlockTag, + Description: detectedReorgError.Error(), + } + reorgID, err := rm.port.MoveReorgedBlocks(tx, reorgData) + if err != nil { + return fmt.Errorf("ProcessReorg: error moving reorged blocks: %w", err) + } + reorgData.ReorgID = reorgID + committed = true + if err := tx.Commit(); err != nil { + return fmt.Errorf("ProcessReorg: cannot commit tx: %w", err) + } + rm.log.Warnf("ProcessReorg: finalized reorgProcess: %s", reorgData.String()) + return nil +} + +func (rm *ReorgProcessor) findFirstUnaffectedBlock(ctx context.Context, + tx dbtypes.Querier, + startBlockNumber uint64) (uint64, error) { + currentBlockNumber := startBlockNumber + for { + if currentBlockNumber == 0 { + // Genesis block reached, stop here + return 0, fmt.Errorf("findFirstUnaffectedBlock: genesis block reached while checking reorgs, "+ + "cannot find unaffected block. 
First block checked: %d", startBlockNumber)
+		}
+		data, err := rm.port.GetBlockStorageAndRPC(ctx, tx, currentBlockNumber)
+		if err != nil {
+			return 0, fmt.Errorf("findFirstUnaffectedBlock: error getting block storage and RPC: %w", err)
+		}
+		match, err := rm.checkBlocks(data)
+		if err != nil {
+			return 0, fmt.Errorf("findFirstUnaffectedBlock: error checking blocks: %w", err)
+		}
+		if match {
+			// Found the first unaffected block
+			return currentBlockNumber, nil
+		}
+		currentBlockNumber--
+	}
+}
+
+// checkBlocks compares storage and RPC block headers and returns true if they match
+func (rm *ReorgProcessor) checkBlocks(blocks *mdtypes.CompareBlockHeaders) (bool, error) {
+	if blocks == nil {
+		return false, fmt.Errorf("checkBlocks: blocks is nil")
+	}
+	if blocks.StorageHeader == nil || blocks.RpcHeader == nil {
+		// Block missing in storage or in RPC, so it is a mismatch
+		rm.log.Warnf("checkBlocks: block %d presence mismatch: existsInStorage=%t existsInRPC=%t",
+			blocks.BlockNumber, blocks.ExistsStorageBlock(), blocks.ExistsRPCBlock())
+		return false, nil
+	}
+	if blocks.StorageHeader.Number != blocks.RpcHeader.Number {
+		return false, fmt.Errorf("checkBlocks: block numbers do not match: storage=%d rpc=%d",
+			blocks.StorageHeader.Number, blocks.RpcHeader.Number)
+	}
+	// Sanity check: this should never happen, because we trust finalized blocks
+	if blocks.StorageHeader.Hash != blocks.RpcHeader.Hash {
+		if blocks.IsFinalized == mdtypes.Finalized {
+			rm.log.Warnf("checkBlocks: block %d is finalized but hashes mismatch: %s != %s", blocks.StorageHeader.Number,
+				blocks.StorageHeader.Hash.Hex(), blocks.RpcHeader.Hash.Hex())
+		}
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/multidownloader/reorg_processor_port.go b/multidownloader/reorg_processor_port.go
new file mode 100644
index 000000000..0bc492949
--- /dev/null
+++ b/multidownloader/reorg_processor_port.go
@@ -0,0 +1,67 @@
+package multidownloader
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	dbtypes "github.com/agglayer/aggkit/db/types"
+	"github.com/agglayer/aggkit/etherman"
+	mdtypes "github.com/agglayer/aggkit/multidownloader/types"
+	aggkittypes "github.com/agglayer/aggkit/types"
+)
+
+type ReorgPort struct {
+	ethClient aggkittypes.BaseEthereumClienter
+	rpcClient aggkittypes.RPCClienter
+	storage   mdtypes.Storager
+}
+
+func (r *ReorgPort) NewTx(ctx context.Context) (dbtypes.Txer, error) {
+	return r.storage.NewTx(ctx)
+}
+
+func (r *ReorgPort) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier,
+	blockNumber uint64) (*mdtypes.CompareBlockHeaders, error) {
+	currentStorageBlock, finalized, err := r.storage.GetBlockHeaderByNumber(tx, blockNumber)
+	if err != nil {
+		return nil, fmt.Errorf("error getting block in storage: %w", err)
+	}
+	rpcBlock, err := r.ethClient.CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber))
+	if err != nil && !etherman.IsErrNotFound(err) {
+		return nil, fmt.Errorf("error getting block in RPC: %w", err)
+	}
+	return &mdtypes.CompareBlockHeaders{
+		BlockNumber:   blockNumber,
+		StorageHeader: currentStorageBlock,
+		IsFinalized:   finalized,
+		RpcHeader:     rpcBlock,
+	}, nil
+}
+
+func (r *ReorgPort) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) {
+	highestBlock, err := r.storage.GetHighestBlockNumber(tx)
+	if err != nil {
+		return 0, fmt.Errorf("GetLastBlockNumberInStorage: error getting highest block from storage: %w", err)
+	}
+	return highestBlock, nil
+}
+
+func (r *ReorgPort) MoveReorgedBlocks(tx dbtypes.Querier, reorgData mdtypes.ReorgData) (uint64, error) {
+	return
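A worked example of the range arithmetic ProcessReorg performs (all numbers invented): if the offending block is 150, findFirstUnaffectedBlock walks 149, 148, ... until storage and RPC headers match, say at 142, and the highest block in storage is 180; the reorg then covers blocks 143 through 180:

// Hypothetical values illustrating ProcessReorg's BlockRangeAffected.
func affectedRangeExample() {
	firstUnaffectedBlock := uint64(142)     // found by walking back from 149
	lastBlockNumberInStorage := uint64(180) // highest block currently stored
	affected := aggkitcommon.NewBlockRange(firstUnaffectedBlock+1, lastBlockNumberInStorage)
	_ = affected // blocks 143..180 get moved to the reorg tables
}
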
r.storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) +} + +func (r *ReorgPort) GetBlockNumberInRPC( + ctx context.Context, blockFinality aggkittypes.BlockNumberFinality, +) (uint64, error) { + blockNumber, err := r.ethClient.CustomHeaderByNumber(ctx, &blockFinality) + if err != nil { + return 0, fmt.Errorf("GetBlockNumberInRPC: error getting block number for %s from RPC: %w", + blockFinality.String(), err) + } + return blockNumber.Number, nil +} + +func (r *ReorgPort) TimeNowUnix() uint64 { + return uint64(time.Now().Unix()) +} diff --git a/multidownloader/reorg_processor_port_test.go b/multidownloader/reorg_processor_port_test.go new file mode 100644 index 000000000..1abdf51be --- /dev/null +++ b/multidownloader/reorg_processor_port_test.go @@ -0,0 +1,345 @@ +package multidownloader + +import ( + "context" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbmocks "github.com/agglayer/aggkit/db/mocks" + "github.com/agglayer/aggkit/etherman" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" + aggkittypes "github.com/agglayer/aggkit/types" + typesmocks "github.com/agglayer/aggkit/types/mocks" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestReorgPort_NewTx(t *testing.T) { + t.Run("successfully creates new transaction", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewTxer(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + ctx := context.Background() + mockStorage.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + result, err := reorgPort.NewTx(ctx) + + require.NoError(t, err) + require.Equal(t, mockTx, result) + }) + + t.Run("returns error when NewTx fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("database connection error") + mockStorage.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + result, err := reorgPort.NewTx(ctx) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Nil(t, result) + }) +} + +func TestReorgPort_GetBlockStorageAndRPC(t *testing.T) { + t.Run("successfully gets block from both storage and RPC", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + rpcHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). 
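ReorgPort is the concrete adapter behind the mdtypes.ReorgPorter port that ReorgProcessor talks to, which is what lets the tests below exercise each adapter method with storage and client mocks while production wires real dependencies. Construction sketch, following the NewReorgProcessor signature above (the dependencies are assumed to come from the caller):

// Sketch: wiring a ReorgProcessor over the real clients and storage.
func newReorgProcessorSketch(logger aggkitcommon.Logger,
	ethClient aggkittypes.BaseEthereumClienter,
	rpcClient aggkittypes.RPCClienter,
	storage mdtypes.Storager) *ReorgProcessor {
	// developerMode=false: a forced reorg falls back to the first unaffected
	// block actually found instead of deleting everything above the forced one.
	return NewReorgProcessor(logger, ethClient, rpcClient, storage, false)
}
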
+ Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, blockNumber, result.BlockNumber) + require.Equal(t, storageHeader, result.StorageHeader) + require.Equal(t, rpcHeader, result.RpcHeader) + require.Equal(t, mdtypes.Finalized, result.IsFinalized) + }) + + t.Run("returns error when storage GetBlockHeaderByNumber fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + expectedErr := fmt.Errorf("storage error") + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(nil, mdtypes.NotFinalized, expectedErr).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block in storage") + require.Nil(t, result) + }) + + t.Run("returns error when RPC CustomHeaderByNumber fails with non-NotFound error", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + expectedErr := fmt.Errorf("RPC connection error") + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). + Return(nil, expectedErr).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block in RPC") + require.Nil(t, result) + }) + + t.Run("handles NotFound error from RPC gracefully", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + ethClient: mockEthClient, + } + + ctx := context.Background() + blockNumber := uint64(100) + + storageHeader := &aggkittypes.BlockHeader{ + Number: blockNumber, + Hash: common.HexToHash("0x1234"), + } + + mockStorage.EXPECT().GetBlockHeaderByNumber(mockTx, blockNumber). + Return(storageHeader, mdtypes.Finalized, nil).Once() + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, aggkittypes.NewBlockNumber(blockNumber)). 
+ Return(nil, etherman.ErrNotFound).Once() + + result, err := reorgPort.GetBlockStorageAndRPC(ctx, mockTx, blockNumber) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, blockNumber, result.BlockNumber) + require.Equal(t, storageHeader, result.StorageHeader) + require.Nil(t, result.RpcHeader) + require.Equal(t, mdtypes.Finalized, result.IsFinalized) + }) +} + +func TestReorgPort_GetLastBlockNumberInStorage(t *testing.T) { + t.Run("successfully gets highest block number", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + expectedBlockNumber := uint64(12345) + mockStorage.EXPECT().GetHighestBlockNumber(mock.Anything). + Return(expectedBlockNumber, nil).Once() + + result, err := reorgPort.GetLastBlockNumberInStorage(mockTx) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("returns error when GetHighestBlockNumber fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + expectedErr := fmt.Errorf("database query error") + mockStorage.EXPECT().GetHighestBlockNumber(mock.Anything). + Return(uint64(0), expectedErr).Once() + + result, err := reorgPort.GetLastBlockNumberInStorage(mockTx) + + require.Error(t, err) + require.Contains(t, err.Error(), "GetLastBlockNumberInStorage") + require.Contains(t, err.Error(), "error getting highest block from storage") + require.Equal(t, uint64(0), result) + }) +} + +func TestReorgPort_MoveReorgedBlocks(t *testing.T) { + t.Run("successfully moves reorged blocks", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + reorgData := mdtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + } + expectedAffectedRows := uint64(101) + + mockStorage.EXPECT().InsertReorgAndMoveReorgedBlocksAndLogs(mockTx, reorgData). + Return(expectedAffectedRows, nil).Once() + + result, err := reorgPort.MoveReorgedBlocks(mockTx, reorgData) + + require.NoError(t, err) + require.Equal(t, expectedAffectedRows, result) + }) + + t.Run("returns error when InsertReorgAndMoveReorgedBlocksAndLogs fails", func(t *testing.T) { + mockStorage := mdmocks.NewStorager(t) + mockTx := dbmocks.NewQuerier(t) + + reorgPort := &ReorgPort{ + storage: mockStorage, + } + + reorgData := mdtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + } + expectedErr := fmt.Errorf("transaction failed") + + mockStorage.EXPECT().InsertReorgAndMoveReorgedBlocksAndLogs(mockTx, reorgData). 
+ Return(uint64(0), expectedErr).Once() + + result, err := reorgPort.MoveReorgedBlocks(mockTx, reorgData) + + require.Error(t, err) + require.Equal(t, expectedErr, err) + require.Equal(t, uint64(0), result) + }) +} + +func TestReorgPort_GetBlockNumberInRPC(t *testing.T) { + t.Run("successfully gets block number from RPC with latest finality", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Latest} + expectedBlockNumber := uint64(500) + + rpcHeader := &aggkittypes.BlockHeader{ + Number: expectedBlockNumber, + Hash: common.HexToHash("0xabcd"), + } + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). + Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("successfully gets block number from RPC with finalized finality", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Finalized} + expectedBlockNumber := uint64(450) + + rpcHeader := &aggkittypes.BlockHeader{ + Number: expectedBlockNumber, + Hash: common.HexToHash("0xdef0"), + } + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). + Return(rpcHeader, nil).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.NoError(t, err) + require.Equal(t, expectedBlockNumber, result) + }) + + t.Run("returns error when CustomHeaderByNumber fails", func(t *testing.T) { + mockEthClient := typesmocks.NewBaseEthereumClienter(t) + + reorgPort := &ReorgPort{ + ethClient: mockEthClient, + } + + ctx := context.Background() + blockFinality := aggkittypes.BlockNumberFinality{Block: aggkittypes.Latest} + expectedErr := fmt.Errorf("RPC connection timeout") + + mockEthClient.EXPECT().CustomHeaderByNumber(ctx, &blockFinality). 
+ Return(nil, expectedErr).Once() + + result, err := reorgPort.GetBlockNumberInRPC(ctx, blockFinality) + + require.Error(t, err) + require.Contains(t, err.Error(), "GetBlockNumberInRPC") + require.Contains(t, err.Error(), "error getting block number") + require.Equal(t, uint64(0), result) + }) +} diff --git a/multidownloader/reorg_processor_test.go b/multidownloader/reorg_processor_test.go new file mode 100644 index 000000000..4d967ba7a --- /dev/null +++ b/multidownloader/reorg_processor_test.go @@ -0,0 +1,884 @@ +package multidownloader + +import ( + "context" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + commonmocks "github.com/agglayer/aggkit/common/mocks" + dbmocks "github.com/agglayer/aggkit/db/mocks" + "github.com/agglayer/aggkit/log" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + mdmocks "github.com/agglayer/aggkit/multidownloader/types/mocks" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestReorgProcessor_CheckBlocks(t *testing.T) { + t.Run("returns error when blocks is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + match, err := processor.checkBlocks(nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "blocks is nil") + require.False(t, match) + }) + + t.Run("returns false when storage header is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: nil, + RpcHeader: &aggkittypes.BlockHeader{Number: 100}, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns false when RPC header is nil", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{Number: 100}, + RpcHeader: nil, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns error when block numbers do not match", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0x1234"), + }, + } + + match, err := processor.checkBlocks(blocks) + + require.Error(t, err) + require.Contains(t, err.Error(), "block numbers do not match") + require.False(t, match) + }) + + t.Run("returns false when hashes do not match (not finalized)", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + IsFinalized: mdtypes.NotFinalized, + } + + match, err := 
processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns false when hashes do not match (finalized, logs warning)", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Once() + + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + IsFinalized: mdtypes.Finalized, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.False(t, match) + }) + + t.Run("returns true when blocks match", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + processor := &ReorgProcessor{log: mockLogger} + + hash := common.HexToHash("0x1234") + blocks := &mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: hash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: hash, + }, + IsFinalized: mdtypes.Finalized, + } + + match, err := processor.checkBlocks(blocks) + + require.NoError(t, err) + require.True(t, match) + }) +} + +func TestReorgProcessor_FindFirstUnaffectedBlock(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + + t.Run("returns error when genesis block is reached", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + hash1 := common.HexToHash("0x1234") + hash2 := common.HexToHash("0x5678") + + // Block 1 - mismatch, then loop decrements to 0 and checks genesis before calling again + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(1)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 1, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 1, + Hash: hash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 1, + Hash: hash2, + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 1) + + require.Error(t, err) + require.Contains(t, err.Error(), "genesis block reached") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("returns error when GetBlockStorageAndRPC fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("RPC connection error") + + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). 
+ Return(nil, expectedErr).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting block storage and RPC") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("finds first unaffected block after checking multiple blocks", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + differentHash1 := common.HexToHash("0x1234") + differentHash2 := common.HexToHash("0x5678") + + // Block 102 - mismatch + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(102)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 102, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 102, + Hash: differentHash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 102, + Hash: differentHash2, + }, + }, nil).Once() + + // Block 101 - mismatch + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(101)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 101, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: differentHash1, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, + Hash: differentHash2, + }, + }, nil).Once() + + // Block 100 - match (first unaffected) + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 102) + + require.NoError(t, err) + require.Equal(t, uint64(100), result) + mockPort.AssertExpectations(t) + }) +} + +func TestReorgProcessor_ProcessReorg(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + t.Run("can't reorg if the offending block is genesis (0)", func(t *testing.T) { + processor := &ReorgProcessor{ + log: log.WithFields("module", "test"), + port: nil, + } + ctx := context.Background() + reorgErr := mdtypes.NewDetectedReorgError( + 0, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg at genesis") + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + require.Error(t, err) + }) + t.Run("returns error when NewTx fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("transaction creation error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockPort.EXPECT().NewTx(ctx).Return(nil, expectedErr).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error starting new tx") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when findFirstUnaffectedBlock fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + expectedErr := fmt.Errorf("block search error") + 
reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(nil, expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error finding first unaffected block") + mockPort.AssertExpectations(t) + }) + + t.Run("successfully processes reorg and commits transaction", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + nowValue := uint64(1234567890) + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + mockPort.EXPECT().TimeNowUnix().Return(nowValue).Maybe() + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + offendingBlockNumber := uint64(105) + firstUnaffectedBlock := uint64(100) + lastBlockInStorage := uint64(110) + latestBlockInRPC := uint64(115) + finalizedBlockInRPC := uint64(100) + chainID := uint64(1) + reorgErr := mdtypes.NewDetectedReorgError( + offendingBlockNumber, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockLogger.EXPECT().Warnf(mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // findFirstUnaffectedBlock: Block 104 matches (first unaffected) + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, offendingBlockNumber-1). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: firstUnaffectedBlock, + StorageHeader: &aggkittypes.BlockHeader{ + Number: firstUnaffectedBlock, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: firstUnaffectedBlock, + Hash: matchingHash, + }, + }, nil).Once() + + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(lastBlockInStorage, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(latestBlockInRPC, nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(finalizedBlockInRPC, nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() + + mockTx.EXPECT().Commit().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.NoError(t, err) + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetLastBlockNumberInStorage fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("storage query error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). 
+ Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting last block number in storage") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when MoveReorgedBlocks fails", func(t *testing.T) { + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + mockPort.EXPECT().TimeNowUnix().Return(1234567890).Maybe() + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("move blocks error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error moving reorged blocks") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetBlockNumberInRPC for latest fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("RPC error for latest") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). 
+ Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting latest block number in RPC") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when GetBlockNumberInRPC for finalized fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("RPC error for finalized") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(0), expectedErr).Once() + mockTx.EXPECT().Rollback().Return(nil).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error getting finalized block number in RPC") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error and rolls back when Commit fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + nowValue := uint64(1234567890) + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + mockPort.EXPECT().TimeNowUnix().Return(nowValue).Maybe() + ctx := context.Background() + matchingHash := common.HexToHash("0xabcd") + expectedErr := fmt.Errorf("commit failed") + chainID := uint64(1) + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Infof(mock.Anything, mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). 
+ Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: matchingHash, + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(115), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(100), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(chainID, nil).Once() + mockTx.EXPECT().Commit().Return(expectedErr).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot commit tx") + mockPort.AssertExpectations(t) + }) + + t.Run("returns error when checkBlocks fails in findFirstUnaffectedBlock", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewQuerier(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + + // Return blocks with mismatched block numbers which will cause checkBlocks to error + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(100)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 101, // Different block number will cause error + Hash: common.HexToHash("0x1234"), + }, + }, nil).Once() + + result, err := processor.findFirstUnaffectedBlock(ctx, mockTx, 100) + + require.Error(t, err) + require.Contains(t, err.Error(), "error checking blocks") + require.Equal(t, uint64(0), result) + mockPort.AssertExpectations(t) + }) + + t.Run("logs error when rollback fails", func(t *testing.T) { + mockLogger := commonmocks.NewLogger(t) + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: mockLogger, + port: mockPort, + } + + ctx := context.Background() + rollbackErr := fmt.Errorf("rollback failed") + originalErr := fmt.Errorf("original error") + reorgErr := mdtypes.NewDetectedReorgError( + 100, + mdtypes.ReorgDetectionReason_BlockHashMismatch, + common.HexToHash("0x1234"), + common.HexToHash("0x5678"), + "test reorg", + ) + + mockLogger.EXPECT().Debugf(mock.Anything).Once() + mockLogger.EXPECT().Errorf(mock.Anything, mock.Anything).Once() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). 
+ Return(nil, originalErr).Once() + mockTx.EXPECT().Rollback().Return(rollbackErr).Once() + + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + + require.Error(t, err) + require.Contains(t, err.Error(), "error finding first unaffected block") + mockPort.AssertExpectations(t) + }) +} + +func TestReorgProcessor_ForcedReorgInDeveloperMode(t *testing.T) { + testCases := []struct { + name string + developerMode bool + expectedReorgStartBlock uint64 + expectedReorgDescription string + }{ + { + name: "with developerMode enabled - reorgs from detected block", + developerMode: true, + expectedReorgStartBlock: 100, + expectedReorgDescription: "Reorgs from detected block (overriding first unaffected block)", + }, + { + name: "with developerMode disabled - reorgs from first unaffected block", + developerMode: false, + expectedReorgStartBlock: 99, + expectedReorgDescription: "Reorgs from first unaffected block + 1", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testForcedReorg(t, tc.developerMode, tc.expectedReorgStartBlock) + }) + } +} + +func TestReorgProcessor_ReorgMissingBlock(t *testing.T) { + logger := log.WithFields("module", "test") + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: logger, + port: mockPort, + developerMode: false, + } + ctx := context.Background() + detectedReorgBlock := uint64(100) + reorgErr := mdtypes.NewDetectedReorgError( + detectedReorgBlock, + mdtypes.ReorgDetectionReason_MissingBlock, + common.Hash{}, + common.Hash{}, + "test reorg", + ) + nowTimestamp := uint64(1234567890) + mockPort.EXPECT().TimeNowUnix().Return(nowTimestamp).Maybe() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 99, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: nil, // Missing block in RPC will cause GetBlockStorageAndRPC to return nil for RpcHeader + }, nil).Once() + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). 
+ Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 98, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 98, + Hash: common.HexToHash("0x1234"), + }, + }, nil).Once() + mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(uint64(110), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(uint64(98), nil).Once() + mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(uint64(90), nil).Once() + mockPort.EXPECT().MoveReorgedBlocks(mockTx, mock.Anything).Return(uint64(1), nil).Once() + mockTx.EXPECT().Commit().Return(nil).Once() + err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock) + require.NoError(t, err) +} + +func testForcedReorg(t *testing.T, developerMode bool, expectedReorgStartBlock uint64) { + t.Helper() + + logger := log.WithFields("module", "test") + mockPort := mdmocks.NewReorgPorter(t) + mockTx := dbmocks.NewTxer(t) + + processor := &ReorgProcessor{ + log: logger, + port: mockPort, + developerMode: developerMode, + } + + ctx := context.Background() + detectedReorgBlock := uint64(100) + reorgErr := mdtypes.NewDetectedReorgError( + detectedReorgBlock, + mdtypes.ReorgDetectionReason_Forced, + common.Hash{}, + common.Hash{}, + "test reorg", + ) + nowTimestamp := uint64(1234567890) + lastBlockInStorage := uint64(110) + latestBlockInRPC := uint64(115) + finalizedBlockInRPC := uint64(100) + + // Setup mock expectations + mockPort.EXPECT().TimeNowUnix().Return(nowTimestamp).Maybe() + mockPort.EXPECT().NewTx(ctx).Return(mockTx, nil).Once() + + // Mock block 99 - mismatch + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(99)). + Return(&mdtypes.CompareBlockHeaders{ + BlockNumber: 99, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x1234"), + }, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 99, + Hash: common.HexToHash("0x5678"), + }, + }, nil).Once() + + // Mock block 98 - match (first unaffected block) + mockPort.EXPECT().GetBlockStorageAndRPC(ctx, mockTx, uint64(98)). 
+ Return(&mdtypes.CompareBlockHeaders{
+ BlockNumber: 98,
+ StorageHeader: &aggkittypes.BlockHeader{
+ Number: 98,
+ Hash: common.HexToHash("0x1234"),
+ },
+ RpcHeader: &aggkittypes.BlockHeader{
+ Number: 98,
+ Hash: common.HexToHash("0x1234"),
+ },
+ }, nil).Once()
+
+ mockPort.EXPECT().GetLastBlockNumberInStorage(mockTx).Return(lastBlockInStorage, nil).Once()
+ mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.LatestBlock).Return(latestBlockInRPC, nil).Once()
+ mockPort.EXPECT().GetBlockNumberInRPC(ctx, aggkittypes.FinalizedBlock).Return(finalizedBlockInRPC, nil).Once()
+
+ expectedReorgData := mdtypes.ReorgData{
+ BlockRangeAffected: aggkitcommon.NewBlockRange(expectedReorgStartBlock, lastBlockInStorage),
+ DetectedAtBlock: detectedReorgBlock,
+ DetectedTimestamp: nowTimestamp,
+ NetworkLatestBlock: latestBlockInRPC,
+ NetworkFinalizedBlock: finalizedBlockInRPC,
+ NetworkFinalizedBlockName: aggkittypes.FinalizedBlock,
+ Description: reorgErr.Error(),
+ }
+ mockPort.EXPECT().MoveReorgedBlocks(mockTx, expectedReorgData).Return(uint64(1), nil).Once()
+ mockTx.EXPECT().Commit().Return(nil).Once()
+
+ err := processor.ProcessReorg(ctx, *reorgErr, aggkittypes.FinalizedBlock)
+
+ require.NoError(t, err)
+ mockPort.AssertExpectations(t)
+ mockTx.AssertExpectations(t)
+}
diff --git a/multidownloader/state.go b/multidownloader/state.go
new file mode 100644
index 000000000..586456bef
--- /dev/null
+++ b/multidownloader/state.go
@@ -0,0 +1,223 @@
+package multidownloader
+
+import (
+ "fmt"
+
+ aggkitcommon "github.com/agglayer/aggkit/common"
+ "github.com/agglayer/aggkit/log"
+ mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+ aggkittypes "github.com/agglayer/aggkit/types"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+const maxPercent = 100.0
+
+// State represents the current state of the multidownloader.
+// It contains the segments that are already synced and the segments that are pending to be synced.
+type State struct {
+ // Synced holds the segments that have already been synced;
+ // when a syncer does a `FilterLogs`, it is used to check what is already synced
+ Synced mdrtypes.SetSyncSegment
+ // Pending holds the segments that still need to be synced
+ Pending mdrtypes.SetSyncSegment
+}
+
+// NewEmptyState creates a new State with empty synced and pending segments
+func NewEmptyState() *State {
+ return &State{
+ Synced: mdrtypes.NewSetSyncSegment(),
+ Pending: mdrtypes.NewSetSyncSegment(),
+ }
+}
+
+// NewState creates a new State with the given synced and pending segments
+func NewState(synced *mdrtypes.SetSyncSegment,
+ pending *mdrtypes.SetSyncSegment) *State {
+ return &State{
+ Synced: *synced,
+ Pending: *pending,
+ }
+}
+
+// NewStateFromStorageSyncedBlocks creates a new State from the given storage
+// synced blocks and total to sync blocks
+func NewStateFromStorageSyncedBlocks(storageSynced mdrtypes.SetSyncSegment,
+ totalToSync mdrtypes.SetSyncSegment) (*State, error) {
+ err := totalToSync.SubtractSegments(&storageSynced)
+ if err != nil {
+ return nil, fmt.Errorf("NewStateFromStorageSyncedBlocks: cannot calculate pendingSync: %w", err)
+ }
+ return NewState(&storageSynced, &totalToSync), nil
+}
+
+// Clone creates a deep copy of the State.
+// This ensures that modifications to the cloned state don't affect the original.
+func (s *State) Clone() *State {
+ if s == nil {
+ return nil
+ }
+
+ // Use Clone() from SetSyncSegment which does a deep copy
+ clonedSynced := s.Synced.Clone()
+ clonedPending := s.Pending.Clone()
+
+ return &State{
+ Synced: *clonedSynced,
+ Pending: *clonedPending,
+ }
+}
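+
+// Illustrative sketch (not part of the API) of the Synced/Pending bookkeeping,
+// using the same constructors exercised by the tests below; addr is a
+// placeholder for any contract address:
+//
+//	synced := mdrtypes.NewSetSyncSegment()
+//	pending := mdrtypes.NewSetSyncSegment()
+//	pending.Add(mdrtypes.NewSyncSegment(addr,
+//		aggkitcommon.NewBlockRange(1, 100), aggkittypes.LatestBlock, false))
+//	st := NewState(&synced, &pending)
+//	q := mdrtypes.NewLogQuery(1, 50, []common.Address{addr})
+//	_ = st.OnNewSyncedLogQuery(&q) // Synced becomes {1-50}, Pending becomes {51-100}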
+
+// String returns a string representation of the State
+func (s *State) String() string {
+ return "State{Synced: " + s.Synced.String() +
+ ", Pending: " + s.Pending.String() + "}"
+}
+
+// ExtendPendingRange extends the pending segments with the given new block
+// numbers. If a pending segment IsEmpty(), the latest block is taken from the
+// synced segments.
+func (s *State) ExtendPendingRange(
+ mapBlocks map[aggkittypes.BlockNumberFinality]uint64,
+ syncersConfig *mdrtypes.SetSyncerConfig) error {
+ newSyncSegments, err := syncersConfig.SyncSegments(mapBlocks)
+ if err != nil {
+ return fmt.Errorf("ExtendPendingRange: error creating sync segments from syncers config: %w", err)
+ }
+ for _, segment := range newSyncSegments.GetSegments() {
+ // If it's empty there is nothing to do
+ if segment.BlockRange.IsEmpty() {
+ continue
+ }
+
+ synced, ok := s.Synced.GetByContract(segment.ContractAddr)
+ if !ok {
+ return fmt.Errorf("ExtendPendingRange: error getting synced segment for contract %s", segment.ContractAddr.Hex())
+ }
+ // Subtract already synced blocks from the pending segment
+ subs := segment.BlockRange.Subtract(synced.BlockRange)
+ if len(subs) == 0 {
+ continue
+ }
+ // We assume that there is only one segment after subtraction; if there are
+ // more, it means there are non-contiguous blocks, which is unexpected
+ segment.BlockRange = subs[0]
+ // Extend the pending segment with the new block range
+ s.Pending.Add(segment)
+ }
+ return nil
+}
+
+// GetHighestBlockNumberPendingToSync returns the highest block number that is pending to be synced
+func (s *State) GetHighestBlockNumberPendingToSync() (uint64, aggkittypes.BlockNumberFinality) {
+ return s.Pending.GetHighestBlockNumber()
+}
+
+// IsAvailable checks if the given LogQuery is fully available in the synced segments
+func (s *State) IsAvailable(query mdrtypes.LogQuery) bool {
+ return s.Synced.IsAvailable(query)
+}
+
+// IsPartiallyAvailable checks if the given LogQuery is partially available in the synced segments
+func (s *State) IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery) {
+ return s.Synced.IsPartiallyAvailable(query)
+}
+
+// GetTotalPendingBlockRange returns the total block range that is pending to be synced
+func (s *State) GetTotalPendingBlockRange() *aggkitcommon.BlockRange {
+ return s.Pending.GetTotalPendingBlockRange()
+}
+
+// GetAddressesToSyncForBlockNumber returns the list of addresses that have pending segments
+// for the given block number
+func (s *State) GetAddressesToSyncForBlockNumber(blockNumber uint64) []common.Address {
+ return s.Pending.GetAddressesForBlock(blockNumber)
+}
+
+// IsSyncFinished returns true if there are no more segments pending to be synced
+func (s *State) IsSyncFinished() bool {
+ return s.Pending.Finished()
+}
+
+// TotalBlocksPendingToSync returns the total number of blocks that are pending to be synced
+func (s *State) TotalBlocksPendingToSync() uint64 {
+ return s.Pending.TotalBlocks()
+}
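+
+// Illustrative sketch of extending the pending range once the tracked finality
+// has advanced (hypothetical numbers, mirroring TestStateInitial_ExtendPendingRange;
+// st and configs are assumed to already exist):
+//
+//	mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{
+//		aggkittypes.FinalizedBlock: 350,
+//	}
+//	if err := st.ExtendPendingRange(mapBlocks, &configs); err != nil {
+//		// handle the error
+//	}
+//	// Pending now also covers the blocks between the previous target and 350.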
+
+// OnNewSyncedLogQuery updates the state to mark a LogQuery as synced.
+// This function is transactional - if either operation fails, the state remains unchanged.
+func (s *State) OnNewSyncedLogQuery(logQuery *mdrtypes.LogQuery) error {
+ if s == nil {
+ return fmt.Errorf("OnNewSyncedLogQuery: state is nil")
+ }
+ if logQuery == nil {
+ return fmt.Errorf("OnNewSyncedLogQuery: logQuery is nil")
+ }
+ if logQuery.IsEmpty() {
+ return fmt.Errorf("OnNewSyncedLogQuery: logQuery is empty")
+ }
+
+ // Clone both sets to ensure atomicity.
+ // If either operation fails, the original state remains unchanged.
+ clonedSynced := s.Synced.Clone()
+ clonedPending := s.Pending.Clone()
+
+ // Try to add to synced
+ err := clonedSynced.AddLogQuery(logQuery)
+ if err != nil {
+ return fmt.Errorf("OnNewSyncedLogQuery: adding synced segment: %w", err)
+ }
+
+ // Try to subtract from pending
+ err = clonedPending.SubtractLogQuery(logQuery)
+ if err != nil {
+ return fmt.Errorf("OnNewSyncedLogQuery: subtracting pending segment: %w", err)
+ }
+
+ // Both operations succeeded, commit the changes
+ s.Synced = *clonedSynced
+ s.Pending = *clonedPending
+
+ return nil
+}
+
+// SyncedSegmentsByContract returns the list of synced segments for the given contract addresses
+func (s *State) SyncedSegmentsByContract(addrs []common.Address) []mdrtypes.SyncSegment {
+ return s.Synced.SegmentsByContract(addrs)
+}
+
+// NextQueryToSync returns the next LogQuery to sync based on the pending segments and the given chunk size
+func (s *State) NextQueryToSync(syncBlockChunkSize uint32,
+ maxBlockNumber uint64, applyMaxBlockNumber bool) (*mdrtypes.LogQuery, error) {
+ return s.Pending.NextQuery(syncBlockChunkSize, maxBlockNumber, applyMaxBlockNumber)
+}
+
+// CompletionPercentage returns, per contract, the percentage of blocks already synced
+func (s *State) CompletionPercentage() map[common.Address]float64 {
+ if s == nil {
+ return nil
+ }
+ result := make(map[common.Address]float64)
+ contracts := s.Synced.GetContracts()
+ for _, contract := range contracts {
+ synced, existsSynced := s.Synced.GetByContract(contract)
+ if !existsSynced {
+ continue
+ }
+ pending, existsPending := s.Pending.GetByContract(contract)
+ if !existsPending {
+ result[contract] = maxPercent
+ continue
+ }
+
+ syncedBlocks := synced.BlockRange.CountBlocks()
+ pendingBlocks := pending.BlockRange.CountBlocks()
+ totalBlocks := syncedBlocks + pendingBlocks
+ log.Infof("CompletionPercentage for contract %s: syncedBlocks=%d, pendingBlocks=%d, totalBlocks=%d",
+ contract.Hex(), syncedBlocks, pendingBlocks, totalBlocks)
+ if totalBlocks == 0 {
+ result[contract] = maxPercent
+ } else {
+ result[contract] = (float64(syncedBlocks) / float64(totalBlocks)) * maxPercent
+ }
+ }
+ return result
+}
diff --git a/multidownloader/state_test.go b/multidownloader/state_test.go
new file mode 100644
index 000000000..fd9556213
--- /dev/null
+++ b/multidownloader/state_test.go
@@ -0,0 +1,572 @@
+package multidownloader
+
+import (
+ "testing"
+
+ aggkitcommon "github.com/agglayer/aggkit/common"
+ mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+ aggkittypes "github.com/agglayer/aggkit/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStateInitial(t *testing.T) {
+ addr1 := common.HexToAddress("0x10")
+ addr2 := common.HexToAddress("0x20")
+ storageData := mdrtypes.NewSetSyncSegment()
+ storageData.Add(mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.BlockRangeZero, aggkittypes.FinalizedBlock,
+ false))
+ storageData.Add(mdrtypes.NewSyncSegment(addr2,
+ aggkitcommon.BlockRangeZero, aggkittypes.LatestBlock,
+ false))
+ configData := mdrtypes.NewSetSyncSegment()
+ segment1 := mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.NewBlockRange(0, 1000), aggkittypes.FinalizedBlock,
+ false)
+ segment2 := mdrtypes.NewSyncSegment(addr2,
+ aggkitcommon.NewBlockRange(0, 2000), aggkittypes.LatestBlock,
+ false)
+ configData.Add(segment1)
+ configData.Add(segment2)
+
+ state, err := NewStateFromStorageSyncedBlocks(storageData, configData)
+ require.NoError(t, err)
+ require.NotNil(t, state)
+ logQuery := mdrtypes.NewLogQuery(
+ 0, 456, []common.Address{addr1})
+
+ err = 
state.OnNewSyncedLogQuery(&logQuery)
+ require.NoError(t, err)
+ syncedSegments := state.SyncedSegmentsByContract([]common.Address{addr1})
+ require.Equal(t, 1, len(syncedSegments))
+ require.Equal(t, addr1, syncedSegments[0].ContractAddr)
+ require.Equal(t, aggkitcommon.NewBlockRange(0, 456), syncedSegments[0].BlockRange)
+ require.Equal(t, aggkittypes.FinalizedBlock, syncedSegments[0].TargetToBlock)
+}
+
+func TestState_OnNewSyncedLogQuery(t *testing.T) {
+ t.Run("nil state", func(t *testing.T) {
+ var state *State
+ logQuery := mdrtypes.NewLogQuery(1, 10, []common.Address{common.HexToAddress("0x1")})
+ err := state.OnNewSyncedLogQuery(&logQuery)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "state is nil")
+ })
+
+ t.Run("nil logQuery", func(t *testing.T) {
+ state := NewEmptyState()
+ err := state.OnNewSyncedLogQuery(nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "logQuery is nil")
+ })
+
+ t.Run("successful sync", func(t *testing.T) {
+ addr1 := common.HexToAddress("0x100")
+
+ syncedSet := mdrtypes.NewSetSyncSegment()
+ syncedSet.Add(mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.NewBlockRange(1, 100),
+ aggkittypes.FinalizedBlock,
+ false))
+
+ pendingSet := mdrtypes.NewSetSyncSegment()
+ pendingSet.Add(mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.NewBlockRange(101, 200),
+ aggkittypes.LatestBlock,
+ false))
+
+ state := NewState(&syncedSet, &pendingSet)
+
+ // Get counts before
+ syncedBefore := state.SyncedSegmentsByContract([]common.Address{addr1})
+ pendingBefore := state.TotalBlocksPendingToSync()
+
+ require.Equal(t, 1, len(syncedBefore))
+ require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedBefore[0].BlockRange)
+ require.Equal(t, uint64(100), pendingBefore)
+
+ // Sync blocks 101-150
+ logQuery := mdrtypes.NewLogQuery(101, 150, []common.Address{addr1})
+ err := state.OnNewSyncedLogQuery(&logQuery)
+ require.NoError(t, err)
+
+ // Verify synced was extended
+ syncedAfter := state.SyncedSegmentsByContract([]common.Address{addr1})
+ require.Equal(t, 1, len(syncedAfter))
+ require.Equal(t, aggkitcommon.NewBlockRange(1, 150), syncedAfter[0].BlockRange)
+
+ // Verify pending was reduced
+ pendingAfter := state.TotalBlocksPendingToSync()
+ require.Equal(t, uint64(50), pendingAfter) // 151-200 = 50 blocks
+ })
+
+ t.Run("transactional behavior - state unchanged on error", func(t *testing.T) {
+ addr1 := common.HexToAddress("0x100")
+
+ syncedSet := mdrtypes.NewSetSyncSegment()
+ syncedSet.Add(mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.NewBlockRange(1, 100),
+ aggkittypes.FinalizedBlock,
+ false))
+
+ pendingSet := mdrtypes.NewSetSyncSegment()
+ pendingSet.Add(mdrtypes.NewSyncSegment(addr1,
+ aggkitcommon.NewBlockRange(101, 1000),
+ aggkittypes.LatestBlock,
+ false))
+
+ state := NewState(&syncedSet, &pendingSet)
+
+ // Get state before
+ syncedBefore := state.SyncedSegmentsByContract([]common.Address{addr1})
+ pendingBefore := state.TotalBlocksPendingToSync()
+ syncedCountBefore := len(syncedBefore)
+
+ // Try to sync a range in the middle (500-600) which would split the pending segment
+ // This should fail with "cannot split segment" error
+ logQuery := mdrtypes.NewLogQuery(500, 600, []common.Address{addr1})
+ err := state.OnNewSyncedLogQuery(&logQuery)
+
+ // Should fail because it would split the segment into two parts
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "cannot split segment")
+
+ // Verify state is unchanged
+ syncedAfter := state.SyncedSegmentsByContract([]common.Address{addr1})
+ pendingAfter := 
state.TotalBlocksPendingToSync() + + require.Equal(t, syncedCountBefore, len(syncedAfter), "synced segments count should be unchanged") + require.Equal(t, syncedBefore[0].BlockRange, syncedAfter[0].BlockRange, "synced range should be unchanged") + require.Equal(t, pendingBefore, pendingAfter, "pending blocks should be unchanged") + }) + + t.Run("multiple consecutive syncs", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + syncedSet := mdrtypes.NewSetSyncSegment() + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 1000), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Sync in chunks + chunks := []struct { + from uint64 + to uint64 + }{ + {1, 100}, + {101, 200}, + {201, 300}, + } + + for i, chunk := range chunks { + logQuery := mdrtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err, "chunk %d should succeed", i) + + // Verify synced range + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, uint64(1), synced[0].BlockRange.FromBlock) + require.Equal(t, chunk.to, synced[0].BlockRange.ToBlock) + } + + // Verify final state + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, aggkitcommon.NewBlockRange(1, 300), synced[0].BlockRange) + require.Equal(t, uint64(700), state.TotalBlocksPendingToSync()) // 301-1000 + }) + + t.Run("sync everything until finished", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + // Start with empty synced and full pending + syncedSet := mdrtypes.NewSetSyncSegment() + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 300), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Verify initial state + require.False(t, state.IsSyncFinished(), "should not be finished initially") + require.Equal(t, uint64(300), state.TotalBlocksPendingToSync()) + + // Sync all blocks in chunks + chunks := []struct { + from uint64 + to uint64 + }{ + {1, 100}, + {101, 200}, + {201, 300}, + } + + for i, chunk := range chunks { + logQuery := mdrtypes.NewLogQuery(chunk.from, chunk.to, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err, "chunk %d should succeed", i) + + if i < len(chunks)-1 { + // Not finished yet + require.False(t, state.IsSyncFinished(), "should not be finished after chunk %d", i) + require.Greater(t, state.TotalBlocksPendingToSync(), uint64(0), + "should have pending blocks after chunk %d", i) + } + } + + // Verify everything is synced + require.True(t, state.IsSyncFinished(), "should be finished after syncing all blocks") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 pending blocks") + + // Verify synced range covers everything + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 300), synced[0].BlockRange) + + // Verify total pending block range is nil or empty + totalPending := state.GetTotalPendingBlockRange() + if totalPending != nil { + require.True(t, totalPending.IsEmpty(), "total pending range should be empty") + } + }) + + t.Run("sync everything with single query", func(t *testing.T) { + addr1 := common.HexToAddress("0x100") + + // Start with some already 
synced + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 50), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(51, 100), + aggkittypes.LatestBlock, + false)) + + state := NewState(&syncedSet, &pendingSet) + + // Verify initial state + require.False(t, state.IsSyncFinished()) + require.Equal(t, uint64(50), state.TotalBlocksPendingToSync()) + + // Sync remaining blocks in one go + logQuery := mdrtypes.NewLogQuery(51, 100, []common.Address{addr1}) + err := state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify finished + require.True(t, state.IsSyncFinished(), "should be finished") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 pending blocks") + require.Nil(t, state.GetTotalPendingBlockRange(), "total pending range should be nil") + // Verify complete synced range + synced := state.SyncedSegmentsByContract([]common.Address{addr1}) + require.Equal(t, 1, len(synced)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), synced[0].BlockRange) + }) +} + +func TestState_Clone(t *testing.T) { + t.Run("nil state", func(t *testing.T) { + var state *State + cloned := state.Clone() + require.Nil(t, cloned, "cloning a nil state should return nil") + }) + + t.Run("deep copy verification", func(t *testing.T) { + // Create original state with synced and pending segments + addr1 := common.HexToAddress("0x100") + + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(1, 100), + aggkittypes.FinalizedBlock, + false)) + + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, + aggkitcommon.NewBlockRange(101, 200), + aggkittypes.LatestBlock, + false)) + + original := NewState(&syncedSet, &pendingSet) + + // Clone the state + cloned := original.Clone() + + // Verify cloned state has same values initially + require.NotNil(t, cloned, "cloned state should not be nil") + + // Get synced segments before modification + originalSyncedBefore := original.SyncedSegmentsByContract([]common.Address{addr1}) + clonedSyncedBefore := cloned.SyncedSegmentsByContract([]common.Address{addr1}) + + require.Equal(t, len(originalSyncedBefore), len(clonedSyncedBefore)) + require.Equal(t, originalSyncedBefore[0].BlockRange, clonedSyncedBefore[0].BlockRange) + + // Modify the original by syncing more blocks + logQuery := mdrtypes.NewLogQuery(101, 150, []common.Address{addr1}) + err := original.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Get synced segments after modification + originalSyncedAfter := original.SyncedSegmentsByContract([]common.Address{addr1}) + clonedSyncedAfter := cloned.SyncedSegmentsByContract([]common.Address{addr1}) + + // Original should have extended synced range (1-150) + require.Equal(t, 1, len(originalSyncedAfter)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 150), originalSyncedAfter[0].BlockRange, + "original should have extended range after sync") + + // Cloned should still have the original range (1-100) + require.Equal(t, 1, len(clonedSyncedAfter)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), clonedSyncedAfter[0].BlockRange, + "cloned state should not be affected by modifications to original") + }) + + t.Run("empty state", func(t *testing.T) { + original := NewEmptyState() + cloned := original.Clone() + + require.NotNil(t, cloned, 
"cloned empty state should not be nil") + require.True(t, cloned.IsSyncFinished(), "cloned empty state should be finished") + require.Equal(t, uint64(0), cloned.TotalBlocksPendingToSync(), "cloned empty state should have 0 pending blocks") + }) + + t.Run("complex state with multiple segments", func(t *testing.T) { + addr1 := common.HexToAddress("0x1") + addr2 := common.HexToAddress("0x2") + addr3 := common.HexToAddress("0x3") + + syncedSet := mdrtypes.NewSetSyncSegment() + syncedSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(0, 100), aggkittypes.FinalizedBlock, false)) + syncedSet.Add(mdrtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(0, 200), aggkittypes.FinalizedBlock, false)) + + pendingSet := mdrtypes.NewSetSyncSegment() + pendingSet.Add(mdrtypes.NewSyncSegment(addr1, aggkitcommon.NewBlockRange(101, 500), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdrtypes.NewSyncSegment(addr2, aggkitcommon.NewBlockRange(201, 600), aggkittypes.LatestBlock, false)) + pendingSet.Add(mdrtypes.NewSyncSegment(addr3, aggkitcommon.NewBlockRange(0, 1000), aggkittypes.LatestBlock, false)) + + original := NewState(&syncedSet, &pendingSet) + cloned := original.Clone() + + // Verify counts before modification + originalPendingBefore := original.TotalBlocksPendingToSync() + clonedPendingBefore := cloned.TotalBlocksPendingToSync() + require.Equal(t, originalPendingBefore, clonedPendingBefore) + + // Modify original - sync blocks at the end of addr3 range to avoid splitting + logQuery := mdrtypes.NewLogQuery(901, 1000, []common.Address{addr3}) + err := original.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify original changed + originalPendingAfter := original.TotalBlocksPendingToSync() + require.Less(t, originalPendingAfter, originalPendingBefore, "original pending should decrease") + + // Verify cloned is independent + clonedPendingAfter := cloned.TotalBlocksPendingToSync() + require.Equal(t, clonedPendingBefore, clonedPendingAfter, + "cloned state should be independent from original after modification") + }) +} + +func TestStateInitial_case_startBlock0(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 0, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 256, + } + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + br := sut.GetTotalPendingBlockRange() + require.NotNil(t, br) + require.Equal(t, "From: 0, To: 256 (257)", br.String()) + nextRequest, err := sut.NextQueryToSync(20, 250, true) + require.NoError(t, err) + require.Equal(t, "From: 0, To: 19 (20)", nextRequest.BlockRange.String()) + // after: synced: {0-19}, pending: {20-256} + err = sut.OnNewSyncedLogQuery(nextRequest) + require.NoError(t, err) + br = sut.GetTotalPendingBlockRange() + require.Equal(t, "From: 20, To: 256 (237)", br.String()) + require.True(t, sut.IsAvailable(*nextRequest)) + // nextRequest = {20-400} + nextRequest.BlockRange = aggkitcommon.NewBlockRange(10, 400) + require.False(t, 
sut.IsAvailable(*nextRequest)) + partial, subRequest := sut.IsPartiallyAvailable(*nextRequest) + require.True(t, partial) + require.Equal(t, "From: 10, To: 19 (10)", subRequest.BlockRange.String()) +} + +func TestStateInitial_case_startBlock1(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 256, + } + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + br := sut.GetTotalPendingBlockRange() + require.NotNil(t, br) + require.Equal(t, "From: 1, To: 256 (256)", br.String()) +} +func TestStateInitial_ExtendPendingRange(t *testing.T) { + var err error + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x10")}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, + } + configs.Add(cfg) + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.FinalizedBlock: 200, + } + + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + common.HexToAddress("0x10"), + aggkitcommon.BlockRangeZero, + aggkittypes.FinalizedBlock, + true)) + sut, err := NewStateFromStorageSyncedBlocks( + storageSyncSegments, *syncSegments) + require.NoError(t, err) + pendingSync := sut.GetTotalPendingBlockRange() + require.NotNil(t, pendingSync) + + // Sync first batch 1-200 + err = sut.OnNewSyncedLogQuery(&mdrtypes.LogQuery{ + BlockRange: aggkitcommon.NewBlockRange(1, 200), + Addrs: []common.Address{common.HexToAddress("0x10")}, + }) + require.NoError(t, err) + require.True(t, sut.IsSyncFinished()) + + // Now extend the range to block 350 + mapBlocks[aggkittypes.FinalizedBlock] = 350 + err = sut.ExtendPendingRange(mapBlocks, &configs) + require.NoError(t, err) + pendingBlockRange := sut.GetTotalPendingBlockRange() + require.NotNil(t, pendingBlockRange) + require.Equal(t, "From: 201, To: 350 (150)", pendingBlockRange.String()) +} + +func TestState_AfterFullySync(t *testing.T) { + // Setup: Create a state with a segment to sync from block 1 to 100 + addr := common.HexToAddress("0x123124543423") + configs := mdrtypes.NewSetSyncerConfig() + cfg := aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 1, + ToBlock: aggkittypes.LatestBlock, + } + configs.Add(cfg) + + // Initial target block is 100 + mapBlocks := map[aggkittypes.BlockNumberFinality]uint64{ + aggkittypes.LatestBlock: 100, + } + + syncSegments, err := configs.SyncSegments(mapBlocks) + require.NoError(t, err) + + // Start with empty storage (nothing synced yet) + storageSyncSegments := mdrtypes.NewSetSyncSegment() + storageSyncSegments.Add(mdrtypes.NewSyncSegment( + addr, + aggkitcommon.BlockRangeZero, + aggkittypes.LatestBlock, + true)) + + state, err := NewStateFromStorageSyncedBlocks(storageSyncSegments, 
*syncSegments) + require.NoError(t, err) + + // Verify initial pending state + require.False(t, state.IsSyncFinished(), "should not be finished initially") + require.Equal(t, uint64(100), state.TotalBlocksPendingToSync(), "should have 100 blocks pending") + + // Sync all blocks 1-100 + logQuery := mdrtypes.NewLogQuery(1, 100, []common.Address{addr}) + err = state.OnNewSyncedLogQuery(&logQuery) + require.NoError(t, err) + + // Verify sync is complete + require.True(t, state.IsSyncFinished(), "should be finished after syncing all blocks") + require.Equal(t, uint64(0), state.TotalBlocksPendingToSync(), "should have 0 blocks pending") + + // Verify synced range + syncedSegments := state.SyncedSegmentsByContract([]common.Address{addr}) + require.Equal(t, 1, len(syncedSegments)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedSegments[0].BlockRange) + + // Now simulate that the chain has progressed to block 150 + // This is analogous to UpdateTargetBlockToNumber in the SetSyncSegment test + mapBlocks[aggkittypes.LatestBlock] = 150 + err = state.ExtendPendingRange(mapBlocks, &configs) + require.NoError(t, err) + + // Verify that new blocks are now pending (101-150) + require.False(t, state.IsSyncFinished(), "should not be finished after extending range") + require.Equal(t, uint64(50), state.TotalBlocksPendingToSync(), "should have 50 new blocks pending") + + // Verify pending range + pendingBlockRange := state.GetTotalPendingBlockRange() + require.NotNil(t, pendingBlockRange) + require.Equal(t, "From: 101, To: 150 (50)", pendingBlockRange.String()) + + // Verify synced segments remain unchanged + syncedSegments = state.SyncedSegmentsByContract([]common.Address{addr}) + require.Equal(t, 1, len(syncedSegments)) + require.Equal(t, aggkitcommon.NewBlockRange(1, 100), syncedSegments[0].BlockRange, + "synced range should remain unchanged at 1-100") +} diff --git a/multidownloader/storage/migrations/0002.sql b/multidownloader/storage/migrations/0002.sql new file mode 100644 index 000000000..c6796ac8e --- /dev/null +++ b/multidownloader/storage/migrations/0002.sql @@ -0,0 +1,41 @@ +-- +migrate Down +DROP TABLE IF EXISTS logs_reorged; +DROP TABLE IF EXISTS blocks_reorged; +DROP TABLE IF EXISTS reorgs; +-- +migrate Up + +CREATE TABLE reorgs ( + reorg_id BIGINT PRIMARY KEY, + detected_at_block BIGINT NOT NULL, + reorged_from_block BIGINT NOT NULL, + reorged_to_block BIGINT NOT NULL, + detected_timestamp INTEGER NOT NULL, + network_latest_block INTEGER NOT NULL, -- the network's latest block at the moment of detection + network_finalized_block INTEGER NOT NULL, -- the network's finalized block at the moment of detection + network_finalized_block_name TEXT NOT NULL, -- name of the finalized block (e.g., "finalized", "safe", etc.) 
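+    -- (reorged_from_block, reorged_to_block) is the inclusive range moved to the *_reorged tables below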
+ description TEXT -- extra information, can be null +); + +CREATE TABLE blocks_reorged ( + reorg_id BIGINT NOT NULL REFERENCES reorgs(reorg_id), + block_number BIGINT NOT NULL, + block_hash TEXT NOT NULL, + block_timestamp INTEGER NOT NULL, + block_parent_hash TEXT NOT NULL, + PRIMARY KEY (reorg_id, block_number) +); + +CREATE TABLE logs_reorged ( + reorg_id BIGINT NOT NULL, + block_number BIGINT NOT NULL, + address TEXT NOT NULL, -- + topics TEXT NOT NULL, -- list of hashes in JSON + data BLOB, -- + tx_hash TEXT NOT NULL, + tx_index INTEGER NOT NULL, + log_index INTEGER NOT NULL, -- "index" is a reserved keyword + PRIMARY KEY (address, reorg_id, block_number, log_index), + FOREIGN KEY (reorg_id, block_number) REFERENCES blocks_reorged(reorg_id, block_number) +); + +CREATE INDEX idx_logs_reorged_block_number ON logs_reorged(block_number); \ No newline at end of file diff --git a/multidownloader/storage/migrations/migrations.go b/multidownloader/storage/migrations/migrations.go index 679c8ffde..dab2e080d 100644 --- a/multidownloader/storage/migrations/migrations.go +++ b/multidownloader/storage/migrations/migrations.go @@ -12,11 +12,18 @@ import ( //go:embed 0001.sql var mig001 string +//go:embed 0002.sql +var mig002 string + var Migrations = []types.Migration{ { ID: "0001", SQL: mig001, }, + { + ID: "0002", + SQL: mig002, + }, } func RunMigrations(logger aggkitcommon.Logger, database *sql.DB) error { diff --git a/multidownloader/storage/storage.go b/multidownloader/storage/storage.go index b38fc78e8..f2fa72f7b 100644 --- a/multidownloader/storage/storage.go +++ b/multidownloader/storage/storage.go @@ -60,15 +60,6 @@ func NewLogRowsFromEthLogs(logs []types.Log) []*logRow { return rows } -type syncStatusRow struct { - Address common.Address `meddler:"contract_address,address"` - TargetFromBlock uint64 `meddler:"target_from_block"` - TargetToBlock string `meddler:"target_to_block"` - SyncedFromBlock uint64 `meddler:"synced_from_block"` - SyncedToBlock uint64 `meddler:"synced_to_block"` - SyncersIDs string `meddler:"syncers_id"` -} - func NewLogRowFromEthLog(log types.Log) *logRow { topicsJSON, err := json.Marshal(log.Topics) if err != nil { @@ -142,7 +133,7 @@ func NewBlockRowsFromLogs(logs []types.Log, isFinal bool) map[uint64]*blockRow { return blockMap } -func NewBlockRowsFromAggkitBlock(blockHeaders []*aggkittypes.BlockHeader, isFinal bool) map[uint64]*blockRow { +func NewBlockRowsFromAggkitBlock(blockHeaders aggkittypes.ListBlockHeaders, isFinal bool) map[uint64]*blockRow { blockMap := make(map[uint64]*blockRow) for _, header := range blockHeaders { blockMap[header.Number] = newBlockRowFromAggkitBlock(header, isFinal) @@ -189,6 +180,7 @@ type logAndBlockRow struct { BlockHash common.Hash `meddler:"block_hash,hash"` BlockTimestamp uint64 `meddler:"block_timestamp"` BlockParentHash *common.Hash `meddler:"block_parent_hash,hash"` + IsFinal bool `meddler:"is_final"` } func (a *MultidownloaderStorage) GetEthLogs(tx dbtypes.Querier, query mdrtypes.LogQuery) ([]types.Log, error) { @@ -241,13 +233,94 @@ func (a *MultidownloaderStorage) GetEthLogs(tx dbtypes.Querier, query mdrtypes.L return logs, nil } +func (a *MultidownloaderStorage) LogQuery(tx dbtypes.Querier, + query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + + dbRows := make([]*logAndBlockRow, 0) + sqlQuery := ` + SELECT * FROM logs + LEFT JOIN blocks ON logs.block_number = blocks.block_number + WHERE address IN (?) + AND logs.block_number>=? 
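+		-- FromBlock and ToBlock are inclusive bounds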
AND logs.block_number<=? + ORDER BY logs.block_number ASC, log_index ASC + ` + addrs := make([]string, 0, len(query.Addrs)) + for _, addr := range query.Addrs { + addrs = append(addrs, addr.Hex()) + } + // This is used to extend the address slice into the query + queryStr, args, err := sqlx.In(sqlQuery, addrs, query.BlockRange.FromBlock, query.BlockRange.ToBlock) + if err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error building SQL query: %w", err) + } + err = meddler.QueryAll(tx, &dbRows, queryStr, args...) + if err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error querying logs: %w", err) + } + + // Group logs by block number + blockLogsMap := make(map[uint64]*mdrtypes.BlockWithLogs) + blockOrder := make([]uint64, 0) + + for _, dbRow := range dbRows { + var topics []common.Hash + if err := json.Unmarshal([]byte(dbRow.Topics), &topics); err != nil { + return mdrtypes.LogQueryResponse{}, fmt.Errorf("error unmarshaling topics: %w", err) + } + log := mdrtypes.Log{ + Address: dbRow.Address, + Topics: topics, + Data: dbRow.Data, + BlockNumber: dbRow.BlockNumber, + TxHash: dbRow.TxHash, + TxIndex: dbRow.TxIndex, + Index: dbRow.Index, + BlockTimestamp: dbRow.BlockTimestamp, + Removed: false, + } + + // Add block to map if not already present + if _, exists := blockLogsMap[dbRow.BlockNumber]; !exists { + blockLogsMap[dbRow.BlockNumber] = &mdrtypes.BlockWithLogs{ + Header: aggkittypes.BlockHeader{ + Number: dbRow.BlockNumber, + Hash: dbRow.BlockHash, + Time: dbRow.BlockTimestamp, + ParentHash: dbRow.BlockParentHash, + }, + IsFinal: dbRow.IsFinal, + Logs: make([]mdrtypes.Log, 0), + } + blockOrder = append(blockOrder, dbRow.BlockNumber) + } + + blockLogsMap[dbRow.BlockNumber].Logs = append(blockLogsMap[dbRow.BlockNumber].Logs, log) + } + + // Build response maintaining block order + blocks := make([]mdrtypes.BlockWithLogs, 0, len(blockOrder)) + for _, blockNum := range blockOrder { + blocks = append(blocks, *blockLogsMap[blockNum]) + } + + return mdrtypes.LogQueryResponse{ + Blocks: blocks, + ResponseRange: query.BlockRange, + }, nil +} + // tx dbtypes.Txer func (a *MultidownloaderStorage) SaveEthLogs(tx dbtypes.Querier, logs []types.Log, isFinal bool) error { return a.saveLogsAndBlocks(tx, NewBlockRowsFromLogs(logs, isFinal), NewLogRowsFromEthLogs(logs)) } func (a *MultidownloaderStorage) SaveEthLogsWithHeaders(tx dbtypes.Querier, - blockHeaders []*aggkittypes.BlockHeader, logs []types.Log, isFinal bool) error { + blockHeaders aggkittypes.ListBlockHeaders, logs []types.Log, isFinal bool) error { return a.saveLogsAndBlocks(tx, NewBlockRowsFromAggkitBlock(blockHeaders, isFinal), NewLogRowsFromEthLogs(logs)) } @@ -275,6 +348,8 @@ func (a *MultidownloaderStorage) saveBlocksNoMutex(tx dbtypes.Querier, blockRows tx = a.db } for _, blockRow := range blockRows { + a.logger.Debugf("Inserting block header row: %d %s final=%v", blockRow.BlockNumber, + blockRow.BlockHash.Hex(), blockRow.IsFinal) if err := meddler.Insert(tx, "blocks", blockRow); err != nil { return fmt.Errorf("saveBlocksNoMutex: error inserting block header row (%s): %w", blockRow.String(), err) } @@ -293,104 +368,3 @@ func (a *MultidownloaderStorage) saveLogsNoMutex(tx dbtypes.Querier, logRows []* } return nil } - -func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { - targetToBlock, err := aggkittypes.NewBlockNumberFinality(r.TargetToBlock) - if err != nil { - return mdrtypes.SyncSegment{}, fmt.Errorf("ToSyncSegment: error parsing target to block finality (%s): %w", - r.TargetToBlock, err) - 
} - return mdrtypes.SyncSegment{ - ContractAddr: r.Address, - TargetToBlock: *targetToBlock, - BlockRange: aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock), - }, nil -} - -func (a *MultidownloaderStorage) GetSyncedBlockRangePerContract(tx dbtypes.Querier) (mdrtypes.SetSyncSegment, error) { - a.mutex.RLock() - defer a.mutex.RUnlock() - result := make([]*syncStatusRow, 0) - if tx == nil { - tx = a.db - } - err := meddler.QueryAll(tx, &result, "SELECT * FROM sync_status") - if err != nil { - return mdrtypes.SetSyncSegment{}, fmt.Errorf("error querying sync status: %w", err) - } - setSegments := mdrtypes.NewSetSyncSegment() - for _, row := range result { - segment, err := row.ToSyncSegment() - if err != nil { - return mdrtypes.SetSyncSegment{}, - fmt.Errorf("GetSyncedBlockRangePerContract: error converting row to sync segment: %w", err) - } - setSegments.Add(segment) - } - return setSegments, nil -} - -func (a *MultidownloaderStorage) UpdateSyncedStatus(tx dbtypes.Querier, - segments []mdrtypes.SyncSegment) error { - if tx == nil { - tx = a.db - } - query := ` - UPDATE sync_status SET - synced_from_block = ?, - synced_to_block = ? - WHERE contract_address = ?; - ` - a.mutex.Lock() - defer a.mutex.Unlock() - for _, segment := range segments { - result, err := tx.Exec(query, segment.BlockRange.FromBlock, - segment.BlockRange.ToBlock, segment.ContractAddr.Hex()) - if err != nil { - return fmt.Errorf("error updating %s sync status: %w", segment.String(), err) - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected for contract %s: %w", - segment.ContractAddr.Hex(), err) - } - if rowsAffected == 0 { - return fmt.Errorf("no rows updated for contract %s", segment.ContractAddr.Hex()) - } - } - return nil -} - -func (a *MultidownloaderStorage) UpsertSyncerConfigs(tx dbtypes.Querier, configs []mdrtypes.ContractConfig) error { - if tx == nil { - tx = a.db - } - a.mutex.Lock() - defer a.mutex.Unlock() - for _, config := range configs { - row := syncStatusRow{ - Address: config.Address, - TargetFromBlock: config.FromBlock, - TargetToBlock: config.ToBlock.String(), - SyncedFromBlock: 0, - SyncedToBlock: 0, - SyncersIDs: fmt.Sprintf("%v", config.Syncers), - } - // Upsert logic - query := ` - INSERT INTO sync_status (contract_address, target_from_block, - target_to_block, synced_from_block, synced_to_block, syncers_id) - VALUES (?, ?, ?, ?, ?, ?) 
- ON CONFLICT(contract_address) DO UPDATE SET - target_from_block = excluded.target_from_block, - target_to_block = excluded.target_to_block, - syncers_id = excluded.syncers_id - ` - _, err := tx.Exec(query, row.Address.Hex(), row.TargetFromBlock, row.TargetToBlock, - row.SyncedFromBlock, row.SyncedToBlock, row.SyncersIDs) - if err != nil { - return fmt.Errorf("error updating sync status: %w", err) - } - } - return nil -} diff --git a/multidownloader/storage/storage_block.go b/multidownloader/storage/storage_block.go index 352ce504a..016ffa15d 100644 --- a/multidownloader/storage/storage_block.go +++ b/multidownloader/storage/storage_block.go @@ -6,6 +6,7 @@ import ( "fmt" dbtypes "github.com/agglayer/aggkit/db/types" + mdtypes "github.com/agglayer/aggkit/multidownloader/types" aggkittypes "github.com/agglayer/aggkit/types" "github.com/jmoiron/sqlx" "github.com/russross/meddler" @@ -40,8 +41,8 @@ func (b *Blocks) Get(number uint64) (*aggkittypes.BlockHeader, bool, error) { return header, isFinal, nil } -func (b *Blocks) ListHeaders() []*aggkittypes.BlockHeader { - headers := make([]*aggkittypes.BlockHeader, 0, len(b.Headers)) +func (b *Blocks) ListHeaders() aggkittypes.ListBlockHeaders { + headers := aggkittypes.NewListBlockHeadersEmpty(len(b.Headers)) for _, header := range b.Headers { headers = append(headers, header) } @@ -51,6 +52,9 @@ func (b *Blocks) ListHeaders() []*aggkittypes.BlockHeader { func (b *Blocks) IsEmpty() bool { return len(b.Headers) == 0 } +func (b *Blocks) Len() int { + return len(b.Headers) +} func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, header *aggkittypes.BlockHeader, isFinal bool) error { @@ -62,7 +66,10 @@ func (a *MultidownloaderStorage) saveAggkitBlock(tx dbtypes.Querier, return a.saveBlocksNoMutex(tx, blockRows) } -func (a *MultidownloaderStorage) updateIsFinal(tx dbtypes.Querier, blockNumbers []uint64) error { +func (a *MultidownloaderStorage) UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error { + if len(blockNumbers) == 0 { + return nil + } if tx == nil { tx = a.db } @@ -81,11 +88,58 @@ func (a *MultidownloaderStorage) updateIsFinal(tx dbtypes.Querier, blockNumbers } return nil } -func (a *MultidownloaderStorage) GetBlockHeaderByNumber(tx dbtypes.Querier, - blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) { + +func (a *MultidownloaderStorage) GetHighestBlockNumber(tx dbtypes.Querier) (uint64, error) { + query := "SELECT MAX(block_number) as max_block_number FROM blocks" if tx == nil { tx = a.db } + var maxBlockNumber sql.NullInt64 + err := tx.QueryRow(query).Scan(&maxBlockNumber) + if err != nil { + return 0, fmt.Errorf("GetHighestBlockNumber: error querying highest block number: %w", err) + } + if maxBlockNumber.Valid { + return uint64(maxBlockNumber.Int64), nil + } + return 0, nil +} + +// GetRangeBlockHeader retrieves the lowest and highest block headers stored in the database +// for the specified finality type. Returns lowest and highest block headers. +func (a *MultidownloaderStorage) GetRangeBlockHeader(tx dbtypes.Querier, + isFinal mdtypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + a.mutex.RLock() + defer a.mutex.RUnlock() + + highestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks "+ + "WHERE is_final=? 
ORDER BY block_number DESC LIMIT 1", isFinal) + if err != nil { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: %w", err) + } + if highestBlock.IsEmpty() { + return nil, nil, nil + } + if highestBlock.Len() > 1 { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:highest: more than one block returned (%d)", highestBlock.Len()) + } + + lowestBlock, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final=? "+ + "ORDER BY block_number ASC LIMIT 1", isFinal) + if err != nil { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:lowest: %w", err) + } + if lowestBlock.IsEmpty() { + return nil, nil, nil + } + if lowestBlock.Len() > 1 { + return nil, nil, fmt.Errorf("GetRangeBlockHeader:lowest: more than one block returned (%d)", lowestBlock.Len()) + } + return lowestBlock.ListHeaders()[0], highestBlock.ListHeaders()[0], nil +} + +func (a *MultidownloaderStorage) GetBlockHeaderByNumber(tx dbtypes.Querier, + blockNumber uint64) (*aggkittypes.BlockHeader, mdtypes.FinalizedType, error) { a.mutex.RLock() defer a.mutex.RUnlock() blocks, err := a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE block_number = ?", blockNumber) @@ -128,3 +182,27 @@ } return result, nil } + +// GetBlockHeadersNotFinalized retrieves all non-finalized block headers with block_number <= maxBlock; +// if maxBlock is nil, it retrieves all non-finalized blocks +func (a *MultidownloaderStorage) GetBlockHeadersNotFinalized(tx dbtypes.Querier, + maxBlock *uint64) (aggkittypes.ListBlockHeaders, error) { + if tx == nil { + tx = a.db + } + var blocks Blocks + var err error + a.mutex.RLock() + defer a.mutex.RUnlock() + + if maxBlock != nil { + blocks, err = a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0 AND block_number <= ?", *maxBlock) + } else { + blocks, err = a.getBlockHeadersNoMutex(tx, "SELECT * FROM blocks WHERE is_final = 0") + } + + if err != nil { + return nil, err + } + return blocks.ListHeaders(), nil +} diff --git a/multidownloader/storage/storage_block_test.go b/multidownloader/storage/storage_block_test.go new file mode 100644 index 000000000..abdab5d13 --- /dev/null +++ b/multidownloader/storage/storage_block_test.go @@ -0,0 +1,298 @@ +package storage + +import ( + "context" + "testing" + + mdtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_GetBlock(t *testing.T) { + storage := newStorageForTest(t, nil) + // BlockBase not present + blockHeader, _, err := storage.GetBlockHeaderByNumber(nil, 1234) + require.NoError(t, err, "cannot get BlockHeader") + require.Nil(t, blockHeader, "expected nil BlockHeader") + block := aggkittypes.NewBlockHeader(1234, exampleTestHash[0], 5678, &exampleTestHash[1]) + err = storage.saveAggkitBlock(nil, block, true) + require.NoError(t, err, "cannot insert BlockHeader") + // Get and verify block + readBlock, isFinal, err := storage.GetBlockHeaderByNumber(nil, 1234) + require.NoError(t, err, "cannot get BlockHeader") + require.NotNil(t, readBlock, "expected non-nil BlockHeader") + require.Equal(t, block, readBlock, "BlockHeader mismatch") + require.True(t, isFinal, "expected block to be final") + + blockNilParentHash := aggkittypes.NewBlockHeader(1235, exampleTestHash[0], 5678, nil) + err = storage.saveAggkitBlock(nil, blockNilParentHash, true) + require.NoError(t, err, "cannot insert BlockHeader") + readBlock, _, err = storage.GetBlockHeaderByNumber(nil, 
blockNilParentHash.Number) + require.NoError(t, err, "cannot get BlockHeader") + require.Equal(t, blockNilParentHash, readBlock, "BlockHeader mismatch") +} + +func TestStorage_GetRangeBlockHeader(t *testing.T) { + t.Run("returns same block when only one block exists", func(t *testing.T) { + storage := newStorageForTest(t, nil) + block := aggkittypes.NewBlockHeader(4000, exampleTestHash[5], 1630002000, nil) + err := storage.saveAggkitBlock(nil, block, mdtypes.NotFinalized) + require.NoError(t, err, "cannot insert BlockHeader") + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.Equal(t, block, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block, highest, "highest BlockHeader mismatch") + }) + + t.Run("returns nil when no blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.Finalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.Nil(t, lowest, "expected nil lowest BlockHeader") + require.Nil(t, highest, "expected nil highest BlockHeader") + }) + + t.Run("returns correct lowest and highest when multiple blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple non-finalized blocks in non-sequential order + block1 := aggkittypes.NewBlockHeader(2000, exampleTestHash[0], 1630001000, nil) + err := storage.saveAggkitBlock(nil, block1, mdtypes.NotFinalized) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(1000, exampleTestHash[1], 1630000000, nil) + err = storage.saveAggkitBlock(nil, block2, mdtypes.NotFinalized) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(nil, block3, mdtypes.NotFinalized) + require.NoError(t, err) + + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err, "cannot get range BlockHeader") + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(1000), lowest.Number, "lowest should be block 1000") + require.Equal(t, uint64(3000), highest.Number, "highest should be block 3000") + require.Equal(t, block2, lowest, "lowest BlockHeader mismatch") + require.Equal(t, block3, highest, "highest BlockHeader mismatch") + }) + + t.Run("filters by finality type correctly", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert finalized blocks + finalizedBlock1 := aggkittypes.NewBlockHeader(500, exampleTestHash[3], 1629999000, nil) + err := storage.saveAggkitBlock(nil, finalizedBlock1, mdtypes.Finalized) + require.NoError(t, err) + + finalizedBlock2 := aggkittypes.NewBlockHeader(1500, exampleTestHash[4], 1630000500, nil) + err = storage.saveAggkitBlock(nil, finalizedBlock2, mdtypes.Finalized) + require.NoError(t, err) + + // Insert non-finalized blocks + notFinalizedBlock := aggkittypes.NewBlockHeader(2500, exampleTestHash[5], 1630001500, nil) + err = storage.saveAggkitBlock(nil, notFinalizedBlock, mdtypes.NotFinalized) + require.NoError(t, err) + + // Get finalized range + lowest, highest, err := storage.GetRangeBlockHeader(nil, mdtypes.Finalized) + require.NoError(t, err) + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(500), lowest.Number, "lowest finalized should be block 500") + require.Equal(t, uint64(1500), highest.Number, "highest finalized should be block 1500") + + // Get non-finalized 
range + lowest, highest, err = storage.GetRangeBlockHeader(nil, mdtypes.NotFinalized) + require.NoError(t, err) + require.NotNil(t, lowest) + require.NotNil(t, highest) + require.Equal(t, uint64(2500), lowest.Number, "should only return non-finalized block") + require.Equal(t, uint64(2500), highest.Number, "should only return non-finalized block") + }) +} + +func TestStorage_GetHighestBlockNumber(t *testing.T) { + t.Run("returns 0 when no blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + highestBlockNum, err := storage.GetHighestBlockNumber(nil) + + require.NoError(t, err) + require.Equal(t, uint64(0), highestBlockNum) + }) + + t.Run("returns highest block number when blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err := storage.saveAggkitBlock(nil, block1, true) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(nil, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(1500, exampleTestHash[2], 1630000500, nil) + err = storage.saveAggkitBlock(nil, block3, true) + require.NoError(t, err) + + highestBlockNum, err := storage.GetHighestBlockNumber(nil) + + require.NoError(t, err) + require.Equal(t, uint64(2000), highestBlockNum, "expected highest block number to be 2000") + }) +} + +func TestStorage_GetBlockHeadersNotFinalized(t *testing.T) { + t.Run("returns empty list when no non-finalized blocks exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + blocks, err := storage.GetBlockHeadersNotFinalized(nil, nil) + + require.NoError(t, err) + require.Empty(t, blocks) + }) + + t.Run("returns all non-finalized blocks when maxBlock is nil", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert finalized blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err := storage.saveAggkitBlock(nil, block1, true) + require.NoError(t, err) + + // Insert non-finalized blocks + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(nil, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(nil, block3, false) + require.NoError(t, err) + + blocks, err := storage.GetBlockHeadersNotFinalized(nil, nil) + + require.NoError(t, err) + require.Len(t, blocks, 2, "expected 2 non-finalized blocks") + }) + + t.Run("returns non-finalized blocks up to maxBlock", func(t *testing.T) { + storage := newStorageForTest(t, nil) + ctx := context.TODO() + tx, err := storage.NewTx(ctx) + require.NoError(t, err) + defer func() { + _ = tx.Rollback() + }() + // Insert non-finalized blocks + block1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + err = storage.saveAggkitBlock(tx, block1, false) + require.NoError(t, err) + + block2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + err = storage.saveAggkitBlock(tx, block2, false) + require.NoError(t, err) + + block3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + err = storage.saveAggkitBlock(tx, block3, false) + require.NoError(t, err) + + maxBlock := uint64(2500) + blocks, err := storage.GetBlockHeadersNotFinalized(tx, &maxBlock) + + require.NoError(t, err) + require.Len(t, blocks, 2, 
"expected 2 non-finalized blocks <= 2500") + // Verify that block 3000 is not included + for _, block := range blocks { + require.LessOrEqual(t, block.Number, maxBlock, "block number should be <= maxBlock") + } + }) +} + +func TestBlocks_Add(t *testing.T) { + blocks := NewBlocks() + require.True(t, blocks.IsEmpty()) + + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + + require.False(t, blocks.IsEmpty()) + require.Equal(t, 1, blocks.Len()) + require.Contains(t, blocks.Headers, header.Number) + require.True(t, blocks.AreFinal[header.Number]) +} + +func TestBlocks_Get(t *testing.T) { + t.Run("returns header and finality when exists", func(t *testing.T) { + blocks := NewBlocks() + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + + retrievedHeader, isFinal, err := blocks.Get(1000) + + require.NoError(t, err) + require.Equal(t, header, retrievedHeader) + require.True(t, isFinal) + }) + + t.Run("returns error when header not found", func(t *testing.T) { + blocks := NewBlocks() + + retrievedHeader, isFinal, err := blocks.Get(9999) + + require.Error(t, err) + require.Contains(t, err.Error(), "block header not found") + require.Nil(t, retrievedHeader) + require.False(t, isFinal) + }) +} + +func TestBlocks_ListHeaders(t *testing.T) { + blocks := NewBlocks() + + header1 := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + header2 := aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil) + header3 := aggkittypes.NewBlockHeader(3000, exampleTestHash[2], 1630002000, nil) + + blocks.Add(header1, true) + blocks.Add(header2, false) + blocks.Add(header3, true) + + headers := blocks.ListHeaders() + + require.Len(t, headers, 3) + // Verify all headers are present (order may vary since it's from a map) + headerNumbers := make(map[uint64]bool) + for _, h := range headers { + headerNumbers[h.Number] = true + } + require.True(t, headerNumbers[1000]) + require.True(t, headerNumbers[2000]) + require.True(t, headerNumbers[3000]) +} + +func TestBlocks_IsEmpty(t *testing.T) { + blocks := NewBlocks() + require.True(t, blocks.IsEmpty()) + + header := aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil) + blocks.Add(header, true) + require.False(t, blocks.IsEmpty()) +} + +func TestBlocks_Len(t *testing.T) { + blocks := NewBlocks() + require.Equal(t, 0, blocks.Len()) + + blocks.Add(aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil), true) + require.Equal(t, 1, blocks.Len()) + + blocks.Add(aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, nil), false) + require.Equal(t, 2, blocks.Len()) +} diff --git a/multidownloader/storage/storage_reorg.go b/multidownloader/storage/storage_reorg.go new file mode 100644 index 000000000..e5584550a --- /dev/null +++ b/multidownloader/storage/storage_reorg.go @@ -0,0 +1,217 @@ +package storage + +import ( + "database/sql" + "errors" + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +type reorgRow struct { + ReorgID uint64 `meddler:"reorg_id"` + DetectedAtBlock uint64 `meddler:"detected_at_block"` + ReorgedFromBlock uint64 `meddler:"reorged_from_block"` + ReorgedToBlock uint64 `meddler:"reorged_to_block"` + DetectedTimestamp uint64 
`meddler:"detected_timestamp"` + NetworkLatestBlock uint64 `meddler:"network_latest_block"` + NetworkFinalizedBlock uint64 `meddler:"network_finalized_block"` + NetworkFinalizedBlockName string `meddler:"network_finalized_block_name"` + Description string `meddler:"description"` +} + +func newReorgRowFromReorgData(reorgData mdrtypes.ReorgData) *reorgRow { + return &reorgRow{ + ReorgID: reorgData.ReorgID, + DetectedAtBlock: reorgData.DetectedAtBlock, + ReorgedFromBlock: reorgData.BlockRangeAffected.FromBlock, + ReorgedToBlock: reorgData.BlockRangeAffected.ToBlock, + DetectedTimestamp: reorgData.DetectedTimestamp, + NetworkLatestBlock: reorgData.NetworkLatestBlock, + NetworkFinalizedBlock: reorgData.NetworkFinalizedBlock, + NetworkFinalizedBlockName: reorgData.NetworkFinalizedBlockName.String(), + Description: reorgData.Description, + } +} + +// returns ReorgID of the inserted reorg +func (a *MultidownloaderStorage) InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, + reorgData mdrtypes.ReorgData) (uint64, error) { + if tx == nil { + return 0, fmt.Errorf("InsertNewReorg: requires a tx because it performs multiple operations that need to be atomic") + } + reorgRow := newReorgRowFromReorgData(reorgData) + a.mutex.Lock() + defer a.mutex.Unlock() + // Get Next ReorgID from storage using rowid + lastReorgID := struct { + ReorgID *uint64 `meddler:"reorg_id"` + }{} + err := meddler.QueryRow(tx, &lastReorgID, "SELECT MAX(reorg_id) as reorg_id FROM reorgs") + if err != nil { + return 0, fmt.Errorf("InsertNewReorg: error getting last reorg_id: %w", err) + } + if lastReorgID.ReorgID == nil { + reorgRow.ReorgID = 1 + } else { + reorgRow.ReorgID = *lastReorgID.ReorgID + 1 + } + + if err := meddler.Insert(tx, "reorgs", reorgRow); err != nil { + return 0, fmt.Errorf("InsertNewReorg: error inserting reorgs (%s): %w", reorgData.String(), err) + } + if err := a.moveReorgedBlocksAndLogsNoMutex(tx, reorgRow.ReorgID, + reorgData.BlockRangeAffected); err != nil { + return 0, fmt.Errorf("InsertNewReorg: error moving reorged blocks to block_reorged: %w", err) + } + // Adjust sync_status table to reflect the reorg + err = a.adjustSyncStatusForReorgNoMutex(tx, reorgData) + if err != nil { + return 0, fmt.Errorf("InsertNewReorg: error adjusting sync_status for reorg: %w", err) + } + return reorgRow.ReorgID, nil +} + +func (a *MultidownloaderStorage) moveReorgedBlocksAndLogsNoMutex(tx dbtypes.Querier, reorgID uint64, + blockRangeAffected aggkitcommon.BlockRange) error { + a.logger.Debugf("storage: moving blocks to blocks_reorged - reorg_id: %d, range: %s", + reorgID, blockRangeAffected.String()) + query := `INSERT INTO blocks_reorged (reorg_id, block_number, block_hash,block_parent_hash, block_timestamp) + SELECT ?, block_number, block_hash, block_parent_hash, block_timestamp + FROM blocks + WHERE block_number >= ? AND block_number <= ?; + INSERT INTO logs_reorged (reorg_id, block_number, address,topics, data, tx_hash, tx_index, log_index) + SELECT ?, block_number, address, topics, data, tx_hash, tx_index, log_index + FROM logs + WHERE block_number >= ? AND block_number <= ?; + DELETE FROM logs + WHERE block_number >= ? AND block_number <= ?; + DELETE FROM blocks + WHERE block_number >= ? 
AND block_number <= ?;` + _, err := tx.Exec(query, + reorgID, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + reorgID, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock, + blockRangeAffected.FromBlock, blockRangeAffected.ToBlock) + if err != nil { + return fmt.Errorf("moveReorgedBlocksAndLogsNoMutex: error moving reorged blocks to blocks_reorged: %w", err) + } + return nil +} + +func (a *MultidownloaderStorage) GetBlockReorgedReorgID(tx dbtypes.Querier, + blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + var reorgIDRow struct { + ReorgID *uint64 `meddler:"reorg_id"` + } + query := `SELECT br.reorg_id FROM blocks_reorged br + INNER JOIN reorgs r ON br.reorg_id = r.reorg_id + WHERE br.block_number = ? AND br.block_hash = ? + ORDER BY r.reorged_from_block ASC + LIMIT 1;` + err := tx.QueryRow(query, blockNumber, blockHash.Hex()).Scan(&reorgIDRow.ReorgID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, false, nil + } + return 0, false, fmt.Errorf("GetBlockReorgedReorgID: error querying blocks_reorged: %w", err) + } + if reorgIDRow.ReorgID == nil { + return 0, false, nil + } + return *reorgIDRow.ReorgID, true, nil +} + +func (a *MultidownloaderStorage) GetReorgedDataByReorgID(tx dbtypes.Querier, + reorgID uint64) (*mdrtypes.ReorgData, error) { + if tx == nil { + tx = a.db + } + a.mutex.RLock() + defer a.mutex.RUnlock() + + var row reorgRow + query := `SELECT reorg_id, detected_at_block, reorged_from_block, reorged_to_block, + detected_timestamp, network_latest_block, network_finalized_block, network_finalized_block_name, description + FROM reorgs WHERE reorg_id = ? LIMIT 1;` + + err := meddler.QueryRow(tx, &row, query, reorgID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("GetReorgedDataByReorgID: error querying reorgs table: %w", err) + } + + // Convert string to BlockNumberFinality + blockFinality, err := aggkittypes.NewBlockNumberFinality(row.NetworkFinalizedBlockName) + if err != nil { + return nil, fmt.Errorf("GetReorgedDataByReorgID: error parsing NetworkFinalizedBlockName: %w", err) + } + + reorgData := &mdrtypes.ReorgData{ + ReorgID: row.ReorgID, + BlockRangeAffected: aggkitcommon.NewBlockRange( + row.ReorgedFromBlock, + row.ReorgedToBlock, + ), + DetectedAtBlock: row.DetectedAtBlock, + DetectedTimestamp: row.DetectedTimestamp, + NetworkLatestBlock: row.NetworkLatestBlock, + NetworkFinalizedBlock: row.NetworkFinalizedBlock, + NetworkFinalizedBlockName: *blockFinality, + Description: row.Description, + } + + return reorgData, nil +} + +// adjustSyncStatusForReorgNoMutex adjusts the sync_status table after a reorg by setting +// synced_to_block to the block before the reorg started for all affected contracts +func (a *MultidownloaderStorage) adjustSyncStatusForReorgNoMutex(tx dbtypes.Querier, + reorgData mdrtypes.ReorgData) error { + if tx == nil { + return fmt.Errorf("adjustSyncStatusForReorgNoMutex: requires a tx to ensure atomicity") + } + // Calculate the new synced_to_block (one block before the reorg) + var newSyncedToBlock uint64 + if reorgData.BlockRangeAffected.FromBlock > 0 { + newSyncedToBlock = reorgData.BlockRangeAffected.FromBlock - 1 + } else { + newSyncedToBlock = 0 + } + + // Update all contracts that have synced beyond the reorg point + query := `UPDATE sync_status + SET synced_to_block = ? 
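+		-- rewind every contract whose synced progress reaches into the reorged range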
+ WHERE synced_to_block >= ?` + + result, err := tx.Exec(query, newSyncedToBlock, reorgData.BlockRangeAffected.FromBlock) + if err != nil { + return fmt.Errorf("adjustSyncStatusForReorgNoMutex: error updating sync_status: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("adjustSyncStatusForReorgNoMutex: error getting rows affected: %w", err) + } + + a.logger.Infof("adjustSyncStatusForReorgNoMutex: adjusted %d contract(s) to synced_to_block=%d "+ + "due to reorg at blocks [%d-%d]", + rowsAffected, newSyncedToBlock, + reorgData.BlockRangeAffected.FromBlock, reorgData.BlockRangeAffected.ToBlock) + + return nil +} diff --git a/multidownloader/storage/storage_reorg_test.go b/multidownloader/storage/storage_reorg_test.go new file mode 100644 index 000000000..8f3308821 --- /dev/null +++ b/multidownloader/storage/storage_reorg_test.go @@ -0,0 +1,262 @@ +package storage + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_InsertNewReorg(t *testing.T) { + storage := newStorageForTest(t, nil) + reorgData := mdrtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(5000, 5010), + DetectedAtBlock: 5020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 6000, + NetworkFinalizedBlock: 5990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(1), reorgID, "first reorg ID must be 1") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") + + tx, err = storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + reorgID, err = storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(2), reorgID, "second reorg ID must be 2") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") +} + +func TestStorage_InsertNewReorgAndMoveBlocks(t *testing.T) { + storage := newStorageForTest(t, nil) + populateLogsAndBlocksForTest(t, storage, + 5000, 20, 5) + + reorgData := mdrtypes.ReorgData{ + ReorgID: 0, // will be set by InsertReorgAndMoveReorgedBlocksAndLogs + BlockRangeAffected: aggkitcommon.NewBlockRange(5005, 5015), + DetectedAtBlock: 5020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 6000, + NetworkFinalizedBlock: 5990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err, "cannot start new transaction") + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, reorgData) + require.NoError(t, err, "cannot insert new reorg") + require.Equal(t, uint64(1), reorgID, "first reorg ID must be 1") + err = tx.Commit() + require.NoError(t, err, "cannot commit transaction") + // Blocks 5005-5015 should have been moved out of the blocks table into blocks_reorged + for i := uint64(5005); i <= 5015; i++ { + hdr, _, err := storage.GetBlockHeaderByNumber(nil, i) + require.NoError(t, err) + require.Nil(t, hdr, "block header should not be in blocks table anymore") + } +} + +func TestStorage_GetBlockReorgedReorgID_MultipleChains(t *testing.T) { + t.Run("returns reorg_id with lowest reorged_from_block when block exists in multiple 
chains", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // First, populate some blocks that will be reorged + populateLogsAndBlocksForTest(t, storage, 1000, 50, 2) + + // Create first reorg with reorged_from_block=1010 + reorgData1 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1010, 1020), + DetectedAtBlock: 1025, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: "First reorg", + } + + tx1, err := storage.NewTx(t.Context()) + require.NoError(t, err) + reorgID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + require.NoError(t, err) + require.Equal(t, uint64(1), reorgID1) + err = tx1.Commit() + require.NoError(t, err) + + // Create second reorg with reorged_from_block=1005 (lower than first) + reorgData2 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1005, 1009), + DetectedAtBlock: 1030, + DetectedTimestamp: 1630004000, + NetworkLatestBlock: 2100, + NetworkFinalizedBlock: 2090, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + Description: "Second reorg", + } + + tx2, err := storage.NewTx(t.Context()) + require.NoError(t, err) + reorgID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + require.NoError(t, err) + require.Equal(t, uint64(2), reorgID2) + err = tx2.Commit() + require.NoError(t, err) + + // The key test: insert the SAME block_number and block_hash into MULTIPLE chains + // This is the scenario the user wants to test - when a block exists in multiple reorg chains, + // the function should return the reorg_id with the lowest reorged_from_block + testBlockNumber := uint64(2000) // Use a block number outside the reorg ranges + testBlockHash := exampleTestHash[7] + + tx3, err := storage.NewTx(t.Context()) + require.NoError(t, err) + + // Insert the SAME block into chain 1 (reorged_from_block=1010) + _, err = tx3.Exec(`INSERT INTO blocks_reorged (reorg_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, reorgID1, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + require.NoError(t, err) + + // Insert the SAME block into chain 2 (reorged_from_block=1005, lower!) 
+ _, err = tx3.Exec(`INSERT INTO blocks_reorged (reorg_id, block_number, block_hash, block_parent_hash, block_timestamp) + VALUES (?, ?, ?, ?, ?)`, reorgID2, testBlockNumber, testBlockHash.Hex(), exampleTestHash[4].Hex(), 1630000000) + require.NoError(t, err) + + err = tx3.Commit() + require.NoError(t, err) + + // Query for the block - should return reorgID2 since it has the lowest reorged_from_block (1005 < 1010) + returnedReorgID, found, err := storage.GetBlockReorgedReorgID(nil, testBlockNumber, testBlockHash) + require.NoError(t, err) + require.True(t, found, "block should be found") + require.Equal(t, reorgID2, returnedReorgID, "should return reorg_id with lowest reorged_from_block (chain 2 with reorged_from_block=1005)") + + // Verify the reorged_from_block values to confirm our expectation + reorgData1Retrieved, err := storage.GetReorgedDataByReorgID(nil, reorgID1) + require.NoError(t, err) + require.Equal(t, uint64(1010), reorgData1Retrieved.BlockRangeAffected.FromBlock) + + reorgData2Retrieved, err := storage.GetReorgedDataByReorgID(nil, reorgID2) + require.NoError(t, err) + require.Equal(t, uint64(1005), reorgData2Retrieved.BlockRangeAffected.FromBlock) + }) + + t.Run("returns false when block not found in any chain", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Query for non-existent block + reorgID, found, err := storage.GetBlockReorgedReorgID(nil, 9999, exampleTestHash[0]) + require.NoError(t, err) + require.False(t, found, "block should not be found") + require.Equal(t, uint64(0), reorgID) + }) +} + +func TestStorage_GetReorgedDataByReorgID(t *testing.T) { + t.Run("returns reorg data when found", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Insert a reorg + expectedReorgData := mdrtypes.ReorgData{ + ReorgID: 0, // will be set by InsertNewReorg + BlockRangeAffected: aggkitcommon.NewBlockRange(1000, 1010), + DetectedAtBlock: 1020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) + reorgID, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx, expectedReorgData) + require.NoError(t, err) + require.Equal(t, uint64(1), reorgID) + err = tx.Commit() + require.NoError(t, err) + + // Retrieve the reorg data + reorgData, err := storage.GetReorgedDataByReorgID(nil, reorgID) + require.NoError(t, err) + require.NotNil(t, reorgData, "reorg data should not be nil when found") + require.Equal(t, reorgID, reorgData.ReorgID) + require.Equal(t, expectedReorgData.BlockRangeAffected, reorgData.BlockRangeAffected) + require.Equal(t, expectedReorgData.DetectedAtBlock, reorgData.DetectedAtBlock) + require.Equal(t, expectedReorgData.DetectedTimestamp, reorgData.DetectedTimestamp) + require.Equal(t, expectedReorgData.NetworkLatestBlock, reorgData.NetworkLatestBlock) + require.Equal(t, expectedReorgData.NetworkFinalizedBlock, reorgData.NetworkFinalizedBlock) + require.Equal(t, expectedReorgData.NetworkFinalizedBlockName, reorgData.NetworkFinalizedBlockName) + }) + + t.Run("returns nil when reorgID not found", func(t *testing.T) { + storage := newStorageForTest(t, nil) + + // Try to retrieve a non-existent reorgID + reorgData, err := storage.GetReorgedDataByReorgID(nil, 999) + require.NoError(t, err, "should not return error when reorgID not found") + require.Nil(t, reorgData, "reorg data should be nil when not found") + }) + + t.Run("returns correct data for multiple reorgs", func(t 
*testing.T) { + storage := newStorageForTest(t, nil) + + // Insert multiple reorgs + reorgData1 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(1000, 1010), + DetectedAtBlock: 1020, + DetectedTimestamp: 1630003000, + NetworkLatestBlock: 2000, + NetworkFinalizedBlock: 1990, + NetworkFinalizedBlockName: aggkittypes.FinalizedBlock, + } + + reorgData2 := mdrtypes.ReorgData{ + BlockRangeAffected: aggkitcommon.NewBlockRange(2000, 2020), + DetectedAtBlock: 2030, + DetectedTimestamp: 1630004000, + NetworkLatestBlock: 3000, + NetworkFinalizedBlock: 2990, + NetworkFinalizedBlockName: aggkittypes.SafeBlock, + } + + tx1, err := storage.NewTx(t.Context()) + require.NoError(t, err) + reorgID1, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx1, reorgData1) + require.NoError(t, err) + require.Equal(t, uint64(1), reorgID1) + err = tx1.Commit() + require.NoError(t, err) + + tx2, err := storage.NewTx(t.Context()) + require.NoError(t, err) + reorgID2, err := storage.InsertReorgAndMoveReorgedBlocksAndLogs(tx2, reorgData2) + require.NoError(t, err) + require.Equal(t, uint64(2), reorgID2) + err = tx2.Commit() + require.NoError(t, err) + + // Retrieve first reorg + retrieved1, err := storage.GetReorgedDataByReorgID(nil, reorgID1) + require.NoError(t, err) + require.NotNil(t, retrieved1) + require.Equal(t, reorgID1, retrieved1.ReorgID) + require.Equal(t, reorgData1.BlockRangeAffected, retrieved1.BlockRangeAffected) + require.Equal(t, reorgData1.NetworkFinalizedBlockName, retrieved1.NetworkFinalizedBlockName) + + // Retrieve second reorg + retrieved2, err := storage.GetReorgedDataByReorgID(nil, reorgID2) + require.NoError(t, err) + require.NotNil(t, retrieved2) + require.Equal(t, reorgID2, retrieved2.ReorgID) + require.Equal(t, reorgData2.BlockRangeAffected, retrieved2.BlockRangeAffected) + require.Equal(t, reorgData2.NetworkFinalizedBlockName, retrieved2.NetworkFinalizedBlockName) + }) +} diff --git a/multidownloader/storage/storage_sync.go b/multidownloader/storage/storage_sync.go new file mode 100644 index 000000000..2f2d13cdc --- /dev/null +++ b/multidownloader/storage/storage_sync.go @@ -0,0 +1,139 @@ +package storage + +import ( + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + dbtypes "github.com/agglayer/aggkit/db/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +type syncStatusRow struct { + Address common.Address `meddler:"contract_address,address"` + TargetFromBlock uint64 `meddler:"target_from_block"` + TargetToBlock string `meddler:"target_to_block"` + SyncedFromBlock uint64 `meddler:"synced_from_block"` + SyncedToBlock uint64 `meddler:"synced_to_block"` + SyncersIDs string `meddler:"syncers_id"` +} + +func (r *syncStatusRow) ToSyncSegment() (mdrtypes.SyncSegment, error) { + targetToBlock, err := aggkittypes.NewBlockNumberFinality(r.TargetToBlock) + if err != nil { + return mdrtypes.SyncSegment{}, fmt.Errorf("ToSyncSegment: error parsing target to block finality (%s): %w", + r.TargetToBlock, err) + } + var blockRange aggkitcommon.BlockRange + + if r.SyncedFromBlock == 0 && r.SyncedToBlock == 0 { + // We use value {0,0} to represent empty range in the database, but in the code + // we want to use the IsEmpty() method of BlockRange + blockRange = aggkitcommon.BlockRangeZero + } else { + blockRange = aggkitcommon.NewBlockRange(r.SyncedFromBlock, r.SyncedToBlock) + } + return mdrtypes.SyncSegment{ + ContractAddr: 
r.Address, + TargetToBlock: *targetToBlock, + BlockRange: blockRange, + }, nil +} + +func (a *MultidownloaderStorage) GetSyncedBlockRangePerContract(tx dbtypes.Querier) (mdrtypes.SetSyncSegment, error) { + a.mutex.RLock() + defer a.mutex.RUnlock() + result := make([]*syncStatusRow, 0) + if tx == nil { + tx = a.db + } + err := meddler.QueryAll(tx, &result, "SELECT * FROM sync_status") + if err != nil { + return mdrtypes.SetSyncSegment{}, fmt.Errorf("error querying sync status: %w", err) + } + setSegments := mdrtypes.NewSetSyncSegment() + for _, row := range result { + segment, err := row.ToSyncSegment() + if err != nil { + return mdrtypes.SetSyncSegment{}, + fmt.Errorf("GetSyncedBlockRangePerContract: error converting row to sync segment: %w", err) + } + setSegments.Add(segment) + } + return setSegments, nil +} + +func (a *MultidownloaderStorage) UpdateSyncedStatus(tx dbtypes.Querier, + segments []mdrtypes.SyncSegment) error { + if tx == nil { + tx = a.db + } + query := ` + UPDATE sync_status SET + synced_from_block = ?, + synced_to_block = ? + WHERE contract_address = ?; + ` + a.mutex.Lock() + defer a.mutex.Unlock() + for _, segment := range segments { + if !segment.IsValid() { + return fmt.Errorf("UpdateSyncedStatus: invalid segment %s", segment.String()) + } + br := segment.BlockRange + if br.IsEmpty() { + // We use value {0,0} to represent empty range in the database + br = aggkitcommon.BlockRangeZero + } + result, err := tx.Exec(query, br.FromBlock, + br.ToBlock, segment.ContractAddr.Hex()) + if err != nil { + return fmt.Errorf("error updating %s sync status: %w", segment.String(), err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("error getting rows affected for contract %s: %w", + segment.ContractAddr.Hex(), err) + } + if rowsAffected == 0 { + return fmt.Errorf("no rows updated for contract %s", segment.ContractAddr.Hex()) + } + } + return nil +} + +func (a *MultidownloaderStorage) UpsertSyncerConfigs(tx dbtypes.Querier, configs []mdrtypes.ContractConfig) error { + if tx == nil { + tx = a.db + } + a.mutex.Lock() + defer a.mutex.Unlock() + for _, config := range configs { + row := syncStatusRow{ + Address: config.Address, + TargetFromBlock: config.FromBlock, + TargetToBlock: config.ToBlock.String(), + SyncedFromBlock: 0, + SyncedToBlock: 0, + SyncersIDs: fmt.Sprintf("%v", config.Syncers), + } + // Upsert logic + query := ` + INSERT INTO sync_status (contract_address, target_from_block, + target_to_block, synced_from_block, synced_to_block, syncers_id) + VALUES (?, ?, ?, ?, ?, ?) 
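+			-- on conflict only the target bounds and syncer IDs are refreshed; synced_from/to progress is preserved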
+ ON CONFLICT(contract_address) DO UPDATE SET + target_from_block = excluded.target_from_block, + target_to_block = excluded.target_to_block, + syncers_id = excluded.syncers_id + ` + _, err := tx.Exec(query, row.Address.Hex(), row.TargetFromBlock, row.TargetToBlock, + row.SyncedFromBlock, row.SyncedToBlock, row.SyncersIDs) + if err != nil { + return fmt.Errorf("error updating sync status: %w", err) + } + } + return nil +} diff --git a/multidownloader/storage/storage_sync_test.go b/multidownloader/storage/storage_sync_test.go new file mode 100644 index 000000000..9c03da795 --- /dev/null +++ b/multidownloader/storage/storage_sync_test.go @@ -0,0 +1,194 @@ +package storage + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestStorage_GetSyncedBlockRangePerContract(t *testing.T) { + storage := newStorageForTest(t, nil) + data, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, "SetSyncSegment: ", data.String()) +} + +func TestStorage_UpsertSyncerConfigs(t *testing.T) { + storage := newStorageForTest(t, nil) + configs := []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1000, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 2000, + ToBlock: aggkittypes.LatestBlock, + }, + } + err := storage.UpsertSyncerConfigs(nil, configs) + require.NoError(t, err) + + // Upsert again with different start block + configsUpdated := []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1300, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 1600, + ToBlock: aggkittypes.FinalizedBlock, + }, + } + err = storage.UpsertSyncerConfigs(nil, configsUpdated) + require.NoError(t, err) + + syncSegments, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, 0, len(syncSegments.GetAddressesForBlockRange( + aggkitcommon.NewBlockRange(0, 10000), + )), + "There are no synced segments for the given block range", + ) + seg1, exists := syncSegments.GetByContract(exampleAddr1) + require.True(t, exists) + require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) + require.Equal(t, aggkitcommon.BlockRangeZero, seg1.BlockRange) + + seg2, exists := syncSegments.GetByContract(exampleAddr2) + require.True(t, exists) + require.Equal(t, aggkittypes.FinalizedBlock, seg2.TargetToBlock) +} + +func TestStorage_UpdateSyncedStatus(t *testing.T) { + storage := newStorageForTest(t, nil) + segments := []mdrtypes.SyncSegment{ + mdrtypes.NewSyncSegment( + exampleAddr1, + aggkitcommon.NewBlockRange(1000, 2000), + aggkittypes.FinalizedBlock, + true, + ), + mdrtypes.NewSyncSegment( + exampleAddr2, + aggkitcommon.NewBlockRange(1500, 2500), + aggkittypes.LatestBlock, + false, + ), + } + err := storage.UpsertSyncerConfigs(nil, []mdrtypes.ContractConfig{ + { + Address: exampleAddr1, + FromBlock: 1000, + ToBlock: aggkittypes.FinalizedBlock, + }, + { + Address: exampleAddr2, + FromBlock: 1500, + ToBlock: aggkittypes.LatestBlock, + }, + }) + require.NoError(t, err) + err = storage.UpdateSyncedStatus(nil, segments) + require.NoError(t, err) + + syncedSegments, err := storage.GetSyncedBlockRangePerContract(nil) + require.NoError(t, err) + require.Equal(t, 2, len(syncedSegments.GetAddressesForBlockRange( + aggkitcommon.NewBlockRange(0, 3000), + ))) + seg1, exists := 
+	seg1, exists := syncedSegments.GetByContract(exampleAddr1)
+	require.True(t, exists)
+	require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), seg1.BlockRange)
+	require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock)
+
+	seg2, exists := syncedSegments.GetByContract(exampleAddr2)
+	require.True(t, exists)
+	require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange)
+	require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock)
+
+	invalidSyncSegment := mdrtypes.NewSyncSegment(
+		exampleAddr1,
+		aggkitcommon.NewBlockRange(0, 0),
+		aggkittypes.FinalizedBlock,
+		true,
+	)
+	err = storage.UpdateSyncedStatus(nil, []mdrtypes.SyncSegment{invalidSyncSegment})
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "invalid segment")
+}
+
+func TestSyncStatusRow_ToSyncSegment(t *testing.T) {
+	t.Run("converts row to sync segment successfully with finalized block", func(t *testing.T) {
+		row := syncStatusRow{
+			Address:         exampleAddr1,
+			TargetFromBlock: 1000,
+			TargetToBlock:   "FinalizedBlock",
+			SyncedFromBlock: 1000,
+			SyncedToBlock:   2000,
+			SyncersIDs:      "syncer1,syncer2",
+		}
+
+		segment, err := row.ToSyncSegment()
+
+		require.NoError(t, err)
+		require.Equal(t, exampleAddr1, segment.ContractAddr)
+		require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), segment.BlockRange)
+		require.Equal(t, aggkittypes.FinalizedBlock, segment.TargetToBlock)
+	})
+
+	t.Run("converts row to sync segment successfully with latest block", func(t *testing.T) {
+		row := syncStatusRow{
+			Address:         exampleAddr2,
+			TargetFromBlock: 500,
+			TargetToBlock:   "LatestBlock",
+			SyncedFromBlock: 500,
+			SyncedToBlock:   1500,
+			SyncersIDs:      "syncer3",
+		}
+
+		segment, err := row.ToSyncSegment()
+
+		require.NoError(t, err)
+		require.Equal(t, exampleAddr2, segment.ContractAddr)
+		require.Equal(t, aggkitcommon.NewBlockRange(500, 1500), segment.BlockRange)
+		require.Equal(t, aggkittypes.LatestBlock, segment.TargetToBlock)
+	})
+
+	t.Run("returns error for invalid target to block finality", func(t *testing.T) {
+		row := syncStatusRow{
+			Address:         exampleAddr1,
+			TargetFromBlock: 1000,
+			TargetToBlock:   "invalid_finality",
+			SyncedFromBlock: 1000,
+			SyncedToBlock:   2000,
+			SyncersIDs:      "syncer1",
+		}
+
+		segment, err := row.ToSyncSegment()
+
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "error parsing target to block finality")
+		require.Equal(t, mdrtypes.SyncSegment{}, segment)
+	})
+	t.Run("empty range", func(t *testing.T) {
+		row := syncStatusRow{
+			Address:         exampleAddr1,
+			TargetFromBlock: 1000,
+			TargetToBlock:   "FinalizedBlock",
+			SyncedFromBlock: 0,
+			SyncedToBlock:   0,
+			SyncersIDs:      "syncer1",
+		}
+
+		segment, err := row.ToSyncSegment()
+		require.NoError(t, err)
+		require.True(t, segment.BlockRange.IsEmpty())
+	})
+}
diff --git a/multidownloader/storage/storage_test.go b/multidownloader/storage/storage_test.go
index 08256c9e9..57274b26f 100644
--- a/multidownloader/storage/storage_test.go
+++ b/multidownloader/storage/storage_test.go
@@ -1,10 +1,10 @@
 package storage
 
 import (
+	"encoding/json"
 	"path"
 	"testing"
 
-	aggkitcommon "github.com/agglayer/aggkit/common"
 	"github.com/agglayer/aggkit/log"
 	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
 	aggkittypes "github.com/agglayer/aggkit/types"
@@ -48,30 +48,6 @@ func TestStorage_Exploratory(t *testing.T) {
 	log.Infof("Retrieved block: %+v", block)
 }
 
-func TestStorage_GetBlock(t *testing.T) {
-	storage := newStorageForTest(t, nil)
-	// BlockBase not present
-	blockHeader, _, err := storage.GetBlockHeaderByNumber(nil, 1234)
-	require.NoError(t, err, "cannot get BlockHeader")
get BlockHeader") - require.Nil(t, blockHeader, "expected nil BlockHeader") - block := aggkittypes.NewBlockHeader(1234, exampleTestHash[0], 5678, &exampleTestHash[1]) - err = storage.saveAggkitBlock(nil, block, true) - require.NoError(t, err, "cannot insert BlockHeader") - // Get and verify block - readBlock, isFinal, err := storage.GetBlockHeaderByNumber(nil, 1234) - require.NoError(t, err, "cannot get BlockHeader") - require.NotNil(t, readBlock, "expected non-nil BlockHeader") - require.Equal(t, block, readBlock, "BlockHeader mismatch") - require.True(t, isFinal, "expected block to be final") - - blockNilParentHash := aggkittypes.NewBlockHeader(1235, exampleTestHash[0], 5678, nil) - err = storage.saveAggkitBlock(nil, blockNilParentHash, true) - require.NoError(t, err, "cannot get BlockHeader") - readBlock, _, err = storage.GetBlockHeaderByNumber(nil, blockNilParentHash.Number) - require.NoError(t, err, "cannot get BlockHeader") - require.Equal(t, blockNilParentHash, readBlock, "BlockHeader mismatch") -} - func TestStorage_GetLogs(t *testing.T) { storage := newStorageForTest(t, nil) // Logs not present @@ -197,106 +173,275 @@ func TestStorage_SaveEthLogsWithHeaders(t *testing.T) { require.Equal(t, logs[1], readLogs[1]) } -func TestStorage_GetSyncedBlockRangePerContract(t *testing.T) { - storage := newStorageForTest(t, nil) - data, err := storage.GetSyncedBlockRangePerContract(nil) - require.NoError(t, err) - require.Equal(t, "SetSyncSegment: ", data.String()) -} +func TestStorage_LogQuery(t *testing.T) { + t.Run("returns empty response when no logs exist", func(t *testing.T) { + storage := newStorageForTest(t, nil) + query := mdrtypes.NewLogQuery(1000, 2000, []common.Address{exampleAddr1}) -func TestStorage_UpsertSyncerConfigs(t *testing.T) { - storage := newStorageForTest(t, nil) - configs := []mdrtypes.ContractConfig{ - { - Address: exampleAddr1, - FromBlock: 1000, - ToBlock: aggkittypes.FinalizedBlock, - }, - { - Address: exampleAddr2, - FromBlock: 2000, - ToBlock: aggkittypes.LatestBlock, - }, - } - err := storage.UpsertSyncerConfigs(nil, configs) - require.NoError(t, err) + response, err := storage.LogQuery(nil, query) - // Upsert again with different start block - configsUpdated := []mdrtypes.ContractConfig{ - { - Address: exampleAddr1, - FromBlock: 1300, - ToBlock: aggkittypes.FinalizedBlock, - }, - { - Address: exampleAddr2, - FromBlock: 1600, - ToBlock: aggkittypes.FinalizedBlock, - }, - } - err = storage.UpsertSyncerConfigs(nil, configsUpdated) - require.NoError(t, err) + require.NoError(t, err) + require.Empty(t, response.Blocks) + require.Equal(t, query.BlockRange, response.ResponseRange) + }) - syncSegments, err := storage.GetSyncedBlockRangePerContract(nil) - require.NoError(t, err) - require.Equal(t, 2, len(syncSegments.GetAddressesForBlockRange( - aggkitcommon.NewBlockRange(0, 10000), - ))) - seg1, exists := syncSegments.GetByContract(exampleAddr1) - require.True(t, exists) - require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock) - - seg2, exists := syncSegments.GetByContract(exampleAddr2) - require.True(t, exists) - require.Equal(t, aggkittypes.FinalizedBlock, seg2.TargetToBlock) -} + t.Run("returns logs grouped by blocks with correct ordering", func(t *testing.T) { + storage := newStorageForTest(t, nil) + tx, err := storage.NewTx(t.Context()) + require.NoError(t, err) -func TestStorage_UpdateSyncedStatus(t *testing.T) { - storage := newStorageForTest(t, nil) - segments := []mdrtypes.SyncSegment{ - mdrtypes.NewSyncSegment( - exampleAddr1, - 
-			aggkitcommon.NewBlockRange(1000, 2000),
-			aggkittypes.FinalizedBlock,
-			true,
-		),
-		mdrtypes.NewSyncSegment(
-			exampleAddr2,
-			aggkitcommon.NewBlockRange(1500, 2500),
-			aggkittypes.LatestBlock,
-			false,
-		),
-	}
-	err := storage.UpsertSyncerConfigs(nil, []mdrtypes.ContractConfig{
-		{
-			Address:   exampleAddr1,
-			FromBlock: 1000,
-			ToBlock:   aggkittypes.FinalizedBlock,
-		},
-		{
-			Address:   exampleAddr2,
-			FromBlock: 1500,
-			ToBlock:   aggkittypes.LatestBlock,
-		},
+		// Create block headers
+		blockHeaders := []*aggkittypes.BlockHeader{
+			aggkittypes.NewBlockHeader(1000, exampleTestHash[0], 1630000000, nil),
+			aggkittypes.NewBlockHeader(1001, exampleTestHash[1], 1630000060, &exampleTestHash[0]),
+			aggkittypes.NewBlockHeader(1002, exampleTestHash[2], 1630000120, &exampleTestHash[1]),
+		}
+
+		// Create logs - multiple logs per block and across different blocks
+		logs := []types.Log{
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    1000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630000000,
+				Topics:         []common.Hash{exampleTestHash[3]},
+				Data:           []byte{0x01},
+				TxHash:         exampleTestHash[5],
+				TxIndex:        0,
+				Index:          0,
+			},
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    1000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630000000,
+				Topics:         []common.Hash{exampleTestHash[4]},
+				Data:           []byte{0x02},
+				TxHash:         exampleTestHash[5],
+				TxIndex:        1,
+				Index:          1,
+			},
+			{
+				Address:        exampleAddr2,
+				BlockNumber:    1001,
+				BlockHash:      exampleTestHash[1],
+				BlockTimestamp: 1630000060,
+				Topics:         []common.Hash{exampleTestHash[6]},
+				Data:           []byte{0x03},
+				TxHash:         exampleTestHash[7],
+				TxIndex:        0,
+				Index:          0,
+			},
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    1002,
+				BlockHash:      exampleTestHash[2],
+				BlockTimestamp: 1630000120,
+				Topics:         []common.Hash{exampleTestHash[8]},
+				Data:           []byte{0x04},
+				TxHash:         exampleTestHash[9],
+				TxIndex:        0,
+				Index:          0,
+			},
+		}
+
+		err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true)
+		require.NoError(t, err)
+		err = tx.Commit()
+		require.NoError(t, err)
+
+		// Query for logs from both addresses
+		query := mdrtypes.NewLogQuery(1000, 1002, []common.Address{exampleAddr1, exampleAddr2})
+		response, err := storage.LogQuery(nil, query)
+
+		require.NoError(t, err)
+		require.Equal(t, query.BlockRange, response.ResponseRange)
+		require.Len(t, response.Blocks, 3, "expected 3 blocks")
+
+		// Verify first block (block 1000) - has 2 logs from exampleAddr1
+		require.Equal(t, uint64(1000), response.Blocks[0].Header.Number)
+		require.Equal(t, exampleTestHash[0], response.Blocks[0].Header.Hash)
+		require.Equal(t, uint64(1630000000), response.Blocks[0].Header.Time)
+		require.True(t, response.Blocks[0].IsFinal)
+		require.Len(t, response.Blocks[0].Logs, 2)
+		require.Equal(t, exampleAddr1, response.Blocks[0].Logs[0].Address)
+		require.Equal(t, uint(0), response.Blocks[0].Logs[0].Index)
+		require.Equal(t, exampleAddr1, response.Blocks[0].Logs[1].Address)
+		require.Equal(t, uint(1), response.Blocks[0].Logs[1].Index)
+
+		// Verify second block (block 1001) - has 1 log from exampleAddr2
+		require.Equal(t, uint64(1001), response.Blocks[1].Header.Number)
+		require.Equal(t, exampleTestHash[1], response.Blocks[1].Header.Hash)
+		require.True(t, response.Blocks[1].IsFinal)
+		require.Len(t, response.Blocks[1].Logs, 1)
+		require.Equal(t, exampleAddr2, response.Blocks[1].Logs[0].Address)
+
+		// Verify third block (block 1002) - has 1 log from exampleAddr1
+		require.Equal(t, uint64(1002), response.Blocks[2].Header.Number)
+		require.Equal(t, exampleTestHash[2], response.Blocks[2].Header.Hash)
+		require.True(t, response.Blocks[2].IsFinal)
+		require.Len(t, response.Blocks[2].Logs, 1)
+		require.Equal(t, exampleAddr1, response.Blocks[2].Logs[0].Address)
 	})
-	require.NoError(t, err)
-	err = storage.UpdateSyncedStatus(nil, segments)
-	require.NoError(t, err)
-	syncedSegments, err := storage.GetSyncedBlockRangePerContract(nil)
-	require.NoError(t, err)
-	require.Equal(t, 2, len(syncedSegments.GetAddressesForBlockRange(
-		aggkitcommon.NewBlockRange(0, 3000),
-	)))
-	seg1, exists := syncedSegments.GetByContract(exampleAddr1)
-	require.True(t, exists)
-	require.Equal(t, aggkitcommon.NewBlockRange(1000, 2000), seg1.BlockRange)
-	require.Equal(t, aggkittypes.FinalizedBlock, seg1.TargetToBlock)
-
-	seg2, exists := syncedSegments.GetByContract(exampleAddr2)
-	require.True(t, exists)
-	require.Equal(t, aggkitcommon.NewBlockRange(1500, 2500), seg2.BlockRange)
-	require.Equal(t, aggkittypes.LatestBlock, seg2.TargetToBlock)
+	t.Run("filters logs by single address", func(t *testing.T) {
+		storage := newStorageForTest(t, nil)
+		tx, err := storage.NewTx(t.Context())
+		require.NoError(t, err)
+
+		blockHeaders := []*aggkittypes.BlockHeader{
+			aggkittypes.NewBlockHeader(2000, exampleTestHash[0], 1630001000, nil),
+		}
+
+		logs := []types.Log{
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    2000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630001000,
+				Topics:         []common.Hash{exampleTestHash[1]},
+				Data:           []byte{0xAA},
+				Index:          0,
+			},
+			{
+				Address:        exampleAddr2,
+				BlockNumber:    2000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630001000,
+				Topics:         []common.Hash{exampleTestHash[2]},
+				Data:           []byte{0xBB},
+				Index:          1,
+			},
+		}
+
+		err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, false)
+		require.NoError(t, err)
+		err = tx.Commit()
+		require.NoError(t, err)
+
+		// Query only for exampleAddr1
+		query := mdrtypes.NewLogQuery(2000, 2000, []common.Address{exampleAddr1})
+		response, err := storage.LogQuery(nil, query)
+
+		require.NoError(t, err)
+		require.Len(t, response.Blocks, 1)
+		require.Len(t, response.Blocks[0].Logs, 1)
+		require.Equal(t, exampleAddr1, response.Blocks[0].Logs[0].Address)
+		require.Equal(t, []byte{0xAA}, response.Blocks[0].Logs[0].Data)
+		require.False(t, response.Blocks[0].IsFinal, "expected block to not be final")
+	})
+
+	t.Run("respects block range boundaries", func(t *testing.T) {
+		storage := newStorageForTest(t, nil)
+		tx, err := storage.NewTx(t.Context())
+		require.NoError(t, err)
+
+		blockHeaders := []*aggkittypes.BlockHeader{
+			aggkittypes.NewBlockHeader(3000, exampleTestHash[0], 1630002000, nil),
+			aggkittypes.NewBlockHeader(3001, exampleTestHash[1], 1630002060, &exampleTestHash[0]),
+			aggkittypes.NewBlockHeader(3002, exampleTestHash[2], 1630002120, &exampleTestHash[1]),
+		}
+
+		logs := []types.Log{
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    3000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630002000,
+				Topics:         []common.Hash{},
+				Index:          0,
+			},
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    3001,
+				BlockHash:      exampleTestHash[1],
+				BlockTimestamp: 1630002060,
+				Topics:         []common.Hash{},
+				Index:          0,
+			},
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    3002,
+				BlockHash:      exampleTestHash[2],
+				BlockTimestamp: 1630002120,
+				Topics:         []common.Hash{},
+				Index:          0,
+			},
+		}
+
+		err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true)
+		require.NoError(t, err)
+		err = tx.Commit()
+		require.NoError(t, err)
+
+		// Query for middle block only
+		query := mdrtypes.NewLogQuery(3001, 3001, []common.Address{exampleAddr1})
+		response, err := storage.LogQuery(nil, query)
+
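+		// Only block 3001 should come back; blocks 3000 and 3002 fall outside the requested range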
+		require.NoError(t, err)
+		require.Len(t, response.Blocks, 1, "expected only 1 block in range")
+		require.Equal(t, uint64(3001), response.Blocks[0].Header.Number)
+	})
+
+	t.Run("preserves log field values correctly", func(t *testing.T) {
+		storage := newStorageForTest(t, nil)
+		tx, err := storage.NewTx(t.Context())
+		require.NoError(t, err)
+
+		parentHash := exampleTestHash[9]
+		blockHeaders := []*aggkittypes.BlockHeader{
+			aggkittypes.NewBlockHeader(4000, exampleTestHash[0], 1630003000, &parentHash),
+		}
+
+		expectedTopics := []common.Hash{exampleTestHash[1], exampleTestHash[2], exampleTestHash[3]}
+		expectedData := []byte{0xDE, 0xAD, 0xBE, 0xEF}
+		expectedTxHash := exampleTestHash[5]
+
+		logs := []types.Log{
+			{
+				Address:        exampleAddr1,
+				BlockNumber:    4000,
+				BlockHash:      exampleTestHash[0],
+				BlockTimestamp: 1630003000,
+				Topics:         expectedTopics,
+				Data:           expectedData,
+				TxHash:         expectedTxHash,
+				TxIndex:        42,
+				Index:          7,
+			},
+		}
+
+		err = storage.SaveEthLogsWithHeaders(tx, blockHeaders, logs, true)
+		require.NoError(t, err)
+		err = tx.Commit()
+		require.NoError(t, err)
+
+		query := mdrtypes.NewLogQuery(4000, 4000, []common.Address{exampleAddr1})
+		response, err := storage.LogQuery(nil, query)
+
+		require.NoError(t, err)
+		require.Len(t, response.Blocks, 1)
+		require.Len(t, response.Blocks[0].Logs, 1)
+
+		log := response.Blocks[0].Logs[0]
+		require.Equal(t, exampleAddr1, log.Address)
+		require.Equal(t, expectedTopics, log.Topics)
+		require.Equal(t, expectedData, log.Data)
+		require.Equal(t, expectedTxHash, log.TxHash)
+		require.Equal(t, uint(42), log.TxIndex)
+		require.Equal(t, uint(7), log.Index)
+		require.Equal(t, uint64(4000), log.BlockNumber)
+		require.Equal(t, uint64(1630003000), log.BlockTimestamp)
+		require.False(t, log.Removed)
+
+		// Verify block header fields
+		header := response.Blocks[0].Header
+		require.Equal(t, uint64(4000), header.Number)
+		require.Equal(t, exampleTestHash[0], header.Hash)
+		require.Equal(t, uint64(1630003000), header.Time)
+		require.NotNil(t, header.ParentHash)
+		require.Equal(t, parentHash, *header.ParentHash)
+	})
 }
 
 func TestStorage_UpdateIsFinal(t *testing.T) {
@@ -311,7 +456,10 @@ func TestStorage_UpdateIsFinal(t *testing.T) {
 	require.Equal(t, block, readBlock, "BlockHeader mismatch")
 	require.False(t, isFinal, "expected block to not be final")
 
-	err = storage.updateIsFinal(nil, []uint64{block.Number})
+	err = storage.UpdateBlockToFinalized(nil, []uint64{})
+	require.NoError(t, err, "if no blocks provided, should be no-op")
+
+	err = storage.UpdateBlockToFinalized(nil, []uint64{block.Number})
 	require.NoError(t, err, "cannot update IsFinal")
 
 	readBlock, isFinal, err = storage.GetBlockHeaderByNumber(nil, block.Number)
@@ -372,3 +520,212 @@ func newStorageForTest(t *testing.T, dbFileFullPath *string) *MultidownloaderSto
 	require.NoError(t, err, "cannot create storage")
 	return storage
 }
+
+func populateLogsAndBlocksForTest(t *testing.T, storage *MultidownloaderStorage,
+	startingBlock uint64, numBlocks int, logsPerBlock int) {
+	t.Helper()
+	var blocks []*aggkittypes.BlockHeader
+	var logs []types.Log
+	for i := 0; i < numBlocks; i++ {
+		blockNumber := startingBlock + uint64(i)
+		blockHash := exampleTestHash[i%len(exampleTestHash)]
+		var parentHash *common.Hash
+		if i > 0 {
+			parentHash = &exampleTestHash[(i-1)%len(exampleTestHash)]
+		}
+		block := aggkittypes.NewBlockHeader(blockNumber, blockHash, 1630000000+uint64(i*60), parentHash)
+		blocks = append(blocks, block)
+
+		for j := 0; j < logsPerBlock; j++ {
+			logEntry := types.Log{
+				Address: exampleAddr1,
+				BlockNumber:    blockNumber,
+				BlockHash:      blockHash,
+				BlockTimestamp: 1630000000 + uint64(i*60),
+				Topics: []common.Hash{
+					exampleTestHash[j%len(exampleTestHash)],
+				},
+				Data:    []byte{0x01, 0x02, byte(j)},
+				TxHash:  exampleTestHash[(i+j)%len(exampleTestHash)],
+				TxIndex: uint(100 + j),
+				Index:   uint(10 + j),
+			}
+			logs = append(logs, logEntry)
+		}
+	}
+
+	err := storage.SaveEthLogsWithHeaders(nil, blocks, logs, true)
+	require.NoError(t, err, "cannot populate logs and blocks")
+}
+
+func TestNewLogRowFromEthLog(t *testing.T) {
+	ethLog := types.Log{
+		Address:        exampleAddr1,
+		BlockNumber:    1234,
+		BlockHash:      exampleTestHash[0],
+		BlockTimestamp: 1630000000,
+		Topics: []common.Hash{
+			exampleTestHash[1],
+			exampleTestHash[2],
+		},
+		Data:    []byte{0xDE, 0xAD, 0xBE, 0xEF},
+		TxHash:  exampleTestHash[3],
+		TxIndex: 42,
+		Index:   7,
+	}
+
+	row := NewLogRowFromEthLog(ethLog)
+
+	require.NotNil(t, row)
+	require.Equal(t, ethLog.Address, row.Address)
+	require.Equal(t, ethLog.BlockNumber, row.BlockNumber)
+	require.Equal(t, ethLog.Data, row.Data)
+	require.Equal(t, ethLog.TxHash, row.TxHash)
+	require.Equal(t, ethLog.TxIndex, row.TxIndex)
+	require.Equal(t, ethLog.Index, row.Index)
+
+	// Verify topics are correctly marshaled as JSON
+	var topics []common.Hash
+	err := json.Unmarshal([]byte(row.Topics), &topics)
+	require.NoError(t, err)
+	require.Equal(t, ethLog.Topics, topics)
+}
+
+func TestNewLogRowsFromEthLogs(t *testing.T) {
+	ethLogs := []types.Log{
+		{
+			Address:        exampleAddr1,
+			BlockNumber:    1000,
+			BlockHash:      exampleTestHash[0],
+			BlockTimestamp: 1630000000,
+			Topics:         []common.Hash{exampleTestHash[1]},
+			Data:           []byte{0x01},
+			TxHash:         exampleTestHash[2],
+			TxIndex:        10,
+			Index:          0,
+		},
+		{
+			Address:        exampleAddr2,
+			BlockNumber:    1001,
+			BlockHash:      exampleTestHash[1],
+			BlockTimestamp: 1630000060,
+			Topics:         []common.Hash{exampleTestHash[3], exampleTestHash[4]},
+			Data:           []byte{0x02, 0x03},
+			TxHash:         exampleTestHash[5],
+			TxIndex:        20,
+			Index:          1,
+		},
+	}
+
+	rows := NewLogRowsFromEthLogs(ethLogs)
+
+	require.Len(t, rows, 2)
+	require.Equal(t, ethLogs[0].Address, rows[0].Address)
+	require.Equal(t, ethLogs[0].BlockNumber, rows[0].BlockNumber)
+	require.Equal(t, ethLogs[1].Address, rows[1].Address)
+	require.Equal(t, ethLogs[1].BlockNumber, rows[1].BlockNumber)
+}
+
+func TestNewBlockRowFromEthLog(t *testing.T) {
+	ethLog := types.Log{
+		Address:        exampleAddr1,
+		BlockNumber:    5000,
+		BlockHash:      exampleTestHash[0],
+		BlockTimestamp: 1630002000,
+		Topics:         []common.Hash{exampleTestHash[1]},
+		Data:           []byte{0x01},
+	}
+
+	row := NewBlockRowFromEthLog(ethLog, true)
+
+	require.NotNil(t, row)
+	require.Equal(t, ethLog.BlockNumber, row.BlockNumber)
+	require.Equal(t, ethLog.BlockHash, row.BlockHash)
+	require.Equal(t, ethLog.BlockTimestamp, row.BlockTimestamp)
+	require.Nil(t, row.BlockParentHash)
+	require.True(t, row.IsFinal)
+
+	rowNotFinal := NewBlockRowFromEthLog(ethLog, false)
+	require.False(t, rowNotFinal.IsFinal)
+}
+
+func TestNewBlockRowFromAggkitBlock(t *testing.T) {
+	parentHash := exampleTestHash[0]
+	block := aggkittypes.NewBlockHeader(3000, exampleTestHash[1], 1630003000, &parentHash)
+
+	row := newBlockRowFromAggkitBlock(block, true)
+
+	require.NotNil(t, row)
+	require.Equal(t, block.Number, row.BlockNumber)
+	require.Equal(t, block.Hash, row.BlockHash)
+	require.Equal(t, block.Time, row.BlockTimestamp)
+	require.NotNil(t, row.BlockParentHash)
+	require.Equal(t, parentHash, *row.BlockParentHash)
+	require.True(t, row.IsFinal)
+}
+
+func TestNewBlockRowsFromLogs(t *testing.T) {
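+	// Three logs spanning two distinct blocks: block rows must be deduplicated per block number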
+	logs := []types.Log{
+		{
+			Address:        exampleAddr1,
+			BlockNumber:    1000,
+			BlockHash:      exampleTestHash[0],
+			BlockTimestamp: 1630000000,
+			Topics:         []common.Hash{exampleTestHash[1]},
+			Data:           []byte{0x01},
+		},
+		{
+			Address:        exampleAddr1,
+			BlockNumber:    1000,
+			BlockHash:      exampleTestHash[0],
+			BlockTimestamp: 1630000000,
+			Topics:         []common.Hash{exampleTestHash[2]},
+			Data:           []byte{0x02},
+		},
+		{
+			Address:        exampleAddr2,
+			BlockNumber:    1001,
+			BlockHash:      exampleTestHash[1],
+			BlockTimestamp: 1630000060,
+			Topics:         []common.Hash{exampleTestHash[3]},
+			Data:           []byte{0x03},
+		},
+	}
+
+	blockRows := NewBlockRowsFromLogs(logs, true)
+
+	require.Len(t, blockRows, 2, "expected 2 unique blocks")
+	require.NotNil(t, blockRows[1000])
+	require.Equal(t, uint64(1000), blockRows[1000].BlockNumber)
+	require.Equal(t, exampleTestHash[0], blockRows[1000].BlockHash)
+	require.True(t, blockRows[1000].IsFinal)
+	require.NotNil(t, blockRows[1001])
+	require.Equal(t, uint64(1001), blockRows[1001].BlockNumber)
+	require.Equal(t, exampleTestHash[1], blockRows[1001].BlockHash)
+	require.True(t, blockRows[1001].IsFinal)
+}
+
+func TestNewBlockRowsFromAggkitBlock(t *testing.T) {
+	parentHash1 := exampleTestHash[0]
+	parentHash2 := exampleTestHash[1]
+	blockHeaders := aggkittypes.ListBlockHeaders{
+		aggkittypes.NewBlockHeader(2000, exampleTestHash[1], 1630001000, &parentHash1),
+		aggkittypes.NewBlockHeader(2001, exampleTestHash[2], 1630001060, &parentHash2),
+	}
+
+	blockRows := NewBlockRowsFromAggkitBlock(blockHeaders, false)
+
+	require.Len(t, blockRows, 2)
+	require.NotNil(t, blockRows[2000])
+	require.Equal(t, uint64(2000), blockRows[2000].BlockNumber)
+	require.Equal(t, exampleTestHash[1], blockRows[2000].BlockHash)
+	require.NotNil(t, blockRows[2000].BlockParentHash)
+	require.Equal(t, parentHash1, *blockRows[2000].BlockParentHash)
+	require.False(t, blockRows[2000].IsFinal)
+
+	require.NotNil(t, blockRows[2001])
+	require.Equal(t, uint64(2001), blockRows[2001].BlockNumber)
+	require.NotNil(t, blockRows[2001].BlockParentHash)
+	require.Equal(t, parentHash2, *blockRows[2001].BlockParentHash)
+	require.False(t, blockRows[2001].IsFinal)
+}
diff --git a/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go b/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go
new file mode 100644
index 000000000..441469600
--- /dev/null
+++ b/multidownloader/sync/agglayer/mocks/mock_certificate_submission_service_client.go
@@ -0,0 +1,114 @@
+// Code generated by mockery v2.53.5. DO NOT EDIT.
+ +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// CertificateSubmissionServiceClient is an autogenerated mock type for the CertificateSubmissionServiceClient type +type CertificateSubmissionServiceClient struct { + mock.Mock +} + +type CertificateSubmissionServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *CertificateSubmissionServiceClient) EXPECT() *CertificateSubmissionServiceClient_Expecter { + return &CertificateSubmissionServiceClient_Expecter{mock: &_m.Mock} +} + +// SubmitCertificate provides a mock function with given fields: ctx, in, opts +func (_m *CertificateSubmissionServiceClient) SubmitCertificate(ctx context.Context, in *nodev1.SubmitCertificateRequest, opts ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubmitCertificate") + } + + var r0 *nodev1.SubmitCertificateResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) *nodev1.SubmitCertificateResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.SubmitCertificateResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CertificateSubmissionServiceClient_SubmitCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitCertificate' +type CertificateSubmissionServiceClient_SubmitCertificate_Call struct { + *mock.Call +} + +// SubmitCertificate is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.SubmitCertificateRequest +// - opts ...grpc.CallOption +func (_e *CertificateSubmissionServiceClient_Expecter) SubmitCertificate(ctx interface{}, in interface{}, opts ...interface{}) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + return &CertificateSubmissionServiceClient_SubmitCertificate_Call{Call: _e.mock.On("SubmitCertificate", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) Run(run func(ctx context.Context, in *nodev1.SubmitCertificateRequest, opts ...grpc.CallOption)) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.SubmitCertificateRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) Return(_a0 *nodev1.SubmitCertificateResponse, _a1 error) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CertificateSubmissionServiceClient_SubmitCertificate_Call) RunAndReturn(run func(context.Context, *nodev1.SubmitCertificateRequest, ...grpc.CallOption) (*nodev1.SubmitCertificateResponse, error)) *CertificateSubmissionServiceClient_SubmitCertificate_Call { + _c.Call.Return(run) + return _c +} + +// NewCertificateSubmissionServiceClient creates a new instance of CertificateSubmissionServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCertificateSubmissionServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *CertificateSubmissionServiceClient { + mock := &CertificateSubmissionServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go b/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go new file mode 100644 index 000000000..4ec9191b4 --- /dev/null +++ b/multidownloader/sync/agglayer/mocks/mock_configuration_service_client.go @@ -0,0 +1,114 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// ConfigurationServiceClient is an autogenerated mock type for the ConfigurationServiceClient type +type ConfigurationServiceClient struct { + mock.Mock +} + +type ConfigurationServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *ConfigurationServiceClient) EXPECT() *ConfigurationServiceClient_Expecter { + return &ConfigurationServiceClient_Expecter{mock: &_m.Mock} +} + +// GetEpochConfiguration provides a mock function with given fields: ctx, in, opts +func (_m *ConfigurationServiceClient) GetEpochConfiguration(ctx context.Context, in *nodev1.GetEpochConfigurationRequest, opts ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetEpochConfiguration") + } + + var r0 *nodev1.GetEpochConfigurationResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) *nodev1.GetEpochConfigurationResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetEpochConfigurationResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConfigurationServiceClient_GetEpochConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEpochConfiguration' +type ConfigurationServiceClient_GetEpochConfiguration_Call struct { + *mock.Call +} + +// GetEpochConfiguration is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetEpochConfigurationRequest +// - opts ...grpc.CallOption +func (_e *ConfigurationServiceClient_Expecter) GetEpochConfiguration(ctx interface{}, in interface{}, opts ...interface{}) *ConfigurationServiceClient_GetEpochConfiguration_Call { + return &ConfigurationServiceClient_GetEpochConfiguration_Call{Call: _e.mock.On("GetEpochConfiguration", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) Run(run func(ctx context.Context, in *nodev1.GetEpochConfigurationRequest, opts ...grpc.CallOption)) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetEpochConfigurationRequest), variadicArgs...) + }) + return _c +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) Return(_a0 *nodev1.GetEpochConfigurationResponse, _a1 error) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ConfigurationServiceClient_GetEpochConfiguration_Call) RunAndReturn(run func(context.Context, *nodev1.GetEpochConfigurationRequest, ...grpc.CallOption) (*nodev1.GetEpochConfigurationResponse, error)) *ConfigurationServiceClient_GetEpochConfiguration_Call { + _c.Call.Return(run) + return _c +} + +// NewConfigurationServiceClient creates a new instance of ConfigurationServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfigurationServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ConfigurationServiceClient { + mock := &ConfigurationServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go b/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go new file mode 100644 index 000000000..e5f3ed15d --- /dev/null +++ b/multidownloader/sync/agglayer/mocks/mock_node_state_service_client.go @@ -0,0 +1,262 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + nodev1 "buf.build/gen/go/agglayer/agglayer/protocolbuffers/go/agglayer/node/v1" +) + +// NodeStateServiceClient is an autogenerated mock type for the NodeStateServiceClient type +type NodeStateServiceClient struct { + mock.Mock +} + +type NodeStateServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeStateServiceClient) EXPECT() *NodeStateServiceClient_Expecter { + return &NodeStateServiceClient_Expecter{mock: &_m.Mock} +} + +// GetCertificateHeader provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetCertificateHeader(ctx context.Context, in *nodev1.GetCertificateHeaderRequest, opts ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateHeader") + } + + var r0 *nodev1.GetCertificateHeaderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) *nodev1.GetCertificateHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetCertificateHeaderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateHeader' +type NodeStateServiceClient_GetCertificateHeader_Call struct { + *mock.Call +} + +// GetCertificateHeader is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetCertificateHeaderRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetCertificateHeader(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetCertificateHeader_Call { + return &NodeStateServiceClient_GetCertificateHeader_Call{Call: _e.mock.On("GetCertificateHeader", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) Run(run func(ctx context.Context, in *nodev1.GetCertificateHeaderRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetCertificateHeaderRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) Return(_a0 *nodev1.GetCertificateHeaderResponse, _a1 error) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetCertificateHeader_Call) RunAndReturn(run func(context.Context, *nodev1.GetCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetCertificateHeaderResponse, error)) *NodeStateServiceClient_GetCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestCertificateHeader provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetLatestCertificateHeader(ctx context.Context, in *nodev1.GetLatestCertificateHeaderRequest, opts ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetLatestCertificateHeader") + } + + var r0 *nodev1.GetLatestCertificateHeaderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) *nodev1.GetLatestCertificateHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetLatestCertificateHeaderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetLatestCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestCertificateHeader' +type NodeStateServiceClient_GetLatestCertificateHeader_Call struct { + *mock.Call +} + +// GetLatestCertificateHeader is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetLatestCertificateHeaderRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetLatestCertificateHeader(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + return &NodeStateServiceClient_GetLatestCertificateHeader_Call{Call: _e.mock.On("GetLatestCertificateHeader", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) Run(run func(ctx context.Context, in *nodev1.GetLatestCertificateHeaderRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetLatestCertificateHeaderRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) Return(_a0 *nodev1.GetLatestCertificateHeaderResponse, _a1 error) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetLatestCertificateHeader_Call) RunAndReturn(run func(context.Context, *nodev1.GetLatestCertificateHeaderRequest, ...grpc.CallOption) (*nodev1.GetLatestCertificateHeaderResponse, error)) *NodeStateServiceClient_GetLatestCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + +// GetNetworkInfo provides a mock function with given fields: ctx, in, opts +func (_m *NodeStateServiceClient) GetNetworkInfo(ctx context.Context, in *nodev1.GetNetworkInfoRequest, opts ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetNetworkInfo") + } + + var r0 *nodev1.GetNetworkInfoResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) *nodev1.GetNetworkInfoResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*nodev1.GetNetworkInfoResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStateServiceClient_GetNetworkInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkInfo' +type NodeStateServiceClient_GetNetworkInfo_Call struct { + *mock.Call +} + +// GetNetworkInfo is a helper method to define mock.On call +// - ctx context.Context +// - in *nodev1.GetNetworkInfoRequest +// - opts ...grpc.CallOption +func (_e *NodeStateServiceClient_Expecter) GetNetworkInfo(ctx interface{}, in interface{}, opts ...interface{}) *NodeStateServiceClient_GetNetworkInfo_Call { + return &NodeStateServiceClient_GetNetworkInfo_Call{Call: _e.mock.On("GetNetworkInfo", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) Run(run func(ctx context.Context, in *nodev1.GetNetworkInfoRequest, opts ...grpc.CallOption)) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*nodev1.GetNetworkInfoRequest), variadicArgs...) + }) + return _c +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) Return(_a0 *nodev1.GetNetworkInfoResponse, _a1 error) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeStateServiceClient_GetNetworkInfo_Call) RunAndReturn(run func(context.Context, *nodev1.GetNetworkInfoRequest, ...grpc.CallOption) (*nodev1.GetNetworkInfoResponse, error)) *NodeStateServiceClient_GetNetworkInfo_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeStateServiceClient creates a new instance of NodeStateServiceClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeStateServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeStateServiceClient { + mock := &NodeStateServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go new file mode 100644 index 000000000..d29665525 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_client.go @@ -0,0 +1,188 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + proverv1 "buf.build/gen/go/agglayer/provers/protocolbuffers/go/aggkit/prover/v1" +) + +// AggchainProofServiceClient is an autogenerated mock type for the AggchainProofServiceClient type +type AggchainProofServiceClient struct { + mock.Mock +} + +type AggchainProofServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *AggchainProofServiceClient) EXPECT() *AggchainProofServiceClient_Expecter { + return &AggchainProofServiceClient_Expecter{mock: &_m.Mock} +} + +// GenerateAggchainProof provides a mock function with given fields: ctx, in, opts +func (_m *AggchainProofServiceClient) GenerateAggchainProof(ctx context.Context, in *proverv1.GenerateAggchainProofRequest, opts ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GenerateAggchainProof") + } + + var r0 *proverv1.GenerateAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) *proverv1.GenerateAggchainProofResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceClient_GenerateAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateAggchainProof' +type AggchainProofServiceClient_GenerateAggchainProof_Call struct { + *mock.Call +} + +// GenerateAggchainProof is a helper method to define mock.On call +// - ctx context.Context +// - in *proverv1.GenerateAggchainProofRequest +// - opts ...grpc.CallOption +func (_e *AggchainProofServiceClient_Expecter) GenerateAggchainProof(ctx interface{}, in interface{}, opts ...interface{}) *AggchainProofServiceClient_GenerateAggchainProof_Call { + return &AggchainProofServiceClient_GenerateAggchainProof_Call{Call: _e.mock.On("GenerateAggchainProof", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) Run(run func(ctx context.Context, in *proverv1.GenerateAggchainProofRequest, opts ...grpc.CallOption)) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*proverv1.GenerateAggchainProofRequest), variadicArgs...) + }) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) Return(_a0 *proverv1.GenerateAggchainProofResponse, _a1 error) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateAggchainProofResponse, error)) *AggchainProofServiceClient_GenerateAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// GenerateOptimisticAggchainProof provides a mock function with given fields: ctx, in, opts +func (_m *AggchainProofServiceClient) GenerateOptimisticAggchainProof(ctx context.Context, in *proverv1.GenerateOptimisticAggchainProofRequest, opts ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GenerateOptimisticAggchainProof") + } + + var r0 *proverv1.GenerateOptimisticAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) *proverv1.GenerateOptimisticAggchainProofResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateOptimisticAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateOptimisticAggchainProof' +type AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call struct { + *mock.Call +} + +// GenerateOptimisticAggchainProof is a helper method to define mock.On call +// - ctx context.Context +// - in *proverv1.GenerateOptimisticAggchainProofRequest +// - opts ...grpc.CallOption +func (_e *AggchainProofServiceClient_Expecter) GenerateOptimisticAggchainProof(ctx interface{}, in interface{}, opts ...interface{}) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + return &AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call{Call: _e.mock.On("GenerateOptimisticAggchainProof", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) Run(run func(ctx context.Context, in *proverv1.GenerateOptimisticAggchainProofRequest, opts ...grpc.CallOption)) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*proverv1.GenerateOptimisticAggchainProofRequest), variadicArgs...) + }) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) Return(_a0 *proverv1.GenerateOptimisticAggchainProofResponse, _a1 error) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest, ...grpc.CallOption) (*proverv1.GenerateOptimisticAggchainProofResponse, error)) *AggchainProofServiceClient_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// NewAggchainProofServiceClient creates a new instance of AggchainProofServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggchainProofServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *AggchainProofServiceClient { + mock := &AggchainProofServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go new file mode 100644 index 000000000..1b8617a37 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_aggchain_proof_service_server.go @@ -0,0 +1,155 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + proverv1 "buf.build/gen/go/agglayer/provers/protocolbuffers/go/aggkit/prover/v1" + mock "github.com/stretchr/testify/mock" +) + +// AggchainProofServiceServer is an autogenerated mock type for the AggchainProofServiceServer type +type AggchainProofServiceServer struct { + mock.Mock +} + +type AggchainProofServiceServer_Expecter struct { + mock *mock.Mock +} + +func (_m *AggchainProofServiceServer) EXPECT() *AggchainProofServiceServer_Expecter { + return &AggchainProofServiceServer_Expecter{mock: &_m.Mock} +} + +// GenerateAggchainProof provides a mock function with given fields: _a0, _a1 +func (_m *AggchainProofServiceServer) GenerateAggchainProof(_a0 context.Context, _a1 *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GenerateAggchainProof") + } + + var r0 *proverv1.GenerateAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateAggchainProofRequest) *proverv1.GenerateAggchainProofResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateAggchainProofRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceServer_GenerateAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateAggchainProof' +type AggchainProofServiceServer_GenerateAggchainProof_Call struct { + *mock.Call +} + +// GenerateAggchainProof is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *proverv1.GenerateAggchainProofRequest +func (_e *AggchainProofServiceServer_Expecter) GenerateAggchainProof(_a0 interface{}, _a1 interface{}) *AggchainProofServiceServer_GenerateAggchainProof_Call { + return &AggchainProofServiceServer_GenerateAggchainProof_Call{Call: _e.mock.On("GenerateAggchainProof", _a0, _a1)} +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) Run(run func(_a0 context.Context, _a1 *proverv1.GenerateAggchainProofRequest)) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*proverv1.GenerateAggchainProofRequest)) + }) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) Return(_a0 *proverv1.GenerateAggchainProofResponse, _a1 error) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateAggchainProofRequest) (*proverv1.GenerateAggchainProofResponse, error)) *AggchainProofServiceServer_GenerateAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// GenerateOptimisticAggchainProof provides a mock function with given fields: _a0, _a1 +func (_m *AggchainProofServiceServer) GenerateOptimisticAggchainProof(_a0 context.Context, _a1 *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value 
specified for GenerateOptimisticAggchainProof") + } + + var r0 *proverv1.GenerateOptimisticAggchainProofResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) *proverv1.GenerateOptimisticAggchainProofResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proverv1.GenerateOptimisticAggchainProofResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateOptimisticAggchainProof' +type AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call struct { + *mock.Call +} + +// GenerateOptimisticAggchainProof is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *proverv1.GenerateOptimisticAggchainProofRequest +func (_e *AggchainProofServiceServer_Expecter) GenerateOptimisticAggchainProof(_a0 interface{}, _a1 interface{}) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + return &AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call{Call: _e.mock.On("GenerateOptimisticAggchainProof", _a0, _a1)} +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) Run(run func(_a0 context.Context, _a1 *proverv1.GenerateOptimisticAggchainProofRequest)) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*proverv1.GenerateOptimisticAggchainProofRequest)) + }) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) Return(_a0 *proverv1.GenerateOptimisticAggchainProofResponse, _a1 error) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call) RunAndReturn(run func(context.Context, *proverv1.GenerateOptimisticAggchainProofRequest) (*proverv1.GenerateOptimisticAggchainProofResponse, error)) *AggchainProofServiceServer_GenerateOptimisticAggchainProof_Call { + _c.Call.Return(run) + return _c +} + +// NewAggchainProofServiceServer creates a new instance of AggchainProofServiceServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggchainProofServiceServer(t interface { + mock.TestingT + Cleanup(func()) +}) *AggchainProofServiceServer { + mock := &AggchainProofServiceServer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go b/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go new file mode 100644 index 000000000..9f081d656 --- /dev/null +++ b/multidownloader/sync/aggsender/mocks/mock_unsafe_aggchain_proof_service_server.go @@ -0,0 +1,64 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// UnsafeAggchainProofServiceServer is an autogenerated mock type for the UnsafeAggchainProofServiceServer type +type UnsafeAggchainProofServiceServer struct { + mock.Mock +} + +type UnsafeAggchainProofServiceServer_Expecter struct { + mock *mock.Mock +} + +func (_m *UnsafeAggchainProofServiceServer) EXPECT() *UnsafeAggchainProofServiceServer_Expecter { + return &UnsafeAggchainProofServiceServer_Expecter{mock: &_m.Mock} +} + +// mustEmbedUnimplementedAggchainProofServiceServer provides a mock function with no fields +func (_m *UnsafeAggchainProofServiceServer) mustEmbedUnimplementedAggchainProofServiceServer() { + _m.Called() +} + +// UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mustEmbedUnimplementedAggchainProofServiceServer' +type UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call struct { + *mock.Call +} + +// mustEmbedUnimplementedAggchainProofServiceServer is a helper method to define mock.On call +func (_e *UnsafeAggchainProofServiceServer_Expecter) mustEmbedUnimplementedAggchainProofServiceServer() *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + return &UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call{Call: _e.mock.On("mustEmbedUnimplementedAggchainProofServiceServer")} +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) Run(run func()) *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) Return() *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Call.Return() + return _c +} + +func (_c *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call) RunAndReturn(run func()) *UnsafeAggchainProofServiceServer_mustEmbedUnimplementedAggchainProofServiceServer_Call { + _c.Run(run) + return _c +} + +// NewUnsafeAggchainProofServiceServer creates a new instance of UnsafeAggchainProofServiceServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewUnsafeAggchainProofServiceServer(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *UnsafeAggchainProofServiceServer {
+	mock := &UnsafeAggchainProofServiceServer{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/multidownloader/sync/evmdownloader.go b/multidownloader/sync/evmdownloader.go
new file mode 100644
index 000000000..0718fff38
--- /dev/null
+++ b/multidownloader/sync/evmdownloader.go
@@ -0,0 +1,335 @@
+package multidownloader
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types"
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	"github.com/agglayer/aggkit/sync"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+const (
+	percentTotallyCompleted = 100.0
+)
+
+var (
+	ErrLogsNotAvailable = errors.New("logs not available")
+)
+
+type EVMDownloader struct {
+	multidownloader mdrsynctypes.MultidownloaderInterface
+	logger          aggkitcommon.Logger
+	rh              *sync.RetryHandler
+	appender        sync.LogAppenderMap
+	// Maximum duration to wait for the requested maximum log range to become available
+	waitPeriodToCatchUpMaximumLogRange time.Duration
+	pullingPeriod                      time.Duration
+}
+
+func NewEVMDownloader(
+	mdr mdrsynctypes.MultidownloaderInterface,
+	logger aggkitcommon.Logger,
+	rh *sync.RetryHandler,
+	appender sync.LogAppenderMap,
+	waitPeriodToCatchUpMaximumLogRange time.Duration,
+	pullingPeriod time.Duration,
+) *EVMDownloader {
+	return &EVMDownloader{
+		multidownloader:                    mdr,
+		logger:                             logger,
+		rh:                                 rh,
+		appender:                           appender,
+		waitPeriodToCatchUpMaximumLogRange: waitPeriodToCatchUpMaximumLogRange,
+		pullingPeriod:                      pullingPeriod,
+	}
+}
+
+func (d *EVMDownloader) Finality() aggkittypes.BlockNumberFinality {
+	return d.multidownloader.Finality()
+}
+
+func (d *EVMDownloader) DownloadNextBlocks(ctx context.Context,
+	lastBlockHeader *aggkittypes.BlockHeader,
+	maxBlocks uint64,
+	syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) {
+	// Check context cancellation
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+	err := d.checkReorgedBlock(ctx, lastBlockHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	maxLogQuery := d.newMaxLogQuery(lastBlockHeader, maxBlocks, syncerConfig)
+	var result *mdrsynctypes.DownloadResult
+	conditionMet, err := aggkitcommon.PollingWithTimeout(ctx, d.pullingPeriod,
+		d.waitPeriodToCatchUpMaximumLogRange, func() (bool, error) {
+			err := d.checkReorgedBlock(ctx, lastBlockHeader)
+			if err != nil {
+				return false, err
+			}
+			result, err = d.executeLogQuery(ctx, maxLogQuery, syncerConfig)
+			if err != nil {
+				// ErrLogsNotAvailable is the only error that keeps the polling loop alive
+				if errors.Is(err, ErrLogsNotAvailable) {
+					return false, nil
+				}
+				return false, err
+			}
+
+			return true, nil
+		})
+	if errors.Is(err, aggkitcommon.ErrTimeoutReached) {
+		return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s after waiting %s: %w",
+			maxLogQuery.String(), d.waitPeriodToCatchUpMaximumLogRange.String(), ErrLogsNotAvailable)
+	}
+	if err != nil {
+		return nil, err
+	}
+	if !conditionMet {
+		return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: logs not available for query: %s. Err: %w",
+			maxLogQuery.String(), ErrLogsNotAvailable)
+	}
+	if result == nil {
+		return nil, fmt.Errorf("EVMDownloader.DownloadNextBlocks: executeLogQuery "+
+			"returned result=nil. Range: %s", maxLogQuery.BlockRange.String())
+	}
+	// Before returning we check again that lastBlockHeader is not reorged
+	err = d.checkReorgedBlock(ctx, lastBlockHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+func (d *EVMDownloader) ChainID(ctx context.Context) (uint64, error) {
+	return d.multidownloader.ChainID(ctx)
+}
+
+// executeLogQuery executes the log query, checking for partial availability.
+// If no logs are available at all, it returns ErrLogsNotAvailable.
+func (d *EVMDownloader) executeLogQuery(ctx context.Context,
+	fullLogQuery mdrtypes.LogQuery, syncerConfig aggkittypes.SyncerConfig) (*mdrsynctypes.DownloadResult, error) {
+	logQuery := fullLogQuery
+	if !d.multidownloader.IsAvailable(fullLogQuery) {
+		isPartial, partialLogQuery := d.multidownloader.IsPartiallyAvailable(fullLogQuery)
+		if !isPartial {
+			return nil, fmt.Errorf("EVMDownloader.executeLogQuery: logs not available for query: %s. Err: %w", fullLogQuery.String(),
+				ErrLogsNotAvailable)
+		}
+		logQuery = *partialLogQuery
+	}
+
+	logQueryResponse, err := d.multidownloader.LogQuery(ctx, logQuery)
+	if err != nil {
+		return nil, fmt.Errorf("EVMDownloader.executeLogQuery: cannot get logs: %w", err)
+	}
+	totalLogs := logQueryResponse.CountLogs()
+	percentComplete, err := d.calculatePercentCompletation(ctx, syncerConfig, logQuery.BlockRange)
+	if err != nil {
+		d.logger.Warn(err.Error())
+	}
+	result := &mdrsynctypes.DownloadResult{
+		Data:                 d.logQueryResponseToEVMBlocks(ctx, logQueryResponse),
+		CompletionPercentage: percentComplete,
+	}
+	err = d.addLastBlockIfNotIncluded(ctx, result,
+		logQueryResponse.ResponseRange, logQueryResponse.UnsafeRange)
+	if err != nil {
+		return nil, fmt.Errorf("EVMDownloader.executeLogQuery: adding last block: %w", err)
+	}
+	d.logger.Infof("EVMDownloader.executeLogQuery(block:%s): len(logs)= %d", logQuery.BlockRange.String(), totalLogs)
+	return result, nil
+}
+
+func (d *EVMDownloader) getFullBlockRange(ctx context.Context,
+	syncerConfig aggkittypes.SyncerConfig) (*aggkitcommon.BlockRange, error) {
+	blockTo, err := d.multidownloader.HeaderByNumber(ctx, &syncerConfig.ToBlock)
+	if err != nil {
+		return nil, fmt.Errorf("EVMDownloader.getFullBlockRange: error getting 'to' block header: %w", err)
+	}
+	if blockTo == nil {
+		return nil, errors.New("EVMDownloader.getFullBlockRange: 'to' block header is nil")
+	}
+	br := aggkitcommon.NewBlockRange(syncerConfig.FromBlock, blockTo.Number)
+	return &br, nil
+}
+
+// calculatePercentCompletation returns the completion percentage of the synchronization
+func (d *EVMDownloader) calculatePercentCompletation(ctx context.Context,
+	syncerConfig aggkittypes.SyncerConfig, lastRange aggkitcommon.BlockRange) (float64, error) {
+	fullRange, err := d.getFullBlockRange(ctx, syncerConfig)
+	if err != nil {
+		return 0, fmt.Errorf("EVMDownloader.calculatePercentCompletation: error getting full block range: %w", err)
+	}
+	totalBlocks := fullRange.CountBlocks()
+	pendingRange := aggkitcommon.NewBlockRange(lastRange.ToBlock+1, fullRange.ToBlock)
+	if pendingRange.CountBlocks() == 0 {
+		return percentTotallyCompleted, nil
+	}
+	blocksCompleted := totalBlocks - pendingRange.CountBlocks()
+	percent := (float64(blocksCompleted) / float64(totalBlocks)) * percentTotallyCompleted
+	return percent, nil
+}
+
+func (d *EVMDownloader) addLastBlockIfNotIncluded(ctx context.Context,
+	result *mdrsynctypes.DownloadResult,
+	responseRange aggkitcommon.BlockRange,
+	unsafeRange aggkitcommon.BlockRange) error {
+	lastBlockNumber := responseRange.ToBlock
+	// If it's already included, return
+	for _, b := range result.Data {
+		if b.Num == lastBlockNumber {
+			return nil
+		}
+	}
+
+	hdr, _, err := d.multidownloader.StorageHeaderByNumber(ctx, aggkittypes.NewBlockNumber(lastBlockNumber))
+	if err != nil {
+		// Best effort: if the header cannot be read, log it and skip adding the last block
+		d.logger.Errorf("EVMDownloader: error getting block header for block number %d: %v", lastBlockNumber, err)
+		return nil
+	}
+	isFinalizedBlock := !unsafeRange.ContainsBlockNumber(lastBlockNumber)
+	if hdr == nil {
+		// Check that we are not in the unsafe zone, because in that case we can't fake the hash
+		// and it's an error: the block must be in storage
+		if !isFinalizedBlock {
+			err := fmt.Errorf("EVMDownloader: cannot get block header for block number %d in unsafe zone", lastBlockNumber)
+			d.logger.Error(err)
+			return err
+		}
+		hdr = &aggkittypes.BlockHeader{
+			Number:     lastBlockNumber,
+			Hash:       aggkitcommon.ZeroHash,
+			Time:       0,
+			ParentHash: nil,
+		}
+	}
+	// Add empty block
+	emptyBlock := &sync.EVMBlock{
+		EVMBlockHeader: sync.EVMBlockHeader{
+			Num:       lastBlockNumber,
+			Hash:      hdr.Hash,
+			Timestamp: hdr.Time,
+		},
+		IsFinalizedBlock: isFinalizedBlock,
+		Events:           []interface{}{},
+	}
+	if hdr.ParentHash != nil {
+		emptyBlock.ParentHash = *hdr.ParentHash
+	}
+	d.logger.Debugf("EVMDownloader.addLastBlockIfNotIncluded: to response %s adding empty block number %d / %s",
+		responseRange.String(),
+		lastBlockNumber, hdr.Hash.Hex())
+	result.Data = append(result.Data, emptyBlock)
+	return nil
+}
+
+func (d *EVMDownloader) logQueryResponseToEVMBlocks(
+	ctx context.Context, response mdrtypes.LogQueryResponse) sync.EVMBlocks {
+	blocks := make(sync.EVMBlocks, 0, len(response.Blocks))
+	for _, blockWithLogs := range response.Blocks {
+		evmBlock := &sync.EVMBlock{
+			EVMBlockHeader: sync.EVMBlockHeader{
+				Num:       blockWithLogs.Header.Number,
+				Hash:      blockWithLogs.Header.Hash,
+				Timestamp: blockWithLogs.Header.Time,
+			},
+			IsFinalizedBlock: blockWithLogs.IsFinal,
+			Events:           []interface{}{},
+		}
+		if blockWithLogs.Header.ParentHash != nil {
+			evmBlock.ParentHash = *blockWithLogs.Header.ParentHash
+		}
+		// Convert mdrtypes.Log to types.Log and append
+		for _, mdrLog := range blockWithLogs.Logs {
+			ethLog := types.Log{
+				Address:        mdrLog.Address,
+				Topics:         mdrLog.Topics,
+				Data:           mdrLog.Data,
+				BlockNumber:    mdrLog.BlockNumber,
+				TxHash:         mdrLog.TxHash,
+				TxIndex:        mdrLog.TxIndex,
+				BlockHash:      blockWithLogs.Header.Hash,
+				Index:          mdrLog.Index,
+				Removed:        mdrLog.Removed,
+				BlockTimestamp: mdrLog.BlockTimestamp,
+			}
+			d.appendLog(ctx, evmBlock, ethLog)
+		}
+		blocks = append(blocks, evmBlock)
+	}
+	return blocks
+}
+
+func (d *EVMDownloader) appendLog(ctx context.Context, block *sync.EVMBlock, log types.Log) {
+	appenderFn := d.appender[log.Topics[0]]
+	if appenderFn == nil {
+		// No appender function registered for this topic: the log is ignored
+		return
+	}
+	attempts := 0
+	for {
+		err := appenderFn(block, log)
+		if err != nil {
+			attempts++
+			d.logger.Errorf("error trying to append log (attempt %d): %v", attempts, err)
+			d.rh.Handle(ctx, "appendLogs", attempts)
+			continue
+		}
+		break
+	}
+}
+
+// newMaxLogQuery creates a new LogQuery based on the syncerConfig and maxBlocks
+func (d *EVMDownloader) newMaxLogQuery(lastBlockHeader *aggkittypes.BlockHeader,
+	maxBlocks uint64,
+	syncerConfig aggkittypes.SyncerConfig) mdrtypes.LogQuery {
+	var fromBlock uint64
+	if lastBlockHeader != nil {
+		fromBlock = lastBlockHeader.Number + 1
+	} else {
+		fromBlock = syncerConfig.FromBlock
+	}
+	toBlock := fromBlock + maxBlocks - 1
+	logQuery := mdrtypes.NewLogQuery(fromBlock, toBlock, syncerConfig.ContractAddresses)
+	return logQuery
+}
+
+func (d *EVMDownloader) checkReorgedBlock(ctx context.Context,
+	blockHeader *aggkittypes.BlockHeader) error {
+	// Check context cancellation
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
+	// If blockHeader is nil, there's nothing to check.
+	// A ZeroHash means it is a 'fake' block added to mark the end of the log range.
+	if blockHeader == nil || blockHeader.Hash == aggkitcommon.ZeroHash {
+		return nil
+	}
+	// Check blockHeader is not reorged
+	isValid, reorgID, err := d.multidownloader.CheckValidBlock(ctx, blockHeader.Number, blockHeader.Hash)
+	if err != nil {
+		return err
+	}
+	if !isValid {
+		reorgData, err := d.multidownloader.GetReorgedDataByReorgID(ctx, reorgID)
+		if err != nil {
+			return err
+		}
+		// reorgData is not expected to be nil here, but guard against it defensively
+		if reorgData == nil {
+			return fmt.Errorf("reorg data not found for reorg ID %d", reorgID)
+		}
+		return mdrtypes.NewReorgedError(reorgData.BlockRangeAffected, reorgID,
+			fmt.Sprintf("detected at block number %d", blockHeader.Number),
+		)
+	}
+	return nil
+}
diff --git a/multidownloader/sync/evmdownloader_test.go b/multidownloader/sync/evmdownloader_test.go
new file mode 100644
index 000000000..5f636c81a
--- /dev/null
+++ b/multidownloader/sync/evmdownloader_test.go
@@ -0,0 +1,1103 @@
+package multidownloader
+
+// Unit tests for evmdownloader.go
+// Coverage: Most functions have 100% coverage including:
+// - executeLogQuery: 100%
+// - logQueryResponseToEVMBlocks: 100%
+// - appendLog: 100%
+// - newMaxLogQuery: 100%
+// - checkReorgedBlock: 100%
+// - DownloadNextBlocks: 91.3% (includes retry, timeout, and context cancellation scenarios)
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	"github.com/agglayer/aggkit/log"
+	mdrsynctypesmocks "github.com/agglayer/aggkit/multidownloader/sync/types/mocks"
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	"github.com/agglayer/aggkit/sync"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDownloadNextBlocks_Success(t *testing.T) {
+	ctx := context.Background()
+	mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t)
+	logger := log.WithFields("module", "test")
+	rh := &sync.RetryHandler{
+		RetryAfterErrorPeriod:      10 * time.Millisecond,
+		MaxRetryAttemptsAfterError: 3,
+	}
+
+	// Create a mock appender
+	appenderCalled := false
+	appender := sync.LogAppenderMap{
+		common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error {
+			appenderCalled = true
+			b.Events = append(b.Events, "test_event")
+			return nil
+		},
+	}
+
+	download := &EVMDownloader{
+		multidownloader:                    mockMdr,
+		logger:                             logger,
+		rh:                                 rh,
+		appender:                           appender,
+		waitPeriodToCatchUpMaximumLogRange: 1 * time.Second,
+		pullingPeriod:                      100 * time.Millisecond,
+	}
+
+	lastBlockHeader := &aggkittypes.BlockHeader{
+		Number: 100,
+		Hash:   common.HexToHash("0xabc"),
+	}
+
+	syncerConfig := aggkittypes.SyncerConfig{
+		FromBlock:         50,
+		ContractAddresses: []common.Address{common.HexToAddress("0x123")},
+	}
+
+	// Setup mocks
+	mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil)
+	mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true)
+	mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{
+		Blocks: []mdrtypes.BlockWithLogs{
+			{
+				Header: aggkittypes.BlockHeader{
+					Number: 101,
+					Hash:   common.HexToHash("0xblock101"),
+
Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 101, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 101, ToBlock: 110}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 1100, + }, mdrtypes.Finalized, nil) + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(101), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) + require.True(t, appenderCalled) +} + +func TestDownloadNextBlocks_ContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Equal(t, context.Canceled, err) +} + +func TestDownloadNextBlocks_ReorgDetected(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + reorgData := &mdrtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + // Setup mocks - reorg detected + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil) + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.True(t, mdrtypes.IsReorgedError(err)) + reorgErr := mdrtypes.CastReorgedError(err) + require.Equal(t, uint64(1), reorgErr.ReorgID) +} + +func TestDownloadNextBlocks_NilLastBlockHeader(t *testing.T) { + ctx := context.Background() + 
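+	// With a nil last block header, newMaxLogQuery falls back to syncerConfig.FromBlock,
+	// so this test expects the downloaded range to be blocks 50-59.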
mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // Setup mocks + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true) + mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 50, + Hash: common.HexToHash("0xblock50"), + Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 50, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 50, ToBlock: 59}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 59, + Hash: common.HexToHash("0xblock59"), + Time: 1090, + }, mdrtypes.Finalized, nil) + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) + result, err := download.DownloadNextBlocks(ctx, nil, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(50), result.Data[0].Num) + require.Equal(t, uint64(59), result.Data[1].Num) +} + +func TestDownloadNextBlocks_LogsNotAvailableInitially(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 500 * time.Millisecond, + pullingPeriod: 50 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // First call: checkReorgedBlock before PollingWithTimeout (line 65) + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // First iteration: PollingWithTimeout calls checkCondition immediately + // This calls checkReorgedBlock (line 74) and executeLogQuery + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + 
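+	// Logs are unavailable on the first attempt (neither fully nor partially),
+	// so PollingWithTimeout waits one pulling period and retries.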
mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() + + // Second iteration in polling loop + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(true).Once() + mockMdr.EXPECT().LogQuery(ctx, mock.Anything).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 101, + Hash: common.HexToHash("0xblock101"), + Time: 1000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 101, + BlockTimestamp: 1000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 101, ToBlock: 110}, + }, nil).Once() + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 1100, + }, mdrtypes.Finalized, nil).Once() + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) + // Final checkReorgedBlock after PollingWithTimeout completes (line 101) + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(101), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) +} + +func TestDownloadNextBlocks_TimeoutWaitingForLogs(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 100 * time.Millisecond, + pullingPeriod: 200 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // First call: checkReorgedBlock before PollingWithTimeout (line 65) + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // PollingWithTimeout calls checkCondition multiple times until timeout + // Each call includes checkReorgedBlock and executeLogQuery + // Since timeout is 100ms and polling period is 200ms, it will try only once before timeout + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + // After timeout, should return error + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "logs not available") +} + +func TestDownloadNextBlocks_ContextCancelledDuringRetry(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 5 * time.Second, + pullingPeriod: 50 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + // checkReorgedBlock and executeLogQuery may be called multiple times before context is cancelled + // Using Maybe() to allow flexible number of calls depending on timing + mockMdr.EXPECT().CheckValidBlock(mock.Anything, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Maybe() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Maybe() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Maybe() + + // During retry loop, cancel the context after a short delay + go func() { + time.Sleep(30 * time.Millisecond) + cancel() + }() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "context") +} + +func TestDownloadNextBlocks_ReorgDetectedDuringRetry(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 500 * time.Millisecond, + pullingPeriod: 30 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + reorgData := &mdrtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + // First call: checkReorgedBlock before PollingWithTimeout (line 65) + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + + // First iteration: PollingWithTimeout calls checkCondition immediately + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(true, uint64(0), nil).Once() + mockMdr.EXPECT().IsAvailable(mock.Anything).Return(false).Once() + mockMdr.EXPECT().IsPartiallyAvailable(mock.Anything).Return(false, nil).Once() + + // Second iteration: reorg detected during checkReorgedBlock + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), lastBlockHeader.Hash).Return(false, uint64(1), nil).Once() + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil).Once() + + result, err := download.DownloadNextBlocks(ctx, lastBlockHeader, 10, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.True(t, mdrtypes.IsReorgedError(err)) +} + +func TestExecuteLogQuery_FullyAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger 
:= log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) + mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 105, + Hash: common.HexToHash("0xblock105"), + Time: 2000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { + BlockNumber: 105, + BlockTimestamp: 2000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 100, ToBlock: 110}, + }, nil) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, mdrtypes.Finalized, nil) + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(105), result.Data[0].Num) + require.Equal(t, uint64(110), result.Data[1].Num) +} + +func TestExecuteLogQuery_PartiallyAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + b.Events = append(b.Events, "test_event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + partialQuery := mdrtypes.NewLogQuery(100, 105, []common.Address{common.HexToAddress("0x123")}) + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) + mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(true, &partialQuery) + mockMdr.EXPECT().LogQuery(ctx, partialQuery).Return(mdrtypes.LogQueryResponse{ + Blocks: []mdrtypes.BlockWithLogs{ + { + Header: aggkittypes.BlockHeader{ + Number: 103, + Hash: common.HexToHash("0xblock103"), + Time: 2000, + }, + IsFinal: true, + Logs: []mdrtypes.Log{ + { 
+ BlockNumber: 103, + BlockTimestamp: 2000, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + }, + }, + }, + }, + ResponseRange: aggkitcommon.BlockRange{FromBlock: 100, ToBlock: 105}, + }, nil) + // When using partial query, addLastBlockIfNotIncluded uses responseRange.ToBlock (105) + mockMdr.EXPECT().StorageHeaderByNumber(ctx, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 105, + Hash: common.HexToHash("0xblock105"), + Time: 2050, + }, mdrtypes.Finalized, nil) + mockMdr.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(&aggkittypes.BlockHeader{ + Number: 110, + Hash: common.HexToHash("0xblock110"), + Time: 2100, + }, nil) + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Data, 2) + require.Equal(t, uint64(103), result.Data[0].Num) + require.Equal(t, uint64(105), result.Data[1].Num) // Last block is from partial response range +} + +func TestExecuteLogQuery_NotAvailable(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + mockMdr.EXPECT().IsAvailable(logQuery).Return(false) + mockMdr.EXPECT().IsPartiallyAvailable(logQuery).Return(false, nil) + + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "logs not available") +} + +func TestExecuteLogQuery_GetEthLogsError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + logQuery := mdrtypes.NewLogQuery(100, 110, []common.Address{common.HexToAddress("0x123")}) + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + mockMdr.EXPECT().IsAvailable(logQuery).Return(true) + mockMdr.EXPECT().LogQuery(ctx, logQuery).Return(mdrtypes.LogQueryResponse{}, fmt.Errorf("database error")) + + result, err := download.executeLogQuery(ctx, logQuery, syncerConfig) + + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "cannot get logs") +} + +func TestNewMaxLogQuery_WithLastBlockHeader(t *testing.T) { + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: nil, + logger: logger, + rh: rh, + appender: 
sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + lastBlockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + query := download.newMaxLogQuery(lastBlockHeader, 10, syncerConfig) + + require.Equal(t, uint64(101), query.BlockRange.FromBlock) + require.Equal(t, uint64(110), query.BlockRange.ToBlock) + require.Equal(t, syncerConfig.ContractAddresses, query.Addrs) +} + +func TestNewMaxLogQuery_WithoutLastBlockHeader(t *testing.T) { + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: nil, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + syncerConfig := aggkittypes.SyncerConfig{ + FromBlock: 50, + ContractAddresses: []common.Address{common.HexToAddress("0x123")}, + } + + query := download.newMaxLogQuery(nil, 10, syncerConfig) + + require.Equal(t, uint64(50), query.BlockRange.FromBlock) + require.Equal(t, uint64(59), query.BlockRange.ToBlock) + require.Equal(t, syncerConfig.ContractAddresses, query.Addrs) +} + +func TestCheckReorgedBlock_NilBlockHeader(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + // When blockHeader is nil, no reorg check should be performed + err := download.checkReorgedBlock(ctx, nil) + + require.NoError(t, err) +} + +func TestCheckReorgedBlock_ValidBlock(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(true, uint64(0), nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.NoError(t, err) +} + +func TestCheckReorgedBlock_InvalidBlock(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: 
common.HexToHash("0xabc"), + } + + reorgData := &mdrtypes.ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 105), + DetectedAtBlock: 106, + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(reorgData, nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.True(t, mdrtypes.IsReorgedError(err)) + reorgErr := mdrtypes.CastReorgedError(err) + require.Equal(t, uint64(1), reorgErr.ReorgID) +} + +func TestCheckReorgedBlock_ContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Equal(t, context.Canceled, err) +} + +func TestCheckReorgedBlock_CheckValidBlockError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(0), fmt.Errorf("check error")) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "check error") +} + +func TestCheckReorgedBlock_GetReorgedDataError(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(nil, fmt.Errorf("database error")) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "database error") +} + +func TestCheckReorgedBlock_NilReorgData(t *testing.T) { + ctx := context.Background() + mockMdr := mdrsynctypesmocks.NewMultidownloaderInterface(t) + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + 
MaxRetryAttemptsAfterError: 3, + } + + download := &EVMDownloader{ + multidownloader: mockMdr, + logger: logger, + rh: rh, + appender: sync.LogAppenderMap{}, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + blockHeader := &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0xabc"), + } + + mockMdr.EXPECT().CheckValidBlock(ctx, uint64(100), blockHeader.Hash).Return(false, uint64(1), nil) + mockMdr.EXPECT().GetReorgedDataByReorgID(ctx, uint64(1)).Return(nil, nil) + + err := download.checkReorgedBlock(ctx, blockHeader) + + require.Error(t, err) + require.Contains(t, err.Error(), "reorg data not found") +} + +func TestAppendLog_Success(t *testing.T) { + ctx := context.Background() + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + callCount := 0 + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + callCount++ + b.Events = append(b.Events, "event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: nil, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + block := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: 100, + }, + Events: []interface{}{}, + } + + log := types.Log{ + BlockNumber: 100, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + } + + download.appendLog(ctx, block, log) + + require.Equal(t, 1, callCount) + require.Len(t, block.Events, 1) +} + +func TestAppendLog_RetryOnError(t *testing.T) { + ctx := context.Background() + logger := log.WithFields("module", "test") + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: 10 * time.Millisecond, + MaxRetryAttemptsAfterError: 3, + } + + callCount := 0 + appender := sync.LogAppenderMap{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): func(b *sync.EVMBlock, l types.Log) error { + callCount++ + if callCount < 3 { + return fmt.Errorf("temporary error") + } + b.Events = append(b.Events, "event") + return nil + }, + } + + download := &EVMDownloader{ + multidownloader: nil, + logger: logger, + rh: rh, + appender: appender, + waitPeriodToCatchUpMaximumLogRange: 1 * time.Second, + pullingPeriod: 100 * time.Millisecond, + } + + block := &sync.EVMBlock{ + EVMBlockHeader: sync.EVMBlockHeader{ + Num: 100, + }, + Events: []interface{}{}, + } + + log := types.Log{ + BlockNumber: 100, + Topics: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + }, + } + + download.appendLog(ctx, block, log) + + require.Equal(t, 3, callCount) + require.Len(t, block.Events, 1) +} diff --git a/multidownloader/sync/evmdriver.go b/multidownloader/sync/evmdriver.go new file mode 100644 index 000000000..b20b2d286 --- /dev/null +++ b/multidownloader/sync/evmdriver.go @@ -0,0 +1,176 @@ +package multidownloader + +import ( + "context" + "errors" + "fmt" + "sync" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db/compatibility" + mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + mdrtypes "github.com/agglayer/aggkit/multidownloader/types" + aggkitsync "github.com/agglayer/aggkit/sync" + aggkittypes "github.com/agglayer/aggkit/types" 
+)
+
+type EVMDriver struct {
+	processor            mdrsynctypes.ProcessorInterface
+	downloader           mdrsynctypes.DownloaderInterface
+	syncerConfig         aggkittypes.SyncerConfig
+	rh                   *aggkitsync.RetryHandler
+	logger               aggkitcommon.Logger
+	compatibilityChecker compatibility.CompatibilityChecker
+	// This mutex protects:
+	// - syncBlockChunkSize, because it can be updated dynamically by the user and read by the sync loop
+	// - completionPercentage, because it can be updated by the downloader and read by the sync loop and by the API server
+	mutex              sync.Mutex
+	syncBlockChunkSize uint64
+	// completionPercentage is the completion percentage of the download; it can be used to estimate the progress of the sync.
+	// It can be nil if there is no information yet.
+	// 0 -> 0%, 100 -> 100%
+	completionPercentage *float64
+}
+
+func NewEVMDriver(
+	logger aggkitcommon.Logger,
+	processor mdrsynctypes.ProcessorInterface,
+	downloader mdrsynctypes.DownloaderInterface,
+	syncerConfig aggkittypes.SyncerConfig,
+	syncBlockChunkSize uint64,
+	rh *aggkitsync.RetryHandler,
+	compatibilityChecker compatibility.CompatibilityChecker,
+) *EVMDriver {
+	return &EVMDriver{
+		processor:            processor,
+		downloader:           downloader,
+		syncerConfig:         syncerConfig,
+		syncBlockChunkSize:   syncBlockChunkSize,
+		rh:                   rh,
+		logger:               logger,
+		compatibilityChecker: compatibilityChecker,
+	}
+}
+
+func (d *EVMDriver) Sync(ctx context.Context) {
+	attempts := 0
+	for {
+		if ctx.Err() != nil {
+			d.logger.Info("context cancelled")
+			return
+		}
+		if err := d.syncStep(ctx); err != nil {
+			attempts++
+			d.logger.Error("error during syncing ", err)
+			d.rh.Handle(ctx, "Sync", attempts)
+			continue
+		}
+	}
+}
+
+func (d *EVMDriver) syncStep(ctx context.Context) error {
+	if d.compatibilityChecker != nil {
+		if err := d.compatibilityChecker.Check(ctx, nil); err != nil {
+			return fmt.Errorf("EVMDriver: error checking compatibility data between downloader (runtime)"+
+				" and processor (db): %w", err)
+		}
+		d.compatibilityChecker = nil // only check once
+	}
+
+	lastBlockHeader, err := d.processor.GetLastProcessedBlockHeader(ctx)
+	if err != nil {
+		return fmt.Errorf("EVMDriver: error getting last processed block from processor: %w", err)
+	}
+	d.logger.Infof("EVMDriver: starting sync from last processed block: %s", lastBlockHeader.Brief())
+	blocks, err := d.downloader.DownloadNextBlocks(ctx,
+		lastBlockHeader,
+		d.syncBlockChunkSize,
+		d.syncerConfig)
+
+	if err != nil {
+		switch {
+		case mdrtypes.IsReorgedError(err):
+			if reorgErr := d.handleReorg(ctx, mdrtypes.CastReorgedError(err)); reorgErr != nil {
+				return fmt.Errorf("EVMDriver: error handling reorg: %w", reorgErr)
+			}
+			// Reorg processed
+			return nil
+		case errors.Is(err, ErrLogsNotAvailable):
+			d.logger.Debug("EVMDriver: no logs available yet, waiting to retry")
+			return nil
+		default:
+			return fmt.Errorf("EVMDriver: error downloading blocks: %w", err)
+		}
+	}
+	if err = d.processBlocks(ctx, blocks); err != nil {
+		return fmt.Errorf("EVMDriver: error processing blocks: %w", err)
+	}
+	if blocks != nil {
+		lastProcessedBlock := blocks.Data.LastBlock()
+		d.logger.Infof("EVMDriver: processed %d blocks, percent %.2f%% complete. LastBlock: %s",
+			len(blocks.Data), blocks.CompletionPercentage, lastProcessedBlock.Brief())
+	}
+	return nil
+}
+
+func (d *EVMDriver) processBlocks(ctx context.Context, data *mdrsynctypes.DownloadResult) error {
+	if data == nil || len(data.Data) == 0 {
+		return nil
+	}
+
+	err := d.withRetry(ctx, "processBlocks", func() error {
+		return d.processor.ProcessBlocks(ctx, data)
+	})
+	// If there was no error, update the completion percentage
+	if err == nil {
+		d.setCompletionPercentage(data.CompletionPercentage)
+	}
+	return err
+}
+
+func (d *EVMDriver) handleReorg(ctx context.Context, err *mdrtypes.ReorgedError) error {
+	d.logger.Warnf("reorg detected: %s", err.Error())
+	return d.withRetry(ctx, "handleReorg", func() error {
+		return d.processor.Reorg(ctx, err.BlockRangeReorged.FromBlock)
+	})
+}
+
+// withRetry is a helper that invokes fn and retries it on failure until it succeeds
+// or the context is cancelled
+func (d *EVMDriver) withRetry(ctx context.Context, opName string, fn func() error) error {
+	attempts := 0
+	for {
+		select {
+		case <-ctx.Done():
+			d.logger.Warnf("context canceled during %s", opName)
+			return nil
+		default:
+			err := fn()
+			if err != nil {
+				attempts++
+				d.logger.Errorf("error during %s (attempt %d): %v", opName, attempts, err)
+				d.rh.Handle(ctx, opName, attempts)
+			} else {
+				return nil
+			}
+		}
+	}
+}
+
+func (d *EVMDriver) GetCompletionPercentage() *float64 {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	// Copy the value to avoid exposing the internal pointer
+	if d.completionPercentage == nil {
+		return nil
+	}
+	percent := *d.completionPercentage
+	return &percent
+}
+
+func (d *EVMDriver) setCompletionPercentage(percent float64) {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	d.completionPercentage = &percent
+}
diff --git a/multidownloader/sync/evmdriver_test.go b/multidownloader/sync/evmdriver_test.go
new file mode 100644
index 000000000..908b8a016
--- /dev/null
+++ b/multidownloader/sync/evmdriver_test.go
@@ -0,0 +1,141 @@
+package multidownloader
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	compatibilityMocks "github.com/agglayer/aggkit/db/compatibility/mocks"
+	"github.com/agglayer/aggkit/log"
+	mdrsynctypes "github.com/agglayer/aggkit/multidownloader/sync/types"
+	"github.com/agglayer/aggkit/multidownloader/sync/types/mocks"
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	"github.com/agglayer/aggkit/sync"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+type evmDriverTestData struct {
+	driver                   *EVMDriver
+	mockProcessor            *mocks.ProcessorInterface
+	mockDownloader           *mocks.DownloaderInterface
+	mockCompatibilityChecker *compatibilityMocks.CompatibilityChecker
+	syncerConfig             aggkittypes.SyncerConfig
+	logger                   aggkitcommon.Logger
+	rh                       *sync.RetryHandler
+}
+
+func newEVMDriverTestData(t *testing.T, compatibilityCheckExpectations bool) *evmDriverTestData {
+	t.Helper()
+	mockProcessor := mocks.NewProcessorInterface(t)
+	mockDownloader := mocks.NewDownloaderInterface(t)
+	mockCompatibilityChecker := compatibilityMocks.NewCompatibilityChecker(t)
+	syncerConfig := aggkittypes.SyncerConfig{}
+	logger := log.WithFields("module", "test")
+	rh := &sync.RetryHandler{
+		RetryAfterErrorPeriod:      time.Millisecond * 10,
+		MaxRetryAttemptsAfterError: 0,
+	}
+	if compatibilityCheckExpectations {
+		mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(nil).Maybe()
+	}
+	driver := NewEVMDriver(
+		logger,
+		mockProcessor,
+		mockDownloader,
+		syncerConfig,
+		100,
+		rh,
+		mockCompatibilityChecker,
+	)
+	require.NotNil(t, driver)
+	return &evmDriverTestData{
+		driver:                   driver,
+		mockProcessor:            mockProcessor,
+		mockDownloader:           mockDownloader,
+		mockCompatibilityChecker: mockCompatibilityChecker,
+		syncerConfig:             syncerConfig,
+		logger:                   logger,
+		rh:                       rh,
+	}
+}
+
+func TestNewEVMDriver_SyncStep(t *testing.T) {
+	t.Run("fail compatibility check", func(t *testing.T) {
+		testData := newEVMDriverTestData(t, false)
+		expectedErr := errors.New("compatibility check failed")
+		testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(expectedErr).Once()
+		ctx := t.Context()
+		err := testData.driver.syncStep(ctx)
+		require.ErrorIs(t, err, expectedErr)
+	})
+
+	t.Run("compatibility check is only executed once", func(t *testing.T) {
+		testData := newEVMDriverTestData(t, false)
+		expectedErr := errors.New("compatibility check failed")
+		testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(expectedErr).Once()
+		ctx := t.Context()
+		err := testData.driver.syncStep(ctx)
+		require.ErrorIs(t, err, expectedErr)
+		// This round the compatibility check is called again because the previous one failed
+		testData.mockCompatibilityChecker.EXPECT().Check(mock.Anything, mock.Anything).Return(nil).Once()
+		testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once()
+		testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once()
+		err = testData.driver.syncStep(ctx)
+		require.NoError(t, err)
+		// This round the compatibility check should not be executed again
+		testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once()
+		testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once()
+		err = testData.driver.syncStep(ctx)
+		require.NoError(t, err)
+	})
+
+	t.Run("DownloadNextBlocks returns ErrLogsNotAvailable", func(t *testing.T) {
+		testData := newEVMDriverTestData(t, true)
+		testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once()
+		testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything,
+			mock.Anything, mock.Anything).Return(nil, ErrLogsNotAvailable).Once()
+		err := testData.driver.syncStep(t.Context())
+		require.NoError(t, err)
+	})
+
+	t.Run("DownloadNextBlocks returns ReorgedError", func(t *testing.T) {
+		testData := newEVMDriverTestData(t, true)
+		expectedErr := mdrtypes.NewReorgedError(aggkitcommon.NewBlockRange(10, 20), 20, "test")
+		testData.mockProcessor.EXPECT().GetLastProcessedBlockHeader(mock.Anything).Return(nil, nil).Once()
+		testData.mockDownloader.EXPECT().DownloadNextBlocks(mock.Anything, mock.Anything,
+			mock.Anything, mock.Anything).Return(nil, expectedErr).Once()
+		testData.mockProcessor.EXPECT().Reorg(mock.Anything, uint64(10)).Return(nil).Once()
+		err := testData.driver.syncStep(t.Context())
+		require.NoError(t, err)
+	})
+}
+
+func TestNewEVMDriver_ProcessBlocks(t *testing.T) {
+	t.Run("retries ProcessBlocks until it succeeds and stores the completion percentage", func(t *testing.T) {
+		testData := newEVMDriverTestData(t, true)
+		ctx := t.Context()
+		testData.driver.rh.MaxRetryAttemptsAfterError = 2
+		data := &mdrsynctypes.DownloadResult{
+			Data: []*sync.EVMBlock{
+				{ // sync.EVMBlock
+					EVMBlockHeader: sync.EVMBlockHeader{
+						Num: 10,
+					},
+				},
+			},
+			CompletionPercentage: 50,
+		}
+		errProcessBlock := fmt.Errorf("error processing blocks")
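+		// ProcessBlocks is expected to fail on the first attempt and succeed on the
+		// second, exercising the withRetry loop before the percentage is stored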
diff --git a/multidownloader/sync/runtimedata.go b/multidownloader/sync/runtimedata.go
new file mode 100644
index 000000000..0aa58e151
--- /dev/null
+++ b/multidownloader/sync/runtimedata.go
@@ -0,0 +1,37 @@
+package multidownloader
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// RuntimeData contains the relevant data from the runtime environment;
+// it is used to check that the DB is compatible with that runtime
+type RuntimeData struct {
+	ChainID   uint64
+	Addresses []common.Address
+}
+
+func (r RuntimeData) String() string {
+	res := fmt.Sprintf("ChainID: %d, Addresses: ", r.ChainID)
+	for _, addr := range r.Addresses {
+		res += addr.String() + ", "
+	}
+	return res
+}
+
+func (r RuntimeData) IsCompatible(other RuntimeData) error {
+	if r.ChainID != other.ChainID {
+		return fmt.Errorf("chain ID mismatch: %d != %d", r.ChainID, other.ChainID)
+	}
+	if len(r.Addresses) != len(other.Addresses) {
+		return fmt.Errorf("addresses len mismatch: %d != %d", len(r.Addresses), len(other.Addresses))
+	}
+	for i, addr := range r.Addresses {
+		if addr != other.Addresses[i] {
+			return fmt.Errorf("addresses[%d] mismatch: %s != %s", i, addr.String(), other.Addresses[i].String())
+		}
+	}
+	return nil
+}
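In practice RuntimeData is what the compatibility checker compares between the DB and the running node. A minimal sketch of that guard, where loadStoredRuntimeData is a hypothetical persistence helper (the real check goes through the db/compatibility CompatibilityChecker):

func checkRuntimeCompatibility(current RuntimeData, loadStoredRuntimeData func() (RuntimeData, error)) error {
	stored, err := loadStoredRuntimeData()
	if err != nil {
		return fmt.Errorf("loading stored runtime data: %w", err)
	}
	// Addresses are compared position by position, so they must be persisted
	// in a deterministic order for the check to be meaningful.
	if err := stored.IsCompatible(current); err != nil {
		return fmt.Errorf("DB is not compatible with the current runtime: %w", err)
	}
	return nil
}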
diff --git a/multidownloader/sync/runtimedata_test.go b/multidownloader/sync/runtimedata_test.go
new file mode 100644
index 000000000..32fdbe6d5
--- /dev/null
+++ b/multidownloader/sync/runtimedata_test.go
@@ -0,0 +1,493 @@
+package multidownloader
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRuntimeData_String(t *testing.T) {
+	tests := []struct {
+		name     string
+		data     RuntimeData
+		expected string
+	}{
+		{
+			name: "empty addresses",
+			data: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{},
+			},
+			expected: "ChainID: 1, Addresses: ",
+		},
+		{
+			name: "single address",
+			data: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+			expected: "ChainID: 1, Addresses: 0x0000000000000000000000000000000000000123, ",
+		},
+		{
+			name: "two addresses",
+			data: RuntimeData{
+				ChainID: 1,
+				Addresses: []common.Address{
+					common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"),
+					common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"),
+				},
+			},
+			expected: "ChainID: 1, Addresses: 0x1234567890AbcdEF1234567890aBcdef12345678, 0xABcdEFABcdEFabcdEfAbCdefabcdeFABcDEFabCD, ",
+		},
+		{
+			name: "multiple addresses",
+			data: RuntimeData{
+				ChainID: 42,
+				Addresses: []common.Address{
+					common.HexToAddress("0x123"),
+					common.HexToAddress("0x456"),
+					common.HexToAddress("0x789"),
+				},
+			},
+			expected: "ChainID: 42, Addresses: 0x0000000000000000000000000000000000000123, 0x0000000000000000000000000000000000000456, 0x0000000000000000000000000000000000000789, ",
+		},
+		{
+			name: "zero chain ID",
+			data: RuntimeData{
+				ChainID:   0,
+				Addresses: []common.Address{common.HexToAddress("0xabc")},
+			},
+			expected: "ChainID: 0, Addresses: 0x0000000000000000000000000000000000000aBc, ",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := tt.data.String()
+			require.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestRuntimeData_IsCompatible_Success(t *testing.T) {
+	tests := []struct {
+		name  string
+		data1 RuntimeData
+		data2 RuntimeData
+	}{
+		{
+			name: "identical data with single address",
+			data1: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+			data2: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+		},
+		{
+			name: "identical data with two addresses",
+			data1: RuntimeData{
+				ChainID: 1,
+				Addresses: []common.Address{
+					common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"),
+					common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"),
+				},
+			},
+			data2: RuntimeData{
+				ChainID: 1,
+				Addresses: []common.Address{
+					common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"),
+					common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"),
+				},
+			},
+		},
+		{
+			name: "identical data with multiple addresses",
+			data1: RuntimeData{
+				ChainID: 42,
+				Addresses: []common.Address{
+					common.HexToAddress("0x123"),
+					common.HexToAddress("0x456"),
+					common.HexToAddress("0x789"),
+				},
+			},
+			data2: RuntimeData{
+				ChainID: 42,
+				Addresses: []common.Address{
+					common.HexToAddress("0x123"),
+					common.HexToAddress("0x456"),
+					common.HexToAddress("0x789"),
+				},
+			},
+		},
+		{
+			name: "both have empty addresses",
+			data1: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{},
+			},
+			data2: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{},
+			},
+		},
+		{
+			name: "zero chain ID with matching data",
+			data1: RuntimeData{
+				ChainID:   0,
+				Addresses: []common.Address{common.HexToAddress("0x789")},
+			},
+			data2: RuntimeData{
+				ChainID:   0,
+				Addresses: []common.Address{common.HexToAddress("0x789")},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.data1.IsCompatible(tt.data2)
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestRuntimeData_IsCompatible_ChainIDMismatch(t *testing.T) {
+	tests := []struct {
+		name  string
+		data1 RuntimeData
+		data2 RuntimeData
+	}{
+		{
+			name: "different chain IDs with same address",
+			data1: RuntimeData{
+				ChainID: 1,
+				Addresses: []common.Address{
+					common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"),
+				},
+			},
+			data2: RuntimeData{
+				ChainID: 2,
+				Addresses: []common.Address{
+					common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"),
+				},
+			},
+		},
+		{
+			name: "chain ID 0 vs 1",
+			data1: RuntimeData{
+				ChainID:   0,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+			data2: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+		},
+		{
+			name: "large chain ID difference",
+			data1: RuntimeData{
+				ChainID:   1,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+			data2: RuntimeData{
+				ChainID:   999999,
+				Addresses: []common.Address{common.HexToAddress("0x123")},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.data1.IsCompatible(tt.data2)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), "chain ID mismatch")
+		})
+	}
+}
+
+func TestRuntimeData_IsCompatible_AddressesLenMismatch(t *testing.T) {
+	tests := []struct {
+		name  string
+		data1 RuntimeData
+		data2 RuntimeData
+	}{
+		{
+			name: "data1 has more addresses",
+			data1: RuntimeData{
+				ChainID: 1,
+				Addresses: 
[]common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + }, + { + name: "data2 has more addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + }, + { + name: "data1 empty, data2 has addresses", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + }, + { + name: "data1 has addresses, data2 empty", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{common.HexToAddress("0x123")}, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{}, + }, + }, + { + name: "large difference in address count", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x333"), + common.HexToAddress("0x444"), + common.HexToAddress("0x555"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.data1.IsCompatible(tt.data2) + require.Error(t, err) + require.Contains(t, err.Error(), "addresses len mismatch") + }) + } +} + +func TestRuntimeData_IsCompatible_AddressMismatch(t *testing.T) { + tests := []struct { + name string + data1 RuntimeData + data2 RuntimeData + index int + }{ + { + name: "single address mismatch", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + }, + }, + index: 0, + }, + { + name: "first address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x789"), + common.HexToAddress("0x456"), + }, + }, + index: 0, + }, + { + name: "second address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x456"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x123"), + common.HexToAddress("0x789"), + }, + }, + index: 1, + }, + { + name: "middle address differs in longer list", + data1: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x333"), + common.HexToAddress("0x444"), + }, + }, + data2: RuntimeData{ + ChainID: 1, + Addresses: []common.Address{ + common.HexToAddress("0x111"), + common.HexToAddress("0x222"), + common.HexToAddress("0x999"), + common.HexToAddress("0x444"), + }, + }, + index: 2, + }, + { + name: "last address differs", + data1: RuntimeData{ + ChainID: 1, + Addresses: 
[]common.Address{
+					common.HexToAddress("0x111"),
+					common.HexToAddress("0x222"),
+					common.HexToAddress("0x333"),
+				},
+			},
+			data2: RuntimeData{
+				ChainID: 1,
+				Addresses: []common.Address{
+					common.HexToAddress("0x111"),
+					common.HexToAddress("0x222"),
+					common.HexToAddress("0x999"),
+				},
+			},
+			index: 2,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.data1.IsCompatible(tt.data2)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), "addresses")
+			require.Contains(t, err.Error(), "mismatch")
+		})
+	}
+}
+
+func TestRuntimeData_IsCompatible_ErrorPrecedence(t *testing.T) {
+	t.Run("chain ID mismatch takes precedence over address differences", func(t *testing.T) {
+		data1 := RuntimeData{
+			ChainID:   1,
+			Addresses: []common.Address{common.HexToAddress("0x123")},
+		}
+		data2 := RuntimeData{
+			ChainID:   2,
+			Addresses: []common.Address{common.HexToAddress("0x456")},
+		}
+
+		err := data1.IsCompatible(data2)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "chain ID mismatch")
+	})
+
+	t.Run("length mismatch checked before address comparison", func(t *testing.T) {
+		data1 := RuntimeData{
+			ChainID:   1,
+			Addresses: []common.Address{common.HexToAddress("0x123")},
+		}
+		data2 := RuntimeData{
+			ChainID: 1,
+			Addresses: []common.Address{
+				common.HexToAddress("0x456"),
+				common.HexToAddress("0x789"),
+			},
+		}
+
+		err := data1.IsCompatible(data2)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "addresses len mismatch")
+	})
+}
+
+func TestRuntimeData_IsCompatible_NilAddresses(t *testing.T) {
+	t.Run("both nil addresses", func(t *testing.T) {
+		data1 := RuntimeData{
+			ChainID:   1,
+			Addresses: nil,
+		}
+		data2 := RuntimeData{
+			ChainID:   1,
+			Addresses: nil,
+		}
+
+		err := data1.IsCompatible(data2)
+		require.NoError(t, err)
+	})
+
+	t.Run("one nil, one empty", func(t *testing.T) {
+		data1 := RuntimeData{
+			ChainID:   1,
+			Addresses: nil,
+		}
+		data2 := RuntimeData{
+			ChainID:   1,
+			Addresses: []common.Address{},
+		}
+
+		err := data1.IsCompatible(data2)
+		require.NoError(t, err)
+	})
+
+	t.Run("nil vs non-empty", func(t *testing.T) {
+		data1 := RuntimeData{
+			ChainID:   1,
+			Addresses: nil,
+		}
+		data2 := RuntimeData{
+			ChainID:   1,
+			Addresses: []common.Address{common.HexToAddress("0x123")},
+		}
+
+		err := data1.IsCompatible(data2)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "addresses len mismatch")
+	})
+}
diff --git a/multidownloader/sync/types/downloader.go b/multidownloader/sync/types/downloader.go
new file mode 100644
index 000000000..818839c30
--- /dev/null
+++ b/multidownloader/sync/types/downloader.go
@@ -0,0 +1,39 @@
+package types
+
+import (
+	"context"
+
+	"github.com/agglayer/aggkit/sync"
+	aggkittypes "github.com/agglayer/aggkit/types"
+)
+
+type DownloadResult struct {
+	Data sync.EVMBlocks
+	// CompletionPercentage indicates the percent of completion of the download
+	// 0 -> 0%, 100 -> 100%
+	CompletionPercentage float64
+}
+
+type DownloaderInterface interface {
+	// DownloadNextBlocks downloads the next blocks starting from fromBlockHeader
+	// up to maxBlocks, according to the syncerConfig
+	// parameters:
+	// - fromBlockHeader: the block header to start downloading from (exclusive).
+	//   If it is nil, no previous blocks have been processed yet
+	// - maxBlocks: the maximum number of blocks to return (it may return fewer, or none)
+	// - syncerConfig: the syncer configuration
+	// returns:
+	// - DownloadResult: the result of the download, containing the blocks and the completion percentage
+	//   
DownloadResult is never nil + // DownloadResult.Data could be nil if no blocks were downloaded + // DownloadResult.CompletionPercentage indicates the percent of completion of the download + // 0 -> 0%, 100 -> 100% + // - error: if any error occurred during the download + // special error: errors.Is(err, ErrLogsNotAvailable) indicates that it works + // but there are no logs yet + DownloadNextBlocks(ctx context.Context, + fromBlockHeader *aggkittypes.BlockHeader, + maxBlocks uint64, + syncerConfig aggkittypes.SyncerConfig) (*DownloadResult, error) + ChainID(ctx context.Context) (uint64, error) +} diff --git a/multidownloader/sync/types/mocks/mock_downloader_interface.go b/multidownloader/sync/types/mocks/mock_downloader_interface.go new file mode 100644 index 000000000..9e017f3e6 --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_downloader_interface.go @@ -0,0 +1,157 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + aggkittypes "github.com/agglayer/aggkit/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/multidownloader/sync/types" +) + +// DownloaderInterface is an autogenerated mock type for the DownloaderInterface type +type DownloaderInterface struct { + mock.Mock +} + +type DownloaderInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *DownloaderInterface) EXPECT() *DownloaderInterface_Expecter { + return &DownloaderInterface_Expecter{mock: &_m.Mock} +} + +// ChainID provides a mock function with given fields: ctx +func (_m *DownloaderInterface) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DownloaderInterface_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type DownloaderInterface_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *DownloaderInterface_Expecter) ChainID(ctx interface{}) *DownloaderInterface_ChainID_Call { + return &DownloaderInterface_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *DownloaderInterface_ChainID_Call) Run(run func(ctx context.Context)) *DownloaderInterface_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DownloaderInterface_ChainID_Call) Return(_a0 uint64, _a1 error) *DownloaderInterface_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DownloaderInterface_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *DownloaderInterface_ChainID_Call { + _c.Call.Return(run) + return _c +} + +// DownloadNextBlocks provides a mock function with given fields: ctx, fromBlockHeader, maxBlocks, syncerConfig +func (_m *DownloaderInterface) DownloadNextBlocks(ctx context.Context, fromBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig) (*types.DownloadResult, error) { + ret := _m.Called(ctx, fromBlockHeader, maxBlocks, syncerConfig) + + if len(ret) == 0 { + panic("no return value 
specified for DownloadNextBlocks") + } + + var r0 *types.DownloadResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) (*types.DownloadResult, error)); ok { + return rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) *types.DownloadResult); ok { + r0 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.DownloadResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) error); ok { + r1 = rf(ctx, fromBlockHeader, maxBlocks, syncerConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DownloaderInterface_DownloadNextBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DownloadNextBlocks' +type DownloaderInterface_DownloadNextBlocks_Call struct { + *mock.Call +} + +// DownloadNextBlocks is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockHeader *aggkittypes.BlockHeader +// - maxBlocks uint64 +// - syncerConfig aggkittypes.SyncerConfig +func (_e *DownloaderInterface_Expecter) DownloadNextBlocks(ctx interface{}, fromBlockHeader interface{}, maxBlocks interface{}, syncerConfig interface{}) *DownloaderInterface_DownloadNextBlocks_Call { + return &DownloaderInterface_DownloadNextBlocks_Call{Call: _e.mock.On("DownloadNextBlocks", ctx, fromBlockHeader, maxBlocks, syncerConfig)} +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Run(run func(ctx context.Context, fromBlockHeader *aggkittypes.BlockHeader, maxBlocks uint64, syncerConfig aggkittypes.SyncerConfig)) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*aggkittypes.BlockHeader), args[2].(uint64), args[3].(aggkittypes.SyncerConfig)) + }) + return _c +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) Return(_a0 *types.DownloadResult, _a1 error) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DownloaderInterface_DownloadNextBlocks_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockHeader, uint64, aggkittypes.SyncerConfig) (*types.DownloadResult, error)) *DownloaderInterface_DownloadNextBlocks_Call { + _c.Call.Return(run) + return _c +} + +// NewDownloaderInterface creates a new instance of DownloaderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDownloaderInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *DownloaderInterface { + mock := &DownloaderInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/types/mocks/mock_multidownloader_interface.go b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go new file mode 100644 index 000000000..efb26b2da --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_multidownloader_interface.go @@ -0,0 +1,552 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + aggkittypes "github.com/agglayer/aggkit/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" +) + +// MultidownloaderInterface is an autogenerated mock type for the MultidownloaderInterface type +type MultidownloaderInterface struct { + mock.Mock +} + +type MultidownloaderInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MultidownloaderInterface) EXPECT() *MultidownloaderInterface_Expecter { + return &MultidownloaderInterface_Expecter{mock: &_m.Mock} +} + +// ChainID provides a mock function with given fields: ctx +func (_m *MultidownloaderInterface) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type MultidownloaderInterface_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *MultidownloaderInterface_Expecter) ChainID(ctx interface{}) *MultidownloaderInterface_ChainID_Call { + return &MultidownloaderInterface_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *MultidownloaderInterface_ChainID_Call) Run(run func(ctx context.Context)) *MultidownloaderInterface_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultidownloaderInterface_ChainID_Call) Return(_a0 uint64, _a1 error) *MultidownloaderInterface_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *MultidownloaderInterface_ChainID_Call { + _c.Call.Return(run) + return _c +} + +// CheckValidBlock provides a mock function with given fields: ctx, blockNumber, blockHash +func (_m *MultidownloaderInterface) CheckValidBlock(ctx context.Context, blockNumber uint64, blockHash common.Hash) (bool, uint64, error) { + ret := _m.Called(ctx, blockNumber, blockHash) + + if len(ret) == 0 { + panic("no return value specified for CheckValidBlock") + } + + var r0 bool + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, common.Hash) (bool, uint64, error)); ok { + return rf(ctx, blockNumber, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, common.Hash) bool); ok { + r0 = rf(ctx, blockNumber, blockHash) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, common.Hash) uint64); ok { + r1 = rf(ctx, blockNumber, blockHash) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, common.Hash) error); ok { + r2 = rf(ctx, blockNumber, blockHash) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultidownloaderInterface_CheckValidBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'CheckValidBlock' +type MultidownloaderInterface_CheckValidBlock_Call struct { + *mock.Call +} + +// CheckValidBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - blockHash common.Hash +func (_e *MultidownloaderInterface_Expecter) CheckValidBlock(ctx interface{}, blockNumber interface{}, blockHash interface{}) *MultidownloaderInterface_CheckValidBlock_Call { + return &MultidownloaderInterface_CheckValidBlock_Call{Call: _e.mock.On("CheckValidBlock", ctx, blockNumber, blockHash)} +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) Run(run func(ctx context.Context, blockNumber uint64, blockHash common.Hash)) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(common.Hash)) + }) + return _c +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) Return(_a0 bool, _a1 uint64, _a2 error) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MultidownloaderInterface_CheckValidBlock_Call) RunAndReturn(run func(context.Context, uint64, common.Hash) (bool, uint64, error)) *MultidownloaderInterface_CheckValidBlock_Call { + _c.Call.Return(run) + return _c +} + +// Finality provides a mock function with no fields +func (_m *MultidownloaderInterface) Finality() aggkittypes.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Finality") + } + + var r0 aggkittypes.BlockNumberFinality + if rf, ok := ret.Get(0).(func() aggkittypes.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(aggkittypes.BlockNumberFinality) + } + + return r0 +} + +// MultidownloaderInterface_Finality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finality' +type MultidownloaderInterface_Finality_Call struct { + *mock.Call +} + +// Finality is a helper method to define mock.On call +func (_e *MultidownloaderInterface_Expecter) Finality() *MultidownloaderInterface_Finality_Call { + return &MultidownloaderInterface_Finality_Call{Call: _e.mock.On("Finality")} +} + +func (_c *MultidownloaderInterface_Finality_Call) Run(run func()) *MultidownloaderInterface_Finality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MultidownloaderInterface_Finality_Call) Return(_a0 aggkittypes.BlockNumberFinality) *MultidownloaderInterface_Finality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultidownloaderInterface_Finality_Call) RunAndReturn(run func() aggkittypes.BlockNumberFinality) *MultidownloaderInterface_Finality_Call { + _c.Call.Return(run) + return _c +} + +// GetReorgedDataByReorgID provides a mock function with given fields: ctx, reorgID +func (_m *MultidownloaderInterface) GetReorgedDataByReorgID(ctx context.Context, reorgID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(ctx, reorgID) + + if len(ret) == 0 { + panic("no return value specified for GetReorgedDataByReorgID") + } + + var r0 *multidownloadertypes.ReorgData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)); ok { + return rf(ctx, reorgID) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *multidownloadertypes.ReorgData); ok { + r0 = rf(ctx, reorgID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.ReorgData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) 
error); ok { + r1 = rf(ctx, reorgID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_GetReorgedDataByReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByReorgID' +type MultidownloaderInterface_GetReorgedDataByReorgID_Call struct { + *mock.Call +} + +// GetReorgedDataByReorgID is a helper method to define mock.On call +// - ctx context.Context +// - reorgID uint64 +func (_e *MultidownloaderInterface_Expecter) GetReorgedDataByReorgID(ctx interface{}, reorgID interface{}) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { + return &MultidownloaderInterface_GetReorgedDataByReorgID_Call{Call: _e.mock.On("GetReorgedDataByReorgID", ctx, reorgID)} +} + +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) Run(run func(ctx context.Context, reorgID uint64)) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_GetReorgedDataByReorgID_Call) RunAndReturn(run func(context.Context, uint64) (*multidownloadertypes.ReorgData, error)) *MultidownloaderInterface_GetReorgedDataByReorgID_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultidownloaderInterface) HeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) *aggkittypes.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type MultidownloaderInterface_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *aggkittypes.BlockNumberFinality +func (_e *MultidownloaderInterface_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *MultidownloaderInterface_HeaderByNumber_Call { + return &MultidownloaderInterface_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) Run(run func(ctx context.Context, number *aggkittypes.BlockNumberFinality)) *MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*aggkittypes.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 error) 
*MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error)) *MultidownloaderInterface_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// IsAvailable provides a mock function with given fields: query +func (_m *MultidownloaderInterface) IsAvailable(query multidownloadertypes.LogQuery) bool { + ret := _m.Called(query) + + if len(ret) == 0 { + panic("no return value specified for IsAvailable") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) bool); ok { + r0 = rf(query) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MultidownloaderInterface_IsAvailable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsAvailable' +type MultidownloaderInterface_IsAvailable_Call struct { + *mock.Call +} + +// IsAvailable is a helper method to define mock.On call +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) IsAvailable(query interface{}) *MultidownloaderInterface_IsAvailable_Call { + return &MultidownloaderInterface_IsAvailable_Call{Call: _e.mock.On("IsAvailable", query)} +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) Run(run func(query multidownloadertypes.LogQuery)) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) Return(_a0 bool) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultidownloaderInterface_IsAvailable_Call) RunAndReturn(run func(multidownloadertypes.LogQuery) bool) *MultidownloaderInterface_IsAvailable_Call { + _c.Call.Return(run) + return _c +} + +// IsPartiallyAvailable provides a mock function with given fields: query +func (_m *MultidownloaderInterface) IsPartiallyAvailable(query multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery) { + ret := _m.Called(query) + + if len(ret) == 0 { + panic("no return value specified for IsPartiallyAvailable") + } + + var r0 bool + var r1 *multidownloadertypes.LogQuery + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery)); ok { + return rf(query) + } + if rf, ok := ret.Get(0).(func(multidownloadertypes.LogQuery) bool); ok { + r0 = rf(query) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(multidownloadertypes.LogQuery) *multidownloadertypes.LogQuery); ok { + r1 = rf(query) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*multidownloadertypes.LogQuery) + } + } + + return r0, r1 +} + +// MultidownloaderInterface_IsPartiallyAvailable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsPartiallyAvailable' +type MultidownloaderInterface_IsPartiallyAvailable_Call struct { + *mock.Call +} + +// IsPartiallyAvailable is a helper method to define mock.On call +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) IsPartiallyAvailable(query interface{}) *MultidownloaderInterface_IsPartiallyAvailable_Call { + return &MultidownloaderInterface_IsPartiallyAvailable_Call{Call: _e.mock.On("IsPartiallyAvailable", query)} +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) Run(run func(query multidownloadertypes.LogQuery)) 
*MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) Return(_a0 bool, _a1 *multidownloadertypes.LogQuery) *MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_IsPartiallyAvailable_Call) RunAndReturn(run func(multidownloadertypes.LogQuery) (bool, *multidownloadertypes.LogQuery)) *MultidownloaderInterface_IsPartiallyAvailable_Call { + _c.Call.Return(run) + return _c +} + +// LogQuery provides a mock function with given fields: ctx, query +func (_m *MultidownloaderInterface) LogQuery(ctx context.Context, query multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error) { + ret := _m.Called(ctx, query) + + if len(ret) == 0 { + panic("no return value specified for LogQuery") + } + + var r0 multidownloadertypes.LogQueryResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)); ok { + return rf(ctx, query) + } + if rf, ok := ret.Get(0).(func(context.Context, multidownloadertypes.LogQuery) multidownloadertypes.LogQueryResponse); ok { + r0 = rf(ctx, query) + } else { + r0 = ret.Get(0).(multidownloadertypes.LogQueryResponse) + } + + if rf, ok := ret.Get(1).(func(context.Context, multidownloadertypes.LogQuery) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultidownloaderInterface_LogQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LogQuery' +type MultidownloaderInterface_LogQuery_Call struct { + *mock.Call +} + +// LogQuery is a helper method to define mock.On call +// - ctx context.Context +// - query multidownloadertypes.LogQuery +func (_e *MultidownloaderInterface_Expecter) LogQuery(ctx interface{}, query interface{}) *MultidownloaderInterface_LogQuery_Call { + return &MultidownloaderInterface_LogQuery_Call{Call: _e.mock.On("LogQuery", ctx, query)} +} + +func (_c *MultidownloaderInterface_LogQuery_Call) Run(run func(ctx context.Context, query multidownloadertypes.LogQuery)) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *MultidownloaderInterface_LogQuery_Call) Return(_a0 multidownloadertypes.LogQueryResponse, _a1 error) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultidownloaderInterface_LogQuery_Call) RunAndReturn(run func(context.Context, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)) *MultidownloaderInterface_LogQuery_Call { + _c.Call.Return(run) + return _c +} + +// StorageHeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultidownloaderInterface) StorageHeaderByNumber(ctx context.Context, number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for StorageHeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 multidownloadertypes.FinalizedType + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { + return 
rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *aggkittypes.BlockNumberFinality) *aggkittypes.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *aggkittypes.BlockNumberFinality) multidownloadertypes.FinalizedType); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) + } + + if rf, ok := ret.Get(2).(func(context.Context, *aggkittypes.BlockNumberFinality) error); ok { + r2 = rf(ctx, number) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultidownloaderInterface_StorageHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StorageHeaderByNumber' +type MultidownloaderInterface_StorageHeaderByNumber_Call struct { + *mock.Call +} + +// StorageHeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *aggkittypes.BlockNumberFinality +func (_e *MultidownloaderInterface_Expecter) StorageHeaderByNumber(ctx interface{}, number interface{}) *MultidownloaderInterface_StorageHeaderByNumber_Call { + return &MultidownloaderInterface_StorageHeaderByNumber_Call{Call: _e.mock.On("StorageHeaderByNumber", ctx, number)} +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) Run(run func(ctx context.Context, number *aggkittypes.BlockNumberFinality)) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*aggkittypes.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MultidownloaderInterface_StorageHeaderByNumber_Call) RunAndReturn(run func(context.Context, *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *MultidownloaderInterface_StorageHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewMultidownloaderInterface creates a new instance of MultidownloaderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMultidownloaderInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MultidownloaderInterface { + mock := &MultidownloaderInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/sync/types/mocks/mock_processor_interface.go b/multidownloader/sync/types/mocks/mock_processor_interface.go new file mode 100644 index 000000000..28840b6d2 --- /dev/null +++ b/multidownloader/sync/types/mocks/mock_processor_interface.go @@ -0,0 +1,191 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + synctypes "github.com/agglayer/aggkit/multidownloader/sync/types" + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/types" +) + +// ProcessorInterface is an autogenerated mock type for the ProcessorInterface type +type ProcessorInterface struct { + mock.Mock +} + +type ProcessorInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ProcessorInterface) EXPECT() *ProcessorInterface_Expecter { + return &ProcessorInterface_Expecter{mock: &_m.Mock} +} + +// GetLastProcessedBlockHeader provides a mock function with given fields: ctx +func (_m *ProcessorInterface) GetLastProcessedBlockHeader(ctx context.Context) (*types.BlockHeader, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlockHeader") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.BlockHeader, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.BlockHeader); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessorInterface_GetLastProcessedBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlockHeader' +type ProcessorInterface_GetLastProcessedBlockHeader_Call struct { + *mock.Call +} + +// GetLastProcessedBlockHeader is a helper method to define mock.On call +// - ctx context.Context +func (_e *ProcessorInterface_Expecter) GetLastProcessedBlockHeader(ctx interface{}) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + return &ProcessorInterface_GetLastProcessedBlockHeader_Call{Call: _e.mock.On("GetLastProcessedBlockHeader", ctx)} +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) Run(run func(ctx context.Context)) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) Return(_a0 *types.BlockHeader, _a1 error) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ProcessorInterface_GetLastProcessedBlockHeader_Call) RunAndReturn(run func(context.Context) (*types.BlockHeader, error)) *ProcessorInterface_GetLastProcessedBlockHeader_Call { + _c.Call.Return(run) + return _c +} + +// ProcessBlocks provides a mock function with given fields: ctx, blocks +func (_m *ProcessorInterface) ProcessBlocks(ctx context.Context, blocks *synctypes.DownloadResult) error { + ret := _m.Called(ctx, blocks) + + if len(ret) == 0 { + panic("no return value specified for ProcessBlocks") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *synctypes.DownloadResult) error); ok { + r0 = rf(ctx, blocks) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessorInterface_ProcessBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlocks' +type ProcessorInterface_ProcessBlocks_Call struct { + *mock.Call +} + +// ProcessBlocks is a helper method to define mock.On call +// - ctx context.Context +// - blocks *synctypes.DownloadResult +func (_e *ProcessorInterface_Expecter) ProcessBlocks(ctx interface{}, blocks interface{}) 
*ProcessorInterface_ProcessBlocks_Call { + return &ProcessorInterface_ProcessBlocks_Call{Call: _e.mock.On("ProcessBlocks", ctx, blocks)} +} + +func (_c *ProcessorInterface_ProcessBlocks_Call) Run(run func(ctx context.Context, blocks *synctypes.DownloadResult)) *ProcessorInterface_ProcessBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*synctypes.DownloadResult)) + }) + return _c +} + +func (_c *ProcessorInterface_ProcessBlocks_Call) Return(_a0 error) *ProcessorInterface_ProcessBlocks_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorInterface_ProcessBlocks_Call) RunAndReturn(run func(context.Context, *synctypes.DownloadResult) error) *ProcessorInterface_ProcessBlocks_Call { + _c.Call.Return(run) + return _c +} + +// Reorg provides a mock function with given fields: ctx, firstReorgedBlock +func (_m *ProcessorInterface) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + ret := _m.Called(ctx, firstReorgedBlock) + + if len(ret) == 0 { + panic("no return value specified for Reorg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, firstReorgedBlock) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessorInterface_Reorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reorg' +type ProcessorInterface_Reorg_Call struct { + *mock.Call +} + +// Reorg is a helper method to define mock.On call +// - ctx context.Context +// - firstReorgedBlock uint64 +func (_e *ProcessorInterface_Expecter) Reorg(ctx interface{}, firstReorgedBlock interface{}) *ProcessorInterface_Reorg_Call { + return &ProcessorInterface_Reorg_Call{Call: _e.mock.On("Reorg", ctx, firstReorgedBlock)} +} + +func (_c *ProcessorInterface_Reorg_Call) Run(run func(ctx context.Context, firstReorgedBlock uint64)) *ProcessorInterface_Reorg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ProcessorInterface_Reorg_Call) Return(_a0 error) *ProcessorInterface_Reorg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorInterface_Reorg_Call) RunAndReturn(run func(context.Context, uint64) error) *ProcessorInterface_Reorg_Call { + _c.Call.Return(run) + return _c +} + +// NewProcessorInterface creates a new instance of ProcessorInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewProcessorInterface(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ProcessorInterface {
+	mock := &ProcessorInterface{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/multidownloader/sync/types/multidownloader.go b/multidownloader/sync/types/multidownloader.go
new file mode 100644
index 000000000..6336c8e7b
--- /dev/null
+++ b/multidownloader/sync/types/multidownloader.go
@@ -0,0 +1,33 @@
+package types
+
+import (
+	"context"
+
+	mdrtypes "github.com/agglayer/aggkit/multidownloader/types"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type MultidownloaderInterface interface {
+	// CheckValidBlock checks if the given blockNumber and blockHash are still valid
+	// returns: isValid bool, reorgID uint64, err error
+	CheckValidBlock(ctx context.Context, blockNumber uint64,
+		blockHash common.Hash) (bool, uint64, error)
+	// GetReorgedDataByReorgID retrieves the reorged data by reorg ID
+	GetReorgedDataByReorgID(ctx context.Context, reorgID uint64) (*mdrtypes.ReorgData, error)
+	// IsAvailable checks if the logs for the given query are available
+	IsAvailable(query mdrtypes.LogQuery) bool
+	// IsPartiallyAvailable checks if the logs for the given query are partially available
+	IsPartiallyAvailable(query mdrtypes.LogQuery) (bool, *mdrtypes.LogQuery)
+	// LogQuery retrieves the logs for the given query
+	LogQuery(ctx context.Context, query mdrtypes.LogQuery) (mdrtypes.LogQueryResponse, error)
+	// Finality indicates which block to consider final (typically finalizedBlock)
+	Finality() aggkittypes.BlockNumberFinality
+	// HeaderByNumber gets the block header for the given block number finality
+	HeaderByNumber(ctx context.Context,
+		number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, error)
+	StorageHeaderByNumber(ctx context.Context,
+		number *aggkittypes.BlockNumberFinality) (*aggkittypes.BlockHeader, mdrtypes.FinalizedType, error)
+	// ChainID returns the chain ID of the EVM chain
+	ChainID(ctx context.Context) (uint64, error)
+}
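The interface above pairs block validation with reorg lookup. A sketch of how a consumer might use the two together, where md is any MultidownloaderInterface implementation and the error flow is illustrative only (assumes an "fmt" import):

func ensureBlockStillCanonical(ctx context.Context, md MultidownloaderInterface, num uint64, hash common.Hash) error {
	valid, reorgID, err := md.CheckValidBlock(ctx, num, hash)
	if err != nil {
		return err
	}
	if valid {
		return nil // the block is still on the canonical chain
	}
	// The reorg ID returned by CheckValidBlock lets us fetch details about
	// the reorg; a real caller would feed this into ProcessorInterface.Reorg.
	reorgData, err := md.GetReorgedDataByReorgID(ctx, reorgID)
	if err != nil {
		return err
	}
	return fmt.Errorf("block %d (%s) was reorged: %v", num, hash, reorgData)
}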
diff --git a/multidownloader/sync/types/processor.go b/multidownloader/sync/types/processor.go
new file mode 100644
index 000000000..273cbc787
--- /dev/null
+++ b/multidownloader/sync/types/processor.go
@@ -0,0 +1,24 @@
+package types
+
+import (
+	"context"
+
+	aggkittypes "github.com/agglayer/aggkit/types"
+)
+
+type ProcessorInterface interface {
+	// GetLastProcessedBlockHeader returns the last processed block header,
+	// or nil if no block has been processed yet.
+	// It is used to determine from which block number the downloader should start.
+	GetLastProcessedBlockHeader(ctx context.Context) (*aggkittypes.BlockHeader, error)
+	// ProcessBlocks processes the blocks. It is called for all blocks that are downloaded and
+	// must be processed.
+	// NOTE: the legacy syncer used ProcessBlock for each block, which is slower because it
+	// cannot take advantage of batch processing. ProcessBlocks is called with batches of blocks,
+	// which is more efficient.
+	// It is the responsibility of the syncer to process them in batch or one by one.
+	ProcessBlocks(ctx context.Context, blocks *DownloadResult) error
+	// Reorg is called when a reorg is detected. It must execute the syncer's reorg handling if applicable;
+	// it is possible that the reorged blocks do not affect this syncer
+	Reorg(ctx context.Context, firstReorgedBlock uint64) error
+}
diff --git a/multidownloader/types/log_query.go b/multidownloader/types/log_query.go
index ea3e442eb..fa0c798e7 100644
--- a/multidownloader/types/log_query.go
+++ b/multidownloader/types/log_query.go
@@ -13,6 +13,8 @@ import (
 type LogQuery struct {
 	Addrs      []common.Address
 	BlockRange aggkitcommon.BlockRange
+	// If BlockHash is set, BlockRange contains the corresponding block number
+	BlockHash *common.Hash
 }
 
 // NewLogQuery creates a new LogQuery
@@ -23,12 +25,31 @@ func NewLogQuery(fromBlock uint64, toBlock uint64, addrs []common.Address) LogQu
 	}
 }
 
+func NewLogQueryBlockHash(blockNumber uint64, blockHash common.Hash, addrs []common.Address) LogQuery {
+	blockRange := aggkitcommon.BlockRangeZero
+	if blockNumber != 0 {
+		blockRange = aggkitcommon.NewBlockRange(blockNumber, blockNumber)
+	}
+	return LogQuery{
+		Addrs:      addrs,
+		BlockRange: blockRange,
+		BlockHash:  &blockHash,
+	}
+}
+
 // NewLogQueryFromEthereumFilter creates a new LogQuery from an Ethereum FilterQuery
 func NewLogQueryFromEthereumFilter(query ethereum.FilterQuery) LogQuery {
-	return LogQuery{
-		Addrs:      query.Addresses,
-		BlockRange: aggkitcommon.NewBlockRange(query.FromBlock.Uint64(), query.ToBlock.Uint64()),
+	if query.BlockHash != nil {
+		blockNumber := uint64(0)
+		if query.FromBlock != nil {
+			blockNumber = query.FromBlock.Uint64()
+		}
+		return NewLogQueryBlockHash(blockNumber, *query.BlockHash, query.Addresses)
+	}
+	if query.ToBlock == nil {
+		panic("NewLogQueryFromEthereumFilter: unsupported nil ToBlock")
 	}
+	return NewLogQuery(query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses)
 }
 
 // String returns a string representation of the LogQuery
@@ -36,14 +57,52 @@ func (l *LogQuery) String() string {
 	if l == nil {
 		return "LogQuery: "
 	}
+	if l.IsBlockHashQuery() {
+		bn := " (?)"
+		if !l.BlockRange.IsEmpty() {
+			bn = fmt.Sprintf(" (%d)", l.BlockRange.FromBlock)
+		}
+		return fmt.Sprintf("LogQuery: addrs=%v, blockHash=%s%s", l.Addrs, l.BlockHash.String(), bn)
+	}
 	return fmt.Sprintf("LogQuery: addrs=%v, blockRange=%s", l.Addrs, l.BlockRange.String())
 }
+func (l *LogQuery) IsBlockHashQuery() bool {
+	return l != nil && l.BlockHash != nil
+}
+func (l *LogQuery) IsBlockRangeQuery() bool {
+	return l != nil && l.BlockHash == nil
+}
 
 // ToRPCFilterQuery converts the LogQuery to an Ethereum FilterQuery
 func (l *LogQuery) ToRPCFilterQuery() ethereum.FilterQuery {
+	if l.BlockHash != nil {
+		return ethereum.FilterQuery{
+			Addresses: l.Addrs,
+			BlockHash: l.BlockHash,
+		}
+	}
 	return ethereum.FilterQuery{
 		Addresses: l.Addrs,
 		FromBlock: new(big.Int).SetUint64(l.BlockRange.FromBlock),
 		ToBlock:   new(big.Int).SetUint64(l.BlockRange.ToBlock),
 	}
 }
+func (l *LogQuery) IsEmpty() bool {
+	return l == nil || len(l.Addrs) == 0 && l.BlockRange.IsEmpty() &&
+		l.BlockHash == nil
+}
+
+func (l *LogQuery) IsValid() bool {
+	if l == nil {
+		return true
+	}
+	if l.BlockHash != nil {
+		return true
+	}
+	// We use the value {0,0} to represent an empty range in the DB, so it is forbidden
+	// to use BlockRange(0,0) for the multidownloader
+	if !l.BlockRange.IsEmpty() && l.BlockRange.FromBlock == 0 && l.BlockRange.ToBlock == 0 {
+		return false
+	}
+	return true
+}
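LogQuery now supports two shapes: a block-range filter and a block-hash filter. A small sketch of both and their RPC translation (the numeric values are illustrative):

func exampleQueries(addrs []common.Address, knownHash common.Hash) {
	// Range query: logs between blocks 100 and 200 inclusive.
	byRange := NewLogQuery(100, 200, addrs)
	rpcRange := byRange.ToRPCFilterQuery() // FromBlock/ToBlock set, BlockHash nil

	// Block-hash query: pinned to one specific block; passing the block
	// number (150) keeps it in BlockRange, as the field comment describes.
	byHash := NewLogQueryBlockHash(150, knownHash, addrs)
	rpcHash := byHash.ToRPCFilterQuery() // BlockHash set, FromBlock/ToBlock nil

	_, _ = rpcRange, rpcHash
}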
diff --git a/multidownloader/types/log_query_response.go b/multidownloader/types/log_query_response.go
new file mode 100644
index 000000000..acc0f8027
--- /dev/null
+++ b/multidownloader/types/log_query_response.go
@@ -0,0 +1,59 @@
+package types
+
+import (
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type Log struct {
+	// Consensus fields:
+	// address of the contract that generated the event
+	Address common.Address `json:"address" gencodec:"required"`
+	// list of topics provided by the contract.
+	Topics []common.Hash `json:"topics" gencodec:"required"`
+	// supplied by the contract, usually ABI-encoded
+	Data []byte `json:"data" gencodec:"required"`
+
+	// Derived fields. These fields are filled in by the node
+	// but not secured by consensus.
+	// block in which the transaction was included
+	BlockNumber uint64 `json:"blockNumber" rlp:"-"`
+	// hash of the transaction
+	TxHash common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"`
+	// index of the transaction in the block
+	TxIndex uint `json:"transactionIndex" rlp:"-"`
+	// timestamp of the block in which the transaction was included
+	BlockTimestamp uint64 `json:"blockTimestamp" rlp:"-"`
+	// index of the log in the block
+	Index uint `json:"logIndex" rlp:"-"`
+
+	// The Removed field is true if this log was reverted due to a chain reorganisation.
+	// You must pay attention to this field if you receive logs through a filter query.
+	Removed bool `json:"removed" rlp:"-"`
+}
+
+type BlockWithLogs struct {
+	Header  aggkittypes.BlockHeader
+	IsFinal bool
+	Logs    []Log
+}
+
+type LogQueryResponse struct {
+	Blocks []BlockWithLogs
+	// ResponseRange indicates the block range covered by the response, even if Blocks is empty
+	ResponseRange aggkitcommon.BlockRange
+	// UnsafeRange indicates the block range that is in the unsafe zone (not finalized)
+	UnsafeRange aggkitcommon.BlockRange
+}
+
+func (lqr *LogQueryResponse) CountLogs() int {
+	if lqr == nil {
+		return 0
+	}
+	count := 0
+	for _, block := range lqr.Blocks {
+		count += len(block.Logs)
+	}
+	return count
+}
diff --git a/multidownloader/types/log_query_response_test.go b/multidownloader/types/log_query_response_test.go
new file mode 100644
index 000000000..c171a3087
--- /dev/null
+++ b/multidownloader/types/log_query_response_test.go
@@ -0,0 +1,311 @@
+package types
+
+import (
+	"testing"
+
+	aggkitcommon "github.com/agglayer/aggkit/common"
+	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLogQueryResponse_CountLogs_Nil(t *testing.T) {
+	var lqr *LogQueryResponse
+	count := lqr.CountLogs()
+	require.Equal(t, 0, count)
+}
+
+func TestLogQueryResponse_CountLogs_EmptyBlocks(t *testing.T) {
+	lqr := &LogQueryResponse{
+		Blocks:        []BlockWithLogs{},
+		ResponseRange: aggkitcommon.NewBlockRange(100, 200),
+		UnsafeRange:   aggkitcommon.NewBlockRange(0, 0),
+	}
+	count := lqr.CountLogs()
+	require.Equal(t, 0, count)
+}
+
+func TestLogQueryResponse_CountLogs_SingleBlockWithLogs(t *testing.T) {
+	parentHash := common.HexToHash("0x1234")
+	block := BlockWithLogs{
+		Header:  *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash),
+		IsFinal: true,
+		Logs: []Log{
+			{
+				Address:        common.HexToAddress("0x1111"),
+				Topics:         []common.Hash{common.HexToHash("0x5678")},
+				Data:           []byte("data1"),
+				BlockNumber:    100,
+				TxHash:         common.HexToHash("0xdef"),
+				TxIndex:        0,
+				BlockTimestamp: 1234567890,
+				Index:          0,
+				Removed:        false,
+			},
+			{
+				Address:        common.HexToAddress("0x2222"),
+				Topics:         []common.Hash{common.HexToHash("0x9abc")},
+				Data:           []byte("data2"),
+				BlockNumber:    100,
+				TxHash:         
common.HexToHash("0xdef"), + TxIndex: 1, + BlockTimestamp: 1234567890, + Index: 1, + Removed: false, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block}, + ResponseRange: aggkitcommon.NewBlockRange(100, 100), + UnsafeRange: aggkitcommon.NewBlockRange(0, 0), + } + + count := lqr.CountLogs() + require.Equal(t, 2, count) +} + +func TestLogQueryResponse_CountLogs_MultipleBlocksWithLogs(t *testing.T) { + parentHash1 := common.HexToHash("0x1234") + parentHash2 := common.HexToHash("0x5678") + + block1 := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 0, + BlockTimestamp: 1234567890, + Index: 0, + Removed: false, + }, + { + Address: common.HexToAddress("0x2222"), + Topics: []common.Hash{common.HexToHash("0x9abc")}, + Data: []byte("data2"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 1, + BlockTimestamp: 1234567890, + Index: 1, + Removed: false, + }, + }, + } + + block2 := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), + IsFinal: false, + Logs: []Log{ + { + Address: common.HexToAddress("0x3333"), + Topics: []common.Hash{common.HexToHash("0xaaa")}, + Data: []byte("data3"), + BlockNumber: 101, + TxHash: common.HexToHash("0xghi"), + TxIndex: 0, + BlockTimestamp: 1234567900, + Index: 0, + Removed: false, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block1, block2}, + ResponseRange: aggkitcommon.NewBlockRange(100, 101), + UnsafeRange: aggkitcommon.NewBlockRange(101, 101), + } + + count := lqr.CountLogs() + require.Equal(t, 3, count) +} + +func TestLogQueryResponse_CountLogs_MixedBlocks(t *testing.T) { + parentHash1 := common.HexToHash("0x1234") + parentHash2 := common.HexToHash("0x5678") + parentHash3 := common.HexToHash("0x9abc") + + blockWithLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash1), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 0, + BlockTimestamp: 1234567890, + Index: 0, + Removed: false, + }, + }, + } + + blockWithoutLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(101, common.HexToHash("0xdef"), 1234567900, &parentHash2), + IsFinal: true, + Logs: []Log{}, + } + + blockWithMultipleLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(102, common.HexToHash("0xghi"), 1234567910, &parentHash3), + IsFinal: false, + Logs: []Log{ + { + Address: common.HexToAddress("0x2222"), + Topics: []common.Hash{common.HexToHash("0xaaa")}, + Data: []byte("data2"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + TxIndex: 0, + BlockTimestamp: 1234567910, + Index: 0, + Removed: false, + }, + { + Address: common.HexToAddress("0x3333"), + Topics: []common.Hash{common.HexToHash("0xbbb")}, + Data: []byte("data3"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + TxIndex: 1, + BlockTimestamp: 1234567910, + Index: 1, + Removed: false, + }, + { + Address: common.HexToAddress("0x4444"), + Topics: []common.Hash{common.HexToHash("0xccc")}, + Data: []byte("data4"), + BlockNumber: 102, + TxHash: common.HexToHash("0xjkl"), + 
TxIndex: 2, + BlockTimestamp: 1234567910, + Index: 2, + Removed: true, + }, + }, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{blockWithLogs, blockWithoutLogs, blockWithMultipleLogs}, + ResponseRange: aggkitcommon.NewBlockRange(100, 102), + UnsafeRange: aggkitcommon.NewBlockRange(102, 102), + } + + count := lqr.CountLogs() + require.Equal(t, 4, count) +} + +func TestLogQueryResponse_CountLogs_BlocksWithNilLogs(t *testing.T) { + parentHash := common.HexToHash("0x1234") + + blockWithNilLogs := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + IsFinal: true, + Logs: nil, + } + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{blockWithNilLogs}, + ResponseRange: aggkitcommon.NewBlockRange(100, 100), + UnsafeRange: aggkitcommon.NewBlockRange(0, 0), + } + + count := lqr.CountLogs() + require.Equal(t, 0, count) +} + +func TestLog_Structure(t *testing.T) { + log := Log{ + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678"), common.HexToHash("0x9abc")}, + Data: []byte("test data"), + BlockNumber: 100, + TxHash: common.HexToHash("0xdef"), + TxIndex: 5, + BlockTimestamp: 1234567890, + Index: 10, + Removed: false, + } + + require.Equal(t, common.HexToAddress("0x1111"), log.Address) + require.Equal(t, 2, len(log.Topics)) + require.Equal(t, common.HexToHash("0x5678"), log.Topics[0]) + require.Equal(t, common.HexToHash("0x9abc"), log.Topics[1]) + require.Equal(t, []byte("test data"), log.Data) + require.Equal(t, uint64(100), log.BlockNumber) + require.Equal(t, common.HexToHash("0xdef"), log.TxHash) + require.Equal(t, uint(5), log.TxIndex) + require.Equal(t, uint64(1234567890), log.BlockTimestamp) + require.Equal(t, uint(10), log.Index) + require.False(t, log.Removed) +} + +func TestBlockWithLogs_Structure(t *testing.T) { + parentHash := common.HexToHash("0x1234") + header := aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash) + + logs := []Log{ + { + Address: common.HexToAddress("0x1111"), + Topics: []common.Hash{common.HexToHash("0x5678")}, + Data: []byte("data1"), + BlockNumber: 100, + Removed: false, + }, + } + + block := BlockWithLogs{ + Header: *header, + IsFinal: true, + Logs: logs, + } + + require.Equal(t, uint64(100), block.Header.Number) + require.Equal(t, common.HexToHash("0xabc"), block.Header.Hash) + require.True(t, block.IsFinal) + require.Equal(t, 1, len(block.Logs)) + require.Equal(t, common.HexToAddress("0x1111"), block.Logs[0].Address) +} + +func TestLogQueryResponse_Structure(t *testing.T) { + parentHash := common.HexToHash("0x1234") + + block := BlockWithLogs{ + Header: *aggkittypes.NewBlockHeader(100, common.HexToHash("0xabc"), 1234567890, &parentHash), + IsFinal: true, + Logs: []Log{ + { + Address: common.HexToAddress("0x1111"), + BlockNumber: 100, + }, + }, + } + + responseRange := aggkitcommon.NewBlockRange(100, 200) + unsafeRange := aggkitcommon.NewBlockRange(150, 200) + + lqr := &LogQueryResponse{ + Blocks: []BlockWithLogs{block}, + ResponseRange: responseRange, + UnsafeRange: unsafeRange, + } + + require.Equal(t, 1, len(lqr.Blocks)) + require.Equal(t, responseRange, lqr.ResponseRange) + require.Equal(t, unsafeRange, lqr.UnsafeRange) + require.Equal(t, uint64(100), lqr.ResponseRange.FromBlock) + require.Equal(t, uint64(200), lqr.ResponseRange.ToBlock) + require.Equal(t, uint64(150), lqr.UnsafeRange.FromBlock) + require.Equal(t, uint64(200), lqr.UnsafeRange.ToBlock) +} diff --git a/multidownloader/types/log_query_test.go 
b/multidownloader/types/log_query_test.go index 99a98923d..30fa339ee 100644 --- a/multidownloader/types/log_query_test.go +++ b/multidownloader/types/log_query_test.go @@ -64,3 +64,49 @@ func TestLogQuery_ToRPCFilterQuery(t *testing.T) { require.Equal(t, big.NewInt(1), filter.FromBlock) require.Equal(t, big.NewInt(10), filter.ToBlock) } + +func TestLogQuery_BlockHash(t *testing.T) { + lq := NewLogQueryBlockHash(1234, common.HexToHash("0xabc"), []common.Address{common.HexToAddress("0x123")}) + require.Equal(t, common.HexToHash("0xabc"), *lq.BlockHash) + require.Equal(t, []common.Address{common.HexToAddress("0x123")}, lq.Addrs) + blockHash := common.HexToHash("0xabc") + lq2 := NewLogQueryFromEthereumFilter(ethereum.FilterQuery{ + Addresses: []common.Address{common.HexToAddress("0x123")}, + BlockHash: &blockHash, + }) + require.Equal(t, "LogQuery: addrs=[0x0000000000000000000000000000000000000123], blockHash=0x0000000000000000000000000000000000000000000000000000000000000abc (?)", + lq2.String()) + + rpcFilter := lq.ToRPCFilterQuery() + require.Equal(t, common.HexToHash("0xabc"), *rpcFilter.BlockHash) + require.Equal(t, []common.Address{common.HexToAddress("0x123")}, rpcFilter.Addresses) + require.Equal(t, "LogQuery: addrs=[0x0000000000000000000000000000000000000123], blockHash=0x0000000000000000000000000000000000000000000000000000000000000abc (1234)", + lq.String()) +} +func TestLogQuery_IsEmpty(t *testing.T) { + var lq *LogQuery + require.True(t, lq.IsEmpty()) + + lq = &LogQuery{} + require.True(t, lq.IsEmpty()) + + lq.BlockRange = aggkitcommon.NewBlockRange(1, 10) + require.False(t, lq.IsEmpty()) + + lq.BlockRange = aggkitcommon.BlockRangeZero + require.True(t, lq.IsEmpty()) + + lq.BlockHash = new(common.Hash) + require.False(t, lq.IsEmpty()) +} + +func TestLogQuery_IsValid(t *testing.T) { + var lq *LogQuery + require.True(t, lq.IsValid()) + lq = &LogQuery{} + require.True(t, lq.IsValid(), "blockRange is {0,0} but it is empty") + lq.BlockRange = aggkitcommon.NewBlockRange(0, 0) + require.False(t, lq.IsValid()) + lq.BlockHash = new(common.Hash) + require.True(t, lq.IsValid(), "bn={0,0} but it uses blockHash") +} diff --git a/multidownloader/types/mocks/mock_reorg_porter.go b/multidownloader/types/mocks/mock_reorg_porter.go new file mode 100644 index 000000000..9cab54f05 --- /dev/null +++ b/multidownloader/types/mocks/mock_reorg_porter.go @@ -0,0 +1,374 @@ +// Code generated by mockery. DO NOT EDIT.
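The IsEmpty/IsValid pair tested above encodes one subtle rule: an explicitly constructed BlockRange(0, 0) is reserved as the DB empty-range marker. A hedged caller-side guard illustrating that contract (checkQuery is hypothetical, not part of the patch):

package example

import (
	"errors"

	multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types"
)

// checkQuery is a hypothetical guard built on the semantics tested above:
// a nil or zero-value query is empty but valid, while an explicit
// BlockRange(0, 0) is rejected because {0,0} is reserved in the DB.
func checkQuery(q *multidownloadertypes.LogQuery) error {
	if q.IsEmpty() {
		return errors.New("nothing to query")
	}
	if !q.IsValid() {
		return errors.New("BlockRange{0,0} is reserved for the DB empty-range marker")
	}
	return nil
}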
+ +package mocks + +import ( + context "context" + + dbtypes "github.com/agglayer/aggkit/db/types" + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" + + types "github.com/agglayer/aggkit/types" +) + +// ReorgPorter is an autogenerated mock type for the ReorgPorter type +type ReorgPorter struct { + mock.Mock +} + +type ReorgPorter_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgPorter) EXPECT() *ReorgPorter_Expecter { + return &ReorgPorter_Expecter{mock: &_m.Mock} +} + +// GetBlockNumberInRPC provides a mock function with given fields: ctx, blockFinality +func (_m *ReorgPorter) GetBlockNumberInRPC(ctx context.Context, blockFinality types.BlockNumberFinality) (uint64, error) { + ret := _m.Called(ctx, blockFinality) + + if len(ret) == 0 { + panic("no return value specified for GetBlockNumberInRPC") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (uint64, error)); ok { + return rf(ctx, blockFinality) + } + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) uint64); ok { + r0 = rf(ctx, blockFinality) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { + r1 = rf(ctx, blockFinality) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetBlockNumberInRPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockNumberInRPC' +type ReorgPorter_GetBlockNumberInRPC_Call struct { + *mock.Call +} + +// GetBlockNumberInRPC is a helper method to define mock.On call +// - ctx context.Context +// - blockFinality types.BlockNumberFinality +func (_e *ReorgPorter_Expecter) GetBlockNumberInRPC(ctx interface{}, blockFinality interface{}) *ReorgPorter_GetBlockNumberInRPC_Call { + return &ReorgPorter_GetBlockNumberInRPC_Call{Call: _e.mock.On("GetBlockNumberInRPC", ctx, blockFinality)} +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) Run(run func(ctx context.Context, blockFinality types.BlockNumberFinality)) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) + }) + return _c +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetBlockNumberInRPC_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (uint64, error)) *ReorgPorter_GetBlockNumberInRPC_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockStorageAndRPC provides a mock function with given fields: ctx, tx, blockNumber +func (_m *ReorgPorter) GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*multidownloadertypes.CompareBlockHeaders, error) { + ret := _m.Called(ctx, tx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBlockStorageAndRPC") + } + + var r0 *multidownloadertypes.CompareBlockHeaders + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dbtypes.Querier, uint64) (*multidownloadertypes.CompareBlockHeaders, error)); ok { + return rf(ctx, tx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, dbtypes.Querier, uint64) *multidownloadertypes.CompareBlockHeaders); ok { + r0 = rf(ctx, tx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.CompareBlockHeaders) + } 
+ } + + if rf, ok := ret.Get(1).(func(context.Context, dbtypes.Querier, uint64) error); ok { + r1 = rf(ctx, tx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetBlockStorageAndRPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockStorageAndRPC' +type ReorgPorter_GetBlockStorageAndRPC_Call struct { + *mock.Call +} + +// GetBlockStorageAndRPC is a helper method to define mock.On call +// - ctx context.Context +// - tx dbtypes.Querier +// - blockNumber uint64 +func (_e *ReorgPorter_Expecter) GetBlockStorageAndRPC(ctx interface{}, tx interface{}, blockNumber interface{}) *ReorgPorter_GetBlockStorageAndRPC_Call { + return &ReorgPorter_GetBlockStorageAndRPC_Call{Call: _e.mock.On("GetBlockStorageAndRPC", ctx, tx, blockNumber)} +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) Run(run func(ctx context.Context, tx dbtypes.Querier, blockNumber uint64)) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dbtypes.Querier), args[2].(uint64)) + }) + return _c +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) Return(_a0 *multidownloadertypes.CompareBlockHeaders, _a1 error) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetBlockStorageAndRPC_Call) RunAndReturn(run func(context.Context, dbtypes.Querier, uint64) (*multidownloadertypes.CompareBlockHeaders, error)) *ReorgPorter_GetBlockStorageAndRPC_Call { + _c.Call.Return(run) + return _c +} + +// GetLastBlockNumberInStorage provides a mock function with given fields: tx +func (_m *ReorgPorter) GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) { + ret := _m.Called(tx) + + if len(ret) == 0 { + panic("no return value specified for GetLastBlockNumberInStorage") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(dbtypes.Querier) (uint64, error)); ok { + return rf(tx) + } + if rf, ok := ret.Get(0).(func(dbtypes.Querier) uint64); ok { + r0 = rf(tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(dbtypes.Querier) error); ok { + r1 = rf(tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_GetLastBlockNumberInStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastBlockNumberInStorage' +type ReorgPorter_GetLastBlockNumberInStorage_Call struct { + *mock.Call +} + +// GetLastBlockNumberInStorage is a helper method to define mock.On call +// - tx dbtypes.Querier +func (_e *ReorgPorter_Expecter) GetLastBlockNumberInStorage(tx interface{}) *ReorgPorter_GetLastBlockNumberInStorage_Call { + return &ReorgPorter_GetLastBlockNumberInStorage_Call{Call: _e.mock.On("GetLastBlockNumberInStorage", tx)} +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) Run(run func(tx dbtypes.Querier)) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(dbtypes.Querier)) + }) + return _c +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_GetLastBlockNumberInStorage_Call) RunAndReturn(run func(dbtypes.Querier) (uint64, error)) *ReorgPorter_GetLastBlockNumberInStorage_Call { + _c.Call.Return(run) + return _c +} + +// MoveReorgedBlocks provides a mock function with given fields: tx, reorgData +func (_m *ReorgPorter) 
MoveReorgedBlocks(tx dbtypes.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for MoveReorgedBlocks") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(dbtypes.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(dbtypes.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(dbtypes.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_MoveReorgedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveReorgedBlocks' +type ReorgPorter_MoveReorgedBlocks_Call struct { + *mock.Call +} + +// MoveReorgedBlocks is a helper method to define mock.On call +// - tx dbtypes.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *ReorgPorter_Expecter) MoveReorgedBlocks(tx interface{}, reorgData interface{}) *ReorgPorter_MoveReorgedBlocks_Call { + return &ReorgPorter_MoveReorgedBlocks_Call{Call: _e.mock.On("MoveReorgedBlocks", tx, reorgData)} +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) Run(run func(tx dbtypes.Querier, reorgData multidownloadertypes.ReorgData)) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(dbtypes.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) Return(_a0 uint64, _a1 error) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_MoveReorgedBlocks_Call) RunAndReturn(run func(dbtypes.Querier, multidownloadertypes.ReorgData) (uint64, error)) *ReorgPorter_MoveReorgedBlocks_Call { + _c.Call.Return(run) + return _c +} + +// NewTx provides a mock function with given fields: ctx +func (_m *ReorgPorter) NewTx(ctx context.Context) (dbtypes.Txer, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for NewTx") + } + + var r0 dbtypes.Txer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (dbtypes.Txer, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) dbtypes.Txer); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(dbtypes.Txer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgPorter_NewTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTx' +type ReorgPorter_NewTx_Call struct { + *mock.Call +} + +// NewTx is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReorgPorter_Expecter) NewTx(ctx interface{}) *ReorgPorter_NewTx_Call { + return &ReorgPorter_NewTx_Call{Call: _e.mock.On("NewTx", ctx)} +} + +func (_c *ReorgPorter_NewTx_Call) Run(run func(ctx context.Context)) *ReorgPorter_NewTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReorgPorter_NewTx_Call) Return(_a0 dbtypes.Txer, _a1 error) *ReorgPorter_NewTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgPorter_NewTx_Call) RunAndReturn(run func(context.Context) (dbtypes.Txer, error)) *ReorgPorter_NewTx_Call { + _c.Call.Return(run) + return _c +} + +// 
TimeNowUnix provides a mock function with no fields +func (_m *ReorgPorter) TimeNowUnix() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeNowUnix") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// ReorgPorter_TimeNowUnix_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TimeNowUnix' +type ReorgPorter_TimeNowUnix_Call struct { + *mock.Call +} + +// TimeNowUnix is a helper method to define mock.On call +func (_e *ReorgPorter_Expecter) TimeNowUnix() *ReorgPorter_TimeNowUnix_Call { + return &ReorgPorter_TimeNowUnix_Call{Call: _e.mock.On("TimeNowUnix")} +} + +func (_c *ReorgPorter_TimeNowUnix_Call) Run(run func()) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ReorgPorter_TimeNowUnix_Call) Return(_a0 uint64) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgPorter_TimeNowUnix_Call) RunAndReturn(run func() uint64) *ReorgPorter_TimeNowUnix_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgPorter creates a new instance of ReorgPorter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReorgPorter(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgPorter { + mock := &ReorgPorter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/mocks/mock_reorg_processor.go b/multidownloader/types/mocks/mock_reorg_processor.go new file mode 100644 index 000000000..f6be0bc37 --- /dev/null +++ b/multidownloader/types/mocks/mock_reorg_processor.go @@ -0,0 +1,88 @@ +// Code generated by mockery. DO NOT EDIT. 
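A sketch (not part of the patch) of wiring the generated ReorgPorter mock above in a test, using only the expecter helpers shown in this file:

package mocks_test

import (
	"testing"

	"github.com/agglayer/aggkit/multidownloader/types/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestReorgPorterMock_Sketch(t *testing.T) {
	porter := mocks.NewReorgPorter(t) // expectations are auto-asserted via t.Cleanup
	porter.EXPECT().TimeNowUnix().Return(uint64(1_700_000_000))
	porter.EXPECT().GetLastBlockNumberInStorage(mock.Anything).Return(uint64(123), nil)

	last, err := porter.GetLastBlockNumberInStorage(nil) // a nil Querier is fine for the mock
	require.NoError(t, err)
	require.Equal(t, uint64(123), last)
	require.Equal(t, uint64(1_700_000_000), porter.TimeNowUnix())
}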
+ +package mocks + +import ( + context "context" + + aggkittypes "github.com/agglayer/aggkit/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/multidownloader/types" +) + +// ReorgProcessor is an autogenerated mock type for the ReorgProcessor type +type ReorgProcessor struct { + mock.Mock +} + +type ReorgProcessor_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgProcessor) EXPECT() *ReorgProcessor_Expecter { + return &ReorgProcessor_Expecter{mock: &_m.Mock} +} + +// ProcessReorg provides a mock function with given fields: ctx, detectedReorgError, finalizedBlockTag +func (_m *ReorgProcessor) ProcessReorg(ctx context.Context, detectedReorgError types.DetectedReorgError, finalizedBlockTag aggkittypes.BlockNumberFinality) error { + ret := _m.Called(ctx, detectedReorgError, finalizedBlockTag) + + if len(ret) == 0 { + panic("no return value specified for ProcessReorg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.DetectedReorgError, aggkittypes.BlockNumberFinality) error); ok { + r0 = rf(ctx, detectedReorgError, finalizedBlockTag) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReorgProcessor_ProcessReorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessReorg' +type ReorgProcessor_ProcessReorg_Call struct { + *mock.Call +} + +// ProcessReorg is a helper method to define mock.On call +// - ctx context.Context +// - detectedReorgError types.DetectedReorgError +// - finalizedBlockTag aggkittypes.BlockNumberFinality +func (_e *ReorgProcessor_Expecter) ProcessReorg(ctx interface{}, detectedReorgError interface{}, finalizedBlockTag interface{}) *ReorgProcessor_ProcessReorg_Call { + return &ReorgProcessor_ProcessReorg_Call{Call: _e.mock.On("ProcessReorg", ctx, detectedReorgError, finalizedBlockTag)} +} + +func (_c *ReorgProcessor_ProcessReorg_Call) Run(run func(ctx context.Context, detectedReorgError types.DetectedReorgError, finalizedBlockTag aggkittypes.BlockNumberFinality)) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.DetectedReorgError), args[2].(aggkittypes.BlockNumberFinality)) + }) + return _c +} + +func (_c *ReorgProcessor_ProcessReorg_Call) Return(_a0 error) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgProcessor_ProcessReorg_Call) RunAndReturn(run func(context.Context, types.DetectedReorgError, aggkittypes.BlockNumberFinality) error) *ReorgProcessor_ProcessReorg_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgProcessor creates a new instance of ReorgProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewReorgProcessor(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgProcessor { + mock := &ReorgProcessor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/mocks/mock_storager.go b/multidownloader/types/mocks/mock_storager.go index 2319d32c0..dbe8c6bb4 100644 --- a/multidownloader/types/mocks/mock_storager.go +++ b/multidownloader/types/mocks/mock_storager.go @@ -3,9 +3,10 @@ package mocks import ( - context "context" - aggkittypes "github.com/agglayer/aggkit/types" + common "github.com/ethereum/go-ethereum/common" + + context "context" coretypes "github.com/ethereum/go-ethereum/core/types" @@ -30,7 +31,7 @@ func (_m *Storager) EXPECT() *Storager_Expecter { } // GetBlockHeaderByNumber provides a mock function with given fields: tx, blockNumber -func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error) { +func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { ret := _m.Called(tx, blockNumber) if len(ret) == 0 { @@ -38,9 +39,9 @@ func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) } var r0 *aggkittypes.BlockHeader - var r1 bool + var r1 multidownloadertypes.FinalizedType var r2 error - if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, bool, error)); ok { + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { return rf(tx, blockNumber) } if rf, ok := ret.Get(0).(func(types.Querier, uint64) *aggkittypes.BlockHeader); ok { @@ -51,10 +52,10 @@ func (_m *Storager) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) } } - if rf, ok := ret.Get(1).(func(types.Querier, uint64) bool); ok { + if rf, ok := ret.Get(1).(func(types.Querier, uint64) multidownloadertypes.FinalizedType); ok { r1 = rf(tx, blockNumber) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) } if rf, ok := ret.Get(2).(func(types.Querier, uint64) error); ok { @@ -85,12 +86,136 @@ func (_c *Storager_GetBlockHeaderByNumber_Call) Run(run func(tx types.Querier, b return _c } -func (_c *Storager_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 bool, _a2 error) *Storager_GetBlockHeaderByNumber_Call { +func (_c *Storager_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *Storager_GetBlockHeaderByNumber_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, bool, error)) *Storager_GetBlockHeaderByNumber_Call { +func (_c *Storager_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *Storager_GetBlockHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockHeadersNotFinalized provides a mock function with given fields: tx, maxBlock +func (_m *Storager) GetBlockHeadersNotFinalized(tx types.Querier, maxBlock *uint64) (aggkittypes.ListBlockHeaders, error) { + ret := _m.Called(tx, maxBlock) + + if len(ret) == 0 { + panic("no return value specified for GetBlockHeadersNotFinalized") + } + + var r0 aggkittypes.ListBlockHeaders + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, *uint64) 
(aggkittypes.ListBlockHeaders, error)); ok { + return rf(tx, maxBlock) + } + if rf, ok := ret.Get(0).(func(types.Querier, *uint64) aggkittypes.ListBlockHeaders); ok { + r0 = rf(tx, maxBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(aggkittypes.ListBlockHeaders) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, *uint64) error); ok { + r1 = rf(tx, maxBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetBlockHeadersNotFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeadersNotFinalized' +type Storager_GetBlockHeadersNotFinalized_Call struct { + *mock.Call +} + +// GetBlockHeadersNotFinalized is a helper method to define mock.On call +// - tx types.Querier +// - maxBlock *uint64 +func (_e *Storager_Expecter) GetBlockHeadersNotFinalized(tx interface{}, maxBlock interface{}) *Storager_GetBlockHeadersNotFinalized_Call { + return &Storager_GetBlockHeadersNotFinalized_Call{Call: _e.mock.On("GetBlockHeadersNotFinalized", tx, maxBlock)} +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Run(run func(tx types.Querier, maxBlock *uint64)) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(*uint64)) + }) + return _c +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) Return(_a0 aggkittypes.ListBlockHeaders, _a1 error) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetBlockHeadersNotFinalized_Call) RunAndReturn(run func(types.Querier, *uint64) (aggkittypes.ListBlockHeaders, error)) *Storager_GetBlockHeadersNotFinalized_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockReorgedReorgID provides a mock function with given fields: tx, blockNumber, blockHash +func (_m *Storager) GetBlockReorgedReorgID(tx types.Querier, blockNumber uint64, blockHash common.Hash) (uint64, bool, error) { + ret := _m.Called(tx, blockNumber, blockHash) + + if len(ret) == 0 { + panic("no return value specified for GetBlockReorgedReorgID") + } + + var r0 uint64 + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64, common.Hash) (uint64, bool, error)); ok { + return rf(tx, blockNumber, blockHash) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64, common.Hash) uint64); ok { + r0 = rf(tx, blockNumber, blockHash) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64, common.Hash) bool); ok { + r1 = rf(tx, blockNumber, blockHash) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(types.Querier, uint64, common.Hash) error); ok { + r2 = rf(tx, blockNumber, blockHash) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Storager_GetBlockReorgedReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockReorgedReorgID' +type Storager_GetBlockReorgedReorgID_Call struct { + *mock.Call +} + +// GetBlockReorgedReorgID is a helper method to define mock.On call +// - tx types.Querier +// - blockNumber uint64 +// - blockHash common.Hash +func (_e *Storager_Expecter) GetBlockReorgedReorgID(tx interface{}, blockNumber interface{}, blockHash interface{}) *Storager_GetBlockReorgedReorgID_Call { + return &Storager_GetBlockReorgedReorgID_Call{Call: _e.mock.On("GetBlockReorgedReorgID", tx, blockNumber, blockHash)} +} + +func (_c *Storager_GetBlockReorgedReorgID_Call) Run(run func(tx types.Querier, blockNumber uint64, blockHash 
common.Hash)) *Storager_GetBlockReorgedReorgID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64), args[2].(common.Hash)) + }) + return _c +} + +func (_c *Storager_GetBlockReorgedReorgID_Call) Return(_a0 uint64, _a1 bool, _a2 error) *Storager_GetBlockReorgedReorgID_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Storager_GetBlockReorgedReorgID_Call) RunAndReturn(run func(types.Querier, uint64, common.Hash) (uint64, bool, error)) *Storager_GetBlockReorgedReorgID_Call { _c.Call.Return(run) return _c } @@ -154,6 +279,189 @@ func (_c *Storager_GetEthLogs_Call) RunAndReturn(run func(types.Querier, multido return _c } +// GetHighestBlockNumber provides a mock function with given fields: tx +func (_m *Storager) GetHighestBlockNumber(tx types.Querier) (uint64, error) { + ret := _m.Called(tx) + + if len(ret) == 0 { + panic("no return value specified for GetHighestBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier) (uint64, error)); ok { + return rf(tx) + } + if rf, ok := ret.Get(0).(func(types.Querier) uint64); ok { + r0 = rf(tx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier) error); ok { + r1 = rf(tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetHighestBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHighestBlockNumber' +type Storager_GetHighestBlockNumber_Call struct { + *mock.Call +} + +// GetHighestBlockNumber is a helper method to define mock.On call +// - tx types.Querier +func (_e *Storager_Expecter) GetHighestBlockNumber(tx interface{}) *Storager_GetHighestBlockNumber_Call { + return &Storager_GetHighestBlockNumber_Call{Call: _e.mock.On("GetHighestBlockNumber", tx)} +} + +func (_c *Storager_GetHighestBlockNumber_Call) Run(run func(tx types.Querier)) *Storager_GetHighestBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier)) + }) + return _c +} + +func (_c *Storager_GetHighestBlockNumber_Call) Return(_a0 uint64, _a1 error) *Storager_GetHighestBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetHighestBlockNumber_Call) RunAndReturn(run func(types.Querier) (uint64, error)) *Storager_GetHighestBlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetRangeBlockHeader provides a mock function with given fields: tx, isFinal +func (_m *Storager) GetRangeBlockHeader(tx types.Querier, isFinal multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error) { + ret := _m.Called(tx, isFinal) + + if len(ret) == 0 { + panic("no return value specified for GetRangeBlockHeader") + } + + var r0 *aggkittypes.BlockHeader + var r1 *aggkittypes.BlockHeader + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error)); ok { + return rf(tx, isFinal) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.FinalizedType) *aggkittypes.BlockHeader); ok { + r0 = rf(tx, isFinal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.FinalizedType) *aggkittypes.BlockHeader); ok { + r1 = rf(tx, isFinal) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(2).(func(types.Querier, 
multidownloadertypes.FinalizedType) error); ok { + r2 = rf(tx, isFinal) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Storager_GetRangeBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRangeBlockHeader' +type Storager_GetRangeBlockHeader_Call struct { + *mock.Call +} + +// GetRangeBlockHeader is a helper method to define mock.On call +// - tx types.Querier +// - isFinal multidownloadertypes.FinalizedType +func (_e *Storager_Expecter) GetRangeBlockHeader(tx interface{}, isFinal interface{}) *Storager_GetRangeBlockHeader_Call { + return &Storager_GetRangeBlockHeader_Call{Call: _e.mock.On("GetRangeBlockHeader", tx, isFinal)} +} + +func (_c *Storager_GetRangeBlockHeader_Call) Run(run func(tx types.Querier, isFinal multidownloadertypes.FinalizedType)) *Storager_GetRangeBlockHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.FinalizedType)) + }) + return _c +} + +func (_c *Storager_GetRangeBlockHeader_Call) Return(lowest *aggkittypes.BlockHeader, highest *aggkittypes.BlockHeader, err error) *Storager_GetRangeBlockHeader_Call { + _c.Call.Return(lowest, highest, err) + return _c +} + +func (_c *Storager_GetRangeBlockHeader_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.FinalizedType) (*aggkittypes.BlockHeader, *aggkittypes.BlockHeader, error)) *Storager_GetRangeBlockHeader_Call { + _c.Call.Return(run) + return _c +} + +// GetReorgedDataByReorgID provides a mock function with given fields: tx, reorgID +func (_m *Storager) GetReorgedDataByReorgID(tx types.Querier, reorgID uint64) (*multidownloadertypes.ReorgData, error) { + ret := _m.Called(tx, reorgID) + + if len(ret) == 0 { + panic("no return value specified for GetReorgedDataByReorgID") + } + + var r0 *multidownloadertypes.ReorgData + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)); ok { + return rf(tx, reorgID) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64) *multidownloadertypes.ReorgData); ok { + r0 = rf(tx, reorgID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*multidownloadertypes.ReorgData) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64) error); ok { + r1 = rf(tx, reorgID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_GetReorgedDataByReorgID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReorgedDataByReorgID' +type Storager_GetReorgedDataByReorgID_Call struct { + *mock.Call +} + +// GetReorgedDataByReorgID is a helper method to define mock.On call +// - tx types.Querier +// - reorgID uint64 +func (_e *Storager_Expecter) GetReorgedDataByReorgID(tx interface{}, reorgID interface{}) *Storager_GetReorgedDataByReorgID_Call { + return &Storager_GetReorgedDataByReorgID_Call{Call: _e.mock.On("GetReorgedDataByReorgID", tx, reorgID)} +} + +func (_c *Storager_GetReorgedDataByReorgID_Call) Run(run func(tx types.Querier, reorgID uint64)) *Storager_GetReorgedDataByReorgID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *Storager_GetReorgedDataByReorgID_Call) Return(_a0 *multidownloadertypes.ReorgData, _a1 error) *Storager_GetReorgedDataByReorgID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_GetReorgedDataByReorgID_Call) RunAndReturn(run func(types.Querier, uint64) (*multidownloadertypes.ReorgData, error)) 
*Storager_GetReorgedDataByReorgID_Call { + _c.Call.Return(run) + return _c +} + // GetSyncedBlockRangePerContract provides a mock function with given fields: tx func (_m *Storager) GetSyncedBlockRangePerContract(tx types.Querier) (multidownloadertypes.SetSyncSegment, error) { ret := _m.Called(tx) @@ -268,6 +576,63 @@ func (_c *Storager_GetValue_Call) RunAndReturn(run func(types.Querier, string, s return _c } +// InsertReorgAndMoveReorgedBlocksAndLogs provides a mock function with given fields: tx, reorgData +func (_m *Storager) InsertReorgAndMoveReorgedBlocksAndLogs(tx types.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for InsertReorgAndMoveReorgedBlocksAndLogs") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertReorgAndMoveReorgedBlocksAndLogs' +type Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call struct { + *mock.Call +} + +// InsertReorgAndMoveReorgedBlocksAndLogs is a helper method to define mock.On call +// - tx types.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *Storager_Expecter) InsertReorgAndMoveReorgedBlocksAndLogs(tx interface{}, reorgData interface{}) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + return &Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call{Call: _e.mock.On("InsertReorgAndMoveReorgedBlocksAndLogs", tx, reorgData)} +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Run(run func(tx types.Querier, reorgData multidownloadertypes.ReorgData)) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Return(_a0 uint64, _a1 error) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)) *Storager_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(run) + return _c +} + // InsertValue provides a mock function with given fields: tx, owner, key, value func (_m *Storager) InsertValue(tx types.Querier, owner string, key string, value string) error { ret := _m.Called(tx, owner, key, value) @@ -317,6 +682,63 @@ func (_c *Storager_InsertValue_Call) RunAndReturn(run func(types.Querier, string return _c } +// LogQuery provides a mock function with given fields: tx, query +func (_m *Storager) LogQuery(tx types.Querier, query multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error) { + ret := _m.Called(tx, query) + + if len(ret) == 0 { + panic("no return value specified for LogQuery") + } + + var r0 multidownloadertypes.LogQueryResponse + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, 
multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)); ok { + return rf(tx, query) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.LogQuery) multidownloadertypes.LogQueryResponse); ok { + r0 = rf(tx, query) + } else { + r0 = ret.Get(0).(multidownloadertypes.LogQueryResponse) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.LogQuery) error); ok { + r1 = rf(tx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storager_LogQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LogQuery' +type Storager_LogQuery_Call struct { + *mock.Call +} + +// LogQuery is a helper method to define mock.On call +// - tx types.Querier +// - query multidownloadertypes.LogQuery +func (_e *Storager_Expecter) LogQuery(tx interface{}, query interface{}) *Storager_LogQuery_Call { + return &Storager_LogQuery_Call{Call: _e.mock.On("LogQuery", tx, query)} +} + +func (_c *Storager_LogQuery_Call) Run(run func(tx types.Querier, query multidownloadertypes.LogQuery)) *Storager_LogQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.LogQuery)) + }) + return _c +} + +func (_c *Storager_LogQuery_Call) Return(_a0 multidownloadertypes.LogQueryResponse, _a1 error) *Storager_LogQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storager_LogQuery_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.LogQuery) (multidownloadertypes.LogQueryResponse, error)) *Storager_LogQuery_Call { + _c.Call.Return(run) + return _c +} + // NewTx provides a mock function with given fields: ctx func (_m *Storager) NewTx(ctx context.Context) (types.Txer, error) { ret := _m.Called(ctx) @@ -376,7 +798,7 @@ func (_c *Storager_NewTx_Call) RunAndReturn(run func(context.Context) (types.Txe } // SaveEthLogsWithHeaders provides a mock function with given fields: tx, blockHeaders, logs, isFinal -func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders []*aggkittypes.BlockHeader, logs []coretypes.Log, isFinal bool) error { +func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []coretypes.Log, isFinal bool) error { ret := _m.Called(tx, blockHeaders, logs, isFinal) if len(ret) == 0 { @@ -384,7 +806,7 @@ func (_m *Storager) SaveEthLogsWithHeaders(tx types.Querier, blockHeaders []*agg } var r0 error - if rf, ok := ret.Get(0).(func(types.Querier, []*aggkittypes.BlockHeader, []coretypes.Log, bool) error); ok { + if rf, ok := ret.Get(0).(func(types.Querier, aggkittypes.ListBlockHeaders, []coretypes.Log, bool) error); ok { r0 = rf(tx, blockHeaders, logs, isFinal) } else { r0 = ret.Error(0) @@ -400,16 +822,16 @@ type Storager_SaveEthLogsWithHeaders_Call struct { // SaveEthLogsWithHeaders is a helper method to define mock.On call // - tx types.Querier -// - blockHeaders []*aggkittypes.BlockHeader +// - blockHeaders aggkittypes.ListBlockHeaders // - logs []coretypes.Log // - isFinal bool func (_e *Storager_Expecter) SaveEthLogsWithHeaders(tx interface{}, blockHeaders interface{}, logs interface{}, isFinal interface{}) *Storager_SaveEthLogsWithHeaders_Call { return &Storager_SaveEthLogsWithHeaders_Call{Call: _e.mock.On("SaveEthLogsWithHeaders", tx, blockHeaders, logs, isFinal)} } -func (_c *Storager_SaveEthLogsWithHeaders_Call) Run(run func(tx types.Querier, blockHeaders []*aggkittypes.BlockHeader, logs []coretypes.Log, isFinal bool)) *Storager_SaveEthLogsWithHeaders_Call { 
+func (_c *Storager_SaveEthLogsWithHeaders_Call) Run(run func(tx types.Querier, blockHeaders aggkittypes.ListBlockHeaders, logs []coretypes.Log, isFinal bool)) *Storager_SaveEthLogsWithHeaders_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(types.Querier), args[1].([]*aggkittypes.BlockHeader), args[2].([]coretypes.Log), args[3].(bool)) + run(args[0].(types.Querier), args[1].(aggkittypes.ListBlockHeaders), args[2].([]coretypes.Log), args[3].(bool)) }) return _c } @@ -419,7 +841,54 @@ func (_c *Storager_SaveEthLogsWithHeaders_Call) Return(_a0 error) *Storager_Save return _c } -func (_c *Storager_SaveEthLogsWithHeaders_Call) RunAndReturn(run func(types.Querier, []*aggkittypes.BlockHeader, []coretypes.Log, bool) error) *Storager_SaveEthLogsWithHeaders_Call { +func (_c *Storager_SaveEthLogsWithHeaders_Call) RunAndReturn(run func(types.Querier, aggkittypes.ListBlockHeaders, []coretypes.Log, bool) error) *Storager_SaveEthLogsWithHeaders_Call { + _c.Call.Return(run) + return _c +} + +// UpdateBlockToFinalized provides a mock function with given fields: tx, blockNumbers +func (_m *Storager) UpdateBlockToFinalized(tx types.Querier, blockNumbers []uint64) error { + ret := _m.Called(tx, blockNumbers) + + if len(ret) == 0 { + panic("no return value specified for UpdateBlockToFinalized") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.Querier, []uint64) error); ok { + r0 = rf(tx, blockNumbers) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Storager_UpdateBlockToFinalized_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateBlockToFinalized' +type Storager_UpdateBlockToFinalized_Call struct { + *mock.Call +} + +// UpdateBlockToFinalized is a helper method to define mock.On call +// - tx types.Querier +// - blockNumbers []uint64 +func (_e *Storager_Expecter) UpdateBlockToFinalized(tx interface{}, blockNumbers interface{}) *Storager_UpdateBlockToFinalized_Call { + return &Storager_UpdateBlockToFinalized_Call{Call: _e.mock.On("UpdateBlockToFinalized", tx, blockNumbers)} +} + +func (_c *Storager_UpdateBlockToFinalized_Call) Run(run func(tx types.Querier, blockNumbers []uint64)) *Storager_UpdateBlockToFinalized_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].([]uint64)) + }) + return _c +} + +func (_c *Storager_UpdateBlockToFinalized_Call) Return(_a0 error) *Storager_UpdateBlockToFinalized_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Storager_UpdateBlockToFinalized_Call) RunAndReturn(run func(types.Querier, []uint64) error) *Storager_UpdateBlockToFinalized_Call { _c.Call.Return(run) return _c } diff --git a/multidownloader/types/mocks/mock_storager_for_reorg.go b/multidownloader/types/mocks/mock_storager_for_reorg.go new file mode 100644 index 000000000..74bf29868 --- /dev/null +++ b/multidownloader/types/mocks/mock_storager_for_reorg.go @@ -0,0 +1,162 @@ +// Code generated by mockery. DO NOT EDIT. 
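The same expecter pattern applied to the new Storager.LogQuery method — a sketch, assuming mock_storager.go keeps the standard mockery constructor NewStorager, which lies outside the hunks shown:

package mocks_test

import (
	"testing"

	aggkitcommon "github.com/agglayer/aggkit/common"
	multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types"
	"github.com/agglayer/aggkit/multidownloader/types/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestStoragerLogQuery_Sketch(t *testing.T) {
	storager := mocks.NewStorager(t) // assumption: standard mockery constructor for this pre-existing mock
	q := multidownloadertypes.NewLogQuery(100, 200, nil)
	resp := multidownloadertypes.LogQueryResponse{
		ResponseRange: aggkitcommon.NewBlockRange(100, 200),
	}
	storager.EXPECT().LogQuery(mock.Anything, q).Return(resp, nil)

	got, err := storager.LogQuery(nil, q)
	require.NoError(t, err)
	require.Equal(t, 0, got.CountLogs()) // no BlockWithLogs attached, hence zero logs
}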
+ +package mocks + +import ( + aggkittypes "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" + + multidownloadertypes "github.com/agglayer/aggkit/multidownloader/types" + + types "github.com/agglayer/aggkit/db/types" +) + +// StoragerForReorg is an autogenerated mock type for the StoragerForReorg type +type StoragerForReorg struct { + mock.Mock +} + +type StoragerForReorg_Expecter struct { + mock *mock.Mock +} + +func (_m *StoragerForReorg) EXPECT() *StoragerForReorg_Expecter { + return &StoragerForReorg_Expecter{mock: &_m.Mock} +} + +// GetBlockHeaderByNumber provides a mock function with given fields: tx, blockNumber +func (_m *StoragerForReorg) GetBlockHeaderByNumber(tx types.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error) { + ret := _m.Called(tx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByNumber") + } + + var r0 *aggkittypes.BlockHeader + var r1 multidownloadertypes.FinalizedType + var r2 error + if rf, ok := ret.Get(0).(func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)); ok { + return rf(tx, blockNumber) + } + if rf, ok := ret.Get(0).(func(types.Querier, uint64) *aggkittypes.BlockHeader); ok { + r0 = rf(tx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggkittypes.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(types.Querier, uint64) multidownloadertypes.FinalizedType); ok { + r1 = rf(tx, blockNumber) + } else { + r1 = ret.Get(1).(multidownloadertypes.FinalizedType) + } + + if rf, ok := ret.Get(2).(func(types.Querier, uint64) error); ok { + r2 = rf(tx, blockNumber) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// StoragerForReorg_GetBlockHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeaderByNumber' +type StoragerForReorg_GetBlockHeaderByNumber_Call struct { + *mock.Call +} + +// GetBlockHeaderByNumber is a helper method to define mock.On call +// - tx types.Querier +// - blockNumber uint64 +func (_e *StoragerForReorg_Expecter) GetBlockHeaderByNumber(tx interface{}, blockNumber interface{}) *StoragerForReorg_GetBlockHeaderByNumber_Call { + return &StoragerForReorg_GetBlockHeaderByNumber_Call{Call: _e.mock.On("GetBlockHeaderByNumber", tx, blockNumber)} +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) Run(run func(tx types.Querier, blockNumber uint64)) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(uint64)) + }) + return _c +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) Return(_a0 *aggkittypes.BlockHeader, _a1 multidownloadertypes.FinalizedType, _a2 error) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *StoragerForReorg_GetBlockHeaderByNumber_Call) RunAndReturn(run func(types.Querier, uint64) (*aggkittypes.BlockHeader, multidownloadertypes.FinalizedType, error)) *StoragerForReorg_GetBlockHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// InsertReorgAndMoveReorgedBlocksAndLogs provides a mock function with given fields: tx, reorgData +func (_m *StoragerForReorg) InsertReorgAndMoveReorgedBlocksAndLogs(tx types.Querier, reorgData multidownloadertypes.ReorgData) (uint64, error) { + ret := _m.Called(tx, reorgData) + + if len(ret) == 0 { + panic("no return value specified for InsertReorgAndMoveReorgedBlocksAndLogs") + } + + var 
r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)); ok { + return rf(tx, reorgData) + } + if rf, ok := ret.Get(0).(func(types.Querier, multidownloadertypes.ReorgData) uint64); ok { + r0 = rf(tx, reorgData) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(types.Querier, multidownloadertypes.ReorgData) error); ok { + r1 = rf(tx, reorgData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertReorgAndMoveReorgedBlocksAndLogs' +type StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call struct { + *mock.Call +} + +// InsertReorgAndMoveReorgedBlocksAndLogs is a helper method to define mock.On call +// - tx types.Querier +// - reorgData multidownloadertypes.ReorgData +func (_e *StoragerForReorg_Expecter) InsertReorgAndMoveReorgedBlocksAndLogs(tx interface{}, reorgData interface{}) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + return &StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call{Call: _e.mock.On("InsertReorgAndMoveReorgedBlocksAndLogs", tx, reorgData)} +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Run(run func(tx types.Querier, reorgData multidownloadertypes.ReorgData)) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.Querier), args[1].(multidownloadertypes.ReorgData)) + }) + return _c +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) Return(_a0 uint64, _a1 error) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call) RunAndReturn(run func(types.Querier, multidownloadertypes.ReorgData) (uint64, error)) *StoragerForReorg_InsertReorgAndMoveReorgedBlocksAndLogs_Call { + _c.Call.Return(run) + return _c +} + +// NewStoragerForReorg creates a new instance of StoragerForReorg. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStoragerForReorg(t interface { + mock.TestingT + Cleanup(func()) +}) *StoragerForReorg { + mock := &StoragerForReorg{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multidownloader/types/reorg_data.go b/multidownloader/types/reorg_data.go new file mode 100644 index 000000000..0bc059534 --- /dev/null +++ b/multidownloader/types/reorg_data.go @@ -0,0 +1,35 @@ +package types + +import ( + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgData struct { + // ReorgID is the unique identifier for the reorg stored in DB (incremental ID) + ReorgID uint64 + // BlockRangeAffected is the range of blocks affected by the reorg (from,to inclusive) + BlockRangeAffected aggkitcommon.BlockRange + // DetectedAtBlock is the block number where the reorg was detected + DetectedAtBlock uint64 + DetectedTimestamp uint64 + NetworkLatestBlock uint64 + NetworkFinalizedBlock uint64 + NetworkFinalizedBlockName aggkittypes.BlockNumberFinality + Description string +} + +func (r *ReorgData) String() string { + return fmt.Sprintf("ReorgData{ReorgID: %d, BlockRangeAffected: %s, DetectedAtBlock: %d, DetectedTimestamp: %d, "+ + "NetworkLatestBlock: %d, NetworkFinalizedBlock: %d (%s), Description: %s}", + r.ReorgID, + r.BlockRangeAffected.String(), + r.DetectedAtBlock, + r.DetectedTimestamp, + r.NetworkLatestBlock, + r.NetworkFinalizedBlock, + r.NetworkFinalizedBlockName.String(), + r.Description) +} diff --git a/multidownloader/types/reorg_data_test.go b/multidownloader/types/reorg_data_test.go new file mode 100644 index 000000000..e7682fdfa --- /dev/null +++ b/multidownloader/types/reorg_data_test.go @@ -0,0 +1,26 @@ +package types + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/stretchr/testify/require" +) + +func TestReorgData_String(t *testing.T) { + reorgData := &ReorgData{ + ReorgID: 1, + BlockRangeAffected: aggkitcommon.NewBlockRange(100, 200), + DetectedAtBlock: 250, + DetectedTimestamp: 1620000000, + NetworkLatestBlock: 300, + NetworkFinalizedBlock: 240, + NetworkFinalizedBlockName: aggkittypes.LatestBlock, + Description: "Test reorg description", + } + require.Equal(t, "ReorgData{ReorgID: 1, BlockRangeAffected: From: 100, To: 200 (101), "+ + "DetectedAtBlock: 250, DetectedTimestamp: 1620000000, NetworkLatestBlock: 300, NetworkFinalizedBlock: 240 (LatestBlock), "+ + "Description: Test reorg description}", + reorgData.String()) +} diff --git a/multidownloader/types/reorg_error.go b/multidownloader/types/reorg_error.go new file mode 100644 index 000000000..7bb47f37e --- /dev/null +++ b/multidownloader/types/reorg_error.go @@ -0,0 +1,126 @@ +package types + +import ( + "errors" + "fmt" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/ethereum/go-ethereum/common" +) + +type ReorgDetectionReason int + +const ( + ReorgDetectionReason_BlockHashMismatch ReorgDetectionReason = iota + 1 + ReorgDetectionReason_ParentHashMismatch + ReorgDetectionReason_MissingBlock + // Forced acts like MissingBlock but without the check; it is basically a debug mode + // to produce reorg scenarios (develMode must be enabled) + ReorgDetectionReason_Forced +) + +func (r ReorgDetectionReason) String() string { + switch r { + case ReorgDetectionReason_BlockHashMismatch: + return "BlockHashMismatch" + case ReorgDetectionReason_ParentHashMismatch: + return "ParentHashMismatch" + case 
ReorgDetectionReason_MissingBlock: + return "MissingBlock" + case ReorgDetectionReason_Forced: + return "Forced" + } + return fmt.Sprintf("ReorgDetectionReason(%d)", int(r)) +} + +// DetectedReorgError is an error that is raised when a reorg is detected +// The block is one of the blocks that were reorged, but not necessarily the first one +type DetectedReorgError struct { + OffendingBlockNumber uint64 // Important: is not the first reorged block, but one of them + OldHash common.Hash + NewHash common.Hash + ReorgDetectionReason ReorgDetectionReason + Message string +} + +// IsDetectedReorgError checks if an error is a DetectedReorgError +func IsDetectedReorgError(err error) bool { + c := CastDetectedReorgError(err) + return c != nil +} + +// NewDetectedReorgError creates a new DetectedReorgError +func NewDetectedReorgError(offendingBlockNumber uint64, + reason ReorgDetectionReason, + oldHash, newHash common.Hash, msg string) *DetectedReorgError { + return &DetectedReorgError{ + OffendingBlockNumber: offendingBlockNumber, + OldHash: oldHash, + NewHash: newHash, + ReorgDetectionReason: reason, + Message: msg, + } +} + +func (e *DetectedReorgError) Error() string { + switch e.ReorgDetectionReason { + case ReorgDetectionReason_MissingBlock: + return fmt.Sprintf("reorgError: block number %d is missing: %s", + e.OffendingBlockNumber, e.Message) + case ReorgDetectionReason_BlockHashMismatch: + return fmt.Sprintf("reorgError: block number %d: old hash %s != new hash %s: %s", + e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) + case ReorgDetectionReason_ParentHashMismatch: + return fmt.Sprintf("reorgError: block number %d: old parent hash %s != new parent hash %s: %s", + e.OffendingBlockNumber, e.OldHash.String(), e.NewHash.String(), e.Message) + case ReorgDetectionReason_Forced: + return fmt.Sprintf("reorgError: block number %d: forced reason: %s", + e.OffendingBlockNumber, e.Message) + default: + return fmt.Sprintf("reorgError: block number %d: reason %d: %s", + e.OffendingBlockNumber, e.ReorgDetectionReason, e.Message) + } +} + +func CastDetectedReorgError(err error) *DetectedReorgError { + var reorgErr *DetectedReorgError + if errors.As(err, &reorgErr) { + return reorgErr + } + return nil +} + +type ReorgedError struct { + Message string + BlockRangeReorged aggkitcommon.BlockRange + ReorgID uint64 +} + +func NewReorgedError(blockRangeReorged aggkitcommon.BlockRange, + reorgID uint64, + msg string) *ReorgedError { + return &ReorgedError{ + Message: msg, + BlockRangeReorged: blockRangeReorged, + ReorgID: reorgID, + } +} + +func (e *ReorgedError) Error() string { + return fmt.Sprintf("reorgedError: reorgID=%d blockRangeReorged=%s: %s", + e.ReorgID, e.BlockRangeReorged.String(), e.Message) +} + +// IsReorgedError checks if an error is a ReorgedError +func IsReorgedError(err error) bool { + c := CastReorgedError(err) + return c != nil +} + +func CastReorgedError(err error) *ReorgedError { + var reorgErr *ReorgedError + if errors.As(err, &reorgErr) { + return reorgErr + } + return nil +} diff --git a/multidownloader/types/reorg_error_test.go b/multidownloader/types/reorg_error_test.go new file mode 100644 index 000000000..6e95bf22e --- /dev/null +++ b/multidownloader/types/reorg_error_test.go @@ -0,0 +1,308 @@ +package types + +import ( + "errors" + "fmt" + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestReorgDetectionReason_String(t *testing.T) { + tests := 
[]struct { + name string + reason ReorgDetectionReason + expected string + }{ + { + name: "BlockHashMismatch", + reason: ReorgDetectionReason_BlockHashMismatch, + expected: "BlockHashMismatch", + }, + { + name: "ParentHashMismatch", + reason: ReorgDetectionReason_ParentHashMismatch, + expected: "ParentHashMismatch", + }, + { + name: "MissingBlock", + reason: ReorgDetectionReason_MissingBlock, + expected: "MissingBlock", + }, + { + name: "Forced", + reason: ReorgDetectionReason_Forced, + expected: "Forced", + }, + { + name: "Unknown reason", + reason: ReorgDetectionReason(99), + expected: "ReorgDetectionReason(99)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.reason.String() + require.Equal(t, tt.expected, result) + }) + } +} + +const testReorgMsg = "test message" + +func TestNewDetectedReorgError(t *testing.T) { + blockNum := uint64(100) + reason := ReorgDetectionReason_BlockHashMismatch + oldHash := common.HexToHash("0x1234") + newHash := common.HexToHash("0x5678") + + err := NewDetectedReorgError(blockNum, reason, oldHash, newHash, testReorgMsg) + + require.NotNil(t, err) + require.Equal(t, blockNum, err.OffendingBlockNumber) + require.Equal(t, reason, err.ReorgDetectionReason) + require.Equal(t, oldHash, err.OldHash) + require.Equal(t, newHash, err.NewHash) + require.Equal(t, testReorgMsg, err.Message) +} + +func TestDetectedReorgError_Error(t *testing.T) { + blockNum := uint64(100) + oldHash := common.HexToHash("0x1234") + newHash := common.HexToHash("0x5678") + + tests := []struct { + name string + reason ReorgDetectionReason + expectedPrefix string + }{ + { + name: "MissingBlock error message", + reason: ReorgDetectionReason_MissingBlock, + expectedPrefix: "reorgError: block number 100 is missing: test message", + }, + { + name: "BlockHashMismatch error message", + reason: ReorgDetectionReason_BlockHashMismatch, + expectedPrefix: fmt.Sprintf("reorgError: block number 100: old hash %s != new hash %s: test message", oldHash.String(), newHash.String()), + }, + { + name: "ParentHashMismatch error message", + reason: ReorgDetectionReason_ParentHashMismatch, + expectedPrefix: fmt.Sprintf("reorgError: block number 100: old parent hash %s != new parent hash %s: test message", oldHash.String(), newHash.String()), + }, + { + name: "Forced error message", + reason: ReorgDetectionReason_Forced, + expectedPrefix: "reorgError: block number 100: forced reason: test message", + }, + { + name: "Unknown reason error message", + reason: ReorgDetectionReason(99), + expectedPrefix: "reorgError: block number 100: reason 99: test message", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := NewDetectedReorgError(blockNum, tt.reason, oldHash, newHash, testReorgMsg) + result := err.Error() + require.Equal(t, tt.expectedPrefix, result) + }) + } +} + +func TestIsDetectedReorgError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Valid DetectedReorgError", + err: NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.Hash{}, common.Hash{}, "test"), + expected: true, + }, + { + name: "Wrapped DetectedReorgError", + err: fmt.Errorf("wrapped: %w", NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.Hash{}, common.Hash{}, "test")), + expected: true, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expected: false, + }, + { + name: "Nil error", + err: nil, + expected: false, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + result := IsDetectedReorgError(tt.err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCastDetectedReorgError(t *testing.T) { + originalErr := NewDetectedReorgError(100, ReorgDetectionReason_BlockHashMismatch, common.HexToHash("0x1234"), common.HexToHash("0x5678"), "test") + + tests := []struct { + name string + err error + expectNil bool + expectEqual *DetectedReorgError + }{ + { + name: "Valid DetectedReorgError", + err: originalErr, + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Wrapped DetectedReorgError", + err: fmt.Errorf("wrapped: %w", originalErr), + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expectNil: true, + expectEqual: nil, + }, + { + name: "Nil error", + err: nil, + expectNil: true, + expectEqual: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CastDetectedReorgError(tt.err) + if tt.expectNil { + require.Nil(t, result) + } else { + require.NotNil(t, result) + require.Equal(t, tt.expectEqual, result) + } + }) + } +} + +func TestNewReorgedError(t *testing.T) { + blockRange := aggkitcommon.NewBlockRange(100, 200) + reorgID := uint64(1) + + err := NewReorgedError(blockRange, reorgID, testReorgMsg) + + require.NotNil(t, err) + require.Equal(t, blockRange, err.BlockRangeReorged) + require.Equal(t, reorgID, err.ReorgID) + require.Equal(t, testReorgMsg, err.Message) +} + +func TestReorgedError_Error(t *testing.T) { + blockRange := aggkitcommon.NewBlockRange(100, 200) + reorgID := uint64(1) + + err := NewReorgedError(blockRange, reorgID, testReorgMsg) + result := err.Error() + + expected := fmt.Sprintf("reorgedError: reorgID=%d blockRangeReorged=%s: %s", reorgID, blockRange.String(), testReorgMsg) + require.Equal(t, expected, result) +} + +func TestIsReorgedError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Valid ReorgedError", + err: NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test"), + expected: true, + }, + { + name: "Wrapped ReorgedError", + err: fmt.Errorf("wrapped: %w", NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test")), + expected: true, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expected: false, + }, + { + name: "Nil error", + err: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsReorgedError(tt.err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCastReorgedError(t *testing.T) { + originalErr := NewReorgedError(aggkitcommon.NewBlockRange(100, 200), 1, "test") + + tests := []struct { + name string + err error + expectNil bool + expectEqual *ReorgedError + }{ + { + name: "Valid ReorgedError", + err: originalErr, + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Wrapped ReorgedError", + err: fmt.Errorf("wrapped: %w", originalErr), + expectNil: false, + expectEqual: originalErr, + }, + { + name: "Regular error", + err: errors.New("regular error"), + expectNil: true, + expectEqual: nil, + }, + { + name: "Nil error", + err: nil, + expectNil: true, + expectEqual: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CastReorgedError(tt.err) + if tt.expectNil { + require.Nil(t, result) + } else { + require.NotNil(t, result) + require.Equal(t, tt.expectEqual, result) + } + }) + } +} diff --git a/multidownloader/types/reorg_port.go 
b/multidownloader/types/reorg_port.go new file mode 100644 index 000000000..ce3bfdaa8 --- /dev/null +++ b/multidownloader/types/reorg_port.go @@ -0,0 +1,38 @@ +package types + +import ( + "context" + + dbtypes "github.com/agglayer/aggkit/db/types" + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgPorter interface { + NewTx(ctx context.Context) (dbtypes.Txer, error) + GetBlockStorageAndRPC(ctx context.Context, tx dbtypes.Querier, blockNumber uint64) (*CompareBlockHeaders, error) + GetLastBlockNumberInStorage(tx dbtypes.Querier) (uint64, error) + // MoveReorgedBlocks returns the ReorgID of the inserted reorg + MoveReorgedBlocks(tx dbtypes.Querier, reorgData ReorgData) (uint64, error) + GetBlockNumberInRPC(ctx context.Context, blockFinality aggkittypes.BlockNumberFinality) (uint64, error) + TimeNowUnix() uint64 +} + +type CompareBlockHeaders struct { + BlockNumber uint64 + StorageHeader *aggkittypes.BlockHeader + IsFinalized FinalizedType + RpcHeader *aggkittypes.BlockHeader +} + +func (c *CompareBlockHeaders) ExistsRPCBlock() bool { + if c == nil { + return false + } + return c.RpcHeader != nil +} +func (c *CompareBlockHeaders) ExistsStorageBlock() bool { + if c == nil { + return false + } + return c.StorageHeader != nil +} diff --git a/multidownloader/types/reorg_port_test.go b/multidownloader/types/reorg_port_test.go new file mode 100644 index 000000000..c28894779 --- /dev/null +++ b/multidownloader/types/reorg_port_test.go @@ -0,0 +1,69 @@ +package types + +import ( + "testing" + + aggkittypes "github.com/agglayer/aggkit/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestCompareBlockHeaders_ExistsRPCBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *CompareBlockHeaders + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns false when RpcHeader is nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{Number: 100}, + RpcHeader: nil, + } + result := c.ExistsRPCBlock() + require.False(t, result) + }) + + t.Run("returns true when RpcHeader is not nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + RpcHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x1234"), + }, + } + result := c.ExistsRPCBlock() + require.True(t, result) + }) +} + +func TestCompareBlockHeaders_ExistsStorageBlock(t *testing.T) { + t.Run("returns false when receiver is nil", func(t *testing.T) { + var c *CompareBlockHeaders + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns false when StorageHeader is nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: nil, + RpcHeader: &aggkittypes.BlockHeader{Number: 100}, + } + result := c.ExistsStorageBlock() + require.False(t, result) + }) + + t.Run("returns true when StorageHeader is not nil", func(t *testing.T) { + c := &CompareBlockHeaders{ + BlockNumber: 100, + StorageHeader: &aggkittypes.BlockHeader{ + Number: 100, + Hash: common.HexToHash("0x5678"), + }, + } + result := c.ExistsStorageBlock() + require.True(t, result) + }) +} diff --git a/multidownloader/types/reorg_processor.go b/multidownloader/types/reorg_processor.go new file mode 100644 index 000000000..86f27bc90 --- /dev/null +++ b/multidownloader/types/reorg_processor.go @@ -0,0 +1,20 @@ +package types + +import ( + "context" + + aggkittypes "github.com/agglayer/aggkit/types" +) + +type ReorgProcessor interface { + // 
ProcessReorg processes a detected reorg starting from the offending block number. + // It identifies the range of blocks affected by the reorg and takes necessary actions + // to handle the reorganization. + // input parameters: + // - ctx: the context for managing cancellation and timeouts + // - detectedReorgError: the error returned by the reorg detection logic, containing + // the offending block number and the reason for the reorg detection + // - finalizedBlockTag: the block tag to consider as finalized (typically finalizedBlock) + ProcessReorg(ctx context.Context, detectedReorgError DetectedReorgError, + finalizedBlockTag aggkittypes.BlockNumberFinality) error +} diff --git a/multidownloader/types/set_sync_segment.go b/multidownloader/types/set_sync_segment.go index 5ce083580..7f669fcba 100644 --- a/multidownloader/types/set_sync_segment.go +++ b/multidownloader/types/set_sync_segment.go @@ -1,12 +1,11 @@ package types import ( - "context" "fmt" "strings" aggkitcommon "github.com/agglayer/aggkit/common" - ethermantypes "github.com/agglayer/aggkit/etherman/types" + aggkittypes "github.com/agglayer/aggkit/types" "github.com/ethereum/go-ethereum/common" ) @@ -36,7 +35,7 @@ func NewSetSyncSegment() SetSyncSegment { } // NewSetSyncSegmentFromLogQuery creates a new SetSyncSegment from a LogQuery -func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) SetSyncSegment { +func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) (SetSyncSegment, error) { set := NewSetSyncSegment() for _, addr := range logQuery.Addrs { segment := SyncSegment{ @@ -45,16 +44,7 @@ func NewSetSyncSegmentFromLogQuery(logQuery *LogQuery) SetSyncSegment { } set.Add(segment) } - return set -} - -// Segments returns all SyncSegments in the SetSyncSegment -func (s *SetSyncSegment) Segments() []SyncSegment { - result := make([]SyncSegment, 0, len(s.segments)) - for _, segment := range s.segments { - result = append(result, *segment) - } - return result + return set, nil } // Add adds a new SyncSegment to the SetSyncSegment, merging block ranges @@ -68,10 +58,17 @@ func (s *SetSyncSegment) Add(segment SyncSegment) { return } // Merge syncers - s.UpdateBlockRange(¤t, current.BlockRange.Extend(segment.BlockRange)) + var newBlockRange aggkitcommon.BlockRange + if current.BlockRange.IsEmpty() { + newBlockRange = segment.BlockRange + } else { + newBlockRange = current.BlockRange.Extend(segment.BlockRange) + } + s.UpdateBlockRange(¤t, newBlockRange) } // GetByContract returns the SyncSegment for the given contract address +// it returns true if it exists, otherwise it returns false func (s *SetSyncSegment) GetByContract(addr common.Address) (SyncSegment, bool) { if s == nil { return SyncSegment{}, false @@ -91,13 +88,13 @@ func (f *SetSyncSegment) SubtractSegments(segments *SetSyncSegment) error { return nil } newSegments := f.Clone() - for _, segment := range segments.Segments() { + for _, segment := range segments.segments { previousSegment, exists := newSegments.GetByContract(segment.ContractAddr) - if exists { + if exists && !previousSegment.IsEmpty() { brs := previousSegment.BlockRange.Subtract(segment.BlockRange) switch len(brs) { case 0: - newSegments.Remove(&previousSegment) + newSegments.Empty(&previousSegment) case 1: newSegments.UpdateBlockRange(&previousSegment, brs[0]) default: @@ -116,7 +113,10 @@ func (f *SetSyncSegment) SubtractLogQuery(logQuery *LogQuery) error { if logQuery == nil { return nil } - newSegments := NewSetSyncSegmentFromLogQuery(logQuery) + newSegments, err := NewSetSyncSegmentFromLogQuery(logQuery) 
+ if err != nil { + return err + } return f.SubtractSegments(&newSegments) } func isIncluded(ranges []aggkitcommon.BlockRange, br aggkitcommon.BlockRange) bool { @@ -155,21 +155,27 @@ func (f *SetSyncSegment) TotalBlocks() uint64 { return total } -// UpdateTargetBlockToNumber updates the ToBlock to real blockNumber -func (f *SetSyncSegment) UpdateTargetBlockToNumber(ctx context.Context, - blockNotifierGetter ethermantypes.BlockNotifierManager) error { +// GetTargetToBlockTags returns the list of TargetToBlock tags in the +// SetSyncSegment without duplicates +func (f *SetSyncSegment) GetTargetToBlockTags() []aggkittypes.BlockNumberFinality { if f == nil { return nil } + result := make([]aggkittypes.BlockNumberFinality, 0, len(f.segments)) for _, segment := range f.segments { - currentBlock, err := blockNotifierGetter.GetCurrentBlockNumber(ctx, segment.TargetToBlock) - if err != nil { - return fmt.Errorf("setSyncSegment.UpdateToBlock: error getting BlockNotifier for finality=%s: %w", - segment.TargetToBlock.String(), err) + // if it's already in the list, don't add it again + exists := false + for _, existing := range result { + if existing == segment.TargetToBlock { + exists = true + break + } + } + if !exists { + result = append(result, segment.TargetToBlock) } - segment.UpdateToBlock(currentBlock) } - return nil + return result } // IsAvailable checks if the required LogQuery data is already synced @@ -186,9 +192,64 @@ func (f *SetSyncSegment) IsAvailable(query LogQuery) bool { return true } +// IsPartiallyAvailable checks if some part of the LogQuery is already synced, +// always starting from FromBlock. +// If any data is available, it returns true together with a LogQuery covering the available data +func (f *SetSyncSegment) IsPartiallyAvailable(query LogQuery) (bool, *LogQuery) { + if f == nil || len(query.Addrs) == 0 { + return false, nil + } + + // Find the maximum contiguous range starting from FromBlock that is available + // for all addresses in the query + var maxAvailableToBlock *uint64 + + for _, addr := range query.Addrs { + segment, exists := f.GetByContract(addr) + if !exists { + // If any address is not synced at all, nothing is available + return false, nil + } + + // Calculate the intersection between the segment and the query range + intersection := segment.BlockRange.Intersect(query.BlockRange) + if intersection.IsEmpty() { + // If there's no overlap, nothing is available + return false, nil + } + + // Check if the intersection starts at FromBlock + // If not, there's a gap at the beginning, so nothing is available + if intersection.FromBlock != query.BlockRange.FromBlock { + return false, nil + } + + // Update the minimum ToBlock (the bottleneck across all addresses) + if maxAvailableToBlock == nil || intersection.ToBlock < *maxAvailableToBlock { + maxAvailableToBlock = &intersection.ToBlock + } + } + + if maxAvailableToBlock == nil { + return false, nil + } + + // Create the available LogQuery + availableQuery := &LogQuery{ + Addrs: query.Addrs, + BlockRange: aggkitcommon.NewBlockRange( + query.BlockRange.FromBlock, + *maxAvailableToBlock, + ), + } + + return true, availableQuery +} + // NextQuery generates the next LogQuery to sync based on the lowest FromBlock pending // to synchronize -func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uint64) (*LogQuery, error) { +func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, + maxBlockNumber uint64, applyMaxBlockNumber bool) (*LogQuery, error) { if f == nil || len(f.segments) == 0 { return nil, 
ErrFinished } @@ -200,7 +261,7 @@ func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uin lowestSegment.BlockRange.FromBlock, lowestSegment.BlockRange.FromBlock+uint64(syncBlockChunkSize)-1, )) - if maxBlockNumber > 0 { + if applyMaxBlockNumber { br = br.Cap(maxBlockNumber) } if br.IsEmpty() { @@ -215,6 +276,41 @@ func (f *SetSyncSegment) NextQuery(syncBlockChunkSize uint32, maxBlockNumber uin BlockRange: br, }, nil } +func (f *SetSyncSegment) GetHighestBlockNumber() (uint64, aggkittypes.BlockNumberFinality) { + if f == nil || len(f.segments) == 0 { + return 0, aggkittypes.LatestBlock + } + highest := uint64(0) + finality := aggkittypes.LatestBlock + for _, segment := range f.segments { + if segment.BlockRange.ToBlock > highest { + highest = segment.BlockRange.ToBlock + finality = segment.TargetToBlock + } + } + return highest, finality +} + +func (f *SetSyncSegment) GetTotalPendingBlockRange() *aggkitcommon.BlockRange { + if f == nil || len(f.segments) == 0 { + return nil + } + var totalRange *aggkitcommon.BlockRange + for _, segment := range f.segments { + // Skip empty segments to avoid creating invalid BlockRanges + if segment.IsEmpty() { + continue + } + if totalRange == nil { + br := segment.BlockRange + totalRange = &br + } else { + extended := totalRange.Extend(segment.BlockRange) + totalRange = &extended + } + } + return totalRange +} func (f *SetSyncSegment) GetLowestFromBlockSegment() *SyncSegment { if f == nil || len(f.segments) == 0 { @@ -239,8 +335,21 @@ func (f *SetSyncSegment) GetAddressesForBlockRange(blockRange aggkitcommon.Block return addresses } +func (f *SetSyncSegment) GetAddressesForBlock(blockNumber uint64) []common.Address { + blockRange := aggkitcommon.NewBlockRange(blockNumber, blockNumber) + return f.GetAddressesForBlockRange(blockRange) +} + func (f *SetSyncSegment) Finished() bool { - return f == nil || len(f.segments) == 0 + if f == nil || len(f.segments) == 0 { + return true + } + for _, segment := range f.segments { + if !segment.IsEmpty() { + return false + } + } + return true } func (f *SetSyncSegment) Clone() *SetSyncSegment { @@ -254,6 +363,15 @@ func (f *SetSyncSegment) Clone() *SetSyncSegment { return &newSet } +func (f *SetSyncSegment) Empty(segment *SyncSegment) { + for _, s := range f.segments { + if s.Equal(*segment) { + s.Empty() + return + } + } +} + func (f *SetSyncSegment) Remove(segmentToRemove *SyncSegment) { if f == nil || segmentToRemove == nil { return @@ -305,3 +423,21 @@ func (s *SetSyncSegment) SegmentsByContract(addrs []common.Address) []SyncSegmen } return result } + +// GetContracts returns the list of contract addresses +// in the SetSyncSegment +func (s *SetSyncSegment) GetContracts() []common.Address { + contracts := make([]common.Address, 0, len(s.segments)) + for _, segment := range s.segments { + contracts = append(contracts, segment.ContractAddr) + } + return contracts +} + +func (s *SetSyncSegment) GetSegments() []SyncSegment { + res := make([]SyncSegment, 0, len(s.segments)) + for _, segment := range s.segments { + res = append(res, *segment) + } + return res +} diff --git a/multidownloader/types/set_sync_segment_test.go b/multidownloader/types/set_sync_segment_test.go index 27a5fff6f..fc03feb95 100644 --- a/multidownloader/types/set_sync_segment_test.go +++ b/multidownloader/types/set_sync_segment_test.go @@ -4,10 +4,8 @@ import ( "testing" aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/etherman/types/mocks" aggkittypes "github.com/agglayer/aggkit/types" 
"github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -30,19 +28,6 @@ func TestSetSyncSegment_String(t *testing.T) { require.Contains(t, result, "SyncSegment[0]=") } -func TestSetSyncSegment_Segments(t *testing.T) { - set := NewSetSyncSegment() - segment := SyncSegment{ - ContractAddr: common.HexToAddress("0x123"), - BlockRange: aggkitcommon.NewBlockRange(1, 10), - } - set.segments = []*SyncSegment{&segment} - - result := set.Segments() - require.Len(t, result, 1) - require.Equal(t, segment, result[0]) -} - func TestSetSyncSegment_Add(t *testing.T) { t.Run("add new segment", func(t *testing.T) { set := NewSetSyncSegment() @@ -75,6 +60,39 @@ func TestSetSyncSegment_Add(t *testing.T) { require.Equal(t, uint64(1), res.BlockRange.FromBlock) require.Equal(t, uint64(15), res.BlockRange.ToBlock) }) + + t.Run("merge from aggkitcommon.BlockRangeZero", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment1 := SyncSegment{ + ContractAddr: addr, + // That means no sync + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + res, exists := set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(5), res.BlockRange.FromBlock) + require.Equal(t, uint64(15), res.BlockRange.ToBlock) + segment3 := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(2, 5), + TargetToBlock: aggkittypes.LatestBlock, + } + + set.Add(segment3) + res, exists = set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(2), res.BlockRange.FromBlock) + require.Equal(t, uint64(15), res.BlockRange.ToBlock) + }) } func TestSetSyncSegment_GetByContract(t *testing.T) { @@ -130,6 +148,32 @@ func TestSetSyncSegment_Subtract(t *testing.T) { result := set1.SubtractSegments(&set2) require.NotNil(t, result) }) + + t.Run("subtract from empty BlockRange", func(t *testing.T) { + set1 := NewSetSyncSegment() + set2 := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + set1.Add(SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + }) + set2.Add(SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 20), + }) + emptySetStr := set1.String() + Set2Str := set2.String() + // {empty} - {0-20} = {empty} + + err := set1.SubtractSegments(&set2) + require.NoError(t, err) + require.Equal(t, emptySetStr, set1.String()) + + // {0-20} - {empty} = {0-20} + err = set2.SubtractSegments(&set1) + require.NoError(t, err) + require.Equal(t, Set2Str, set2.String()) + }) } func TestSetSyncSegment_TotalBlocks(t *testing.T) { @@ -166,60 +210,245 @@ func TestSetSyncSegment_TotalBlocks(t *testing.T) { require.Equal(t, uint64(20), set.TotalBlocks()) }) } -func TestSetSyncSegment_UpdateTargetBlockToNumber(t *testing.T) { + +func TestSetSyncSegment_IsAvailable(t *testing.T) { t.Run("nil receiver", func(t *testing.T) { var set *SetSyncSegment - err := set.UpdateTargetBlockToNumber(t.Context(), nil) - require.NoError(t, err) + query := LogQuery{ + Addrs: []common.Address{common.HexToAddress("0x123")}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + result := set.IsAvailable(query) + require.False(t, result) }) - t.Run("update target block", func(t *testing.T) { + t.Run("segment not available", func(t *testing.T) { set := 
NewSetSyncSegment() - finality := aggkittypes.LatestBlock - segment := SyncSegment{ - ContractAddr: common.HexToAddress("0x123"), - BlockRange: aggkitcommon.NewBlockRange(1, 10), - TargetToBlock: finality, + query := LogQuery{ + Addrs: []common.Address{common.HexToAddress("0x123")}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), } - set.Add(segment) - mockBlockNotifierManager := mocks.NewBlockNotifierManager(t) - - mockBlockNotifierManager.EXPECT().GetCurrentBlockNumber(mock.Anything, finality).Return(uint64(150), nil).Once() - err := set.UpdateTargetBlockToNumber(t.Context(), mockBlockNotifierManager) - require.NoError(t, err) + result := set.IsAvailable(query) + require.False(t, result) }) } -func TestSetSyncSegment_IsAvailable(t *testing.T) { + +func TestSetSyncSegment_IsPartiallyAvailable(t *testing.T) { t.Run("nil receiver", func(t *testing.T) { var set *SetSyncSegment query := LogQuery{ Addrs: []common.Address{common.HexToAddress("0x123")}, BlockRange: aggkitcommon.NewBlockRange(1, 10), } - result := set.IsAvailable(query) - require.False(t, result) + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) }) - t.Run("segment not available", func(t *testing.T) { + t.Run("empty addresses in query", func(t *testing.T) { + set := NewSetSyncSegment() + query := LogQuery{ + Addrs: []common.Address{}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("address not synced at all", func(t *testing.T) { set := NewSetSyncSegment() query := LogQuery{ Addrs: []common.Address{common.HexToAddress("0x123")}, BlockRange: aggkitcommon.NewBlockRange(1, 10), } - result := set.IsAvailable(query) - require.False(t, result) + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("no overlap between query and segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(50, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("gap at the beginning - segment starts after FromBlock", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("partially available - segment covers beginning but not all", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + 
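// Note: all requested addresses are kept in the returned query; only ToBlock is trimmed to the synced bottleneck + 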
require.Equal(t, []common.Address{addr}, result.Addrs) + }) + + t.Run("fully available - segment covers entire query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + require.Equal(t, []common.Address{addr}, result.Addrs) + }) + + t.Run("multiple addresses - all have partial data, find bottleneck", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 70), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 50), // Bottleneck + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(50), result.BlockRange.ToBlock) + require.Equal(t, []common.Address{addr1, addr2}, result.Addrs) + }) + + t.Run("multiple addresses - one has gap at beginning", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(10, 100), // Gap at beginning + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("multiple addresses - one not synced at all", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + // addr2 not added + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.False(t, available) + require.Nil(t, result) + }) + + t.Run("segment extends beyond query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 200), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + available, result := set.IsPartiallyAvailable(query) + require.True(t, available) + require.NotNil(t, result) + require.Equal(t, uint64(1), result.BlockRange.FromBlock) + require.Equal(t, uint64(100), result.BlockRange.ToBlock) }) } func TestSetSyncSegment_NextQuery(t *testing.T) { t.Run("nil or empty 
segments", func(t *testing.T) { var set *SetSyncSegment - query, err := set.NextQuery(100, 0) + query, err := set.NextQuery(100, 0, false) require.Nil(t, query) require.Equal(t, ErrFinished, err) emptySet := NewSetSyncSegment() - query, err = emptySet.NextQuery(100, 0) + query, err = emptySet.NextQuery(100, 0, false) require.Nil(t, query) require.Equal(t, ErrFinished, err) }) @@ -270,6 +499,16 @@ func TestSetSyncSegment_Finished(t *testing.T) { set.segments = []*SyncSegment{segment} require.False(t, set.Finished()) }) + t.Run("empty segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := &SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + } + segment.Empty() + set.segments = []*SyncSegment{segment} + require.True(t, set.Finished()) + }) } func TestSetSyncSegment_Clone(t *testing.T) { @@ -367,7 +606,7 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Equal(t, uint64(100), res.BlockRange.ToBlock) }) - t.Run("remove totally a segment", func(t *testing.T) { + t.Run("fulfill totally a segment,set it as empty", func(t *testing.T) { set := NewSetSyncSegment() addr := common.HexToAddress("0x123") segment := SyncSegment{ @@ -383,8 +622,9 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { err := set.SubtractLogQuery(logQuery) require.NoError(t, err) - _, exists := set.GetByContract(addr) - require.False(t, exists) + segment, exists := set.GetByContract(addr) + require.True(t, segment.IsEmpty(), "segment is empty") + require.True(t, exists, "is empty but exists") }) t.Run("bad removed segment (middle segment)", func(t *testing.T) { @@ -405,3 +645,850 @@ func TestSetSyncSegment_RemoveLogQuerySegment(t *testing.T) { require.Error(t, err) }) } + +func TestSetSyncSegment_GetTotalPendingBlockRange_WithEmptySegments(t *testing.T) { + t.Run("a segment with empty range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment) + br := set.GetTotalPendingBlockRange() + require.Nil(t, br) + }) + t.Run("single empty segment returns nil", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment) + + // Sync everything + logQuery := &LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + + // Verify segment is empty + segment, exists := set.GetByContract(addr) + require.True(t, exists) + require.True(t, segment.IsEmpty()) + + // GetTotalPendingBlockRange should return nil, not an invalid range + totalRange := set.GetTotalPendingBlockRange() + require.Nil(t, totalRange, "should return nil when all segments are empty") + }) + + t.Run("multiple segments with some empty", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + // Add two segments + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + TargetToBlock: aggkittypes.LatestBlock, + } + 
set.Add(segment1) + set.Add(segment2) + + // Sync first segment completely + logQuery := &LogQuery{ + Addrs: []common.Address{addr1}, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + + // First segment should be empty + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.True(t, seg1.IsEmpty()) + + // Second segment should not be empty + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.False(t, seg2.IsEmpty()) + + // GetTotalPendingBlockRange should return only the non-empty segment range + totalRange := set.GetTotalPendingBlockRange() + require.NotNil(t, totalRange) + require.Equal(t, uint64(50), totalRange.FromBlock) + require.Equal(t, uint64(150), totalRange.ToBlock) + }) +} + +func TestNewSetSyncSegmentFromLogQuery(t *testing.T) { + t.Run("create from valid log query", func(t *testing.T) { + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + logQuery := &LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + + set, err := NewSetSyncSegmentFromLogQuery(logQuery) + require.NoError(t, err) + require.Len(t, set.segments, 2) + + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.Equal(t, uint64(10), seg1.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg1.BlockRange.ToBlock) + + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.Equal(t, uint64(10), seg2.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg2.BlockRange.ToBlock) + }) +} + +func TestSetSyncSegment_GetTargetToBlockTags(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var set *SetSyncSegment + result := set.GetTargetToBlockTags() + require.Nil(t, result) + }) + + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + result := set.GetTargetToBlockTags() + require.Empty(t, result) + }) + + t.Run("single segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.FinalizedBlock, + } + set.Add(segment) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 1) + require.Equal(t, aggkittypes.FinalizedBlock, result[0]) + }) + + t.Run("multiple segments with same tag", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 1) + require.Equal(t, aggkittypes.LatestBlock, result[0]) + }) + + t.Run("multiple segments with different tags", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(5, 15), + TargetToBlock: aggkittypes.FinalizedBlock, + } + segment3 := SyncSegment{ + ContractAddr: common.HexToAddress("0x333"), + BlockRange: aggkitcommon.NewBlockRange(10, 20), + TargetToBlock: 
aggkittypes.LatestBlock, + } + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + result := set.GetTargetToBlockTags() + require.Len(t, result, 2) + require.Contains(t, result, aggkittypes.LatestBlock) + require.Contains(t, result, aggkittypes.FinalizedBlock) + }) +} + +func TestSetSyncSegment_GetHighestBlockNumber(t *testing.T) { + t.Run("nil or empty set", func(t *testing.T) { + var set *SetSyncSegment + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(0), highest) + require.Equal(t, aggkittypes.LatestBlock, finality) + + emptySet := NewSetSyncSegment() + highest, finality = emptySet.GetHighestBlockNumber() + require.Equal(t, uint64(0), highest) + require.Equal(t, aggkittypes.LatestBlock, finality) + }) + + t.Run("single segment", func(t *testing.T) { + set := NewSetSyncSegment() + segment := SyncSegment{ + ContractAddr: common.HexToAddress("0x123"), + BlockRange: aggkitcommon.NewBlockRange(1, 100), + TargetToBlock: aggkittypes.FinalizedBlock, + } + set.Add(segment) + + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(100), highest) + require.Equal(t, aggkittypes.FinalizedBlock, finality) + }) + + t.Run("multiple segments", func(t *testing.T) { + set := NewSetSyncSegment() + segment1 := SyncSegment{ + ContractAddr: common.HexToAddress("0x111"), + BlockRange: aggkitcommon.NewBlockRange(1, 50), + TargetToBlock: aggkittypes.LatestBlock, + } + segment2 := SyncSegment{ + ContractAddr: common.HexToAddress("0x222"), + BlockRange: aggkitcommon.NewBlockRange(10, 200), + TargetToBlock: aggkittypes.FinalizedBlock, + } + segment3 := SyncSegment{ + ContractAddr: common.HexToAddress("0x333"), + BlockRange: aggkitcommon.NewBlockRange(100, 150), + TargetToBlock: aggkittypes.SafeBlock, + } + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + highest, finality := set.GetHighestBlockNumber() + require.Equal(t, uint64(200), highest) + require.Equal(t, aggkittypes.FinalizedBlock, finality) + }) +} + +func TestSetSyncSegment_GetAddressesForBlock(t *testing.T) { + t.Run("single block within range", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + set.Add(segment1) + set.Add(segment2) + + addresses := set.GetAddressesForBlock(75) + require.Len(t, addresses, 2) + require.Contains(t, addresses, addr1) + require.Contains(t, addresses, addr2) + }) + + t.Run("block outside all ranges", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + + addresses := set.GetAddressesForBlock(200) + require.Empty(t, addresses) + }) + + t.Run("block at range boundary", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 20), + } + set.Add(segment) + + // Test at FromBlock + addresses := set.GetAddressesForBlock(10) + require.Len(t, addresses, 1) + require.Contains(t, addresses, addr) + + // Test at ToBlock + addresses = set.GetAddressesForBlock(20) + require.Len(t, addresses, 1) + require.Contains(t, addresses, addr) + }) +} + +func TestSetSyncSegment_Empty(t *testing.T) { + 
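// Empty marks a segment as fully synced (BlockRangeZero) while keeping it in the set, unlike Remove + 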
t.Run("empty existing segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + // Get the segment reference + seg, exists := set.GetByContract(addr) + require.True(t, exists) + require.False(t, seg.IsEmpty()) + + // Empty it + set.Empty(&seg) + + // Verify it's empty + updatedSeg, exists := set.GetByContract(addr) + require.True(t, exists) + require.True(t, updatedSeg.IsEmpty()) + }) + + t.Run("empty non-existent segment does nothing", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + + // Try to empty a segment that's not in the set + set.Empty(&segment) + // Should not panic + + // Verify set is still empty + require.Len(t, set.segments, 0) + }) +} + +func TestSetSyncSegment_Remove_Complete(t *testing.T) { + t.Run("remove existing segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + set.Add(segment2) + require.Len(t, set.segments, 2) + + // Remove first segment + set.Remove(&segment1) + require.Len(t, set.segments, 1) + + // Verify addr1 is gone + _, exists := set.GetByContract(addr1) + require.False(t, exists) + + // Verify addr2 still exists + _, exists = set.GetByContract(addr2) + require.True(t, exists) + }) + + t.Run("remove non-existent segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + + // Try to remove segment that's not in set + set.Remove(&segment2) + require.Len(t, set.segments, 1) + + // Verify addr1 still exists + _, exists := set.GetByContract(addr1) + require.True(t, exists) + }) +} + +func TestSetSyncSegment_AddLogQuery(t *testing.T) { + t.Run("nil set or query", func(t *testing.T) { + var set *SetSyncSegment + require.NoError(t, set.AddLogQuery(nil)) + + validSet := NewSetSyncSegment() + require.NoError(t, validSet.AddLogQuery(nil)) + }) + + t.Run("add log query to empty set", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + logQuery := &LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + + err := set.AddLogQuery(logQuery) + require.NoError(t, err) + require.Len(t, set.segments, 2) + + seg1, exists := set.GetByContract(addr1) + require.True(t, exists) + require.Equal(t, uint64(10), seg1.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg1.BlockRange.ToBlock) + + seg2, exists := set.GetByContract(addr2) + require.True(t, exists) + require.Equal(t, uint64(10), seg2.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg2.BlockRange.ToBlock) + }) + + t.Run("add log query with overlapping ranges", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + 
// Add initial segment + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 50), + } + set.Add(segment) + + // Add log query with overlapping range + logQuery := &LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(40, 100), + } + + err := set.AddLogQuery(logQuery) + require.NoError(t, err) + + // Should merge the ranges + seg, exists := set.GetByContract(addr) + require.True(t, exists) + require.Equal(t, uint64(1), seg.BlockRange.FromBlock) + require.Equal(t, uint64(100), seg.BlockRange.ToBlock) + }) +} + +func TestSetSyncSegment_SegmentsByContract(t *testing.T) { + t.Run("get segments for addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + addr3 := common.HexToAddress("0x333") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + segment3 := SyncSegment{ + ContractAddr: addr3, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + // Get segments for addr1 and addr2 + result := set.SegmentsByContract([]common.Address{addr1, addr2}) + require.Len(t, result, 2) + require.Equal(t, addr1, result[0].ContractAddr) + require.Equal(t, addr2, result[1].ContractAddr) + }) + + t.Run("get segments for non-existent addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + + // Try to get segment for addr2 which doesn't exist + result := set.SegmentsByContract([]common.Address{addr2}) + require.Empty(t, result) + }) + + t.Run("empty address list", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + result := set.SegmentsByContract([]common.Address{}) + require.Empty(t, result) + }) +} + +func TestSetSyncSegment_GetContracts(t *testing.T) { + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + contracts := set.GetContracts() + require.Empty(t, contracts) + }) + + t.Run("get all contracts", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + addr3 := common.HexToAddress("0x333") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + segment3 := SyncSegment{ + ContractAddr: addr3, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + + set.Add(segment1) + set.Add(segment2) + set.Add(segment3) + + contracts := set.GetContracts() + require.Len(t, contracts, 3) + require.Contains(t, contracts, addr1) + require.Contains(t, contracts, addr2) + require.Contains(t, contracts, addr3) + }) +} + +func TestSetSyncSegment_GetSegments(t *testing.T) { + t.Run("empty set", func(t *testing.T) { + set := NewSetSyncSegment() + segments := set.GetSegments() + require.Empty(t, segments) + }) + + t.Run("get all segments", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 
:= common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(50, 150), + } + + set.Add(segment1) + set.Add(segment2) + + segments := set.GetSegments() + require.Len(t, segments, 2) + require.Equal(t, addr1, segments[0].ContractAddr) + require.Equal(t, addr2, segments[1].ContractAddr) + }) +} + +func TestSetSyncSegment_IsAvailable_PositiveCases(t *testing.T) { + t.Run("query fully available for single address", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + query := LogQuery{ + Addrs: []common.Address{addr}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.True(t, result) + }) + + t.Run("query fully available for multiple addresses", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.True(t, result) + }) + + t.Run("query not available - one address missing coverage", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(1, 30), // Doesn't cover full range + } + set.Add(segment1) + set.Add(segment2) + + query := LogQuery{ + Addrs: []common.Address{addr1, addr2}, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + result := set.IsAvailable(query) + require.False(t, result) + }) +} + +func TestSetSyncSegment_NextQuery_PositiveCases(t *testing.T) { + t.Run("generate next query without maxBlock limit", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 1000), + } + set.Add(segment) + + query, err := set.NextQuery(100, 0, false) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(1), query.BlockRange.FromBlock) + require.Equal(t, uint64(100), query.BlockRange.ToBlock) + require.Len(t, query.Addrs, 1) + require.Contains(t, query.Addrs, addr) + }) + + t.Run("generate next query with maxBlock limit applied", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 1000), + } + set.Add(segment) + + query, err := set.NextQuery(100, 50, true) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(1), query.BlockRange.FromBlock) + require.Equal(t, uint64(50), query.BlockRange.ToBlock) + }) + + t.Run("generate next query with multiple addresses in same range", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := 
common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(10, 100), + } + set.Add(segment1) + set.Add(segment2) + + query, err := set.NextQuery(50, 0, false) + require.NoError(t, err) + require.NotNil(t, query) + require.Equal(t, uint64(10), query.BlockRange.FromBlock) + require.Equal(t, uint64(59), query.BlockRange.ToBlock) + require.Len(t, query.Addrs, 2) + }) + + t.Run("maxBlock limit results in empty range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + set.Add(segment) + + // Max block is below the segment range + query, err := set.NextQuery(100, 50, true) + require.Error(t, err) + require.Equal(t, ErrFinished, err) + require.Nil(t, query) + }) + + t.Run("returns ErrFinished when lowest segment is empty", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + // Add an empty segment + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + } + set.Add(segment) + + query, err := set.NextQuery(100, 0, false) + require.Error(t, err) + require.Equal(t, ErrFinished, err) + require.Nil(t, query) + }) +} + +func TestSetSyncSegment_SubtractLogQuery_EdgeCases(t *testing.T) { + t.Run("error creating segment from log query", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 100), + } + set.Add(segment) + + // Log query with empty addresses should still work + logQuery := &LogQuery{ + Addrs: []common.Address{}, + BlockRange: aggkitcommon.NewBlockRange(10, 20), + } + + err := set.SubtractLogQuery(logQuery) + require.NoError(t, err) + }) +} + +func TestSetSyncSegment_GetTotalPendingBlockRange_EdgeCases(t *testing.T) { + t.Run("nil set returns nil", func(t *testing.T) { + var set *SetSyncSegment + result := set.GetTotalPendingBlockRange() + require.Nil(t, result) + }) + + t.Run("set with single non-empty segment", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + set.Add(segment) + + result := set.GetTotalPendingBlockRange() + require.NotNil(t, result) + require.Equal(t, uint64(10), result.FromBlock) + require.Equal(t, uint64(50), result.ToBlock) + }) + + t.Run("set with non-overlapping segments", func(t *testing.T) { + set := NewSetSyncSegment() + addr1 := common.HexToAddress("0x111") + addr2 := common.HexToAddress("0x222") + + segment1 := SyncSegment{ + ContractAddr: addr1, + BlockRange: aggkitcommon.NewBlockRange(10, 50), + } + segment2 := SyncSegment{ + ContractAddr: addr2, + BlockRange: aggkitcommon.NewBlockRange(100, 200), + } + set.Add(segment1) + set.Add(segment2) + + result := set.GetTotalPendingBlockRange() + require.NotNil(t, result) + require.Equal(t, uint64(10), result.FromBlock) + require.Equal(t, uint64(200), result.ToBlock) + }) +} + +func TestSetSyncSegment_IsPartiallyAvailable_EdgeCases(t *testing.T) { + t.Run("segment exactly matches query range", func(t *testing.T) { + set := NewSetSyncSegment() + addr := common.HexToAddress("0x123") + + segment := SyncSegment{ + ContractAddr: addr, + BlockRange: 
+
+func TestSetSyncSegment_IsPartiallyAvailable_EdgeCases(t *testing.T) {
+	t.Run("segment exactly matches query range", func(t *testing.T) {
+		set := NewSetSyncSegment()
+		addr := common.HexToAddress("0x123")
+
+		segment := SyncSegment{
+			ContractAddr: addr,
+			BlockRange:   aggkitcommon.NewBlockRange(10, 50),
+		}
+		set.Add(segment)
+
+		query := LogQuery{
+			Addrs:      []common.Address{addr},
+			BlockRange: aggkitcommon.NewBlockRange(10, 50),
+		}
+
+		available, result := set.IsPartiallyAvailable(query)
+		require.True(t, available)
+		require.NotNil(t, result)
+		require.Equal(t, uint64(10), result.BlockRange.FromBlock)
+		require.Equal(t, uint64(50), result.BlockRange.ToBlock)
+	})
+}
diff --git a/multidownloader/types/storager.go b/multidownloader/types/storager.go
index 6e2db6c9c..3d9a56bc5 100644
--- a/multidownloader/types/storager.go
+++ b/multidownloader/types/storager.go
@@ -5,18 +5,48 @@ import (
 
 	dbtypes "github.com/agglayer/aggkit/db/types"
 	aggkittypes "github.com/agglayer/aggkit/types"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 )
 
+type FinalizedType = bool
+
+const (
+	NotFinalized FinalizedType = false
+	Finalized    FinalizedType = true
+)
+
 type Storager interface {
+	StoragerForReorg
 	dbtypes.KeyValueStorager
 	// GetSyncedBlockRangePerContract It returns the synced block range stored in DB
 	GetSyncedBlockRangePerContract(tx dbtypes.Querier) (SetSyncSegment, error)
-	SaveEthLogsWithHeaders(tx dbtypes.Querier, blockHeaders []*aggkittypes.BlockHeader,
+	SaveEthLogsWithHeaders(tx dbtypes.Querier, blockHeaders aggkittypes.ListBlockHeaders,
 		logs []types.Log, isFinal bool) error
+	// TODO: Deprecate GetEthLogs and use LogQuery instead
 	GetEthLogs(tx dbtypes.Querier, query LogQuery) ([]types.Log, error)
+	LogQuery(tx dbtypes.Querier, query LogQuery) (LogQueryResponse, error)
 	UpdateSyncedStatus(tx dbtypes.Querier, segments []SyncSegment) error
 	UpsertSyncerConfigs(tx dbtypes.Querier, configs []ContractConfig) error
-	GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, bool, error)
+	GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, FinalizedType, error)
 	NewTx(ctx context.Context) (dbtypes.Txer, error)
+	// GetBlockHeadersNotFinalized retrieves all non-finalized block headers with block number <= maxBlock;
+	// if maxBlock is nil, it retrieves all non-finalized block headers
+	GetBlockHeadersNotFinalized(tx dbtypes.Querier, maxBlock *uint64) (aggkittypes.ListBlockHeaders, error)
+	UpdateBlockToFinalized(tx dbtypes.Querier, blockNumbers []uint64) error
+	GetRangeBlockHeader(tx dbtypes.Querier, isFinal FinalizedType) (lowest *aggkittypes.BlockHeader,
+		highest *aggkittypes.BlockHeader, err error)
+	// GetHighestBlockNumber returns the highest block number stored in the DB
+	GetHighestBlockNumber(tx dbtypes.Querier) (uint64, error)
+	// GetBlockReorgedReorgID returns the reorgID of the reorged block, if it exists;
+	// the second return value indicates whether the block was reorged
+	GetBlockReorgedReorgID(tx dbtypes.Querier,
+		blockNumber uint64, blockHash common.Hash) (uint64, bool, error)
+	GetReorgedDataByReorgID(tx dbtypes.Querier,
+		reorgID uint64) (*ReorgData, error)
+}
+
+type StoragerForReorg interface {
+	GetBlockHeaderByNumber(tx dbtypes.Querier, blockNumber uint64) (*aggkittypes.BlockHeader, FinalizedType, error)
+	InsertReorgAndMoveReorgedBlocksAndLogs(tx dbtypes.Querier, reorgData ReorgData) (uint64, error)
 }
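The reworked Storager interface splits reorg-related storage into StoragerForReorg and models finalization explicitly (FinalizedType, GetBlockHeadersNotFinalized, UpdateBlockToFinalized). Here is a hedged sketch of how a consumer might drive the finalization flow; the interface below is a trimmed local stand-in (the dbtypes.Querier transaction parameter is omitted for brevity), not the real aggkit API:

```go
package sketch

import "fmt"

// BlockHeader is a local stand-in for aggkittypes.BlockHeader.
type BlockHeader struct {
	Num uint64
}

// finalizedMarker is a hypothetical trimmed view of the new Storager methods.
type finalizedMarker interface {
	GetBlockHeadersNotFinalized(maxBlock *uint64) ([]*BlockHeader, error)
	UpdateBlockToFinalized(blockNumbers []uint64) error
}

// markFinalized loads every stored header that is not yet finalized up to the
// given finalized height, then flips them all to finalized in a single call.
func markFinalized(s finalizedMarker, finalizedHeight uint64) error {
	headers, err := s.GetBlockHeadersNotFinalized(&finalizedHeight)
	if err != nil {
		return fmt.Errorf("loading non-finalized headers: %w", err)
	}
	if len(headers) == 0 {
		return nil // nothing pending below the finalized height
	}
	nums := make([]uint64, 0, len(headers))
	for _, h := range headers {
		nums = append(nums, h.Num)
	}
	return s.UpdateBlockToFinalized(nums)
}
```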
diff --git a/multidownloader/types/sync_segment.go b/multidownloader/types/sync_segment.go
index a1d4594d5..d37c73f87 100644
--- a/multidownloader/types/sync_segment.go
+++ b/multidownloader/types/sync_segment.go
@@ -11,7 +11,8 @@ import (
 // SyncSegment represents a segment of blocks, it is used for synced segments but also
 // for representing segments to be synced
 type SyncSegment struct {
-	ContractAddr  common.Address
+	ContractAddr common.Address
+	// BlockRange can be empty; check with BlockRange.IsEmpty()
 	BlockRange    aggkitcommon.BlockRange
 	TargetToBlock aggkittypes.BlockNumberFinality
 }
@@ -52,12 +53,35 @@ func (s *SyncSegment) Clone() *SyncSegment {
 	}
 }
 
-// UpdateToBlock updates the ToBlock of the SyncSegment
-func (s *SyncSegment) UpdateToBlock(newToBlock uint64) {
+// Empty marks the SyncSegment as empty (fromBlock > toBlock)
+func (s *SyncSegment) Empty() {
 	if s == nil {
 		return
 	}
-	s.BlockRange.ToBlock = newToBlock
+	// Set FromBlock greater than ToBlock to indicate an empty segment
+	s.BlockRange = aggkitcommon.BlockRangeZero
+}
+
+func (s *SyncSegment) IsEmpty() bool {
+	if s == nil {
+		return true
+	}
+	return s.BlockRange.IsEmpty()
+}
+
+// IsValid reports whether the SyncSegment is usable by the multidownloader.
+// Special values such as BlockRange{0,0} are reserved and must be rejected,
+// so validity cannot be derived from IsEmpty alone
+func (s *SyncSegment) IsValid() bool {
+	if s.IsEmpty() {
+		return true
+	}
+	// The value {0,0} represents an empty range in the DB, so BlockRange{0,0}
+	// is forbidden for the multidownloader
+	if s.BlockRange.FromBlock == 0 && s.BlockRange.ToBlock == 0 {
+		return false
+	}
+	return true
 }
 
 // Equal checks if two SyncSegments are equal
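The new Empty/IsEmpty/IsValid trio encodes a three-way distinction: an empty range (FromBlock > ToBlock, e.g. BlockRangeZero) is valid, the literal {0,0} is reserved as the DB's representation of an empty range and is therefore invalid for the multidownloader, and everything else is a normal valid range. A self-contained sketch of the rule, with a local BlockRange stand-in (BlockRangeZero is assumed here to be some FromBlock > ToBlock value):

```go
package main

import "fmt"

// BlockRange is a local stand-in for aggkitcommon.BlockRange;
// the empty convention is FromBlock > ToBlock.
type BlockRange struct{ FromBlock, ToBlock uint64 }

func (b BlockRange) IsEmpty() bool { return b.FromBlock > b.ToBlock }

// blockRangeZero mirrors aggkitcommon.BlockRangeZero, assumed to be
// any value with FromBlock > ToBlock.
var blockRangeZero = BlockRange{FromBlock: 1, ToBlock: 0}

// isValid mirrors SyncSegment.IsValid: empty is fine, but {0,0} is the DB's
// reserved sentinel for an empty range and is therefore rejected.
func isValid(b BlockRange) bool {
	if b.IsEmpty() {
		return true
	}
	return !(b.FromBlock == 0 && b.ToBlock == 0)
}

func main() {
	fmt.Println(blockRangeZero.IsEmpty(), isValid(blockRangeZero))      // true true
	fmt.Println(BlockRange{0, 0}.IsEmpty(), isValid(BlockRange{0, 0})) // false false: DB sentinel
	fmt.Println(BlockRange{0, 5}.IsEmpty(), isValid(BlockRange{0, 5})) // false true
}
```

The outputs match the IsValid/IsEmpty test tables that follow.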
"segment with single block {5,5} is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(5, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + reason: "single block range is valid", + }, + { + name: "segment with large range is valid", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1000, 999999), + TargetToBlock: aggkittypes.LatestBlock, + }, + expected: true, + reason: "large ranges are valid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.segment.IsValid() + require.Equal(t, tt.expected, got, + "IsValid() for %s: expected %v, got %v. Reason: %s", + tt.name, tt.expected, got, tt.reason) + }) + } +} + +func TestSyncSegment_IsEmpty(t *testing.T) { + addr := common.HexToAddress("0x123") + + tests := []struct { + name string + segment *SyncSegment + expected bool + }{ + { + name: "nil segment is empty", + segment: nil, + expected: true, + }, + { + name: "segment with BlockRangeZero is empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.BlockRangeZero, + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + }, + { + name: "segment with invalid range (from > to) is empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(10, 5), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: true, + }, + { + name: "segment with {0,0} is not empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(0, 0), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: false, + }, + { + name: "segment with valid range {1,10} is not empty", + segment: &SyncSegment{ + ContractAddr: addr, + BlockRange: aggkitcommon.NewBlockRange(1, 10), + TargetToBlock: aggkittypes.FinalizedBlock, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.segment.IsEmpty() + require.Equal(t, tt.expected, got, + "IsEmpty() for %s: expected %v, got %v", + tt.name, tt.expected, got) + }) + } +} diff --git a/multidownloader/types/syncer_config.go b/multidownloader/types/syncer_config.go index b394b4c39..24f086d8c 100644 --- a/multidownloader/types/syncer_config.go +++ b/multidownloader/types/syncer_config.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "sort" aggkitcommon "github.com/agglayer/aggkit/common" @@ -59,7 +60,24 @@ func NewSetSyncerConfig() SetSyncerConfig { filters: make(map[SyncerID]aggkittypes.SyncerConfig), } } - +func (f *SetSyncerConfig) Brief() string { + if f == nil || f.filters == nil { + return "SetSyncerConfig{}" + } + result := "SetSyncerConfig{ " + // Sort syncer IDs to ensure deterministic output + syncerIDs := make([]string, 0, len(f.filters)) + for syncerID := range f.filters { + syncerIDs = append(syncerIDs, syncerID) + } + sort.Strings(syncerIDs) + for _, syncerID := range syncerIDs { + filter := f.filters[syncerID] + result += fmt.Sprintf("(%s -> [%d - %s]) ", syncerID, filter.FromBlock, filter.ToBlock.String()) + } + result += "}" + return result +} func (f *SetSyncerConfig) Add(filter aggkittypes.SyncerConfig) { if f.filters == nil { f.filters = make(map[SyncerID]aggkittypes.SyncerConfig) @@ -79,7 +97,7 @@ func (f *SetSyncerConfig) Addresses(blockRange aggkitcommon.BlockRange) []common for _, filter := range f.filters { if filter.FromBlock >= blockRange.FromBlock { - for _, addr := range filter.ContractsAddr { + for _, addr := range filter.ContractAddresses { if _, 
diff --git a/multidownloader/types/syncer_config_test.go b/multidownloader/types/syncer_config_test.go
index 0207d16c9..d2eb24a9e 100644
--- a/multidownloader/types/syncer_config_test.go
+++ b/multidownloader/types/syncer_config_test.go
@@ -1,6 +1,7 @@
 package types
 
 import (
+	"strings"
"testing" aggkittypes "github.com/agglayer/aggkit/types" @@ -22,10 +23,10 @@ func TestContractConfigs_SingleSyncerSingleContract(t *testing.T) { addr := common.HexToAddress("0x1") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr}, - FromBlock: 10, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 10, + ToBlock: aggkittypes.FinalizedBlock, }) configs := set.ContractConfigs() @@ -41,16 +42,16 @@ func TestContractConfigs_MultipleSyncersSameContract(t *testing.T) { addr := common.HexToAddress("0x2") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{addr}, - FromBlock: 5, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{addr}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, }) configs := set.ContractConfigs() @@ -69,16 +70,16 @@ func TestContractConfigs_MultipleSyncersMultipleContracts(t *testing.T) { addr2 := common.HexToAddress("0x4") set := NewSetSyncerConfig() set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer1", - ContractsAddr: []common.Address{addr1, addr2}, - FromBlock: 1, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer1", + ContractAddresses: []common.Address{addr1, addr2}, + FromBlock: 1, + ToBlock: aggkittypes.FinalizedBlock, }) set.Add(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{addr2}, - FromBlock: 2, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{addr2}, + FromBlock: 2, + ToBlock: aggkittypes.LatestBlock, }) configs := set.ContractConfigs() @@ -111,10 +112,10 @@ func TestContractConfig_Update_FromBlock(t *testing.T) { // Update with lower FromBlock cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 5, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, uint64(5), cc.FromBlock) @@ -122,10 +123,10 @@ func TestContractConfig_Update_FromBlock(t *testing.T) { // Update with higher FromBlock (should not change) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer3", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer3", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, uint64(5), cc.FromBlock) @@ -142,10 +143,10 @@ func TestContractConfig_Update_ToBlock(t *testing.T) { // Update with less final ToBlock (LatestBlock < FinalizedBlock) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.LatestBlock, }) require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) @@ -153,10 +154,10 @@ func 
TestContractConfig_Update_ToBlock(t *testing.T) { // Update with more final ToBlock (should not change) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer3", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 20, - ToBlock: aggkittypes.SafeBlock, + SyncerID: "syncer3", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 20, + ToBlock: aggkittypes.SafeBlock, }) require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) @@ -173,20 +174,20 @@ func TestContractConfig_Update_Syncers(t *testing.T) { // Add new syncer cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 15, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 15, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, []SyncerID{"syncer1", "syncer2", "syncer3"}, cc.Syncers) // Add existing syncer (should not duplicate) cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 20, - ToBlock: aggkittypes.FinalizedBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 20, + ToBlock: aggkittypes.FinalizedBlock, }) require.Equal(t, []SyncerID{"syncer1", "syncer2", "syncer3"}, cc.Syncers) @@ -202,13 +203,40 @@ func TestContractConfig_Update_Combined(t *testing.T) { // Update all fields at once cc.Update(aggkittypes.SyncerConfig{ - SyncerID: "syncer2", - ContractsAddr: []common.Address{common.HexToAddress("0x1")}, - FromBlock: 5, - ToBlock: aggkittypes.LatestBlock, + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, }) require.Equal(t, uint64(5), cc.FromBlock) require.Equal(t, aggkittypes.LatestBlock, cc.ToBlock) require.Equal(t, []SyncerID{"syncer1", "syncer2"}, cc.Syncers) } + +func TestContractConfig_Update_Brief(t *testing.T) { + t.Run("brief with valid config", func(t *testing.T) { + sut := NewSetSyncerConfig() + sut.Add(aggkittypes.SyncerConfig{ + SyncerID: "syncer1", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 10, + ToBlock: aggkittypes.FinalizedBlock, + }) + sut.Add(aggkittypes.SyncerConfig{ + SyncerID: "syncer2", + ContractAddresses: []common.Address{common.HexToAddress("0x1")}, + FromBlock: 5, + ToBlock: aggkittypes.LatestBlock, + }) + + expected := "SetSyncerConfig{ (syncer1 -> [10 - FinalizedBlock]) (syncer2 -> [5 - LatestBlock]) }" + require.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(sut.Brief())) + }) + + t.Run("brief with nil config", func(t *testing.T) { + var cc *SetSyncerConfig + expected := "SetSyncerConfig{}" + require.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(cc.Brief())) + }) +} diff --git a/scripts/local_config_helper b/scripts/local_config_helper index c3a80f046..5b0c200b4 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -312,8 +312,9 @@ function parse_command_line_args() { -h | --help) echo "Usage: $0" echo " -h: help" - echo " -e: kurtosis enclave name (default $KURTOSIS_ENCLAVE)" - echo " -p: expose same ports as service in kurtosis (RPC and REST)" + echo " -e: kurtosis enclave name (default $KURTOSIS_ENCLAVE) (--enclave)" + echo " -p: expose same ports as service in kurtosis (RPC and REST) (--expose-ports) " + echo " -s: skip setting up aggsender committee override URLs 
(--skip-committee-override)" exit 0 ;; -e | --enclave) @@ -325,6 +326,10 @@ function parse_command_line_args() { export EXPOSE_PORTS=0 shift ;; + -s | --skip-committee-override) + export SKIP_COMMITTEE_OVERRIDE=1 + shift + ;; -*) echo "Invalid Option: $1" 1>&2 exit 1 @@ -342,8 +347,13 @@ function main(){ parse_command_line_args $* check_requirements create_dest_folder - - common_aggsender_committee_override_urls + if [ -z "$SKIP_COMMITTEE_OVERRIDE" ]; then + log_debug "Setting up aggsender_committee_override_urls with validators in kurtosis enclave" + common_aggsender_committee_override_urls + else + log_debug "skipping setup of aggsender_committee_override_urls with validators in kurtosis enclave, setting it to empty" + export aggsender_committee_override_urls="{}" + fi download_kurtosis_artifacts export_ports_from_kurtosis diff --git a/sync/adapter_eth_to_multidownloader.go b/sync/adapter_eth_to_multidownloader.go index 036e80013..7349e1f09 100644 --- a/sync/adapter_eth_to_multidownloader.go +++ b/sync/adapter_eth_to_multidownloader.go @@ -19,7 +19,7 @@ type AdaptEthClientToMultidownloader struct { ethClient aggkittypes.BaseEthereumClienter } -var _ (aggkittypes.MultiDownloader) = (*AdaptEthClientToMultidownloader)(nil) +var _ (aggkittypes.MultiDownloaderLegacy) = (*AdaptEthClientToMultidownloader)(nil) func NewAdapterEthClientToMultidownloader(ethClient aggkittypes.BaseEthereumClienter) *AdaptEthClientToMultidownloader { return &AdaptEthClientToMultidownloader{ diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 7c32827c0..cfad11382 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "slices" + "sort" "strings" "time" @@ -45,6 +46,10 @@ func (m LogAppenderMap) GetTopics() []common.Hash { for topic := range m { topics = append(topics, topic) } + // Sort topics to ensure deterministic output + sort.Slice(topics, func(i, j int) bool { + return topics[i].Hex() < topics[j].Hex() + }) return topics } @@ -60,7 +65,7 @@ type EVMDownloader struct { func NewEVMDownloader( syncerID string, - ethClient aggkittypes.MultiDownloader, + ethClient aggkittypes.MultiDownloaderLegacy, syncBlockChunkSize uint64, finality aggkittypes.BlockNumberFinality, waitForNewBlocksPeriod time.Duration, @@ -252,7 +257,7 @@ func (d *EVMDownloader) reportEmptyBlock(ctx context.Context, downloadedCh chan } type EVMDownloaderImplementation struct { - ethClient aggkittypes.MultiDownloader + ethClient aggkittypes.MultiDownloaderLegacy blockFinality aggkittypes.BlockNumberFinality waitForNewBlocksPeriod time.Duration appender LogAppenderMap @@ -269,7 +274,7 @@ type EVMDownloaderImplementation struct { // finalizedBlockType can be nil, in this case, it means that the reorgs are not happening on the network func NewEVMDownloaderImplementation( syncerID string, - ethClient aggkittypes.MultiDownloader, + ethClient aggkittypes.MultiDownloaderLegacy, blockFinality aggkittypes.BlockNumberFinality, waitForNewBlocksPeriod time.Duration, appender LogAppenderMap, @@ -324,7 +329,7 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( d.log.Info("context cancelled") return latestSyncedBlock case <-ticker.C: - blockHeader, err := d.ethClient.BlockHeader(ctx, d.blockFinality) + blockHeader, err := d.ethClient.HeaderByNumber(ctx, &d.blockFinality) if err != nil { if ctx.Err() == nil { attempts++ diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index cd1a83351..773164ba8 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -383,20 
+383,20 @@ func TestWaitForNewBlocks(t *testing.T) { currentBlock := uint64(5) expectedBlock := uint64(6) aggkittypesBlockHeader := aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil) - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypesBlockHeader, nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypesBlockHeader, nil).Once() actualBlock := d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) // 2 iterations - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(5, common.Hash{}, 0, nil), nil).Once() - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(5, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() actualBlock = d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) // after error from client - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() - clientMock.EXPECT().BlockHeader(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeader(6, common.Hash{}, 0, nil), nil).Once() actualBlock = d.WaitForNewBlocks(ctx, currentBlock) assert.Equal(t, expectedBlock, actualBlock) } @@ -428,7 +428,7 @@ func TestWaitForNewBlocksWithReorgDetection(t *testing.T) { headerHash := latestHeader.Hash() trackedBlock := &reorgdetector.Header{Hash: common.HexToHash("0x456")} - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return( + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return( aggkittypes.NewBlockHeaderFromEthHeader(latestHeader), nil).Once() reorgDetectorMock.EXPECT().GetTrackedBlockByBlockNumber("test-reorg-detector-id", currentBlockNumber).Return(trackedBlock, nil).Once() reorgDetectorMock.EXPECT().AddBlockToTrack(ctx, "test-reorg-detector-id", currentBlockNumber, headerHash).Return(nil).Once() @@ -462,10 +462,10 @@ func TestWaitForNewBlocksWithReorgDetection(t *testing.T) { latestHeader := &types.Header{Number: big.NewInt(int64(currentBlockNumber))} latestHeaderNext := &types.Header{Number: big.NewInt(int64(currentBlockNumber + 1))} - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return( + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return( aggkittypes.NewBlockHeaderFromEthHeader(latestHeader), nil).Once() reorgDetectorMock.EXPECT().GetTrackedBlockByBlockNumber("test-reorg-detector-id", currentBlockNumber).Return(nil, errors.New("database error")).Once() - clientMock.EXPECT().BlockHeader(ctx, aggkittypes.LatestBlock).Return(aggkittypes.NewBlockHeaderFromEthHeader(latestHeaderNext), nil).Once() + clientMock.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(aggkittypes.NewBlockHeaderFromEthHeader(latestHeaderNext), nil).Once() headerHashNext := latestHeaderNext.Hash() reorgDetectorMock.EXPECT().AddBlockToTrack(ctx, "test-reorg-detector-id", currentBlockNumber+1, headerHashNext).Return(nil).Once() diff --git a/sync/evmdriver.go b/sync/evmdriver.go index d0e984f0f..3352f5386 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -125,6 +125,11 @@ func (d *EVMDriver) 
SubscribeToNewBlocks(subscriberName string) <-chan Block {
 	return d.blockSubscriber.Subscribe(subscriberName)
 }
 
+// GetCompletionPercentage returns nil: the legacy syncer doesn't support completion percentage.
+func (d *EVMDriver) GetCompletionPercentage() *float64 {
+	return nil
+}
+
 func (d *EVMDriver) Sync(ctx context.Context) {
 reset:
 	var (
diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go
index af43a98bd..2b278f32a 100644
--- a/sync/evmdriver_test.go
+++ b/sync/evmdriver_test.go
@@ -435,3 +435,8 @@ func TestEVMDriver_Sync(t *testing.T) {
 		})
 	}
 }
+
+func TestEVMDriver_GetCompletionPercentage(t *testing.T) {
+	sut := &EVMDriver{}
+	require.Nil(t, sut.GetCompletionPercentage(), "expected GetCompletionPercentage to return nil for legacy syncer")
+}
diff --git a/sync/evmtypes.go b/sync/evmtypes.go
index 739154f90..4e58d662f 100644
--- a/sync/evmtypes.go
+++ b/sync/evmtypes.go
@@ -1,6 +1,10 @@
 package sync
 
-import "github.com/ethereum/go-ethereum/common"
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+)
 
 type EVMBlocks []*EVMBlock
 
@@ -8,12 +12,27 @@ func (e EVMBlocks) Len() int {
 	return len(e)
 }
 
+func (e EVMBlocks) LastBlock() *EVMBlock {
+	if len(e) == 0 {
+		return nil
+	}
+	return e[len(e)-1]
+}
+
 type EVMBlock struct {
 	EVMBlockHeader
 	IsFinalizedBlock bool
 	Events           []interface{}
 }
 
+func (e *EVMBlock) Brief() string {
+	if e == nil {
+		return "EVMBlock"
+	}
+	return fmt.Sprintf("EVMBlock{Num: %d, IsFinalizedBlock: %t, EventsCount: %d}",
+		e.Num, e.IsFinalizedBlock, len(e.Events))
+}
+
 type EVMBlockHeader struct {
 	Num  uint64
 	Hash common.Hash
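LastBlock and Brief pair naturally for logging: LastBlock returns nil on an empty batch, and Brief is deliberately nil-safe, so the tail of a downloaded batch can be logged without guards. A minimal stand-alone mirror of the two helpers (EVMBlockHeader is flattened into the struct here for brevity; the real types embed it):

```go
package main

import "fmt"

// EVMBlock is a simplified local mirror of sync.EVMBlock.
type EVMBlock struct {
	Num              uint64
	IsFinalizedBlock bool
	Events           []interface{}
}

type EVMBlocks []*EVMBlock

// LastBlock returns nil for an empty (or nil) batch.
func (e EVMBlocks) LastBlock() *EVMBlock {
	if len(e) == 0 {
		return nil
	}
	return e[len(e)-1]
}

// Brief is nil-safe on purpose, so it composes with LastBlock in log lines.
func (e *EVMBlock) Brief() string {
	if e == nil {
		return "EVMBlock"
	}
	return fmt.Sprintf("EVMBlock{Num: %d, IsFinalizedBlock: %t, EventsCount: %d}",
		e.Num, e.IsFinalizedBlock, len(e.Events))
}

func main() {
	var batch EVMBlocks
	fmt.Println("last:", batch.LastBlock().Brief()) // nil-safe: prints "last: EVMBlock"

	batch = EVMBlocks{{Num: 42, IsFinalizedBlock: true, Events: []interface{}{"ev"}}}
	fmt.Println("last:", batch.LastBlock().Brief()) // EVMBlock{Num: 42, IsFinalizedBlock: true, EventsCount: 1}
}
```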
diff --git a/sync/evmtypes_test.go b/sync/evmtypes_test.go
new file mode 100644
index 000000000..5ab12afe4
--- /dev/null
+++ b/sync/evmtypes_test.go
@@ -0,0 +1,168 @@
+package sync
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEVMBlocks_Len(t *testing.T) {
+	t.Run("empty blocks", func(t *testing.T) {
+		blocks := EVMBlocks{}
+		require.Equal(t, 0, blocks.Len())
+	})
+
+	t.Run("single block", func(t *testing.T) {
+		blocks := EVMBlocks{
+			{EVMBlockHeader: EVMBlockHeader{Num: 1}},
+		}
+		require.Equal(t, 1, blocks.Len())
+	})
+
+	t.Run("multiple blocks", func(t *testing.T) {
+		blocks := EVMBlocks{
+			{EVMBlockHeader: EVMBlockHeader{Num: 1}},
+			{EVMBlockHeader: EVMBlockHeader{Num: 2}},
+			{EVMBlockHeader: EVMBlockHeader{Num: 3}},
+		}
+		require.Equal(t, 3, blocks.Len())
+	})
+
+	t.Run("nil blocks slice", func(t *testing.T) {
+		var blocks EVMBlocks
+		require.Equal(t, 0, blocks.Len())
+	})
+}
+
+func TestEVMBlocks_LastBlock(t *testing.T) {
+	t.Run("empty blocks returns nil", func(t *testing.T) {
+		blocks := EVMBlocks{}
+		result := blocks.LastBlock()
+		require.Nil(t, result)
+	})
+
+	t.Run("nil blocks returns nil", func(t *testing.T) {
+		var blocks EVMBlocks
+		result := blocks.LastBlock()
+		require.Nil(t, result)
+	})
+
+	t.Run("single block returns that block", func(t *testing.T) {
+		expectedBlock := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num:  100,
+				Hash: common.HexToHash("0x123"),
+			},
+			IsFinalizedBlock: true,
+		}
+		blocks := EVMBlocks{expectedBlock}
+		result := blocks.LastBlock()
+		require.NotNil(t, result)
+		require.Equal(t, expectedBlock, result)
+		require.Equal(t, uint64(100), result.Num)
+		require.Equal(t, common.HexToHash("0x123"), result.Hash)
+		require.True(t, result.IsFinalizedBlock)
+	})
+
+	t.Run("multiple blocks returns last block", func(t *testing.T) {
+		firstBlock := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{Num: 1},
+		}
+		secondBlock := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{Num: 2},
+		}
+		lastBlock := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num:        3,
+				Hash:       common.HexToHash("0x1a57"),
+				ParentHash: common.HexToHash("0xdad"),
+				Timestamp:  1234567890,
+			},
+			IsFinalizedBlock: false,
+			Events:           []any{"event1", "event2"},
+		}
+		blocks := EVMBlocks{firstBlock, secondBlock, lastBlock}
+		result := blocks.LastBlock()
+		require.NotNil(t, result)
+		require.Equal(t, lastBlock, result)
+		require.Equal(t, uint64(3), result.Num)
+		require.Equal(t, common.HexToHash("0x1a57"), result.Hash)
+		require.Equal(t, common.HexToHash("0xdad"), result.ParentHash)
+		require.Equal(t, uint64(1234567890), result.Timestamp)
+		require.False(t, result.IsFinalizedBlock)
+		require.Len(t, result.Events, 2)
+	})
+}
+
+func TestEVMBlock_Brief(t *testing.T) {
+	t.Run("nil block returns special string", func(t *testing.T) {
+		var block *EVMBlock
+		result := block.Brief()
+		require.Equal(t, "EVMBlock", result)
+	})
+
+	t.Run("block with no events", func(t *testing.T) {
+		block := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num: 100,
+			},
+			IsFinalizedBlock: true,
+			Events:           []any{},
+		}
+		result := block.Brief()
+		require.Equal(t, "EVMBlock{Num: 100, IsFinalizedBlock: true, EventsCount: 0}", result)
+	})
+
+	t.Run("block with events and finalized", func(t *testing.T) {
+		block := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num: 12345,
+			},
+			IsFinalizedBlock: true,
+			Events:           []any{"event1", "event2", "event3"},
+		}
+		result := block.Brief()
+		require.Equal(t, "EVMBlock{Num: 12345, IsFinalizedBlock: true, EventsCount: 3}", result)
+	})
+
+	t.Run("block not finalized with single event", func(t *testing.T) {
+		block := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num: 999,
+			},
+			IsFinalizedBlock: false,
+			Events:           []any{"single_event"},
+		}
+		result := block.Brief()
+		require.Equal(t, "EVMBlock{Num: 999, IsFinalizedBlock: false, EventsCount: 1}", result)
+	})
+
+	t.Run("block with nil events", func(t *testing.T) {
+		block := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num: 50,
+			},
+			IsFinalizedBlock: false,
+			Events:           nil,
+		}
+		result := block.Brief()
+		require.Equal(t, "EVMBlock{Num: 50, IsFinalizedBlock: false, EventsCount: 0}", result)
+	})
+
+	t.Run("block with complete header information", func(t *testing.T) {
+		block := &EVMBlock{
+			EVMBlockHeader: EVMBlockHeader{
+				Num:        777,
+				Hash:       common.HexToHash("0xABC"),
+				ParentHash: common.HexToHash("0xDEF"),
+				Timestamp:  1640000000,
+			},
+			IsFinalizedBlock: true,
+			Events:           []any{"ev1", "ev2", "ev3", "ev4", "ev5"},
+		}
+		result := block.Brief()
+		// Brief only includes Num, IsFinalizedBlock, and EventsCount
+		require.Equal(t, "EVMBlock{Num: 777, IsFinalizedBlock: true, EventsCount: 5}", result)
+	})
+}
diff --git a/test/contracts/abi/logemitter.abi b/test/contracts/abi/logemitter.abi
new file mode 100644
index 000000000..d129511f4
--- /dev/null
+++ b/test/contracts/abi/logemitter.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"string","name":"bootMessage","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"bytes32","name":"topic","type":"bytes32"},{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Data","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"string","name":"message","type":"string"}],"name":"Ping","type":"event"},{"inputs":[],"name":"counter","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"topic","type":"bytes32"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"emitData","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"string","name":"message","type":"string"}],"name":"emitPing","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/bin/logemitter.bin b/test/contracts/bin/logemitter.bin new file mode 100644 index 000000000..8188e7658 --- /dev/null +++ b/test/contracts/bin/logemitter.bin @@ -0,0 +1 @@ +6080604052346100e05761033b80380380610019816100fb565b9283398101602080838303126100e05782516001600160401b03938482116100e057019082601f830112156100e05781519384116100e557601f1993610065601f8201861683016100fb565b818152828101948383860101116100e0576000956100a960409387867f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a9801610120565b6100c58351948593818552519788809286015285850190610120565b601f339601168101030190a36040516101f790816101448239f35b600080fd5b634e487b7160e01b600052604160045260246000fd5b6040519190601f01601f191682016001600160401b038111838210176100e557604052565b60005b8381106101335750506000910152565b818101518382015260200161012356fe60808060405260048036101561001457600080fd5b600091823560e01c90816361bc221a14610153575080638b692c37146100c05763e85f05f21461004357600080fd5b346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8577f5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b26100976100b2923690850161016d565b929093604051918291602083523595339560208401916101a0565b0390a380f35b8280fd5b5080fd5b50346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8576100f1903690830161016d565b9091835460018101809111610140577f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a916100b2918655604051918291602083523595339560208401916101a0565b634e487b7160e01b855260118252602485fd5b8390346100bc57816003193601126100bc57602091548152f35b9181601f8401121561019b5782359167ffffffffffffffff831161019b576020838186019501011161019b57565b600080fd5b908060209392818452848401376000828201840152601f01601f191601019056fea26469706673582212200fe8d91d7cb5850d0d16712aa2af488fa7d2240ee843a08e90d8fc7ba83ddc3d64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/bind.sh b/test/contracts/bind.sh index 25ddd7820..5a4d44d60 100755 --- a/test/contracts/bind.sh +++ b/test/contracts/bind.sh @@ -11,4 +11,5 @@ gen() { gen verifybatchesmock gen claimmock gen claimmockcaller -gen claimmocktest \ No newline at end of file +gen claimmocktest +gen logemitter \ No newline at end of file diff --git a/test/contracts/compile.sh 
b/test/contracts/compile.sh index 7dd357a9e..ae3d1cd3a 100755 --- a/test/contracts/compile.sh +++ b/test/contracts/compile.sh @@ -18,7 +18,12 @@ docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/cl mv -f ClaimMockTest.abi abi/claimmocktest.abi mv -f ClaimMockTest.bin bin/claimmocktest.bin +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/logemitter/LogEmitter.sol -o /contracts --abi --bin --overwrite --optimize --via-ir +mv -f LogEmitter.abi abi/logemitter.abi +mv -f LogEmitter.bin bin/logemitter.bin + + rm -f IClaimMock.abi rm -f IClaimMock.bin rm -f IClaimMockCaller.abi -rm -f IClaimMockCaller.bin \ No newline at end of file +rm -f IClaimMockCaller.bin diff --git a/test/contracts/logemitter/LogEmitter.sol b/test/contracts/logemitter/LogEmitter.sol new file mode 100644 index 000000000..419e2e9f5 --- /dev/null +++ b/test/contracts/logemitter/LogEmitter.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity 0.8.18; + +contract LogEmitter { + // Simple event + event Ping(address indexed from, uint256 indexed id, string message); + + // Event with arbitrary data + event Data(address indexed from, bytes32 indexed topic, bytes data); + + uint256 public counter; + + constructor(string memory bootMessage) { + // Emits a log on deployment + emit Ping(msg.sender, 0, bootMessage); + } + + // Emits an event and increments a counter + function emitPing(uint256 id, string calldata message) external { + counter += 1; + emit Ping(msg.sender, id, message); + } + + // Emits an event with arbitrary bytes (useful for tests) + function emitData(bytes32 topic, bytes calldata data) external { + emit Data(msg.sender, topic, data); + } +} \ No newline at end of file diff --git a/test/contracts/logemitter/logemitter.go b/test/contracts/logemitter/logemitter.go new file mode 100644 index 000000000..9f6f29d4e --- /dev/null +++ b/test/contracts/logemitter/logemitter.go @@ -0,0 +1,584 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package logemitter + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// LogemitterMetaData contains all meta data concerning the Logemitter contract. 
+var LogemitterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"bootMessage\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"topic\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Data\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"Ping\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"topic\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"emitData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"emitPing\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6080604052346100e05761033b80380380610019816100fb565b9283398101602080838303126100e05782516001600160401b03938482116100e057019082601f830112156100e05781519384116100e557601f1993610065601f8201861683016100fb565b818152828101948383860101116100e0576000956100a960409387867f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a9801610120565b6100c58351948593818552519788809286015285850190610120565b601f339601168101030190a36040516101f790816101448239f35b600080fd5b634e487b7160e01b600052604160045260246000fd5b6040519190601f01601f191682016001600160401b038111838210176100e557604052565b60005b8381106101335750506000910152565b818101518382015260200161012356fe60808060405260048036101561001457600080fd5b600091823560e01c90816361bc221a14610153575080638b692c37146100c05763e85f05f21461004357600080fd5b346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8577f5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b26100976100b2923690850161016d565b929093604051918291602083523595339560208401916101a0565b0390a380f35b8280fd5b5080fd5b50346100bc5760403660031901126100bc5760243567ffffffffffffffff81116100b8576100f1903690830161016d565b9091835460018101809111610140577f70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a916100b2918655604051918291602083523595339560208401916101a0565b634e487b7160e01b855260118252602485fd5b8390346100bc57816003193601126100bc57602091548152f35b9181601f8401121561019b5782359167ffffffffffffffff831161019b576020838186019501011161019b57565b600080fd5b908060209392818452848401376000828201840152601f01601f191601019056fea26469706673582212200fe8d91d7cb5850d0d16712aa2af488fa7d2240ee843a08e90d8fc7ba83ddc3d64736f6c63430008120033", +} + +// LogemitterABI is the input ABI used to generate the binding from. +// Deprecated: Use LogemitterMetaData.ABI instead. +var LogemitterABI = LogemitterMetaData.ABI + +// LogemitterBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use LogemitterMetaData.Bin instead. 
+var LogemitterBin = LogemitterMetaData.Bin + +// DeployLogemitter deploys a new Ethereum contract, binding an instance of Logemitter to it. +func DeployLogemitter(auth *bind.TransactOpts, backend bind.ContractBackend, bootMessage string) (common.Address, *types.Transaction, *Logemitter, error) { + parsed, err := LogemitterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogemitterBin), backend, bootMessage) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Logemitter{LogemitterCaller: LogemitterCaller{contract: contract}, LogemitterTransactor: LogemitterTransactor{contract: contract}, LogemitterFilterer: LogemitterFilterer{contract: contract}}, nil +} + +// Logemitter is an auto generated Go binding around an Ethereum contract. +type Logemitter struct { + LogemitterCaller // Read-only binding to the contract + LogemitterTransactor // Write-only binding to the contract + LogemitterFilterer // Log filterer for contract events +} + +// LogemitterCaller is an auto generated read-only Go binding around an Ethereum contract. +type LogemitterCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterTransactor is an auto generated write-only Go binding around an Ethereum contract. +type LogemitterTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type LogemitterFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LogemitterSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type LogemitterSession struct { + Contract *Logemitter // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LogemitterCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type LogemitterCallerSession struct { + Contract *LogemitterCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// LogemitterTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type LogemitterTransactorSession struct { + Contract *LogemitterTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LogemitterRaw is an auto generated low-level Go binding around an Ethereum contract. +type LogemitterRaw struct { + Contract *Logemitter // Generic contract binding to access the raw methods on +} + +// LogemitterCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type LogemitterCallerRaw struct { + Contract *LogemitterCaller // Generic read-only contract binding to access the raw methods on +} + +// LogemitterTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type LogemitterTransactorRaw struct { + Contract *LogemitterTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewLogemitter creates a new instance of Logemitter, bound to a specific deployed contract. +func NewLogemitter(address common.Address, backend bind.ContractBackend) (*Logemitter, error) { + contract, err := bindLogemitter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Logemitter{LogemitterCaller: LogemitterCaller{contract: contract}, LogemitterTransactor: LogemitterTransactor{contract: contract}, LogemitterFilterer: LogemitterFilterer{contract: contract}}, nil +} + +// NewLogemitterCaller creates a new read-only instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterCaller(address common.Address, caller bind.ContractCaller) (*LogemitterCaller, error) { + contract, err := bindLogemitter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogemitterCaller{contract: contract}, nil +} + +// NewLogemitterTransactor creates a new write-only instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterTransactor(address common.Address, transactor bind.ContractTransactor) (*LogemitterTransactor, error) { + contract, err := bindLogemitter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogemitterTransactor{contract: contract}, nil +} + +// NewLogemitterFilterer creates a new log filterer instance of Logemitter, bound to a specific deployed contract. +func NewLogemitterFilterer(address common.Address, filterer bind.ContractFilterer) (*LogemitterFilterer, error) { + contract, err := bindLogemitter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogemitterFilterer{contract: contract}, nil +} + +// bindLogemitter binds a generic wrapper to an already deployed contract. +func bindLogemitter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LogemitterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Logemitter *LogemitterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Logemitter.Contract.LogemitterCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Logemitter *LogemitterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Logemitter.Contract.LogemitterTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Logemitter *LogemitterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Logemitter.Contract.LogemitterTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Logemitter *LogemitterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Logemitter.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Logemitter *LogemitterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Logemitter.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Logemitter *LogemitterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Logemitter.Contract.contract.Transact(opts, method, params...) +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Logemitter.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterSession) Counter() (*big.Int, error) { + return _Logemitter.Contract.Counter(&_Logemitter.CallOpts) +} + +// Counter is a free data retrieval call binding the contract method 0x61bc221a. +// +// Solidity: function counter() view returns(uint256) +func (_Logemitter *LogemitterCallerSession) Counter() (*big.Int, error) { + return _Logemitter.Contract.Counter(&_Logemitter.CallOpts) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterTransactor) EmitData(opts *bind.TransactOpts, topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.contract.Transact(opts, "emitData", topic, data) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterSession) EmitData(topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.Contract.EmitData(&_Logemitter.TransactOpts, topic, data) +} + +// EmitData is a paid mutator transaction binding the contract method 0xe85f05f2. +// +// Solidity: function emitData(bytes32 topic, bytes data) returns() +func (_Logemitter *LogemitterTransactorSession) EmitData(topic [32]byte, data []byte) (*types.Transaction, error) { + return _Logemitter.Contract.EmitData(&_Logemitter.TransactOpts, topic, data) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. +// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterTransactor) EmitPing(opts *bind.TransactOpts, id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.contract.Transact(opts, "emitPing", id, message) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. 
+// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterSession) EmitPing(id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.Contract.EmitPing(&_Logemitter.TransactOpts, id, message) +} + +// EmitPing is a paid mutator transaction binding the contract method 0x8b692c37. +// +// Solidity: function emitPing(uint256 id, string message) returns() +func (_Logemitter *LogemitterTransactorSession) EmitPing(id *big.Int, message string) (*types.Transaction, error) { + return _Logemitter.Contract.EmitPing(&_Logemitter.TransactOpts, id, message) +} + +// LogemitterDataIterator is returned from FilterData and is used to iterate over the raw logs and unpacked data for Data events raised by the Logemitter contract. +type LogemitterDataIterator struct { + Event *LogemitterData // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *LogemitterDataIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogemitterData) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LogemitterData) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LogemitterDataIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *LogemitterDataIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LogemitterData represents a Data event raised by the Logemitter contract. +type LogemitterData struct { + From common.Address + Topic [32]byte + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterData is a free log retrieval operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. 
+// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) FilterData(opts *bind.FilterOpts, from []common.Address, topic [][32]byte) (*LogemitterDataIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var topicRule []interface{} + for _, topicItem := range topic { + topicRule = append(topicRule, topicItem) + } + + logs, sub, err := _Logemitter.contract.FilterLogs(opts, "Data", fromRule, topicRule) + if err != nil { + return nil, err + } + return &LogemitterDataIterator{contract: _Logemitter.contract, event: "Data", logs: logs, sub: sub}, nil +} + +// WatchData is a free log subscription operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. +// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) WatchData(opts *bind.WatchOpts, sink chan<- *LogemitterData, from []common.Address, topic [][32]byte) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var topicRule []interface{} + for _, topicItem := range topic { + topicRule = append(topicRule, topicItem) + } + + logs, sub, err := _Logemitter.contract.WatchLogs(opts, "Data", fromRule, topicRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LogemitterData) + if err := _Logemitter.contract.UnpackLog(event, "Data", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseData is a log parse operation binding the contract event 0x5046ba6c1f270fb52212c8d175bba9a2f32035c54f076818682099b666acf9b2. +// +// Solidity: event Data(address indexed from, bytes32 indexed topic, bytes data) +func (_Logemitter *LogemitterFilterer) ParseData(log types.Log) (*LogemitterData, error) { + event := new(LogemitterData) + if err := _Logemitter.contract.UnpackLog(event, "Data", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// LogemitterPingIterator is returned from FilterPing and is used to iterate over the raw logs and unpacked data for Ping events raised by the Logemitter contract. +type LogemitterPingIterator struct { + Event *LogemitterPing // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *LogemitterPingIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogemitterPing) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LogemitterPing) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LogemitterPingIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *LogemitterPingIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LogemitterPing represents a Ping event raised by the Logemitter contract. +type LogemitterPing struct { + From common.Address + Id *big.Int + Message string + Raw types.Log // Blockchain specific contextual infos +} + +// FilterPing is a free log retrieval operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. +// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) FilterPing(opts *bind.FilterOpts, from []common.Address, id []*big.Int) (*LogemitterPingIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Logemitter.contract.FilterLogs(opts, "Ping", fromRule, idRule) + if err != nil { + return nil, err + } + return &LogemitterPingIterator{contract: _Logemitter.contract, event: "Ping", logs: logs, sub: sub}, nil +} + +// WatchPing is a free log subscription operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. 
+// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) WatchPing(opts *bind.WatchOpts, sink chan<- *LogemitterPing, from []common.Address, id []*big.Int) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Logemitter.contract.WatchLogs(opts, "Ping", fromRule, idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LogemitterPing) + if err := _Logemitter.contract.UnpackLog(event, "Ping", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParsePing is a log parse operation binding the contract event 0x70b9fa9db7248779b82f3212f84983f03b8f0b0df01c3e83a8c642df6897002a. +// +// Solidity: event Ping(address indexed from, uint256 indexed id, string message) +func (_Logemitter *LogemitterFilterer) ParsePing(log types.Log) (*LogemitterPing, error) { + event := new(LogemitterPing) + if err := _Logemitter.contract.UnpackLog(event, "Ping", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 3fa6c9c03..69d241202 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -159,7 +159,7 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { WaitForNewBlocksPeriod: cfgtypes.NewDuration(time.Millisecond), } - var multidownloaderClient aggkittypes.MultiDownloader + var multidownloaderClient aggkittypes.MultiDownloaderLegacy if useMultidownloaderForTest { multidownloaderClient, err = multidownloader.NewEVMMultidownloader( log.WithFields("module", "multidownloader"), @@ -167,14 +167,15 @@ func L1Setup(t *testing.T, cfg *EnvironmentConfig) *L1Environment { "testMD", l1EthClient, nil, // RPC client is not simulated - nil, - nil, + nil, // Storage will be created internally + nil, // blockNotifierManager will be created internally + nil, // reorgProcessor will be created internally ) require.NoError(t, err) } else { multidownloaderClient = aggkitsync.NewAdapterEthClientToMultidownloader(l1EthClient) } - l1InfoTreeSync, err := l1infotreesync.New( + l1InfoTreeSync, err := l1infotreesync.NewLegacy( ctx, l1InfoTreeSyncCfg, multidownloaderClient, diff --git a/types/block_header.go b/types/block_header.go index 184107dcc..f491c3f3d 100644 --- a/types/block_header.go +++ b/types/block_header.go @@ -16,6 +16,13 @@ type BlockHeader struct { RequestedBlock *BlockNumberFinality } +func (gb *BlockHeader) Brief() string { + if gb == nil { + return "" + } + return fmt.Sprintf("BlockHeader{Number: %d, Hash: %s}", gb.Number, gb.Hash.Hex()) +} + func NewBlockHeader(number uint64, hash common.Hash, time uint64, parentHash *common.Hash) *BlockHeader { return &BlockHeader{ Number: number, @@ -34,6 +41,9 @@ func NewBlockHeaderFromEthHeader(ethHeader *types.Header) *BlockHeader { ethHeader.Time, ðHeader.ParentHash) } +func (gb *BlockHeader) Empty() bool { + return gb == nil +} func (gb *BlockHeader) String() string { if gb == nil { diff --git 
a/types/block_header_test.go b/types/block_header_test.go index 7ca86dea2..073c82e39 100644 --- a/types/block_header_test.go +++ b/types/block_header_test.go @@ -68,3 +68,60 @@ func TestBlockHeader_String(t *testing.T) { require.Equal(t, "", result) }) } + +func TestBlockHeader_Brief(t *testing.T) { + t.Run("with valid block header", func(t *testing.T) { + hash := common.HexToHash("0x1234567890abcdef") + parentHash := common.HexToHash("0xabcdef1234567890") + header := &BlockHeader{ + Number: 123, + Hash: hash, + Time: 1640995200, + ParentHash: &parentHash, + } + + result := header.Brief() + expected := "BlockHeader{Number: 123, Hash: 0x0000000000000000000000000000000000000000000000001234567890abcdef}" + require.Equal(t, expected, result) + }) + + t.Run("with nil block header", func(t *testing.T) { + var header *BlockHeader + result := header.Brief() + require.Equal(t, "", result) + }) +} + +func TestBlockHeader_Empty(t *testing.T) { + t.Run("with nil block header", func(t *testing.T) { + var header *BlockHeader + result := header.Empty() + require.True(t, result) + }) + + t.Run("with valid block header", func(t *testing.T) { + hash := common.HexToHash("0x1234567890abcdef") + parentHash := common.HexToHash("0xabcdef1234567890") + header := &BlockHeader{ + Number: 123, + Hash: hash, + Time: 1640995200, + ParentHash: &parentHash, + } + + result := header.Empty() + require.False(t, result) + }) + + t.Run("with zero-valued block header", func(t *testing.T) { + header := &BlockHeader{ + Number: 0, + Hash: common.Hash{}, + Time: 0, + ParentHash: nil, + } + + result := header.Empty() + require.False(t, result) + }) +} diff --git a/types/list_block_header.go b/types/list_block_header.go new file mode 100644 index 000000000..244764c68 --- /dev/null +++ b/types/list_block_header.go @@ -0,0 +1,73 @@ +package types + +import ( + "sort" + + aggkitcommon "github.com/agglayer/aggkit/common" +) + +type ListBlockHeaders []*BlockHeader + +// NewListBlockHeadersEmpty creates an empty ListBlockHeaders with the given pre-allocated capacity +func NewListBlockHeadersEmpty(preAllocatedSize int) ListBlockHeaders { + return ListBlockHeaders(make([]*BlockHeader, 0, preAllocatedSize)) +} + +// NewListBlockHeaders creates a ListBlockHeaders of the given size, with every element set to nil +func NewListBlockHeaders(size int) ListBlockHeaders { + return ListBlockHeaders(make([]*BlockHeader, size)) +} +func (lbs ListBlockHeaders) Len() int { + return len(lbs) +} + +func (lbs ListBlockHeaders) ToMap() MapBlockHeaders { + result := NewMapBlockHeadersEmpty(lbs.Len()) + for _, header := range lbs { + if header != nil { + result[header.Number] = header + } + } + return result +} + +func (lbs ListBlockHeaders) BlockNumbers() []uint64 { + result := make([]uint64, 0, len(lbs)) + for _, header := range lbs { + if header != nil { + result = append(result, header.Number) + } + } + sort.Slice(result, func(i, j int) bool { + return result[i] < result[j] + }) + return result +} + +func (lbs ListBlockHeaders) BlockRange() aggkitcommon.BlockRange { + if len(lbs) == 0 { + return aggkitcommon.BlockRangeZero + } + var minBlock, maxBlock uint64 + initialized := false + for _, header := range lbs { + if header != nil { + if !initialized { + minBlock = header.Number + maxBlock = header.Number + initialized = true + } else { + if header.Number < minBlock { + minBlock = header.Number + } + if header.Number > maxBlock { + maxBlock = header.Number + } + } + } + } + if !initialized { + return aggkitcommon.BlockRangeZero + } + return 
aggkitcommon.NewBlockRange(minBlock, maxBlock) +} diff --git a/types/list_block_header_test.go b/types/list_block_header_test.go new file mode 100644 index 000000000..70f149f46 --- /dev/null +++ b/types/list_block_header_test.go @@ -0,0 +1,253 @@ +package types + +import ( + "testing" + + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestNewListBlockHeadersEmpty(t *testing.T) { + t.Run("creates empty list with pre-allocated capacity", func(t *testing.T) { + size := 10 + list := NewListBlockHeadersEmpty(size) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + require.Equal(t, size, cap(list)) + }) + + t.Run("creates empty list with zero capacity", func(t *testing.T) { + list := NewListBlockHeadersEmpty(0) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + }) +} + +func TestNewListBlockHeaders(t *testing.T) { + t.Run("creates list with specified size filled with nil", func(t *testing.T) { + size := 5 + list := NewListBlockHeaders(size) + + require.NotNil(t, list) + require.Equal(t, size, list.Len()) + for i := range size { + require.Nil(t, list[i]) + } + }) + + t.Run("creates empty list when size is zero", func(t *testing.T) { + list := NewListBlockHeaders(0) + + require.NotNil(t, list) + require.Equal(t, 0, list.Len()) + }) +} + +func TestListBlockHeaders_Len(t *testing.T) { + t.Run("returns correct length for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + require.Equal(t, 0, list.Len()) + }) + + t.Run("returns correct length for list with elements", func(t *testing.T) { + list := ListBlockHeaders{ + NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil), + NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil), + NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil), + } + require.Equal(t, 3, list.Len()) + }) + + t.Run("returns correct length for list with nil elements", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + require.Equal(t, 3, list.Len()) + }) +} + +func TestListBlockHeaders_ToMap(t *testing.T) { + t.Run("converts empty list to empty map", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.ToMap() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("converts list with headers to map", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.ToMap() + + require.Equal(t, 3, len(result)) + require.Equal(t, header1, result[1]) + require.Equal(t, header2, result[2]) + require.Equal(t, header3, result[5]) + }) + + t.Run("skips nil headers when converting to map", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header3 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + + list := ListBlockHeaders{header1, nil, header3, nil} + result := list.ToMap() + + require.Equal(t, 2, len(result)) + require.Equal(t, header1, result[1]) + require.Equal(t, header3, result[3]) + _, exists := result[0] + require.False(t, exists) + }) + + t.Run("handles list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.ToMap() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) +} + +func TestListBlockHeaders_BlockNumbers(t *testing.T) { + t.Run("returns 
empty slice for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.BlockNumbers() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("returns sorted block numbers", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(8, common.HexToHash("0x08"), 8000, nil) + header4 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3, header4} + result := list.BlockNumbers() + + require.Equal(t, 4, len(result)) + require.Equal(t, []uint64{1, 2, 5, 8}, result) + }) + + t.Run("skips nil headers when extracting block numbers", func(t *testing.T) { + header1 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + header2 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{nil, header1, nil, header2, nil} + result := list.BlockNumbers() + + require.Equal(t, 2, len(result)) + require.Equal(t, []uint64{1, 3}, result) + }) + + t.Run("returns empty slice for list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.BlockNumbers() + + require.NotNil(t, result) + require.Equal(t, 0, len(result)) + }) + + t.Run("handles duplicate block numbers", func(t *testing.T) { + header1 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02b"), 2001, nil) + header3 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockNumbers() + + require.Equal(t, 3, len(result)) + require.Equal(t, []uint64{1, 2, 2}, result) + }) +} + +func TestListBlockHeaders_BlockRange(t *testing.T) { + t.Run("returns empty block range for empty list", func(t *testing.T) { + list := ListBlockHeaders{} + result := list.BlockRange() + + require.Equal(t, aggkitcommon.BlockRange{}, result) + }) + + t.Run("returns correct range for single header", func(t *testing.T) { + header := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + list := ListBlockHeaders{header} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(5, 5) + require.Equal(t, expected, result) + }) + + t.Run("returns correct range for multiple headers in order", func(t *testing.T) { + header1 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(1, 3) + require.Equal(t, expected, result) + }) + + t.Run("returns correct range for multiple headers out of order", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(2, common.HexToHash("0x02"), 2000, nil) + header3 := NewBlockHeader(8, common.HexToHash("0x08"), 8000, nil) + header4 := NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil) + + list := ListBlockHeaders{header1, header2, header3, header4} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(1, 8) + require.Equal(t, expected, result) + }) + + t.Run("skips nil headers when calculating range", func(t *testing.T) { + header1 := NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil) + header2 := NewBlockHeader(10, common.HexToHash("0x0a"), 10000, nil) 
+ + list := ListBlockHeaders{nil, header1, nil, header2, nil} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(3, 10) + require.Equal(t, expected, result) + }) + + t.Run("returns empty range for list with only nil headers", func(t *testing.T) { + list := ListBlockHeaders{nil, nil, nil} + result := list.BlockRange() + + require.Equal(t, aggkitcommon.BlockRange{}, result) + }) + + t.Run("handles non-consecutive block numbers", func(t *testing.T) { + header1 := NewBlockHeader(100, common.HexToHash("0x64"), 100000, nil) + header2 := NewBlockHeader(500, common.HexToHash("0x01f4"), 500000, nil) + header3 := NewBlockHeader(250, common.HexToHash("0xfa"), 250000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(100, 500) + require.Equal(t, expected, result) + }) + + t.Run("handles duplicate block numbers", func(t *testing.T) { + header1 := NewBlockHeader(5, common.HexToHash("0x05"), 5000, nil) + header2 := NewBlockHeader(5, common.HexToHash("0x05b"), 5001, nil) + header3 := NewBlockHeader(10, common.HexToHash("0x0a"), 10000, nil) + + list := ListBlockHeaders{header1, header2, header3} + result := list.BlockRange() + + expected := aggkitcommon.NewBlockRange(5, 10) + require.Equal(t, expected, result) + }) +} diff --git a/types/map_block_header.go b/types/map_block_header.go new file mode 100644 index 000000000..76d9529eb --- /dev/null +++ b/types/map_block_header.go @@ -0,0 +1,7 @@ +package types + +type MapBlockHeaders map[uint64]*BlockHeader + +func NewMapBlockHeadersEmpty(preAllocatedSize int) MapBlockHeaders { + return MapBlockHeaders(make(map[uint64]*BlockHeader, preAllocatedSize)) +} diff --git a/types/mocks/mock_custom_ethereum_clienter.go b/types/mocks/mock_custom_ethereum_clienter.go new file mode 100644 index 000000000..99cdf3376 --- /dev/null +++ b/types/mocks/mock_custom_ethereum_clienter.go @@ -0,0 +1,96 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + types "github.com/agglayer/aggkit/types" + mock "github.com/stretchr/testify/mock" +) + +// CustomEthereumClienter is an autogenerated mock type for the CustomEthereumClienter type +type CustomEthereumClienter struct { + mock.Mock +} + +type CustomEthereumClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *CustomEthereumClienter) EXPECT() *CustomEthereumClienter_Expecter { + return &CustomEthereumClienter_Expecter{mock: &_m.Mock} +} + +// CustomHeaderByNumber provides a mock function with given fields: ctx, number +func (_m *CustomEthereumClienter) CustomHeaderByNumber(ctx context.Context, number *types.BlockNumberFinality) (*types.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for CustomHeaderByNumber") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) *types.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CustomEthereumClienter_CustomHeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CustomHeaderByNumber' +type CustomEthereumClienter_CustomHeaderByNumber_Call struct { + *mock.Call +} + +// CustomHeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *types.BlockNumberFinality +func (_e *CustomEthereumClienter_Expecter) CustomHeaderByNumber(ctx interface{}, number interface{}) *CustomEthereumClienter_CustomHeaderByNumber_Call { + return &CustomEthereumClienter_CustomHeaderByNumber_Call{Call: _e.mock.On("CustomHeaderByNumber", ctx, number)} +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) Run(run func(ctx context.Context, number *types.BlockNumberFinality)) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.BlockNumberFinality)) + }) + return _c +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) Return(_a0 *types.BlockHeader, _a1 error) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CustomEthereumClienter_CustomHeaderByNumber_Call) RunAndReturn(run func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)) *CustomEthereumClienter_CustomHeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewCustomEthereumClienter creates a new instance of CustomEthereumClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCustomEthereumClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *CustomEthereumClienter { + mock := &CustomEthereumClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/mock_eth_chain_reader.go b/types/mocks/mock_eth_chain_reader.go new file mode 100644 index 000000000..479a653c2 --- /dev/null +++ b/types/mocks/mock_eth_chain_reader.go @@ -0,0 +1,99 @@ +// Code generated by mockery. 
DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthChainReader is an autogenerated mock type for the EthChainReader type +type EthChainReader struct { + mock.Mock +} + +type EthChainReader_Expecter struct { + mock *mock.Mock +} + +func (_m *EthChainReader) EXPECT() *EthChainReader_Expecter { + return &EthChainReader_Expecter{mock: &_m.Mock} +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthChainReader) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthChainReader_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthChainReader_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthChainReader_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthChainReader_HeaderByHash_Call { + return &EthChainReader_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthChainReader_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthChainReader_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthChainReader_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthChainReader_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthChainReader_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthChainReader_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// NewEthChainReader creates a new instance of EthChainReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEthChainReader(t interface { + mock.TestingT + Cleanup(func()) +}) *EthChainReader { + mock := &EthChainReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/mock_multi_downloader.go b/types/mocks/mock_multi_downloader.go index 91dbca44e..a0dadcd22 100644 --- a/types/mocks/mock_multi_downloader.go +++ b/types/mocks/mock_multi_downloader.go @@ -26,65 +26,6 @@ func (_m *MultiDownloader) EXPECT() *MultiDownloader_Expecter { return &MultiDownloader_Expecter{mock: &_m.Mock} } -// BlockHeader provides a mock function with given fields: ctx, finality -func (_m *MultiDownloader) BlockHeader(ctx context.Context, finality types.BlockNumberFinality) (*types.BlockHeader, error) { - ret := _m.Called(ctx, finality) - - if len(ret) == 0 { - panic("no return value specified for BlockHeader") - } - - var r0 *types.BlockHeader - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (*types.BlockHeader, error)); ok { - return rf(ctx, finality) - } - if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) *types.BlockHeader); ok { - r0 = rf(ctx, finality) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.BlockHeader) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { - r1 = rf(ctx, finality) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MultiDownloader_BlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHeader' -type MultiDownloader_BlockHeader_Call struct { - *mock.Call -} - -// BlockHeader is a helper method to define mock.On call -// - ctx context.Context -// - finality types.BlockNumberFinality -func (_e *MultiDownloader_Expecter) BlockHeader(ctx interface{}, finality interface{}) *MultiDownloader_BlockHeader_Call { - return &MultiDownloader_BlockHeader_Call{Call: _e.mock.On("BlockHeader", ctx, finality)} -} - -func (_c *MultiDownloader_BlockHeader_Call) Run(run func(ctx context.Context, finality types.BlockNumberFinality)) *MultiDownloader_BlockHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) - }) - return _c -} - -func (_c *MultiDownloader_BlockHeader_Call) Return(_a0 *types.BlockHeader, _a1 error) *MultiDownloader_BlockHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MultiDownloader_BlockHeader_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (*types.BlockHeader, error)) *MultiDownloader_BlockHeader_Call { - _c.Call.Return(run) - return _c -} - // BlockNumber provides a mock function with given fields: ctx, finality func (_m *MultiDownloader) BlockNumber(ctx context.Context, finality types.BlockNumberFinality) (uint64, error) { ret := _m.Called(ctx, finality) diff --git a/types/mocks/mock_multi_downloader_legacy.go b/types/mocks/mock_multi_downloader_legacy.go new file mode 100644 index 000000000..eb5329d35 --- /dev/null +++ b/types/mocks/mock_multi_downloader_legacy.go @@ -0,0 +1,411 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + ethereum "github.com/ethereum/go-ethereum" + coretypes "github.com/ethereum/go-ethereum/core/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/agglayer/aggkit/types" +) + +// MultiDownloaderLegacy is an autogenerated mock type for the MultiDownloaderLegacy type +type MultiDownloaderLegacy struct { + mock.Mock +} + +type MultiDownloaderLegacy_Expecter struct { + mock *mock.Mock +} + +func (_m *MultiDownloaderLegacy) EXPECT() *MultiDownloaderLegacy_Expecter { + return &MultiDownloaderLegacy_Expecter{mock: &_m.Mock} +} + +// BlockNumber provides a mock function with given fields: ctx, finality +func (_m *MultiDownloaderLegacy) BlockNumber(ctx context.Context, finality types.BlockNumberFinality) (uint64, error) { + ret := _m.Called(ctx, finality) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) (uint64, error)); ok { + return rf(ctx, finality) + } + if rf, ok := ret.Get(0).(func(context.Context, types.BlockNumberFinality) uint64); ok { + r0 = rf(ctx, finality) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.BlockNumberFinality) error); ok { + r1 = rf(ctx, finality) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type MultiDownloaderLegacy_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +// - finality types.BlockNumberFinality +func (_e *MultiDownloaderLegacy_Expecter) BlockNumber(ctx interface{}, finality interface{}) *MultiDownloaderLegacy_BlockNumber_Call { + return &MultiDownloaderLegacy_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx, finality)} +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) Run(run func(ctx context.Context, finality types.BlockNumberFinality)) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) Return(_a0 uint64, _a1 error) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_BlockNumber_Call) RunAndReturn(run func(context.Context, types.BlockNumberFinality) (uint64, error)) *MultiDownloaderLegacy_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// ChainID provides a mock function with given fields: ctx +func (_m *MultiDownloaderLegacy) ChainID(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type MultiDownloaderLegacy_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx 
context.Context +func (_e *MultiDownloaderLegacy_Expecter) ChainID(ctx interface{}) *MultiDownloaderLegacy_ChainID_Call { + return &MultiDownloaderLegacy_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) Run(run func(ctx context.Context)) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) Return(_a0 uint64, _a1 error) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_ChainID_Call) RunAndReturn(run func(context.Context) (uint64, error)) *MultiDownloaderLegacy_ChainID_Call { + _c.Call.Return(run) + return _c +} + +// EthClient provides a mock function with no fields +func (_m *MultiDownloaderLegacy) EthClient() types.BaseEthereumClienter { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EthClient") + } + + var r0 types.BaseEthereumClienter + if rf, ok := ret.Get(0).(func() types.BaseEthereumClienter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.BaseEthereumClienter) + } + } + + return r0 +} + +// MultiDownloaderLegacy_EthClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EthClient' +type MultiDownloaderLegacy_EthClient_Call struct { + *mock.Call +} + +// EthClient is a helper method to define mock.On call +func (_e *MultiDownloaderLegacy_Expecter) EthClient() *MultiDownloaderLegacy_EthClient_Call { + return &MultiDownloaderLegacy_EthClient_Call{Call: _e.mock.On("EthClient")} +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) Run(run func()) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) Return(_a0 types.BaseEthereumClienter) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_EthClient_Call) RunAndReturn(run func() types.BaseEthereumClienter) *MultiDownloaderLegacy_EthClient_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *MultiDownloaderLegacy) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]coretypes.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []coretypes.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []coretypes.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coretypes.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type MultiDownloaderLegacy_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *MultiDownloaderLegacy_Expecter) FilterLogs(ctx interface{}, q interface{}) *MultiDownloaderLegacy_FilterLogs_Call { + return &MultiDownloaderLegacy_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c 
*MultiDownloaderLegacy_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_FilterLogs_Call) Return(_a0 []coretypes.Log, _a1 error) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)) *MultiDownloaderLegacy_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *MultiDownloaderLegacy) HeaderByNumber(ctx context.Context, number *types.BlockNumberFinality) (*types.BlockHeader, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.BlockHeader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.BlockNumberFinality) *types.BlockHeader); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.BlockNumberFinality) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiDownloaderLegacy_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type MultiDownloaderLegacy_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *types.BlockNumberFinality +func (_e *MultiDownloaderLegacy_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *MultiDownloaderLegacy_HeaderByNumber_Call { + return &MultiDownloaderLegacy_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) Run(run func(ctx context.Context, number *types.BlockNumberFinality)) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.BlockNumberFinality)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) Return(_a0 *types.BlockHeader, _a1 error) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MultiDownloaderLegacy_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *types.BlockNumberFinality) (*types.BlockHeader, error)) *MultiDownloaderLegacy_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// RegisterSyncer provides a mock function with given fields: data +func (_m *MultiDownloaderLegacy) RegisterSyncer(data types.SyncerConfig) error { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for RegisterSyncer") + } + + var r0 error + if rf, ok := ret.Get(0).(func(types.SyncerConfig) error); ok { + r0 = rf(data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiDownloaderLegacy_RegisterSyncer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisterSyncer' +type MultiDownloaderLegacy_RegisterSyncer_Call struct { + *mock.Call +} + +// RegisterSyncer is a helper method to define 
mock.On call +// - data types.SyncerConfig +func (_e *MultiDownloaderLegacy_Expecter) RegisterSyncer(data interface{}) *MultiDownloaderLegacy_RegisterSyncer_Call { + return &MultiDownloaderLegacy_RegisterSyncer_Call{Call: _e.mock.On("RegisterSyncer", data)} +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) Run(run func(data types.SyncerConfig)) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.SyncerConfig)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) Return(_a0 error) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_RegisterSyncer_Call) RunAndReturn(run func(types.SyncerConfig) error) *MultiDownloaderLegacy_RegisterSyncer_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: ctx +func (_m *MultiDownloaderLegacy) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiDownloaderLegacy_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type MultiDownloaderLegacy_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +func (_e *MultiDownloaderLegacy_Expecter) Start(ctx interface{}) *MultiDownloaderLegacy_Start_Call { + return &MultiDownloaderLegacy_Start_Call{Call: _e.mock.On("Start", ctx)} +} + +func (_c *MultiDownloaderLegacy_Start_Call) Run(run func(ctx context.Context)) *MultiDownloaderLegacy_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MultiDownloaderLegacy_Start_Call) Return(_a0 error) *MultiDownloaderLegacy_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MultiDownloaderLegacy_Start_Call) RunAndReturn(run func(context.Context) error) *MultiDownloaderLegacy_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewMultiDownloaderLegacy creates a new instance of MultiDownloaderLegacy. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value. +func NewMultiDownloaderLegacy(t interface { + mock.TestingT + Cleanup(func()) +}) *MultiDownloaderLegacy { + mock := &MultiDownloaderLegacy{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/multidownloader.go b/types/multidownloader.go index 4c4dc279c..611cc887d 100644 --- a/types/multidownloader.go +++ b/types/multidownloader.go @@ -11,20 +11,20 @@ import ( type SyncerConfig struct { // SyncerID is the unique identifier for the syncer SyncerID string - // ContractAddr is list of contract addresses to sync - ContractsAddr []common.Address + // ContractAddresses is the list of contract addresses to sync + ContractAddresses []common.Address // Starting block FromBlock uint64 // Target for final block (e.g. LatestBlock, SafeBlock, FinalizedBlock) ToBlock BlockNumberFinality } -type MultiDownloader interface { +type MultiDownloaderLegacy interface { ChainID(ctx context.Context) (uint64, error) BlockNumber(ctx context.Context, finality BlockNumberFinality) (uint64, error) - // TODO: delete this method because it's only required for a intermediate fix of old RerogDetector - BlockHeader(ctx context.Context, finality BlockNumberFinality) (*BlockHeader, error) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]ethtypes.Log, error) + // HeaderByNumber returns the block header for the given number and finality. + // If number is nil, the latest block header is returned. HeaderByNumber(ctx context.Context, number *BlockNumberFinality) (*BlockHeader, error) EthClient() BaseEthereumClienter RegisterSyncer(data SyncerConfig) error
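
Usage sketch (illustrative only, not part of the patch): the snippet below shows how the new BlockHeader and ListBlockHeaders helpers compose. The test name and the aggkittypes import alias are hypothetical; the constructors and methods are the ones added in types/block_header.go and types/list_block_header.go above. Note that Empty is strictly a nil check: per the tests above, a zero-valued header is not considered empty.

package types_test

import (
	"fmt"
	"testing"

	aggkittypes "github.com/agglayer/aggkit/types"
	"github.com/ethereum/go-ethereum/common"
)

func TestListBlockHeadersSketch(t *testing.T) {
	// An unordered list of headers; nil entries are tolerated by every helper.
	headers := aggkittypes.ListBlockHeaders{
		aggkittypes.NewBlockHeader(3, common.HexToHash("0x03"), 3000, nil),
		nil,
		aggkittypes.NewBlockHeader(1, common.HexToHash("0x01"), 1000, nil),
	}

	// BlockNumbers skips nil entries and sorts ascending: [1 3].
	fmt.Println(headers.BlockNumbers())

	// BlockRange spans min..max regardless of input order: blocks 1 through 3.
	fmt.Println(headers.BlockRange())

	// ToMap indexes headers by block number, again skipping nil entries.
	byNumber := headers.ToMap()
	fmt.Println(byNumber[1].Brief()) // BlockHeader{Number: 1, Hash: 0x...}
	fmt.Println(byNumber[1].Empty()) // false; only a nil *BlockHeader is empty
}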
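
A second sketch, wiring the regenerated MultiDownloaderLegacy mock into a consumer test. It assumes the mocks package import path follows the module layout shown in the diff headers ("github.com/agglayer/aggkit/types/mocks"); the EXPECT-based helpers and the renamed SyncerConfig.ContractAddresses field are the ones generated and declared above, while the test name, syncer ID, and address are placeholders.

package consumer_test

import (
	"context"
	"testing"

	aggkittypes "github.com/agglayer/aggkit/types"
	"github.com/agglayer/aggkit/types/mocks"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestMultiDownloaderLegacyMockSketch(t *testing.T) {
	// The constructor registers a cleanup that asserts all expectations were met.
	md := mocks.NewMultiDownloaderLegacy(t)

	// Typed Expecter API generated by mockery: EXPECT().Method(...).Return(...).
	md.EXPECT().ChainID(mock.Anything).Return(uint64(1337), nil)
	md.EXPECT().RegisterSyncer(mock.Anything).Return(nil)

	chainID, err := md.ChainID(context.Background())
	require.NoError(t, err)
	require.Equal(t, uint64(1337), chainID)

	// SyncerConfig now uses ContractAddresses (renamed from ContractsAddr).
	cfg := aggkittypes.SyncerConfig{
		SyncerID:          "example-syncer", // hypothetical ID
		ContractAddresses: []common.Address{common.HexToAddress("0x01")},
		FromBlock:         100,
	}
	require.NoError(t, md.RegisterSyncer(cfg))
}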