From df33d3a0a34642e58256a36753aae48b1e586455 Mon Sep 17 00:00:00 2001
From: kukoomomo
Date: Fri, 30 Jan 2026 18:15:13 +0800
Subject: [PATCH 01/12] tx-submitter: add batch cache for rebuilding batches on restart
---
 tx-submitter/batch/batch.go              | 164 ++++
 tx-submitter/batch/batch_cache.go        | 921 +++++++++++++++++++++++
 tx-submitter/batch/batch_cache_test.go   |  40 +
 tx-submitter/batch/batch_header.go       | 215 ++++++
 tx-submitter/batch/batch_query.go        | 383 ++++++++++
 tx-submitter/batch/batch_restart_test.go | 674 +++++++++++++++++
 tx-submitter/batch/blob.go               | 210 ++++++
 tx-submitter/types/converter.go          |  25 +
 tx-submitter/utils/methods.go            |   2 +-
 9 files changed, 2633 insertions(+), 1 deletion(-)
 create mode 100644 tx-submitter/batch/batch.go
 create mode 100644 tx-submitter/batch/batch_cache.go
 create mode 100644 tx-submitter/batch/batch_cache_test.go
 create mode 100644 tx-submitter/batch/batch_header.go
 create mode 100644 tx-submitter/batch/batch_query.go
 create mode 100644 tx-submitter/batch/batch_restart_test.go
 create mode 100644 tx-submitter/batch/blob.go
 create mode 100644 tx-submitter/types/converter.go

diff --git a/tx-submitter/batch/batch.go b/tx-submitter/batch/batch.go
new file mode 100644
index 000000000..b28e2c65f
--- /dev/null
+++ b/tx-submitter/batch/batch.go
@@ -0,0 +1,164 @@
+package batch
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"morph-l2/node/zstd"
+	"morph-l2/tx-submitter/types"
+
+	"github.com/morph-l2/go-ethereum/common"
+	"github.com/morph-l2/go-ethereum/crypto"
+)
+
+var (
+	EmptyVersionedHash = common.HexToHash("0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014")
+)
+
+type BatchData struct {
+	blockContexts []byte
+	l1TxHashes    []byte
+	l1TxNum       uint16
+	blockNum      uint16
+	txsPayload    []byte
+
+	hash *common.Hash
+}
+
+func NewBatchData() *BatchData {
+	return &BatchData{
+		blockContexts: make([]byte, 0),
+		l1TxHashes:    make([]byte, 0),
+		txsPayload:    make([]byte, 0),
+	}
+}
+
+func (cks *BatchData) Append(blockContext, txsPayload []byte, l1TxHashes []common.Hash) {
+	if cks == nil {
+		return
+	}
+	cks.blockContexts = append(cks.blockContexts, blockContext...)
+	cks.txsPayload = append(cks.txsPayload, txsPayload...)
+	cks.blockNum++
+	for _, txHash := range l1TxHashes {
+		cks.l1TxHashes = append(cks.l1TxHashes, txHash.Bytes()...)
+	}
+	cks.l1TxNum += uint16(len(l1TxHashes))
+}
+
+// Encode encodes the block contexts into bytes.
+// Below is the encoding, 60*n+2 bytes in total.
+// Field       Bytes  Type          Index    Comments
+// numBlocks   2      uint16        0        The number of blocks in this batch
+// block[0]    60     BlockContext  2        The first block in this batch
+// ......
+// block[i]    60     BlockContext  60*i+2   The (i+1)'th block in this batch
+// ......
+// block[n-1]  60     BlockContext  60*n-58  The last block in this batch
+func (cks *BatchData) Encode() ([]byte, error) {
+	if cks == nil || cks.blockNum == 0 {
+		return []byte{}, nil
+	}
+
+	data := make([]byte, 2)
+	binary.BigEndian.PutUint16(data, cks.blockNum)
+	data = append(data, cks.blockContexts...)
+	return data, nil
+}
+
+func (cks *BatchData) IsEmpty() bool {
+	return cks == nil || len(cks.blockContexts) == 0
+}
+
+// DataHash computes and caches the Keccak-256 hash of the batch data: the
+// first 58 bytes of every block context followed by the L1 tx hashes.
+func (cks *BatchData) DataHash() common.Hash {
+	if cks.hash != nil {
+		return *cks.hash
+	}
+
+	var bz []byte
+	for i := 0; i < int(cks.blockNum); i++ {
+		bz = append(bz, cks.blockContexts[i*60:i*60+58]...)
+	}
+	bz = append(bz, cks.l1TxHashes...)
+	h := crypto.Keccak256Hash(bz)
+	cks.hash = &h
+	return h
+}
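+
+// Illustrative usage (a sketch, not exercised by this change; blockCtx,
+// txsPayload and l1Hash are hypothetical placeholders for real inputs):
+//
+//	bd := NewBatchData()
+//	bd.Append(blockCtx, txsPayload, []common.Hash{l1Hash})
+//	encoded, _ := bd.Encode() // 2-byte block count + 60-byte block contexts
+//	fmt.Printf("%d blocks, dataHash=%x\n", bd.BlockNum(), bd.DataHash())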
+
+// DataHashV2 computes the Keccak-256 hash of the batch data, incorporating
+// the last block height, the L1 transaction count, and the L1 transaction hashes.
+func (cks *BatchData) DataHashV2() (common.Hash, error) {
+	// Validate blockContexts length
+	if len(cks.blockContexts) < 60 {
+		return common.Hash{}, fmt.Errorf("blockContexts too short, length: %d", len(cks.blockContexts))
+	}
+
+	// Extract the last 60-byte block context
+	lastBlockContext := cks.blockContexts[len(cks.blockContexts)-60:]
+
+	// Parse the block height
+	height, err := types.HeightFromBlockContextBytes(lastBlockContext)
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to parse blockContext: context length=%d, lastBlockContext=%x, err=%w",
+			len(cks.blockContexts), lastBlockContext, err)
+	}
+
+	// Compute the hash
+	return cks.calculateHash(height), nil
+}
+
+func (cks *BatchData) calculateHash(height uint64) common.Hash {
+	// Preallocate for efficiency: 8 bytes for height, 2 bytes for l1TxNum
+	hashData := make([]byte, 8+2+len(cks.l1TxHashes))
+	copy(hashData[:8], types.Uint64ToBigEndianBytes(height))
+	copy(hashData[8:10], types.Uint16ToBigEndianBytes(cks.l1TxNum))
+	copy(hashData[10:], cks.l1TxHashes)
+
+	return crypto.Keccak256Hash(hashData)
+}
+
+func (cks *BatchData) TxsPayload() []byte {
+	return cks.txsPayload
+}
+
+// TxsPayloadV2 returns the block contexts concatenated with the tx payload.
+// A fresh slice is allocated so the receiver's internal buffers are not aliased.
+func (cks *BatchData) TxsPayloadV2() []byte {
+	combined := make([]byte, 0, len(cks.blockContexts)+len(cks.txsPayload))
+	combined = append(combined, cks.blockContexts...)
+	return append(combined, cks.txsPayload...)
+}
+
+func (cks *BatchData) BlockNum() uint16 { return cks.blockNum }
+
+// EstimateCompressedSizeWithNewPayload reports whether appending txPayload to
+// the accumulated payload would exceed MaxBlobBytesSize after compression.
+// The bytes are copied into a fresh slice so the cached payload can never be
+// mutated through append's shared backing array.
+func (cks *BatchData) EstimateCompressedSizeWithNewPayload(txPayload []byte) (bool, error) {
+	blobBytes := make([]byte, 0, len(cks.txsPayload)+len(txPayload))
+	blobBytes = append(blobBytes, cks.txsPayload...)
+	blobBytes = append(blobBytes, txPayload...)
+	if len(blobBytes) <= MaxBlobBytesSize {
+		return false, nil
+	}
+	compressed, err := zstd.CompressBatchBytes(blobBytes)
+	if err != nil {
+		return false, err
+	}
+	return len(compressed) > MaxBlobBytesSize, nil
+}
+
+func (cks *BatchData) combinePayloads(newBlockContext, newTxPayload []byte) []byte {
+	totalLength := len(cks.blockContexts) + len(newBlockContext) + len(cks.txsPayload) + len(newTxPayload)
+	combined := make([]byte, totalLength)
+	copy(combined, cks.blockContexts)
+	copy(combined[len(cks.blockContexts):], newBlockContext)
+	copy(combined[len(cks.blockContexts)+len(newBlockContext):], cks.txsPayload)
+	copy(combined[len(cks.blockContexts)+len(newBlockContext)+len(cks.txsPayload):], newTxPayload)
+	return combined
+}
+
+// WillExceedCompressedSizeLimit checks whether the combined block contexts
+// and transaction payloads (after compression) exceed the maximum allowed size.
+func (cks *BatchData) WillExceedCompressedSizeLimit(newBlockContext, newTxPayload []byte) (bool, error) { + // Combine the existing and new block contexts and transaction payloads + combinedBytes := cks.combinePayloads(newBlockContext, newTxPayload) + if len(combinedBytes) <= MaxBlobBytesSize { + return false, nil + } + compressed, err := zstd.CompressBatchBytes(combinedBytes) + if err != nil { + return false, fmt.Errorf("compression failed: %w", err) + } + return len(compressed) > MaxBlobBytesSize, nil +} diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go new file mode 100644 index 000000000..ab15ca17a --- /dev/null +++ b/tx-submitter/batch/batch_cache.go @@ -0,0 +1,921 @@ +package batch + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/log" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/iface" + + "github.com/morph-l2/go-ethereum/common" + ethtypes "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/ethclient" +) + +// SealedBatchInfo stores sealed batch information +type SealedBatchInfo struct { + BatchHeader BatchHeaderBytes // complete batch header + BatchHash common.Hash // batch hash + Sidecar *ethtypes.BlobTxSidecar // blob sidecar + CompressedPayload []byte // compressed payload + DataHash common.Hash // batch data hash + LastBlockNumber uint64 // last block number + PostStateRoot common.Hash // post state root + WithdrawRoot common.Hash // withdraw root + TotalL1MessagePopped uint64 // total L1 messages popped + SealedAt time.Time // sealed timestamp +} + +// BatchCache is a structure for caching and building batch data +// Stores all batch information starting from 0, and has the functionality to pack batches +type BatchCache struct { + mu sync.RWMutex + + // key: batchIndex, value: SealedBatchInfo + sealedBatches map[uint64]*SealedBatchInfo + + // Currently accumulating batch data (referencing node's BatchingCache) + // Parent batch information + parentBatchHeader *BatchHeaderBytes + prevStateRoot common.Hash + + // Accumulated batch data + batchData *BatchData + totalL1MessagePopped uint64 + postStateRoot common.Hash + withdrawRoot common.Hash + lastPackedBlockHeight uint64 + + // Currently processing block data (referencing node's BatchingCache) + // This data will not be appended to batch until block is confirmed + currentBlockContext []byte + currentTxsPayload []byte + currentL1TxsHashes []common.Hash + totalL1MessagePoppedAfterCurBlock uint64 + currentStateRoot common.Hash + currentWithdrawRoot common.Hash + currentBlockNumber uint64 + currentBlockHash common.Hash + + // Function to determine if batch is upgraded + isBatchUpgraded func(uint64) bool + + // Clients and contracts + l1Client *ethclient.Client + l2Client iface.L2Client + rollupContract *bindings.Rollup + sequencerContract *bindings.Sequencer + l2MessagePasserContract *bindings.L2ToL1MessagePasser + govContract *bindings.Gov + + // config + batchTimeOut uint64 + blockInterval uint64 +} + +// NewBatchCache creates and initializes a new BatchCache instance +func NewBatchCache( + isBatchUpgraded func(uint64) bool, + l1Client *ethclient.Client, + l2Client iface.L2Client, + rollupContract *bindings.Rollup, + sequencerContract *bindings.Sequencer, + l2MessagePasserContract *bindings.L2ToL1MessagePasser, + govContract *bindings.Gov, +) *BatchCache { + if isBatchUpgraded == nil { + // Default 
implementation: always treat the batch as upgraded (build V1 batches)
+		isBatchUpgraded = func(uint64) bool { return true }
+	}
+
+	return &BatchCache{
+		sealedBatches:                     make(map[uint64]*SealedBatchInfo),
+		parentBatchHeader:                 nil,
+		prevStateRoot:                     common.Hash{},
+		batchData:                         NewBatchData(),
+		totalL1MessagePopped:              0,
+		postStateRoot:                     common.Hash{},
+		withdrawRoot:                      common.Hash{},
+		lastPackedBlockHeight:             0,
+		currentBlockContext:               nil,
+		currentTxsPayload:                 nil,
+		currentL1TxsHashes:                nil,
+		totalL1MessagePoppedAfterCurBlock: 0,
+		currentStateRoot:                  common.Hash{},
+		currentWithdrawRoot:               common.Hash{},
+		currentBlockNumber:                0,
+		currentBlockHash:                  common.Hash{},
+		isBatchUpgraded:                   isBatchUpgraded,
+		l1Client:                          l1Client,
+		l2Client:                          l2Client,
+		rollupContract:                    rollupContract,
+		sequencerContract:                 sequencerContract,
+		l2MessagePasserContract:           l2MessagePasserContract,
+		govContract:                       govContract,
+	}
+}
+
+// InitFromRollupByRange rebuilds the cache by replaying every unfinalized L2
+// block, re-sealing batches with the same capacity/interval/timeout rules used live.
+func (bc *BatchCache) InitFromRollupByRange() error {
+	err := bc.updateBatchConfigFromGov()
+	if err != nil {
+		return err
+	}
+	ci, fi, err := bc.getBatchStatusFromContract()
+	if err != nil {
+		return fmt.Errorf("failed to get batch status from rollup: %w", err)
+	}
+	headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(fi.Uint64())
+	if err != nil {
+		return fmt.Errorf("failed to get last finalized batch header: %w", err)
+	}
+	parentStateRoot, err := headerBytes.PostStateRoot()
+	if err != nil {
+		return fmt.Errorf("failed to get post state root: %w", err)
+	}
+	// Initialize the parent batch information.
+	// The prevStateRoot of the batch being built is the parent batch's postStateRoot.
+	bc.parentBatchHeader = headerBytes
+	bc.prevStateRoot = parentStateRoot
+	// Note: this requires a V1 header; V0 headers do not carry LastBlockNumber.
+	bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber()
+	if err != nil {
+		return fmt.Errorf("failed to get last block number: %w", err)
+	}
+	bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped()
+	if err != nil {
+		return fmt.Errorf("failed to get total l1 message popped: %w", err)
+	}
+	log.Info("Start assembling batches", "start batch", fi.Uint64()+1, "end batch", ci.Uint64())
+
+	err = bc.assembleBatchHeaderFromL2BlocksByBlockRange()
+	if err != nil {
+		return err
+	}
+	log.Info("Batch cache initialized successfully")
+	return nil
+}
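+
+// Illustrative recovery flow (a sketch; the client and contract wiring is
+// assumed to follow NewBatchCache above, and log.Crit is just one way to bail):
+//
+//	bc := NewBatchCache(nil, l1Client, l2Client, rollup, sequencer, passer, gov)
+//	if err := bc.InitFromRollupByRange(); err != nil {
+//		log.Crit("failed to rebuild batch cache after restart", "err", err)
+//	}
+//	latest := bc.GetLatestSealedBatchIndex() // resume submission from here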
+
+// InitFromRollup rebuilds the cache by replaying the exact block ranges of the
+// batches already recorded in the rollup contract.
+func (bc *BatchCache) InitFromRollup() error {
+	ci, fi, err := bc.getBatchStatusFromContract()
+	if err != nil {
+		return fmt.Errorf("failed to get batch status from rollup: %w", err)
+	}
+	headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(fi.Uint64())
+	if err != nil {
+		return fmt.Errorf("failed to get last finalized batch header: %w", err)
+	}
+	parentStateRoot, err := headerBytes.PostStateRoot()
+	if err != nil {
+		return fmt.Errorf("failed to get post state root: %w", err)
+	}
+	// Initialize the parent batch information.
+	// The prevStateRoot of the batch being built is the parent batch's postStateRoot.
+	bc.parentBatchHeader = headerBytes
+	bc.prevStateRoot = parentStateRoot
+	// Note: this requires a V1 header; V0 headers do not carry LastBlockNumber.
+	bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber()
+	if err != nil {
+		return fmt.Errorf("failed to get last block number: %w", err)
+	}
+	bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped()
+	if err != nil {
+		return fmt.Errorf("failed to get total l1 message popped: %w", err)
+	}
+	log.Info("Start assembling batches", "start batch", fi.Uint64()+1, "end batch", ci.Uint64())
+	// Replay every committed-but-unfinalized batch, including the last committed one (ci).
+	for i := fi.Uint64() + 1; i <= ci.Uint64(); i++ {
+		batchIndex := new(big.Int).SetUint64(i)
+		startNum, endNum, err := bc.getBatchBlockRange(batchIndex)
+		if err != nil {
+			return fmt.Errorf("failed to get batch block range (start %v, end %v): %w", startNum, endNum, err)
+		}
+		{
+			startBlock, err := bc.l2Client.BlockByNumber(context.Background(), new(big.Int).SetUint64(startNum))
+			if err != nil {
+				return err
+			}
+			endBlock, err := bc.l2Client.BlockByNumber(context.Background(), new(big.Int).SetUint64(endNum))
+			if err != nil {
+				return err
+			}
+			log.Info("Assembling batch from L2 blocks", "from block", startNum, "to block", endNum, "time", endBlock.Time()-startBlock.Time())
+		}
+		batchHeaderBytes, err := bc.assembleBatchHeaderFromL2Blocks(startNum, endNum)
+		if err != nil {
+			return err
+		}
+		batchHash, err := batchHeaderBytes.Hash()
+		if err != nil {
+			return fmt.Errorf("failed to get batch hash: %w", err)
+		}
+		correct, err := bc.checkBatchHashCorrect(batchIndex, batchHash)
+		if err != nil {
+			return fmt.Errorf("failed to check batch hash: %w", err)
+		}
+		if !correct {
+			return fmt.Errorf("batch hash check failed: batch index %d is incorrect", i)
+		}
+		log.Info("Assembled batch successfully", "batch index", i, "last batch index", ci.Uint64())
+	}
+	log.Info("Batch cache initialized successfully")
+	return nil
+}
+
+func (bc *BatchCache) updateBatchConfigFromGov() error {
+	interval, err := bc.govContract.BatchBlockInterval(nil)
+	if err != nil {
+		return err
+	}
+	timeout, err := bc.govContract.BatchTimeout(nil)
+	if err != nil {
+		return err
+	}
+	bc.batchTimeOut = timeout.Uint64()
+	bc.blockInterval = interval.Uint64()
+	log.Info("Updated batch config from gov contract", "interval", interval.Uint64(), "timeout", timeout.Uint64())
+	return nil
+}
+
+func (bc *BatchCache) checkBatchHashCorrect(batchIndex *big.Int, batchHash common.Hash) (bool, error) {
+	commitBatchHash, err := bc.rollupContract.CommittedBatches(nil, batchIndex)
+	if err != nil {
+		return false, err
+	}
+	return bytes.Equal(commitBatchHash[:], batchHash.Bytes()), nil
+}
+
+func (bc *BatchCache) getBatchStatusFromContract() (*big.Int, *big.Int, error) {
+	latestCommitBatchIndex, err := bc.rollupContract.LastCommittedBatchIndex(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	lastFinalizedBatchIndex, err := bc.rollupContract.LastFinalizedBatchIndex(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return latestCommitBatchIndex, lastFinalizedBatchIndex, nil
+}
+
+func (bc *BatchCache) getBatchBlockRange(batchIndex *big.Int) (uint64, uint64, error) {
+	preIndex := new(big.Int).Sub(batchIndex, big.NewInt(1))
+	preBatchStorage, err := bc.rollupContract.BatchDataStore(nil, preIndex)
+	if err != nil {
+		return 0, 0, err
+	}
+	batchStorage, err := bc.rollupContract.BatchDataStore(nil, batchIndex)
+	if err != nil {
+		return 0, 0, err
+	}
+	return preBatchStorage.BlockNumber.Uint64() + 1, batchStorage.BlockNumber.Uint64(), nil
+}
+
+func (bc *BatchCache) getUnfinalizedBlockRange() (uint64, uint64, *big.Int, error) {
+	ci, fi, err := bc.getBatchStatusFromContract()
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	finalizeBatchStorage, err := bc.rollupContract.BatchDataStore(nil, fi)
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	startNum := finalizeBatchStorage.BlockNumber.Uint64() + 1
+	endNum, err := bc.l2Client.BlockNumber(context.Background())
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	return startNum, endNum, ci, nil
+}
+
+// IsEmpty checks if the currently accumulating batch data is empty
+func (bc *BatchCache) IsEmpty() bool {
+	bc.mu.RLock()
+	defer bc.mu.RUnlock()
+	return bc.batchData == nil || bc.batchData.IsEmpty()
+}
+
+// IsCurrentEmpty checks if 
current block data is empty +func (bc *BatchCache) IsCurrentEmpty() bool { + bc.mu.RLock() + defer bc.mu.RUnlock() + return len(bc.currentBlockContext) == 0 +} + +// ClearCurrent clears current block data +// Note: lock must be held before calling this method +func (bc *BatchCache) ClearCurrent() { + bc.currentTxsPayload = nil + bc.currentL1TxsHashes = nil + bc.currentBlockContext = nil + bc.totalL1MessagePoppedAfterCurBlock = 0 + bc.currentStateRoot = common.Hash{} + bc.currentWithdrawRoot = common.Hash{} + bc.currentBlockNumber = 0 + bc.currentBlockHash = common.Hash{} +} + +// GetSealedBatch gets sealed batch information +func (bc *BatchCache) GetSealedBatch(batchIndex uint64) (*SealedBatchInfo, bool) { + bc.mu.RLock() + defer bc.mu.RUnlock() + batch, ok := bc.sealedBatches[batchIndex] + return batch, ok +} + +// GetLatestSealedBatchIndex gets the latest sealed batch index +func (bc *BatchCache) GetLatestSealedBatchIndex() uint64 { + bc.mu.RLock() + defer bc.mu.RUnlock() + + var maxIndex uint64 = 0 + for index := range bc.sealedBatches { + if index > maxIndex { + maxIndex = index + } + } + return maxIndex +} + +// CalculateCapWithProposalBlock calculates batch capacity after including the specified block +// References node's CalculateCapWithProposalBlock +// Parameters: +// - blockNumber: block number to check +// - l2Client: L2 client interface +// +// Returns: +// - exceeded: returns true if compressed size would exceed MaxBlobBytesSize after adding this block +// - error: returns error if fetch or processing fails +// +// Note: This method stores block data to currentBlockContext but does not immediately append to batch +// Need to call PackCurrentBlock to confirm and append +func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdrawRoot common.Hash) (bool, error) { + if bc.l2Client == nil { + return false, fmt.Errorf("L2 client is nil") + } + + // Verify block number continuity + bc.mu.Lock() + if blockNumber <= bc.lastPackedBlockHeight && blockNumber != 0 { + bc.mu.Unlock() + return false, fmt.Errorf("wrong block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber) + } + if blockNumber > bc.lastPackedBlockHeight+1 { + // Some blocks were skipped, need to clear cache + bc.mu.Unlock() + return false, fmt.Errorf("discontinuous block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber) + } + bc.mu.Unlock() + + // Fetch complete block from L2 client (including transactions) + block, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + return false, fmt.Errorf("failed to fetch block %d: %w", blockNumber, err) + } + + if block == nil { + return false, fmt.Errorf("block is nil for block %d", blockNumber) + } + + header := block.Header() + + // Verify block number matches + if header.Number.Uint64() != blockNumber { + return false, fmt.Errorf("block number mismatch: expected %d, got %d", blockNumber, header.Number.Uint64()) + } + + bc.mu.Lock() + defer bc.mu.Unlock() + + // Ensure BatchData is initialized + if bc.batchData == nil { + bc.batchData = NewBatchData() + } + + // Parse transactions, distinguish L1 and L2 transactions + txsPayload, l1TxHashes, newTotalL1MessagePopped, l2TxNum, err := parsingTxs(block.Transactions(), bc.totalL1MessagePopped) + if err != nil { + return false, fmt.Errorf("failed to parse transactions: %w", err) + } + + l1TxNum := int(newTotalL1MessagePopped - bc.totalL1MessagePopped) + txsNum := l2TxNum + l1TxNum + + // 
Build BlockContext (60 bytes) + blockContext := buildBlockContext(header, txsNum, l1TxNum) + + // Store to current, do not immediately append to batch + bc.currentBlockContext = blockContext + bc.currentTxsPayload = txsPayload + bc.currentL1TxsHashes = l1TxHashes + bc.totalL1MessagePoppedAfterCurBlock = newTotalL1MessagePopped + bc.currentStateRoot = header.Root + bc.currentBlockNumber = blockNumber + bc.currentBlockHash = block.Hash() + bc.currentWithdrawRoot = withdrawRoot + + // Check capacity: if compressed size would exceed limit after adding current block + var exceeded bool + if bc.isBatchUpgraded(header.Time) { + exceeded, err = bc.batchData.WillExceedCompressedSizeLimit(blockContext, txsPayload) + } else { + exceeded, err = bc.batchData.EstimateCompressedSizeWithNewPayload(txsPayload) + } + if err != nil { + return false, fmt.Errorf("failed to estimate compressed size: %w", err) + } + + return exceeded, nil +} + +// PackCurrentBlock packs current block data into batch +// References node's PackCurrentBlock +// Parameters: +// - blockNumber: block number to pack (for verification) +// +// Returns: +// - error: returns error if packing fails +// +// Note: This method should be called after block is confirmed, appending data from currentBlockContext to batch +func (bc *BatchCache) PackCurrentBlock(blockNumber uint64) error { + bc.mu.Lock() + defer bc.mu.Unlock() + + // If current block is empty, return directly + if len(bc.currentBlockContext) == 0 { + return nil // nothing to pack + } + + // Verify block number matches + if bc.currentBlockNumber != blockNumber { + return fmt.Errorf("block number mismatch: expected %d, got %d", blockNumber, bc.currentBlockNumber) + } + + // Ensure BatchData is initialized + if bc.batchData == nil { + bc.batchData = NewBatchData() + } + + // Append current block data to batch + bc.batchData.Append(bc.currentBlockContext, bc.currentTxsPayload, bc.currentL1TxsHashes) + + // Update accumulated state + bc.totalL1MessagePopped = bc.totalL1MessagePoppedAfterCurBlock + bc.withdrawRoot = bc.currentWithdrawRoot + bc.postStateRoot = bc.currentStateRoot + bc.lastPackedBlockHeight = blockNumber + + // Clear current block data + bc.ClearCurrent() + + return nil +} + +// FetchAndCacheHeader fetches complete block from L2 client for specified block number, parses transactions and stores to current +// Note: This method has been replaced by CalculateCapWithProposalBlock and PackCurrentBlock +// Kept for backward compatibility, but recommend using new methods +func (bc *BatchCache) FetchAndCacheHeader(blockNumber uint64, withdrawRoot common.Hash) (*ethtypes.Header, error) { + // Use new method + _, err := bc.CalculateCapWithProposalBlock(blockNumber, withdrawRoot) + if err != nil { + return nil, err + } + + // Pack immediately (backward compatible behavior) + if err := bc.PackCurrentBlock(blockNumber); err != nil { + return nil, err + } + + bc.mu.RLock() + defer bc.mu.RUnlock() + + // Return header (need to re-fetch because current has been cleared) + block, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + return nil, err + } + return block.Header(), nil +} + +// SealBatch seals the currently accumulated batch, generates batch header and stores to sealedBatches +// Parameters: +// - sequencerSetVerifyHash: sequencer set verification hash (obtained from L1 contract) +// - blockTimestamp: current block timestamp (used to determine batch version) +// +// Returns: +// - batchIndex: sealed batch index +// - 
batchHash: batch hash +// - reachedExpectedSize: whether the sealed data size reaches expected value (compressed payload size close to or reaches MaxBlobBytesSize) +// - error: returns error if sealing fails +// +// Note: Sealed batch will be stored in BatchCache's sealedBatches, not sent anywhere +func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimestamp uint64) (uint64, common.Hash, bool, error) { + bc.mu.Lock() + defer bc.mu.Unlock() + + // Ensure batch data is not empty + if bc.batchData == nil || bc.batchData.IsEmpty() { + return 0, common.Hash{}, false, errors.New("failed to seal batch: batch cache is empty") + } + + // Compress data and calculate dataHash + compressedPayload, batchDataHash, err := bc.handleBatchSealing(blockTimestamp) + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to handle batch sealing: %w", err) + } + + // Check if sealed data size reaches expected value + // Expected value: compressed payload size close to or reaches MaxBlobBytesSize + // Use 90% as threshold, i.e., if compressed size >= MaxBlobBytesSize * 0.9, consider it reached expected + threshold := float64(MaxBlobBytesSize) * 0.9 + expectedSizeThreshold := uint64(threshold) + reachedExpectedSize := uint64(len(compressedPayload)) >= expectedSizeThreshold + + // Generate blob sidecar + sidecar, err := MakeBlobTxSidecar(compressedPayload) + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to create blob sidecar: %w", err) + } + + // Create batch header + batchHeader := bc.createBatchHeader(batchDataHash, sidecar, sequencerSetVerifyHash, blockTimestamp) + + // Calculate batch hash + batchHash, err := batchHeader.Hash() + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to hash batch header: %w", err) + } + + // Get batch index + batchIndex, err := batchHeader.BatchIndex() + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to get batch index: %w", err) + } + + // Store sealed batch information + sealedBatch := &SealedBatchInfo{ + BatchHeader: batchHeader, + BatchHash: batchHash, + Sidecar: sidecar, + CompressedPayload: compressedPayload, + DataHash: batchDataHash, + LastBlockNumber: bc.lastPackedBlockHeight, + PostStateRoot: bc.postStateRoot, + WithdrawRoot: bc.withdrawRoot, + TotalL1MessagePopped: bc.totalL1MessagePopped, + SealedAt: time.Now(), + } + + bc.sealedBatches[batchIndex] = sealedBatch + + // Update parent batch information for next batch + bc.parentBatchHeader = &batchHeader + bc.prevStateRoot = bc.postStateRoot + + // Reset currently accumulated batch data + bc.batchData = NewBatchData() + // totalL1MessagePopped keeps accumulated value for next batch, no need to reset + bc.postStateRoot = common.Hash{} + bc.withdrawRoot = common.Hash{} + + return batchIndex, batchHash, reachedExpectedSize, nil +} + +// CheckBatchSizeReached checks if the specified batch's data size reaches expected value +// Parameters: +// - batchIndex: batch index to check +// +// Returns: +// - reached: returns true if batch exists and compressed payload size reaches expected value (>= MaxBlobBytesSize * 0.9) +// - found: whether batch exists +func (bc *BatchCache) CheckBatchSizeReached(batchIndex uint64) (reached bool, found bool) { + bc.mu.RLock() + defer bc.mu.RUnlock() + + sealedBatch, ok := bc.sealedBatches[batchIndex] + if !ok { + return false, false + } + + // Expected value: compressed payload size >= MaxBlobBytesSize * 0.9 + threshold := float64(MaxBlobBytesSize) * 0.9 + expectedSizeThreshold := 
uint64(threshold) + reached = uint64(len(sealedBatch.CompressedPayload)) >= expectedSizeThreshold + + return reached, true +} + +// handleBatchSealing determines which version to use for compression and calculates data hash +func (bc *BatchCache) handleBatchSealing(blockTimestamp uint64) ([]byte, common.Hash, error) { + var ( + compressedPayload []byte + batchDataHash common.Hash + err error + ) + + // Check if upgraded version should be used + if bc.isBatchUpgraded(blockTimestamp) { + compressedPayload, err = CompressBatchBytes(bc.batchData.TxsPayloadV2()) + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to compress upgraded payload: %w", err) + } + + if len(compressedPayload) <= MaxBlobBytesSize { + batchDataHash, err = bc.batchData.DataHashV2() + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to calculate upgraded data hash: %w", err) + } + return compressedPayload, batchDataHash, nil + } + } + + // Fall back to old version + compressedPayload, err = CompressBatchBytes(bc.batchData.TxsPayload()) + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to compress payload: %w", err) + } + batchDataHash = bc.batchData.DataHash() + + return compressedPayload, batchDataHash, nil +} + +// createBatchHeader creates BatchHeader +func (bc *BatchCache) createBatchHeader(dataHash common.Hash, sidecar *ethtypes.BlobTxSidecar, sequencerSetVerifyHash common.Hash, blockTimestamp uint64) BatchHeaderBytes { + blobHashes := []common.Hash{EmptyVersionedHash} + if sidecar != nil && len(sidecar.Blobs) > 0 { + blobHashes = sidecar.BlobHashes() + } + + var parentBatchHeaderTotalL1 uint64 + var parentBatchIndex uint64 + var parentBatchHash common.Hash + + if bc.parentBatchHeader != nil { + parentBatchHeaderTotalL1, _ = bc.parentBatchHeader.TotalL1MessagePopped() + parentBatchIndex, _ = bc.parentBatchHeader.BatchIndex() + parentBatchHash, _ = bc.parentBatchHeader.Hash() + } + + l1MessagePopped := bc.totalL1MessagePopped - parentBatchHeaderTotalL1 + + batchHeaderV0 := BatchHeaderV0{ + BatchIndex: parentBatchIndex + 1, + L1MessagePopped: l1MessagePopped, + TotalL1MessagePopped: bc.totalL1MessagePopped, + DataHash: dataHash, + BlobVersionedHash: blobHashes[0], + PrevStateRoot: bc.prevStateRoot, + PostStateRoot: bc.postStateRoot, + WithdrawalRoot: bc.withdrawRoot, + SequencerSetVerifyHash: sequencerSetVerifyHash, + ParentBatchHash: parentBatchHash, + } + + if bc.isBatchUpgraded(blockTimestamp) { + batchHeaderV1 := BatchHeaderV1{ + BatchHeaderV0: batchHeaderV0, + LastBlockNumber: bc.lastPackedBlockHeight, + } + return batchHeaderV1.Bytes() + } + + return batchHeaderV0.Bytes() +} + +// parsingTxs parses transactions, distinguishes L1 and L2 transactions +func parsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( + txsPayload []byte, + l1TxHashes []common.Hash, + totalL1MessagePopped uint64, + l2TxNum int, + err error, +) { + nextIndex := totalL1MessagePoppedBefore + + for i, tx := range transactions { + if isL1MessageTxType(tx) { + l1TxHashes = append(l1TxHashes, tx.Hash()) + currentIndex := tx.L1MessageQueueIndex() + + if currentIndex < nextIndex { + return nil, nil, 0, 0, fmt.Errorf( + "unexpected batch payload, expected queue index: %d, got: %d. 
transaction hash: %v", + nextIndex, currentIndex, tx.Hash(), + ) + } + + nextIndex = currentIndex + 1 + continue + } + + l2TxNum++ + txBytes, err := tx.MarshalBinary() + if err != nil { + return nil, nil, 0, 0, fmt.Errorf("failed to marshal transaction %d: %w", i, err) + } + txsPayload = append(txsPayload, txBytes...) + } + + totalL1MessagePopped = nextIndex + return +} + +// isL1MessageTxType checks if transaction is L1 message transaction type +func isL1MessageTxType(tx *ethtypes.Transaction) bool { + return tx.Type() == ethtypes.L1MessageTxType +} + +// buildBlockContext builds BlockContext from block header (60 bytes) +// Format: Number(8) || Timestamp(8) || BaseFee(32) || GasLimit(8) || numTxs(2) || numL1Messages(2) +func buildBlockContext(header *ethtypes.Header, txsNum, l1MsgNum int) []byte { + blsBytes := make([]byte, 60) + + // Number (8 bytes) + binary.BigEndian.PutUint64(blsBytes[:8], header.Number.Uint64()) + + // Timestamp (8 bytes) + binary.BigEndian.PutUint64(blsBytes[8:16], header.Time) + + // BaseFee (32 bytes) + if header.BaseFee != nil { + copy(blsBytes[16:48], header.BaseFee.FillBytes(make([]byte, 32))) + } else { + copy(blsBytes[16:48], make([]byte, 32)) + } + + // GasLimit (8 bytes) + binary.BigEndian.PutUint64(blsBytes[48:56], header.GasLimit) + + // numTxs (2 bytes) + binary.BigEndian.PutUint16(blsBytes[56:58], uint16(txsNum)) + + // numL1Messages (2 bytes) + binary.BigEndian.PutUint16(blsBytes[58:60], uint16(l1MsgNum)) + + return blsBytes +} + +func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( + startBlockNum, endBlockNum uint64, +) (*BatchHeaderBytes, error) { + ctx := context.Background() + callOpts := &bind.CallOpts{ + Context: ctx, + } + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) + root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + + // Check capacity and store to current + _, err = bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Pack current block (confirm and append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + } + + sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) + } + // Get the last block's timestamp for packing + lastBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(endBlockNum))) + if err != nil { + return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err) + } + blockTimestamp := lastBlock.Time() + + // Seal batch and generate batchHeader + batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + if err != nil { + return nil, fmt.Errorf("failed to seal batch: %w", err) + } + + // Get the sealed batch header + sealedBatch, found := bc.GetSealedBatch(batchIndex) + if !found { + return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex) + } + + log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + return &sealedBatch.BatchHeader, nil +} + +func 
+
+func (bc *BatchCache) assembleBatchHeaderFromL2Blocks(
+	startBlockNum, endBlockNum uint64,
+) (*BatchHeaderBytes, error) {
+	ctx := context.Background()
+	callOpts := &bind.CallOpts{
+		Context: ctx,
+	}
+	// Fetch blocks in the given range from the L2 client and accumulate them into the batch
+	for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ {
+		callOpts.BlockNumber = new(big.Int).SetUint64(blockNum)
+		root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err)
+		}
+
+		// Check capacity and stage the block in the current-block fields
+		_, err = bc.CalculateCapWithProposalBlock(blockNum, root)
+		if err != nil {
+			return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err)
+		}
+
+		// Pack the current block (confirm and append it to the batch)
+		if err = bc.PackCurrentBlock(blockNum); err != nil {
+			return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err)
+		}
+	}
+
+	sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err)
+	}
+	// Use the last block's timestamp when sealing
+	lastBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(endBlockNum)))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err)
+	}
+	blockTimestamp := lastBlock.Time()
+
+	// Seal the batch and generate the batch header
+	batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp)
+	if err != nil {
+		return nil, fmt.Errorf("failed to seal batch: %w", err)
+	}
+
+	// Fetch the sealed batch header
+	sealedBatch, found := bc.GetSealedBatch(batchIndex)
+	if !found {
+		return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex)
+	}
+
+	log.Info("Sealed batch successfully", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize)
+	return &sealedBatch.BatchHeader, nil
+}
+
+func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error {
+	ctx := context.Background()
+	callOpts := &bind.CallOpts{
+		Context: ctx,
+	}
+	startBlockNum, endBlockNum, ci, err := bc.getUnfinalizedBlockRange()
+	if err != nil {
+		return err
+	}
+
+	// Get the start block once to avoid repeated queries
+	startBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(startBlockNum)))
+	if err != nil {
+		return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err)
+	}
+	startBlockTime := startBlock.Time()
+
+	// Fetch blocks in the given range from the L2 client and accumulate them into the batch
+	for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ {
+		callOpts.BlockNumber = new(big.Int).SetUint64(blockNum)
+		root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts)
+		if err != nil {
+			return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err)
+		}
+
+		// Check capacity and stage the block in the current-block fields
+		exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root)
+		if err != nil {
+			return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err)
+		}
+
+		// Pack the current block (confirm and append it to the batch)
+		if err = bc.PackCurrentBlock(blockNum); err != nil {
+			return fmt.Errorf("failed to pack block %d: %w", blockNum, err)
+		}
+
+		// Get the current block to check the timeout after packing
+		nowBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(blockNum)))
+		if err != nil {
+			return fmt.Errorf("failed to get block %d: %w", blockNum, err)
+		}
+		nowBlockTime := nowBlock.Time()
+
+		// Timeout check: if the elapsed time >= batchTimeOut, the batch must be
+		// sealed immediately, before exceeding the maximum timeout configured in
+		// the gov contract
+		timeout := false
+		if bc.batchTimeOut > 0 {
+			elapsedTime := nowBlockTime - startBlockTime
+			if elapsedTime >= bc.batchTimeOut {
+				timeout = true
+				log.Info("Batch timeout reached, must seal batch", "startBlock", startBlockNum, "currentBlock", blockNum,
+					"elapsedTime", elapsedTime, "batchTimeOut", bc.batchTimeOut)
+			}
+		}
+
+		// Seal the batch when capacity is exceeded, the block interval is reached, or the timeout fires
+		if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout {
+			log.Info("Seal condition met", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout)
+			sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts)
+			if err != nil {
+				return fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err)
+			}
+			lastBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(blockNum)))
+			if err != nil {
+				return fmt.Errorf("failed to get last block %d: %w", blockNum, err)
+			}
+			blockTimestamp := lastBlock.Time()
+			// Seal the batch and generate the batch header
+			batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp)
+			if err != nil {
+				return fmt.Errorf("failed to seal batch: %w", err)
+			}
+			sealedBatch, found := bc.GetSealedBatch(batchIndex)
+			if !found {
+				return fmt.Errorf("sealed batch not found for index %d", batchIndex)
+			}
+			if batchIndex <= ci.Uint64() {
+				// The batch has already been committed; verify the batch hash
+				correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.BatchHash)
+				if err != nil {
+					return err
+				}
+				if !correct {
+					log.Error("batch hash does not match sealed batch", "batchIndex", 
batchIndex, "sealedBatchHash", sealedBatch.BatchHash.String()) + return fmt.Errorf("batch hash does not match sealed batch") + } + } + // Update startBlockNum and startBlockTime for next batch + startBlockNum = blockNum + 1 + if startBlockNum <= endBlockNum { + // Update startBlock and startBlockTime for next batch's timeout calculation + startBlock, err = bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d for next batch: %w", startBlockNum, err) + } + startBlockTime = startBlock.Time() + } + log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + } + } + return nil +} diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go new file mode 100644 index 000000000..d18eca0e4 --- /dev/null +++ b/tx-submitter/batch/batch_cache_test.go @@ -0,0 +1,40 @@ +package batch + +import ( + "testing" + + "github.com/stretchr/testify/require" + "morph-l2/bindings/bindings" +) + +func init() { + var err error + rollupContract, err = bindings.NewRollup(rollupAddr, l1Client) + if err != nil { + panic(err) + } + sequencerContract, err = bindings.NewSequencer(sequencerAddr, l2Client) + if err != nil { + panic(err) + } + l2MessagePasserContract, err = bindings.NewL2ToL1MessagePasser(l2MessagePasserAddr, l2Client) + if err != nil { + panic(err) + } + govContract, err = bindings.NewGov(govAddr, l2Client) + if err != nil { + panic(err) + } +} + +func TestBatchCacheInit(t *testing.T) { + cache := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + err := cache.InitFromRollup() + require.NoError(t, err) +} + +func TestBatchCacheInitByBlockRange(t *testing.T) { + cache := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + err := cache.InitFromRollupByRange() + require.NoError(t, err) +} diff --git a/tx-submitter/batch/batch_header.go b/tx-submitter/batch/batch_header.go new file mode 100644 index 000000000..81d38c691 --- /dev/null +++ b/tx-submitter/batch/batch_header.go @@ -0,0 +1,215 @@ +package batch + +import ( + "encoding/binary" + "errors" + + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" + "github.com/morph-l2/go-ethereum/crypto" +) + +type ( + BatchHeaderBytes []byte +) + +const ( + expectedLengthV0 = 249 + expectedLengthV1 = 257 + + BatchHeaderVersion0 = 0 + BatchHeaderVersion1 = 1 +) + +var ( + ErrInvalidBatchHeaderLength = errors.New("invalid BatchHeaderBytes length") + ErrInvalidBatchHeaderVersion = errors.New("invalid BatchHeaderBytes version") + ErrEmptyBatchHeaderBytes = errors.New("empty BatchHeaderBytes") + ErrNotFoundInBatchHeader = errors.New("not found in BatchHeaderBytes") +) + +func (b BatchHeaderBytes) validate() error { + version, err := b.Version() + if err != nil { + return err + } + switch version { + case BatchHeaderVersion0: + if len(b) != expectedLengthV0 { + return ErrInvalidBatchHeaderLength + } + case BatchHeaderVersion1: + if len(b) != expectedLengthV1 { + return ErrInvalidBatchHeaderLength + } + default: + return ErrInvalidBatchHeaderVersion + } + return nil +} + +func (b BatchHeaderBytes) Bytes() []byte { + return b[:] +} + +func (b BatchHeaderBytes) Hash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return crypto.Keccak256Hash(b), nil +} + +func (b BatchHeaderBytes) Version() (uint8, error) { + 
if len(b) == 0 { + return 0, ErrEmptyBatchHeaderBytes + } + return b[0], nil +} + +func (b BatchHeaderBytes) BatchIndex() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[1:9]), nil +} + +func (b BatchHeaderBytes) L1MessagePopped() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[9:17]), nil +} + +func (b BatchHeaderBytes) TotalL1MessagePopped() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[17:25]), nil +} + +func (b BatchHeaderBytes) DataHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[25:57]), nil +} + +func (b BatchHeaderBytes) BlobVersionedHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[57:89]), nil +} + +func (b BatchHeaderBytes) PrevStateRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[89:121]), nil +} + +func (b BatchHeaderBytes) PostStateRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[121:153]), nil +} + +func (b BatchHeaderBytes) WithdrawalRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[153:185]), nil +} + +func (b BatchHeaderBytes) SequencerSetVerifyHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[185:217]), nil +} + +func (b BatchHeaderBytes) ParentBatchHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[217:249]), nil +} + +func (b BatchHeaderBytes) LastBlockNumber() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + version, _ := b.Version() + if version < 1 { + return 0, errors.New("LastBlockNumber is not available in version 0") + } + return binary.BigEndian.Uint64(b[249:257]), nil +} + +// structed batch header for version 0 +type BatchHeaderV0 struct { + BatchIndex uint64 + L1MessagePopped uint64 + TotalL1MessagePopped uint64 + DataHash common.Hash + BlobVersionedHash common.Hash + PrevStateRoot common.Hash + PostStateRoot common.Hash + WithdrawalRoot common.Hash + SequencerSetVerifyHash common.Hash + ParentBatchHash common.Hash + + //cache + EncodedBytes hexutil.Bytes +} + +func (b BatchHeaderV0) Bytes() BatchHeaderBytes { + if len(b.EncodedBytes) > 0 { + return BatchHeaderBytes(b.EncodedBytes) + } + batchBytes := make([]byte, expectedLengthV0) + batchBytes[0] = BatchHeaderVersion0 + binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex) + binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped) + binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped) + copy(batchBytes[25:], b.DataHash[:]) + copy(batchBytes[57:], b.BlobVersionedHash[:]) + copy(batchBytes[89:], b.PrevStateRoot[:]) + copy(batchBytes[121:], b.PostStateRoot[:]) + copy(batchBytes[153:], b.WithdrawalRoot[:]) + copy(batchBytes[185:], b.SequencerSetVerifyHash[:]) + copy(batchBytes[217:], b.ParentBatchHash[:]) + b.EncodedBytes = batchBytes + return batchBytes +} + +type BatchHeaderV1 struct { + BatchHeaderV0 + LastBlockNumber uint64 + + //cache + EncodedBytes hexutil.Bytes +} + +func (b BatchHeaderV1) Bytes() 
BatchHeaderBytes { + if len(b.EncodedBytes) > 0 { + return BatchHeaderBytes(b.EncodedBytes) + } + batchBytes := make([]byte, expectedLengthV1) + batchBytes[0] = BatchHeaderVersion1 + binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex) + binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped) + binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped) + copy(batchBytes[25:], b.DataHash[:]) + copy(batchBytes[57:], b.BlobVersionedHash[:]) + copy(batchBytes[89:], b.PrevStateRoot[:]) + copy(batchBytes[121:], b.PostStateRoot[:]) + copy(batchBytes[153:], b.WithdrawalRoot[:]) + copy(batchBytes[185:], b.SequencerSetVerifyHash[:]) + copy(batchBytes[217:], b.ParentBatchHash[:]) + binary.BigEndian.PutUint64(batchBytes[249:], b.LastBlockNumber) + + b.EncodedBytes = batchBytes + return batchBytes +} diff --git a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go new file mode 100644 index 000000000..2f99f3bed --- /dev/null +++ b/tx-submitter/batch/batch_query.go @@ -0,0 +1,383 @@ +package batch + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + + "morph-l2/bindings/bindings" + + "github.com/morph-l2/go-ethereum/accounts/abi" + "github.com/morph-l2/go-ethereum/accounts/abi/bind" +) + +// getLastFinalizeBatchHeaderFromRollupByIndex gets the batch header with the specified index from the rollup contract's FinalizeBatch event +// The finalizeBatch function only receives one parameter: batchHeader bytes, so it can be parsed directly from the transaction +// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found +func (bc *BatchCache) getLastFinalizeBatchHeaderFromRollupByIndex(index uint64) (*BatchHeaderBytes, error) { + // Get the current latest block height + latestBlock, err := bc.l1Client.BlockNumber(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to get latest block number: %w", err) + } + + const blockRange = uint64(10000) // Query 10000 blocks each time + var endBlock uint64 = latestBlock + var startBlock uint64 + + // Start from the latest height, query backwards 10000 blocks each time until data is found + for endBlock > 0 { + // Calculate the start block for this query + if endBlock >= blockRange { + startBlock = endBlock - blockRange + 1 + } else { + startBlock = 0 + } + + // Set query options + filterOpts := &bind.FilterOpts{ + Start: startBlock, + End: &endBlock, + } + + // Query the FinalizeBatch event with the corresponding index from the rollup contract + finalizeEventIter, err := bc.rollupContract.FilterFinalizeBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) + if err != nil { + // If query fails, continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + continue + } + + // Iterate through query results + for finalizeEventIter.Next() { + event := finalizeEventIter.Event + // Get transaction hash from event + txHash := event.Raw.TxHash + + // Get transaction details + tx, _, err := bc.l1Client.TransactionByHash(context.Background(), txHash) + if err != nil { + continue // If getting transaction fails, try next event + } + + // Parse finalizeBatch transaction data to get batchHeader + batchHeader, err := parseFinalizeBatchTxData(tx.Data()) + if err != nil { + continue // If parsing fails, try next event + } + + // Verify if batch index matches + batchIndex, err := batchHeader.BatchIndex() + if err != nil { + continue + } + if batchIndex == index { + 
finalizeEventIter.Close() + return &batchHeader, nil + } + } + finalizeEventIter.Close() + + // Continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + } + + return nil, fmt.Errorf("failed to find last finalized batch header for batchIndex %d", index) +} + +// parseFinalizeBatchTxData parses the finalizeBatch transaction's input data to get BatchHeaderBytes +// finalizeBatch(bytes calldata _batchHeader) only receives one parameter: batchHeader bytes +func parseFinalizeBatchTxData(txData []byte) (BatchHeaderBytes, error) { + // Get rollup ABI + rollupAbi, err := bindings.RollupMetaData.GetAbi() + if err != nil { + return nil, err + } + + // Check if method ID is finalizeBatch + finalizeBatchMethod, ok := rollupAbi.Methods["finalizeBatch"] + if !ok { + return nil, errors.New("finalizeBatch method not found in ABI") + } + + // Check if the first 4 bytes of transaction data match the method ID + if len(txData) < 4 { + return nil, errors.New("transaction data too short") + } + + methodID := txData[:4] + if !bytes.Equal(methodID, finalizeBatchMethod.ID) { + return nil, errors.New("transaction is not a finalizeBatch call") + } + + // Parse parameters (only one parameter: batchHeader bytes) + args, err := finalizeBatchMethod.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, err + } + + if len(args) == 0 { + return nil, errors.New("no arguments found in finalizeBatch transaction") + } + + // The first parameter is batchHeader bytes + batchHeaderBytes, ok := args[0].([]byte) + if !ok { + return nil, errors.New("failed to cast batchHeader to []byte") + } + + return BatchHeaderBytes(batchHeaderBytes), nil +} + +// getCommitBatchDataByIndex gets batchDataInput and batchSignatureInput with the specified index from the rollup contract's CommitBatch event +// Reference the implementation of getLastFinalizeBatchHeaderFromRollupByIndex +// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found +func (bc *BatchCache) getCommitBatchDataByIndex(index uint64) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + // Get the current latest block height + latestBlock, err := bc.l1Client.BlockNumber(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("failed to get latest block number: %w", err) + } + + const blockRange = uint64(10000) // Query 10000 blocks each time + var endBlock uint64 = latestBlock + var startBlock uint64 + + // Start from the latest height, query backwards 10000 blocks each time until data is found + for endBlock > 0 { + // Calculate the start block for this query + if endBlock >= blockRange { + startBlock = endBlock - blockRange + 1 + } else { + startBlock = 0 + } + + // Set query options + filterOpts := &bind.FilterOpts{ + Start: startBlock, + End: &endBlock, + } + + // Query the CommitBatch event with the corresponding index from the rollup contract + commitEventIter, err := bc.rollupContract.FilterCommitBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) + if err != nil { + // If query fails, continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + continue + } + + // Iterate through query results + for commitEventIter.Next() { + event := commitEventIter.Event + // Get transaction hash from event + txHash := event.Raw.TxHash + + // Get transaction details + tx, _, err := 
bc.l1Client.TransactionByHash(context.Background(), txHash) + if err != nil { + return nil, nil, fmt.Errorf("failed to get transaction by hash: %w", err) + } + + // Parse commitBatch transaction data to get batchDataInput and batchSignatureInput + batchDataInput, batchSignatureInput, err := parseCommitBatchTxData(tx.Data()) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse commit batch data: %w", err) + } + + // Verify if batch index matches (by checking batchIndex in parentBatchHeader) + if len(batchDataInput.ParentBatchHeader) > 0 { + parentHeader := BatchHeaderBytes(batchDataInput.ParentBatchHeader) + parentBatchIndex, err := parentHeader.BatchIndex() + if err == nil && parentBatchIndex+1 == index { + commitEventIter.Close() + return batchDataInput, batchSignatureInput, nil + } + } + } + commitEventIter.Close() + + // Continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + } + + return nil, nil, fmt.Errorf("failed to find commit batch data for index %d", index) +} + +// parseCommitBatchTxData parses the commitBatch transaction's input data to get BatchDataInput and BatchSignatureInput +func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + // Get rollup ABI + rollupAbi, err := bindings.RollupMetaData.GetAbi() + if err != nil { + return nil, nil, err + } + + // Check if method ID is commitBatch + commitBatchMethod, ok := rollupAbi.Methods["commitBatch"] + if !ok { + return nil, nil, errors.New("commitBatch method not found in ABI") + } + + // Check if the first 4 bytes of transaction data match the method ID + if len(txData) < 4 { + return nil, nil, errors.New("transaction data too short") + } + + methodID := txData[:4] + if !bytes.Equal(methodID, commitBatchMethod.ID) { + // Try commitBatchWithProof + commitBatchWithProofMethod, ok := rollupAbi.Methods["commitBatchWithProof"] + if !ok { + return nil, nil, errors.New("commitBatchWithProof method not found in ABI") + } + if bytes.Equal(methodID, commitBatchWithProofMethod.ID) { + // Use commitBatchWithProof method to parse + return parseCommitBatchWithProofTxData(txData, rollupAbi) + } + return nil, nil, errors.New("transaction is not a commit batch or commitBatchWithProof") + } + + // Parse parameters + args, err := commitBatchMethod.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, nil, err + } + + // The first parameter is BatchDataInput + // Note: The struct returned by ABI parsing has JSON tags, need to use matching struct definition + batchDataInputStruct := args[0].(struct { + Version uint8 `json:"version"` + ParentBatchHeader []uint8 `json:"parentBatchHeader"` + LastBlockNumber uint64 `json:"lastBlockNumber"` + NumL1Messages uint16 `json:"numL1Messages"` + PrevStateRoot [32]uint8 `json:"prevStateRoot"` + PostStateRoot [32]uint8 `json:"postStateRoot"` + WithdrawalRoot [32]uint8 `json:"withdrawalRoot"` + }) + + // Convert []uint8 to []byte + parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader)) + for i, v := range batchDataInputStruct.ParentBatchHeader { + parentBatchHeader[i] = byte(v) + } + + batchDataInput := &bindings.IRollupBatchDataInput{ + Version: batchDataInputStruct.Version, + ParentBatchHeader: parentBatchHeader, + LastBlockNumber: batchDataInputStruct.LastBlockNumber, + NumL1Messages: batchDataInputStruct.NumL1Messages, + PrevStateRoot: batchDataInputStruct.PrevStateRoot, + PostStateRoot: 
batchDataInputStruct.PostStateRoot, + WithdrawalRoot: batchDataInputStruct.WithdrawalRoot, + } + + // The second parameter is BatchSignatureInput + batchSignatureInputStruct := args[1].(struct { + SignedSequencersBitmap *big.Int `json:"signedSequencersBitmap"` + SequencerSets []uint8 `json:"sequencerSets"` + Signature []uint8 `json:"signature"` + }) + + // Convert []uint8 to []byte + sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets)) + for i, v := range batchSignatureInputStruct.SequencerSets { + sequencerSets[i] = byte(v) + } + signature := make([]byte, len(batchSignatureInputStruct.Signature)) + for i, v := range batchSignatureInputStruct.Signature { + signature[i] = byte(v) + } + + batchSignatureInput := &bindings.IRollupBatchSignatureInput{ + SignedSequencersBitmap: batchSignatureInputStruct.SignedSequencersBitmap, + SequencerSets: sequencerSets, + Signature: signature, + } + + return batchDataInput, batchSignatureInput, nil +} + +// parseCommitBatchWithProofTxData parses the commitBatchWithProof transaction's input data +// commitBatchWithProof has 4 parameters: batchDataInput, batchSignatureInput, _batchHeader, _batchProof +func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + commitBatchWithProofMethod, ok := rollupAbi.Methods["commitBatchWithProof"] + if !ok { + return nil, nil, errors.New("commitBatchWithProof method not found in ABI") + } + + // Parse parameters + args, err := commitBatchWithProofMethod.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, nil, err + } + + // The first parameter is BatchDataInput + // Note: The struct returned by ABI parsing has JSON tags, need to use matching struct definition + batchDataInputStruct := args[0].(struct { + Version uint8 `json:"version"` + ParentBatchHeader []uint8 `json:"parentBatchHeader"` + LastBlockNumber uint64 `json:"lastBlockNumber"` + NumL1Messages uint16 `json:"numL1Messages"` + PrevStateRoot [32]uint8 `json:"prevStateRoot"` + PostStateRoot [32]uint8 `json:"postStateRoot"` + WithdrawalRoot [32]uint8 `json:"withdrawalRoot"` + }) + + // Convert []uint8 to []byte + parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader)) + for i, v := range batchDataInputStruct.ParentBatchHeader { + parentBatchHeader[i] = byte(v) + } + + batchDataInput := &bindings.IRollupBatchDataInput{ + Version: batchDataInputStruct.Version, + ParentBatchHeader: parentBatchHeader, + LastBlockNumber: batchDataInputStruct.LastBlockNumber, + NumL1Messages: batchDataInputStruct.NumL1Messages, + PrevStateRoot: batchDataInputStruct.PrevStateRoot, + PostStateRoot: batchDataInputStruct.PostStateRoot, + WithdrawalRoot: batchDataInputStruct.WithdrawalRoot, + } + + // The second parameter is BatchSignatureInput + batchSignatureInputStruct := args[1].(struct { + SignedSequencersBitmap *big.Int `json:"signedSequencersBitmap"` + SequencerSets []uint8 `json:"sequencerSets"` + Signature []uint8 `json:"signature"` + }) + + // Convert []uint8 to []byte + sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets)) + for i, v := range batchSignatureInputStruct.SequencerSets { + sequencerSets[i] = byte(v) + } + signature := make([]byte, len(batchSignatureInputStruct.Signature)) + for i, v := range batchSignatureInputStruct.Signature { + signature[i] = byte(v) + } + + batchSignatureInput := &bindings.IRollupBatchSignatureInput{ + SignedSequencersBitmap: batchSignatureInputStruct.SignedSequencersBitmap, + 
SequencerSets: sequencerSets, + Signature: signature, + } + + // The third parameter is _batchHeader (bytes) + // The fourth parameter is _batchProof (bytes) + // These parameters don't need to be returned, but can be used for verification + + return batchDataInput, batchSignatureInput, nil +} diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go new file mode 100644 index 000000000..28a3eac4b --- /dev/null +++ b/tx-submitter/batch/batch_restart_test.go @@ -0,0 +1,674 @@ +package batch + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math/big" + "testing" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/iface" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +var ( + ErrBatchNotFound = errors.New("batch not found") +) + +var ( + rollupAddr = common.HexToAddress("0xd1827e85d8149013778b5675f9d1d7fb750ae31a") + sequencerAddr = common.HexToAddress("0x5300000000000000000000000000000000000017") + l2MessagePasserAddr = common.HexToAddress("0x5300000000000000000000000000000000000001") + govAddr = common.HexToAddress("0x5300000000000000000000000000000000000004") + + l1ClientRpc = "https://old-empty-sound.ethereum-hoodi.quiknode.pro/0f479a6bd068da530e0afdb36755f94c9facef17" + l2ClientRpc = "http://l2-qa-morph-senquencer-1.bitkeep.tools" + l1Client, _ = ethclient.Dial(l1ClientRpc) + l2Client, _ = ethclient.Dial(l2ClientRpc) + + rollupContract *bindings.Rollup + sequencerContract *bindings.Sequencer + l2MessagePasserContract *bindings.L2ToL1MessagePasser + govContract *bindings.Gov +) + +func init() { + var err error + rollupContract, err = bindings.NewRollup(rollupAddr, l1Client) + if err != nil { + panic(err) + } + sequencerContract, err = bindings.NewSequencer(sequencerAddr, l2Client) + if err != nil { + panic(err) + } + l2MessagePasserContract, err = bindings.NewL2ToL1MessagePasser(l2MessagePasserAddr, l2Client) + if err != nil { + panic(err) + } + govContract, err = bindings.NewGov(govAddr, l2Client) + if err != nil { + panic(err) + } +} + +func Test_CommitBatchParse(t *testing.T) { + data, signature, err := getCommitBatchDataByIndex(5357) + require.NoError(t, err) + t.Log("data", data) + t.Log("signature", signature) + t.Log("data.Version", data.Version) + t.Log("data.ParentBatchHeader", hex.EncodeToString(data.ParentBatchHeader)) + t.Log("data.LastBlockNumber", data.LastBlockNumber) + t.Log("data.NumL1Messages", data.NumL1Messages) + t.Log("data.PrevStateRoot", hex.EncodeToString(data.PrevStateRoot[:])) + t.Log("data.PostStateRoot", hex.EncodeToString(data.PostStateRoot[:])) + t.Log("data.WithdrawalRoot", hex.EncodeToString(data.WithdrawalRoot[:])) +} + +func TestBatchRestartInit(t *testing.T) { + sequencerSetVerifyHash, err := sequencerContract.SequencerSetVerifyHash(nil) + require.NoError(t, err) + t.Log("sequencer set verify hash", hex.EncodeToString(sequencerSetVerifyHash[:])) + ci, fi := getInfosFromContract() + t.Log("commit index", ci, " ", "finalize index", fi) + bc := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + startBlockNum, endBlockNum, err := getFirstUnFinalizeBatchBlockNumRange(fi) + require.NoError(t, err) + startBlockNum = new(big.Int).Add(startBlockNum, new(big.Int).SetUint64(1)) + t.Log("start block number", startBlockNum, "end block number", 
endBlockNum) + + // Get the latest finalized batch header + headerBytes, err := getLastFinalizeBatchHeaderByIndex(fi.Uint64()) + require.NoError(t, err, "failed to get last finalized batch header") + parentStateRoot, err := headerBytes.PostStateRoot() + require.NoError(t, err, "failed to get post state root") + + // Initialize BatchCache parent batch information + // prevStateRoot should be the parent batch's postStateRoot (i.e., the current finalized batch's postStateRoot) + bc.parentBatchHeader = headerBytes + bc.prevStateRoot = parentStateRoot // The current batch's prevStateRoot is the parent batch's postStateRoot + bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() + require.NoError(t, err) + bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() + require.NoError(t, err) + t.Logf("Restored batch header: batchIndex=%d, parentStateRoot=%x (will be used as prevStateRoot for next batch)", + fi, parentStateRoot[:]) + + // Query the first unfinalized batch's block range from rollup contract + firstUnfinalizedIndex := fi.Uint64() + 1 + t.Logf("First unfinalize batch index: %d, block range: %d - %d", firstUnfinalizedIndex, startBlockNum.Uint64(), endBlockNum.Uint64()) + + // Fetch blocks from L2 client in this range and assemble batchHeader + assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetVerifyHash, l2Client, l2MessagePasserContract) + require.NoError(t, err, "failed to assemble batch header from L2 blocks") + t.Log("assembled batch header success", hex.EncodeToString(assembledBatchHeader.Bytes())) + // Verify the assembled batchHeader + assembledBatchIndex, err := assembledBatchHeader.BatchIndex() + require.NoError(t, err) + require.Equal(t, firstUnfinalizedIndex, assembledBatchIndex, "assembled batch index should match") + assembledBatchHash, err := assembledBatchHeader.Hash() + require.NoError(t, err) + + batchDataInput, batchSignatureInput, err := getCommitBatchDataByIndex(firstUnfinalizedIndex) + require.NoError(t, err) + t.Logf("batchDataInput.Version=%d", batchDataInput.Version) + require.Equal(t, hex.EncodeToString(batchDataInput.ParentBatchHeader), hex.EncodeToString(headerBytes.Bytes())) + t.Logf("batchDataInput.LastBlockNumber=%d, %d", batchDataInput.LastBlockNumber, endBlockNum) + l1MsgNum, err := assembledBatchHeader.L1MessagePopped() + require.NoError(t, err) + require.Equal(t, uint64(batchDataInput.NumL1Messages), l1MsgNum) + prevStateRoot, err := assembledBatchHeader.PrevStateRoot() + require.NoError(t, err) + require.Equal(t, batchDataInput.PrevStateRoot[:], prevStateRoot.Bytes()) + postStateRoot, err := assembledBatchHeader.PostStateRoot() + require.NoError(t, err) + require.Equal(t, batchDataInput.PostStateRoot[:], postStateRoot.Bytes()) + + // t.Logf("batchDataInput.WithdrawalRoot=%x", batchDataInput.WithdrawalRoot) + // Perform keccak256 hash on SequencerSets + sequencerSetsHash := crypto.Keccak256Hash(batchSignatureInput.SequencerSets) + t.Logf("batchSignatureInput.SequencerSets keccak256 hash=%s", hex.EncodeToString(sequencerSetsHash[:])) + require.Equal(t, sequencerSetsHash.Bytes(), sequencerSetVerifyHash[:], "sequencer sets hash should match") + + batchHeaderBytes, err := getBatchHeaderFromGeth(firstUnfinalizedIndex) + require.NoError(t, err) + + // Compare the batch header from Geth with the assembled batch header + compareAndReportBatchHeaders(t, assembledBatchHeader, batchHeaderBytes, "assembled", "from Geth") + + // Compare assembledBatchHeader with the batch header 
built from commitBatch data
+	// Note: batchDataInput and batchSignatureInput can be used to verify the data, but a complete batch header must be built from them first
+	compareBatchHeaderWithCommitData(t, assembledBatchHeader, batchDataInput, batchSignatureInput, sequencerSetVerifyHash)
+
+	committedBatchHash, err := rollupContract.CommittedBatches(nil, new(big.Int).SetUint64(assembledBatchIndex))
+	require.NoError(t, err)
+	require.Equal(t, assembledBatchHash, common.Hash(committedBatchHash), "assembled batch hash should match")
+	t.Logf("Successfully assembled batch hash: %x", assembledBatchHash)
+	t.Logf("Successfully assembled batch header: batchIndex=%d", assembledBatchIndex)
+}
+
+// compareAndReportBatchHeaders compares two batch headers and reports all mismatched fields
+func compareAndReportBatchHeaders(t *testing.T, batchHeader1 *BatchHeaderBytes, batchHeader2 *BatchHeaderBytes, name1, name2 string) {
+	var mismatches []string
+
+	// Compare BatchIndex
+	index1, err1 := batchHeader1.BatchIndex()
+	index2, err2 := batchHeader2.BatchIndex()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get BatchIndex: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if index1 != index2 {
+		mismatches = append(mismatches, fmt.Sprintf("BatchIndex: %s=%d, %s=%d", name1, index1, name2, index2))
+	} else {
+		t.Logf("✓ BatchIndex: %d (match)", index1)
+	}
+
+	// Compare L1MessagePopped
+	l1Msg1, err1 := batchHeader1.L1MessagePopped()
+	l1Msg2, err2 := batchHeader2.L1MessagePopped()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get L1MessagePopped: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if l1Msg1 != l1Msg2 {
+		mismatches = append(mismatches, fmt.Sprintf("L1MessagePopped: %s=%d, %s=%d", name1, l1Msg1, name2, l1Msg2))
+	} else {
+		t.Logf("✓ L1MessagePopped: %d (match)", l1Msg1)
+	}
+
+	// Compare TotalL1MessagePopped
+	totalL1Msg1, err1 := batchHeader1.TotalL1MessagePopped()
+	totalL1Msg2, err2 := batchHeader2.TotalL1MessagePopped()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get TotalL1MessagePopped: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if totalL1Msg1 != totalL1Msg2 {
+		mismatches = append(mismatches, fmt.Sprintf("TotalL1MessagePopped: %s=%d, %s=%d", name1, totalL1Msg1, name2, totalL1Msg2))
+	} else {
+		t.Logf("✓ TotalL1MessagePopped: %d (match)", totalL1Msg1)
+	}
+
+	// Compare DataHash
+	dataHash1, err1 := batchHeader1.DataHash()
+	dataHash2, err2 := batchHeader2.DataHash()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get DataHash: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if dataHash1 != dataHash2 {
+		mismatches = append(mismatches, fmt.Sprintf("DataHash: %s=%x, %s=%x", name1, dataHash1, name2, dataHash2))
+	} else {
+		t.Logf("✓ DataHash: %x (match)", dataHash1)
+	}
+
+	// Compare BlobVersionedHash
+	blobHash1, err1 := batchHeader1.BlobVersionedHash()
+	blobHash2, err2 := batchHeader2.BlobVersionedHash()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get BlobVersionedHash: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if blobHash1 != blobHash2 {
+		mismatches = append(mismatches, fmt.Sprintf("BlobVersionedHash: %s=%x, %s=%x", name1, blobHash1, name2, blobHash2))
+	} else {
+		t.Logf("✓ BlobVersionedHash: %x (match)", blobHash1)
+	}
+
+	// Compare PrevStateRoot
+	prevStateRoot1, err1 := batchHeader1.PrevStateRoot()
+	prevStateRoot2, err2 := batchHeader2.PrevStateRoot()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get PrevStateRoot: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if prevStateRoot1 != prevStateRoot2 {
+		mismatches = append(mismatches, fmt.Sprintf("PrevStateRoot: %s=%x, %s=%x", name1, prevStateRoot1, name2, prevStateRoot2))
+	} else {
+		t.Logf("✓ PrevStateRoot: %x (match)", prevStateRoot1)
+	}
+
+	// Compare PostStateRoot
+	postStateRoot1, err1 := batchHeader1.PostStateRoot()
+	postStateRoot2, err2 := batchHeader2.PostStateRoot()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get PostStateRoot: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if postStateRoot1 != postStateRoot2 {
+		mismatches = append(mismatches, fmt.Sprintf("PostStateRoot: %s=%x, %s=%x", name1, postStateRoot1, name2, postStateRoot2))
+	} else {
+		t.Logf("✓ PostStateRoot: %x (match)", postStateRoot1)
+	}
+
+	// Compare WithdrawalRoot
+	withdrawRoot1, err1 := batchHeader1.WithdrawalRoot()
+	withdrawRoot2, err2 := batchHeader2.WithdrawalRoot()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get WithdrawalRoot: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if withdrawRoot1 != withdrawRoot2 {
+		mismatches = append(mismatches, fmt.Sprintf("WithdrawalRoot: %s=%x, %s=%x", name1, withdrawRoot1, name2, withdrawRoot2))
+	} else {
+		t.Logf("✓ WithdrawalRoot: %x (match)", withdrawRoot1)
+	}
+
+	// Compare SequencerSetVerifyHash
+	seqHash1, err1 := batchHeader1.SequencerSetVerifyHash()
+	seqHash2, err2 := batchHeader2.SequencerSetVerifyHash()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get SequencerSetVerifyHash: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if seqHash1 != seqHash2 {
+		mismatches = append(mismatches, fmt.Sprintf("SequencerSetVerifyHash: %s=%x, %s=%x", name1, seqHash1, name2, seqHash2))
+	} else {
+		t.Logf("✓ SequencerSetVerifyHash: %x (match)", seqHash1)
+	}
+
+	// Compare ParentBatchHash
+	parentHash1, err1 := batchHeader1.ParentBatchHash()
+	parentHash2, err2 := batchHeader2.ParentBatchHash()
+	if err1 != nil || err2 != nil {
+		t.Errorf("Failed to get ParentBatchHash: err1=%v, err2=%v", err1, err2)
+		return
+	}
+	if parentHash1 != parentHash2 {
+		mismatches = append(mismatches, fmt.Sprintf("ParentBatchHash: %s=%x, %s=%x", name1, parentHash1, name2, parentHash2))
+	} else {
+		t.Logf("✓ ParentBatchHash: %x (match)", parentHash1)
+	}
+
+	// Compare LastBlockNumber (if supported)
+	lastBlock1, err1 := batchHeader1.LastBlockNumber()
+	lastBlock2, err2 := batchHeader2.LastBlockNumber()
+	if err1 == nil && err2 == nil {
+		if lastBlock1 != lastBlock2 {
+			mismatches = append(mismatches, fmt.Sprintf("LastBlockNumber: %s=%d, %s=%d", name1, lastBlock1, name2, lastBlock2))
+		} else {
+			t.Logf("✓ LastBlockNumber: %d (match)", lastBlock1)
+		}
+	}
+
+	// Report mismatched fields
+	if len(mismatches) > 0 {
+		t.Errorf("\n❌ Found %d mismatched fields between %s and %s:", len(mismatches), name1, name2)
+		for _, mismatch := range mismatches {
+			t.Errorf(" - %s", mismatch)
+		}
+	} else {
+		t.Logf("\n✅ All fields match between %s and %s", name1, name2)
+	}
+}
+
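// Illustrative sketch (not part of the patch): the per-field blocks above all
// follow one shape -- call a getter on both headers, report errors, record a
// mismatch. The uint64-valued getters could be compared table-driven instead;
// this assumes only the getter signatures already used in this test.
func compareUint64HeaderFields(t *testing.T, h1, h2 *BatchHeaderBytes, name1, name2 string) []string {
	var mismatches []string
	fields := []struct {
		name string
		get  func(*BatchHeaderBytes) (uint64, error)
	}{
		{"BatchIndex", (*BatchHeaderBytes).BatchIndex},
		{"L1MessagePopped", (*BatchHeaderBytes).L1MessagePopped},
		{"TotalL1MessagePopped", (*BatchHeaderBytes).TotalL1MessagePopped},
	}
	for _, f := range fields {
		v1, err1 := f.get(h1)
		v2, err2 := f.get(h2)
		if err1 != nil || err2 != nil {
			t.Errorf("Failed to get %s: err1=%v, err2=%v", f.name, err1, err2)
			continue
		}
		if v1 != v2 {
			mismatches = append(mismatches, fmt.Sprintf("%s: %s=%d, %s=%d", f.name, name1, v1, name2, v2))
		} else {
			t.Logf("✓ %s: %d (match)", f.name, v1)
		}
	}
	return mismatches
}
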
fmt.Sprintf("PrevStateRoot: %s=%x, %s=%x", name1, prevStateRoot1, name2, prevStateRoot2)) + } else { + t.Logf("✓ PrevStateRoot: %x (match)", prevStateRoot1) + } + + // 比较 PostStateRoot + postStateRoot1, err1 := batchHeader1.PostStateRoot() + postStateRoot2, err2 := batchHeader2.PostStateRoot() + if err1 != nil || err2 != nil { + t.Errorf("Failed to get PostStateRoot: err1=%v, err2=%v", err1, err2) + return + } + if postStateRoot1 != postStateRoot2 { + mismatches = append(mismatches, fmt.Sprintf("PostStateRoot: %s=%x, %s=%x", name1, postStateRoot1, name2, postStateRoot2)) + } else { + t.Logf("✓ PostStateRoot: %x (match)", postStateRoot1) + } + + // 比较 WithdrawalRoot + withdrawRoot1, err1 := batchHeader1.WithdrawalRoot() + withdrawRoot2, err2 := batchHeader2.WithdrawalRoot() + if err1 != nil || err2 != nil { + t.Errorf("Failed to get WithdrawalRoot: err1=%v, err2=%v", err1, err2) + return + } + if withdrawRoot1 != withdrawRoot2 { + mismatches = append(mismatches, fmt.Sprintf("WithdrawalRoot: %s=%x, %s=%x", name1, withdrawRoot1, name2, withdrawRoot2)) + } else { + t.Logf("✓ WithdrawalRoot: %x (match)", withdrawRoot1) + } + + // 比较 SequencerSetVerifyHash + seqHash1, err1 := batchHeader1.SequencerSetVerifyHash() + seqHash2, err2 := batchHeader2.SequencerSetVerifyHash() + if err1 != nil || err2 != nil { + t.Errorf("Failed to get SequencerSetVerifyHash: err1=%v, err2=%v", err1, err2) + return + } + if seqHash1 != seqHash2 { + mismatches = append(mismatches, fmt.Sprintf("SequencerSetVerifyHash: %s=%x, %s=%x", name1, seqHash1, name2, seqHash2)) + } else { + t.Logf("✓ SequencerSetVerifyHash: %x (match)", seqHash1) + } + + // Compare ParentBatchHash + parentHash1, err1 := batchHeader1.ParentBatchHash() + parentHash2, err2 := batchHeader2.ParentBatchHash() + if err1 != nil || err2 != nil { + t.Errorf("Failed to get ParentBatchHash: err1=%v, err2=%v", err1, err2) + return + } + if parentHash1 != parentHash2 { + mismatches = append(mismatches, fmt.Sprintf("ParentBatchHash: %s=%x, %s=%x", name1, parentHash1, name2, parentHash2)) + } else { + t.Logf("✓ ParentBatchHash: %x (match)", parentHash1) + } + + // Compare LastBlockNumber (if supported) + lastBlock1, err1 := batchHeader1.LastBlockNumber() + lastBlock2, err2 := batchHeader2.LastBlockNumber() + if err1 == nil && err2 == nil { + if lastBlock1 != lastBlock2 { + mismatches = append(mismatches, fmt.Sprintf("LastBlockNumber: %s=%d, %s=%d", name1, lastBlock1, name2, lastBlock2)) + } else { + t.Logf("✓ LastBlockNumber: %d (match)", lastBlock1) + } + } + + // Report mismatched fields + if len(mismatches) > 0 { + t.Errorf("\n❌ Found %d mismatched fields between %s and %s:", len(mismatches), name1, name2) + for _, mismatch := range mismatches { + t.Errorf(" - %s", mismatch) + } + } else { + t.Logf("\n✅ All fields match between %s and %s", name1, name2) + } +} + +// compareBatchHeaderWithCommitData compares the assembled batch header with information extracted from commitBatch data +func compareBatchHeaderWithCommitData(t *testing.T, assembledBatchHeader *BatchHeaderBytes, batchDataInput *bindings.IRollupBatchDataInput, batchSignatureInput *bindings.IRollupBatchSignatureInput, sequencerSetVerifyHash common.Hash) { + t.Logf("\n=== Comparing assembled batch header with commitBatch data ===") + + // Compare Version + version, err := assembledBatchHeader.Version() + require.NoError(t, err) + if uint8(version) != batchDataInput.Version { + t.Errorf("❌ Version mismatch: assembled=%d, commitBatch=%d", version, batchDataInput.Version) + } else { + t.Logf("✓ Version: %d 
(match)", version) + } + + // Compare ParentBatchHeader + // Note: We should use batch index instead of version, but we need to get batch index from assembledBatchHeader + batchIndex, err := assembledBatchHeader.BatchIndex() + if err == nil && batchIndex > 0 { + parentBatchHeader, err := getLastFinalizeBatchHeaderByIndex(batchIndex - 1) + if err == nil { + parentBytes := parentBatchHeader.Bytes() + if !bytes.Equal(parentBytes, batchDataInput.ParentBatchHeader) { + t.Errorf("❌ ParentBatchHeader mismatch: assembled=%x, commitBatch=%x", parentBytes[:min(32, len(parentBytes))], batchDataInput.ParentBatchHeader[:min(32, len(batchDataInput.ParentBatchHeader))]) + } else { + t.Logf("✓ ParentBatchHeader: match") + } + } + } + + // Compare LastBlockNumber + lastBlock, err := assembledBatchHeader.LastBlockNumber() + if err == nil { + if lastBlock != batchDataInput.LastBlockNumber { + t.Errorf("❌ LastBlockNumber mismatch: assembled=%d, commitBatch=%d", lastBlock, batchDataInput.LastBlockNumber) + } else { + t.Logf("✓ LastBlockNumber: %d (match)", lastBlock) + } + } + + // Compare NumL1Messages + l1MsgPopped, err := assembledBatchHeader.L1MessagePopped() + require.NoError(t, err) + if l1MsgPopped != uint64(batchDataInput.NumL1Messages) { + t.Errorf("❌ NumL1Messages mismatch: assembled=%d, commitBatch=%d", l1MsgPopped, batchDataInput.NumL1Messages) + } else { + t.Logf("✓ NumL1Messages: %d (match)", l1MsgPopped) + } + + // 比较 PrevStateRoot + prevStateRoot, err := assembledBatchHeader.PrevStateRoot() + require.NoError(t, err) + prevStateRootFromCommit := common.BytesToHash(batchDataInput.PrevStateRoot[:]) + if prevStateRoot != prevStateRootFromCommit { + t.Errorf("❌ PrevStateRoot mismatch: assembled=%x, commitBatch=%x", prevStateRoot, prevStateRootFromCommit) + } else { + t.Logf("✓ PrevStateRoot: %x (match)", prevStateRoot) + } + + // 比较 PostStateRoot + postStateRoot, err := assembledBatchHeader.PostStateRoot() + require.NoError(t, err) + postStateRootFromCommit := common.BytesToHash(batchDataInput.PostStateRoot[:]) + if postStateRoot != postStateRootFromCommit { + t.Errorf("❌ PostStateRoot mismatch: assembled=%x, commitBatch=%x", postStateRoot, postStateRootFromCommit) + } else { + t.Logf("✓ PostStateRoot: %x (match)", postStateRoot) + } + + // 比较 WithdrawalRoot + withdrawRoot, err := assembledBatchHeader.WithdrawalRoot() + require.NoError(t, err) + withdrawRootFromCommit := common.BytesToHash(batchDataInput.WithdrawalRoot[:]) + if withdrawRoot != withdrawRootFromCommit { + t.Errorf("❌ WithdrawalRoot mismatch: assembled=%x, commitBatch=%x", withdrawRoot, withdrawRootFromCommit) + } else { + t.Logf("✓ WithdrawalRoot: %x (match)", withdrawRoot) + } + + // 比较 SequencerSetVerifyHash + sequencerSetsHash := crypto.Keccak256Hash(batchSignatureInput.SequencerSets) + seqHash, err := assembledBatchHeader.SequencerSetVerifyHash() + require.NoError(t, err) + if seqHash != sequencerSetsHash { + t.Errorf("❌ SequencerSetVerifyHash mismatch: assembled=%x, from SequencerSets=%x", seqHash, sequencerSetsHash) + } else { + t.Logf("✓ SequencerSetVerifyHash: %x (match)", seqHash) + } + + if seqHash != sequencerSetVerifyHash { + t.Errorf("❌ SequencerSetVerifyHash mismatch with provided hash: assembled=%x, provided=%x", seqHash, sequencerSetVerifyHash) + } else { + t.Logf("✓ SequencerSetVerifyHash matches provided hash: %x", sequencerSetVerifyHash) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func getBatchHeaderFromGeth(index uint64) (*BatchHeaderBytes, error) { + batch, err := 
+// getLastFinalizeBatchHeaderByIndex gets the batch header with the specified index from the rollup contract's FinalizeBatch event
+// The finalizeBatch function only receives one parameter: batchHeader bytes, so it can be parsed directly from the transaction
+// The query starts from the latest block and scans backwards in windows of 10000 blocks until the data is found
+func getLastFinalizeBatchHeaderByIndex(index uint64) (*BatchHeaderBytes, error) {
+	// Get the current latest block height
+	latestBlock, err := l1Client.BlockNumber(context.Background())
+	if err != nil {
+		return nil, fmt.Errorf("failed to get latest block number: %w", err)
+	}
+
+	const blockRange = uint64(10000) // Query 10000 blocks each time
+	var endBlock uint64 = latestBlock
+	var startBlock uint64
+
+	// Start from the latest height, query backwards 10000 blocks each time until data is found
+	for endBlock > 0 {
+		// Calculate the start block for this query
+		if endBlock >= blockRange {
+			startBlock = endBlock - blockRange + 1
+		} else {
+			startBlock = 0
+		}
+
+		// Set query options
+		filterOpts := &bind.FilterOpts{
+			Start: startBlock,
+			End:   &endBlock,
+		}
+
+		// Query the FinalizeBatch event with the corresponding index from the rollup contract
+		finalizeEventIter, err := rollupContract.FilterFinalizeBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil)
+		if err != nil {
+			// If the query fails, continue querying backwards
+			if endBlock < blockRange {
+				break // Already queried to block 0, exit loop
+			}
+			endBlock = startBlock - 1
+			continue
+		}
+
+		// Iterate through query results
+		for finalizeEventIter.Next() {
+			event := finalizeEventIter.Event
+			// Get transaction hash from event
+			txHash := event.Raw.TxHash
+
+			// Get transaction details
+			tx, _, err := l1Client.TransactionByHash(context.Background(), txHash)
+			if err != nil {
+				continue // If getting the transaction fails, try the next event
+			}
+
+			// Parse finalizeBatch transaction data to get batchHeader
+			batchHeader, err := parseFinalizeBatchTxData(tx.Data())
+			if err != nil {
+				continue // If parsing fails, try the next event
+			}
+
+			// Verify if batch index matches
+			batchIndex, err := batchHeader.BatchIndex()
+			if err != nil {
+				continue
+			}
+			if batchIndex == index {
+				finalizeEventIter.Close()
+				return &batchHeader, nil
+			}
+		}
+		finalizeEventIter.Close()
+
+		// Continue querying backwards
+		if endBlock < blockRange {
+			break // Already queried to block 0, exit loop
+		}
+		endBlock = startBlock - 1
+	}
+
+	return nil, ErrBatchNotFound
+}
+
+func getInfosFromContract() (*big.Int, *big.Int) {
+	latestCommitBatchIndex, _ := rollupContract.LastCommittedBatchIndex(nil)
+	lastFinalizedBatchIndex, _ := rollupContract.LastFinalizedBatchIndex(nil)
+	return latestCommitBatchIndex, lastFinalizedBatchIndex
+}
+
+func getFirstUnFinalizeBatchBlockNumRange(lastFinalizedBatchIndex *big.Int) (*big.Int, *big.Int, error) {
+	fis, err := rollupContract.BatchDataStore(nil, lastFinalizedBatchIndex)
+	if err != nil {
+		return nil, nil, err
+	}
+	ufis, err := rollupContract.BatchDataStore(nil, new(big.Int).SetUint64(lastFinalizedBatchIndex.Uint64()+1))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return fis.BlockNumber, ufis.BlockNumber, nil
+}
+
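// Illustrative sketch (not part of the patch): getLastFinalizeBatchHeaderByIndex
// above and getCommitBatchDataByIndex below duplicate the same backward
// pagination loop. Its shape, factored into a standalone helper with a
// hypothetical name, looks like this: walk [0, latest] in fixed windows from
// the tip, stopping as soon as visit reports a hit.
func scanLogsBackward(latest, window uint64, visit func(start, end uint64) (found bool, err error)) (bool, error) {
	end := latest
	for {
		var start uint64
		if end >= window {
			start = end - window + 1
		}
		found, err := visit(start, end)
		if err != nil || found {
			return found, err
		}
		if start == 0 {
			return false, nil // scanned down to genesis without a hit
		}
		end = start - 1
	}
}
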
+// getCommitBatchDataByIndex gets batchDataInput and batchSignatureInput with the specified index from the rollup contract's CommitBatch event
+// Mirrors the implementation of getLastFinalizeBatchHeaderByIndex
+// The query starts from the latest block and scans backwards in windows of 10000 blocks until the data is found
+func getCommitBatchDataByIndex(index uint64) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) {
+	// Get the current latest block height
+	latestBlock, err := l1Client.BlockNumber(context.Background())
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get latest block number: %w", err)
+	}
+
+	const blockRange = uint64(10000) // Query 10000 blocks each time
+	var endBlock uint64 = latestBlock
+	var startBlock uint64
+
+	// Start from the latest height, query backwards 10000 blocks each time until data is found
+	for endBlock > 0 {
+		// Calculate the start block for this query
+		if endBlock >= blockRange {
+			startBlock = endBlock - blockRange + 1
+		} else {
+			startBlock = 0
+		}
+
+		// Set query options
+		filterOpts := &bind.FilterOpts{
+			Start: startBlock,
+			End:   &endBlock,
+		}
+
+		// Query the CommitBatch event with the corresponding index from the rollup contract
+		commitEventIter, err := rollupContract.FilterCommitBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil)
+		if err != nil {
+			// If the query fails, continue querying backwards
+			if endBlock < blockRange {
+				break // Already queried to block 0, exit loop
+			}
+			endBlock = startBlock - 1
+			continue
+		}
+
+		// Iterate through query results
+		for commitEventIter.Next() {
+			event := commitEventIter.Event
+			// Get transaction hash from event
+			txHash := event.Raw.TxHash
+
+			// Get transaction details
+			tx, _, err := l1Client.TransactionByHash(context.Background(), txHash)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to get transaction by hash: %w", err)
+			}
+
+			// Parse commitBatch transaction data to get batchDataInput and batchSignatureInput
+			batchDataInput, batchSignatureInput, err := parseCommitBatchTxData(tx.Data())
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to parse commit batch data: %w", err)
+			}
+
+			// Verify if batch index matches (by checking batchIndex in parentBatchHeader)
+			if len(batchDataInput.ParentBatchHeader) > 0 {
+				parentHeader := BatchHeaderBytes(batchDataInput.ParentBatchHeader)
+				parentBatchIndex, err := parentHeader.BatchIndex()
+				if err == nil && parentBatchIndex+1 == index {
+					commitEventIter.Close()
+					return batchDataInput, batchSignatureInput, nil
+				}
+			}
+		}
+		commitEventIter.Close()
+
+		// Continue querying backwards
+		if endBlock < blockRange {
+			break // Already queried to block 0, exit loop
+		}
+		endBlock = startBlock - 1
+	}
+
+	return nil, nil, ErrBatchNotFound
+}
+
+// assembleBatchHeaderFromL2Blocks fetches blocks from L2 client in the specified range and assembles batchHeader
+// Parameters:
+//   - bc: BatchCache instance (parentBatchHeader and prevStateRoot already initialized)
+//   - startBlockNum: starting block number
+//   - endBlockNum: ending block number
+//   - sequencerSetVerifyHash: sequencer set verification hash
+//   - l2Client: L2 client
+//   - l2MessagePasser: L2ToL1MessagePasser binding used to read withdraw roots
+//
+// Returns:
+//   - batchHeader: assembled batchHeader
+//   - error: returns error if assembly fails
+func assembleBatchHeaderFromL2Blocks(
+	bc *BatchCache,
+	startBlockNum, endBlockNum uint64,
+	sequencerSetVerifyHash common.Hash,
+	l2Client iface.L2Client,
+	l2MessagePasser *bindings.L2ToL1MessagePasser,
+) (*BatchHeaderBytes, error) {
+	ctx := context.Background()
+
+	// Fetch blocks from L2 client in the specified range and accumulate to batch
+	for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ {
+		root, err := l2MessagePasser.GetTreeRoot(&bind.CallOpts{
+			Context:     ctx,
+			BlockNumber: new(big.Int).SetUint64(blockNum),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err)
+		}
+		// Check capacity and store to current
+		exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root)
+		if err != nil {
+			return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err)
+		}
+
+		// Pack current block (confirm and append to batch)
+		if err := bc.PackCurrentBlock(blockNum); err != nil {
+			return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err)
+		}
+
+		// If capacity exceeds the limit, packing could stop early here (optional)
+		if exceeded {
+			// Note: this helper deliberately keeps packing until endBlockNum;
+			// whether to stop early instead is a policy decision
+		}
+	}
+
+	// Get the last block's timestamp for packing
+	lastBlock, err := l2Client.BlockByNumber(ctx, big.NewInt(int64(endBlockNum)))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err)
+	}
+	blockTimestamp := lastBlock.Time()
+
+	// Seal batch and generate batchHeader
+	batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp)
+	if err != nil {
+		return nil, fmt.Errorf("failed to seal batch: %w", err)
+	}
+
+	// Get the sealed batch header
+	sealedBatch, found := bc.GetSealedBatch(batchIndex)
+	if !found {
+		return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex)
+	}
+
+	_ = batchHash           // the batch hash is not needed by this helper
+	_ = reachedExpectedSize // nor is the expected-size flag
+
+	return &sealedBatch.BatchHeader, nil
+}
diff --git a/tx-submitter/batch/blob.go b/tx-submitter/batch/blob.go
new file mode 100644
index 000000000..c846b24b2
--- /dev/null
+++ b/tx-submitter/batch/blob.go
@@ -0,0 +1,210 @@
+package batch
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"morph-l2/node/zstd"
+
+	eth "github.com/morph-l2/go-ethereum/core/types"
+	"github.com/morph-l2/go-ethereum/crypto/kzg4844"
+	"github.com/morph-l2/go-ethereum/rlp"
+)
+
+const MaxBlobBytesSize = 4096 * 31
+
+var (
+	emptyBlob          = new(kzg4844.Blob)
+	emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+	emptyBlobProof, _  = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+)
+
+// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
+func MakeBlobCanonical(blobBytes []byte) (b *kzg4844.Blob, err error) {
+	if len(blobBytes) > MaxBlobBytesSize {
+		return nil, fmt.Errorf("data is too large for blob. len=%v", len(blobBytes))
+	}
+	offset := 0
+	b = new(kzg4844.Blob)
+	// encode (up to) 31 bytes of remaining input data at a time into the subsequent field element
+	for i := 0; i < 4096; i++ {
+		offset += copy(b[i*32+1:i*32+32], blobBytes[offset:])
+		if offset == len(blobBytes) {
+			break
+		}
+	}
+	if offset < len(blobBytes) {
+		return nil, fmt.Errorf("failed to fit all data into blob. bytes remaining: %v", len(blobBytes)-offset)
+	}
+	return
+}
+
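// Illustrative sketch (not part of the patch): the canonical form above leaves
// the high-order byte of every 32-byte field element zero so the element stays
// below the BLS modulus, which is also where MaxBlobBytesSize comes from:
// 4096 field elements * 31 payload bytes = 126976. A round-trip through the
// two functions defined here therefore looks like this:
func blobRoundTrip(payload []byte) ([]byte, error) {
	blob, err := MakeBlobCanonical(payload)
	if err != nil {
		return nil, err
	}
	decoded, err := RetrieveBlobBytes(blob)
	if err != nil {
		return nil, err
	}
	// RetrieveBlobBytes always returns the full 126976-byte buffer,
	// so the caller trims it back to the original payload length.
	return decoded[:len(payload)], nil
}
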
bytes remaining: %v", len(blobBytes)-offset) + } + return +} + +func RetrieveBlobBytes(blob *kzg4844.Blob) ([]byte, error) { + data := make([]byte, MaxBlobBytesSize) + for i := 0; i < 4096; i++ { + if blob[i*32] != 0 { + return nil, fmt.Errorf("invalid blob, found non-zero high order byte %x of field element %d", data[i*32], i) + } + copy(data[i*31:i*31+31], blob[i*32+1:i*32+32]) + } + return data, nil +} + +func makeBlobCommitment(bz []byte) (b kzg4844.Blob, c kzg4844.Commitment, err error) { + blob, err := MakeBlobCanonical(bz) + if err != nil { + return + } + b = *blob + c, err = kzg4844.BlobToCommitment(&b) + if err != nil { + return + } + return +} + +func MakeBlobTxSidecar(blobBytes []byte) (*eth.BlobTxSidecar, error) { + if len(blobBytes) == 0 { + return ð.BlobTxSidecar{ + Blobs: []kzg4844.Blob{*emptyBlob}, + Commitments: []kzg4844.Commitment{emptyBlobCommit}, + Proofs: []kzg4844.Proof{emptyBlobProof}, + }, nil + } + if len(blobBytes) > 2*MaxBlobBytesSize { + return nil, errors.New("only 2 blobs at most is allowed") + } + blobCount := len(blobBytes)/(MaxBlobBytesSize+1) + 1 + var ( + err error + blobs = make([]kzg4844.Blob, blobCount) + commitments = make([]kzg4844.Commitment, blobCount) + ) + switch blobCount { + case 1: + blobs[0], commitments[0], err = makeBlobCommitment(blobBytes) + if err != nil { + return nil, err + } + case 2: + blobs[0], commitments[0], err = makeBlobCommitment(blobBytes[:MaxBlobBytesSize]) + if err != nil { + return nil, err + } + blobs[1], commitments[1], err = makeBlobCommitment(blobBytes[MaxBlobBytesSize:]) + if err != nil { + return nil, err + } + } + return ð.BlobTxSidecar{ + Blobs: blobs, + Commitments: commitments, + }, nil +} + +func CompressBatchBytes(batchBytes []byte) ([]byte, error) { + if len(batchBytes) == 0 { + return nil, nil + } + compressedBatchBytes, err := zstd.CompressBatchBytes(batchBytes) + if err != nil { + return nil, fmt.Errorf("failed to compress batch bytes, err: %w", err) + } + return compressedBatchBytes, nil +} + +func DecodeTxsFromBytes(txsBytes []byte) (eth.Transactions, error) { + reader := bytes.NewReader(txsBytes) + txs := make(eth.Transactions, 0) + for { + var ( + firstByte byte + fullTxBytes []byte + innerTx eth.TxData + err error + ) + if err = binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + // if the blob byte array is completely consumed, then break the loop + if err == io.EOF { + break + } + return nil, err + } + // zero byte is found after valid tx bytes, break the loop + if firstByte == 0 { + break + } + + switch firstByte { + case eth.AccessListTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.AccessListTx) + case eth.DynamicFeeTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.DynamicFeeTx) + case eth.SetCodeTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.SetCodeTx) + case eth.AltFeeTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.AltFeeTx) + default: + if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247) + return nil, fmt.Errorf("not supported tx type: %d", firstByte) + } + innerTx = new(eth.LegacyTx) + } + + // we support the tx types of LegacyTxType/AccessListTxType/DynamicFeeTxType + //if firstByte == eth.AccessListTxType || firstByte == eth.DynamicFeeTxType { + // 
+		// we support the tx types of LegacyTxType/AccessListTxType/DynamicFeeTxType
+		//if firstByte == eth.AccessListTxType || firstByte == eth.DynamicFeeTxType {
+		//	// the firstByte here is used to indicate tx type, so skip it
+		//	if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
+		//		return nil, err
+		//	}
+		//} else if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247)
+		//	return nil, fmt.Errorf("not supported tx type: %d", firstByte)
+		//}
+		fullTxBytes, err = extractInnerTxFullBytes(firstByte, reader)
+		if err != nil {
+			return nil, err
+		}
+		if err = rlp.DecodeBytes(fullTxBytes, innerTx); err != nil {
+			return nil, err
+		}
+		txs = append(txs, eth.NewTx(innerTx))
+	}
+	return txs, nil
+}
+
+func extractInnerTxFullBytes(firstByte byte, reader io.Reader) ([]byte, error) {
+	// the occupied byte length for storing the size of the following rlp encoded bytes
+	sizeByteLen := firstByte - 0xf7
+
+	// the size of the following rlp encoded bytes
+	sizeByte := make([]byte, sizeByteLen)
+	if err := binary.Read(reader, binary.BigEndian, sizeByte); err != nil {
+		return nil, err
+	}
+	size := binary.BigEndian.Uint32(append(make([]byte, 4-len(sizeByte)), sizeByte...))
+
+	txRaw := make([]byte, size)
+	if err := binary.Read(reader, binary.BigEndian, txRaw); err != nil {
+		return nil, err
+	}
+	fullTxBytes := make([]byte, 1+uint32(sizeByteLen)+size)
+	copy(fullTxBytes[:1], []byte{firstByte})
+	copy(fullTxBytes[1:1+sizeByteLen], sizeByte)
+	copy(fullTxBytes[1+sizeByteLen:], txRaw)
+
+	return fullTxBytes, nil
+}
diff --git a/tx-submitter/types/converter.go b/tx-submitter/types/converter.go
new file mode 100644
index 000000000..d5b16398d
--- /dev/null
+++ b/tx-submitter/types/converter.go
@@ -0,0 +1,25 @@
+package types
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func Uint64ToBigEndianBytes(value uint64) []byte {
+	valueBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(valueBytes, value)
+	return valueBytes
+}
+
+func Uint16ToBigEndianBytes(value uint16) []byte {
+	valueBytes := make([]byte, 2)
+	binary.BigEndian.PutUint16(valueBytes, value)
+	return valueBytes
+}
+
+func HeightFromBlockContextBytes(blockContextBytes []byte) (uint64, error) {
+	if len(blockContextBytes) != 60 {
+		return 0, fmt.Errorf("wrong block context bytes length, input: %x", blockContextBytes)
+	}
+	return binary.BigEndian.Uint64(blockContextBytes[:8]), nil
+}
diff --git a/tx-submitter/utils/methods.go b/tx-submitter/utils/methods.go
index 65a96c15a..b8b4514af 100644
--- a/tx-submitter/utils/methods.go
+++ b/tx-submitter/utils/methods.go
@@ -69,7 +69,7 @@ func ParseStringToType[T any](s string) (T, error) {
 	var result T
 	var err error
 
-	// 获取目标类型的名称
+	// Get target type name
 	switch any(result).(type) {
 	case int:
 		var v int64

From f2912a3cbb7d19272674074bad2db8910436daf5 Mon Sep 17 00:00:00 2001
From: kukoomomo
Date: Fri, 30 Jan 2026 19:37:44 +0800
Subject: [PATCH 02/12] update submitter seal batch timing

---
 tx-submitter/batch/batch_cache.go        | 101 +++++++++++------------
 tx-submitter/batch/batch_cache_test.go   |   2 +-
 tx-submitter/batch/batch_query.go        |  47 +++++++----
 tx-submitter/batch/batch_restart_test.go |  13 ++-
 4 files changed, 92 insertions(+), 71 deletions(-)

diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go
index ab15ca17a..10bc1aac5 100644
--- a/tx-submitter/batch/batch_cache.go
+++ b/tx-submitter/batch/batch_cache.go
@@ -10,15 +10,14 @@ import (
 	"sync"
 	"time"
 
-	"github.com/morph-l2/go-ethereum/accounts/abi/bind"
-	"github.com/morph-l2/go-ethereum/log"
-
 	"morph-l2/bindings/bindings"
 	"morph-l2/tx-submitter/iface"
 
+	"github.com/morph-l2/go-ethereum/accounts/abi/bind"
"github.com/morph-l2/go-ethereum/common" ethtypes "github.com/morph-l2/go-ethereum/core/types" "github.com/morph-l2/go-ethereum/ethclient" + "github.com/morph-l2/go-ethereum/log" ) // SealedBatchInfo stores sealed batch information @@ -137,14 +136,13 @@ func (bc *BatchCache) InitFromRollupByRange() error { if err != nil { return fmt.Errorf("get last finalize batch header err: %w", err) } - parentStateRoot, err := headerBytes.PostStateRoot() - if err != nil { - return fmt.Errorf("get post state root err: %w", err) - } // Initialize BatchCache parent batch information // prevStateRoot should be the parent batch's postStateRoot bc.parentBatchHeader = headerBytes - bc.prevStateRoot = parentStateRoot // The current batch's prevStateRoot is the parent batch's postStateRoot + bc.prevStateRoot, err = headerBytes.PostStateRoot() + if err != nil { + return fmt.Errorf("get post state root err: %w", err) + } bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() if err != nil { return fmt.Errorf("get last block number err: %w", err) @@ -163,7 +161,7 @@ func (bc *BatchCache) InitFromRollupByRange() error { return nil } -func (bc *BatchCache) InitFromRollup() error { +func (bc *BatchCache) InitAndSyncFromRollup() error { ci, fi, err := bc.getBatchStatusFromContract() if err != nil { return fmt.Errorf("get batch status from rollup failed err: %w", err) @@ -195,17 +193,6 @@ func (bc *BatchCache) InitFromRollup() error { if err != nil { return fmt.Errorf("get batch block range err: %w,start %v, end %v", err, startNum, endNum) } - { - startBlock, err := bc.l2Client.BlockByNumber(context.Background(), new(big.Int).SetUint64(startNum)) - if err != nil { - return err - } - endBlock, err := bc.l2Client.BlockByNumber(context.Background(), new(big.Int).SetUint64(endNum)) - if err != nil { - return err - } - log.Info("Assemble batch from l2 blocks", "from block", startNum, "to block", endNum, "time", endBlock.Time()-startBlock.Time()) - } batchHeaderBytes, err := bc.assembleBatchHeaderFromL2Blocks(startNum, endNum) if err != nil { return err @@ -835,7 +822,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { - callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) + callOpts.BlockNumber = new(big.Int).SetUint64(bc.lastPackedBlockHeight) root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts) if err != nil { return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) @@ -847,11 +834,6 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) } - // Pack current block (confirm and append to batch) - if err = bc.PackCurrentBlock(blockNum); err != nil { - return fmt.Errorf("failed to pack block %d: %w", blockNum, err) - } - // Get current block to check timeout after packing nowBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(blockNum))) if err != nil { @@ -875,34 +857,9 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { // Timeout check ensures batch is sealed before exceeding the maximum timeout if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout { log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout) - sequencerSetVerifyHash, err := 
bc.sequencerContract.SequencerSetVerifyHash(callOpts)
-			if err != nil {
-				return fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err)
-			}
-			lastBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(blockNum)))
+			batchHash, reachedExpectedSize, err := bc.SealBatchAndCheck(callOpts, ci)
 			if err != nil {
-				return fmt.Errorf("failed to get last block %d: %w", endBlockNum, err)
-			}
-			blockTimestamp := lastBlock.Time()
-			// Seal batch and generate batchHeader
-			batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp)
-			if err != nil {
-				return fmt.Errorf("failed to seal batch: %w", err)
-			}
-			sealedBatch, found := bc.GetSealedBatch(batchIndex)
-			if !found {
-				return fmt.Errorf("sealed batch not found for index %d", batchIndex)
-			}
-			if batchIndex <= ci.Uint64() {
-				// batch already commited, check batch hash
-				correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.BatchHash)
-				if err != nil {
-					return err
-				}
-				if !correct {
-					log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.BatchHash.String())
-					return fmt.Errorf("batch hash does not match sealed batch")
+				return err
 			}
 			// Update startBlockNum and startBlockTime for next batch
 			startBlockNum = blockNum + 1
@@ -916,6 +873,44 @@
 			}
 			log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize)
 		}
+
+		// Pack current block (confirm and append to batch)
+		if err = bc.PackCurrentBlock(blockNum); err != nil {
+			return fmt.Errorf("failed to pack block %d: %w", blockNum, err)
+		}
 	}
 	return nil
 }
+
+func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, error) {
+	sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts)
+	if err != nil {
+		return common.Hash{}, false, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err)
+	}
+	lastBlock, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight)))
+	if err != nil {
+		return common.Hash{}, false, fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err)
+	}
+	blockTimestamp := lastBlock.Time()
+	// Seal batch and generate batchHeader
+	batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp)
+	if err != nil {
+		return common.Hash{}, false, fmt.Errorf("failed to seal batch: %w", err)
+	}
+	sealedBatch, found := bc.GetSealedBatch(batchIndex)
+	if !found {
+		return common.Hash{}, false, fmt.Errorf("sealed batch not found for index %d", batchIndex)
+	}
+	if batchIndex <= ci.Uint64() {
+		// batch already committed, check batch hash
+		correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.BatchHash)
+		if err != nil {
+			return common.Hash{}, false, err
+		}
+		if !correct {
+			log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.BatchHash.String())
+			return common.Hash{}, false, fmt.Errorf("batch hash does not match sealed batch")
+		}
+	}
+	return batchHash, reachedExpectedSize, nil
+}
diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go
index d18eca0e4..f820d7954 100644
--- a/tx-submitter/batch/batch_cache_test.go
+++ 
b/tx-submitter/batch/batch_cache_test.go @@ -29,7 +29,7 @@ func init() { func TestBatchCacheInit(t *testing.T) { cache := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) - err := cache.InitFromRollup() + err := cache.InitAndSyncFromRollup() require.NoError(t, err) } diff --git a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go index 2f99f3bed..104357815 100644 --- a/tx-submitter/batch/batch_query.go +++ b/tx-submitter/batch/batch_query.go @@ -93,8 +93,9 @@ func (bc *BatchCache) getLastFinalizeBatchHeaderFromRollupByIndex(index uint64) return nil, fmt.Errorf("failed to find last finalized batch header for batchIndex %d", index) } -// parseFinalizeBatchTxData parses the finalizeBatch transaction's input data to get BatchHeaderBytes -// finalizeBatch(bytes calldata _batchHeader) only receives one parameter: batchHeader bytes +// parseFinalizeBatchTxData parses the finalizeBatch or importGenesisBatch transaction's input data to get BatchHeaderBytes +// Both finalizeBatch(bytes calldata _batchHeader) and importGenesisBatch(bytes calldata _batchHeader) receive one parameter: batchHeader bytes +// Both methods emit FinalizeBatch event, so we need to support parsing both func parseFinalizeBatchTxData(txData []byte) (BatchHeaderBytes, error) { // Get rollup ABI rollupAbi, err := bindings.RollupMetaData.GetAbi() @@ -102,36 +103,54 @@ func parseFinalizeBatchTxData(txData []byte) (BatchHeaderBytes, error) { return nil, err } - // Check if method ID is finalizeBatch - finalizeBatchMethod, ok := rollupAbi.Methods["finalizeBatch"] - if !ok { - return nil, errors.New("finalizeBatch method not found in ABI") - } - // Check if the first 4 bytes of transaction data match the method ID if len(txData) < 4 { return nil, errors.New("transaction data too short") } methodID := txData[:4] - if !bytes.Equal(methodID, finalizeBatchMethod.ID) { - return nil, errors.New("transaction is not a finalizeBatch call") + + // Try to get finalizeBatch method + finalizeBatchMethod, ok := rollupAbi.Methods["finalizeBatch"] + if !ok { + return nil, errors.New("finalizeBatch method not found in ABI") + } + + var method abi.Method + var methodName string + + // Check if method ID matches finalizeBatch + if bytes.Equal(methodID, finalizeBatchMethod.ID) { + method = finalizeBatchMethod + methodName = "finalizeBatch" + } else { + // Try importGenesisBatch method + importGenesisBatchMethod, ok := rollupAbi.Methods["importGenesisBatch"] + if !ok { + return nil, errors.New("importGenesisBatch method not found in ABI") + } + if bytes.Equal(methodID, importGenesisBatchMethod.ID) { + method = importGenesisBatchMethod + methodName = "importGenesisBatch" + } else { + return nil, fmt.Errorf("transaction is not a finalizeBatch or importGenesisBatch call, methodID: %x", methodID) + } } // Parse parameters (only one parameter: batchHeader bytes) - args, err := finalizeBatchMethod.Inputs.Unpack(txData[4:]) + args, err := method.Inputs.Unpack(txData[4:]) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to unpack %s transaction parameters: %w", methodName, err) } if len(args) == 0 { - return nil, errors.New("no arguments found in finalizeBatch transaction") + return nil, fmt.Errorf("no arguments found in %s transaction", methodName) } // The first parameter is batchHeader bytes batchHeaderBytes, ok := args[0].([]byte) if !ok { - return nil, errors.New("failed to cast batchHeader to []byte") + return nil, fmt.Errorf("failed to cast batchHeader to 
[]byte in %s transaction", methodName) } return BatchHeaderBytes(batchHeaderBytes), nil diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index 28a3eac4b..b4e9ead53 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -24,13 +24,13 @@ var ( ) var ( - rollupAddr = common.HexToAddress("0xd1827e85d8149013778b5675f9d1d7fb750ae31a") + rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") sequencerAddr = common.HexToAddress("0x5300000000000000000000000000000000000017") l2MessagePasserAddr = common.HexToAddress("0x5300000000000000000000000000000000000001") govAddr = common.HexToAddress("0x5300000000000000000000000000000000000004") - l1ClientRpc = "https://old-empty-sound.ethereum-hoodi.quiknode.pro/0f479a6bd068da530e0afdb36755f94c9facef17" - l2ClientRpc = "http://l2-qa-morph-senquencer-1.bitkeep.tools" + l1ClientRpc = "http://localhost:9545" + l2ClientRpc = "http://localhost:8545" l1Client, _ = ethclient.Dial(l1ClientRpc) l2Client, _ = ethclient.Dial(l2ClientRpc) @@ -60,6 +60,13 @@ func init() { } } +func Test_GetFinalizeBatchHeader(t *testing.T) { + bc := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(0) + require.NoError(t, err) + t.Log("headerBytes", hex.EncodeToString(headerBytes.Bytes())) +} + func Test_CommitBatchParse(t *testing.T) { data, signature, err := getCommitBatchDataByIndex(5357) require.NoError(t, err) From b4c3780072476318969fea0c6b3ed5353ed4f080 Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Tue, 3 Feb 2026 09:58:40 +0800 Subject: [PATCH 03/12] update rollup cache config: --- tx-submitter/batch/batch_cache.go | 379 ++++++++++++++---- tx-submitter/batch/batch_cache_test.go | 37 +- .../batch/{batch.go => batch_data.go} | 0 tx-submitter/batch/batch_restart_test.go | 39 +- tx-submitter/entry.go | 8 +- tx-submitter/iface/client.go | 223 +++++++++++ tx-submitter/iface/rollup.go | 20 +- tx-submitter/services/batch_fetcher.go | 39 -- tx-submitter/services/rollup.go | 30 +- tx-submitter/types/batch_cache.go | 97 ----- tx-submitter/types/batch_cache_test.go | 201 ---------- .../types/batch_cache_validation_test.go | 135 ------- tx-submitter/types/l2Caller.go | 67 ++++ 13 files changed, 673 insertions(+), 602 deletions(-) rename tx-submitter/batch/{batch.go => batch_data.go} (100%) delete mode 100644 tx-submitter/services/batch_fetcher.go delete mode 100644 tx-submitter/types/batch_cache.go delete mode 100644 tx-submitter/types/batch_cache_test.go delete mode 100644 tx-submitter/types/batch_cache_validation_test.go create mode 100644 tx-submitter/types/l2Caller.go diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 10bc1aac5..98d887b7b 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -7,40 +7,27 @@ import ( "errors" "fmt" "math/big" - "sync" - "time" - - "morph-l2/bindings/bindings" "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + "sync" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" ethtypes "github.com/morph-l2/go-ethereum/core/types" - "github.com/morph-l2/go-ethereum/ethclient" + "github.com/morph-l2/go-ethereum/eth" "github.com/morph-l2/go-ethereum/log" ) -// SealedBatchInfo stores sealed batch information -type SealedBatchInfo struct { - 
BatchHeader BatchHeaderBytes // complete batch header - BatchHash common.Hash // batch hash - Sidecar *ethtypes.BlobTxSidecar // blob sidecar - CompressedPayload []byte // compressed payload - DataHash common.Hash // batch data hash - LastBlockNumber uint64 // last block number - PostStateRoot common.Hash // post state root - WithdrawRoot common.Hash // withdraw root - TotalL1MessagePopped uint64 // total L1 messages popped - SealedAt time.Time // sealed timestamp -} - // BatchCache is a structure for caching and building batch data // Stores all batch information starting from 0, and has the functionality to pack batches type BatchCache struct { - mu sync.RWMutex + mu sync.RWMutex + ctx context.Context + initDone bool - // key: batchIndex, value: SealedBatchInfo - sealedBatches map[uint64]*SealedBatchInfo + // key: batchIndex, value: RPCRollupBatch + sealedBatches map[uint64]*eth.RPCRollupBatch // Currently accumulating batch data (referencing node's BatchingCache) // Parent batch information @@ -69,12 +56,10 @@ type BatchCache struct { isBatchUpgraded func(uint64) bool // Clients and contracts - l1Client *ethclient.Client - l2Client iface.L2Client - rollupContract *bindings.Rollup - sequencerContract *bindings.Sequencer - l2MessagePasserContract *bindings.L2ToL1MessagePasser - govContract *bindings.Gov + l1Client iface.Client + l2Clients iface.L2Clients + rollupContract iface.IRollup + l2Caller *types.L2Caller // config batchTimeOut uint64 @@ -84,20 +69,19 @@ type BatchCache struct { // NewBatchCache creates and initializes a new BatchCache instance func NewBatchCache( isBatchUpgraded func(uint64) bool, - l1Client *ethclient.Client, - l2Client iface.L2Client, - rollupContract *bindings.Rollup, - sequencerContract *bindings.Sequencer, - l2MessagePasserContract *bindings.L2ToL1MessagePasser, - govContract *bindings.Gov, + l1Client iface.Client, + l2Clients []iface.L2Client, + rollupContract iface.IRollup, + l2Caller *types.L2Caller, ) *BatchCache { if isBatchUpgraded == nil { // Default implementation: always returns true (use V1 version) isBatchUpgraded = func(uint64) bool { return true } } - return &BatchCache{ - sealedBatches: make(map[uint64]*SealedBatchInfo), + ctx: context.Background(), + initDone: false, + sealedBatches: make(map[uint64]*eth.RPCRollupBatch), parentBatchHeader: nil, prevStateRoot: common.Hash{}, batchData: NewBatchData(), @@ -115,15 +99,16 @@ func NewBatchCache( currentBlockHash: common.Hash{}, isBatchUpgraded: isBatchUpgraded, l1Client: l1Client, - l2Client: l2Client, + l2Clients: iface.L2Clients{Clients: l2Clients}, rollupContract: rollupContract, - sequencerContract: sequencerContract, - l2MessagePasserContract: l2MessagePasserContract, - govContract: govContract, + l2Caller: l2Caller, } } func (bc *BatchCache) InitFromRollupByRange() error { + if bc.initDone { + return nil + } err := bc.updateBatchConfigFromGov() if err != nil { return err @@ -153,15 +138,19 @@ func (bc *BatchCache) InitFromRollupByRange() error { } log.Info("Start assemble batch", "start batch", fi.Uint64()+1, "end batch", ci.Uint64()) - err = bc.assembleBatchHeaderFromL2BlocksByBlockRange() + err = bc.assembleUnFinalizeBatchHeaderFromL2Blocks() if err != nil { return err } + bc.initDone = true log.Info("Initialized batch cache success") return nil } func (bc *BatchCache) InitAndSyncFromRollup() error { + if bc.initDone { + return nil + } ci, fi, err := bc.getBatchStatusFromContract() if err != nil { return fmt.Errorf("get batch status from rollup failed err: %w", err) @@ -210,16 +199,17 @@ func 
(bc *BatchCache) InitAndSyncFromRollup() error { } log.Info("Assemble batch success", "batch index", i, "last batch index", ci.Uint64()) } + bc.initDone = true log.Info("Initialized batch cache success") return nil } func (bc *BatchCache) updateBatchConfigFromGov() error { - interval, err := bc.govContract.BatchBlockInterval(nil) + interval, err := bc.l2Caller.BatchBlockInterval(nil) if err != nil { return err } - timeout, err := bc.govContract.BatchTimeout(nil) + timeout, err := bc.l2Caller.BatchTimeout(nil) if err != nil { return err } @@ -275,7 +265,7 @@ func (bc *BatchCache) getUnFinalizeBlockRange() (uint64, uint64, *big.Int, error return 0, 0, nil, err } startNum := finalizeBatchStorage.BlockNumber.Uint64() + 1 - endNum, err := bc.l2Client.BlockNumber(context.Background()) + endNum, err := bc.l2Clients.BlockNumber(context.Background()) if err != nil { return 0, 0, nil, err } @@ -305,12 +295,10 @@ func (bc *BatchCache) ClearCurrent() { bc.totalL1MessagePoppedAfterCurBlock = 0 bc.currentStateRoot = common.Hash{} bc.currentWithdrawRoot = common.Hash{} - bc.currentBlockNumber = 0 - bc.currentBlockHash = common.Hash{} } // GetSealedBatch gets sealed batch information -func (bc *BatchCache) GetSealedBatch(batchIndex uint64) (*SealedBatchInfo, bool) { +func (bc *BatchCache) GetSealedBatch(batchIndex uint64) (*eth.RPCRollupBatch, bool) { bc.mu.RLock() defer bc.mu.RUnlock() batch, ok := bc.sealedBatches[batchIndex] @@ -344,7 +332,7 @@ func (bc *BatchCache) GetLatestSealedBatchIndex() uint64 { // Note: This method stores block data to currentBlockContext but does not immediately append to batch // Need to call PackCurrentBlock to confirm and append func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdrawRoot common.Hash) (bool, error) { - if bc.l2Client == nil { + if len(bc.l2Clients.Clients) == 0 { return false, fmt.Errorf("L2 client is nil") } @@ -362,7 +350,7 @@ func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdraw bc.mu.Unlock() // Fetch complete block from L2 client (including transactions) - block, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + block, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) if err != nil { return false, fmt.Errorf("failed to fetch block %d: %w", blockNumber, err) } @@ -484,7 +472,7 @@ func (bc *BatchCache) FetchAndCacheHeader(blockNumber uint64, withdrawRoot commo defer bc.mu.RUnlock() // Return header (need to re-fetch because current has been cleared) - block, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + block, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) if err != nil { return nil, err } @@ -546,18 +534,46 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta return 0, common.Hash{}, false, fmt.Errorf("failed to get batch index: %w", err) } - // Store sealed batch information - sealedBatch := &SealedBatchInfo{ - BatchHeader: batchHeader, - BatchHash: batchHash, - Sidecar: sidecar, - CompressedPayload: compressedPayload, - DataHash: batchDataHash, - LastBlockNumber: bc.lastPackedBlockHeight, - PostStateRoot: bc.postStateRoot, - WithdrawRoot: bc.withdrawRoot, - TotalL1MessagePopped: bc.totalL1MessagePopped, - SealedAt: time.Now(), + // Build parent batch header bytes + var parentBatchHeaderBytes hexutil.Bytes + if bc.parentBatchHeader != nil { + parentBatchHeaderBytes = hexutil.Bytes(*bc.parentBatchHeader) + } + + // 
Get version from batch header + version, err := batchHeader.Version() + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to get batch version: %w", err) + } + + // Build block contexts from batch data (encode block contexts) + blockContextsData, err := bc.batchData.Encode() + if err != nil { + return 0, common.Hash{}, false, fmt.Errorf("failed to encode batch data: %w", err) + } + blockContexts := hexutil.Bytes(blockContextsData) + + // Convert sequencerSetVerifyHash to bytes + currentSequencerSetBytes := hexutil.Bytes(sequencerSetVerifyHash.Bytes()) + + // Get L1 message count from batch data + numL1Messages := bc.batchData.l1TxNum + + // Store sealed batch information as RPCRollupBatch + sealedBatch := ð.RPCRollupBatch{ + Version: uint(version), + Hash: batchHash, + ParentBatchHeader: parentBatchHeaderBytes, + BlockContexts: blockContexts, + CurrentSequencerSetBytes: currentSequencerSetBytes, + PrevStateRoot: bc.prevStateRoot, + PostStateRoot: bc.postStateRoot, + WithdrawRoot: bc.withdrawRoot, + LastBlockNumber: bc.lastPackedBlockHeight, + NumL1Messages: numL1Messages, + Sidecar: *sidecar, + Signatures: []eth.RPCBatchSignature{}, + CollectedL1Fee: nil, } bc.sealedBatches[batchIndex] = sealedBatch @@ -568,9 +584,6 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta // Reset currently accumulated batch data bc.batchData = NewBatchData() - // totalL1MessagePopped keeps accumulated value for next batch, no need to reset - bc.postStateRoot = common.Hash{} - bc.withdrawRoot = common.Hash{} return batchIndex, batchHash, reachedExpectedSize, nil } @@ -592,9 +605,16 @@ func (bc *BatchCache) CheckBatchSizeReached(batchIndex uint64) (reached bool, fo } // Expected value: compressed payload size >= MaxBlobBytesSize * 0.9 + // We need to estimate the compressed size from the block contexts + // For now, we'll use a simple heuristic based on block contexts size threshold := float64(MaxBlobBytesSize) * 0.9 expectedSizeThreshold := uint64(threshold) - reached = uint64(len(sealedBatch.CompressedPayload)) >= expectedSizeThreshold + + // Estimate compressed size from block contexts (rough approximation) + blockContextsSize := uint64(len(sealedBatch.BlockContexts)) + // Use a compression ratio estimate (zstd typically achieves 2-3x compression) + estimatedCompressedSize := blockContextsSize / 2 + reached = estimatedCompressedSize >= expectedSizeThreshold return reached, true } @@ -676,6 +696,64 @@ func (bc *BatchCache) createBatchHeader(dataHash common.Hash, sidecar *ethtypes. 
return batchHeaderV0.Bytes() } +// createBatchHeaderFromRPCRollupBatch reconstructs BatchHeaderBytes from RPCRollupBatch +func (bc *BatchCache) createBatchHeaderFromRPCRollupBatch(batch *eth.RPCRollupBatch, sequencerSetVerifyHash common.Hash, blockTimestamp uint64) BatchHeaderBytes { + // Extract sequencer set verify hash from CurrentSequencerSetBytes + if len(batch.CurrentSequencerSetBytes) >= 32 { + sequencerSetVerifyHash = common.BytesToHash(batch.CurrentSequencerSetBytes[:32]) + } + + // Get parent batch info + var parentBatchHeaderTotalL1 uint64 + var parentBatchIndex uint64 + var parentBatchHash common.Hash + + if len(batch.ParentBatchHeader) > 0 { + parentHeader := BatchHeaderBytes(batch.ParentBatchHeader) + parentBatchHeaderTotalL1, _ = parentHeader.TotalL1MessagePopped() + parentBatchIndex, _ = parentHeader.BatchIndex() + parentBatchHash, _ = parentHeader.Hash() + } + + // Calculate L1 message popped from NumL1Messages + l1MessagePopped := uint64(batch.NumL1Messages) + + // Get data hash from sidecar blob (simplified - in practice, this should be stored) + dataHash := common.Hash{} + if len(batch.Sidecar.Blobs) > 0 && len(batch.Sidecar.Blobs[0]) > 0 { + // Use a hash of the blob as data hash approximation + dataHash = common.BytesToHash(batch.Sidecar.Blobs[0][:32]) + } + + blobHashes := []common.Hash{EmptyVersionedHash} + if len(batch.Sidecar.Blobs) > 0 { + blobHashes = batch.Sidecar.BlobHashes() + } + + batchHeaderV0 := BatchHeaderV0{ + BatchIndex: parentBatchIndex + 1, + L1MessagePopped: l1MessagePopped, + TotalL1MessagePopped: parentBatchHeaderTotalL1 + l1MessagePopped, + DataHash: dataHash, + BlobVersionedHash: blobHashes[0], + PrevStateRoot: batch.PrevStateRoot, + PostStateRoot: batch.PostStateRoot, + WithdrawalRoot: batch.WithdrawRoot, + SequencerSetVerifyHash: sequencerSetVerifyHash, + ParentBatchHash: parentBatchHash, + } + + if bc.isBatchUpgraded(blockTimestamp) { + batchHeaderV1 := BatchHeaderV1{ + BatchHeaderV0: batchHeaderV0, + LastBlockNumber: batch.LastBlockNumber, + } + return batchHeaderV1.Bytes() + } + + return batchHeaderV0.Bytes() +} + // parsingTxs parses transactions, distinguishes L1 and L2 transactions func parsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( txsPayload []byte, @@ -759,7 +837,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) - root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts) + root, err := bc.l2Caller.GetTreeRoot(callOpts) if err != nil { return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) } @@ -776,12 +854,12 @@ func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( } } - sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts) + sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) if err != nil { return nil, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) } // Get the last block's timestamp for packing - lastBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(endBlockNum))) + lastBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(endBlockNum))) if err != nil { return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err) } @@ -793,17 +871,22 @@ func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( return nil, 
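// --- Minimal sketch of the version split at the end of
// createBatchHeaderFromRPCRollupBatch: a V1 header is a V0 header extended with
// the last block number, selected when the upgrade predicate holds for the
// sealing timestamp. The struct shapes below are simplified stand-ins for
// BatchHeaderV0/BatchHeaderV1, not the real types.
package main

import "fmt"

type headerV0 struct {
	BatchIndex      uint64
	L1MessagePopped uint64
}

type headerV1 struct {
	headerV0
	LastBlockNumber uint64
}

func buildHeader(isUpgraded func(ts uint64) bool, ts, index, lastBlock uint64) any {
	v0 := headerV0{BatchIndex: index}
	if isUpgraded(ts) {
		return headerV1{headerV0: v0, LastBlockNumber: lastBlock}
	}
	return v0
}

func main() {
	h := buildHeader(func(uint64) bool { return true }, 1700000000, 42, 12345)
	fmt.Printf("%#v\n", h) // headerV1 with LastBlockNumber set
}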
fmt.Errorf("failed to seal batch: %w", err) } - // Get the sealed batch header + // Get the sealed batch sealedBatch, found := bc.GetSealedBatch(batchIndex) if !found { return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex) } + // Reconstruct batch header from RPCRollupBatch data + // We need to create a BatchHeaderBytes from the available data in RPCRollupBatch + // Since we have all the necessary fields, we can reconstruct it + batchHeader := bc.createBatchHeaderFromRPCRollupBatch(sealedBatch, sequencerSetVerifyHash, blockTimestamp) + log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) - return &sealedBatch.BatchHeader, nil + return &batchHeader, nil } -func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { +func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { ctx := context.Background() callOpts := &bind.CallOpts{ Context: ctx, @@ -814,7 +897,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { } // Get start block once to avoid repeated queries - startBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) + startBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) if err != nil { return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) } @@ -823,7 +906,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { callOpts.BlockNumber = new(big.Int).SetUint64(bc.lastPackedBlockHeight) - root, err := bc.l2MessagePasserContract.GetTreeRoot(callOpts) + root, err := bc.l2Caller.GetTreeRoot(callOpts) if err != nil { return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) } @@ -835,7 +918,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { } // Get current block to check timeout after packing - nowBlock, err := bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(blockNum))) + nowBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(blockNum))) if err != nil { return fmt.Errorf("failed to get block %d: %w", blockNum, err) } @@ -865,7 +948,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { startBlockNum = blockNum + 1 if startBlockNum <= endBlockNum { // Update startBlock and startBlockTime for next batch's timeout calculation - startBlock, err = bc.l2Client.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) + startBlock, err = bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) if err != nil { return fmt.Errorf("failed to get start block %d for next batch: %w", startBlockNum, err) } @@ -883,11 +966,11 @@ func (bc *BatchCache) assembleBatchHeaderFromL2BlocksByBlockRange() error { } func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, error) { - sequencerSetVerifyHash, err := bc.sequencerContract.SequencerSetVerifyHash(callOpts) + sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) if err != nil { return common.Hash{}, false, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) } - lastBlock, err := bc.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) + lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) 
if err != nil { return common.Hash{}, false, fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err) } @@ -903,14 +986,150 @@ func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (c } if batchIndex <= ci.Uint64() { // batch already committed, check batch hash - correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.BatchHash) + correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.Hash) if err != nil { return common.Hash{}, false, err } if !correct { - log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.BatchHash.String()) + log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.Hash.String()) return common.Hash{}, false, fmt.Errorf("batch hash does not match sealed batch") } } return batchHash, reachedExpectedSize, nil } + +// Get gets sealed batch information by batch index +// Returns the sealed batch info and a boolean indicating if the batch was found +func (bc *BatchCache) Get(batchIndex uint64) (*eth.RPCRollupBatch, bool) { + bc.mu.RLock() + defer bc.mu.RUnlock() + batch, ok := bc.sealedBatches[batchIndex] + return batch, ok +} + +// Delete deletes a sealed batch from the cache by batch index +// Returns a boolean indicating if the batch was found and deleted +func (bc *BatchCache) Delete(batchIndex uint64) bool { + bc.mu.Lock() + defer bc.mu.Unlock() + _, exists := bc.sealedBatches[batchIndex] + if exists { + delete(bc.sealedBatches, batchIndex) + } + return exists +} + +// logSealedBatch logs the details of the sealed batch for debugging purposes. +func (bc *BatchCache) logSealedBatch(batchHeader BatchHeaderBytes, batchHash common.Hash) { + log.Info("Sealed batch header", "batchHash", batchHash.Hex()) + batchIndex, _ := batchHeader.BatchIndex() + l1MessagePopped, _ := batchHeader.L1MessagePopped() + totalL1MessagePopped, _ := batchHeader.TotalL1MessagePopped() + dataHash, _ := batchHeader.DataHash() + parentBatchHash, _ := batchHeader.ParentBatchHash() + log.Info(fmt.Sprintf("===batchIndex: %d \n===L1MessagePopped: %d \n===TotalL1MessagePopped: %d \n===dataHash: %x \n===blockNum: %d \n===ParentBatchHash: %x \n", + batchIndex, + l1MessagePopped, + totalL1MessagePopped, + dataHash, + bc.batchData.BlockNum(), + parentBatchHash)) +} + +func (bc *BatchCache) AssembleCurrentBatchHeader() error { + if !bc.initDone { + return errors.New("batch has not been initialized, should wait") + } + callOpts := &bind.CallOpts{ + Context: bc.ctx, + } + endBlockNum, err := bc.l2Clients.BlockNumber(bc.ctx) + if err != nil { + return err + } + if endBlockNum < bc.currentBlockNumber { + return fmt.Errorf("reorg detected, should check block status: current %v, now %v", bc.currentBlockNumber, endBlockNum) + } + startBlockNum, err := bc.parentBatchHeader.LastBlockNumber() + if err != nil { + return fmt.Errorf("failed to get last block number: %w", err) + } + // Get start block once to avoid repeated queries + startBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) + } + startBlockTime := startBlock.Time() + currentBlockNum := bc.currentBlockNumber + + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := currentBlockNum; blockNum <= endBlockNum; blockNum++ { + callOpts.BlockNumber =
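// --- Sketch of the reorg guard at the top of AssembleCurrentBatchHeader: if
// the head reported by the L2 clients is below the block the cache already
// processed, the chain must have reorged and assembly aborts until resolved.
package main

import "fmt"

func checkReorg(cachedBlock, chainHead uint64) error {
	if chainHead < cachedBlock {
		return fmt.Errorf("reorg detected: cached block %d is ahead of chain head %d", cachedBlock, chainHead)
	}
	return nil
}

func main() {
	fmt.Println(checkReorg(120, 118)) // reorg detected: cached block 120 is ahead of chain head 118
}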
new(big.Int).SetUint64(bc.lastPackedBlockHeight) + root, err := bc.l2Caller.GetTreeRoot(callOpts) + if err != nil { + return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + + // Check capacity and store to current + exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Get current block to check timeout after packing + nowBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(blockNum))) + if err != nil { + return fmt.Errorf("failed to get block %d: %w", blockNum, err) + } + nowBlockTime := nowBlock.Time() + + // Check timeout: if elapsed time >= batchTimeOut, must seal batch immediately + // This ensures batch is sealed before exceeding the maximum timeout configured in gov contract + timeout := false + if bc.batchTimeOut > 0 { + elapsedTime := nowBlockTime - startBlockTime + if elapsedTime >= bc.batchTimeOut { + timeout = true + log.Info("Batch timeout reached, must seal batch", "startBlock", startBlockNum, "currentBlock", blockNum, + "elapsedTime", elapsedTime, "batchTimeOut", bc.batchTimeOut) + } + } + + // Check if we need to seal batch due to capacity, block interval, or timeout + // Timeout check ensures batch is sealed before exceeding the maximum timeout + if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout { + log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout) + sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) + if err != nil { + return fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) + } + lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) + if err != nil { + return fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err) + } + blockTimestamp := lastBlock.Time() + _, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + if err != nil { + return fmt.Errorf("failed to seal batch: %w", err) + } + + // Update startBlockNum and startBlockTime for next batch + startBlockNum = blockNum + 1 + if startBlockNum <= endBlockNum { + // Update startBlock and startBlockTime for next batch's timeout calculation + startBlock, err = bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d for next batch: %w", startBlockNum, err) + } + startBlockTime = startBlock.Time() + } + log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + } + + // Pack current block (confirm and append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + } + return nil +} diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go index f820d7954..19291e04a 100644 --- a/tx-submitter/batch/batch_cache_test.go +++ b/tx-submitter/batch/batch_cache_test.go @@ -1,10 +1,16 @@ package batch import ( + "github.com/morph-l2/go-ethereum/log" + "morph-l2/tx-submitter/utils" "testing" + "time" - "github.com/stretchr/testify/require" "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + + "github.com/stretchr/testify/require" ) func init() { @@ -13,28 +19,35 @@ func init() { if 
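// --- Sketch of the background driver pattern used by the test below and by
// the rollup service: a ticker loop that syncs sealed batches from the rollup
// contract and then assembles the current batch, logging and retrying on
// failure. utils.Loop in the patch plays the role of this ticker loop.
package main

import (
	"context"
	"log"
	"time"
)

func runBatchLoop(ctx context.Context, sync, assemble func() error) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := sync(); err != nil {
				log.Printf("init and sync from rollup failed, will retry: %v", err)
			}
			if err := assemble(); err != nil {
				log.Printf("assemble current batch failed, will retry: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
	defer cancel()
	runBatchLoop(ctx, func() error { return nil }, func() error { return nil })
}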
err != nil { panic(err) } - sequencerContract, err = bindings.NewSequencer(sequencerAddr, l2Client) - if err != nil { - panic(err) - } - l2MessagePasserContract, err = bindings.NewL2ToL1MessagePasser(l2MessagePasserAddr, l2Client) - if err != nil { - panic(err) - } - govContract, err = bindings.NewGov(govAddr, l2Client) + l2Caller, err = types.NewL2Caller([]iface.L2Client{l2Client}) if err != nil { panic(err) } } +func TestBatchCacheInitSer(t *testing.T) { + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + + go utils.Loop(cache.ctx, 5*time.Second, func() { + err := cache.InitAndSyncFromRollup() + if err != nil { + log.Error("init and sync from rollup failed, wait for the next try", "error", err) + } + err = cache.AssembleCurrentBatchHeader() + if err != nil { + log.Error("Assemble current batch failed, wait for the next try", "error", err) + } + }) +} + func TestBatchCacheInit(t *testing.T) { - cache := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) err := cache.InitAndSyncFromRollup() require.NoError(t, err) } func TestBatchCacheInitByBlockRange(t *testing.T) { - cache := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) err := cache.InitFromRollupByRange() require.NoError(t, err) } diff --git a/tx-submitter/batch/batch.go b/tx-submitter/batch/batch_data.go similarity index 100% rename from tx-submitter/batch/batch.go rename to tx-submitter/batch/batch_data.go diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index b4e9ead53..776d29087 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -11,6 +11,7 @@ import ( "morph-l2/bindings/bindings" "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" @@ -24,20 +25,16 @@ var ( ) var ( - rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") - sequencerAddr = common.HexToAddress("0x5300000000000000000000000000000000000017") - l2MessagePasserAddr = common.HexToAddress("0x5300000000000000000000000000000000000001") - govAddr = common.HexToAddress("0x5300000000000000000000000000000000000004") + rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") l1ClientRpc = "http://localhost:9545" l2ClientRpc = "http://localhost:8545" l1Client, _ = ethclient.Dial(l1ClientRpc) l2Client, _ = ethclient.Dial(l2ClientRpc) - rollupContract *bindings.Rollup - sequencerContract *bindings.Sequencer - l2MessagePasserContract *bindings.L2ToL1MessagePasser - govContract *bindings.Gov + rollupContract *bindings.Rollup + + l2Caller *types.L2Caller ) func init() { @@ -46,22 +43,14 @@ func init() { if err != nil { panic(err) } - sequencerContract, err = bindings.NewSequencer(sequencerAddr, l2Client) - if err != nil { - panic(err) - } - l2MessagePasserContract, err = bindings.NewL2ToL1MessagePasser(l2MessagePasserAddr, l2Client) - if err != nil { - panic(err) - } - govContract, err = bindings.NewGov(govAddr, l2Client) + l2Caller, err = types.NewL2Caller([]iface.L2Client{l2Client}) if err != nil { panic(err) } } func Test_GetFinalizeBatchHeader(t *testing.T) { - bc := NewBatchCache(nil, 
l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(0) require.NoError(t, err) t.Log("headerBytes", hex.EncodeToString(headerBytes.Bytes())) @@ -82,12 +71,12 @@ func Test_CommitBatchParse(t *testing.T) { } func TestBatchRestartInit(t *testing.T) { - sequencerSetVerifyHash, err := sequencerContract.SequencerSetVerifyHash(nil) + sequencerSetVerifyHash, err := l2Caller.SequencerSetVerifyHash(nil) require.NoError(t, err) t.Log("sequencer set verify hash", hex.EncodeToString(sequencerSetVerifyHash[:])) ci, fi := getInfosFromContract() t.Log("commit index", ci, " ", "finalize index", fi) - bc := NewBatchCache(nil, l1Client, l2Client, rollupContract, sequencerContract, l2MessagePasserContract, govContract) + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) startBlockNum, endBlockNum, err := getFirstUnFinalizeBatchBlockNumRange(fi) require.NoError(t, err) startBlockNum = new(big.Int).Add(startBlockNum, new(big.Int).SetUint64(1)) @@ -115,7 +104,7 @@ func TestBatchRestartInit(t *testing.T) { t.Logf("First unfinalize batch index: %d, block range: %d - %d", firstUnfinalizedIndex, startBlockNum.Uint64(), endBlockNum.Uint64()) // Fetch blocks from L2 client in this range and assemble batchHeader - assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetVerifyHash, l2Client, l2MessagePasserContract) + assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetVerifyHash, l2Client, l2Caller) require.NoError(t, err, "failed to assemble batch header from L2 blocks") t.Log("assembled batch header success", hex.EncodeToString(assembledBatchHeader.Bytes())) // Verify the assembled batchHeader @@ -624,13 +613,13 @@ func assembleBatchHeaderFromL2Blocks( startBlockNum, endBlockNum uint64, sequencerSetVerifyHash common.Hash, l2Client iface.L2Client, - l2MessagePasser *bindings.L2ToL1MessagePasser, + l2Caller *types.L2Caller, ) (*BatchHeaderBytes, error) { ctx := context.Background() // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { - root, err := l2MessagePasser.GetTreeRoot(&bind.CallOpts{ + root, err := l2Caller.GetTreeRoot(&bind.CallOpts{ Context: ctx, BlockNumber: new(big.Int).SetUint64(blockNum), }) @@ -676,6 +665,6 @@ func assembleBatchHeaderFromL2Blocks( _ = batchHash // batch hash _ = reachedExpectedSize // whether reached expected size - - return &sealedBatch.BatchHeader, nil + batch := bc.createBatchHeaderFromRPCRollupBatch(sealedBatch, sequencerSetVerifyHash, blockTimestamp) + return &batch, nil } diff --git a/tx-submitter/entry.go b/tx-submitter/entry.go index bc5675fcd..d6ab302b4 100644 --- a/tx-submitter/entry.go +++ b/tx-submitter/entry.go @@ -19,6 +19,7 @@ import ( "morph-l2/tx-submitter/l1checker" "morph-l2/tx-submitter/metrics" "morph-l2/tx-submitter/services" + "morph-l2/tx-submitter/types" "morph-l2/tx-submitter/utils" "github.com/morph-l2/externalsign" @@ -203,9 +204,13 @@ func Main() func(ctx *cli.Context) error { // start rotator event indexer rotator.StartEventIndexer() - // blockmonitor + // block monitor bm := l1checker.NewBlockMonitor(cfg.BlockNotIncreasedThreshold, l1Client) + l2Caller, err := types.NewL2Caller(l2Clients) + if
err != nil { + return err + } // new rollup service sr := services.NewRollup( ctx, @@ -225,6 +230,7 @@ func Main() func(ctx *cli.Context) error { ldb, bm, eventInfoStorage, + l2Caller, ) // metrics diff --git a/tx-submitter/iface/client.go b/tx-submitter/iface/client.go index 0c275bec3..054736584 100644 --- a/tx-submitter/iface/client.go +++ b/tx-submitter/iface/client.go @@ -2,6 +2,7 @@ package iface import ( "context" + "errors" "math/big" "github.com/morph-l2/go-ethereum" @@ -32,3 +33,225 @@ type L2Client interface { GetBlockTraceByNumber(ctx context.Context, number *big.Int) (*types.BlockTrace, error) GetRollupBatchByIndex(ctx context.Context, batchIndex uint64) (*eth.RPCRollupBatch, error) } + +type L2Clients struct { + Clients []L2Client +} + +// getFirstClient returns the first available client, or an error if no clients are available +func (c *L2Clients) getFirstClient() (L2Client, error) { + if len(c.Clients) == 0 { + return nil, errors.New("no L2 clients available") + } + return c.Clients[0], nil +} + +// tryAllClients tries all clients until one succeeds, returns the last error if all fail +func (c *L2Clients) tryAllClients(fn func(L2Client) error) error { + if len(c.Clients) == 0 { + return errors.New("no L2 clients available") + } + var lastErr error + for _, client := range c.Clients { + if err := fn(client); err == nil { + return nil + } else { + lastErr = err + } + } + return lastErr +} + +// CodeAt implements bind.ContractCaller +func (c *L2Clients) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + var result []byte + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.CodeAt(ctx, contract, blockNumber) + return err + }) + return result, err +} + +// CallContract implements bind.ContractCaller +func (c *L2Clients) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + var result []byte + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.CallContract(ctx, call, blockNumber) + return err + }) + return result, err +} + +// PendingCodeAt implements bind.PendingContractCaller and bind.ContractTransactor +func (c *L2Clients) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.PendingCodeAt(ctx, account) +} + +// PendingNonceAt implements bind.ContractTransactor +func (c *L2Clients) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + client, err := c.getFirstClient() + if err != nil { + return 0, err + } + return client.PendingNonceAt(ctx, account) +} + +// SuggestGasPrice implements bind.ContractTransactor +func (c *L2Clients) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SuggestGasPrice(ctx) +} + +// SuggestGasTipCap implements bind.ContractTransactor +func (c *L2Clients) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SuggestGasTipCap(ctx) +} + +// EstimateGas implements bind.ContractTransactor +func (c *L2Clients) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + client, err := c.getFirstClient() + if err != nil { + return 0, err + } + return client.EstimateGas(ctx, call) +} + +// SendTransaction implements bind.ContractTransactor +func (c 
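// --- Generic sketch of the tryAllClients failover in iface.L2Clients above:
// walk the configured endpoints in order, return on the first success, and
// surface the last error only if every endpoint fails. Standalone version:
package main

import (
	"errors"
	"fmt"
)

func tryAll[T any](calls []func() (T, error)) (T, error) {
	var zero T
	if len(calls) == 0 {
		return zero, errors.New("no L2 clients available")
	}
	var lastErr error
	for _, call := range calls {
		v, err := call()
		if err == nil {
			return v, nil
		}
		lastErr = err
	}
	return zero, lastErr
}

func main() {
	blockNumber, err := tryAll([]func() (uint64, error){
		func() (uint64, error) { return 0, errors.New("endpoint down") }, // first node fails
		func() (uint64, error) { return 12345, nil },                     // second node answers
	})
	fmt.Println(blockNumber, err) // 12345 <nil>
}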
*L2Clients) SendTransaction(ctx context.Context, tx *types.Transaction) error { + client, err := c.getFirstClient() + if err != nil { + return err + } + return client.SendTransaction(ctx, tx) +} + +// FilterLogs implements bind.ContractFilterer +func (c *L2Clients) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { + var result []types.Log + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.FilterLogs(ctx, query) + return err + }) + return result, err +} + +// SubscribeFilterLogs implements bind.ContractFilterer +func (c *L2Clients) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SubscribeFilterLogs(ctx, query, ch) +} + +// TransactionByHash implements Client +func (c *L2Clients) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) { + err = c.tryAllClients(func(client L2Client) error { + var e error + tx, isPending, e = client.TransactionByHash(ctx, hash) + return e + }) + return tx, isPending, err +} + +// BlockByNumber implements Client +func (c *L2Clients) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + var result *types.Block + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BlockByNumber(ctx, number) + return err + }) + return result, err +} + +// NonceAt implements Client +func (c *L2Clients) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + var result uint64 + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.NonceAt(ctx, account, blockNumber) + return err + }) + return result, err +} + +// TransactionReceipt implements Client +func (c *L2Clients) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + var result *types.Receipt + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.TransactionReceipt(ctx, txHash) + return err + }) + return result, err +} + +// BalanceAt implements Client +func (c *L2Clients) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + var result *big.Int + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BalanceAt(ctx, account, blockNumber) + return err + }) + return result, err +} + +// HeaderByNumber implements Client and bind.ContractTransactor +func (c *L2Clients) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + var result *types.Header + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.HeaderByNumber(ctx, number) + return err + }) + return result, err +} + +// BlockNumber implements Client +func (c *L2Clients) BlockNumber(ctx context.Context) (uint64, error) { + var result uint64 + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BlockNumber(ctx) + return err + }) + return result, err +} + +// GetBlockTraceByNumber implements L2Client +func (c *L2Clients) GetBlockTraceByNumber(ctx context.Context, number *big.Int) (*types.BlockTrace, error) { + var result *types.BlockTrace + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.GetBlockTraceByNumber(ctx, number) + return err + }) + return 
result, err +} + +// GetRollupBatchByIndex implements L2Client +func (c *L2Clients) GetRollupBatchByIndex(ctx context.Context, batchIndex uint64) (*eth.RPCRollupBatch, error) { + var result *eth.RPCRollupBatch + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.GetRollupBatchByIndex(ctx, batchIndex) + return err + }) + return result, err +} diff --git a/tx-submitter/iface/rollup.go b/tx-submitter/iface/rollup.go index 1b2ea85a0..afa7d46a2 100644 --- a/tx-submitter/iface/rollup.go +++ b/tx-submitter/iface/rollup.go @@ -17,20 +17,36 @@ type IRollup interface { FinalizeBatch(*bind.TransactOpts, []byte) (*types.Transaction, error) BatchInsideChallengeWindow(opts *bind.CallOpts, batchIndex *big.Int) (bool, error) BatchExist(opts *bind.CallOpts, batchIndex *big.Int) (bool, error) + CommittedBatches(opts *bind.CallOpts, batchIndex *big.Int) ([32]byte, error) + BatchDataStore(opts *bind.CallOpts, batchIndex *big.Int) (struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int + }, error) + + FilterCommitBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupCommitBatchIterator, error) + FilterFinalizeBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupFinalizeBatchIterator, error) } // IL2Sequencer is the interface for the sequencer on L2 type IL2Sequencer interface { - UpdateTime(opts *bind.CallOpts) (*big.Int, error) - GetSequencerSet2() ([]common.Address, error) + SequencerSetVerifyHash(opts *bind.CallOpts) ([32]byte, error) } type IL2Gov interface { RollupEpoch(opts *bind.CallOpts) (*big.Int, error) + BatchBlockInterval(opts *bind.CallOpts) (*big.Int, error) + BatchTimeout(opts *bind.CallOpts) (*big.Int, error) } + type IL1Staking interface { IsStaker(opts *bind.CallOpts, addr common.Address) (bool, error) GetStakersBitmap(opts *bind.CallOpts, _stakers []common.Address) (*big.Int, error) GetActiveStakers(opts *bind.CallOpts) ([]common.Address, error) GetStakers(opts *bind.CallOpts) ([255]common.Address, error) } + +type IL2MessagePasser interface { + GetTreeRoot(opts *bind.CallOpts) ([32]byte, error) +} diff --git a/tx-submitter/services/batch_fetcher.go b/tx-submitter/services/batch_fetcher.go deleted file mode 100644 index 7c658ab81..000000000 --- a/tx-submitter/services/batch_fetcher.go +++ /dev/null @@ -1,39 +0,0 @@ -package services - -import ( - "context" - "fmt" - "morph-l2/tx-submitter/iface" - - "github.com/morph-l2/go-ethereum/eth" -) - -type BatchFetcher struct { - l2Clients []iface.L2Client -} - -func NewBatchFetcher(l2Clients []iface.L2Client) *BatchFetcher { - return &BatchFetcher{ - l2Clients: l2Clients, - } -} - -func (bf *BatchFetcher) GetRollupBatchByIndex(index uint64) (*eth.RPCRollupBatch, error) { - // Try each L2 client until we get a successful response - var lastErr error - for _, client := range bf.l2Clients { - batch, err := client.GetRollupBatchByIndex(context.Background(), index) - if err != nil { - lastErr = err - continue - } - // Validate that batch exists and has signatures before returning - if batch != nil && len(batch.Signatures) > 0 { - return batch, nil - } - } - if lastErr != nil { - return nil, fmt.Errorf("failed to get batch %d from any L2 client: %w", index, lastErr) - } - return nil, fmt.Errorf("batch %d not found in any L2 client", index) -} diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go index caa5471a4..44bfa30a9 100644 --- 
a/tx-submitter/services/rollup.go +++ b/tx-submitter/services/rollup.go @@ -26,6 +26,7 @@ import ( "github.com/morph-l2/go-ethereum/rpc" "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/batch" "morph-l2/tx-submitter/constants" "morph-l2/tx-submitter/db" "morph-l2/tx-submitter/event" @@ -76,7 +77,7 @@ type Rollup struct { // collectedL1FeeSum collectedL1FeeSum float64 // batchcache - batchCache *types.BatchCache + batchCache *batch.BatchCache bm *l1checker.BlockMonitor eventInfoStorage *event.EventInfoStorage reorgDetector iface.IReorgDetector @@ -102,8 +103,8 @@ func NewRollup( ldb *db.Db, bm *l1checker.BlockMonitor, eventInfoStorage *event.EventInfoStorage, + l2Caller *types.L2Caller, ) *Rollup { - batchFetcher := NewBatchFetcher(l2Clients) reorgDetector := NewReorgDetector(l1, metrics) r := &Rollup{ ctx: ctx, @@ -121,7 +122,7 @@ func NewRollup( cfg: cfg, signer: ethtypes.LatestSignerForChainID(chainId), externalRsaPriv: rsaPriv, - batchCache: types.NewBatchCache(batchFetcher), + batchCache: batch.NewBatchCache(nil, l1, l2Clients, rollup, l2Caller), ldb: ldb, bm: bm, eventInfoStorage: eventInfoStorage, @@ -227,8 +228,18 @@ func (r *Rollup) Start() error { } }) - if r.cfg.Finalize { + go utils.Loop(r.ctx, 5*time.Second, func() { + err = r.batchCache.InitAndSyncFromRollup() + if err != nil { + log.Error("init and sync from rollup failed, wait for the next try", "error", err) + } + err = r.batchCache.AssembleCurrentBatchHeader() + if err != nil { + log.Error("Assemble current batch failed, wait for the next try", "error", err) + } + }) + if r.cfg.Finalize { go utils.Loop(r.ctx, r.cfg.FinalizeInterval, func() { r.rollupFinalizeMu.Lock() defer r.rollupFinalizeMu.Unlock() @@ -454,9 +465,9 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re // Calculate and update L1 fee metrics batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 - batch, ok := r.batchCache.Get(batchIndex) + rollupBatch, ok := r.batchCache.Get(batchIndex) if ok { - collectedL1Fee := new(big.Float).Quo(new(big.Float).SetInt(batch.CollectedL1Fee.ToInt()), new(big.Float).SetInt(big.NewInt(params.Ether))) + collectedL1Fee := new(big.Float).Quo(new(big.Float).SetInt(rollupBatch.CollectedL1Fee.ToInt()), new(big.Float).SetInt(big.NewInt(params.Ether))) collectedL1FeeFloat, _ := collectedL1Fee.Float64() // Update metrics @@ -473,7 +484,7 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re "batch_index", batchIndex, "l1_fee_eth", collectedL1FeeFloat) } else { - log.Warn("batch not found in cache", "batch_index", batchIndex) + log.Warn("rollupBatch not found in cache", "batch_index", batchIndex) } } else if method == constants.MethodFinalizeBatch { r.finalizeFeeSum += txFeeFloat @@ -763,11 +774,11 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa } } else { // Transaction succeeded // Get current block number for confirmation count only for successful transactions - currentBlock, err := r.L1Client.BlockNumber(context.Background()) + currentBlock, err = r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("get current block number error: %w", err) } - confirmations := currentBlock - status.receipt.BlockNumber.Uint64() + confirmations = currentBlock - status.receipt.BlockNumber.Uint64() if method == constants.MethodCommitBatch { batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 @@ -1432,7 +1443,6 @@ func GetEpoch(addr common.Address, clients []iface.L2Client) (*big.Int, error) { // query sequencer set 
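// --- Sketch of the fee conversion in updateFeeMetrics above: CollectedL1Fee
// is a wei-denominated integer, turned into a float ETH amount by dividing by
// params.Ether (1e18) with big.Float so that large values do not overflow.
package main

import (
	"fmt"
	"math/big"
)

func weiToEther(wei *big.Int) float64 {
	ether := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(1e18))
	f, _ := ether.Float64()
	return f
}

func main() {
	wei, _ := new(big.Int).SetString("1500000000000000000", 10) // 1.5 ETH in wei
	fmt.Println(weiToEther(wei))                                // 1.5
}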
update time from sequencer contract on l2 func GetSequencerSetUpdateTime(addr common.Address, clients []iface.L2Client) (*big.Int, error) { - if len(clients) < 1 { return nil, fmt.Errorf("no client to query sequencer set update time") } diff --git a/tx-submitter/types/batch_cache.go b/tx-submitter/types/batch_cache.go deleted file mode 100644 index 9be2d6031..000000000 --- a/tx-submitter/types/batch_cache.go +++ /dev/null @@ -1,97 +0,0 @@ -package types - -import ( - "sync" - - "morph-l2/tx-submitter/iface" - - "github.com/morph-l2/go-ethereum/eth" - "github.com/morph-l2/go-ethereum/log" -) - -type BatchCache struct { - m sync.RWMutex - batchCache map[uint64]*eth.RPCRollupBatch - fetcher iface.BatchFetcher -} - -// NewBatchCache creates a new batch cache instance -func NewBatchCache(fetcher iface.BatchFetcher) *BatchCache { - return &BatchCache{ - batchCache: make(map[uint64]*eth.RPCRollupBatch), - fetcher: fetcher, - } -} - -// Get retrieves a batch from the cache by its index -// If not found in cache, tries to fetch from node -func (b *BatchCache) Get(batchIndex uint64) (*eth.RPCRollupBatch, bool) { - // First try to get from cache - b.m.RLock() - batch, ok := b.batchCache[batchIndex] - b.m.RUnlock() - - if ok { - return batch, true - } - - // If not in cache, try to fetch from node - if b.fetcher != nil { - fetchedBatch, err := b.fetcher.GetRollupBatchByIndex(batchIndex) - if err != nil { - log.Warn("Failed to fetch batch from node", - "index", batchIndex, - "error", err) - return nil, false - } - - // Validate batch before caching - batch must exist and have signatures - if fetchedBatch != nil && len(fetchedBatch.Signatures) > 0 { - // Store valid batch in cache for future use - b.m.Lock() - b.batchCache[batchIndex] = fetchedBatch - b.m.Unlock() - - return fetchedBatch, true - } else if fetchedBatch != nil { - // Batch exists but doesn't have signatures, don't cache it - log.Debug("Batch validation failed - no signatures", - "batch_index", batchIndex, - "found", fetchedBatch != nil, - "has_signatures", len(fetchedBatch.Signatures) > 0) - return fetchedBatch, true - } - } - - return nil, false -} - -func (b *BatchCache) Set(batchIndex uint64, batch *eth.RPCRollupBatch) { - // Validate batch before caching - batch must exist and have signatures - if batch == nil || len(batch.Signatures) == 0 { - log.Debug("Refusing to cache invalid batch", - "batch_index", batchIndex, - "exists", batch != nil, - "has_signatures", batch != nil && len(batch.Signatures) > 0) - return - } - - b.m.Lock() - defer b.m.Unlock() - - b.batchCache[batchIndex] = batch -} - -func (b *BatchCache) Delete(batchIndex uint64) { - b.m.Lock() - defer b.m.Unlock() - - delete(b.batchCache, batchIndex) -} - -// Clear removes all entries from the batch cache -func (bc *BatchCache) Clear() { - bc.m.Lock() - defer bc.m.Unlock() - bc.batchCache = make(map[uint64]*eth.RPCRollupBatch) -} diff --git a/tx-submitter/types/batch_cache_test.go b/tx-submitter/types/batch_cache_test.go deleted file mode 100644 index ae1449fd0..000000000 --- a/tx-submitter/types/batch_cache_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package types - -import ( - "sync" - "testing" - - "github.com/morph-l2/go-ethereum/eth" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// MockBatchFetcher implements the BatchFetcher interface for testing -type MockBatchFetcher struct { - mock.Mock -} - -func (m *MockBatchFetcher) GetRollupBatchByIndex(index uint64) (*eth.RPCRollupBatch, error) { - args := m.Called(index) - if args.Get(0) == 
nil { - return nil, args.Error(1) - } - return args.Get(0).(*eth.RPCRollupBatch), args.Error(1) -} - -func TestBatchCache(t *testing.T) { - t.Run("Get non-existent batch - fetch from node", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - expectedBatch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - mockFetcher.On("GetRollupBatchByIndex", uint64(1)).Return(expectedBatch, nil) - - batch, ok := cache.Get(1) - assert.True(t, ok) - assert.Equal(t, expectedBatch, batch) - - mockFetcher.AssertExpectations(t) - - // Second get should use cache - batch, ok = cache.Get(1) - assert.True(t, ok) - assert.Equal(t, expectedBatch, batch) - }) - - t.Run("Get non-existent batch - fetch fails", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(nil, assert.AnError).Once() - - batch, ok := cache.Get(2) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set and Get batch", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - - // Add this line to set up the mock expectation - mockFetcher.On("GetRollupBatchByIndex", uint64(3)).Return(batch, nil).Maybe() - - cache.Set(3, batch) - - gotBatch, ok := cache.Get(3) - assert.True(t, ok) - assert.Equal(t, batch, gotBatch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Delete batch", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - - cache.Set(4, batch) - gotBatch, ok := cache.Get(4) - assert.True(t, ok) - assert.Equal(t, batch, gotBatch) - - cache.Delete(4) - - // Setup mock for fetching after delete - mockFetcher.On("GetRollupBatchByIndex", uint64(4)).Return(nil, assert.AnError).Once() - - gotBatch, ok = cache.Get(4) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Clear cache", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch1 := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature1"), - }, - }, - } - batch2 := &eth.RPCRollupBatch{ - Version: 2, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature2"), - }, - }, - } - - cache.Set(5, batch1) - cache.Set(6, batch2) - - cache.Clear() - - // Setup mocks for fetching after clear - mockFetcher.On("GetRollupBatchByIndex", uint64(5)).Return(nil, assert.AnError).Once() - mockFetcher.On("GetRollupBatchByIndex", uint64(6)).Return(nil, assert.AnError).Once() - - gotBatch, ok := cache.Get(5) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - gotBatch, ok = cache.Get(6) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Concurrent access", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Pre-set a batch to avoid nil pointer in concurrent access - testBatch := &eth.RPCRollupBatch{ - Version: 7, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - 
cache.Set(7, testBatch) - - // Setup mock expectation to allow any number of calls - mockFetcher.On("GetRollupBatchByIndex", uint64(7)).Return(testBatch, nil).Maybe() - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - batch, ok := cache.Get(7) - if ok && batch != nil { - cache.Set(7, batch) - } - }() - } - - wg.Wait() - - // Final validation of cache state - batch, ok := cache.Get(7) - assert.True(t, ok) - assert.NotNil(t, batch) - assert.Equal(t, testBatch.Version, batch.Version) - }) -} diff --git a/tx-submitter/types/batch_cache_validation_test.go b/tx-submitter/types/batch_cache_validation_test.go deleted file mode 100644 index c809b5e5e..000000000 --- a/tx-submitter/types/batch_cache_validation_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package types - -import ( - "testing" - - "github.com/morph-l2/go-ethereum/common" - "github.com/morph-l2/go-ethereum/eth" - "github.com/stretchr/testify/assert" -) - -func TestBatchValidation(t *testing.T) { - t.Run("Get - Valid batch with signatures is cached", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create valid batch with signatures - validBatch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signer: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Signature: []byte("test-signature"), - }, - }, - } - - mockFetcher.On("GetRollupBatchByIndex", uint64(1)).Return(validBatch, nil).Once() - - // Get should return the batch and cache it - batch, ok := cache.Get(1) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - assert.Equal(t, 1, len(batch.Signatures)) - - // Second get should use cache without calling fetcher - batch, ok = cache.Get(1) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Get - Invalid batch without signatures is not cached", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create invalid batch without signatures - invalidBatch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{}, // Empty signatures - } - - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(invalidBatch, nil).Once() - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(invalidBatch, nil).Once() // Second call because not cached - - // Get should return the batch but not cache it - batch, ok := cache.Get(2) - assert.True(t, ok) // Still returns true because batch was found, just not cached - assert.Equal(t, invalidBatch, batch) - assert.Equal(t, 0, len(batch.Signatures)) - - // Second get should call fetcher again since it wasn't cached - batch, ok = cache.Get(2) - assert.True(t, ok) - assert.Equal(t, invalidBatch, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set - Valid batch with signatures is stored", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create valid batch with signatures - validBatch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signer: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Signature: []byte("test-signature"), - }, - }, - } - - // Set should store the batch - cache.Set(3, validBatch) - - // Get should retrieve from cache - batch, ok := cache.Get(3) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - }) - - t.Run("Set - Invalid batch without signatures is not stored", func(t *testing.T) { - 
mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create invalid batch without signatures - invalidBatch := &eth.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{}, // Empty signatures - } - - // Set should not store the batch - cache.Set(4, invalidBatch) - - // Setup mock for fetching since batch shouldn't be in cache - mockFetcher.On("GetRollupBatchByIndex", uint64(4)).Return(nil, assert.AnError).Once() - - // Get should try to fetch from node and fail - batch, ok := cache.Get(4) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set - Nil batch is not stored", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Set with nil batch should not store anything - cache.Set(5, nil) - - // Setup mock for fetching since nothing should be in cache - mockFetcher.On("GetRollupBatchByIndex", uint64(5)).Return(nil, assert.AnError).Once() - - // Get should try to fetch from node and fail - batch, ok := cache.Get(5) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) -} diff --git a/tx-submitter/types/l2Caller.go b/tx-submitter/types/l2Caller.go new file mode 100644 index 000000000..93e670c38 --- /dev/null +++ b/tx-submitter/types/l2Caller.go @@ -0,0 +1,67 @@ +package types + +import ( + "math/big" + + "morph-l2/bindings/bindings" + "morph-l2/bindings/predeploys" + "morph-l2/tx-submitter/iface" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" +) + +type L2Caller struct { + l2Clients *iface.L2Clients + sequencerContract *bindings.SequencerCaller + l2MessagePasserContract *bindings.L2ToL1MessagePasserCaller + govContract *bindings.GovCaller +} + +func NewL2Caller(l2Clients []iface.L2Client) (*L2Caller, error) { + clients := &iface.L2Clients{Clients: l2Clients} + + // Initialize Sequencer contract + sequencerContract, err := bindings.NewSequencerCaller(predeploys.SequencerAddr, clients) + if err != nil { + return nil, err + } + + // Initialize L2ToL1MessagePasser contract + l2MessagePasserContract, err := bindings.NewL2ToL1MessagePasserCaller(predeploys.L2ToL1MessagePasserAddr, clients) + if err != nil { + return nil, err + } + + // Initialize Gov contract + govContract, err := bindings.NewGovCaller(predeploys.GovAddr, clients) + if err != nil { + return nil, err + } + + return &L2Caller{ + l2Clients: clients, + sequencerContract: sequencerContract, + l2MessagePasserContract: l2MessagePasserContract, + govContract: govContract, + }, nil +} + +// SequencerSetVerifyHash gets the sequencer set verify hash from the Sequencer contract +func (c *L2Caller) SequencerSetVerifyHash(opts *bind.CallOpts) ([32]byte, error) { + return c.sequencerContract.SequencerSetVerifyHash(opts) +} + +// GetTreeRoot gets the tree root from the L2ToL1MessagePasser contract +func (c *L2Caller) GetTreeRoot(opts *bind.CallOpts) ([32]byte, error) { + return c.l2MessagePasserContract.GetTreeRoot(opts) +} + +// BatchBlockInterval gets the batch block interval from the Gov contract +func (c *L2Caller) BatchBlockInterval(opts *bind.CallOpts) (*big.Int, error) { + return c.govContract.BatchBlockInterval(opts) +} + +// BatchTimeout gets the batch timeout from the Gov contract +func (c *L2Caller) BatchTimeout(opts *bind.CallOpts) (*big.Int, error) { + return c.govContract.BatchTimeout(opts) +} From 685fcd212e66337023c66938fb3465e84cfb233c Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Tue, 3 Feb 2026 11:38:31 +0800 Subject: [PATCH 04/12] update batch --- 
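// --- Usage sketch for the new types.L2Caller above (illustrative; assumes a
// reachable L2 RPC endpoint, as in the tests): one façade over the Sequencer,
// Gov, and L2ToL1MessagePasser predeploys, backed by the failover L2Clients.
package main

import (
	"fmt"

	"morph-l2/tx-submitter/iface"
	"morph-l2/tx-submitter/types"

	"github.com/morph-l2/go-ethereum/ethclient"
)

func main() {
	l2Client, err := ethclient.Dial("http://localhost:8545") // endpoint from the tests above
	if err != nil {
		panic(err)
	}
	caller, err := types.NewL2Caller([]iface.L2Client{l2Client})
	if err != nil {
		panic(err)
	}
	interval, err := caller.BatchBlockInterval(nil) // gov-configured blocks per batch
	if err != nil {
		panic(err)
	}
	fmt.Println("batch block interval:", interval)
}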
tx-submitter/batch/batch_cache.go | 58 +++++++++++++++++++------- tx-submitter/batch/batch_cache_test.go | 18 ++++++-- tx-submitter/services/rollup.go | 42 +++++++++---------- 3 files changed, 77 insertions(+), 41 deletions(-) diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 98d887b7b..86593abbb 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -7,9 +7,10 @@ import ( "errors" "fmt" "math/big" + "sync" + "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" - "sync" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" @@ -78,8 +79,14 @@ func NewBatchCache( // Default implementation: always returns true (use V1 version) isBatchUpgraded = func(uint64) bool { return true } } + ctx := context.Background() + ifL2Clients := iface.L2Clients{Clients: l2Clients} + _, err := ifL2Clients.BlockNumber(ctx) + if err != nil { + panic(err) + } return &BatchCache{ - ctx: context.Background(), + ctx: ctx, initDone: false, sealedBatches: make(map[uint64]*eth.RPCRollupBatch), parentBatchHeader: nil, @@ -130,7 +137,11 @@ func (bc *BatchCache) InitFromRollupByRange() error { } bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() if err != nil { - return fmt.Errorf("get last block number err: %w", err) + store, err := bc.rollupContract.BatchDataStore(nil, fi) + if err != nil { + return err + } + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() } bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() if err != nil { @@ -169,13 +180,23 @@ func (bc *BatchCache) InitAndSyncFromRollup() error { bc.prevStateRoot = parentStateRoot // The current batch's prevStateRoot is the parent batch's postStateRoot bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() if err != nil { - return fmt.Errorf("get last block number err: %w", err) + store, err := bc.rollupContract.BatchDataStore(nil, fi) + if err != nil { + return err + } + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() } bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() if err != nil { return fmt.Errorf("get total l1 message popped err: %w", err) } - log.Info("Start assemble batch", "start batch", fi.Uint64()+1, "end batch", ci.Uint64()) + + log.Info("Start assemble batch", + "startBatch", fi.Uint64()+1, + "endBatch", ci.Uint64(), + "startNum", bc.lastPackedBlockHeight, + "prevStateRoot", bc.prevStateRoot.String(), + ) for i := fi.Uint64() + 1; i < ci.Uint64(); i++ { batchIndex := new(big.Int).SetUint64(i) startNum, endNum, err := bc.getBatchBlockRange(batchIndex) @@ -333,7 +354,7 @@ func (bc *BatchCache) GetLatestSealedBatchIndex() uint64 { // Need to call PackCurrentBlock to confirm and append func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdrawRoot common.Hash) (bool, error) { if len(bc.l2Clients.Clients) == 0 { - return false, fmt.Errorf("L2 client is nil") + return false, fmt.Errorf("l2 client is nil") } // Verify block number continuity @@ -423,7 +444,7 @@ func (bc *BatchCache) PackCurrentBlock(blockNumber uint64) error { bc.mu.Lock() defer bc.mu.Unlock() - // If current block is empty, return directly + // If the current block is empty, return directly if len(bc.currentBlockContext) == 0 { return nil // nothing to pack } @@ -540,7 +561,7 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta parentBatchHeaderBytes = hexutil.Bytes(*bc.parentBatchHeader) } - // Get version from batch header + // Get the version from batch header 
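// --- Sketch of the fallback added in this patch: V0 batch headers carry no
// last block number, so when decoding it fails the cache now reads the block
// number recorded on-chain in the rollup contract's batchDataStore instead.
// The types below are simplified stand-ins for BatchHeaderBytes and the
// BatchDataStore contract call.
package main

import (
	"errors"
	"fmt"
)

type header struct{ last *uint64 } // nil models a V0 header without the field

func (h header) LastBlockNumber() (uint64, error) {
	if h.last == nil {
		return 0, errors.New("header version has no last block number")
	}
	return *h.last, nil
}

func lastPackedHeight(h header, storeBlockNumber func() (uint64, error)) (uint64, error) {
	if n, err := h.LastBlockNumber(); err == nil {
		return n, nil
	}
	return storeBlockNumber() // fall back to BatchDataStore(batchIndex).BlockNumber
}

func main() {
	n, _ := lastPackedHeight(header{}, func() (uint64, error) { return 4321, nil })
	fmt.Println(n) // 4321
}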
version, err := batchHeader.Version() if err != nil { return 0, common.Hash{}, false, fmt.Errorf("failed to get batch version: %w", err) @@ -582,6 +603,8 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta bc.parentBatchHeader = &batchHeader bc.prevStateRoot = bc.postStateRoot + bc.logSealedBatch(batchHeader, batchHash) + // Reset currently accumulated batch data bc.batchData = NewBatchData() @@ -643,7 +666,7 @@ func (bc *BatchCache) handleBatchSealing(blockTimestamp uint64) ([]byte, common. } } - // Fall back to old version + // Fall back to the old version compressedPayload, err = CompressBatchBytes(bc.batchData.TxsPayload()) if err != nil { return nil, common.Hash{}, fmt.Errorf("failed to compress payload: %w", err) @@ -954,7 +977,11 @@ func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { } startBlockTime = startBlock.Time() } - log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + index, err := bc.parentBatchHeader.BatchIndex() + if err != nil { + return err + } + log.Info("seal batch success", "batchIndex", index, "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) } // Pack current block (confirm and append to batch) @@ -1027,7 +1054,7 @@ func (bc *BatchCache) logSealedBatch(batchHeader BatchHeaderBytes, batchHash com totalL1MessagePopped, _ := batchHeader.TotalL1MessagePopped() dataHash, _ := batchHeader.DataHash() parentBatchHash, _ := batchHeader.ParentBatchHash() - log.Info(fmt.Sprintf("===batchIndex: %d \n===L1MessagePopped: %d \n===TotalL1MessagePopped: %d \n===dataHash: %x \n===blockNum: %d \n===ParentBatchHash: %x \n", + log.Info(fmt.Sprintf("===batchIndex: %d \n===L1MessagePopped: %d \n===TotalL1MessagePopped: %d \n===dataHash: %x \n===blockCount: %d \n===ParentBatchHash: %x \n", batchIndex, l1MessagePopped, totalL1MessagePopped, @@ -1063,7 +1090,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { currentBlockNum := bc.currentBlockNumber // Fetch blocks from L2 client in the specified range and accumulate to batch - for blockNum := currentBlockNum; blockNum <= endBlockNum; blockNum++ { + for blockNum := currentBlockNum + 1; blockNum <= endBlockNum; blockNum++ { callOpts.BlockNumber = new(big.Int).SetUint64(bc.lastPackedBlockHeight) root, err := bc.l2Caller.GetTreeRoot(callOpts) if err != nil { @@ -1076,7 +1103,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) } - // Get current block to check timeout after packing + // Get the current block to check timeout after packing nowBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(blockNum))) if err != nil { return fmt.Errorf("failed to get block %d: %w", blockNum, err) @@ -1096,7 +1123,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { } // Check if we need to seal batch due to capacity, block interval, or timeout - // Timeout check ensures batch is sealed before exceeding the maximum timeout + // check ensures batch is sealed before exceeding the maximum timeout if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout { log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout) sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) @@ -1108,7 +1135,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { return fmt.Errorf("failed to get last 
block %d: %w", bc.lastPackedBlockHeight, err) } blockTimestamp := lastBlock.Time() - _, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + _, _, _, err = bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) if err != nil { return fmt.Errorf("failed to seal batch: %w", err) } @@ -1123,7 +1150,6 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { } startBlockTime = startBlock.Time() } - log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) } // Pack current block (confirm and append to batch) diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go index 19291e04a..b7cc73a76 100644 --- a/tx-submitter/batch/batch_cache_test.go +++ b/tx-submitter/batch/batch_cache_test.go @@ -1,15 +1,17 @@ package batch import ( - "github.com/morph-l2/go-ethereum/log" - "morph-l2/tx-submitter/utils" + "os" + "os/signal" "testing" "time" "morph-l2/bindings/bindings" "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" + "morph-l2/tx-submitter/utils" + "github.com/morph-l2/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -25,19 +27,27 @@ func init() { } } -func TestBatchCacheInitSer(t *testing.T) { +func TestBatchCacheInitServer(t *testing.T) { cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) go utils.Loop(cache.ctx, 5*time.Second, func() { - err := cache.InitAndSyncFromRollup() + err := cache.InitFromRollupByRange() if err != nil { log.Error("init and sync from rollup failed, wait for the next try", "error", err) } + cache.batchTimeOut = 60 err = cache.AssembleCurrentBatchHeader() if err != nil { log.Error("Assemble current batch failed, wait for the next try", "error", err) } }) + + // Catch CTRL-C to ensure a graceful shutdown. + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + // Wait until the interrupt signal is received from an OS signal. 
+ <-interrupt } func TestBatchCacheInit(t *testing.T) { diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go index 44bfa30a9..5de0a05a1 100644 --- a/tx-submitter/services/rollup.go +++ b/tx-submitter/services/rollup.go @@ -229,7 +229,7 @@ func (r *Rollup) Start() error { }) go utils.Loop(r.ctx, 5*time.Second, func() { - err = r.batchCache.InitAndSyncFromRollup() + err = r.batchCache.InitFromRollupByRange() if err != nil { log.Error("init and sync from rollup failed, wait for the next try", "error", err) } @@ -1244,28 +1244,28 @@ func (r *Rollup) logTxInfo(tx *ethtypes.Transaction, batchIndex uint64) { } func (r *Rollup) buildSignatureInput(batch *eth.RPCRollupBatch) (*bindings.IRollupBatchSignatureInput, error) { - blsSignatures := batch.Signatures - if len(blsSignatures) == 0 { - return nil, fmt.Errorf("invalid batch signature") - } - signers := make([]common.Address, len(blsSignatures)) - for i, bz := range blsSignatures { - if len(bz.Signature) > 0 { - signers[i] = bz.Signer - } - } - - // query bitmap of signers - bm, err := r.Staking.GetStakersBitmap(nil, signers) - if err != nil { - return nil, fmt.Errorf("query stakers bitmap error:%v", err) - } - if bm == nil { - return nil, errors.New("stakers bitmap is nil") - } + //blsSignatures := batch.Signatures + //if len(blsSignatures) == 0 { + // return nil, fmt.Errorf("invalid batch signature") + //} + //signers := make([]common.Address, len(blsSignatures)) + //for i, bz := range blsSignatures { + // if len(bz.Signature) > 0 { + // signers[i] = bz.Signer + // } + //} + // + //// query bitmap of signers + //bm, err := r.Staking.GetStakersBitmap(nil, signers) + //if err != nil { + // return nil, fmt.Errorf("query stakers bitmap error:%v", err) + //} + //if bm == nil { + // return nil, errors.New("stakers bitmap is nil") + //} sigData := bindings.IRollupBatchSignatureInput{ - SignedSequencersBitmap: bm, + SignedSequencersBitmap: common.Big0, SequencerSets: batch.CurrentSequencerSetBytes, Signature: []byte("0x"), } From 78fc9285064dc364dc69fae8f7ea8ca10ed5addc Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Tue, 3 Feb 2026 23:11:08 +0800 Subject: [PATCH 05/12] update submitter batch generate --- tx-submitter/batch/batch_cache.go | 185 ++++++------------ tx-submitter/batch/batch_cache_test.go | 22 +++ tx-submitter/batch/batch_restart_test.go | 37 ++-- tx-submitter/batch/commit_test.go | 233 +++++++++++++++++++++++ tx-submitter/flags/flags.go | 2 +- tx-submitter/services/rollup.go | 72 +++---- tx-submitter/types/l2Caller.go | 20 ++ 7 files changed, 376 insertions(+), 195 deletions(-) create mode 100644 tx-submitter/batch/commit_test.go diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 86593abbb..04be7c977 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -4,11 +4,14 @@ import ( "bytes" "context" "encoding/binary" + "encoding/hex" "errors" "fmt" "math/big" "sync" + "github.com/morph-l2/go-ethereum/crypto" + "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" @@ -29,6 +32,7 @@ type BatchCache struct { // key: batchIndex, value: RPCRollupBatch sealedBatches map[uint64]*eth.RPCRollupBatch + batchDataHash map[uint64]common.Hash // Currently accumulating batch data (referencing node's BatchingCache) // Parent batch information @@ -112,10 +116,7 @@ func NewBatchCache( } } -func (bc *BatchCache) InitFromRollupByRange() error { - if bc.initDone { - return nil - } +func (bc *BatchCache) Init() error { err := bc.updateBatchConfigFromGov() if err 
!= nil { return err @@ -143,12 +144,23 @@ func (bc *BatchCache) InitFromRollupByRange() error { } bc.lastPackedBlockHeight = store.BlockNumber.Uint64() } + bc.currentBlockNumber = bc.lastPackedBlockHeight bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() if err != nil { return fmt.Errorf("get total l1 message popped err: %w", err) } log.Info("Start assemble batch", "start batch", fi.Uint64()+1, "end batch", ci.Uint64()) + return nil +} +func (bc *BatchCache) InitFromRollupByRange() error { + if bc.initDone { + return nil + } + err := bc.Init() + if err != nil { + return err + } err = bc.assembleUnFinalizeBatchHeaderFromL2Blocks() if err != nil { return err @@ -162,42 +174,21 @@ func (bc *BatchCache) InitAndSyncFromRollup() error { if bc.initDone { return nil } - ci, fi, err := bc.getBatchStatusFromContract() - if err != nil { - return fmt.Errorf("get batch status from rollup failed err: %w", err) - } - headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(fi.Uint64()) - if err != nil { - return fmt.Errorf("get last finalize batch header err: %w", err) - } - parentStateRoot, err := headerBytes.PostStateRoot() + err := bc.Init() if err != nil { - return fmt.Errorf("get post state root err: %w", err) - } - // Initialize BatchCache parent batch information - // prevStateRoot should be the parent batch's postStateRoot - bc.parentBatchHeader = headerBytes - bc.prevStateRoot = parentStateRoot // The current batch's prevStateRoot is the parent batch's postStateRoot - bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() - if err != nil { - store, err := bc.rollupContract.BatchDataStore(nil, fi) - if err != nil { - return err - } - bc.lastPackedBlockHeight = store.BlockNumber.Uint64() + return err } - bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() + ci, fi, err := bc.getBatchStatusFromContract() if err != nil { - return fmt.Errorf("get total l1 message popped err: %w", err) + return fmt.Errorf("get batch status from rollup failed err: %w", err) } - log.Info("Start assemble batch", "startBatch", fi.Uint64()+1, "endBatch", ci.Uint64(), "startNum", bc.lastPackedBlockHeight, "prevStateRoot", bc.prevStateRoot.String(), ) - for i := fi.Uint64() + 1; i < ci.Uint64(); i++ { + for i := fi.Uint64() + 1; i <= ci.Uint64(); i++ { batchIndex := new(big.Int).SetUint64(i) startNum, endNum, err := bc.getBatchBlockRange(batchIndex) if err != nil { @@ -213,7 +204,7 @@ func (bc *BatchCache) InitAndSyncFromRollup() error { } correct, err := bc.checkBatchHashCorrect(batchIndex, batchHash) if err != nil { - return fmt.Errorf("check batch hash failed, err: %w", err) + return fmt.Errorf("check batch hash failed, err: %w, batchIndex %v, batchHash %v", err, batchIndex, batchHash.String()) } if !correct { return fmt.Errorf("batch hash check failed: batch index %d is incorrect", i) @@ -246,6 +237,10 @@ func (bc *BatchCache) checkBatchHashCorrect(batchIndex *big.Int, batchHash commo return false, err } if !bytes.Equal(commitBatchHash[:], batchHash.Bytes()) { + log.Error("check commit batch hash failed", + "index", batchIndex.String(), + "committed", hex.EncodeToString(commitBatchHash[:]), + "generated", batchHash.String()) return false, nil } return true, nil @@ -512,19 +507,19 @@ func (bc *BatchCache) FetchAndCacheHeader(blockNumber uint64, withdrawRoot commo // - error: returns error if sealing fails // // Note: Sealed batch will be stored in BatchCache's sealedBatches, not sent anywhere -func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimestamp
uint64) (uint64, common.Hash, bool, error) { +func (bc *BatchCache) SealBatch(sequencerSets []byte, blockTimestamp uint64) (uint64, BatchHeaderBytes, bool, error) { bc.mu.Lock() defer bc.mu.Unlock() // Ensure batch data is not empty if bc.batchData == nil || bc.batchData.IsEmpty() { - return 0, common.Hash{}, false, errors.New("failed to seal batch: batch cache is empty") + return 0, BatchHeaderBytes{}, false, errors.New("failed to seal batch: batch cache is empty") } // Compress data and calculate dataHash compressedPayload, batchDataHash, err := bc.handleBatchSealing(blockTimestamp) if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to handle batch sealing: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to handle batch sealing: %w", err) } // Check if sealed data size reaches expected value @@ -537,22 +532,22 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta // Generate blob sidecar sidecar, err := MakeBlobTxSidecar(compressedPayload) if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to create blob sidecar: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to create blob sidecar: %w", err) } // Create batch header - batchHeader := bc.createBatchHeader(batchDataHash, sidecar, sequencerSetVerifyHash, blockTimestamp) + batchHeader := bc.createBatchHeader(batchDataHash, sidecar, crypto.Keccak256Hash(sequencerSets), blockTimestamp) // Calculate batch hash batchHash, err := batchHeader.Hash() if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to hash batch header: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to hash batch header: %w", err) } // Get batch index batchIndex, err := batchHeader.BatchIndex() if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to get batch index: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to get batch index: %w", err) } // Build parent batch header bytes @@ -564,18 +559,18 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta // Get the version from batch header version, err := batchHeader.Version() if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to get batch version: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to get batch version: %w", err) } // Build block contexts from batch data (encode block contexts) blockContextsData, err := bc.batchData.Encode() if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("failed to encode batch data: %w", err) + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to encode batch data: %w", err) } blockContexts := hexutil.Bytes(blockContextsData) // Convert sequencerSetVerifyHash to bytes - currentSequencerSetBytes := hexutil.Bytes(sequencerSetVerifyHash.Bytes()) + currentSequencerSetBytes := hexutil.Bytes(sequencerSets) // Get L1 message count from batch data numL1Messages := bc.batchData.l1TxNum @@ -596,7 +591,6 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta Signatures: []eth.RPCBatchSignature{}, CollectedL1Fee: nil, } - bc.sealedBatches[batchIndex] = sealedBatch // Update parent batch information for next batch @@ -608,7 +602,7 @@ func (bc *BatchCache) SealBatch(sequencerSetVerifyHash common.Hash, blockTimesta // Reset currently accumulated batch data bc.batchData = NewBatchData() - return batchIndex, batchHash, reachedExpectedSize, nil + return batchIndex, batchHeader, reachedExpectedSize, nil } // 
CheckBatchSizeReached checks if the specified batch's data size reaches expected value @@ -719,64 +713,6 @@ func (bc *BatchCache) createBatchHeader(dataHash common.Hash, sidecar *ethtypes. return batchHeaderV0.Bytes() } -// createBatchHeaderFromRPCRollupBatch reconstructs BatchHeaderBytes from RPCRollupBatch -func (bc *BatchCache) createBatchHeaderFromRPCRollupBatch(batch *eth.RPCRollupBatch, sequencerSetVerifyHash common.Hash, blockTimestamp uint64) BatchHeaderBytes { - // Extract sequencer set verify hash from CurrentSequencerSetBytes - if len(batch.CurrentSequencerSetBytes) >= 32 { - sequencerSetVerifyHash = common.BytesToHash(batch.CurrentSequencerSetBytes[:32]) - } - - // Get parent batch info - var parentBatchHeaderTotalL1 uint64 - var parentBatchIndex uint64 - var parentBatchHash common.Hash - - if len(batch.ParentBatchHeader) > 0 { - parentHeader := BatchHeaderBytes(batch.ParentBatchHeader) - parentBatchHeaderTotalL1, _ = parentHeader.TotalL1MessagePopped() - parentBatchIndex, _ = parentHeader.BatchIndex() - parentBatchHash, _ = parentHeader.Hash() - } - - // Calculate L1 message popped from NumL1Messages - l1MessagePopped := uint64(batch.NumL1Messages) - - // Get data hash from sidecar blob (simplified - in practice, this should be stored) - dataHash := common.Hash{} - if len(batch.Sidecar.Blobs) > 0 && len(batch.Sidecar.Blobs[0]) > 0 { - // Use a hash of the blob as data hash approximation - dataHash = common.BytesToHash(batch.Sidecar.Blobs[0][:32]) - } - - blobHashes := []common.Hash{EmptyVersionedHash} - if len(batch.Sidecar.Blobs) > 0 { - blobHashes = batch.Sidecar.BlobHashes() - } - - batchHeaderV0 := BatchHeaderV0{ - BatchIndex: parentBatchIndex + 1, - L1MessagePopped: l1MessagePopped, - TotalL1MessagePopped: parentBatchHeaderTotalL1 + l1MessagePopped, - DataHash: dataHash, - BlobVersionedHash: blobHashes[0], - PrevStateRoot: batch.PrevStateRoot, - PostStateRoot: batch.PostStateRoot, - WithdrawalRoot: batch.WithdrawRoot, - SequencerSetVerifyHash: sequencerSetVerifyHash, - ParentBatchHash: parentBatchHash, - } - - if bc.isBatchUpgraded(blockTimestamp) { - batchHeaderV1 := BatchHeaderV1{ - BatchHeaderV0: batchHeaderV0, - LastBlockNumber: batch.LastBlockNumber, - } - return batchHeaderV1.Bytes() - } - - return batchHeaderV0.Bytes() -} - // parsingTxs parses transactions, distinguishes L1 and L2 transactions func parsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( txsPayload []byte, @@ -877,7 +813,7 @@ func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( } } - sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) + sequencerSet, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) if err != nil { return nil, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) } @@ -889,23 +825,16 @@ func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( blockTimestamp := lastBlock.Time() // Seal batch and generate batchHeader - batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + batchIndex, batchHeader, reachedExpectedSize, err := bc.SealBatch(sequencerSet, blockTimestamp) if err != nil { return nil, fmt.Errorf("failed to seal batch: %w", err) } - // Get the sealed batch - sealedBatch, found := bc.GetSealedBatch(batchIndex) - if !found { - return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex) + batchHeaderHash, err := batchHeader.Hash() + if err != nil { + return nil, fmt.Errorf("failed to hash batch header: %w", 
err) } - - // Reconstruct batch header from RPCRollupBatch data - // We need to create a BatchHeaderBytes from the available data in RPCRollupBatch - // Since we have all the necessary fields, we can reconstruct it - batchHeader := bc.createBatchHeaderFromRPCRollupBatch(sealedBatch, sequencerSetVerifyHash, blockTimestamp) - - log.Info("seal batch success", "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + log.Info("seal batch success", "batchIndex", batchIndex, "batchHash", batchHeaderHash.String(), "reachedExpectedSize", reachedExpectedSize) return &batchHeader, nil } @@ -928,7 +857,7 @@ func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { - callOpts.BlockNumber = new(big.Int).SetUint64(bc.lastPackedBlockHeight) + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) root, err := bc.l2Caller.GetTreeRoot(callOpts) if err != nil { return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) @@ -940,7 +869,7 @@ func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) } - // Get current block to check timeout after packing + // Get the current block to check timeout after packing nowBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(blockNum))) if err != nil { return fmt.Errorf("failed to get block %d: %w", blockNum, err) @@ -993,9 +922,9 @@ func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { } func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, error) { - sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) + sequencerSetBytes, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) if err != nil { - return common.Hash{}, false, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) + return common.Hash{}, false, err } lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) if err != nil { @@ -1003,7 +932,7 @@ func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (c } blockTimestamp := lastBlock.Time() // Seal batch and generate batchHeader - batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + batchIndex, batchHeaderBytes, reachedExpectedSize, err := bc.SealBatch(sequencerSetBytes, blockTimestamp) if err != nil { return common.Hash{}, false, fmt.Errorf("failed to seal batch: %w", err) } @@ -1022,6 +951,10 @@ func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (c return common.Hash{}, false, fmt.Errorf("batch hash does not match sealed batch") } } + batchHash, err := batchHeaderBytes.Hash() + if err != nil { + return common.Hash{}, false, err + } return batchHash, reachedExpectedSize, nil } @@ -1065,7 +998,8 @@ func (bc *BatchCache) logSealedBatch(batchHeader BatchHeaderBytes, batchHash com func (bc *BatchCache) AssembleCurrentBatchHeader() error { if !bc.initDone { - return errors.New("batch has not been initialized, should wait") + log.Warn("batch has not been initialized, should wait") + return nil } callOpts := &bind.CallOpts{ Context: bc.ctx, @@ -1077,10 +1011,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { if endBlockNum < bc.currentBlockNumber { return 
fmt.Errorf("has rerog, should check block status current %v, now %v", bc.currentBlockNumber, endBlockNum) } - startBlockNum, err := bc.parentBatchHeader.LastBlockNumber() - if err != nil { - return fmt.Errorf("failed to get last block number %w", err) - } + startBlockNum := bc.lastPackedBlockHeight // Get start block once to avoid repeated queries startBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) if err != nil { @@ -1091,7 +1022,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { // Fetch blocks from L2 client in the specified range and accumulate to batch for blockNum := currentBlockNum + 1; blockNum <= endBlockNum; blockNum++ { - callOpts.BlockNumber = new(big.Int).SetUint64(bc.lastPackedBlockHeight) + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) root, err := bc.l2Caller.GetTreeRoot(callOpts) if err != nil { return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) @@ -1126,7 +1057,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { // check ensures batch is sealed before exceeding the maximum timeout if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout { log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout) - sequencerSetVerifyHash, err := bc.l2Caller.SequencerSetVerifyHash(callOpts) + sequencerSetBytes, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) if err != nil { return fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) } @@ -1135,7 +1066,7 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { return fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err) } blockTimestamp := lastBlock.Time() - _, _, _, err = bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + _, _, _, err = bc.SealBatch(sequencerSetBytes, blockTimestamp) if err != nil { return fmt.Errorf("failed to seal batch: %w", err) } diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go index b7cc73a76..9f9d2d12f 100644 --- a/tx-submitter/batch/batch_cache_test.go +++ b/tx-submitter/batch/batch_cache_test.go @@ -61,3 +61,25 @@ func TestBatchCacheInitByBlockRange(t *testing.T) { err := cache.InitFromRollupByRange() require.NoError(t, err) } + +func TestBatchCacheInitByBlockRange1(t *testing.T) { + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + err := cache.Init() + require.NoError(t, err) + batch, err := cache.assembleBatchHeaderFromL2Blocks(0, 18) + require.NoError(t, err) + hash, err := batch.Hash() + require.NoError(t, err) + t.Log("0-18 batch hash", hash.String()) +} + +func TestBatchCacheInitByBlockRange2(t *testing.T) { + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + err := cache.Init() + require.NoError(t, err) + batch, err := cache.assembleBatchHeaderFromL2Blocks(1, 18) + require.NoError(t, err) + hash, err := batch.Hash() + require.NoError(t, err) + t.Log("1-18 batch hash", hash.String()) +} diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index 776d29087..b44efa6f2 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -25,7 +25,7 @@ var ( ) var ( - rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") + rollupAddr = 
common.HexToAddress("0xd0ec100f1252a53322051a95cf05c32f0c174354") l1ClientRpc = "http://localhost:9545" l2ClientRpc = "http://localhost:8545" @@ -71,7 +71,7 @@ func Test_CommitBatchParse(t *testing.T) { } func TestBatchRestartInit(t *testing.T) { - sequencerSetVerifyHash, err := l2Caller.SequencerSetVerifyHash(nil) + sequencerSetBytes, sequencerSetVerifyHash, err := l2Caller.GetSequencerSetBytes(nil) require.NoError(t, err) t.Log("sequencer set verify hash", hex.EncodeToString(sequencerSetVerifyHash[:])) ci, fi := getInfosFromContract() @@ -93,7 +93,11 @@ func TestBatchRestartInit(t *testing.T) { bc.parentBatchHeader = headerBytes bc.prevStateRoot = parentStateRoot // The current batch's prevStateRoot is the parent batch's postStateRoot bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() - require.NoError(t, err) + if err != nil { + store, err := rollupContract.BatchDataStore(nil, fi) + require.NoError(t, err) + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() + } bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() require.NoError(t, err) t.Logf("Restored batch header: batchIndex=%d, parentStateRoot=%x (will be used as prevStateRoot for next batch)", @@ -104,7 +108,7 @@ func TestBatchRestartInit(t *testing.T) { t.Logf("First unfinalize batch index: %d, block range: %d - %d", firstUnfinalizedIndex, startBlockNum.Uint64(), endBlockNum.Uint64()) // Fetch blocks from L2 client in this range and assemble batchHeader - assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetVerifyHash, l2Client, l2Caller) + assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetBytes, l2Client, l2Caller) require.NoError(t, err, "failed to assemble batch header from L2 blocks") t.Log("assembled batch header success", hex.EncodeToString(assembledBatchHeader.Bytes())) // Verify the assembled batchHeader @@ -129,18 +133,6 @@ func TestBatchRestartInit(t *testing.T) { require.NoError(t, err) require.Equal(t, batchDataInput.PostStateRoot[:], postStateRoot.Bytes()) - // t.Logf("batchDataInput.WithdrawalRoot=%x", batchDataInput.WithdrawalRoot) - // Perform keccak256 hash on SequencerSets - sequencerSetsHash := crypto.Keccak256Hash(batchSignatureInput.SequencerSets) - t.Logf("batchSignatureInput.SequencerSets keccak256 hash=%s", hex.EncodeToString(sequencerSetsHash[:])) - require.Equal(t, sequencerSetsHash.Bytes(), sequencerSetVerifyHash[:], "sequencer sets hash should match") - - batchHeaderBytes, err := getBatchHeaderFromGeth(firstUnfinalizedIndex) - require.NoError(t, err) - - // Compare the batch header from Geth with the assembled batch header - compareAndReportBatchHeaders(t, assembledBatchHeader, batchHeaderBytes, "assembled", "from Geth") - // Compare assembledBatchHeader with the batch header built from commitBatch data // Note: batchDataInput and batchSignatureInput can be used to verify data, but need to build a complete batch header compareBatchHeaderWithCommitData(t, assembledBatchHeader, batchDataInput, batchSignatureInput, sequencerSetVerifyHash) @@ -611,7 +603,7 @@ func getCommitBatchDataByIndex(index uint64) (*bindings.IRollupBatchDataInput, * func assembleBatchHeaderFromL2Blocks( bc *BatchCache, startBlockNum, endBlockNum uint64, - sequencerSetVerifyHash common.Hash, + sequencerBytes []byte, l2Client iface.L2Client, l2Caller *types.L2Caller, ) (*BatchHeaderBytes, error) { @@ -633,7 +625,7 @@ func assembleBatchHeaderFromL2Blocks( } // Pack 
current block (confirm and append to batch) - if err := bc.PackCurrentBlock(blockNum); err != nil { + if err = bc.PackCurrentBlock(blockNum); err != nil { return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err) } @@ -652,19 +644,16 @@ func assembleBatchHeaderFromL2Blocks( blockTimestamp := lastBlock.Time() // Seal batch and generate batchHeader - batchIndex, batchHash, reachedExpectedSize, err := bc.SealBatch(sequencerSetVerifyHash, blockTimestamp) + batchIndex, batchHeaderBytes, _, err := bc.SealBatch(sequencerBytes, blockTimestamp) if err != nil { return nil, fmt.Errorf("failed to seal batch: %w", err) } // Get the sealed batch header - sealedBatch, found := bc.GetSealedBatch(batchIndex) + _, found := bc.GetSealedBatch(batchIndex) if !found { return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex) } - _ = batchHash // batch hash - _ = reachedExpectedSize // whether reached expected size - batch := bc.createBatchHeaderFromRPCRollupBatch(sealedBatch, sequencerSetVerifyHash, blockTimestamp) - return &batch, nil + return &batchHeaderBytes, nil } diff --git a/tx-submitter/batch/commit_test.go b/tx-submitter/batch/commit_test.go new file mode 100644 index 000000000..1209094f3 --- /dev/null +++ b/tx-submitter/batch/commit_test.go @@ -0,0 +1,233 @@ +package batch + +import ( + "context" + "crypto/ecdsa" + "fmt" + "math/big" + "testing" + "time" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + "morph-l2/tx-submitter/utils" + + "github.com/holiman/uint256" + "github.com/morph-l2/go-ethereum" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/consensus/misc/eip4844" + ethtypes "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/eth" + "github.com/morph-l2/go-ethereum/ethclient" + "github.com/morph-l2/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var pk = "0xd99870855d97327d20c666abc78588f1449b1fac76ed0c86c1afb9ce2db85f32" + +func TestRollupWithProof(t *testing.T) { + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + err := cache.InitFromRollupByRange() + require.NoError(t, err) + + privateKey, err := crypto.HexToECDSA(pk[2:]) + require.NoError(t, err) + address := crypto.PubkeyToAddress(privateKey.PublicKey) + ctx := context.Background() + l1ChainId, err := l1Client.ChainID(ctx) + require.NoError(t, err) + rollup, err := bindings.NewRollup(rollupAddr, l1Client) + require.NoError(t, err) + abi, err := bindings.RollupMetaData.GetAbi() + require.NoError(t, err) + latestCommitBatchIndex, err := rollup.LastCommittedBatchIndex(nil) + require.NoError(t, err) + + //batch, err := l2Client.GetRollupBatchByIndex(context.Background(), latestCommitBatchIndex.Uint64()+1) + //require.NoError(t, err) + batch, exist := cache.Get(latestCommitBatchIndex.Uint64() + 1) + require.NoError(t, err) + require.True(t, exist) + h := crypto.Keccak256Hash(batch.CurrentSequencerSetBytes) + t.Log("sequencer verify hash:", h.String()) + + signature, err := buildSigInput(batch) + require.NoError(t, err) + rollupBatch := bindings.IRollupBatchDataInput{ + Version: uint8(batch.Version), + ParentBatchHeader: batch.ParentBatchHeader, + LastBlockNumber: batch.LastBlockNumber, + NumL1Messages: batch.NumL1Messages, + PrevStateRoot: batch.PrevStateRoot, + PostStateRoot: batch.PostStateRoot, + WithdrawalRoot: batch.WithdrawRoot, + } + tip, gasFeeCap, blobFee, head, err := getGasTipAndCap(l1Client) + 
require.NoError(t, err) + + calldata, err := abi.Pack("commitBatch", rollupBatch, *signature) + require.NoError(t, err) + nonce, err := l1Client.NonceAt(context.Background(), address, nil) + require.NoError(t, err) + tx, err := createBlobTx(l1Client, batch, nonce, 3200000, tip, gasFeeCap, blobFee, calldata, head) + require.NoError(t, err) + transaction, err := sign(tx, ethtypes.LatestSignerForChainID(l1ChainId), privateKey) + require.NoError(t, err) + t.Log("txHash", transaction.Hash().String()) + err = sendTx(l1Client, 500000000000000000, transaction) + require.NoError(t, err) + time.Sleep(2 * time.Second) + receipt, err := l1Client.TransactionReceipt(ctx, transaction.Hash()) + require.NoError(t, err) + t.Log("receipt status", receipt.Status) + t.Log("receipt", receipt) + +} + +func sign(tx *ethtypes.Transaction, signer ethtypes.Signer, prv *ecdsa.PrivateKey) (*ethtypes.Transaction, error) { + signedTx, err := ethtypes.SignTx(tx, signer, prv) + if err != nil { + return nil, fmt.Errorf("sign tx error:%v", err) + } + return signedTx, nil +} + +func createBlobTx(l1client *ethclient.Client, batch *eth.RPCRollupBatch, nonce, gas uint64, tip, gasFeeCap, blobFee *big.Int, calldata []byte, head *ethtypes.Header) (*ethtypes.Transaction, error) { + versionedHashes := types.BlobHashes(batch.Sidecar.Blobs, batch.Sidecar.Commitments) + sidecar := ðtypes.BlobTxSidecar{ + Blobs: batch.Sidecar.Blobs, + Commitments: batch.Sidecar.Commitments, + } + chainID, err := l1client.ChainID(context.Background()) + if err != nil { + return nil, err + } + switch types.DetermineBlobVersion(head, chainID.Uint64()) { + case ethtypes.BlobSidecarVersion0: + sidecar.Version = ethtypes.BlobSidecarVersion0 + proof, err := types.MakeBlobProof(sidecar.Blobs, sidecar.Commitments) + if err != nil { + return nil, fmt.Errorf("gen blob proof failed %v", err) + } + sidecar.Proofs = proof + case ethtypes.BlobSidecarVersion1: + sidecar.Version = ethtypes.BlobSidecarVersion1 + proof, err := types.MakeCellProof(sidecar.Blobs) + if err != nil { + return nil, fmt.Errorf("gen cell proof failed %v", err) + } + sidecar.Proofs = proof + default: + return nil, fmt.Errorf("unsupported blob version") + } + + return ethtypes.NewTx(ðtypes.BlobTx{ + ChainID: uint256.MustFromBig(chainID), + Nonce: nonce, + GasTipCap: uint256.MustFromBig(big.NewInt(tip.Int64())), + GasFeeCap: uint256.MustFromBig(big.NewInt(gasFeeCap.Int64())), + Gas: gas, + To: rollupAddr, + Data: calldata, + BlobFeeCap: uint256.MustFromBig(blobFee), + BlobHashes: versionedHashes, + Sidecar: sidecar, + }), nil +} + +func estimateGas(l1client iface.L1Client, from, to common.Address, data []byte, feecap *big.Int, tip *big.Int) (uint64, error) { + gas, err := l1client.EstimateGas(context.Background(), ethereum.CallMsg{ + From: from, + To: &to, + GasFeeCap: feecap, + GasTipCap: tip, + Data: data, + }) + if err != nil { + return 0, fmt.Errorf("call estimate gas error:%v", err) + } + return gas, nil +} + +func getGasTipAndCap(l1client *ethclient.Client) (*big.Int, *big.Int, *big.Int, *ethtypes.Header, error) { + head, err := l1client.HeaderByNumber(context.Background(), nil) + if err != nil { + return nil, nil, nil, nil, err + } + if head.BaseFee != nil { + log.Info("market fee info", "feecap", head.BaseFee) + } + + tip, err := l1client.SuggestGasTipCap(context.Background()) + if err != nil { + return nil, nil, nil, nil, err + } + log.Info("market fee info", "tip", tip) + + tip = new(big.Int).Mul(tip, big.NewInt(int64(200))) + tip = new(big.Int).Div(tip, big.NewInt(100)) + + var gasFeeCap 
*big.Int + if head.BaseFee != nil { + gasFeeCap = new(big.Int).Add( + tip, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + } else { + gasFeeCap = new(big.Int).Set(tip) + } + + // calc blob fee cap + var blobFee *big.Int + if head.ExcessBlobGas != nil { + id, err := l1client.ChainID(context.Background()) + if err != nil { + return nil, nil, nil, nil, err + } + log.Info("market blob fee info", "excess blob gas", *head.ExcessBlobGas) + blobConfig, exist := types.ChainConfigMap[id.Uint64()] + if !exist { + blobConfig = types.DefaultBlobConfig + } + blobFeeDenominator := types.GetBlobFeeDenominator(blobConfig, head.Time) + blobFee = eip4844.CalcBlobFee(*head.ExcessBlobGas, blobFeeDenominator.Uint64()) + // Set to 3x to handle blob market congestion + blobFee = new(big.Int).Mul(blobFee, big.NewInt(3)) + } + + return tip, gasFeeCap, blobFee, head, nil +} + +func buildSigInput(batch *eth.RPCRollupBatch) (*bindings.IRollupBatchSignatureInput, error) { + sigData := &bindings.IRollupBatchSignatureInput{ + SignedSequencersBitmap: common.Big0, + SequencerSets: batch.CurrentSequencerSetBytes, + Signature: []byte("0x"), + } + return sigData, nil +} + +// send tx to l1 with business logic check +func sendTx(client iface.Client, txFeeLimit uint64, tx *ethtypes.Transaction) error { + // fee limit + if txFeeLimit > 0 { + var fee uint64 + // calc tx gas fee + if tx.Type() == ethtypes.BlobTxType { + // blob fee + fee = tx.BlobGasFeeCap().Uint64() * tx.BlobGas() + // tx fee + fee += tx.GasPrice().Uint64() * tx.Gas() + } else { + fee = tx.GasPrice().Uint64() * tx.Gas() + } + + if fee > txFeeLimit { + return fmt.Errorf("%v:limit=%v,but got=%v", utils.ErrExceedFeeLimit, txFeeLimit, fee) + } + } + + return client.SendTransaction(context.Background(), tx) +} diff --git a/tx-submitter/flags/flags.go b/tx-submitter/flags/flags.go index e7de38ff6..26ec2ee9c 100644 --- a/tx-submitter/flags/flags.go +++ b/tx-submitter/flags/flags.go @@ -159,7 +159,7 @@ var ( RollupInterval = cli.DurationFlag{ Name: "rollup_interval", Usage: "Interval for rollup", - Value: 500 * time.Millisecond, + Value: 5 * time.Second, EnvVar: prefixEnvVar("ROLLUP_INTERVAL"), } // finalize interval diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go index 5de0a05a1..d864ec754 100644 --- a/tx-submitter/services/rollup.go +++ b/tx-submitter/services/rollup.go @@ -228,17 +228,6 @@ func (r *Rollup) Start() error { } }) - go utils.Loop(r.ctx, 5*time.Second, func() { - err = r.batchCache.InitFromRollupByRange() - if err != nil { - log.Error("init and sync from rollup failed, wait for the next try", "error", err) - } - err = r.batchCache.AssembleCurrentBatchHeader() - if err != nil { - log.Error("Assemble current batch failed, wait for the next try", "error", err) - } - }) - if r.cfg.Finalize { go utils.Loop(r.ctx, r.cfg.FinalizeInterval, func() { r.rollupFinalizeMu.Lock() @@ -265,6 +254,21 @@ func (r *Rollup) Start() error { } } }) + + go func() { + for { + err = r.batchCache.InitAndSyncFromRollup() + if err != nil { + log.Error("init and sync from rollup failed, wait for the next try", "error", err) + } + err = r.batchCache.AssembleCurrentBatchHeader() + if err != nil { + log.Error("Assemble current batch failed, wait for the next try", "error", err) + } + time.Sleep(5 * time.Second) + } + }() + return nil } @@ -467,6 +471,9 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 rollupBatch, ok := r.batchCache.Get(batchIndex) if ok { + if 
rollupBatch.CollectedL1Fee == nil { + return nil + } collectedL1Fee := new(big.Float).Quo(new(big.Float).SetInt(rollupBatch.CollectedL1Fee.ToInt()), new(big.Float).SetInt(big.NewInt(params.Ether))) collectedL1FeeFloat, _ := collectedL1Fee.Float64() @@ -475,7 +482,7 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re r.metrics.CollectedL1FeeSum.Add(collectedL1FeeFloat) // Update leveldb - err := r.ldb.PutFloat(collectedL1FeeSumKey, r.collectedL1FeeSum) + err = r.ldb.PutFloat(collectedL1FeeSumKey, r.collectedL1FeeSum) if err != nil { log.Error("failed to update collected L1 fee sum in leveldb", "error", err) } @@ -751,8 +758,6 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa if batchIndex <= lastCommitted.Uint64() { // Another submitter has already committed this batch log.Warn("Batch commit transaction failed but batch is already committed by another submitter", "batch_index", batchIndex, "tx_hash", tx.Hash().String()) - // Clean up batch from cache since it's already committed - r.batchCache.Delete(batchIndex) } else { // Contract bug detected - batch is not committed by anyone else but our transaction failed log.Warn("Critical error: batch commit transaction failed and batch is not committed by anyone", "batch_index", batchIndex, "tx_hash", tx.Hash().String()) @@ -783,11 +788,9 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa if method == constants.MethodCommitBatch { batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 log.Info("Successfully committed batch", "batch_index", batchIndex, "tx_hash", tx.Hash().String(), "block_number", status.receipt.BlockNumber.Uint64(), "gas_used", status.receipt.GasUsed, "confirm", confirmations) - - // Clean up batch from cache after successful commit - r.batchCache.Delete(batchIndex) } else if method == constants.MethodFinalizeBatch { batchIndex := utils.ParseFBatchIndex(tx.Data()) + r.batchCache.Delete(batchIndex) log.Info("Successfully finalized batch", "batch_index", batchIndex, "tx_hash", tx.Hash().String(), "block_number", status.receipt.BlockNumber.Uint64(), "gas_used", status.receipt.GasUsed, "confirm", confirmations) } } @@ -848,8 +851,8 @@ func (r *Rollup) finalize() error { // get next batch nextBatchIndex := target.Uint64() + 1 - batch, err := GetRollupBatchByIndex(nextBatchIndex, r.L2Clients) - if err != nil { + batch, ok := r.batchCache.Get(nextBatchIndex) + if !ok { log.Error("get next batch by index error", "batch_index", nextBatchIndex, ) @@ -1244,26 +1247,6 @@ func (r *Rollup) logTxInfo(tx *ethtypes.Transaction, batchIndex uint64) { } func (r *Rollup) buildSignatureInput(batch *eth.RPCRollupBatch) (*bindings.IRollupBatchSignatureInput, error) { - //blsSignatures := batch.Signatures - //if len(blsSignatures) == 0 { - // return nil, fmt.Errorf("invalid batch signature") - //} - //signers := make([]common.Address, len(blsSignatures)) - //for i, bz := range blsSignatures { - // if len(bz.Signature) > 0 { - // signers[i] = bz.Signer - // } - //} - // - //// query bitmap of signers - //bm, err := r.Staking.GetStakersBitmap(nil, signers) - //if err != nil { - // return nil, fmt.Errorf("query stakers bitmap error:%v", err) - //} - //if bm == nil { - // return nil, errors.New("stakers bitmap is nil") - //} - sigData := bindings.IRollupBatchSignatureInput{ SignedSequencersBitmap: common.Big0, SequencerSets: batch.CurrentSequencerSetBytes, @@ -1666,12 +1649,15 @@ func (r *Rollup) ReSubmitTx(resend bool, tx *ethtypes.Transaction) (*ethtypes.Tr 
case ethtypes.BlobTxType: sidecar := tx.BlobTxSidecar() version := types.DetermineBlobVersion(head, r.chainId.Uint64()) - if sidecar.Version == ethtypes.BlobSidecarVersion0 && version == ethtypes.BlobSidecarVersion1 { - err = types.BlobSidecarVersionToV1(sidecar) - if err != nil { - return nil, err + if sidecar != nil { + if sidecar.Version == ethtypes.BlobSidecarVersion0 && version == ethtypes.BlobSidecarVersion1 { + err = types.BlobSidecarVersionToV1(sidecar) + if err != nil { + return nil, err + } } } + newTx = ethtypes.NewTx(ðtypes.BlobTx{ ChainID: uint256.MustFromBig(tx.ChainId()), Nonce: tx.Nonce(), diff --git a/tx-submitter/types/l2Caller.go b/tx-submitter/types/l2Caller.go index 93e670c38..dc8b46099 100644 --- a/tx-submitter/types/l2Caller.go +++ b/tx-submitter/types/l2Caller.go @@ -1,6 +1,11 @@ package types import ( + "bytes" + "fmt" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" "math/big" "morph-l2/bindings/bindings" @@ -65,3 +70,18 @@ func (c *L2Caller) BatchBlockInterval(opts *bind.CallOpts) (*big.Int, error) { func (c *L2Caller) BatchTimeout(opts *bind.CallOpts) (*big.Int, error) { return c.govContract.BatchTimeout(opts) } + +func (c *L2Caller) GetSequencerSetBytes(opts *bind.CallOpts) ([]byte, common.Hash, error) { + hash, err := c.sequencerContract.SequencerSetVerifyHash(opts) + if err != nil { + return nil, common.Hash{}, err + } + setBytes, err := c.sequencerContract.GetSequencerSetBytes(opts) + if err != nil { + return nil, common.Hash{}, err + } + if bytes.Equal(hash[:], crypto.Keccak256Hash(setBytes).Bytes()) { + return setBytes, hash, nil + } + return nil, common.Hash{}, fmt.Errorf("sequencer set hash verify failed %v: %v", hexutil.Encode(setBytes), common.BytesToHash(hash[:]).String()) +} From 3b4012cafe45709a2a6e8d755280e5ffeae3f9f0 Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Tue, 3 Feb 2026 23:11:46 +0800 Subject: [PATCH 06/12] update rollup contract deploy script --- contracts/deploy/020-ContractInit.ts | 5 +++++ contracts/src/deploy-config/l1.ts | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/contracts/deploy/020-ContractInit.ts b/contracts/deploy/020-ContractInit.ts index a0e17dc19..7a576bbc6 100644 --- a/contracts/deploy/020-ContractInit.ts +++ b/contracts/deploy/020-ContractInit.ts @@ -69,6 +69,11 @@ export const ContractInit = async ( res = await Rollup.addChallenger(challenger) rec = await res.wait() console.log(`addChallenger(%s) ${rec.status == 1 ? "success" : "failed"}`, challenger) + + await Rollup.initialize2("0x0000000000000000000000000000000000000000000000000000000000000001") + res = await Rollup.initialize3(8640000000) + rec = await res.wait() + console.log(`initialize3(%s) ${rec.status == 1 ? 
"success" : "failed"}`) } // ------------------ staking init ----------------- diff --git a/contracts/src/deploy-config/l1.ts b/contracts/src/deploy-config/l1.ts index ee9f7b653..4db4af83e 100644 --- a/contracts/src/deploy-config/l1.ts +++ b/contracts/src/deploy-config/l1.ts @@ -27,7 +27,7 @@ const config = { rollupProposer: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8', rollupChallenger: '0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65', // genesis config - batchHeader: '0x0000000000000000000000000000000000000000000000000043a758882ae97327ffcc63373e26fcd144a5a738eac834c167175d69713780c0010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014000000000000000000000000000000000000000000000000000000000000000020cd420e20d610897b8f2c5ac5259ab8b57cce1074212cec2815b2b73ff93d9f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + batchHeader: '0x000000000000000000000000000000000000000000000000004ccf6d1ee4bbebaf7ce495605b0a9bcf5281d35cd769df730205832235869547010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014000000000000000000000000000000000000000000000000000000000000000011d7c68227c20de25bf949beebef5460050abeccba32bdf538e1298f32b57db2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', // staking config // staking Cross-Chain config From 06733cdc74bb386f20b149a7d1d5c019724a3ea8 Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Tue, 3 Feb 2026 23:12:10 +0800 Subject: [PATCH 07/12] add test for commit batch test --- tx-submitter/batch/commit_test.go | 4 +--- tx-submitter/types/l2Caller.go | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/tx-submitter/batch/commit_test.go b/tx-submitter/batch/commit_test.go index 1209094f3..5fbee217f 100644 --- a/tx-submitter/batch/commit_test.go +++ b/tx-submitter/batch/commit_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" ) -var pk = "0xd99870855d97327d20c666abc78588f1449b1fac76ed0c86c1afb9ce2db85f32" +var pk = "" func TestRollupWithProof(t *testing.T) { cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) @@ -45,8 +45,6 @@ func TestRollupWithProof(t *testing.T) { latestCommitBatchIndex, err := rollup.LastCommittedBatchIndex(nil) require.NoError(t, err) - //batch, err := l2Client.GetRollupBatchByIndex(context.Background(), latestCommitBatchIndex.Uint64()+1) - //require.NoError(t, err) batch, exist := cache.Get(latestCommitBatchIndex.Uint64() + 1) require.NoError(t, err) require.True(t, exist) diff --git a/tx-submitter/types/l2Caller.go b/tx-submitter/types/l2Caller.go index dc8b46099..073e61199 100644 --- a/tx-submitter/types/l2Caller.go +++ b/tx-submitter/types/l2Caller.go @@ -3,16 +3,16 @@ package types import ( "bytes" "fmt" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/morph-l2/go-ethereum/common" - "github.com/morph-l2/go-ethereum/crypto" "math/big" "morph-l2/bindings/bindings" "morph-l2/bindings/predeploys" "morph-l2/tx-submitter/iface" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" ) type L2Caller struct { From 52f39ab0eab4e8ea55888fae95b5428ccd12727a Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Fri, 6 Feb 2026 00:13:06 
+0800 Subject: [PATCH 08/12] fix pending tx check and add batch storage --- tx-submitter/.gitignore | 1 + tx-submitter/batch/batch_cache.go | 88 ++++++- tx-submitter/batch/batch_cache_test.go | 46 ++-- tx-submitter/batch/batch_restart_test.go | 25 +- tx-submitter/batch/batch_storage.go | 279 +++++++++++++++++++++++ tx-submitter/batch/batch_storage_test.go | 21 ++ tx-submitter/batch/commit_test.go | 15 +- tx-submitter/db/db.go | 22 ++ tx-submitter/db/interface.go | 3 + tx-submitter/metrics/metrics.go | 11 + tx-submitter/services/rollup.go | 196 ++++++++++++---- 11 files changed, 627 insertions(+), 80 deletions(-) create mode 100644 tx-submitter/batch/batch_storage.go create mode 100644 tx-submitter/batch/batch_storage_test.go diff --git a/tx-submitter/.gitignore b/tx-submitter/.gitignore index 835c4a0e4..fde59c30d 100644 --- a/tx-submitter/.gitignore +++ b/tx-submitter/.gitignore @@ -29,6 +29,7 @@ tx-submitter **/tx-submitter build/ *debug_bin* +submitter-leveldb # Config and Environment files .env* diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 04be7c977..3a385d1c5 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -10,11 +10,11 @@ import ( "math/big" "sync" - "github.com/morph-l2/go-ethereum/crypto" - + "morph-l2/tx-submitter/db" "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" + "github.com/morph-l2/go-ethereum/crypto" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" "github.com/morph-l2/go-ethereum/common/hexutil" @@ -30,6 +32,8 @@ type BatchCache struct { ctx context.Context initDone bool + batchStorage *BatchStorage + // key: batchIndex, value: RPCRollupBatch sealedBatches map[uint64]*eth.RPCRollupBatch batchDataHash map[uint64]common.Hash @@ -78,6 +80,7 @@ func NewBatchCache( l2Clients []iface.L2Client, rollupContract iface.IRollup, l2Caller *types.L2Caller, + ldb *db.Db, ) *BatchCache { if isBatchUpgraded == nil { // Default implementation: always returns true (use V1 version) @@ -113,6 +116,7 @@ func NewBatchCache( l2Clients: iface.L2Clients{Clients: l2Clients}, rollupContract: rollupContract, l2Caller: l2Caller, + batchStorage: NewBatchStorage(ldb), } } @@ -170,6 +174,77 @@ func (bc *BatchCache) InitFromRollupByRange() error { return nil } +func (bc *BatchCache) InitAndSyncFromDatabase() error { + if bc.initDone { + return nil + } + ci, fi, err := bc.getBatchStatusFromContract() + if err != nil { + return fmt.Errorf("get batch status from rollup failed err: %w", err) + } + + batches, err := bc.batchStorage.LoadAllSealedBatches() + if err != nil { + return err + } + if len(batches) == 0 { + err = bc.InitAndSyncFromRollup() + if err != nil { + return err + } + return nil + } + // check stored batch hashes against the batches already committed by the submitter + for i := fi.Uint64(); i <= ci.Uint64(); i++ { + batchHash, err := bc.rollupContract.CommittedBatches(nil, new(big.Int).SetUint64(i)) + if err != nil { + return err + } + batchStorage, exist := batches[i] + if !exist || !bytes.Equal(batchHash[:], batchStorage.Hash.Bytes()) { + // batch not contiguous or batch is invalid + err = bc.InitAndSyncFromRollup() + if err != nil { + return err + } + return nil + } + } + parentHeader := BatchHeaderBytes(batches[uint64(len(batches)-1)].ParentBatchHeader[:]) + bc.lastPackedBlockHeight, err = parentHeader.LastBlockNumber() + if err != nil { + parentBatchIndex, err := parentHeader.BatchIndex() + if err != nil { + return fmt.Errorf("get batch index from
parent header failed err: %w", err) + } + // check batch index range + if parentBatchIndex > ci.Uint64() || parentBatchIndex < fi.Uint64() { + // sync from another side + err = bc.InitAndSyncFromRollup() + if err != nil { + return err + } + return nil + } + store, err := bc.rollupContract.BatchDataStore(nil, fi) + if err != nil { + return err + } + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() + } + bc.sealedBatches = batches + bc.parentBatchHeader = &parentHeader + bc.prevStateRoot, err = parentHeader.PostStateRoot() + if err != nil { + return fmt.Errorf("get post state root err: %w", err) + } + bc.currentBlockNumber = bc.lastPackedBlockHeight + bc.totalL1MessagePopped, err = parentHeader.TotalL1MessagePopped() + bc.initDone = true + log.Info("Sync sealed batch from database success", "count", len(batches)) + return nil +} + func (bc *BatchCache) InitAndSyncFromRollup() error { if bc.initDone { return nil @@ -216,6 +291,10 @@ func (bc *BatchCache) InitAndSyncFromRollup() error { return nil } +func (bc *BatchCache) LatestBatchIndex() (uint64, error) { + return bc.parentBatchHeader.BatchIndex() +} + func (bc *BatchCache) updateBatchConfigFromGov() error { interval, err := bc.l2Caller.BatchBlockInterval(nil) if err != nil { @@ -592,7 +671,10 @@ func (bc *BatchCache) SealBatch(sequencerSets []byte, blockTimestamp uint64) (ui CollectedL1Fee: nil, } bc.sealedBatches[batchIndex] = sealedBatch - + err = bc.batchStorage.StoreSealedBatch(batchIndex, sealedBatch) + if err != nil { + log.Error("failed to store sealed batch", "err", err) + } // Update parent batch information for next batch bc.parentBatchHeader = &batchHeader bc.prevStateRoot = bc.postStateRoot diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go index 9f9d2d12f..dfa61c9ef 100644 --- a/tx-submitter/batch/batch_cache_test.go +++ b/tx-submitter/batch/batch_cache_test.go @@ -3,10 +3,12 @@ package batch import ( "os" "os/signal" + "path/filepath" "testing" "time" "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/db" "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" "morph-l2/tx-submitter/utils" @@ -27,8 +29,22 @@ func init() { } } +// setupTestDB creates a temporary database for testing +func setupTestDB(t *testing.T) *db.Db { + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + + testDB, err := db.New(testDir) + require.NoError(t, err) + return testDB +} + func TestBatchCacheInitServer(t *testing.T) { - cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) go utils.Loop(cache.ctx, 5*time.Second, func() { err := cache.InitFromRollupByRange() @@ -51,35 +67,15 @@ func TestBatchCacheInitServer(t *testing.T) { } func TestBatchCacheInit(t *testing.T) { - cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) err := cache.InitAndSyncFromRollup() require.NoError(t, err) } func TestBatchCacheInitByBlockRange(t *testing.T) { - cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) err := cache.InitFromRollupByRange() 
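The database-first initialization added in this patch only trusts persisted batches after cross-checking them against the rollup contract: every index from the last finalized batch up to the last committed one must be present locally and carry exactly the hash recorded on L1, otherwise the cache falls back to InitAndSyncFromRollup and rebuilds from the contract. The following standalone sketch restates that decision under simplified, hypothetical types (storedBatch and onChainHash are stand-ins for the persisted RPCRollupBatch and for rollupContract.CommittedBatches; they are not part of the patch).

    package main

    import (
        "bytes"
        "fmt"
    )

    // storedBatch keeps only what the check needs: the sealed batch hash.
    type storedBatch struct{ Hash [32]byte }

    // canResumeFromDB reports whether locally persisted batches may be trusted:
    // every index in [finalized, committed] must exist locally and match the
    // hash the rollup contract recorded on L1.
    func canResumeFromDB(batches map[uint64]*storedBatch, finalized, committed uint64,
        onChainHash func(index uint64) [32]byte) bool {
        if len(batches) == 0 {
            return false // nothing persisted; rebuild from the contract
        }
        for i := finalized; i <= committed; i++ {
            b, ok := batches[i]
            if !ok {
                return false // gap in the stored range
            }
            want := onChainHash(i)
            if !bytes.Equal(b.Hash[:], want[:]) {
                return false // local batch diverged from the committed one
            }
        }
        return true
    }

    func main() {
        onChain := func(uint64) [32]byte { return [32]byte{} }
        ok := canResumeFromDB(map[uint64]*storedBatch{0: {}}, 0, 0, onChain)
        fmt.Println("resume from db:", ok)
    }

A false result corresponds exactly to the fallback branches above: any gap or hash mismatch discards the local state rather than risking committing a divergent batch.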
require.NoError(t, err) } - -func TestBatchCacheInitByBlockRange1(t *testing.T) { - cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) - err := cache.Init() - require.NoError(t, err) - batch, err := cache.assembleBatchHeaderFromL2Blocks(0, 18) - require.NoError(t, err) - hash, err := batch.Hash() - require.NoError(t, err) - t.Log("0-18 batch hash", hash.String()) -} - -func TestBatchCacheInitByBlockRange2(t *testing.T) { - cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) - err := cache.Init() - require.NoError(t, err) - batch, err := cache.assembleBatchHeaderFromL2Blocks(1, 18) - require.NoError(t, err) - hash, err := batch.Hash() - require.NoError(t, err) - t.Log("1-18 batch hash", hash.String()) -} diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index b44efa6f2..785c7ca87 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -7,9 +7,12 @@ import ( "errors" "fmt" "math/big" + "os" + "path/filepath" "testing" "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/db" "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" @@ -25,7 +28,7 @@ var ( ) var ( - rollupAddr = common.HexToAddress("0xd0ec100f1252a53322051a95cf05c32f0c174354") + rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") l1ClientRpc = "http://localhost:9545" l2ClientRpc = "http://localhost:8545" @@ -50,7 +53,15 @@ func init() { } func Test_GetFinalizeBatchHeader(t *testing.T) { - bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + testDB, err := db.New(testDir) + require.NoError(t, err) + + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(0) require.NoError(t, err) t.Log("headerBytes", hex.EncodeToString(headerBytes.Bytes())) @@ -71,12 +82,20 @@ func Test_CommitBatchParse(t *testing.T) { } func TestBatchRestartInit(t *testing.T) { + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + testDB, err := db.New(testDir) + require.NoError(t, err) + sequencerSetBytes, sequencerSetVerifyHash, err := l2Caller.GetSequencerSetBytes(nil) require.NoError(t, err) t.Log("sequencer set verify hash", hex.EncodeToString(sequencerSetVerifyHash[:])) ci, fi := getInfosFromContract() t.Log("commit index", ci, " ", "finalize index", fi) - bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller) + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) startBlockNum, endBlockNum, err := getFirstUnFinalizeBatchBlockNumRange(fi) require.NoError(t, err) startBlockNum = new(big.Int).Add(startBlockNum, new(big.Int).SetUint64(1)) diff --git a/tx-submitter/batch/batch_storage.go b/tx-submitter/batch/batch_storage.go new file mode 100644 index 000000000..db7db59a9 --- /dev/null +++ b/tx-submitter/batch/batch_storage.go @@ -0,0 +1,279 @@ +package batch + +import ( + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "sync" + + "morph-l2/tx-submitter/db" + + "github.com/morph-l2/go-ethereum/eth" + "github.com/morph-l2/go-ethereum/log" +) + +const ( + // Key prefixes for LevelDB storage + SealedBatchKeyPrefix = 
"sealed_batch_" + SealedBatchIndicesKey = "sealed_batch_indices" +) + +// BatchStorage handles persistence of sealed batches using JSON encoding +type BatchStorage struct { + db db.Database + mu sync.RWMutex +} + +// NewBatchStorage creates a new BatchStorage instance +func NewBatchStorage(db db.Database) *BatchStorage { + return &BatchStorage{ + db: db, + } +} + +// StoreSealedBatch stores a single sealed batch to LevelDB +// Uses JSON encoding for serialization +func (s *BatchStorage) StoreSealedBatch(batchIndex uint64, batch *eth.RPCRollupBatch) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Serialize batch to JSON + encoded, err := json.Marshal(batch) + if err != nil { + return fmt.Errorf("failed to marshal sealed batch %d: %w", batchIndex, err) + } + + // Store batch data + key := encodeBatchKey(batchIndex) + if err := s.db.PutBytes(key, encoded); err != nil { + return fmt.Errorf("failed to store sealed batch %d: %w", batchIndex, err) + } + + // Update indices list + if err := s.updateBatchIndices(batchIndex, true); err != nil { + log.Warn("Failed to update batch indices", "batch_index", batchIndex, "error", err) + // Don't fail the operation if indices update fails + } + + return nil +} + +// LoadSealedBatch loads a single sealed batch from LevelDB +func (s *BatchStorage) LoadSealedBatch(batchIndex uint64) (*eth.RPCRollupBatch, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + key := encodeBatchKey(batchIndex) + encoded, err := s.db.GetBytes(key) + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + return nil, fmt.Errorf("sealed batch %d not found", batchIndex) + } + return nil, fmt.Errorf("failed to get sealed batch %d: %w", batchIndex, err) + } + + // Deserialize from JSON + var batch eth.RPCRollupBatch + if err := json.Unmarshal(encoded, &batch); err != nil { + return nil, fmt.Errorf("failed to unmarshal sealed batch %d: %w", batchIndex, err) + } + + return &batch, nil +} + +// LoadAllSealedBatches loads all sealed batches from LevelDB +// Returns a map of batchIndex -> RPCRollupBatch +func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, error) { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return make(map[uint64]*eth.RPCRollupBatch), nil + } + return nil, fmt.Errorf("failed to load batch indices: %w", err) + } + + // Load each batch (without holding the lock to avoid deadlock) + batches := make(map[uint64]*eth.RPCRollupBatch, len(indices)) + for _, idx := range indices { + batch, err := s.LoadSealedBatch(idx) + if err != nil { + log.Warn("Failed to load sealed batch, skipping", + "batch_index", idx, "error", err) + continue + } + batches[idx] = batch + } + + return batches, nil +} + +// DeleteSealedBatch removes a sealed batch from LevelDB +func (s *BatchStorage) DeleteSealedBatch(batchIndex uint64) error { + s.mu.Lock() + defer s.mu.Unlock() + + key := encodeBatchKey(batchIndex) + if err := s.db.Delete(key); err != nil { + return fmt.Errorf("failed to delete sealed batch %d: %w", batchIndex, err) + } + + // Update indices list + if err := s.updateBatchIndices(batchIndex, false); err != nil { + log.Warn("Failed to update batch indices after deletion", + "batch_index", batchIndex, "error", err) + // Don't fail the operation if indices update fails + } + + return nil +} + +func (s *BatchStorage) DeleteAllSealedBatches() error { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + 
s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return nil + } + return fmt.Errorf("failed to load batch indices: %w", err) + } + + for _, idx := range indices { + err = s.DeleteSealedBatch(idx) + if err != nil { + log.Error("Failed to delete sealed batch", + "batch_index", idx, "error", err) + return err + } + } + + return nil +} + +// StoreSealedBatches stores multiple sealed batches in a batch operation +// This is more efficient than storing them one by one +func (s *BatchStorage) StoreSealedBatches(batches map[uint64]*eth.RPCRollupBatch) error { + s.mu.Lock() + defer s.mu.Unlock() + + indices := make([]uint64, 0, len(batches)) + + // Store each batch + for idx, batch := range batches { + encoded, err := json.Marshal(batch) + if err != nil { + return fmt.Errorf("failed to marshal sealed batch %d: %w", idx, err) + } + + key := encodeBatchKey(idx) + if err := s.db.PutBytes(key, encoded); err != nil { + return fmt.Errorf("failed to store sealed batch %d: %w", idx, err) + } + + indices = append(indices, idx) + } + + // Update indices list + if err := s.saveBatchIndices(indices); err != nil { + log.Warn("Failed to save batch indices", "error", err) + // Don't fail the operation if indices update fails + } + + return nil +} + +// encodeBatchKey encodes batch index to a byte key +func encodeBatchKey(batchIndex uint64) []byte { + key := make([]byte, len(SealedBatchKeyPrefix)+8) + copy(key, SealedBatchKeyPrefix) + binary.BigEndian.PutUint64(key[len(SealedBatchKeyPrefix):], batchIndex) + return key +} + +// updateBatchIndices updates the list of stored batch indices +// add: true to add index, false to remove +func (s *BatchStorage) updateBatchIndices(batchIndex uint64, add bool) error { + indices, err := s.loadBatchIndices() + if err != nil { + if err == db.ErrKeyNotFound { + indices = []uint64{} + } else { + return err + } + } + + if add { + // Add index if not already present + found := false + for _, idx := range indices { + if idx == batchIndex { + found = true + break + } + } + if !found { + indices = append(indices, batchIndex) + } + } else { + // Remove index + newIndices := make([]uint64, 0, len(indices)) + for _, idx := range indices { + if idx != batchIndex { + newIndices = append(newIndices, idx) + } + } + indices = newIndices + } + + return s.saveBatchIndices(indices) +} + +// loadBatchIndices loads the list of stored batch indices +func (s *BatchStorage) loadBatchIndices() ([]uint64, error) { + encoded, err := s.db.GetBytes([]byte(SealedBatchIndicesKey)) + if err != nil { + return nil, err + } + + var indices []uint64 + if err := json.Unmarshal(encoded, &indices); err != nil { + return nil, fmt.Errorf("failed to unmarshal batch indices: %w", err) + } + + return indices, nil +} + +// saveBatchIndices saves the list of batch indices +func (s *BatchStorage) saveBatchIndices(indices []uint64) error { + encoded, err := json.Marshal(indices) + if err != nil { + return fmt.Errorf("failed to marshal batch indices: %w", err) + } + + return s.db.PutBytes([]byte(SealedBatchIndicesKey), encoded) +} + +// GetStoredBatchIndices returns the list of all stored batch indices +func (s *BatchStorage) GetStoredBatchIndices() ([]uint64, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.loadBatchIndices() +} + +// BatchExists checks if a batch exists in storage +func (s *BatchStorage) BatchExists(batchIndex uint64) bool { + s.mu.RLock() + defer s.mu.RUnlock() + + key := encodeBatchKey(batchIndex) + _, err := s.db.GetBytes(key) + return err 
== nil
}
diff --git a/tx-submitter/batch/batch_storage_test.go b/tx-submitter/batch/batch_storage_test.go
new file mode 100644
index 000000000..eea1d923b
--- /dev/null
+++ b/tx-submitter/batch/batch_storage_test.go
@@ -0,0 +1,21 @@
+package batch
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"morph-l2/tx-submitter/iface"
+)
+
+func Test_storageBatch(t *testing.T) {
+	testDB := setupTestDB(t)
+	cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB)
+	err := cache.InitAndSyncFromRollup()
+	require.NoError(t, err)
+
+	batches, err := cache.batchStorage.LoadAllSealedBatches()
+	require.NoError(t, err)
+	require.NotNil(t, batches)
+	t.Log("loaded batches count", len(batches))
+}
diff --git a/tx-submitter/batch/commit_test.go b/tx-submitter/batch/commit_test.go
index 5fbee217f..b327a7356 100644
--- a/tx-submitter/batch/commit_test.go
+++ b/tx-submitter/batch/commit_test.go
@@ -5,10 +5,13 @@ import (
 	"crypto/ecdsa"
 	"fmt"
 	"math/big"
+	"os"
+	"path/filepath"
 	"testing"
 	"time"
 
 	"morph-l2/bindings/bindings"
+	"morph-l2/tx-submitter/db"
 	"morph-l2/tx-submitter/iface"
 	"morph-l2/tx-submitter/types"
 	"morph-l2/tx-submitter/utils"
@@ -28,8 +31,16 @@ import (
 var pk = ""
 
 func TestRollupWithProof(t *testing.T) {
-	cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller)
-	err := cache.InitFromRollupByRange()
+	testDir := filepath.Join(t.TempDir(), "testleveldb")
+	os.RemoveAll(testDir)
+	t.Cleanup(func() {
+		os.RemoveAll(testDir)
+	})
+	testDB, err := db.New(testDir)
+	require.NoError(t, err)
+
+	cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB)
+	err = cache.InitFromRollupByRange()
 	require.NoError(t, err)
 
 	privateKey, err := crypto.HexToECDSA(pk[2:])
diff --git a/tx-submitter/db/db.go b/tx-submitter/db/db.go
index ce62bd6eb..13c2d34a2 100644
--- a/tx-submitter/db/db.go
+++ b/tx-submitter/db/db.go
@@ -68,6 +68,28 @@ func (d *Db) PutString(key, val string) error {
 	defer d.m.Unlock()
 	return d.db.Put([]byte(key), []byte(val))
 }
+func (d *Db) GetBytes(key []byte) ([]byte, error) {
+	d.m.Lock()
+	defer d.m.Unlock()
+	v, err := d.db.Get(key)
+	if err != nil {
+		if err == errors.ErrNotFound {
+			return nil, ErrKeyNotFound
+		}
+		return nil, fmt.Errorf("failed to get key from leveldb: %w", err)
+	}
+	return v, nil
+}
+func (d *Db) PutBytes(key, val []byte) error {
+	d.m.Lock()
+	defer d.m.Unlock()
+	return d.db.Put(key, val)
+}
+func (d *Db) Delete(key []byte) error {
+	d.m.Lock()
+	defer d.m.Unlock()
+	return d.db.Delete(key)
+}
 func (d *Db) Close() error {
 	return d.db.Close()
 }
diff --git a/tx-submitter/db/interface.go b/tx-submitter/db/interface.go
index 0bec57f6f..9b26d4795 100644
--- a/tx-submitter/db/interface.go
+++ b/tx-submitter/db/interface.go
@@ -6,5 +6,8 @@ type Database interface {
 	PutString(key, val string) error
 	GetFloat(key string) (float64, error)
 	PutFloat(key string, val float64) error
+	GetBytes(key []byte) ([]byte, error)
+	PutBytes(key, val []byte) error
+	Delete(key []byte) error
 	Close() error
 }
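
Taken together, the byte-level Database methods above and BatchStorage give the submitter its restart persistence. A minimal, self-contained sketch of the intended round trip: open the LevelDB wrapper, persist a sealed batch, list the stored indices, and reload after a restart. The path and batch index are placeholders, and the empty eth.RPCRollupBatch literal stands in for a real sealed batch.

package main

import (
	"fmt"

	"morph-l2/tx-submitter/batch"
	"morph-l2/tx-submitter/db"

	"github.com/morph-l2/go-ethereum/eth"
)

func main() {
	// Open the submitter's LevelDB wrapper (path is a placeholder).
	ldb, err := db.New("/tmp/submitter-batch-db")
	if err != nil {
		panic(err)
	}
	defer ldb.Close()

	store := batch.NewBatchStorage(ldb)

	// Persist a placeholder sealed batch under index 42.
	if err := store.StoreSealedBatch(42, &eth.RPCRollupBatch{}); err != nil {
		panic(err)
	}

	// The index list and the existence check read the same keys back.
	indices, err := store.GetStoredBatchIndices()
	if err != nil {
		panic(err)
	}
	fmt.Println("stored indices:", indices, "exists:", store.BatchExists(42))

	// Reloading is what survives a submitter restart.
	reloaded, err := store.LoadSealedBatch(42)
	if err != nil {
		panic(err)
	}
	fmt.Println("reloaded version:", reloaded.Version)
}

diff --git a/tx-submitter/metrics/metrics.go b/tx-submitter/metrics/metrics.go
index 6ac53b617..be9507270 100644
--- a/tx-submitter/metrics/metrics.go
+++ b/tx-submitter/metrics/metrics.go
@@ -23,6 +23,7 @@ type Metrics struct {
 	LastCommittedBatch      prometheus.Gauge
 	LastFinalizedBatch      prometheus.Gauge
 	HasPendingFinalizeBatch prometheus.Gauge
+	LastCacheBatchIndex     prometheus.Gauge
 	reorgs                  prometheus.Counter
 	reorgDepthVal           uint64
 	reorgCountVal           uint64
@@ -72,6 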
+73,10 @@ func NewMetrics() *Metrics {
 			Name: "tx_submitter_last_finalized_batch",
 			Help: "Latest batch finalized by the submitter",
 		}),
+		LastCacheBatchIndex: prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "tx_submitter_last_batch_index",
+			Help: "Latest batch index cached by the submitter",
+		}),
 		HasPendingFinalizeBatch: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: "tx_submitter_has_pending_finalize_batch",
 			Help: "Whether there are batches pending finalization (1 = yes, 0 = no)",
 		}),
@@ -101,6 +106,7 @@ func NewMetrics() *Metrics {
 	_ = prometheus.Register(m.IndexerBlockProcessed)
 	_ = prometheus.Register(m.LastCommittedBatch)
 	_ = prometheus.Register(m.LastFinalizedBatch)
+	_ = prometheus.Register(m.LastCacheBatchIndex)
 	_ = prometheus.Register(m.HasPendingFinalizeBatch)
 	_ = prometheus.Register(m.reorgs)
 	_ = prometheus.Register(m.confirmedTxs)
@@ -150,6 +156,11 @@ func (m *Metrics) SetLastFinalizedBatch(index uint64) {
 	m.LastFinalizedBatch.Set(float64(index))
 }
 
+// SetLastCacheBatchIndex sets the last cached batch index metric
+func (m *Metrics) SetLastCacheBatchIndex(index uint64) {
+	m.LastCacheBatchIndex.Set(float64(index))
+}
+
 // SetHasPendingFinalizeBatch sets whether there are batches pending finalization
 // hasPending should be true if there are pending batches, false otherwise
 func (m *Metrics) SetHasPendingFinalizeBatch(hasPending bool) {
diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go
index d864ec754..4331eacfd 100644
--- a/tx-submitter/services/rollup.go
+++ b/tx-submitter/services/rollup.go
@@ -122,7 +122,7 @@ func NewRollup(
 		cfg:              cfg,
 		signer:           ethtypes.LatestSignerForChainID(chainId),
 		externalRsaPriv:  rsaPriv,
-		batchCache:       batch.NewBatchCache(nil, l1, l2Clients, rollup, l2Caller),
+		batchCache:       batch.NewBatchCache(nil, l1, l2Clients, rollup, l2Caller, ldb),
 		ldb:              ldb,
 		bm:               bm,
 		eventInfoStorage: eventInfoStorage,
@@ -167,7 +167,7 @@ func (r *Rollup) Start() error {
 
 	// metrics
 	go utils.Loop(r.ctx, 10*time.Second, func() {
-		// get balacnce of wallet
+		// get balance of wallet
 		balance, err := r.L1Client.BalanceAt(context.Background(), r.WalletAddr(), nil)
 		if err != nil {
 			log.Error("get wallet balance error", "error", err)
@@ -255,19 +255,24 @@ func (r *Rollup) Start() error {
 		}
 	})
 
-	go func() {
-		for {
-			err = r.batchCache.InitAndSyncFromRollup()
-			if err != nil {
-				log.Error("init and sync from rollup failed, wait for the next try", "error", err)
-			}
-			err = r.batchCache.AssembleCurrentBatchHeader()
-			if err != nil {
-				log.Error("Assemble current batch failed, wait for the next try", "error", err)
-			}
-			time.Sleep(5 * time.Second)
+	var batchCacheSyncMu sync.Mutex
+	go utils.Loop(r.ctx, r.cfg.TxProcessInterval, func() {
+		batchCacheSyncMu.Lock()
+		defer batchCacheSyncMu.Unlock()
+		err = r.batchCache.InitAndSyncFromDatabase()
+		if err != nil {
+			log.Error("init and sync batch cache from database failed, wait for the next try", "error", err)
+		}
+		err = r.batchCache.AssembleCurrentBatchHeader()
+		if err != nil {
+			log.Error("assemble current batch failed, wait for the next try", "error", err)
+		}
+		index, err := r.batchCache.LatestBatchIndex()
+		if err != nil {
+			log.Error("cannot get the latest batch index from batch cache", "error", err)
 		}
-	}()
+		r.metrics.SetLastCacheBatchIndex(index)
+	})
 
 	return nil
 }
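
The change above swaps a hand-rolled for/sleep goroutine for utils.Loop guarded by batchCacheSyncMu. The Loop helper itself is not shown in this patch; the sketch below reconstructs the assumed semantics (tick-driven, cancelled with the context) and shows why the mutex is useful if an iteration can outlive the interval. Only the call sites above are from the patch; everything else here is illustrative.

package main

import (
	"context"
	"sync"
	"time"
)

// loop mirrors what utils.Loop is assumed to do here: run fn on every tick
// until the context is cancelled.
func loop(ctx context.Context, interval time.Duration, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fn()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var mu sync.Mutex // plays the role of batchCacheSyncMu
	go loop(ctx, 100*time.Millisecond, func() {
		// If a sync ever runs longer than the interval, or Loop is later
		// changed to fire concurrently, the mutex keeps iterations serialized.
		mu.Lock()
		defer mu.Unlock()
		time.Sleep(150 * time.Millisecond) // stand-in for the cache sync work
	})

	<-ctx.Done()
}

@@ -286,8 +291,8 @@ func (r *Rollup) ProcessTx() error {
 	}
 
 	// Check if this submitter should process transactions
-	if err := r.checkSubmitterTurn(); err != nil {
-		if err == errNotMyTurn {
+	if err = r.checkSubmitterTurn(); err != nil {
+		if errors.Is(err, errNotMyTurn) {
+			// 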
Get current submitter index for logging activeSubmitter, activeIndex, _ := r.rotator.CurrentSubmitter(r.L2Clients, r.Staking) @@ -699,23 +704,33 @@ func (r *Rollup) handleDiscardedTx(txRecord *types.TxRecord, tx *ethtypes.Transa replacedTx, err := r.ReSubmitTx(true, tx) if err != nil { if utils.ErrStringMatch(err, core.ErrNonceTooLow) { - // Transaction was probably confirmed in a reorg + // The tx was probably confirmed in a reorg log.Info("Discarded transaction removed (nonce too low)", "hash", tx.Hash().String(), "nonce", tx.Nonce(), "method", method) - if err := r.pendingTxs.Remove(tx.Hash()); err != nil { + if err = r.pendingTxs.Remove(tx.Hash()); err != nil { log.Error("failed to remove transaction", "hash", tx.Hash().String(), "error", err) } return nil } - return fmt.Errorf("resend discarded tx: %w", err) + + // If resubmit failed, try to replace it with a simple transfer transaction + log.Warn("Resubmit failed, attempting to replace with simple transfer transaction", + "hash", tx.Hash().String(), + "nonce", tx.Nonce(), + "error", err) + + replacedTx, err = r.createReplacementTransferTx(tx) + if err != nil { + return fmt.Errorf("failed to create replacement transfer tx: %w", err) + } } - if err := r.pendingTxs.Remove(tx.Hash()); err != nil { + if err = r.pendingTxs.Remove(tx.Hash()); err != nil { log.Error("failed to remove transaction", "hash", tx.Hash().String(), "error", err) } - if err := r.pendingTxs.Add(replacedTx); err != nil { + if err = r.pendingTxs.Add(replacedTx); err != nil { log.Error("failed to add replaced transaction", "hash", replacedTx.Hash().String(), "error", err) } log.Info("Successfully resubmitted discarded transaction", @@ -733,7 +748,7 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa return fmt.Errorf("get tx status error: %w", err) } - // Get current block number for confirmation count + // Get the current block number for confirmation count currentBlock, err := r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("get current block number error: %w", err) @@ -822,12 +837,12 @@ func (r *Rollup) finalize() error { } log.Info("finalize info", - "last_fianlzied", lastFinalized, + "last_finalized", lastFinalized, "last_committed", lastCommitted, "finalize_index", target, ) - // batch exist + // batch exists existed, err := r.Rollup.BatchExist(nil, target) if err != nil { log.Error("query batch exist", "err", err) @@ -838,7 +853,7 @@ func (r *Rollup) finalize() error { return nil } - // in challenge window + // inside challenge window inWindow, err := r.Rollup.BatchInsideChallengeWindow(nil, target) if err != nil { return fmt.Errorf("get batch inside challenge window error:%v", err) @@ -947,11 +962,11 @@ func (r *Rollup) finalize() error { } return fmt.Errorf("send tx error:%v", err.Error()) } else { - log.Info("finalzie tx sent") + log.Info("finalize tx sent") r.pendingTxs.SetNonce(signedTx.Nonce()) r.pendingTxs.SetPFinalize(target.Uint64()) - if err := r.pendingTxs.Add(signedTx); err != nil { + if err = r.pendingTxs.Add(signedTx); err != nil { log.Error("failed to add signed transaction", "hash", signedTx.Hash().String(), "error", err) } } @@ -976,7 +991,7 @@ func (r *Rollup) rollup() error { "blocks_processed", r.eventInfoStorage.BlockProcessed(), "last_event_time", r.eventInfoStorage.BlockTime()) - // get current blocknumber + // get current block number blockNumber, err := r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("failed to get block number in rollup: %w", 
err)
 	}
@@ -1050,15 +1065,12 @@ func (r *Rollup) rollup() error {
 
 	cindexBig, err := r.Rollup.LastCommittedBatchIndex(nil)
 	if err != nil {
-		return fmt.Errorf("get last committed batch index error:%v", err)
+		return fmt.Errorf("get last committed batch index error: %w", err)
 	}
 	cindex := cindexBig.Uint64()
-
-	switch {
-	case r.pendingTxs.pindex != 0:
+	batchIndex = cindex + 1
+	if len(r.pendingTxs.getAll()) != 0 && r.pendingTxs.pindex != 0 {
 		batchIndex = max(cindex, r.pendingTxs.pindex) + 1
-	default:
-		batchIndex = cindex + 1
 	}
 
 	log.Debug("Batch status",
@@ -1072,25 +1084,25 @@ func (r *Rollup) rollup() error {
 		return nil
 	}
 
-	batch, ok := r.batchCache.Get(batchIndex)
+	rpcRollupBatch, ok := r.batchCache.Get(batchIndex)
 	if !ok {
 		log.Info("Batch not found in cache", "batch_index", batchIndex)
 		return nil
 	}
 
-	signature, err := r.buildSignatureInput(batch)
+	signature, err := r.buildSignatureInput(rpcRollupBatch)
 	if err != nil {
 		return err
 	}
 
 	rollupBatch := bindings.IRollupBatchDataInput{
-		Version:           uint8(batch.Version),
-		ParentBatchHeader: batch.ParentBatchHeader,
-		LastBlockNumber:   batch.LastBlockNumber,
-		NumL1Messages:     batch.NumL1Messages,
-		PrevStateRoot:     batch.PrevStateRoot,
-		PostStateRoot:     batch.PostStateRoot,
-		WithdrawalRoot:    batch.WithdrawRoot,
+		Version:           uint8(rpcRollupBatch.Version),
+		ParentBatchHeader: rpcRollupBatch.ParentBatchHeader,
+		LastBlockNumber:   rpcRollupBatch.LastBlockNumber,
+		NumL1Messages:     rpcRollupBatch.NumL1Messages,
+		PrevStateRoot:     rpcRollupBatch.PrevStateRoot,
+		PostStateRoot:     rpcRollupBatch.PostStateRoot,
+		WithdrawalRoot:    rpcRollupBatch.WithdrawRoot,
 	}
 
 	// tip and cap
@@ -1108,9 +1120,9 @@ func (r *Rollup) rollup() error {
 	gas, err := r.EstimateGas(r.WalletAddr(), r.rollupAddr, calldata, gasFeeCap, tip)
 	if err != nil {
 		log.Warn("Estimate gas failed", "batch_index", batchIndex, "error", err)
-		// Use rough estimation based on L1 message count
+		// Fall back to rough estimation based on the L1 message count
 		if r.cfg.RoughEstimateGas {
-			msgcnt := utils.ParseL1MessageCnt(batch.BlockContexts)
+			msgcnt := utils.ParseL1MessageCnt(rpcRollupBatch.BlockContexts)
 			gas = r.RoughRollupGasEstimate(msgcnt)
 			log.Info("Using rough gas estimation",
 				"batch_index", batchIndex,
@@ -1131,7 +1143,7 @@ func (r *Rollup) rollup() error {
 	}
 
 	// Create and sign transaction
-	tx, err := r.createRollupTx(batch, nonce, gas, tip, gasFeeCap, blobFee, calldata, head)
+	tx, err := r.createRollupTx(rpcRollupBatch, nonce, gas, tip, gasFeeCap, blobFee, calldata, head)
 	if err != nil {
 		return fmt.Errorf("failed to create rollup tx: %w", err)
 	}
@@ -1741,7 +1753,6 @@ func (r *Rollup) BumpGas(origin uint64) uint64 {
 	}
 }
 
-// for rollup
 func (r *Rollup) RoughRollupGasEstimate(msgcnt uint64) uint64 {
 	return r.cfg.RollupTxGasBase + msgcnt*r.cfg.RollupTxGasPerL1Msg
 }
@@ -1927,3 +1938,94 @@ func (r *Rollup) CancelTx(tx *ethtypes.Transaction) (*ethtypes.Transaction, erro
 
 	return newTx, nil
 }
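
The helper introduced below leans on the replacement rule for same-nonce transactions: the pool only accepts the new transaction if its tip and fee cap are bumped enough, 10% being go-ethereum's default floor. calcThresholdValue plays that role in this file; its exact factor is not visible in this hunk, so the standalone sketch below assumes the 10% minimum.

package main

import (
	"fmt"
	"math/big"
)

// bumpForReplacement raises a tip/fee-cap pair by 10%, rounding up, which is
// the minimum go-ethereum's default txpool accepts for a same-nonce
// replacement. calcThresholdValue in rollup.go serves the same purpose.
func bumpForReplacement(oldTip, oldFeeCap *big.Int) (*big.Int, *big.Int) {
	bump := func(v *big.Int) *big.Int {
		n := new(big.Int).Mul(v, big.NewInt(110))
		n.Add(n, big.NewInt(99)) // round up: ceil(v*110/100)
		return n.Div(n, big.NewInt(100))
	}
	return bump(oldTip), bump(oldFeeCap)
}

func main() {
	tip, feeCap := bumpForReplacement(big.NewInt(1_000_000_000), big.NewInt(30_000_000_000))
	fmt.Println(tip, feeCap) // 1100000000 33000000000
}

+
+// createReplacementTransferTx creates a simple transfer transaction with the same nonce
+// to replace the original transaction. This is used when resubmission fails.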
+func (r *Rollup) createReplacementTransferTx(tx *ethtypes.Transaction) (*ethtypes.Transaction, error) { + if tx == nil { + return nil, errors.New("nil tx") + } + + log.Info("creating replacement transfer transaction", + "original_hash", tx.Hash().String(), + "nonce", tx.Nonce(), + ) + + // Get current gas prices + tip, gasFeeCap, _, head, err := r.GetGasTipAndCap() + if err != nil { + return nil, fmt.Errorf("get gas tip and cap error: %w", err) + } + + // Bump gas prices to ensure replacement + bumpedFeeCap := calcThresholdValue(tx.GasFeeCap(), false) + bumpedTip := calcThresholdValue(tx.GasTipCap(), false) + + if bumpedTip.Cmp(tip) > 0 { + tip = bumpedTip + } + if bumpedFeeCap.Cmp(gasFeeCap) > 0 { + gasFeeCap = bumpedFeeCap + } + + // Ensure minimum tip if configured + if r.cfg.MinTip > 0 && tip.Cmp(big.NewInt(int64(r.cfg.MinTip))) < 0 { + log.Info("replacement tip is too low, update tip to min tip", "tip", tip, "min_tip", r.cfg.MinTip) + tip = big.NewInt(int64(r.cfg.MinTip)) + if head.BaseFee != nil { + recalculatedFeecap := new(big.Int).Add( + tip, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + if recalculatedFeecap.Cmp(gasFeeCap) > 0 { + gasFeeCap = recalculatedFeecap + } + } + } + + // Get sender address (send to self) + senderAddr := r.WalletAddr() + + // Create a simple transfer transaction (send to self with empty calldata) + // Use minimum gas limit for a simple transfer (21000) + transferGas := uint64(21000) + + newTx := ethtypes.NewTx(ðtypes.DynamicFeeTx{ + ChainID: r.chainId, + To: &senderAddr, // Send it to self + Nonce: tx.Nonce(), // Same nonce as original transaction + GasFeeCap: gasFeeCap, + GasTipCap: tip, + Gas: transferGas, + Value: big.NewInt(0), // Zero value transfer + Data: []byte{}, // Empty call data + }) + + log.Info("replacement transfer tx info", + "tx_type", newTx.Type(), + "gas_tip_gwei", utils.WeiToGwei(tip), + "gas_fee_cap_gwei", utils.WeiToGwei(gasFeeCap), + "nonce", newTx.Nonce(), + "to", senderAddr.Hex(), + ) + + // Sign transaction + newTx, err = r.Sign(newTx) + if err != nil { + return nil, fmt.Errorf("sign tx error: %w", err) + } + + // Send transaction + err = r.SendTx(newTx) + if err != nil { + return nil, fmt.Errorf("send tx error: %w", err) + } + + log.Info("successfully sent replacement transfer transaction", + "original_hash", tx.Hash().String(), + "replacement_hash", newTx.Hash().String(), + "nonce", newTx.Nonce(), + ) + + return newTx, nil +} From 260c081817724ce4182fcbd5e806797bc5af577e Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Fri, 6 Feb 2026 00:26:56 +0800 Subject: [PATCH 09/12] fix lint --- contracts/deploy/020-ContractInit.ts | 13 +- contracts/devnetL1.json | 212 ++++++++++++++++++++ contracts/src/deploy-config/l1.ts | 4 +- tx-submitter/batch/batch_cache.go | 10 +- tx-submitter/batch/batch_query.go | 20 +- tx-submitter/batch/batch_restart_test.go | 4 +- tx-submitter/mock/db.go | 25 +++ tx-submitter/mock/rollup.go | 35 ++++ tx-submitter/services/rollup_handle_test.go | 1 + 9 files changed, 304 insertions(+), 20 deletions(-) create mode 100644 contracts/devnetL1.json diff --git a/contracts/deploy/020-ContractInit.ts b/contracts/deploy/020-ContractInit.ts index 7a576bbc6..96bf3637b 100644 --- a/contracts/deploy/020-ContractInit.ts +++ b/contracts/deploy/020-ContractInit.ts @@ -57,12 +57,18 @@ export const ContractInit = async ( // submitter and challenger const submitter: string = config.rollupProposer const challenger: string = config.rollupChallenger + const rollupDelayPeriod: number = config.rollupDelayPeriod + if 
(!ethers.utils.isAddress(submitter) ||
		!ethers.utils.isAddress(challenger)
	) {
		console.error('please check your address')
		return ''
	}
+	if (rollupDelayPeriod == 0) {
+		console.error('rollupDelayPeriod cannot be zero')
+		return ''
+	}
 	let res = await Rollup.importGenesisBatch(batchHeader)
 	let rec = await res.wait()
 	console.log(`importGenesisBatch(%s) ${rec.status == 1 ? "success" : "failed"}`, batchHeader)
@@ -70,8 +76,11 @@ export const ContractInit = async (
 	rec = await res.wait()
 	console.log(`addChallenger(%s) ${rec.status == 1 ? "success" : "failed"}`, challenger)
 
-	await Rollup.initialize2("0x0000000000000000000000000000000000000000000000000000000000000001")
-	res = await Rollup.initialize3(8640000000)
+	res = await Rollup.initialize2("0x0000000000000000000000000000000000000000000000000000000000000001")
+	rec = await res.wait()
+	console.log(`initialize2(%s) ${rec.status == 1 ? "success" : "failed"}`)
+
+	res = await Rollup.initialize3(rollupDelayPeriod)
 	rec = await res.wait()
 	console.log(`initialize3(%s) ${rec.status == 1 ? "success" : "failed"}`)
 }
diff --git a/contracts/devnetL1.json b/contracts/devnetL1.json
new file mode 100644
index 000000000..52742b9cb
--- /dev/null
+++ b/contracts/devnetL1.json
@@ -0,0 +1,212 @@
+[
+  {
+    "name": "Impl__ProxyAdmin",
+    "address": "0x5fbdb2315678afecb367f032d93f642f64180aa3",
+    "time": "2026-02-05T11:19:32.891Z",
+    "number": 9
+  },
+  {
+    "name": "Impl__EmptyContract",
+    "address": "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512",
+    "time": "2026-02-05T11:19:36.929Z",
+    "number": 11
+  },
+  {
+    "name": "Impl__WETH",
+    "address": "0x9fe46736679d2d9a65f0992f2272de9f3c7fa6e0",
+    "time": "2026-02-05T11:19:40.992Z",
+    "number": 12
+  },
+  {
+    "name": "Proxy__L1CrossDomainMessenger",
+    "address": "0xcf7ed3acca5a467e9e704c703e8d87f634fb0fc9",
+    "time": "2026-02-05T11:19:45.061Z",
+    "number": 14
+  },
+  {
+    "name": "Proxy__L1MessageQueueWithGasPriceOracle",
+    "address": "0xdc64a140aa3e981100a9beca4e685f962f0cf6c9",
+    "time": "2026-02-05T11:19:49.099Z",
+    "number": 15
+  },
+  {
+    "name": "Proxy__L1Staking",
+    "address": "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707",
+    "time": "2026-02-05T11:19:53.145Z",
+    "number": 16
+  },
+  {
+    "name": "Proxy__Rollup",
+    "address": "0x0165878a594ca255338adfa4d48449f69242eb8f",
+    "time": "2026-02-05T11:19:57.207Z",
+    "number": 18
+  },
+  {
+    "name": "Proxy__L1GatewayRouter",
+    "address": "0xa513e6e4b8f2a923d98304ec87f64353c4d5c853",
+    "time": "2026-02-05T11:20:05.255Z",
+    "number": 19
+  },
+  {
+    "name": "Proxy__L1ETHGateway",
+    "address": "0x2279b7a0a67db372996a5fab50d91eaa73d2ebe6",
+    "time": "2026-02-05T11:20:09.309Z",
+    "number": 21
+  },
+  {
+    "name": "Proxy__L1StandardERC20Gateway",
+    "address": "0x8a791620dd6260079bf849dc5567adc3f2fdc318",
+    "time": "2026-02-05T11:20:13.357Z",
+    "number": 22
+  },
+  {
+    "name": "Proxy__L1CustomERC20Gateway",
+    "address": "0x610178da211fef7d417bc0e6fed39f05609ad788",
+    "time": "2026-02-05T11:20:17.408Z",
+    "number": 23
+  },
+  {
+    "name": "Proxy__L1WithdrawLockERC20Gateway",
+    "address": "0xb7f8bc63bbcad18155201308c8f3540b07f84f5e",
+    "time": "2026-02-05T11:20:21.461Z",
+    "number": 25
+  },
+  {
+    "name": "Proxy__L1ReverseCustomGateway",
+    "address": "0xa51c1fc2f0d1a1b8494ed1fe312d7c3a78ed91c0",
+    "time": "2026-02-05T11:20:25.512Z",
+    "number": 26
+  },
+  {
+    "name": "Proxy__L1ERC721Gateway",
+    "address": "0x0dcd1bf9a1b36ce34237eeafef220932846bcd82",
+    "time": "2026-02-05T11:20:29.557Z",
+    "number": 27
+  },
+  {
+    "name": "Proxy__L1ERC1155Gateway",
+    "address": 
"0x9a676e781a523b5d0c0e43731313a708cb607508", + "time": "2026-02-05T11:20:33.607Z", + "number": 28 + }, + { + "name": "Proxy__EnforcedTxGateway", + "address": "0x0b306bf915c4d645ff596e518faf3f9669b97016", + "time": "2026-02-05T11:20:37.659Z", + "number": 29 + }, + { + "name": "Proxy__L1WETHGateway", + "address": "0x959922be3caee4b8cd9a407cc3ac1c251c2007b1", + "time": "2026-02-05T11:20:41.710Z", + "number": 30 + }, + { + "name": "Proxy__L1USDCGateway", + "address": "0x9a9f2ccfde556a7e9ff0848998aa4a0cfd8863ae", + "time": "2026-02-05T11:20:45.759Z", + "number": 32 + }, + { + "name": "Impl__ZkEvmVerifierV1", + "address": "0x68b1d87f95878fe05b998f19b66f4baba5de1aed", + "time": "2026-02-05T11:20:49.802Z", + "number": 33 + }, + { + "name": "Impl__Whitelist", + "address": "0x3aa5ebb10dc797cac828524e59a333d0a371443c", + "time": "2026-02-05T11:22:45.854Z", + "number": 68 + }, + { + "name": "Impl__L1CrossDomainMessenger", + "address": "0xc6e7df5e7b4f2a278906862b61205850344d4e7d", + "time": "2026-02-05T11:22:49.900Z", + "number": 69 + }, + { + "name": "Impl__L1MessageQueueWithGasPriceOracle", + "address": "0x59b670e9fa9d0a427751af201d676719a970857b", + "time": "2026-02-05T11:22:53.945Z", + "number": 70 + }, + { + "name": "Impl__Rollup", + "address": "0x4ed7c70f96b99c776995fb64377f0d4ab3b0e1c1", + "time": "2026-02-05T11:22:58.016Z", + "number": 72 + }, + { + "name": "Impl__L1GatewayRouter", + "address": "0x322813fd9a801c5507c9de605d63cea4f2ce6c44", + "time": "2026-02-05T11:23:06.061Z", + "number": 74 + }, + { + "name": "Impl__L1StandardERC20Gateway", + "address": "0xa85233c63b9ee964add6f2cffe00fd84eb32338f", + "time": "2026-02-05T11:23:10.096Z", + "number": 75 + }, + { + "name": "Impl__L1CustomERC20Gateway", + "address": "0x4a679253410272dd5232b3ff7cf5dbb88f295319", + "time": "2026-02-05T11:23:14.133Z", + "number": 76 + }, + { + "name": "Impl__L1WithdrawLockERC20Gateway", + "address": "0x7a2088a1bfc9d81c55368ae168c2c02570cb814f", + "time": "2026-02-05T11:23:18.173Z", + "number": 78 + }, + { + "name": "Impl__L1ReverseCustomGateway", + "address": "0x09635f643e140090a9a8dcd712ed6285858cebef", + "time": "2026-02-05T11:23:22.235Z", + "number": 79 + }, + { + "name": "Impl__L1ETHGateway", + "address": "0xc5a5c42992decbae36851359345fe25997f5c42d", + "time": "2026-02-05T11:23:26.279Z", + "number": 80 + }, + { + "name": "Impl__L1WETHGateway", + "address": "0x67d269191c92caf3cd7723f116c85e6e9bf55933", + "time": "2026-02-05T11:23:30.325Z", + "number": 82 + }, + { + "name": "Impl__EnforcedTxGateway", + "address": "0xe6e340d132b5f46d1e472debcd681b2abc16e57e", + "time": "2026-02-05T11:23:34.366Z", + "number": 83 + }, + { + "name": "Impl__L1ERC721Gateway", + "address": "0xc3e53f4d16ae77db1c982e75a937b9f60fe63690", + "time": "2026-02-05T11:23:38.408Z", + "number": 84 + }, + { + "name": "Impl__L1ERC1155Gateway", + "address": "0x84ea74d481ee0a5332c457a4d796187f6ba67feb", + "time": "2026-02-05T11:23:42.448Z", + "number": 86 + }, + { + "name": "Impl__L1Staking", + "address": "0x9e545e3c0baab3e08cdfd552c960a1050f373042", + "time": "2026-02-05T11:23:46.498Z", + "number": 87 + }, + { + "name": "Impl__MultipleVersionRollupVerifier", + "address": "0x851356ae760d987e095750cceb3bc6014560891c", + "time": "2026-02-05T11:23:56.668Z", + "number": 90 + } +] \ No newline at end of file diff --git a/contracts/src/deploy-config/l1.ts b/contracts/src/deploy-config/l1.ts index 4db4af83e..f77424fd6 100644 --- a/contracts/src/deploy-config/l1.ts +++ b/contracts/src/deploy-config/l1.ts @@ -23,11 +23,13 @@ const config = { 
finalizationPeriodSeconds: 10, rollupProofWindow: 86400, proofRewardPercent: 70, + rollupDelayPeriod: 86400, + // challenge config rollupProposer: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8', rollupChallenger: '0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65', // genesis config - batchHeader: '0x000000000000000000000000000000000000000000000000004ccf6d1ee4bbebaf7ce495605b0a9bcf5281d35cd769df730205832235869547010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014000000000000000000000000000000000000000000000000000000000000000011d7c68227c20de25bf949beebef5460050abeccba32bdf538e1298f32b57db2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + batchHeader: '0x00000000000000000000000000000000000000000000000000886e14341b355178d11a2c9f985f60a1a195973078b688a11aeaebb0c95db595010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c44401400000000000000000000000000000000000000000000000000000000000000002d20dde82426d971e398b3cba11ebb60d0d740b799f85e2f95fd12a1faad8e2f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', // staking config // staking Cross-Chain config diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 3a385d1c5..01f53992a 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -14,11 +14,11 @@ import ( "morph-l2/tx-submitter/iface" "morph-l2/tx-submitter/types" - "github.com/morph-l2/go-ethereum/crypto" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" "github.com/morph-l2/go-ethereum/common/hexutil" ethtypes "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/crypto" "github.com/morph-l2/go-ethereum/eth" "github.com/morph-l2/go-ethereum/log" ) @@ -187,7 +187,7 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error { if err != nil { return err } - if batches == nil || len(batches) == 0 { + if len(batches) == 0 { err = bc.InitAndSyncFromRollup() if err != nil { return err @@ -239,7 +239,7 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error { return fmt.Errorf("get post state root err: %w", err) } bc.currentBlockNumber = bc.lastPackedBlockHeight - bc.totalL1MessagePopped, err = parentHeader.TotalL1MessagePopped() + bc.totalL1MessagePopped, _ = parentHeader.TotalL1MessagePopped() bc.initDone = true log.Info("Sync sealed batch from database success", "count", len(batches)) return nil @@ -318,7 +318,7 @@ func (bc *BatchCache) checkBatchHashCorrect(batchIndex *big.Int, batchHash commo if !bytes.Equal(commitBatchHash[:], batchHash.Bytes()) { log.Error("check commit batch hash failed", "index", batchIndex.String(), - "commited", hex.EncodeToString(commitBatchHash[:]), + "committed", hex.EncodeToString(commitBatchHash[:]), "generate", batchHash.String()) return false, nil } @@ -1023,7 +1023,7 @@ func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (c return common.Hash{}, false, fmt.Errorf("sealed batch not found for index %d", batchIndex) } if batchIndex <= ci.Uint64() { - // batch already commited, check batch hash + // batch already committed, check batch hash correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.Hash) if err != nil { return common.Hash{}, false, err diff --git 
a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go index 104357815..5776a1c2f 100644 --- a/tx-submitter/batch/batch_query.go +++ b/tx-submitter/batch/batch_query.go @@ -77,11 +77,11 @@ func (bc *BatchCache) getLastFinalizeBatchHeaderFromRollupByIndex(index uint64) continue } if batchIndex == index { - finalizeEventIter.Close() + _ = finalizeEventIter.Close() return &batchHeader, nil } } - finalizeEventIter.Close() + _ = finalizeEventIter.Close() // Continue querying backwards if endBlock < blockRange { @@ -219,12 +219,12 @@ func (bc *BatchCache) getCommitBatchDataByIndex(index uint64) (*bindings.IRollup parentHeader := BatchHeaderBytes(batchDataInput.ParentBatchHeader) parentBatchIndex, err := parentHeader.BatchIndex() if err == nil && parentBatchIndex+1 == index { - commitEventIter.Close() + _ = commitEventIter.Close() return batchDataInput, batchSignatureInput, nil } } } - commitEventIter.Close() + _ = commitEventIter.Close() // Continue querying backwards if endBlock < blockRange { @@ -290,7 +290,7 @@ func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bi // Convert []uint8 to []byte parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader)) for i, v := range batchDataInputStruct.ParentBatchHeader { - parentBatchHeader[i] = byte(v) + parentBatchHeader[i] = v } batchDataInput := &bindings.IRollupBatchDataInput{ @@ -313,11 +313,11 @@ func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bi // Convert []uint8 to []byte sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets)) for i, v := range batchSignatureInputStruct.SequencerSets { - sequencerSets[i] = byte(v) + sequencerSets[i] = v } signature := make([]byte, len(batchSignatureInputStruct.Signature)) for i, v := range batchSignatureInputStruct.Signature { - signature[i] = byte(v) + signature[i] = v } batchSignatureInput := &bindings.IRollupBatchSignatureInput{ @@ -358,7 +358,7 @@ func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindin // Convert []uint8 to []byte parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader)) for i, v := range batchDataInputStruct.ParentBatchHeader { - parentBatchHeader[i] = byte(v) + parentBatchHeader[i] = v } batchDataInput := &bindings.IRollupBatchDataInput{ @@ -381,11 +381,11 @@ func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindin // Convert []uint8 to []byte sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets)) for i, v := range batchSignatureInputStruct.SequencerSets { - sequencerSets[i] = byte(v) + sequencerSets[i] = v } signature := make([]byte, len(batchSignatureInputStruct.Signature)) for i, v := range batchSignatureInputStruct.Signature { - signature[i] = byte(v) + signature[i] = v } batchSignatureInput := &bindings.IRollupBatchSignatureInput{ diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index 785c7ca87..69bce2539 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -326,7 +326,7 @@ func compareBatchHeaderWithCommitData(t *testing.T, assembledBatchHeader *BatchH // Compare Version version, err := assembledBatchHeader.Version() require.NoError(t, err) - if uint8(version) != batchDataInput.Version { + if version != batchDataInput.Version { t.Errorf("❌ Version mismatch: assembled=%d, commitBatch=%d", version, batchDataInput.Version) } else { t.Logf("✓ Version: %d (match)", version) @@ -638,7 +638,7 
@@ func assembleBatchHeaderFromL2Blocks( return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) } // Check capacity and store to current - exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root) + _, err := bc.CalculateCapWithProposalBlock(blockNum, root) if err != nil { return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) } diff --git a/tx-submitter/mock/db.go b/tx-submitter/mock/db.go index 2eb44844f..c6b25c809 100644 --- a/tx-submitter/mock/db.go +++ b/tx-submitter/mock/db.go @@ -50,6 +50,31 @@ func (d *MockDB) PutFloat(key string, val float64) error { return nil } +func (d *MockDB) GetBytes(key []byte) ([]byte, error) { + d.m.RLock() + defer d.m.RUnlock() + if val, ok := d.store[string(key)]; ok { + return []byte(val), nil + } + return nil, db.ErrKeyNotFound +} + +func (d *MockDB) PutBytes(key, val []byte) error { + d.m.Lock() + defer d.m.Unlock() + keyStr := string(key) + d.store[keyStr] = string(val) + return nil +} + +func (d *MockDB) Delete(key []byte) error { + d.m.Lock() + defer d.m.Unlock() + keyStr := string(key) + delete(d.store, keyStr) + return nil +} + func (d *MockDB) Close() error { return nil } diff --git a/tx-submitter/mock/rollup.go b/tx-submitter/mock/rollup.go index 6dd534ffd..dbf424a59 100644 --- a/tx-submitter/mock/rollup.go +++ b/tx-submitter/mock/rollup.go @@ -60,6 +60,41 @@ func (m *MockRollup) BatchExist(opts *bind.CallOpts, batchIndex *big.Int) (bool, return m.batchExists, nil } +// CommittedBatches implements IRollup +func (m *MockRollup) CommittedBatches(opts *bind.CallOpts, batchIndex *big.Int) ([32]byte, error) { + return [32]byte{}, nil +} + +// BatchDataStore implements IRollup +func (m *MockRollup) BatchDataStore(opts *bind.CallOpts, batchIndex *big.Int) (struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int +}, error) { + return struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int + }{ + OriginTimestamp: big.NewInt(0), + FinalizeTimestamp: big.NewInt(0), + BlockNumber: big.NewInt(0), + SignedSequencersBitmap: big.NewInt(0), + }, nil +} + +// FilterCommitBatch implements IRollup +func (m *MockRollup) FilterCommitBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupCommitBatchIterator, error) { + return nil, nil +} + +// FilterFinalizeBatch implements IRollup +func (m *MockRollup) FilterFinalizeBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupFinalizeBatchIterator, error) { + return nil, nil +} + // SetLastCommittedBatchIndex sets the mock value for LastCommittedBatchIndex func (m *MockRollup) SetLastCommittedBatchIndex(index *big.Int) { m.lastCommittedBatchIndex = index diff --git a/tx-submitter/services/rollup_handle_test.go b/tx-submitter/services/rollup_handle_test.go index 64f03b197..465965272 100644 --- a/tx-submitter/services/rollup_handle_test.go +++ b/tx-submitter/services/rollup_handle_test.go @@ -114,6 +114,7 @@ func setupTestRollup(t *testing.T) (*Rollup, *mock.L1ClientWrapper, *mock.L2Clie nil, nil, eventStorage, + nil, ) // Initialize pending transactions From 86d1ef6713aac4825736afdeff011bb3b5b058d9 Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Fri, 6 Feb 2026 00:27:10 +0800 Subject: [PATCH 10/12] fix lint --- tx-submitter/batch/batch_query.go | 24 ++++++------------------ tx-submitter/batch/batch_restart_test.go | 7 ++----- 2 files changed, 8 
insertions(+), 23 deletions(-)

diff --git a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go
index 5776a1c2f..e608069b2 100644
--- a/tx-submitter/batch/batch_query.go
+++ b/tx-submitter/batch/batch_query.go
@@ -289,9 +289,7 @@ func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bi
 
 	// Convert []uint8 to []byte
 	parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader))
-	for i, v := range batchDataInputStruct.ParentBatchHeader {
-		parentBatchHeader[i] = v
-	}
+	copy(parentBatchHeader, batchDataInputStruct.ParentBatchHeader)
 
 	batchDataInput := &bindings.IRollupBatchDataInput{
 		Version: batchDataInputStruct.Version,
@@ -312,13 +310,9 @@ func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bi
 
 	// Convert []uint8 to []byte
 	sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets))
-	for i, v := range batchSignatureInputStruct.SequencerSets {
-		sequencerSets[i] = v
-	}
+	copy(sequencerSets, batchSignatureInputStruct.SequencerSets)
 	signature := make([]byte, len(batchSignatureInputStruct.Signature))
-	for i, v := range batchSignatureInputStruct.Signature {
-		signature[i] = v
-	}
+	copy(signature, batchSignatureInputStruct.Signature)
 
 	batchSignatureInput := &bindings.IRollupBatchSignatureInput{
 		SignedSequencersBitmap: batchSignatureInputStruct.SignedSequencersBitmap,
@@ -357,9 +351,7 @@ func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindin
 
 	// Convert []uint8 to []byte
 	parentBatchHeader := make([]byte, len(batchDataInputStruct.ParentBatchHeader))
-	for i, v := range batchDataInputStruct.ParentBatchHeader {
-		parentBatchHeader[i] = v
-	}
+	copy(parentBatchHeader, batchDataInputStruct.ParentBatchHeader)
 
 	batchDataInput := &bindings.IRollupBatchDataInput{
 		Version: batchDataInputStruct.Version,
@@ -380,13 +372,9 @@ func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindin
 
 	// Convert []uint8 to []byte
 	sequencerSets := make([]byte, len(batchSignatureInputStruct.SequencerSets))
-	for i, v := range batchSignatureInputStruct.SequencerSets {
-		sequencerSets[i] = v
-	}
+	copy(sequencerSets, batchSignatureInputStruct.SequencerSets)
 	signature := make([]byte, len(batchSignatureInputStruct.Signature))
-	for i, v := range batchSignatureInputStruct.Signature {
-		signature[i] = v
-	}
+	copy(signature, batchSignatureInputStruct.Signature)
 
 	batchSignatureInput := &bindings.IRollupBatchSignatureInput{
 		SignedSequencersBitmap: batchSignatureInputStruct.SignedSequencersBitmap,
diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go
index 69bce2539..a2866f4e0 100644
--- a/tx-submitter/batch/batch_restart_test.go
+++ b/tx-submitter/batch/batch_restart_test.go
@@ -638,7 +638,7 @@ func assembleBatchHeaderFromL2Blocks(
 			return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err)
 		}
 		// Check capacity and store to current
-		_, err := bc.CalculateCapWithProposalBlock(blockNum, root)
+		exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root)
 		if err != nil {
 			return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err)
 		}
@@ -649,10 +649,7 @@ func assembleBatchHeaderFromL2Blocks(
 		}
 
 		// If capacity exceeds limit, can stop early (optional)
-		if exceeded {
-			// Note: Can choose to continue packing until endBlockNum, or stop early
-			// Decide based on business requirements
-		}
+		_ = exceeded // Checked but not used in this test
 	}
 
 	// Get the last block's timestamp for packing

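The refactor in this patch replaces index loops over byte slices with the copy builtin; the two forms are equivalent for []byte, and copy also reports how many bytes were written. A self-contained illustration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	src := []byte{0x01, 0x02, 0x03}

	// Loop form removed by the patch.
	a := make([]byte, len(src))
	for i, v := range src {
		a[i] = v
	}

	// copy form introduced by the patch; n is the count of bytes copied.
	b := make([]byte, len(src))
	n := copy(b, src)

	fmt.Println(bytes.Equal(a, b), n) // true 3
}

From 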
19fe1f0a80b4de617a4bdf53990a8ec36975cb55 Mon Sep 17 00:00:00 2001 From: kukoomomo Date: Fri, 6 Feb 2026 11:20:09 +0800 Subject: [PATCH 11/12] fix submitter lint --- contracts/devnetL1.json | 212 ----------------------- tx-submitter/batch/batch_cache.go | 97 +++++------ tx-submitter/batch/batch_cache_test.go | 3 +- tx-submitter/batch/batch_query.go | 80 --------- tx-submitter/batch/batch_restart_test.go | 165 ------------------ tx-submitter/batch/batch_storage.go | 8 +- tx-submitter/batch/batch_storage_test.go | 2 +- tx-submitter/batch/commit_test.go | 30 +--- tx-submitter/iface/client.go | 9 +- tx-submitter/services/rollup.go | 19 +- tx-submitter/types/l2Caller.go | 2 +- 11 files changed, 81 insertions(+), 546 deletions(-) delete mode 100644 contracts/devnetL1.json diff --git a/contracts/devnetL1.json b/contracts/devnetL1.json deleted file mode 100644 index 52742b9cb..000000000 --- a/contracts/devnetL1.json +++ /dev/null @@ -1,212 +0,0 @@ -[ - { - "name": "Impl__ProxyAdmin", - "address": "0x5fbdb2315678afecb367f032d93f642f64180aa3", - "time": "2026-02-05T11:19:32.891Z", - "number": 9 - }, - { - "name": "Impl__EmptyContract", - "address": "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512", - "time": "2026-02-05T11:19:36.929Z", - "number": 11 - }, - { - "name": "Impl__WETH", - "address": "0x9fe46736679d2d9a65f0992f2272de9f3c7fa6e0", - "time": "2026-02-05T11:19:40.992Z", - "number": 12 - }, - { - "name": "Proxy__L1CrossDomainMessenger", - "address": "0xcf7ed3acca5a467e9e704c703e8d87f634fb0fc9", - "time": "2026-02-05T11:19:45.061Z", - "number": 14 - }, - { - "name": "Proxy__L1MessageQueueWithGasPriceOracle", - "address": "0xdc64a140aa3e981100a9beca4e685f962f0cf6c9", - "time": "2026-02-05T11:19:49.099Z", - "number": 15 - }, - { - "name": "Proxy__L1Staking", - "address": "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707", - "time": "2026-02-05T11:19:53.145Z", - "number": 16 - }, - { - "name": "Proxy__Rollup", - "address": "0x0165878a594ca255338adfa4d48449f69242eb8f", - "time": "2026-02-05T11:19:57.207Z", - "number": 18 - }, - { - "name": "Proxy__L1GatewayRouter", - "address": "0xa513e6e4b8f2a923d98304ec87f64353c4d5c853", - "time": "2026-02-05T11:20:05.255Z", - "number": 19 - }, - { - "name": "Proxy__L1ETHGateway", - "address": "0x2279b7a0a67db372996a5fab50d91eaa73d2ebe6", - "time": "2026-02-05T11:20:09.309Z", - "number": 21 - }, - { - "name": "Proxy__L1StandardERC20Gateway", - "address": "0x8a791620dd6260079bf849dc5567adc3f2fdc318", - "time": "2026-02-05T11:20:13.357Z", - "number": 22 - }, - { - "name": "Proxy__L1CustomERC20Gateway", - "address": "0x610178da211fef7d417bc0e6fed39f05609ad788", - "time": "2026-02-05T11:20:17.408Z", - "number": 23 - }, - { - "name": "Proxy__L1WithdrawLockERC20Gateway", - "address": "0xb7f8bc63bbcad18155201308c8f3540b07f84f5e", - "time": "2026-02-05T11:20:21.461Z", - "number": 25 - }, - { - "name": "Proxy__L1ReverseCustomGateway", - "address": "0xa51c1fc2f0d1a1b8494ed1fe312d7c3a78ed91c0", - "time": "2026-02-05T11:20:25.512Z", - "number": 26 - }, - { - "name": "Proxy__L1ERC721Gateway", - "address": "0x0dcd1bf9a1b36ce34237eeafef220932846bcd82", - "time": "2026-02-05T11:20:29.557Z", - "number": 27 - }, - { - "name": "Proxy__L1ERC1155Gateway", - "address": "0x9a676e781a523b5d0c0e43731313a708cb607508", - "time": "2026-02-05T11:20:33.607Z", - "number": 28 - }, - { - "name": "Proxy__EnforcedTxGateway", - "address": "0x0b306bf915c4d645ff596e518faf3f9669b97016", - "time": "2026-02-05T11:20:37.659Z", - "number": 29 - }, - { - "name": "Proxy__L1WETHGateway", - "address": 
"0x959922be3caee4b8cd9a407cc3ac1c251c2007b1", - "time": "2026-02-05T11:20:41.710Z", - "number": 30 - }, - { - "name": "Proxy__L1USDCGateway", - "address": "0x9a9f2ccfde556a7e9ff0848998aa4a0cfd8863ae", - "time": "2026-02-05T11:20:45.759Z", - "number": 32 - }, - { - "name": "Impl__ZkEvmVerifierV1", - "address": "0x68b1d87f95878fe05b998f19b66f4baba5de1aed", - "time": "2026-02-05T11:20:49.802Z", - "number": 33 - }, - { - "name": "Impl__Whitelist", - "address": "0x3aa5ebb10dc797cac828524e59a333d0a371443c", - "time": "2026-02-05T11:22:45.854Z", - "number": 68 - }, - { - "name": "Impl__L1CrossDomainMessenger", - "address": "0xc6e7df5e7b4f2a278906862b61205850344d4e7d", - "time": "2026-02-05T11:22:49.900Z", - "number": 69 - }, - { - "name": "Impl__L1MessageQueueWithGasPriceOracle", - "address": "0x59b670e9fa9d0a427751af201d676719a970857b", - "time": "2026-02-05T11:22:53.945Z", - "number": 70 - }, - { - "name": "Impl__Rollup", - "address": "0x4ed7c70f96b99c776995fb64377f0d4ab3b0e1c1", - "time": "2026-02-05T11:22:58.016Z", - "number": 72 - }, - { - "name": "Impl__L1GatewayRouter", - "address": "0x322813fd9a801c5507c9de605d63cea4f2ce6c44", - "time": "2026-02-05T11:23:06.061Z", - "number": 74 - }, - { - "name": "Impl__L1StandardERC20Gateway", - "address": "0xa85233c63b9ee964add6f2cffe00fd84eb32338f", - "time": "2026-02-05T11:23:10.096Z", - "number": 75 - }, - { - "name": "Impl__L1CustomERC20Gateway", - "address": "0x4a679253410272dd5232b3ff7cf5dbb88f295319", - "time": "2026-02-05T11:23:14.133Z", - "number": 76 - }, - { - "name": "Impl__L1WithdrawLockERC20Gateway", - "address": "0x7a2088a1bfc9d81c55368ae168c2c02570cb814f", - "time": "2026-02-05T11:23:18.173Z", - "number": 78 - }, - { - "name": "Impl__L1ReverseCustomGateway", - "address": "0x09635f643e140090a9a8dcd712ed6285858cebef", - "time": "2026-02-05T11:23:22.235Z", - "number": 79 - }, - { - "name": "Impl__L1ETHGateway", - "address": "0xc5a5c42992decbae36851359345fe25997f5c42d", - "time": "2026-02-05T11:23:26.279Z", - "number": 80 - }, - { - "name": "Impl__L1WETHGateway", - "address": "0x67d269191c92caf3cd7723f116c85e6e9bf55933", - "time": "2026-02-05T11:23:30.325Z", - "number": 82 - }, - { - "name": "Impl__EnforcedTxGateway", - "address": "0xe6e340d132b5f46d1e472debcd681b2abc16e57e", - "time": "2026-02-05T11:23:34.366Z", - "number": 83 - }, - { - "name": "Impl__L1ERC721Gateway", - "address": "0xc3e53f4d16ae77db1c982e75a937b9f60fe63690", - "time": "2026-02-05T11:23:38.408Z", - "number": 84 - }, - { - "name": "Impl__L1ERC1155Gateway", - "address": "0x84ea74d481ee0a5332c457a4d796187f6ba67feb", - "time": "2026-02-05T11:23:42.448Z", - "number": 86 - }, - { - "name": "Impl__L1Staking", - "address": "0x9e545e3c0baab3e08cdfd552c960a1050f373042", - "time": "2026-02-05T11:23:46.498Z", - "number": 87 - }, - { - "name": "Impl__MultipleVersionRollupVerifier", - "address": "0x851356ae760d987e095750cceb3bc6014560891c", - "time": "2026-02-05T11:23:56.668Z", - "number": 90 - } -] \ No newline at end of file diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go index 01f53992a..6db977cc0 100644 --- a/tx-submitter/batch/batch_cache.go +++ b/tx-submitter/batch/batch_cache.go @@ -34,7 +34,6 @@ type BatchCache struct { // key: batchIndex, value: RPCRollupBatch sealedBatches map[uint64]*eth.RPCRollupBatch - batchDataHash map[uint64]common.Hash // Currently accumulating batch data (referencing node's BatchingCache) // Parent batch information @@ -153,7 +152,7 @@ func (bc *BatchCache) Init() error { if err != nil { return fmt.Errorf("get total 
l1 message popped err: %w", err)
 	}
-	log.Info("Start assemble batch", "start batch", fi.Uint64()+1, "end batch", ci.Uint64())
+	log.Info("Start assemble batch", "start batch", fi.Uint64(), "end batch", ci.Uint64())
 	return nil
 }
@@ -178,12 +177,16 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error {
 	if bc.initDone {
 		return nil
 	}
+	err := bc.updateBatchConfigFromGov()
+	if err != nil {
+		return err
+	}
 	ci, fi, err := bc.getBatchStatusFromContract()
 	if err != nil {
 		return fmt.Errorf("get batch status from rollup failed err: %w", err)
 	}
-	batches, err := bc.batchStorage.LoadAllSealedBatches()
+	batches, indices, err := bc.batchStorage.LoadAllSealedBatches()
 	if err != nil {
 		return err
 	}
@@ -194,6 +197,15 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error {
 		}
 		return nil
 	}
+	var maxIndex uint64
+	for _, idx := range indices {
+		if idx > maxIndex {
+			maxIndex = idx
+		}
+	}
+	// indices is non-empty at this point because batches is non-empty
+
+	// check batch hashes against the batches already rolled up by the submitter
 	for i := fi.Uint64(); i <= ci.Uint64(); i++ {
 		batchHash, err := bc.rollupContract.CommittedBatches(nil, new(big.Int).SetUint64(i))
@@ -202,6 +214,10 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error {
 		}
 		batchStorage, exist := batches[i]
 		if !exist || !bytes.Equal(batchHash[:], batchStorage.Hash.Bytes()) {
+			err = bc.batchStorage.DeleteAllSealedBatches()
+			if err != nil {
+				return err
+			}
 			// batch not contiguous or batch is invalid
 			err = bc.InitAndSyncFromRollup()
 			if err != nil {
@@ -210,7 +226,8 @@ func (bc *BatchCache) InitAndSyncFromDatabase() error {
 			return nil
 		}
 	}
-	parentHeader := BatchHeaderBytes(batches[uint64(len(batches)-1)].ParentBatchHeader[:])
+
+	parentHeader := BatchHeaderBytes(batches[maxIndex].ParentBatchHeader[:])
 	bc.lastPackedBlockHeight, err = parentHeader.LastBlockNumber()
 	if err != nil {
 		parentBatchIndex, err := parentHeader.BatchIndex()
@@ -269,6 +286,7 @@ func (bc *BatchCache) InitAndSyncFromRollup() error {
 	if err != nil {
 		return fmt.Errorf("get batch block range err: %w,start %v, end %v", err, startNum, endNum)
 	}
+	log.Info("assemble batch block range", "startNum", startNum, "endNum", endNum)
 	batchHeaderBytes, err := bc.assembleBatchHeaderFromL2Blocks(startNum, endNum)
 	if err != nil {
 		return err
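
The loop above is the heart of the restart path: the local store is trusted only when every index from the last finalized to the last committed batch has a cached entry whose hash matches what the Rollup contract recorded; otherwise the store is wiped and rebuilt from L1. The same decision in miniature, with stand-in types (onchain and local are placeholders for Rollup.CommittedBatches and the loaded store):

package main

import "fmt"

// trustLocalStore reports whether a cached batch store can be reused after a
// restart: every index from fi to ci must exist locally and match the hash
// recorded on chain. The real code compares RPCRollupBatch.Hash against
// Rollup.CommittedBatches(index).
func trustLocalStore(fi, ci uint64, onchain func(uint64) [32]byte, local map[uint64][32]byte) bool {
	for i := fi; i <= ci; i++ {
		h, ok := local[i]
		if !ok || h != onchain(i) {
			return false // caller deletes the store and re-syncs from rollup events
		}
	}
	return true
}

func main() {
	onchain := func(i uint64) [32]byte { return [32]byte{byte(i)} }
	local := map[uint64][32]byte{1: {1}, 2: {2}}
	fmt.Println(trustLocalStore(1, 2, onchain, local)) // true
	fmt.Println(trustLocalStore(1, 3, onchain, local)) // false: index 3 missing
}

@@ -415,17 +433,6 @@ func (bc *BatchCache) GetLatestSealedBatchIndex() uint64 {
 }
 
 // CalculateCapWithProposalBlock calculates batch capacity after including the specified block
-// References node's CalculateCapWithProposalBlock
-// Parameters:
-//   - blockNumber: block number to check
-//   - l2Client: L2 client interface
-//
-// Returns:
-//   - exceeded: returns true if compressed size would exceed MaxBlobBytesSize after adding this block
-//   - error: returns error if fetch or processing fails
-//
-// Note: This method stores block data to currentBlockContext but does not immediately append to batch
-// Need to call PackCurrentBlock to confirm and append
 func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdrawRoot common.Hash) (bool, error) {
 	if len(bc.l2Clients.Clients) == 0 {
 		return false, fmt.Errorf("l2 client is nil")
 	}
@@ -433,9 +440,11 @@ func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdraw
 
 	// Verify block number continuity
 	bc.mu.Lock()
-	if blockNumber <= bc.lastPackedBlockHeight && blockNumber != 0 {
-		bc.mu.Unlock()
-		return false, fmt.Errorf("wrong block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber)
+	if blockNumber <= bc.lastPackedBlockHeight {
+		if blockNumber != 0 || bc.lastPackedBlockHeight != 0 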
{
+			bc.mu.Unlock()
+			return false, fmt.Errorf("wrong block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber)
+		}
 	}
 	if blockNumber > bc.lastPackedBlockHeight+1 {
 		// Some blocks were skipped, need to clear cache
@@ -971,23 +980,20 @@ func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error {
 		}
 
 		// Check if we need to seal batch due to capacity, block interval, or timeout
-		// Timeout check ensures batch is sealed before exceeding the maximum timeout
+		// This check ensures the batch is sealed before it exceeds the maximum timeout
 		if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout {
-			log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout)
-			batchHash, reachedExpectedSize, err := bc.SealBatchAndCheck(callOpts, ci)
+			log.Info("sealing batch", "start", startBlockNum, "to", blockNum-1, "exceeded", exceeded, "timeout", timeout)
+			batchHash, reachedExpectedSize, batchIndex, err := bc.SealBatchAndCheck(callOpts, ci)
 			if err != nil {
 				return err
 			}
-			// Update startBlockNum and startBlockTime for next batch
-			startBlockNum = blockNum + 1
-			if startBlockNum <= endBlockNum {
-				// Update startBlock and startBlockTime for next batch's timeout calculation
-				startBlock, err = bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum)))
-				if err != nil {
-					return fmt.Errorf("failed to get start block %d for next batch: %w", startBlockNum, err)
-				}
-				startBlockTime = startBlock.Time()
+			batch, _ := bc.GetSealedBatch(batchIndex)
+			startBlockNum = batch.LastBlockNumber + 1
+			startBlock, err = bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum)))
+			if err != nil {
+				return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err)
 			}
+			startBlockTime = startBlock.Time()
 			index, err := bc.parentBatchHeader.BatchIndex()
 			if err != nil {
 				return err
@@ -1003,41 +1009,41 @@
-func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, error) {
+func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, uint64, error) {
 	sequencerSetBytes, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts)
 	if err != nil {
-		return common.Hash{}, false, err
+		return common.Hash{}, false, 0, err
 	}
 	lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight)))
 	if err != nil {
-		return common.Hash{}, false, fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err)
+		return common.Hash{}, false, 0, fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err)
 	}
 	blockTimestamp := lastBlock.Time()
 
 	// Seal batch and generate batchHeader
 	batchIndex, batchHeaderBytes, reachedExpectedSize, err := bc.SealBatch(sequencerSetBytes, blockTimestamp)
 	if err != nil {
-		return common.Hash{}, false, fmt.Errorf("failed to seal batch: %w", err)
+		return common.Hash{}, false, 0, fmt.Errorf("failed to seal batch: %w", err)
 	}
 	sealedBatch, found := bc.GetSealedBatch(batchIndex)
 	if !found {
-		return common.Hash{}, false, fmt.Errorf("sealed batch not found for index %d", batchIndex)
+		return common.Hash{}, false, 0, fmt.Errorf("sealed batch not found for index %d", batchIndex)
 	}
 	if batchIndex <= ci.Uint64() {
 		// batch already committed, check batch hash
 		correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.Hash)
 		if 
err != nil { - return common.Hash{}, false, err + return common.Hash{}, false, 0, err } if !correct { log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.Hash.String()) - return common.Hash{}, false, fmt.Errorf("batch hash does not match sealed batch") + return common.Hash{}, false, 0, fmt.Errorf("batch hash does not match sealed batch") } } batchHash, err := batchHeaderBytes.Hash() if err != nil { - return common.Hash{}, false, err + return common.Hash{}, false, 0, err } - return batchHash, reachedExpectedSize, nil + return batchHash, reachedExpectedSize, batchIndex, nil } // Get gets sealed batch information by batch index @@ -1152,17 +1158,8 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error { if err != nil { return fmt.Errorf("failed to seal batch: %w", err) } - - // Update startBlockNum and startBlockTime for next batch - startBlockNum = blockNum + 1 - if startBlockNum <= endBlockNum { - // Update startBlock and startBlockTime for next batch's timeout calculation - startBlock, err = bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) - if err != nil { - return fmt.Errorf("failed to get start block %d for next batch: %w", startBlockNum, err) - } - startBlockTime = startBlock.Time() - } + startBlockNum = blockNum + startBlockTime = nowBlockTime } // Pack current block (confirm and append to batch) diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go index dfa61c9ef..cd5879675 100644 --- a/tx-submitter/batch/batch_cache_test.go +++ b/tx-submitter/batch/batch_cache_test.go @@ -47,11 +47,10 @@ func TestBatchCacheInitServer(t *testing.T) { cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) go utils.Loop(cache.ctx, 5*time.Second, func() { - err := cache.InitFromRollupByRange() + err := cache.InitAndSyncFromRollup() if err != nil { log.Error("init and sync from rollup failed, wait for the next try", "error", err) } - cache.batchTimeOut = 60 err = cache.AssembleCurrentBatchHeader() if err != nil { log.Error("Assemble current batch failed, wait for the next try", "error", err) diff --git a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go index e608069b2..a91c31834 100644 --- a/tx-submitter/batch/batch_query.go +++ b/tx-submitter/batch/batch_query.go @@ -156,86 +156,6 @@ func parseFinalizeBatchTxData(txData []byte) (BatchHeaderBytes, error) { return BatchHeaderBytes(batchHeaderBytes), nil } -// getCommitBatchDataByIndex gets batchDataInput and batchSignatureInput with the specified index from the rollup contract's CommitBatch event -// Reference the implementation of getLastFinalizeBatchHeaderFromRollupByIndex -// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found -func (bc *BatchCache) getCommitBatchDataByIndex(index uint64) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { - // Get the current latest block height - latestBlock, err := bc.l1Client.BlockNumber(context.Background()) - if err != nil { - return nil, nil, fmt.Errorf("failed to get latest block number: %w", err) - } - - const blockRange = uint64(10000) // Query 10000 blocks each time - var endBlock uint64 = latestBlock - var startBlock uint64 - - // Start from the latest height, query backwards 10000 blocks each time until data is found - for endBlock > 0 { - // Calculate the start block for this query - if endBlock >= blockRange { - startBlock = 
endBlock - blockRange + 1 - } else { - startBlock = 0 - } - - // Set query options - filterOpts := &bind.FilterOpts{ - Start: startBlock, - End: &endBlock, - } - - // Query the CommitBatch event with the corresponding index from the rollup contract - commitEventIter, err := bc.rollupContract.FilterCommitBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) - if err != nil { - // If query fails, continue querying backwards - if endBlock < blockRange { - break // Already queried to block 0, exit loop - } - endBlock = startBlock - 1 - continue - } - - // Iterate through query results - for commitEventIter.Next() { - event := commitEventIter.Event - // Get transaction hash from event - txHash := event.Raw.TxHash - - // Get transaction details - tx, _, err := bc.l1Client.TransactionByHash(context.Background(), txHash) - if err != nil { - return nil, nil, fmt.Errorf("failed to get transaction by hash: %w", err) - } - - // Parse commitBatch transaction data to get batchDataInput and batchSignatureInput - batchDataInput, batchSignatureInput, err := parseCommitBatchTxData(tx.Data()) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse commit batch data: %w", err) - } - - // Verify if batch index matches (by checking batchIndex in parentBatchHeader) - if len(batchDataInput.ParentBatchHeader) > 0 { - parentHeader := BatchHeaderBytes(batchDataInput.ParentBatchHeader) - parentBatchIndex, err := parentHeader.BatchIndex() - if err == nil && parentBatchIndex+1 == index { - _ = commitEventIter.Close() - return batchDataInput, batchSignatureInput, nil - } - } - } - _ = commitEventIter.Close() - - // Continue querying backwards - if endBlock < blockRange { - break // Already queried to block 0, exit loop - } - endBlock = startBlock - 1 - } - - return nil, nil, fmt.Errorf("failed to find commit batch data for index %d", index) -} - // parseCommitBatchTxData parses the commitBatch transaction's input data to get BatchDataInput and BatchSignatureInput func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { // Get rollup ABI diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go index a2866f4e0..7b6c6cb8b 100644 --- a/tx-submitter/batch/batch_restart_test.go +++ b/tx-submitter/batch/batch_restart_test.go @@ -163,162 +163,6 @@ func TestBatchRestartInit(t *testing.T) { t.Logf("Successfully assembled batch header: batchIndex=%d", assembledBatchIndex) } -// compareAndReportBatchHeaders compares two batch headers and reports all mismatched fields -func compareAndReportBatchHeaders(t *testing.T, batchHeader1 *BatchHeaderBytes, batchHeader2 *BatchHeaderBytes, name1, name2 string) { - var mismatches []string - - // Compare BatchIndex - index1, err1 := batchHeader1.BatchIndex() - index2, err2 := batchHeader2.BatchIndex() - if err1 != nil || err2 != nil { - t.Errorf("Failed to get BatchIndex: err1=%v, err2=%v", err1, err2) - return - } - if index1 != index2 { - mismatches = append(mismatches, fmt.Sprintf("BatchIndex: %s=%d, %s=%d", name1, index1, name2, index2)) - } else { - t.Logf("✓ BatchIndex: %d (match)", index1) - } - - // Compare L1MessagePopped - l1Msg1, err1 := batchHeader1.L1MessagePopped() - l1Msg2, err2 := batchHeader2.L1MessagePopped() - if err1 != nil || err2 != nil { - t.Errorf("Failed to get L1MessagePopped: err1=%v, err2=%v", err1, err2) - return - } - if l1Msg1 != l1Msg2 { - mismatches = append(mismatches, fmt.Sprintf("L1MessagePopped: %s=%d, %s=%d", name1, l1Msg1, name2, 
l1Msg2))
-	} else {
-		t.Logf("✓ L1MessagePopped: %d (match)", l1Msg1)
-	}
-
-	// Compare TotalL1MessagePopped
-	totalL1Msg1, err1 := batchHeader1.TotalL1MessagePopped()
-	totalL1Msg2, err2 := batchHeader2.TotalL1MessagePopped()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get TotalL1MessagePopped: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if totalL1Msg1 != totalL1Msg2 {
-		mismatches = append(mismatches, fmt.Sprintf("TotalL1MessagePopped: %s=%d, %s=%d", name1, totalL1Msg1, name2, totalL1Msg2))
-	} else {
-		t.Logf("✓ TotalL1MessagePopped: %d (match)", totalL1Msg1)
-	}
-
-	// Compare DataHash
-	dataHash1, err1 := batchHeader1.DataHash()
-	dataHash2, err2 := batchHeader2.DataHash()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get DataHash: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if dataHash1 != dataHash2 {
-		mismatches = append(mismatches, fmt.Sprintf("DataHash: %s=%x, %s=%x", name1, dataHash1, name2, dataHash2))
-	} else {
-		t.Logf("✓ DataHash: %x (match)", dataHash1)
-	}
-
-	// Compare BlobVersionedHash
-	blobHash1, err1 := batchHeader1.BlobVersionedHash()
-	blobHash2, err2 := batchHeader2.BlobVersionedHash()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get BlobVersionedHash: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if blobHash1 != blobHash2 {
-		mismatches = append(mismatches, fmt.Sprintf("BlobVersionedHash: %s=%x, %s=%x", name1, blobHash1, name2, blobHash2))
-	} else {
-		t.Logf("✓ BlobVersionedHash: %x (match)", blobHash1)
-	}
-
-	// Compare PrevStateRoot
-	prevStateRoot1, err1 := batchHeader1.PrevStateRoot()
-	prevStateRoot2, err2 := batchHeader2.PrevStateRoot()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get PrevStateRoot: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if prevStateRoot1 != prevStateRoot2 {
-		mismatches = append(mismatches, fmt.Sprintf("PrevStateRoot: %s=%x, %s=%x", name1, prevStateRoot1, name2, prevStateRoot2))
-	} else {
-		t.Logf("✓ PrevStateRoot: %x (match)", prevStateRoot1)
-	}
-
-	// Compare PostStateRoot
-	postStateRoot1, err1 := batchHeader1.PostStateRoot()
-	postStateRoot2, err2 := batchHeader2.PostStateRoot()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get PostStateRoot: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if postStateRoot1 != postStateRoot2 {
-		mismatches = append(mismatches, fmt.Sprintf("PostStateRoot: %s=%x, %s=%x", name1, postStateRoot1, name2, postStateRoot2))
-	} else {
-		t.Logf("✓ PostStateRoot: %x (match)", postStateRoot1)
-	}
-
-	// Compare WithdrawalRoot
-	withdrawRoot1, err1 := batchHeader1.WithdrawalRoot()
-	withdrawRoot2, err2 := batchHeader2.WithdrawalRoot()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get WithdrawalRoot: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if withdrawRoot1 != withdrawRoot2 {
-		mismatches = append(mismatches, fmt.Sprintf("WithdrawalRoot: %s=%x, %s=%x", name1, withdrawRoot1, name2, withdrawRoot2))
-	} else {
-		t.Logf("✓ WithdrawalRoot: %x (match)", withdrawRoot1)
-	}
-
-	// Compare SequencerSetVerifyHash
-	seqHash1, err1 := batchHeader1.SequencerSetVerifyHash()
-	seqHash2, err2 := batchHeader2.SequencerSetVerifyHash()
-	if err1 != nil || err2 != nil {
-		t.Errorf("Failed to get SequencerSetVerifyHash: err1=%v, err2=%v", err1, err2)
-		return
-	}
-	if seqHash1 != seqHash2 {
-		mismatches = append(mismatches, fmt.Sprintf("SequencerSetVerifyHash: %s=%x, %s=%x", name1, seqHash1, name2, seqHash2))
-	} else {
-		t.Logf("✓ SequencerSetVerifyHash: %x (match)", seqHash1)
-	}
-
-	// Compare ParentBatchHash
-	parentHash1, err1 := batchHeader1.ParentBatchHash()
-	
parentHash2, err2 := batchHeader2.ParentBatchHash() - if err1 != nil || err2 != nil { - t.Errorf("Failed to get ParentBatchHash: err1=%v, err2=%v", err1, err2) - return - } - if parentHash1 != parentHash2 { - mismatches = append(mismatches, fmt.Sprintf("ParentBatchHash: %s=%x, %s=%x", name1, parentHash1, name2, parentHash2)) - } else { - t.Logf("✓ ParentBatchHash: %x (match)", parentHash1) - } - - // Compare LastBlockNumber (if supported) - lastBlock1, err1 := batchHeader1.LastBlockNumber() - lastBlock2, err2 := batchHeader2.LastBlockNumber() - if err1 == nil && err2 == nil { - if lastBlock1 != lastBlock2 { - mismatches = append(mismatches, fmt.Sprintf("LastBlockNumber: %s=%d, %s=%d", name1, lastBlock1, name2, lastBlock2)) - } else { - t.Logf("✓ LastBlockNumber: %d (match)", lastBlock1) - } - } - - // Report mismatched fields - if len(mismatches) > 0 { - t.Errorf("\n❌ Found %d mismatched fields between %s and %s:", len(mismatches), name1, name2) - for _, mismatch := range mismatches { - t.Errorf(" - %s", mismatch) - } - } else { - t.Logf("\n✅ All fields match between %s and %s", name1, name2) - } -} - // compareBatchHeaderWithCommitData compares the assembled batch header with information extracted from commitBatch data func compareBatchHeaderWithCommitData(t *testing.T, assembledBatchHeader *BatchHeaderBytes, batchDataInput *bindings.IRollupBatchDataInput, batchSignatureInput *bindings.IRollupBatchSignatureInput, sequencerSetVerifyHash common.Hash) { t.Logf("\n=== Comparing assembled batch header with commitBatch data ===") @@ -420,15 +264,6 @@ func min(a, b int) int { return b } -func getBatchHeaderFromGeth(index uint64) (*BatchHeaderBytes, error) { - batch, err := l2Client.GetRollupBatchByIndex(context.Background(), index+1) - if err != nil { - return nil, err - } - batchHeaderBytes := BatchHeaderBytes(batch.ParentBatchHeader[:]) - return &batchHeaderBytes, nil -} - // getLastFinalizeBatchHeaderByIndex gets the batch header with the specified index from the rollup contract's FinalizeBatch event // The finalizeBatch function only receives one parameter: batchHeader bytes, so it can be parsed directly from the transaction // Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found diff --git a/tx-submitter/batch/batch_storage.go b/tx-submitter/batch/batch_storage.go index db7db59a9..2af8778c5 100644 --- a/tx-submitter/batch/batch_storage.go +++ b/tx-submitter/batch/batch_storage.go @@ -84,7 +84,7 @@ func (s *BatchStorage) LoadSealedBatch(batchIndex uint64) (*eth.RPCRollupBatch, // LoadAllSealedBatches loads all sealed batches from LevelDB // Returns a map of batchIndex -> RPCRollupBatch -func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, error) { +func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, []uint64, error) { s.mu.RLock() // Load batch indices indices, err := s.loadBatchIndices() @@ -92,9 +92,9 @@ func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, e if err != nil { if errors.Is(err, db.ErrKeyNotFound) { // No batches stored yet - return make(map[uint64]*eth.RPCRollupBatch), nil + return make(map[uint64]*eth.RPCRollupBatch), nil, nil } - return nil, fmt.Errorf("failed to load batch indices: %w", err) + return nil, nil, fmt.Errorf("failed to load batch indices: %w", err) } // Load each batch (without holding the lock to avoid deadlock) @@ -109,7 +109,7 @@ func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, e 
batches[idx] = batch } - return batches, nil + return batches, indices, nil } // DeleteSealedBatch removes a sealed batch from LevelDB diff --git a/tx-submitter/batch/batch_storage_test.go b/tx-submitter/batch/batch_storage_test.go index eea1d923b..9346379a2 100644 --- a/tx-submitter/batch/batch_storage_test.go +++ b/tx-submitter/batch/batch_storage_test.go @@ -14,7 +14,7 @@ func Test_storageBatch(t *testing.T) { err := cache.InitAndSyncFromRollup() require.NoError(t, err) - batches, err := cache.batchStorage.LoadAllSealedBatches() + batches, _, err := cache.batchStorage.LoadAllSealedBatches() require.NoError(t, err) require.NotNil(t, batches) t.Log("loaded batches count", len(batches)) diff --git a/tx-submitter/batch/commit_test.go b/tx-submitter/batch/commit_test.go index b327a7356..733a2fa53 100644 --- a/tx-submitter/batch/commit_test.go +++ b/tx-submitter/batch/commit_test.go @@ -17,7 +17,6 @@ import ( "morph-l2/tx-submitter/utils" "github.com/holiman/uint256" - "github.com/morph-l2/go-ethereum" "github.com/morph-l2/go-ethereum/common" "github.com/morph-l2/go-ethereum/consensus/misc/eip4844" ethtypes "github.com/morph-l2/go-ethereum/core/types" @@ -135,8 +134,8 @@ func createBlobTx(l1client *ethclient.Client, batch *eth.RPCRollupBatch, nonce, return ethtypes.NewTx(ðtypes.BlobTx{ ChainID: uint256.MustFromBig(chainID), Nonce: nonce, - GasTipCap: uint256.MustFromBig(big.NewInt(tip.Int64())), - GasFeeCap: uint256.MustFromBig(big.NewInt(gasFeeCap.Int64())), + GasTipCap: uint256.MustFromBig(tip), + GasFeeCap: uint256.MustFromBig(gasFeeCap), Gas: gas, To: rollupAddr, Data: calldata, @@ -146,20 +145,6 @@ func createBlobTx(l1client *ethclient.Client, batch *eth.RPCRollupBatch, nonce, }), nil } -func estimateGas(l1client iface.L1Client, from, to common.Address, data []byte, feecap *big.Int, tip *big.Int) (uint64, error) { - gas, err := l1client.EstimateGas(context.Background(), ethereum.CallMsg{ - From: from, - To: &to, - GasFeeCap: feecap, - GasTipCap: tip, - Data: data, - }) - if err != nil { - return 0, fmt.Errorf("call estimate gas error:%v", err) - } - return gas, nil -} - func getGasTipAndCap(l1client *ethclient.Client) (*big.Int, *big.Int, *big.Int, *ethtypes.Header, error) { head, err := l1client.HeaderByNumber(context.Background(), nil) if err != nil { @@ -225,10 +210,13 @@ func sendTx(client iface.Client, txFeeLimit uint64, tx *ethtypes.Transaction) er var fee uint64 // calc tx gas fee if tx.Type() == ethtypes.BlobTxType { - // blob fee - fee = tx.BlobGasFeeCap().Uint64() * tx.BlobGas() - // tx fee - fee += tx.GasPrice().Uint64() * tx.Gas() + blobFee := new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas())) + txFee := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) + totalFee := new(big.Int).Add(blobFee, txFee) + if !totalFee.IsUint64() || totalFee.Uint64() > txFeeLimit { + return fmt.Errorf("%v:limit=%v,but got=%v", utils.ErrExceedFeeLimit, txFeeLimit, totalFee) + } + return client.SendTransaction(context.Background(), tx) } else { fee = tx.GasPrice().Uint64() * tx.Gas() } diff --git a/tx-submitter/iface/client.go b/tx-submitter/iface/client.go index 054736584..6fafffdac 100644 --- a/tx-submitter/iface/client.go +++ b/tx-submitter/iface/client.go @@ -251,7 +251,14 @@ func (c *L2Clients) GetRollupBatchByIndex(ctx context.Context, batchIndex uint64 err := c.tryAllClients(func(client L2Client) error { var err error result, err = client.GetRollupBatchByIndex(ctx, batchIndex) - return err + if err != nil { + return err + } + if result != nil && 
len(result.Signatures) > 0 {
+			return nil
+		}
+		// A batch without signatures is treated as a miss so that
+		// tryAllClients falls through to the next client
+		return fmt.Errorf("batch %d has no signatures", batchIndex)
 	})
+	return result, err
 }
diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go
index 4331eacfd..ecc7f2eb4 100644
--- a/tx-submitter/services/rollup.go
+++ b/tx-submitter/services/rollup.go
@@ -259,19 +259,20 @@ func (r *Rollup) Start() error {
 	go utils.Loop(r.ctx, r.cfg.TxProcessInterval, func() {
 		batchCacheSyncMu.Lock()
 		defer batchCacheSyncMu.Unlock()
-		err = r.batchCache.InitAndSyncFromDatabase()
-		if err != nil {
+		if err = r.batchCache.InitAndSyncFromDatabase(); err != nil {
 			log.Error("init and sync from rollup failed, wait for the next try", "error", err)
+			return
 		}
-		err = r.batchCache.AssembleCurrentBatchHeader()
-		if err != nil {
+		if err = r.batchCache.AssembleCurrentBatchHeader(); err != nil {
 			log.Error("assemble current batch failed, wait for the next try", "error", err)
+			return
 		}
-		index, err := r.batchCache.LatestBatchIndex()
-		if err != nil {
+		if index, err := r.batchCache.LatestBatchIndex(); err != nil {
 			log.Error("cannot get the latest batch index from batch cache", "error", err)
+			return
+		} else {
+			r.metrics.SetLastCacheBatchIndex(index)
 		}
-		r.metrics.SetLastCacheBatchIndex(index)
 	})
 
 	return nil
@@ -868,10 +869,10 @@ func (r *Rollup) finalize() error {
 	nextBatchIndex := target.Uint64() + 1
 	batch, ok := r.batchCache.Get(nextBatchIndex)
 	if !ok {
-		log.Error("get next batch by index error",
+		log.Warn("get next batch by index failed, batch not found",
 			"batch_index", nextBatchIndex,
 		)
-		return fmt.Errorf("get next batch by index err:%v", err)
+		return fmt.Errorf("get next batch by index failed, batch %v not found", nextBatchIndex)
 	}
 	if batch == nil {
 		log.Info("next batch is nil, wait for next batch header to finalize", "next_batch_index", nextBatchIndex)
diff --git a/tx-submitter/types/l2Caller.go b/tx-submitter/types/l2Caller.go
index 073e61199..f359f0450 100644
--- a/tx-submitter/types/l2Caller.go
+++ b/tx-submitter/types/l2Caller.go
@@ -83,5 +83,5 @@ func (c *L2Caller) GetSequencerSetBytes(opts *bind.CallOpts) ([]byte, common.Has
 	if bytes.Equal(hash[:], crypto.Keccak256Hash(setBytes).Bytes()) {
 		return setBytes, hash, nil
 	}
-	return nil, common.Hash{}, fmt.Errorf("sequencer set hash verify failed %v: %v", hexutil.Encode(setBytes), common.BytesToHash(hash[:]).String())
+	return nil, common.Hash{}, fmt.Errorf("sequencer set hash verify failed %v: %v", hexutil.Encode(setBytes), common.BytesToHash(hash[:]).String())
 }

From e550fa91ea47e4dc779cacfcf4af74a9dff0162f Mon Sep 17 00:00:00 2001
From: kukoomomo
Date: Fri, 6 Feb 2026 11:29:21 +0800
Subject: [PATCH 12/12] fix batch Seal

---
 tx-submitter/batch/batch_cache.go | 17 +++++++++++++----
 tx-submitter/services/rollup.go   |  2 +-
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go
index 6db977cc0..ff33113a7 100644
--- a/tx-submitter/batch/batch_cache.go
+++ b/tx-submitter/batch/batch_cache.go
@@ -1099,7 +1099,11 @@ func (bc *BatchCache) AssembleCurrentBatchHeader() error {
 	if endBlockNum < bc.currentBlockNumber {
 		return fmt.Errorf("has reorg, should check block status current %v, now %v", bc.currentBlockNumber, endBlockNum)
 	}
-	startBlockNum := bc.lastPackedBlockHeight
+	startBlockNum, err := bc.parentBatchHeader.LastBlockNumber()
+	if err != nil {
+		return err
+	}
+	startBlockNum++
 	// Get start block once to avoid repeated queries
 	startBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum)))
 	if err != nil {
@@ -1154,12 +1158,17 @@ func (bc 
*BatchCache) AssembleCurrentBatchHeader() error {
 		return fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err)
 	}
 	blockTimestamp := lastBlock.Time()
-	_, _, _, err = bc.SealBatch(sequencerSetBytes, blockTimestamp)
+	batchIndex, _, _, err := bc.SealBatch(sequencerSetBytes, blockTimestamp)
 	if err != nil {
 		return fmt.Errorf("failed to seal batch: %w", err)
 	}
-	startBlockNum = blockNum
-	startBlockTime = nowBlockTime
+	// Guard against a missing sealed batch before dereferencing it
+	batch, found := bc.GetSealedBatch(batchIndex)
+	if !found {
+		return fmt.Errorf("sealed batch not found for index %d", batchIndex)
+	}
+	startBlockNum = batch.LastBlockNumber + 1
+	startBlock, err = bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum)))
+	if err != nil {
+		return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err)
+	}
+	startBlockTime = startBlock.Time()
 	}
 
 	// Pack current block (confirm and append to batch)
diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go
index ecc7f2eb4..efa126a67 100644
--- a/tx-submitter/services/rollup.go
+++ b/tx-submitter/services/rollup.go
@@ -872,7 +872,7 @@ func (r *Rollup) finalize() error {
 		log.Warn("get next batch by index failed, batch not found",
 			"batch_index", nextBatchIndex,
 		)
-		return fmt.Errorf("get next batch by index failed, batch %v not found", nextBatchIndex)
+		return nil
 	}
 	if batch == nil {
 		log.Info("next batch is nil, wait for next batch header to finalize", "next_batch_index", nextBatchIndex)
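
Editor's note on the sendTx change in commit_test.go above: the old code multiplied uint64 fee values directly, which can silently overflow for large fee caps, so the patch recomputes blobGasFeeCap*blobGas + gasPrice*gas in big.Int space before comparing against the limit. A minimal standalone sketch of the same overflow-safe check follows; the function name feeWithinLimit and the sample values are illustrative, not part of the patch.

package main

import (
	"fmt"
	"math/big"
)

// feeWithinLimit mirrors the overflow-safe check from sendTx: compute
// blobGasFeeCap*blobGas + gasPrice*gas entirely in big.Int space, and only
// compare against the limit once the sum is known to fit in a uint64.
func feeWithinLimit(blobGasFeeCap, gasPrice *big.Int, blobGas, gas, feeLimit uint64) (bool, *big.Int) {
	blobFee := new(big.Int).Mul(blobGasFeeCap, new(big.Int).SetUint64(blobGas))
	txFee := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))
	total := new(big.Int).Add(blobFee, txFee)
	if !total.IsUint64() || total.Uint64() > feeLimit {
		return false, total
	}
	return true, total
}

func main() {
	// Sample values: 100 gwei blob fee cap over one blob's gas (131072),
	// 50 gwei gas price over 200k gas, against a 0.03 ETH limit.
	ok, total := feeWithinLimit(
		big.NewInt(100_000_000_000),
		big.NewInt(50_000_000_000),
		131072, 200_000,
		30_000_000_000_000_000,
	)
	fmt.Println(ok, total) // true 23107200000000000
}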