Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 15 additions & 1 deletion cmd/hack/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,21 +18,35 @@ make hack
sudo DIST=/usr/local/bin make install
```

## Mount a RAM disk (tmpfs) to hold the chaindata
```
mkdir -p /mnt/ramdisk_op
mount -t tmpfs -o size=128g tmpfs /mnt/ramdisk_op
df -hT /mnt/ramdisk_op
umount -l /mnt/ramdisk_op
```

## run dump genesis
```
export chaindata_dir=/data1/chaindata
export chaindata_dir=/data/rpc-bak0820/chaindata
cp -r ${chaindata_dir} /mnt/ramdisk_op/
hack -action migrateGenesis -chaindata ${chaindata_dir} -input empty.json -output pre_xlayer_dump_file.json -log-level info
```

## re-genesis state check
```
export smtdata_dir=/data/rpc-bak0820/smt
cp -r ${smtdata_dir}/mdbx.dat /mnt/ramdisk_op/smt

cd test && make min-run
# send transactions to populate the blockchain state
make pause
export chaindata_dir=$(pwd)/data/seq/chaindata
export smtdata_dir=$(pwd)/data/seq/smt
hack -action migrateGenesis -chaindata ${chaindata_dir} -input empty.json -output xlayer_dump_file.json
hack -action checkStateRoot -chaindata ${chaindata_dir} -smt-db-path ${smtdata_dir} -standalone-smt-db=true -ignore-scalable=true -input xlayer_dump_file.json

hack -action checkStateRootFast -chaindata ${chaindata_dir} -smt-db-path ${smtdata_dir} -standalone-smt-db=true -input xlayer_dump_file.json
```

## re-genesis state check ignoring scalable
Expand Down
157 changes: 155 additions & 2 deletions cmd/hack/hack.go
Original file line number Diff line number Diff line change
Expand Up @@ -504,6 +504,127 @@ func readJsonFile(input string) (GenesisData, error) {
return jsonData, nil
}

type KeyRange struct {
Start []byte
End []byte
}

// for storage key, it is of 32 bytes. we split the storage key space into chunks lexically
func generateKeyRanges(numChunks int) []KeyRange {
keyRanges := make([]KeyRange, numChunks)

for i := 0; i < numChunks; i++ {
startKey := make([]byte, 32)
startKey[0] = byte(i * (256 / numChunks))
Copy link
Copy Markdown
Collaborator

@Vui-Chee Vui-Chee Sep 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You initialize startKey with 32 bytes, but the the following assignment only assigns the first byte in the array. Is it because workerTx.Range(kv.PlainState, startKey, endKey) requires each key to be 60 bytes long?


var endKey []byte
if i == numChunks-1 {
endKey = bytes.Repeat([]byte{0xFF}, 32)
} else {
endKey = make([]byte, 32)
endKey[0] = byte((i + 1) * (256 / numChunks))
}

keyRanges[i] = KeyRange{Start: startKey, End: endKey}
}

return keyRanges
}

// StorageEntry is one storage slot collected during a parallel PlainState
// scan: the slot key stripped of its 28-byte address+incarnation prefix,
// plus the slot value, both hex-encoded for the genesis dump JSON.
type StorageEntry struct {
Key string // hex-encoded 32-byte slot key (hexutil.Encode of key[28:])
Value string // slot value zero-padded to 64 hex chars (BytesToPaddedHex)
}

func processScalableAddressStorageConcurrently(db kv.RwDB, prefix []byte, acct *AccInfo) (uint64, error) {

numWorkers := 32
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we use "numWorkers := runtime.NumCPU()"?

keyRanges := generateKeyRanges(numWorkers)

// Create channels for results
var wg sync.WaitGroup
results := make(chan []StorageEntry, numWorkers)

// Start workers
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
start := time.Now()
keyRange := keyRanges[workerID]

// Create a new transaction for this worker using db.View
if err := db.View(context.Background(), func(workerTx kv.Tx) error {
chunkStorage := make([]StorageEntry, 0, 1<<20)

// Get cursor for this worker
startKey := make([]byte, 60)
copy(startKey, prefix[0:28])
copy(startKey[28:], keyRange.Start)

endKey := make([]byte, 60)
copy(endKey, prefix[0:28])
copy(endKey[28:], keyRange.End)

iter, err := workerTx.Range(kv.PlainState, startKey, endKey)
if err != nil {
logger.Error("failed to create range iterator", "worker", workerID, "error", err)
results <- chunkStorage
return err
}

for iter.HasNext() {
keyStorage, valStorage, err := iter.Next()
if err != nil {
logger.Error("failed to read value from cursor", "worker", workerID, "error", err)
break
}
if len(keyStorage) > 28 {
chunkStorage = append(chunkStorage, StorageEntry{
Key: hexutil.Encode(keyStorage[28:]),
Value: BytesToPaddedHex(valStorage, 64),
})
}
}

results <- chunkStorage
logger.Info("worker completed", "id", workerID, "elapsed", time.Since(start))
return nil
}); err != nil {
logger.Error("worker transaction failed", "worker", workerID, "error", err)
results <- make([]StorageEntry, 0, 1024)
}
}(i)
}

go func() {
wg.Wait()
close(results)
}()

start := time.Now()
var totalStorage uint64
chunkCount := 0

var allChunks [][]StorageEntry
for chunkStorage := range results {
allChunks = append(allChunks, chunkStorage)
totalStorage += uint64(len(chunkStorage))
chunkCount++
}

acct.Storage = make(map[string]string, int(totalStorage))

for _, chunk := range allChunks {
for _, entry := range chunk {
acct.Storage[entry.Key] = entry.Value
}
}

logger.Info("Scalable address total storage items", "count", totalStorage, "chunk count", chunkCount, "elapsed", time.Since(start))
return totalStorage, nil
}

func writeJsonFile(data GenesisData, output string) error {
startJsonMarshal := time.Now()
updatedData, err := json.MarshalIndent(data, "", " ")
Expand Down Expand Up @@ -550,7 +671,12 @@ func scanDbGenerateGenesisData(input, chaindata string) (GenesisData, error) {

startScanKeys := time.Now()
if err := db.View(context.Background(), func(tx kv.Tx) error {
var skipNums uint64 = 0
return tx.ForEach(kv.PlainState, nil, func(k, v []byte) error {
if skipNums > 0 {
skipNums--
return nil
}
Comment thread
Vui-Chee marked this conversation as resolved.
total++
// account
if len(k) == 20 {
Expand Down Expand Up @@ -588,7 +714,7 @@ func scanDbGenerateGenesisData(input, chaindata string) (GenesisData, error) {

// storage
if len(k) > 28 {
storageCount++
var acctStorageCount uint64 = 0
acctBytes := k[:20]
acctHex := common.Bytes2Hex(acctBytes)

Expand All @@ -600,7 +726,34 @@ func scanDbGenerateGenesisData(input, chaindata string) (GenesisData, error) {
acc.Storage = make(map[string]string)
}

acc.Storage[hexutil.Encode(k[28:])] = BytesToPaddedHex(v, 64)
scalableAddressStr := strings.ToLower(strings.TrimPrefix(state.ADDRESS_SCALABLE_L2.Hex(), "0x"))
startAcctStorage := time.Now()
if acctHex == scalableAddressStr {

logger.Info("scalable acct bytes", "bytes", acctBytes, "incarnation", k[20:28])
scalableStorageCount, err := processScalableAddressStorageConcurrently(db, k[:28], acc)
acctStorageCount = scalableStorageCount
Comment on lines +734 to +735
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we omit scalableStorageCount? And just assign acctStorageCount directly?

if err != nil {
logger.Error("processing scalable address storage", "error", err)
}

} else {
tx.ForPrefix(kv.PlainState, k[:20], func(storageK, storageV []byte) error {
if len(storageK) > 20 {
acc.Storage[hexutil.Encode(storageK[28:])] = BytesToPaddedHex(storageV, 64)
acctStorageCount++
}
return nil
})
}

elapsed := time.Since(startAcctStorage)
skipNums = acctStorageCount - 1
storageCount = storageCount + acctStorageCount
if acctHex == scalableAddressStr {
fmt.Println("scanning for scalable, number of storages", acctStorageCount, "elapsed", elapsed)
}

}
return nil
})
Expand Down