diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index aa511482a..ade236aef 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -17,4 +17,4 @@ jobs: - uses: actions/setup-go@v4 with: go-version: 1.21.8 - - run: go test ./... + - run: go test ./... -timeout 30m diff --git a/go.mod b/go.mod index d0e599224..993514a04 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/ethereum/go-ethereum v1.13.5 github.com/fatih/color v1.14.1 - github.com/ferranbt/fastssz v0.1.3 + github.com/ferranbt/fastssz v0.1.4 github.com/gdamore/tcell/v2 v2.6.0 github.com/glendc/go-external-ip v0.1.0 github.com/go-openapi/errors v0.21.0 @@ -22,6 +22,7 @@ require ( github.com/goccy/go-json v0.10.2 github.com/google/uuid v1.5.0 github.com/hashicorp/go-version v1.6.0 + github.com/holiman/uint256 v1.2.4 github.com/ipfs/boxo v0.8.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 @@ -32,7 +33,7 @@ require ( github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 github.com/prysmaticlabs/prysm/v5 v5.0.3 github.com/rivo/tview v0.0.0-20230208211350-7dfff1ce7854 - github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365 + github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b github.com/sethvargo/go-password v0.2.0 github.com/shirou/gopsutil/v3 v3.23.1 github.com/tyler-smith/go-bip39 v1.1.0 @@ -69,6 +70,7 @@ require ( github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect @@ -87,7 +89,6 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/herumi/bls-eth-go-binary v1.28.1 // indirect - github.com/holiman/uint256 v1.2.4 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.1.2 // indirect diff --git a/go.sum b/go.sum index 7a109e9ef..048211fc6 100644 --- a/go.sum +++ b/go.sum @@ -158,6 +158,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -172,8 +174,8 @@ github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84a github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod 
h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= -github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= -github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= @@ -633,10 +635,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd h1:p9KuetSKB9nte9I/MkkiM3pwKFVQgqxxPTQ0y56Ff6s= github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd/go.mod h1:UE9fof8P7iESVtLn1K9CTSkNRYVFHZHlf96RKbU33kA= -github.com/rocket-pool/rocketpool-go v1.8.4-0.20240903025128-025f78ebda85 h1:sCeOQE95E9KATbaz9rnHezLvZnJa0FdNp+kE7cwogSI= -github.com/rocket-pool/rocketpool-go v1.8.4-0.20240903025128-025f78ebda85/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI= -github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365 h1:e8Y0PxBCpIV0NhCM2VvuceNbGSMfLagbMhcfwBzCNNc= -github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI= +github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b h1:PnL2c1StqHDOjyOUYn4C/tuwhLtIZ2N/3qfNYyQlVWc= +github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= @@ -697,8 +697,6 @@ github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2n github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= diff --git a/rocketpool-cli/minipool/commands.go b/rocketpool-cli/minipool/commands.go index c1c323815..d181841e6 100644 --- a/rocketpool-cli/minipool/commands.go +++ b/rocketpool-cli/minipool/commands.go @@ -403,6 +403,10 @@ func RegisterCommands(app *cli.App, name string, aliases []string) { Name: "minipool, m", Usage: "The minipool/s to upgrade (address or 'all')", }, + cli.BoolFlag{ Name: "include-finalized, f", Usage: "Include finalized minipools in the list (default
is to hide them).", + }, }, Action: func(c *cli.Context) error { @@ -546,6 +550,10 @@ func RegisterCommands(app *cli.App, name string, aliases []string) { Name: "amount, a", Usage: "The amount of ETH to deposit into the minipool", }, + cli.BoolFlag{ + Name: "no-send, n", + Usage: "Don't submit the transaction", + }, }, Action: func(c *cli.Context) error { diff --git a/rocketpool-cli/minipool/delegate.go b/rocketpool-cli/minipool/delegate.go index efa510a48..b42a343af 100644 --- a/rocketpool-cli/minipool/delegate.go +++ b/rocketpool-cli/minipool/delegate.go @@ -33,10 +33,14 @@ func delegateUpgradeMinipools(c *cli.Context) error { return err } + includeFinalized := c.Bool("include-finalized") + minipools := []api.MinipoolDetails{} for _, mp := range status.Minipools { if mp.Delegate != latestDelegateResponse.Address && !mp.UseLatestDelegate { - minipools = append(minipools, mp) + if includeFinalized || !mp.Finalised { + minipools = append(minipools, mp) + } } } diff --git a/rocketpool-cli/minipool/rescue-dissolved.go b/rocketpool-cli/minipool/rescue-dissolved.go index 32989a3e3..eb756a3dc 100644 --- a/rocketpool-cli/minipool/rescue-dissolved.go +++ b/rocketpool-cli/minipool/rescue-dissolved.go @@ -209,8 +209,10 @@ func rescueDissolved(c *cli.Context) error { return nil } + submit := !c.Bool("no-send") + // Refund minipool - response, err := rp.RescueDissolvedMinipool(selectedMinipool.Address, depositAmount) + response, err := rp.RescueDissolvedMinipool(selectedMinipool.Address, depositAmount, submit) if err != nil { return fmt.Errorf("Could not rescue minipool %s: %s.\n", selectedMinipool.Address.Hex(), err.Error()) } diff --git a/rocketpool-cli/node/status.go b/rocketpool-cli/node/status.go index db35f16eb..c1b832944 100644 --- a/rocketpool-cli/node/status.go +++ b/rocketpool-cli/node/status.go @@ -258,6 +258,13 @@ func getStatus(c *cli.Context) error { // Fee distributor details fmt.Printf("%s=== Fee Distributor and Smoothing Pool ===%s\n", colorGreen, colorReset) + fmt.Printf("The node's fee distributor %s%s%s has a balance of %.6f ETH.\n", colorBlue, status.FeeRecipientInfo.FeeDistributorAddress.Hex(), colorReset, math.RoundDown(eth.WeiToEth(status.FeeDistributorBalance), 6)) + if cfg.IsNativeMode && !status.FeeRecipientInfo.IsInSmoothingPool && !status.FeeRecipientInfo.IsInOptOutCooldown { + fmt.Printf("%sNOTE: You are in Native Mode; you MUST ensure that your Validator Client is using this address as its fee recipient!%s\n", colorYellow, colorReset) + } + if !status.IsFeeDistributorInitialized { + fmt.Printf("\n%sThe fee distributor hasn't been initialized yet. 
When you are able, please initialize it with `rocketpool node initialize-fee-distributor`.%s\n", colorYellow, colorReset) + } if status.FeeRecipientInfo.IsInSmoothingPool { fmt.Printf( "The node is currently opted into the Smoothing Pool (%s%s%s).\n", @@ -279,14 +286,24 @@ func getStatus(c *cli.Context) error { } } else { fmt.Printf("The node is not opted into the Smoothing Pool.\nTo learn more about the Smoothing Pool, please visit %s.\n", smoothingPoolLink) - } + // Count the number of 8 ETH, <10% commission minipools + poolsWithMissingCommission := 0 + leb16wei := new(big.Int) + leb16wei.SetString("16000000000000000000", 10) + for _, minipool := range status.Minipools { + if minipool.Node.DepositBalance.Cmp(leb16wei) < 0 && minipool.Node.Fee*100 < 10 { + poolsWithMissingCommission++ + } + } + if poolsWithMissingCommission == 1 { + fmt.Printf("%sYou have %d minipool that would earn extra commission if you opted into the smoothing pool!%s\n", colorYellow, poolsWithMissingCommission, colorReset) + fmt.Println("See https://rpips.rocketpool.net/RPIPs/RPIP-62 for more information about bonus commission, or run `rocketpool node join-smoothing-pool` to opt in.") + } + if poolsWithMissingCommission > 1 { + fmt.Printf("%sYou have %d minipools that would earn extra commission if you opted into the smoothing pool!%s\n", colorYellow, poolsWithMissingCommission, colorReset) + fmt.Println("See https://rpips.rocketpool.net/RPIPs/RPIP-62 for more information about bonus commission, or run `rocketpool node join-smoothing-pool` to opt in.") + } - fmt.Printf("The node's fee distributor %s%s%s has a balance of %.6f ETH.\n", colorBlue, status.FeeRecipientInfo.FeeDistributorAddress.Hex(), colorReset, math.RoundDown(eth.WeiToEth(status.FeeDistributorBalance), 6)) - if cfg.IsNativeMode && !status.FeeRecipientInfo.IsInSmoothingPool && !status.FeeRecipientInfo.IsInOptOutCooldown { - fmt.Printf("%sNOTE: You are in Native Mode; you MUST ensure that your Validator Client is using this address as its fee recipient!%s\n", colorYellow, colorReset) - } - if !status.IsFeeDistributorInitialized { - fmt.Printf("\n%sThe fee distributor hasn't been initialized yet. When you are able, please initialize it with `rocketpool node initialize-fee-distributor`.%s\n", colorYellow, colorReset) } fmt.Println() diff --git a/rocketpool-cli/service/service.go b/rocketpool-cli/service/service.go index fd0c5948f..ea47257ca 100644 --- a/rocketpool-cli/service/service.go +++ b/rocketpool-cli/service/service.go @@ -986,7 +986,7 @@ func pruneExecutionClient(c *cli.Context) error { if selectedEc == cfgtypes.ExecutionClient_Geth || selectedEc == cfgtypes.ExecutionClient_Besu { if selectedEc == cfgtypes.ExecutionClient_Geth { - fmt.Printf("%sGeth has a new feature that renders pruning obsolete. Consider enabling PBSS in the Execution Client settings in `rocketpool service config` and resyncing with `rocketpool service resync-eth1` instead of pruning.%s\n", colorYellow, colorReset) + fmt.Printf("%sGeth has a new feature that renders pruning obsolete. 
However, as this is a new feature, you may have to resync with `rocketpool service resync-eth1` before this takes effect.%s\n", colorYellow, colorReset) } fmt.Println("This will shut down your main execution client and prune its database, freeing up disk space.") if cfg.UseFallbackClients.Value == false { diff --git a/rocketpool/api/debug/beacon_state.go b/rocketpool/api/debug/beacon_state.go new file mode 100644 index 000000000..badfc310d --- /dev/null +++ b/rocketpool/api/debug/beacon_state.go @@ -0,0 +1,52 @@ +package debug + +import ( + "encoding/json" + "fmt" + + "github.com/urfave/cli" + + "github.com/rocket-pool/smartnode/shared/services" + "github.com/rocket-pool/smartnode/shared/types/api" + hexutil "github.com/rocket-pool/smartnode/shared/utils/hex" +) + +func getBeaconStateForSlot(c *cli.Context, slot uint64, validatorIndex uint64) error { + // Create a new response + response := api.BeaconStateResponse{} + + // Get services + if err := services.RequireNodeRegistered(c); err != nil { + return err + } + bc, err := services.GetBeaconClient(c) + if err != nil { + return err + } + + // Get beacon state + beaconState, err := bc.GetBeaconState(slot) + if err != nil { + return err + } + + proof, err := beaconState.ValidatorCredentialsProof(validatorIndex) + if err != nil { + return err + } + + // Convert the proof to a list of 0x-prefixed hex strings + response.Proof = make([]string, 0, len(proof)) + for _, hash := range proof { + response.Proof = append(response.Proof, hexutil.EncodeToString(hash)) + } + + // Render response json + json, err := json.Marshal(response) + if err != nil { + return err + } + fmt.Println(string(json)) + + return nil +} diff --git a/rocketpool/api/debug/commands.go b/rocketpool/api/debug/commands.go index e7e52e8f3..72b54ca3e 100644 --- a/rocketpool/api/debug/commands.go +++ b/rocketpool/api/debug/commands.go @@ -36,6 +36,35 @@ func RegisterSubcommands(command *cli.Command, name string, aliases []string) { }, }, + { + Name: "get-beacon-state", + Aliases: []string{"b"}, + Usage: "Returns a validator credentials proof from the beacon state for a given slot number and validator index", + UsageText: "rocketpool api debug get-beacon-state slot-number validator-index", + Action: func(c *cli.Context) error { + + // Validate args + if err := cliutils.ValidateArgCount(c, 2); err != nil { + return err + } + + slotNumber, err := cliutils.ValidatePositiveUint("slot number", c.Args().Get(0)) + if err != nil { + return err + } + + validatorIndex, err := cliutils.ValidatePositiveUint("validator index", c.Args().Get(1)) + if err != nil { + return err + } + + if err := getBeaconStateForSlot(c, slotNumber, validatorIndex); err != nil { + fmt.Printf("An error occurred: %s\n", err) + } + return nil + + }, + }, }, }) } diff --git a/rocketpool/api/minipool/commands.go b/rocketpool/api/minipool/commands.go index 744ff32a2..3f689cdcf 100644 --- a/rocketpool/api/minipool/commands.go +++ b/rocketpool/api/minipool/commands.go @@ -773,11 +773,11 @@ func RegisterSubcommands(command *cli.Command, name string, aliases []string) { { Name: "rescue-dissolved", Usage: "Rescue a dissolved minipool by depositing ETH for it to the Beacon deposit contract", - UsageText: "rocketpool api minipool rescue-dissolved minipool-address deposit-amount", + UsageText: "rocketpool api minipool rescue-dissolved minipool-address deposit-amount submit", Action: func(c *cli.Context) error { // Validate args - if err := cliutils.ValidateArgCount(c, 2); err != nil { + if err := cliutils.ValidateArgCount(c, 3); err != nil { return err } minipoolAddress, err :=
cliutils.ValidateAddress("minipool address", c.Args().Get(0)) @@ -788,9 +788,13 @@ func RegisterSubcommands(command *cli.Command, name string, aliases []string) { if err != nil { return err } + submit, err := cliutils.ValidateBool("submit", c.Args().Get(2)) + if err != nil { + return err + } // Run - api.PrintResponse(rescueDissolvedMinipool(c, minipoolAddress, depositAmount)) + api.PrintResponse(rescueDissolvedMinipool(c, minipoolAddress, depositAmount, submit)) return nil }, diff --git a/rocketpool/api/minipool/rescue-dissolved.go b/rocketpool/api/minipool/rescue-dissolved.go index 88b1dc877..5fb1c1812 100644 --- a/rocketpool/api/minipool/rescue-dissolved.go +++ b/rocketpool/api/minipool/rescue-dissolved.go @@ -281,7 +281,7 @@ func getDepositTx(rp *rocketpool.RocketPool, w *wallet.Wallet, bc beacon.Client, } -func rescueDissolvedMinipool(c *cli.Context, minipoolAddress common.Address, amount *big.Int) (*api.RescueDissolvedMinipoolResponse, error) { +func rescueDissolvedMinipool(c *cli.Context, minipoolAddress common.Address, amount *big.Int, submit bool) (*api.RescueDissolvedMinipoolResponse, error) { // Get services if err := services.RequireNodeRegistered(c); err != nil { @@ -316,11 +316,23 @@ func rescueDissolvedMinipool(c *cli.Context, minipoolAddress common.Address, amo return nil, fmt.Errorf("Error checking for nonce override: %w", err) } + opts.NoSend = !submit + // Submit the rescue deposit tx, err := getDepositTx(rp, w, bc, minipoolAddress, amount, opts) if err != nil { return nil, fmt.Errorf("error submitting rescue deposit: %w", err) } + + // Print transaction if requested + if !submit { + b, err := tx.MarshalBinary() + if err != nil { + return nil, err + } + fmt.Printf("%x\n", b) + } + response.TxHash = tx.Hash() // Return response diff --git a/rocketpool/api/minipool/status.go b/rocketpool/api/minipool/status.go index 00cb6ffd1..d203262dd 100644 --- a/rocketpool/api/minipool/status.go +++ b/rocketpool/api/minipool/status.go @@ -46,7 +46,7 @@ func getStatus(c *cli.Context) (*api.MinipoolStatusResponse, error) { if err != nil { return nil, err } - details, err := getNodeMinipoolDetails(rp, bc, nodeAccount.Address, &legacyMinipoolQueueAddress) + details, err := GetNodeMinipoolDetails(rp, bc, nodeAccount.Address, &legacyMinipoolQueueAddress) if err != nil { return nil, err } diff --git a/rocketpool/api/minipool/utils.go b/rocketpool/api/minipool/utils.go index c1006e8b5..d526b99d6 100644 --- a/rocketpool/api/minipool/utils.go +++ b/rocketpool/api/minipool/utils.go @@ -38,7 +38,7 @@ func validateMinipoolOwner(mp minipool.Minipool, nodeAddress common.Address) err } // Get all node minipool details -func getNodeMinipoolDetails(rp *rocketpool.RocketPool, bc beacon.Client, nodeAddress common.Address, legacyMinipoolQueueAddress *common.Address) ([]api.MinipoolDetails, error) { +func GetNodeMinipoolDetails(rp *rocketpool.RocketPool, bc beacon.Client, nodeAddress common.Address, legacyMinipoolQueueAddress *common.Address) ([]api.MinipoolDetails, error) { // Data var wg1 errgroup.Group diff --git a/rocketpool/api/network/generate-tree.go b/rocketpool/api/network/generate-tree.go index b2d9929c9..af402c00c 100644 --- a/rocketpool/api/network/generate-tree.go +++ b/rocketpool/api/network/generate-tree.go @@ -5,8 +5,8 @@ import ( "os" "github.com/fatih/color" - "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/smartnode/shared/services" + "github.com/rocket-pool/smartnode/shared/services/config" "github.com/rocket-pool/smartnode/shared/types/api" "github.com/urfave/cli" 
) @@ -32,14 +32,14 @@ func canGenerateRewardsTree(c *cli.Context, index uint64) (*api.CanNetworkGenera response := api.CanNetworkGenerateRewardsTreeResponse{} // Get the current interval - currentIndexBig, err := rewards.GetRewardIndex(rp, nil) + currentIndexBig, err := rp.GetRewardIndex(nil) if err != nil { return nil, err } response.CurrentIndex = currentIndexBig.Uint64() // Get the path of the file to save - filePath := cfg.Smartnode.GetRewardsTreePath(index, true) + filePath := cfg.Smartnode.GetRewardsTreePath(index, true, config.RewardsExtensionJSON) _, err = os.Stat(filePath) if os.IsNotExist(err) { response.TreeFileExists = false diff --git a/rocketpool/api/node/status.go b/rocketpool/api/node/status.go index edce4d360..86f308044 100644 --- a/rocketpool/api/node/status.go +++ b/rocketpool/api/node/status.go @@ -22,6 +22,7 @@ import ( "github.com/urfave/cli" "golang.org/x/sync/errgroup" + mp "github.com/rocket-pool/smartnode/rocketpool/api/minipool" "github.com/rocket-pool/smartnode/rocketpool/api/pdao" "github.com/rocket-pool/smartnode/shared/services" "github.com/rocket-pool/smartnode/shared/services/alerting" @@ -41,6 +42,9 @@ func getStatus(c *cli.Context) (*api.NodeStatusResponse, error) { if err := services.RequireRocketStorage(c); err != nil { return nil, err } + if err := services.RequireBeaconClientSynced(c); err != nil { + return nil, err + } cfg, err := services.GetConfig(c) if err != nil { return nil, err @@ -70,6 +74,9 @@ func getStatus(c *cli.Context) (*api.NodeStatusResponse, error) { response.PenalizedMinipools = map[common.Address]uint64{} response.NodeRPLLocked = big.NewInt(0) + // Get the legacy MinipoolQueue contract address + legacyMinipoolQueueAddress := cfg.Smartnode.GetV110MinipoolQueueAddress() + // Get node account nodeAccount, err := w.GetNodeAccount() if err != nil { @@ -81,6 +88,23 @@ func getStatus(c *cli.Context) (*api.NodeStatusResponse, error) { // Sync var wg errgroup.Group + wg.Go(func() error { + mpDetails, err := mp.GetNodeMinipoolDetails(rp, bc, nodeAccount.Address, &legacyMinipoolQueueAddress) + if err == nil { + response.Minipools = mpDetails + } + return err + }) + + wg.Go(func() error { + delegate, err := rp.GetContract("rocketMinipoolDelegate", nil) + if err != nil { + return fmt.Errorf("Error getting latest minipool delegate contract: %w", err) + } + response.LatestDelegate = *delegate.Address + return err + }) + // Get node trusted status wg.Go(func() error { trusted, err := trustednode.GetMemberExists(rp, nodeAccount.Address, nil) diff --git a/rocketpool/api/pdao/status.go b/rocketpool/api/pdao/status.go index 4f92d895a..f17d1b4f4 100644 --- a/rocketpool/api/pdao/status.go +++ b/rocketpool/api/pdao/status.go @@ -223,6 +223,7 @@ func GetSnapshotVotedProposals(apiDomain string, space string, nodeAddress commo where: { space: "%s", voter_in: ["%s", "%s"], + created_gte: 1727694646 }, orderBy: "created", orderDirection: desc @@ -264,7 +265,7 @@ func GetSnapshotProposals(apiDomain string, space string, state string) (*api.Sn stateFilter = fmt.Sprintf(`, state: "%s"`, state) } query := fmt.Sprintf(`query Proposals { - proposals(where: {space: "%s"%s}, orderBy: "created", orderDirection: desc) { + proposals(where: {space: "%s"%s, start_gte: 1727694646}, orderBy: "created", orderDirection: desc) { id title choices diff --git a/rocketpool/node/collectors/node-collector.go b/rocketpool/node/collectors/node-collector.go index 32ca59482..4b69ff0f1 100644 --- a/rocketpool/node/collectors/node-collector.go +++ 
b/rocketpool/node/collectors/node-collector.go @@ -321,8 +321,10 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) { if !previousInterval.TreeFileExists { return fmt.Errorf("Error retrieving previous interval's total node weight: rewards file %s doesn't exist for interval %d", previousInterval.TreeFilePath, previousRewardIndex) } - // Convert to a float, accuracy loss is meaningless compared to the heuristic's natural inaccuracy. - previousIntervalTotalNodeWeight = &previousInterval.TotalNodeWeight.Int + + if previousInterval.TotalNodeWeight != nil { + previousIntervalTotalNodeWeight.Set(previousInterval.TotalNodeWeight) + } // Get the info for each claimed interval for _, claimedInterval := range claimed { @@ -533,6 +535,7 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) { nodeWeightSum := big.NewInt(0).Add(nodeWeight, previousIntervalTotalNodeWeight) + // Convert to a float, accuracy loss is meaningless compared to the heuristic's natural inaccuracy. // nodeWeightRatio = current_node_weight / (current_node_weight + previous_interval_total_node_weight) nodeWeightRatio, _ := big.NewFloat(0).Quo( big.NewFloat(0).SetInt(nodeWeight), diff --git a/rocketpool/node/collectors/snapshot-collector.go b/rocketpool/node/collectors/snapshot-collector.go index 76b382cc7..cb6fda5f4 100644 --- a/rocketpool/node/collectors/snapshot-collector.go +++ b/rocketpool/node/collectors/snapshot-collector.go @@ -1,14 +1,22 @@ package collectors import ( + "context" "fmt" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" + "github.com/rocket-pool/rocketpool-go/network" "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/rocket-pool/rocketpool-go/utils/eth" "github.com/rocket-pool/smartnode/rocketpool/api/pdao" + "github.com/rocket-pool/smartnode/shared/services" "github.com/rocket-pool/smartnode/shared/services/config" + "github.com/rocket-pool/smartnode/shared/services/contracts" + "github.com/rocket-pool/smartnode/shared/services/proposals" + "github.com/rocket-pool/smartnode/shared/types/api" "golang.org/x/sync/errgroup" ) @@ -35,14 +43,26 @@ type SnapshotCollector struct { // The current delegate voting power on Snapshot delegateVotingPower *prometheus.Desc + // The Rocket Pool Contract manager + rp *rocketpool.RocketPool + // The Rocket Pool config cfg *config.RocketPoolConfig + // The Rocket Pool Execution Client manager + ec *services.ExecutionClientManager + + // The Rocket Pool Beacon Client manager + bc *services.BeaconClientManager + + // The RocketSignerRegistry Contract + reg *contracts.RocketSignerRegistry + // the node wallet address nodeAddress common.Address - // the delegate address - delegateAddress common.Address + // the signalling address + signallingAddress common.Address // Store values from the latest API call cachedNodeVotingPower float64 @@ -60,7 +80,7 @@ type SnapshotCollector struct { } // Create a new SnapshotCollector instance -func NewSnapshotCollector(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, nodeAddress common.Address, delegateAddress common.Address) *SnapshotCollector { +func NewSnapshotCollector(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, ec *services.ExecutionClientManager, bc *services.BeaconClientManager, reg *contracts.RocketSignerRegistry, nodeAddress common.Address, signallingAddress common.Address) *SnapshotCollector { subsystem := "snapshot" return &SnapshotCollector{ 
activeProposals: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "proposals_active"), @@ -87,10 +107,14 @@ func NewSnapshotCollector(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfi "The delegate current voting power on Snapshot", nil, nil, ), - cfg: cfg, - nodeAddress: nodeAddress, - delegateAddress: delegateAddress, - logPrefix: "Snapshot Collector", + rp: rp, + cfg: cfg, + ec: ec, + bc: bc, + reg: reg, + nodeAddress: nodeAddress, + signallingAddress: signallingAddress, + logPrefix: "Snapshot Collector", } } @@ -109,38 +133,14 @@ func (collector *SnapshotCollector) Collect(channel chan<- prometheus.Metric) { // Sync var wg errgroup.Group + var err error + var propMgr *proposals.ProposalManager + var blockNumber uint64 + var onchainVotingDelegate common.Address + var isVotingInitialized bool activeProposals := float64(0) closedProposals := float64(0) - votesActiveProposals := float64(0) - votesClosedProposals := float64(0) - handledProposals := map[string]bool{} - - // Get the number of votes on Snapshot proposals - wg.Go(func() error { - if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { - votedProposals, err := pdao.GetSnapshotVotedProposals(collector.cfg.Smartnode.GetSnapshotApiDomain(), collector.cfg.Smartnode.GetSnapshotID(), collector.nodeAddress, collector.delegateAddress) - if err != nil { - return fmt.Errorf("Error getting Snapshot voted proposals: %w", err) - } - - for _, votedProposal := range votedProposals.Data.Votes { - _, exists := handledProposals[votedProposal.Proposal.Id] - if !exists { - if votedProposal.Proposal.State == "active" { - votesActiveProposals += 1 - } else { - votesClosedProposals += 1 - } - handledProposals[votedProposal.Proposal.Id] = true - } - } - collector.cachedVotesActiveProposals = votesActiveProposals - collector.cachedVotesClosedProposals = votesClosedProposals - - } - - return nil - }) + blankAddress := common.Address{} // Get the number of live Snapshot proposals wg.Go(func() error { @@ -164,31 +164,49 @@ func (collector *SnapshotCollector) Collect(channel chan<- prometheus.Metric) { return nil }) - // Get the node's voting power + // Get latest block number wg.Go(func() error { if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { - - votingPowerResponse, err := pdao.GetSnapshotVotingPower(collector.cfg.Smartnode.GetSnapshotApiDomain(), collector.cfg.Smartnode.GetSnapshotID(), collector.nodeAddress) + blockNumber, err = collector.ec.BlockNumber(context.Background()) if err != nil { - return fmt.Errorf("Error getting Snapshot voted proposals for node address: %w", err) + return fmt.Errorf("Error getting block number: %w", err) } + } + return nil - collector.cachedNodeVotingPower = votingPowerResponse.Data.Vp.Vp + }) + + // Get the propMgr + wg.Go(func() error { + if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { + propMgr, err = proposals.NewProposalManager(nil, collector.cfg, collector.rp, collector.bc) + if err != nil { + return fmt.Errorf("Error getting the prop manager: %w", err) + } } return nil }) - // Get the delegate's voting power + // Get the node onchain voting delegate wg.Go(func() error { if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { - votingPowerResponse, err := pdao.GetSnapshotVotingPower(collector.cfg.Smartnode.GetSnapshotApiDomain(), collector.cfg.Smartnode.GetSnapshotID(), collector.delegateAddress) + onchainVotingDelegate, err = network.GetCurrentVotingDelegate(collector.rp, collector.nodeAddress, nil) if err != nil { - 
return fmt.Errorf("Error getting Snapshot voted proposals for delegate address: %w", err) + return fmt.Errorf("Error getting the on-chain voting delegate: %w", err) } + } + return err + }) - collector.cachedDelegateVotingPower = votingPowerResponse.Data.Vp.Vp + // Get Voting Initialized status + wg.Go(func() error { + if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { + isVotingInitialized, err = network.GetVotingInitialized(collector.rp, collector.nodeAddress, nil) + if err != nil { + return fmt.Errorf("Error checking if voting is initialized: %w", err) + } } - return nil + return err }) // Wait for data @@ -196,6 +214,49 @@ func (collector *SnapshotCollector) Collect(channel chan<- prometheus.Metric) { collector.logError(err) return } + + // Check if sufficient time has passed and voting is initialized + if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait && isVotingInitialized { + // Get voting power for the node + nodeVotingPower, err := getVotingPower(propMgr, uint32(blockNumber), collector.nodeAddress) + if err != nil { + collector.logError(fmt.Errorf("error getting node voting power: %w", err)) + collector.cachedNodeVotingPower = 0 + } else { + collector.cachedNodeVotingPower = nodeVotingPower + } + // Get voting power for the delegate + delegateVotingPower, err := getVotingPower(propMgr, uint32(blockNumber), onchainVotingDelegate) + if err != nil { + collector.logError(fmt.Errorf("error getting delegate voting power: %w", err)) + collector.cachedDelegateVotingPower = 0 + } else { + collector.cachedDelegateVotingPower = delegateVotingPower + } + } + + // Get the number of votes on Snapshot proposals + if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { + // Check if there is a delegate voting on behalf of the node + if onchainVotingDelegate != blankAddress || onchainVotingDelegate != collector.nodeAddress { + delegateSignallingAddress, err := collector.reg.NodeToSigner(&bind.CallOpts{}, onchainVotingDelegate) + if err != nil { + collector.logError(fmt.Errorf("Error getting the signalling address: %w", err)) + } + votedProposals, err := pdao.GetSnapshotVotedProposals(collector.cfg.Smartnode.GetSnapshotApiDomain(), collector.cfg.Smartnode.GetSnapshotID(), onchainVotingDelegate, delegateSignallingAddress) + if err != nil { + collector.logError(fmt.Errorf("Error getting Snapshot voted proposals: %w", err)) + } + collector.collectVotes(votedProposals) + } else { + votedProposals, err := pdao.GetSnapshotVotedProposals(collector.cfg.Smartnode.GetSnapshotApiDomain(), collector.cfg.Smartnode.GetSnapshotID(), collector.nodeAddress, collector.signallingAddress) + if err != nil { + collector.logError(fmt.Errorf("Error getting Snapshot voted proposals: %w", err)) + } + collector.collectVotes(votedProposals) + } + } + if time.Since(collector.lastApiCallTimestamp).Hours() >= hoursToWait { collector.lastApiCallTimestamp = time.Now() } @@ -218,3 +279,34 @@ func (collector *SnapshotCollector) Collect(channel chan<- prometheus.Metric) { func (collector *SnapshotCollector) logError(err error) { fmt.Printf("[%s] %s\n", collector.logPrefix, err.Error()) } + +func getVotingPower(propMgr *proposals.ProposalManager, blockNumber uint32, address common.Address) (float64, error) { + // Get the total voting power + totalDelegatedVP, _, _, err := propMgr.GetArtifactsForVoting(blockNumber, address) + if err != nil { + return 0, fmt.Errorf("error getting voting power: %w", err) + } + + return eth.WeiToEth(totalDelegatedVP), nil +} + +func (collector 
*SnapshotCollector) collectVotes(votedProposals *api.SnapshotVotedProposals) { + handledProposals := map[string]bool{} + votesActiveProposals := float64(0) + votesClosedProposals := float64(0) + + for _, votedProposal := range votedProposals.Data.Votes { + _, exists := handledProposals[votedProposal.Proposal.Id] + if !exists { + if votedProposal.Proposal.State == "active" { + votesActiveProposals += 1 + } else { + votesClosedProposals += 1 + } + handledProposals[votedProposal.Proposal.Id] = true + } + } + collector.cachedVotesActiveProposals = votesActiveProposals + collector.cachedVotesClosedProposals = votesClosedProposals + +} diff --git a/rocketpool/node/download-reward-trees.go b/rocketpool/node/download-reward-trees.go index f93c2f39c..79ae6d6a3 100644 --- a/rocketpool/node/download-reward-trees.go +++ b/rocketpool/node/download-reward-trees.go @@ -96,7 +96,7 @@ func (d *downloadRewardsTrees) run(state *state.NetworkState) error { missingIntervals := []uint64{} for i := uint64(0); i < currentIndex; i++ { // Check if the tree file exists - treeFilePath := d.cfg.Smartnode.GetRewardsTreePath(i, true) + treeFilePath := d.cfg.Smartnode.GetRewardsTreePath(i, true, config.RewardsExtensionJSON) _, err = os.Stat(treeFilePath) if os.IsNotExist(err) { d.log.Printlnf("You are missing the rewards tree file for interval %d.", i) diff --git a/rocketpool/node/metrics-exporter.go b/rocketpool/node/metrics-exporter.go index e0555eb85..f5d3cbfdf 100644 --- a/rocketpool/node/metrics-exporter.go +++ b/rocketpool/node/metrics-exporter.go @@ -6,6 +6,8 @@ import ( "os" "strings" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rocket-pool/smartnode/rocketpool/node/collectors" @@ -37,7 +39,7 @@ func runMetricsServer(c *cli.Context, logger log.ColorLogger, stateLocker *colle if err != nil { return err } - s, err := services.GetSnapshotDelegation(c) + reg, err := services.GetRocketSignerRegistry(c) if err != nil { return err } @@ -80,14 +82,16 @@ func runMetricsServer(c *cli.Context, logger log.ColorLogger, stateLocker *colle registry.MustRegister(smoothingPoolCollector) // Set up snapshot checking if enabled - votingId := cfg.Smartnode.GetVotingSnapshotID() - if s != nil { - votingDelegate, err := s.Delegation(nil, nodeAccount.Address, votingId) + if cfg.Smartnode.GetRocketSignerRegistryAddress() != "" { + signallingAddress, err := reg.NodeToSigner(&bind.CallOpts{}, nodeAccount.Address) if err != nil { - return fmt.Errorf("Error getting node delegate: %w", err) + logger.Printlnf("Error getting the signalling address: %v", err) + // Set signallingAddress to blank address instead of erroring out of the task loop.
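+ // NOTE: a zeroed signalling address is safe to fall back to here; the snapshot collector's vote queries always include the node address itself, so a blank signer simply yields no extra votes rather than failing the exporter.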
+ signallingAddress = common.Address{} } - snapshotCollector := collectors.NewSnapshotCollector(rp, cfg, nodeAccount.Address, votingDelegate) + snapshotCollector := collectors.NewSnapshotCollector(rp, cfg, ec, bc, reg, nodeAccount.Address, signallingAddress) registry.MustRegister(snapshotCollector) + } // Start the HTTP server diff --git a/rocketpool/node/node.go b/rocketpool/node/node.go index 73dc544f8..8b8b69e31 100644 --- a/rocketpool/node/node.go +++ b/rocketpool/node/node.go @@ -117,10 +117,7 @@ func run(c *cli.Context) error { updateLog := log.NewColorLogger(UpdateColor) // Create the state manager - m, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, &updateLog) - if err != nil { - return err - } + m := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, &updateLog) stateLocker := collectors.NewStateLocker() // Initialize tasks diff --git a/rocketpool/watchtower/generate-rewards-tree.go b/rocketpool/watchtower/generate-rewards-tree.go index fd6e2b62c..a93b6d782 100644 --- a/rocketpool/watchtower/generate-rewards-tree.go +++ b/rocketpool/watchtower/generate-rewards-tree.go @@ -139,8 +139,13 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) { generationPrefix := fmt.Sprintf("[Interval %d Tree]", index) t.log.Printlnf("%s Starting generation of Merkle rewards tree for interval %d.", generationPrefix, index) + // Get previous rewards pool addresses + previousRewardsPoolAddresses := t.cfg.Smartnode.GetPreviousRewardsPoolAddresses() + + rewardsClient := rprewards.NewRewardsExecutionClient(t.rp) + // Find the event for this interval - rewardsEvent, err := rprewards.GetRewardSnapshotEvent(t.rp, t.cfg, index, nil) + rewardsEvent, err := rewardsClient.GetRewardSnapshotEvent(previousRewardsPoolAddresses, index, nil) if err != nil { t.handleError(fmt.Errorf("%s Error getting event for interval %d: %w", generationPrefix, index, err)) return @@ -164,11 +169,7 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) { address, err := client.RocketStorage.GetAddress(opts, crypto.Keccak256Hash([]byte("contract.addressrocketTokenRETH"))) if err == nil { // Create the state manager with using the primary or fallback (not necessarily archive) EC - stateManager, err = state.NewNetworkStateManager(client, t.cfg, t.rp.Client, t.bc, &t.log) - if err != nil { - t.handleError(fmt.Errorf("error creating new NetworkStateManager with Archive EC: %w", err)) - return - } + stateManager = state.NewNetworkStateManager(client, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, &t.log) } else { // Check if an Archive EC is provided, and if using it would potentially resolve the error errMessage := err.Error() @@ -199,12 +200,16 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) { t.handleError(fmt.Errorf("Error verifying rETH address with Archive EC: %w", err)) return } - // Create the state manager with the archive EC - stateManager, err = state.NewNetworkStateManager(client, t.cfg, ec, t.bc, &t.log) + + // Create a new rocketpool-go instance + archiveRP, err := rocketpool.NewRocketPool(ec, *t.rp.RocketStorageContract.Address) if err != nil { - t.handleError(fmt.Errorf("Error creating new NetworkStateManager with ARchive EC: %w", err)) + t.handleError(fmt.Errorf("Error instantiating client with Archive EC: %w", err)) return } + + // Create the state manager with the archive EC + stateManager = state.NewNetworkStateManager(archiveRP, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, &t.log) } else { // No archive node specified 
t.handleError(fmt.Errorf("***ERROR*** Primary EC cannot retrieve state for historical block %d and the Archive EC is not specified.", elBlockHeader.Number.Uint64())) @@ -234,53 +239,46 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) { // Implementation for rewards tree generation using a viable EC func (t *generateRewardsTree) generateRewardsTreeImpl(rp *rocketpool.RocketPool, index uint64, generationPrefix string, rewardsEvent rewards.RewardsEvent, elBlockHeader *types.Header, state *state.NetworkState) { + // Determine the end of the interval + snapshotEnd := &rprewards.SnapshotEnd{ + ConsensusBlock: rewardsEvent.ConsensusBlock.Uint64(), + ExecutionBlock: rewardsEvent.ExecutionBlock.Uint64(), + Slot: state.BeaconConfig.FirstSlotAtLeast(rewardsEvent.IntervalEndTime.Unix()), + } + // Generate the rewards file start := time.Now() - treegen, err := rprewards.NewTreeGenerator(&t.log, generationPrefix, rp, t.cfg, t.bc, index, rewardsEvent.IntervalStartTime, rewardsEvent.IntervalEndTime, rewardsEvent.ConsensusBlock.Uint64(), elBlockHeader, rewardsEvent.IntervalsPassed.Uint64(), state, nil) + treegen, err := rprewards.NewTreeGenerator(&t.log, generationPrefix, rprewards.NewRewardsExecutionClient(rp), t.cfg, t.bc, index, rewardsEvent.IntervalStartTime, rewardsEvent.IntervalEndTime, snapshotEnd, elBlockHeader, rewardsEvent.IntervalsPassed.Uint64(), state) if err != nil { t.handleError(fmt.Errorf("%s Error creating Merkle tree generator: %w", generationPrefix, err)) return } - rewardsFile, err := treegen.GenerateTree() + treeResult, err := treegen.GenerateTree() if err != nil { t.handleError(fmt.Errorf("%s Error generating Merkle tree: %w", generationPrefix, err)) return } - header := rewardsFile.GetHeader() - for address, network := range header.InvalidNetworkNodes { + rewardsFile := treeResult.RewardsFile + for address, network := range treeResult.InvalidNetworkNodes { t.log.Printlnf("%s WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", generationPrefix, address.Hex(), network) } t.log.Printlnf("%s Finished in %s", generationPrefix, time.Since(start).String()) // Validate the Merkle root - root := common.BytesToHash(header.MerkleTree.Root()) - if root != rewardsEvent.MerkleRoot { - t.log.Printlnf("%s WARNING: your Merkle tree had a root of %s, but the canonical Merkle tree's root was %s. This file will not be usable for claiming rewards.", generationPrefix, root.Hex(), rewardsEvent.MerkleRoot.Hex()) + root := rewardsFile.GetMerkleRoot() + if root != rewardsEvent.MerkleRoot.Hex() { + t.log.Printlnf("%s WARNING: your Merkle tree had a root of %s, but the canonical Merkle tree's root was %s. This file will not be usable for claiming rewards.", generationPrefix, root, rewardsEvent.MerkleRoot.Hex()) } else { - t.log.Printlnf("%s Your Merkle tree's root of %s matches the canonical root! You will be able to use this file for claiming rewards.", generationPrefix, header.MerkleRoot) + t.log.Printlnf("%s Your Merkle tree's root of %s matches the canonical root! 
You will be able to use this file for claiming rewards.", generationPrefix, root) } - // Create the JSON files rewardsFile.SetMinipoolPerformanceFileCID("---") + + // Save the files t.log.Printlnf("%s Saving JSON files...", generationPrefix) - localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile]( - rewardsFile.GetMinipoolPerformanceFile(), - t.cfg.Smartnode.GetMinipoolPerformancePath(index, true), - ) - localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile]( - rewardsFile, - t.cfg.Smartnode.GetRewardsTreePath(index, true), - ) - - // Write the files - err = localMinipoolPerformanceFile.Write() - if err != nil { - t.handleError(fmt.Errorf("%s error saving minipool performance file: %w", generationPrefix, err)) - return - } - err = localRewardsFile.Write() + _, _, err = treegen.SaveFiles(treeResult, false) if err != nil { - t.handleError(fmt.Errorf("%s error saving rewards file: %w", generationPrefix, err)) + t.handleError(fmt.Errorf("%s failed to save rewards artifacts: %w", generationPrefix, err)) return } diff --git a/rocketpool/watchtower/submit-network-balances.go b/rocketpool/watchtower/submit-network-balances.go index acd8176de..9f281ccc2 100644 --- a/rocketpool/watchtower/submit-network-balances.go +++ b/rocketpool/watchtower/submit-network-balances.go @@ -297,10 +297,7 @@ func (t *submitNetworkBalances) getNetworkBalances(elBlockHeader *types.Header, } // Create a new state gen manager - mgr, err := state.NewNetworkStateManager(client, t.cfg, client.Client, t.bc, t.log) - if err != nil { - return networkBalances{}, fmt.Errorf("error creating network state manager for EL block %s, Beacon slot %d: %w", elBlock, beaconBlock, err) - } + mgr := state.NewNetworkStateManager(client, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, t.log) // Create a new state for the target block state, err := mgr.GetStateForSlot(beaconBlock) @@ -352,10 +349,16 @@ func (t *submitNetworkBalances) getNetworkBalances(elBlockHeader *types.Header, timeSinceStart := slotTime.Sub(startTime) intervalsPassed := timeSinceStart / intervalTime endTime := slotTime + // Since we aren't generating an actual tree, just use beaconBlock as the snapshotEnd + snapshotEnd := &rprewards.SnapshotEnd{ + Slot: beaconBlock, + ConsensusBlock: beaconBlock, + ExecutionBlock: state.ElBlockNumber, + } // Approximate the staker's share of the smoothing pool balance // NOTE: this will use the "vanilla" variant of treegen, without rolling records, to retain parity with other Oracle DAO nodes that aren't using rolling records - treegen, err := rprewards.NewTreeGenerator(t.log, "[Balances]", client, t.cfg, t.bc, currentIndex, startTime, endTime, beaconBlock, elBlockHeader, uint64(intervalsPassed), state, nil) + treegen, err := rprewards.NewTreeGenerator(t.log, "[Balances]", rprewards.NewRewardsExecutionClient(client), t.cfg, t.bc, currentIndex, startTime, endTime, snapshotEnd, elBlockHeader, uint64(intervalsPassed), state) if err != nil { return fmt.Errorf("error creating merkle tree generator to approximate share of smoothing pool: %w", err) } diff --git a/rocketpool/watchtower/submit-rewards-tree-rolling.go b/rocketpool/watchtower/submit-rewards-tree-rolling.go deleted file mode 100644 index 2d13443a3..000000000 --- a/rocketpool/watchtower/submit-rewards-tree-rolling.go +++ /dev/null @@ -1,694 +0,0 @@ -package watchtower - -import ( - "context" - "encoding/hex" - "fmt" - "math" - "math/big" - "os" - "strings" - "sync" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - 
"github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/rocket-pool/rocketpool-go/rewards" - "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/rocketpool-go/tokens" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "github.com/rocket-pool/smartnode/rocketpool/watchtower/utils" - "github.com/rocket-pool/smartnode/shared/services" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/config" - rprewards "github.com/rocket-pool/smartnode/shared/services/rewards" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/services/wallet" - "github.com/rocket-pool/smartnode/shared/utils/api" - "github.com/rocket-pool/smartnode/shared/utils/eth1" - hexutil "github.com/rocket-pool/smartnode/shared/utils/hex" - "github.com/rocket-pool/smartnode/shared/utils/log" - "github.com/urfave/cli" -) - -// Process balances and rewards task -type submitRewardsTree_Rolling struct { - c *cli.Context - log log.ColorLogger - errLog log.ColorLogger - cfg *config.RocketPoolConfig - w *wallet.Wallet - ec rocketpool.ExecutionClient - rp *rocketpool.RocketPool - bc beacon.Client - genesisTime time.Time - recordMgr *rprewards.RollingRecordManager - stateMgr *state.NetworkStateManager - logPrefix string - - lock *sync.Mutex - isRunning bool -} - -// Create submit rewards tree with rolling record support -func newSubmitRewardsTree_Rolling(c *cli.Context, logger log.ColorLogger, errorLogger log.ColorLogger, stateMgr *state.NetworkStateManager) (*submitRewardsTree_Rolling, error) { - - // Get services - cfg, err := services.GetConfig(c) - if err != nil { - return nil, err - } - w, err := services.GetWallet(c) - if err != nil { - return nil, err - } - ec, err := services.GetEthClient(c) - if err != nil { - return nil, err - } - rp, err := services.GetRocketPool(c) - if err != nil { - return nil, err - } - bc, err := services.GetBeaconClient(c) - if err != nil { - return nil, err - } - - // Get the beacon config - beaconCfg, err := bc.GetEth2Config() - if err != nil { - return nil, fmt.Errorf("error getting beacon config: %w", err) - } - - // Get the Beacon genesis time - genesisTime := time.Unix(int64(beaconCfg.GenesisTime), 0) - - // Get the current interval index - currentIndexBig, err := rewards.GetRewardIndex(rp, nil) - if err != nil { - return nil, fmt.Errorf("error getting rewards index: %w", err) - } - currentIndex := currentIndexBig.Uint64() - if currentIndex == 0 { - return nil, fmt.Errorf("rolling records cannot be used for the first rewards interval") - } - - // Get the previous RocketRewardsPool addresses - prevAddresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses() - - // Get the last rewards event and starting epoch - found, event, err := rewards.GetRewardsEvent(rp, currentIndex-1, prevAddresses, nil) - if err != nil { - return nil, fmt.Errorf("error getting event for rewards interval %d: %w", currentIndex-1, err) - } - if !found { - return nil, fmt.Errorf("event for rewards interval %d not found", currentIndex-1) - } - - // Get the start slot of the current interval - startSlot, err := rprewards.GetStartSlotForInterval(event, bc, beaconCfg) - if err != nil { - return nil, fmt.Errorf("error getting start slot for interval %d: %w", currentIndex, err) - } - - // Create the task - lock := &sync.Mutex{} - logPrefix := "[Rolling Record]" - task := &submitRewardsTree_Rolling{ - c: c, - log: logger, - errLog: errorLogger, - cfg: cfg, - 
ec: ec, - w: w, - rp: rp, - bc: bc, - stateMgr: stateMgr, - genesisTime: genesisTime, - logPrefix: logPrefix, - lock: lock, - isRunning: false, - } - - // Make a new rolling manager - recordMgr, err := rprewards.NewRollingRecordManager(&task.log, &task.errLog, cfg, rp, bc, stateMgr, startSlot, beaconCfg, currentIndex) - if err != nil { - return nil, fmt.Errorf("error creating rolling record manager: %w", err) - } - - // Load the latest checkpoint - beaconHead, err := bc.GetBeaconHead() - if err != nil { - return nil, fmt.Errorf("error getting beacon head: %w", err) - } - latestFinalizedSlot := (beaconHead.FinalizedEpoch+1)*beaconCfg.SlotsPerEpoch - 1 - _, err = recordMgr.LoadBestRecordFromDisk(startSlot, latestFinalizedSlot, currentIndex) - if err != nil { - return nil, fmt.Errorf("error loading rolling record checkpoint from disk: %w", err) - } - - // Return - task.recordMgr = recordMgr - return task, nil - -} - -// Update the rolling record and run the submission process if applicable -func (t *submitRewardsTree_Rolling) run(headState *state.NetworkState) error { - // Wait for clients to sync - if err := services.WaitEthClientSynced(t.c, true); err != nil { - return err - } - if err := services.WaitBeaconClientSynced(t.c, true); err != nil { - return err - } - - t.lock.Lock() - if t.isRunning { - t.log.Println("Record update is already running in the background.") - t.lock.Unlock() - return nil - } - t.lock.Unlock() - - nodeAccount, err := t.w.GetNodeAccount() - if err != nil { - return fmt.Errorf("error loading node account: %w", err) - } - nodeAddress := nodeAccount.Address - - go func() { - t.lock.Lock() - t.isRunning = true - t.lock.Unlock() - t.log.Printlnf("%s Running record update in a separate thread.", t.logPrefix) - - // Capture the latest head state if one isn't passed in - if headState == nil { - // Get the latest Beacon block - latestBlock, err := t.stateMgr.GetLatestBeaconBlock() - if err != nil { - t.handleError(fmt.Errorf("error getting latest Beacon block: %w", err)) - return - } - - // Get the state of the network - headState, err = t.stateMgr.GetStateForSlot(latestBlock.Slot) - if err != nil { - t.handleError(fmt.Errorf("error getting network state: %w", err)) - return - } - } - - // Check whether or not the node is in the Oracle DAO - isInOdao := false - for _, details := range headState.OracleDaoMemberDetails { - if details.Address == nodeAddress { - isInOdao = true - break - } - } - - // Get the latest finalized slot and epoch - latestFinalizedBlock, err := t.stateMgr.GetLatestFinalizedBeaconBlock() - if err != nil { - t.handleError(fmt.Errorf("error getting latest finalized block: %w", err)) - return - } - latestFinalizedEpoch := latestFinalizedBlock.Slot / headState.BeaconConfig.SlotsPerEpoch - - // Check if a rewards interval is due - isRewardsSubmissionDue, rewardsSlot, intervalsPassed, startTime, endTime, err := t.isRewardsIntervalSubmissionRequired(headState) - if err != nil { - t.handleError(fmt.Errorf("error checking if rewards submission is required: %w", err)) - return - } - - // If no special upcoming state is required, update normally - if !isRewardsSubmissionDue { - err = t.recordMgr.UpdateRecordToState(headState, latestFinalizedBlock.Slot) - if err != nil { - t.handleError(fmt.Errorf("error updating record: %w", err)) - return - } - - t.lock.Lock() - t.isRunning = false - t.lock.Unlock() - return - } - - // Check if rewards reporting is ready - rewardsEpoch := rewardsSlot / headState.BeaconConfig.SlotsPerEpoch - requiredRewardsEpoch := rewardsEpoch + 1 
- isRewardsReadyForReport := isRewardsSubmissionDue && (latestFinalizedEpoch >= requiredRewardsEpoch) - - // Run updates and submissions as required - if isRewardsReadyForReport { - // Check if there's an existing file for this interval, and try submitting that - existingRewardsFile, valid, mustRegenerate := t.isExistingRewardsFileValid(headState.NetworkDetails.RewardIndex, intervalsPassed, nodeAddress, isInOdao) - if existingRewardsFile != nil { - if valid && !mustRegenerate { - // We already have a valid file and submission - t.log.Printlnf("%s Rewards tree has already been submitted for interval %d and is still valid but consensus hasn't been reached yet; nothing to do.", t.logPrefix, headState.NetworkDetails.RewardIndex) - t.lock.Lock() - t.isRunning = false - t.lock.Unlock() - return - } else if !valid && !mustRegenerate { - // We already have a valid file but need to submit again - t.log.Printlnf("%s Rewards tree has already been created for interval %d but hasn't been submitted yet, attempting resubmission.", t.logPrefix, headState.NetworkDetails.RewardIndex) - } else if !valid && mustRegenerate { - // We have a file but it's not valid (probably because too many intervals have passed) - t.log.Printlnf("%s Rewards submission for interval %d is due and current file is no longer valid (likely too many intervals have passed since its creation), regenerating it.", t.logPrefix, headState.NetworkDetails.RewardIndex) - } - } - - // Get the actual slot to report on - var elBlockNumber uint64 - rewardsSlot, elBlockNumber, err = t.getTrueRewardsIntervalSubmissionSlot(rewardsSlot) - if err != nil { - t.handleError(fmt.Errorf("error getting the true rewards interval slot: %w", err)) - return - } - - // Get an appropriate client that has access to the target state - this is required if the state gets pruned by the local EC and the - // archive EC is required - client, err := eth1.GetBestApiClient(t.rp, t.cfg, t.printMessage, big.NewInt(0).SetUint64(elBlockNumber)) - if err != nil { - t.handleError(fmt.Errorf("error getting best API client during rewards submission: %w", err)) - return - } - - // Generate the rewards state - stateMgr, err := state.NewNetworkStateManager(client, t.cfg, client.Client, t.bc, &t.log) - if err != nil { - t.handleError(fmt.Errorf("error creating state manager for rewards slot: %w", err)) - return - } - state, err := stateMgr.GetStateForSlot(rewardsSlot) - if err != nil { - t.handleError(fmt.Errorf("error getting state for rewards slot: %w", err)) - return - } - - // Process the rewards interval - t.log.Printlnf("%s Running rewards interval submission.", t.logPrefix) - err = t.runRewardsIntervalReport(client, state, isInOdao, intervalsPassed, startTime, endTime, mustRegenerate, existingRewardsFile) - if err != nil { - t.handleError(fmt.Errorf("error running rewards interval report: %w", err)) - return - } - } else { - t.log.Printlnf("%s Rewards submission for interval %d is due... 
waiting for epoch %d to be finalized (currently on epoch %d)", t.logPrefix, headState.NetworkDetails.RewardIndex, requiredRewardsEpoch, latestFinalizedEpoch) - } - - t.lock.Lock() - t.isRunning = false - t.lock.Unlock() - }() - - return nil -} - -// Print a message from the tree generation goroutine -func (t *submitRewardsTree_Rolling) printMessage(message string) { - t.log.Printlnf("%s %s", t.logPrefix, message) -} - -// Print an error and unlock the mutex -func (t *submitRewardsTree_Rolling) handleError(err error) { - t.errLog.Printlnf("%s %s", t.logPrefix, err.Error()) - t.errLog.Println("*** Rolling Record processing failed. ***") - t.lock.Lock() - t.isRunning = false - t.lock.Unlock() -} - -// Check if a rewards interval submission is required and if so, the slot number for the update -func (t *submitRewardsTree_Rolling) isRewardsIntervalSubmissionRequired(state *state.NetworkState) (bool, uint64, uint64, time.Time, time.Time, error) { - // Check if a rewards interval has passed and needs to be calculated - startTime := state.NetworkDetails.IntervalStart - intervalTime := state.NetworkDetails.IntervalDuration - - // Adjust for the first interval by making the start time the RPL inflation interval start time - if startTime == time.Unix(0, 0) { - var err error - opts := &bind.CallOpts{ - BlockNumber: big.NewInt(0).SetUint64(state.ElBlockNumber), - } - startTime, err = tokens.GetRPLInflationIntervalStartTime(t.rp, opts) - if err != nil { - return false, 0, 0, time.Time{}, time.Time{}, fmt.Errorf("start time is zero, but error getting Rocket Pool deployment block: %w", err) - } - t.log.Printlnf("NOTE: rewards pool interval start time is 0, using the inflation interval start time according to the RPL token (%s)", startTime.String()) - } - - // Calculate the end time, which is the number of intervals that have gone by since the current one's start - secondsSinceGenesis := time.Duration(state.BeaconConfig.SecondsPerSlot*state.BeaconSlotNumber) * time.Second - stateTime := t.genesisTime.Add(secondsSinceGenesis) - timeSinceStart := stateTime.Sub(startTime) - intervalsPassed := timeSinceStart / intervalTime - endTime := startTime.Add(intervalTime * intervalsPassed) - if intervalsPassed == 0 { - return false, 0, 0, time.Time{}, time.Time{}, nil - } - - // Get the target slot number - eth2Config := state.BeaconConfig - totalTimespan := endTime.Sub(t.genesisTime) - targetSlot := uint64(math.Ceil(totalTimespan.Seconds() / float64(eth2Config.SecondsPerSlot))) - targetSlotEpoch := targetSlot / eth2Config.SlotsPerEpoch - targetSlot = (targetSlotEpoch+1)*eth2Config.SlotsPerEpoch - 1 // The target slot becomes the last one in the Epoch - - return true, targetSlot, uint64(intervalsPassed), startTime, endTime, nil -} - -// Get the actual slot to be used for a rewards interval submission instead of the naively-determined one -// NOTE: only call this once the required epoch (targetSlotEpoch + 1) has been finalized -func (t *submitRewardsTree_Rolling) getTrueRewardsIntervalSubmissionSlot(targetSlot uint64) (uint64, uint64, error) { - // Get the first successful block - for { - // Try to get the current block - block, exists, err := t.bc.GetBeaconBlock(fmt.Sprint(targetSlot)) - if err != nil { - return 0, 0, fmt.Errorf("error getting Beacon block %d: %w", targetSlot, err) - } - - // If the block was missing, try the previous one - if !exists { - t.log.Printlnf("%s Slot %d was missing, trying the previous one...", t.logPrefix, targetSlot) - targetSlot-- - } else { - // Ok, we have the first proposed finalized 
block - this is the one to use for the snapshot! - return targetSlot, block.ExecutionBlockNumber, nil - } - } -} - -// Checks to see if an existing rewards file is still valid and whether or not it should be regenerated or just resubmitted -func (t *submitRewardsTree_Rolling) isExistingRewardsFileValid(rewardIndex uint64, intervalsPassed uint64, nodeAddress common.Address, isInOdao bool) (*rprewards.LocalRewardsFile, bool, bool) { - rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(rewardIndex, true) - - // Check if the rewards file exists - _, err := os.Stat(rewardsTreePath) - if os.IsNotExist(err) { - return nil, false, true - } - if err != nil { - t.log.Printlnf("%s WARNING: failed to check if [%s] exists: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error()) - return nil, false, true - } - - // The file already exists, attempt to read it - localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePath) - if err != nil { - t.log.Printlnf("%s WARNING: failed to read %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error()) - return nil, false, true - } - - proofWrapper := localRewardsFile.Impl() - header := proofWrapper.GetHeader() - - if isInOdao { - // Save the compressed file and get the CID for it - cid, err := localRewardsFile.CreateCompressedFileAndCid() - if err != nil { - t.log.Printlnf("%s WARNING: failed to get CID for %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error()) - return nil, false, true - } - - // Check if this file has already been submitted - submission := rewards.RewardSubmission{ - RewardIndex: big.NewInt(0).SetUint64(header.Index), - ExecutionBlock: big.NewInt(0).SetUint64(header.ExecutionEndBlock), - ConsensusBlock: big.NewInt(0).SetUint64(header.ConsensusEndBlock), - MerkleRoot: common.HexToHash(header.MerkleRoot), - MerkleTreeCID: cid.String(), - IntervalsPassed: big.NewInt(0).SetUint64(header.IntervalsPassed), - TreasuryRPL: &header.TotalRewards.ProtocolDaoRpl.Int, - TrustedNodeRPL: []*big.Int{&header.TotalRewards.TotalOracleDaoRpl.Int}, - NodeRPL: []*big.Int{&header.TotalRewards.TotalCollateralRpl.Int}, - NodeETH: []*big.Int{&header.TotalRewards.NodeOperatorSmoothingPoolEth.Int}, - UserETH: &header.TotalRewards.PoolStakerSmoothingPoolEth.Int, - } - - hasSubmitted, err := rewards.GetTrustedNodeSubmittedSpecificRewards(t.rp, nodeAddress, submission, nil) - if err != nil { - t.log.Printlnf("%s WARNING: could not check if node has previously submitted file %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error()) - return nil, false, true - } - if !hasSubmitted { - if header.IntervalsPassed != intervalsPassed { - t.log.Printlnf("%s Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...", t.logPrefix, header.Index, header.IntervalsPassed, intervalsPassed) - return localRewardsFile, false, true - } - t.log.Printlnf("%s Existing file for interval %d has not been submitted yet.", t.logPrefix, header.Index) - return localRewardsFile, false, false - } - } - - // Check if the file's valid (same number of intervals passed as the current time) - if header.IntervalsPassed != intervalsPassed { - t.log.Printlnf("%s Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...", t.logPrefix, header.Index, header.IntervalsPassed, intervalsPassed) - return localRewardsFile, false, true - } - - // File's good and it has the same number of intervals passed, so use it - return localRewardsFile, true, false -} - 
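The (existingRewardsFile, valid, mustRegenerate) triple returned above drives three distinct caller actions. A hedged sketch of that decision table (existingFileAction is an illustrative name, not from the repo):

    func existingFileAction(valid, mustRegenerate bool) string {
        switch {
        case mustRegenerate:
            return "regenerate" // file missing, unreadable, or stale (intervalsPassed changed)
        case valid:
            return "wait" // valid file already submitted; wait for consensus
        default:
            return "resubmit" // file is fine but this node has not submitted it yet
        }
    }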
-// Run a rewards interval report submission -func (t *submitRewardsTree_Rolling) runRewardsIntervalReport(client *rocketpool.RocketPool, state *state.NetworkState, isInOdao bool, intervalsPassed uint64, startTime time.Time, endTime time.Time, mustRegenerate bool, existingRewardsFile *rprewards.LocalRewardsFile) error { - // Prep the record for reporting - err := t.recordMgr.PrepareRecordForReport(state) - if err != nil { - return fmt.Errorf("error preparing record for report: %w", err) - } - - // Initialize some variables - snapshotBeaconBlock := state.BeaconSlotNumber - elBlockNumber := state.ElBlockNumber - - // Get the number of the EL block matching the CL snapshot block - snapshotElBlockHeader, err := t.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) - if err != nil { - return err - } - elBlockIndex := snapshotElBlockHeader.Number.Uint64() - - // Get the current interval - currentIndex := state.NetworkDetails.RewardIndex - currentIndexBig := big.NewInt(0).SetUint64(currentIndex) - - // Get the expected file paths - rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true) - compressedRewardsTreePath := rewardsTreePath + config.RewardsTreeIpfsExtension - minipoolPerformancePath := t.cfg.Smartnode.GetMinipoolPerformancePath(currentIndex, true) - compressedMinipoolPerformancePath := minipoolPerformancePath + config.RewardsTreeIpfsExtension - - // Check if we can reuse an existing file for this interval - if !mustRegenerate { - if !isInOdao { - t.log.Printlnf("%s Node is not in the Oracle DAO, skipping submission for interval %d.", t.logPrefix, currentIndex) - return nil - } - - t.log.Printlnf("%s Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", t.logPrefix, currentIndex, rewardsTreePath) - - // Save the compressed file and get the CID for it - cid, err := existingRewardsFile.CreateCompressedFileAndCid() - if err != nil { - return fmt.Errorf("error getting CID for file %s: %w", compressedRewardsTreePath, err) - } - t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) - - // Submit to the contracts - err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, existingRewardsFile.Impl().GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed))) - if err != nil { - return fmt.Errorf("error submitting rewards snapshot: %w", err) - } - - t.log.Printlnf("%s Successfully submitted rewards snapshot for interval %d.", t.logPrefix, currentIndex) - return nil - } - - // Generate the tree - err = t.generateTree(client, state, intervalsPassed, isInOdao, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath) - if err != nil { - return fmt.Errorf("error generating rewards tree: %w", err) - } - - return nil -} - -// Implementation for rewards tree generation using a viable EC -func (t *submitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, state *state.NetworkState, intervalsPassed uint64, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) error { - - // Log - if intervalsPassed > 1 { - t.log.Printlnf("WARNING: %d intervals have passed since the last rewards checkpoint was submitted! 
Rolling them into one...", intervalsPassed) - } - t.log.Printlnf("Rewards checkpoint has passed, starting Merkle tree generation for interval %d in the background.\n%s Snapshot Beacon block = %d, EL block = %d, running from %s to %s", currentIndex, t.logPrefix, snapshotBeaconBlock, elBlockIndex, startTime, endTime) - - // Generate the rewards file - treegen, err := rprewards.NewTreeGenerator(&t.log, t.logPrefix, rp, t.cfg, t.bc, currentIndex, startTime, endTime, snapshotBeaconBlock, snapshotElBlockHeader, uint64(intervalsPassed), state, t.recordMgr.Record) - if err != nil { - return fmt.Errorf("Error creating Merkle tree generator: %w", err) - } - rewardsFile, err := treegen.GenerateTree() - if err != nil { - return fmt.Errorf("Error generating Merkle tree: %w", err) - } - for address, network := range rewardsFile.GetHeader().InvalidNetworkNodes { - t.printMessage(fmt.Sprintf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network)) - } - - // Serialize the minipool performance file - localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile]( - rewardsFile.GetMinipoolPerformanceFile(), - minipoolPerformancePath, - ) - err = localMinipoolPerformanceFile.Write() - if err != nil { - return fmt.Errorf("Error serializing minipool performance file into JSON: %w", err) - } - - if nodeTrusted { - minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() - if err != nil { - return fmt.Errorf("Error getting the CID for file %s: %w", compressedMinipoolPerformancePath, err) - } - t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid)) - rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String()) - } else { - t.printMessage("Saved minipool performance file.") - rewardsFile.SetMinipoolPerformanceFileCID("---") - } - - // Serialize the rewards tree to JSON - localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile]( - rewardsFile, - rewardsTreePath, - ) - t.printMessage("Generation complete! 
Saving tree...") - - // Write the rewards tree to disk - err = localRewardsFile.Write() - if err != nil { - return fmt.Errorf("Error saving rewards tree file to %s: %w", rewardsTreePath, err) - } - - if nodeTrusted { - cid, err := localRewardsFile.CreateCompressedFileAndCid() - if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err) - } - t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) - // Submit to the contracts - err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed))) - if err != nil { - return fmt.Errorf("Error submitting rewards snapshot: %w", err) - } - - t.printMessage(fmt.Sprintf("Successfully submitted rewards snapshot for interval %d.", currentIndex)) - } else { - t.printMessage(fmt.Sprintf("Successfully generated rewards snapshot for interval %d.", currentIndex)) - } - - return nil - -} - -// Submit rewards info to the contracts -func (t *submitRewardsTree_Rolling) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFileHeader *rprewards.RewardsFileHeader, cid string, intervalsPassed *big.Int) error { - - treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFileHeader.MerkleRoot)) - if err != nil { - return fmt.Errorf("Error decoding merkle root: %w", err) - } - treeRoot := common.BytesToHash(treeRootBytes) - - // Create the arrays of rewards per network - collateralRplRewards := []*big.Int{} - oDaoRplRewards := []*big.Int{} - smoothingPoolEthRewards := []*big.Int{} - - // Create the total rewards for each network - network := uint64(0) - for { - networkRewards, exists := rewardsFileHeader.NetworkRewards[network] - if !exists { - break - } - - collateralRplRewards = append(collateralRplRewards, &networkRewards.CollateralRpl.Int) - oDaoRplRewards = append(oDaoRplRewards, &networkRewards.OracleDaoRpl.Int) - smoothingPoolEthRewards = append(smoothingPoolEthRewards, &networkRewards.SmoothingPoolEth.Int) - - network++ - } - - // Get transactor - opts, err := t.w.GetNodeAccountTransactor() - if err != nil { - return err - } - - // Create the submission - submission := rewards.RewardSubmission{ - RewardIndex: index, - ExecutionBlock: big.NewInt(0).SetUint64(executionBlock), - ConsensusBlock: big.NewInt(0).SetUint64(consensusBlock), - MerkleRoot: treeRoot, - MerkleTreeCID: cid, - IntervalsPassed: intervalsPassed, - TreasuryRPL: &rewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int, - NodeRPL: collateralRplRewards, - TrustedNodeRPL: oDaoRplRewards, - NodeETH: smoothingPoolEthRewards, - UserETH: &rewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int, - } - - // Get the gas limit - gasInfo, err := rewards.EstimateSubmitRewardSnapshotGas(t.rp, submission, opts) - if err != nil { - if enableSubmissionAfterConsensus_RewardsTree && strings.Contains(err.Error(), "Can only submit snapshot for next period") { - // Set a gas limit which will intentionally be too low and revert - gasInfo = rocketpool.GasInfo{ - EstGasLimit: utils.RewardsSubmissionForcedGas, - SafeGasLimit: utils.RewardsSubmissionForcedGas, - } - t.log.Println("Rewards period consensus has already been reached but submitting anyway for the health check.") - } else { - return fmt.Errorf("Could not estimate the gas required to submit the rewards tree: %w", err) - } - } - - // Print the gas info - maxFee := eth.GweiToWei(utils.GetWatchtowerMaxFee(t.cfg)) - if !api.PrintAndCheckGasInfo(gasInfo, 
false, 0, &t.log, maxFee, 0) { - return nil - } - - opts.GasFeeCap = maxFee - opts.GasTipCap = eth.GweiToWei(utils.GetWatchtowerPrioFee(t.cfg)) - opts.GasLimit = gasInfo.SafeGasLimit - - // Submit RPL price - hash, err := rewards.SubmitRewardSnapshot(t.rp, submission, opts) - if err != nil { - return err - } - - // Print TX info and wait for it to be included in a block - err = api.PrintAndWaitForTransaction(t.cfg, hash, t.rp.Client, &t.log) - if err != nil { - return err - } - - // Return - return nil -} diff --git a/rocketpool/watchtower/submit-rewards-tree-stateless.go b/rocketpool/watchtower/submit-rewards-tree-stateless.go index ca592644b..5e84089b9 100644 --- a/rocketpool/watchtower/submit-rewards-tree-stateless.go +++ b/rocketpool/watchtower/submit-rewards-tree-stateless.go @@ -155,10 +155,12 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network } // Get the block and timestamp of the consensus block that best matches the end time - snapshotBeaconBlock, elBlockNumber, err := t.getSnapshotConsensusBlock(endTime, state) + snapshotEnd, err := t.getSnapshotEnd(endTime, state) if err != nil { return err } + snapshotBeaconBlock := snapshotEnd.ConsensusBlock + elBlockNumber := snapshotEnd.ExecutionBlock // Get the number of the EL block matching the CL snapshot block snapshotElBlockHeader, err := t.ec.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) @@ -181,15 +183,13 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network t.lock.Unlock() // Get the expected file paths - rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true) - compressedRewardsTreePath := rewardsTreePath + config.RewardsTreeIpfsExtension - minipoolPerformancePath := t.cfg.Smartnode.GetMinipoolPerformancePath(currentIndex, true) - compressedMinipoolPerformancePath := minipoolPerformancePath + config.RewardsTreeIpfsExtension + rewardsTreePathJSON := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionJSON) + compressedRewardsTreePathJSON := rewardsTreePathJSON + config.RewardsTreeIpfsExtension // Check if we can reuse an existing file for this interval - if t.isExistingRewardsFileValid(rewardsTreePath, uint64(intervalsPassed)) { + if t.isExistingRewardsFileValid(rewardsTreePathJSON, uint64(intervalsPassed)) { if !nodeTrusted { - t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s.", currentIndex, rewardsTreePath) + t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s.", currentIndex, rewardsTreePathJSON) return nil } @@ -202,10 +202,10 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network return nil } - t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", currentIndex, rewardsTreePath) + t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", currentIndex, rewardsTreePathJSON) // Deserialize the file - localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePath) + localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePathJSON) if err != nil { return fmt.Errorf("Error reading rewards tree file: %w", err) } @@ -213,15 +213,15 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network proofWrapper := localRewardsFile.Impl() // Save the compressed file and get the CID for it - cid, err := localRewardsFile.CreateCompressedFileAndCid() + _, cid, err := 
localRewardsFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err) + return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePathJSON, err) } t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) // Submit to the contracts - err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, proofWrapper.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed))) + err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, proofWrapper, cid.String(), big.NewInt(int64(intervalsPassed))) if err != nil { return fmt.Errorf("Error submitting rewards snapshot: %w", err) } @@ -231,7 +231,7 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network } // Generate the tree - t.generateTree(intervalsPassed, nodeTrusted, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath) + t.generateTree(intervalsPassed, nodeTrusted, currentIndex, snapshotEnd, elBlockIndex, startTime, endTime, snapshotElBlockHeader) // Done return nil @@ -268,9 +268,12 @@ func (t *submitRewardsTree_Stateless) isExistingRewardsFileValid(rewardsTreePath // Compare the number of intervals in it with the current number of intervals proofWrapper := localRewardsFile.Impl() - header := proofWrapper.GetHeader() - if header.IntervalsPassed != intervalsPassed { - t.log.Printlnf("Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...\n", header.Index, header.IntervalsPassed, intervalsPassed) + if proofWrapper.GetIntervalsPassed() != intervalsPassed { + t.log.Printlnf("Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...\n", + proofWrapper.GetIndex(), + proofWrapper.GetIntervalsPassed(), + intervalsPassed, + ) return false } @@ -280,7 +283,7 @@ func (t *submitRewardsTree_Stateless) isExistingRewardsFileValid(rewardsTreePath } // Kick off the tree generation goroutine -func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) { +func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotEnd *rprewards.SnapshotEnd, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header) { go func() { t.lock.Lock() @@ -295,7 +298,7 @@ func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration } // Generate the tree - err = t.generateTreeImpl(client, intervalsPassed, nodeTrusted, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath) + err = t.generateTreeImpl(client, intervalsPassed, nodeTrusted, currentIndex, snapshotEnd, elBlockIndex, startTime, endTime, snapshotElBlockHeader) if err != nil { t.handleError(err) } @@ -308,7 +311,8 @@ func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration } // Implementation for rewards tree generation using a 
viable EC -func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool, intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) error { +func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool, intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotEnd *rprewards.SnapshotEnd, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header) error { + snapshotBeaconBlock := snapshotEnd.ConsensusBlock // Log if uint64(intervalsPassed) > 1 { @@ -317,10 +321,7 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool t.log.Printlnf("Rewards checkpoint has passed, starting Merkle tree generation for interval %d in the background.\n%s Snapshot Beacon block = %d, EL block = %d, running from %s to %s", currentIndex, t.generationPrefix, snapshotBeaconBlock, elBlockIndex, startTime, endTime) // Create a new state gen manager - mgr, err := state.NewNetworkStateManager(rp, t.cfg, rp.Client, t.bc, t.log) - if err != nil { - return fmt.Errorf("error creating network state manager for EL block %d, Beacon slot %d: %w", elBlockIndex, snapshotBeaconBlock, err) - } + mgr := state.NewNetworkStateManager(rp, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, t.log) // Create a new state for the target block state, err := mgr.GetStateForSlot(snapshotBeaconBlock) @@ -329,65 +330,34 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool } // Generate the rewards file - treegen, err := rprewards.NewTreeGenerator(t.log, t.generationPrefix, rp, t.cfg, t.bc, currentIndex, startTime, endTime, snapshotBeaconBlock, snapshotElBlockHeader, uint64(intervalsPassed), state, nil) + treegen, err := rprewards.NewTreeGenerator(t.log, t.generationPrefix, rprewards.NewRewardsExecutionClient(rp), t.cfg, t.bc, currentIndex, startTime, endTime, snapshotEnd, snapshotElBlockHeader, uint64(intervalsPassed), state) if err != nil { return fmt.Errorf("Error creating Merkle tree generator: %w", err) } - rewardsFile, err := treegen.GenerateTree() + treeResult, err := treegen.GenerateTree() if err != nil { return fmt.Errorf("Error generating Merkle tree: %w", err) } - for address, network := range rewardsFile.GetHeader().InvalidNetworkNodes { + rewardsFile := treeResult.RewardsFile + for address, network := range treeResult.InvalidNetworkNodes { t.printMessage(fmt.Sprintf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network)) } - // Serialize the minipool performance file - localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile]( - rewardsFile.GetMinipoolPerformanceFile(), - minipoolPerformancePath, - ) - - // Write it to disk - err = localMinipoolPerformanceFile.Write() + // Save the files + t.printMessage("Generation complete! 
Saving files...") + cid, cids, err := treegen.SaveFiles(treeResult, nodeTrusted) if err != nil { - return fmt.Errorf("Error saving minipool performance file to %s: %w", minipoolPerformancePath, err) - } - - if nodeTrusted { - minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() - if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedMinipoolPerformancePath, err) - } - t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid)) - rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String()) - } else { - t.printMessage("Saved minipool performance file.") - rewardsFile.SetMinipoolPerformanceFileCID("---") + return fmt.Errorf("Error writing rewards artifacts to disk: %w", err) } - - // Serialize the rewards tree to JSON - localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile]( - rewardsFile, - rewardsTreePath, - ) - t.printMessage("Generation complete! Saving tree...") - - // Write the rewards tree to disk - err = localRewardsFile.Write() - if err != nil { - return fmt.Errorf("Error saving rewards tree file to %s: %w", rewardsTreePath, err) + for filename, cid := range cids { + t.printMessage(fmt.Sprintf("\t%s - CID %s", filename, cid.String())) } if nodeTrusted { - // Save the compressed file and get the CID for it - cid, err := localRewardsFile.CreateCompressedFileAndCid() - if err != nil { - return fmt.Errorf("Error getting CID for file %s : %w", rewardsTreePath, err) - } t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) // Submit to the contracts - err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed))) + err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile, cid.String(), big.NewInt(int64(intervalsPassed))) if err != nil { return fmt.Errorf("Error submitting rewards snapshot: %w", err) } @@ -402,9 +372,9 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool } // Submit rewards info to the contracts -func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFileHeader *rprewards.RewardsFileHeader, cid string, intervalsPassed *big.Int) error { +func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFile rprewards.IRewardsFile, cid string, intervalsPassed *big.Int) error { - treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFileHeader.MerkleRoot)) + treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFile.GetMerkleRoot())) if err != nil { return fmt.Errorf("Error decoding merkle root: %w", err) } @@ -416,18 +386,11 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons smoothingPoolEthRewards := []*big.Int{} // Create the total rewards for each network - network := uint64(0) - for { - networkRewards, exists := rewardsFileHeader.NetworkRewards[network] - if !exists { - break - } + for network := uint64(0); rewardsFile.HasRewardsForNetwork(network); network++ { - collateralRplRewards = append(collateralRplRewards, &networkRewards.CollateralRpl.Int) - oDaoRplRewards = append(oDaoRplRewards, &networkRewards.OracleDaoRpl.Int) - smoothingPoolEthRewards = append(smoothingPoolEthRewards, &networkRewards.SmoothingPoolEth.Int) - - network++ + 
collateralRplRewards = append(collateralRplRewards, rewardsFile.GetNetworkCollateralRpl(network)) + oDaoRplRewards = append(oDaoRplRewards, rewardsFile.GetNetworkOracleDaoRpl(network)) + smoothingPoolEthRewards = append(smoothingPoolEthRewards, rewardsFile.GetNetworkSmoothingPoolEth(network)) } // Get transactor @@ -444,11 +407,11 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons MerkleRoot: treeRoot, MerkleTreeCID: cid, IntervalsPassed: intervalsPassed, - TreasuryRPL: &rewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int, + TreasuryRPL: rewardsFile.GetTotalProtocolDaoRpl(), NodeRPL: collateralRplRewards, TrustedNodeRPL: oDaoRplRewards, NodeETH: smoothingPoolEthRewards, - UserETH: &rewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int, + UserETH: rewardsFile.GetTotalPoolStakerSmoothingPoolEth(), } // Get the gas limit @@ -493,12 +456,12 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons } // Get the first finalized, successful consensus block that occurred after the given target time -func (t *submitRewardsTree_Stateless) getSnapshotConsensusBlock(endTime time.Time, state *state.NetworkState) (uint64, uint64, error) { +func (t *submitRewardsTree_Stateless) getSnapshotEnd(endTime time.Time, state *state.NetworkState) (*rprewards.SnapshotEnd, error) { // Get the beacon head beaconHead, err := t.bc.GetBeaconHead() if err != nil { - return 0, 0, fmt.Errorf("Error getting Beacon head: %w", err) + return nil, fmt.Errorf("Error getting Beacon head: %w", err) } // Get the target block number @@ -512,7 +475,11 @@ func (t *submitRewardsTree_Stateless) getSnapshotConsensusBlock(endTime time.Tim // Check if the required epoch is finalized yet if beaconHead.FinalizedEpoch < requiredEpoch { - return 0, 0, fmt.Errorf("Snapshot end time = %s, slot (epoch) = %d (%d)... waiting until epoch %d is finalized (currently %d).", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch) + return nil, fmt.Errorf("Snapshot end time = %s, slot (epoch) = %d (%d)... waiting until epoch %d is finalized (currently %d).", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch) + } + + out := &rprewards.SnapshotEnd{ + Slot: targetSlot, } // Get the first successful block @@ -520,19 +487,23 @@ func (t *submitRewardsTree_Stateless) getSnapshotConsensusBlock(endTime time.Tim // Try to get the current block block, exists, err := t.bc.GetBeaconBlock(fmt.Sprint(targetSlot)) if err != nil { - return 0, 0, fmt.Errorf("Error getting Beacon block %d: %w", targetSlot, err) + return nil, fmt.Errorf("Error getting Beacon block %d: %w", targetSlot, err) } // If the block was missing, try the previous one if !exists { t.log.Printlnf("Slot %d was missing, trying the previous one...", targetSlot) targetSlot-- - } else { - // Ok, we have the first proposed finalized block - this is the one to use for the snapshot! - return targetSlot, block.ExecutionBlockNumber, nil + continue } + + // Ok, we have the first proposed finalized block - this is the one to use for the snapshot! 
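The slot walk-back this hunk refactors is self-contained enough to sketch on its own, against the beacon.Client interface declared later in this diff; firstProposedSlotAtOrBefore is an illustrative name and fmt is assumed imported:

    func firstProposedSlotAtOrBefore(bc beacon.Client, target uint64) (uint64, uint64, error) {
        for slot := target; ; slot-- {
            block, exists, err := bc.GetBeaconBlock(fmt.Sprint(slot))
            if err != nil {
                return 0, 0, fmt.Errorf("error getting Beacon block %d: %w", slot, err)
            }
            if exists {
                // First proposed block at or before the target; its EL block anchors the snapshot.
                return slot, block.ExecutionBlockNumber, nil
            }
        }
    }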
+ out.ConsensusBlock = targetSlot + out.ExecutionBlock = block.ExecutionBlockNumber + break } + return out, nil } // Check whether the rewards tree for the current interval been submitted by the node diff --git a/rocketpool/watchtower/watchtower.go b/rocketpool/watchtower/watchtower.go index ae5739385..c8f63bff6 100644 --- a/rocketpool/watchtower/watchtower.go +++ b/rocketpool/watchtower/watchtower.go @@ -95,12 +95,6 @@ func run(c *cli.Context) error { fmt.Println("Starting watchtower daemon in Docker Mode.") } - // Check if rolling records are enabled - useRollingRecords := cfg.Smartnode.UseRollingRecords.Value.(bool) - if useRollingRecords { - fmt.Println("***NOTE: EXPERIMENTAL ROLLING RECORDS ARE ENABLED, BE ADVISED!***") - } - // Initialize the metrics reporters scrubCollector := collectors.NewScrubCollector() bondReductionCollector := collectors.NewBondReductionCollector() @@ -111,10 +105,7 @@ func run(c *cli.Context) error { updateLog := log.NewColorLogger(UpdateColor) // Create the state manager - m, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, &updateLog) - if err != nil { - return err - } + m := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, &updateLog) // Get the node address nodeAccount, err := w.GetNodeAccount() @@ -144,17 +135,9 @@ func run(c *cli.Context) error { return fmt.Errorf("error during scrub check: %w", err) } var submitRewardsTree_Stateless *submitRewardsTree_Stateless - var submitRewardsTree_Rolling *submitRewardsTree_Rolling - if !useRollingRecords { - submitRewardsTree_Stateless, err = newSubmitRewardsTree_Stateless(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m) - if err != nil { - return fmt.Errorf("error during stateless rewards tree check: %w", err) - } - } else { - submitRewardsTree_Rolling, err = newSubmitRewardsTree_Rolling(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m) - if err != nil { - return fmt.Errorf("error during rolling rewards tree check: %w", err) - } + submitRewardsTree_Stateless, err = newSubmitRewardsTree_Stateless(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m) + if err != nil { + return fmt.Errorf("error during stateless rewards tree check: %w", err) } /*processPenalties, err := newProcessPenalties(c, log.NewColorLogger(ProcessPenaltiesColor), errorLog) if err != nil { @@ -251,19 +234,11 @@ func run(c *cli.Context) error { } time.Sleep(taskCooldown) - if !useRollingRecords { - // Run the rewards tree submission check - if err := submitRewardsTree_Stateless.Run(isOnOdao, state, latestBlock.Slot); err != nil { - errorLog.Println(err) - } - time.Sleep(taskCooldown) - } else { - // Run the network balance and rewards tree submission check - if err := submitRewardsTree_Rolling.run(state); err != nil { - errorLog.Println(err) - } - time.Sleep(taskCooldown) + // Run the rewards tree submission check + if err := submitRewardsTree_Stateless.Run(isOnOdao, state, latestBlock.Slot); err != nil { + errorLog.Println(err) } + time.Sleep(taskCooldown) // Run the price submission check if err := submitRplPrice.run(state); err != nil { @@ -307,18 +282,9 @@ func run(c *cli.Context) error { }*/ // DISABLED until MEV-Boost can support it } else { - /* - */ - if !useRollingRecords { - // Run the rewards tree submission check - if err := submitRewardsTree_Stateless.Run(isOnOdao, nil, latestBlock.Slot); err != nil { - errorLog.Println(err) - } - } else { - // Run the network balance and rewards tree submission check - if err := submitRewardsTree_Rolling.run(nil); err != nil { - 
errorLog.Println(err) - } + // Run the rewards tree submission check + if err := submitRewardsTree_Stateless.Run(isOnOdao, nil, latestBlock.Slot); err != nil { + errorLog.Println(err) } } diff --git a/shared/services/alerting/alertmanager/client/alertgroup/get_alert_groups_parameters.go b/shared/services/alerting/alertmanager/client/alertgroup/get_alert_groups_parameters.go index 8a5610869..b7abeb342 100644 --- a/shared/services/alerting/alertmanager/client/alertgroup/get_alert_groups_parameters.go +++ b/shared/services/alerting/alertmanager/client/alertgroup/get_alert_groups_parameters.go @@ -316,7 +316,7 @@ func (o *GetAlertGroupsParams) WriteToRequest(r runtime.ClientRequest, reg strfm return nil } -// bindParamGetAlertGroups binds the parameter filter +// bindParamFilter binds the parameter filter func (o *GetAlertGroupsParams) bindParamFilter(formats strfmt.Registry) []string { filterIR := o.Filter diff --git a/shared/services/bc-manager.go b/shared/services/bc-manager.go index 17f01147a..c9207c898 100644 --- a/shared/services/bc-manager.go +++ b/shared/services/bc-manager.go @@ -2,6 +2,7 @@ package services import ( "fmt" + "math/big" "strings" "github.com/ethereum/go-ethereum/common" @@ -12,6 +13,7 @@ import ( "github.com/rocket-pool/smartnode/shared/services/config" "github.com/rocket-pool/smartnode/shared/types/api" cfgtypes "github.com/rocket-pool/smartnode/shared/types/config" + "github.com/rocket-pool/smartnode/shared/types/eth2" "github.com/rocket-pool/smartnode/shared/utils/log" ) @@ -182,6 +184,17 @@ func (m *BeaconClientManager) GetBeaconHead() (beacon.BeaconHead, error) { return result.(beacon.BeaconHead), nil } +// Get the Beacon State information +func (m *BeaconClientManager) GetBeaconState(slot uint64) (*eth2.BeaconStateDeneb, error) { + result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { + return client.GetBeaconState(slot) + }) + if err != nil { + return nil, err + } + return result.(*eth2.BeaconStateDeneb), nil +} + // Get a validator's status by its index func (m *BeaconClientManager) GetValidatorStatusByIndex(index string, opts *beacon.ValidatorStatusOptions) (beacon.ValidatorStatus, error) { result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { @@ -308,6 +321,28 @@ func (m *BeaconClientManager) ChangeWithdrawalCredentials(validatorIndex string, return nil } +// Get the validator balances for a set of validators at a given slot, with backoff. 
+func (m *BeaconClientManager) GetValidatorBalancesSafe(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { + return client.GetValidatorBalancesSafe(indices, opts) + }) + if err != nil { + return nil, err + } + return result.(map[string]*big.Int), nil +} + +// Get the validator balances for a set of validators at a given slot +func (m *BeaconClientManager) GetValidatorBalances(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { + return client.GetValidatorBalances(indices, opts) + }) + if err != nil { + return nil, err + } + return result.(map[string]*big.Int), nil +} + /// ================== /// Internal Functions /// ================== diff --git a/shared/services/beacon/client.go b/shared/services/beacon/client.go index f60b6a943..861abb9a1 100644 --- a/shared/services/beacon/client.go +++ b/shared/services/beacon/client.go @@ -1,9 +1,12 @@ package beacon import ( + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/prysmaticlabs/go-bitfield" "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/types/eth2" ) // API request options @@ -17,16 +20,6 @@ type SyncStatus struct { Syncing bool Progress float64 } -type Eth2Config struct { - GenesisForkVersion []byte - GenesisValidatorsRoot []byte - GenesisEpoch uint64 - GenesisTime uint64 - SecondsPerSlot uint64 - SlotsPerEpoch uint64 - SecondsPerEpoch uint64 - EpochsPerSyncCommitteePeriod uint64 -} type Eth2DepositContract struct { ChainID uint64 Address common.Address @@ -38,24 +31,29 @@ type BeaconHead struct { PreviousJustifiedEpoch uint64 } type ValidatorStatus struct { - Pubkey types.ValidatorPubkey - Index string - WithdrawalCredentials common.Hash - Balance uint64 - Status ValidatorState - EffectiveBalance uint64 - Slashed bool - ActivationEligibilityEpoch uint64 - ActivationEpoch uint64 - ExitEpoch uint64 - WithdrawableEpoch uint64 - Exists bool + Pubkey types.ValidatorPubkey `json:"pubkey"` + Index string `json:"index"` + WithdrawalCredentials common.Hash `json:"withdrawal_credentials"` + Balance uint64 `json:"balance"` + Status ValidatorState `json:"status"` + EffectiveBalance uint64 `json:"effective_balance"` + Slashed bool `json:"slashed"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` + ActivationEpoch uint64 `json:"activation_epoch"` + ExitEpoch uint64 `json:"exit_epoch"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + Exists bool `json:"exists"` } type Eth1Data struct { DepositRoot common.Hash DepositCount uint64 BlockHash common.Hash } +type WithdrawalInfo struct { + ValidatorIndex string + Address common.Address + Amount *big.Int +} type BeaconBlock struct { Slot uint64 ProposerIndex string @@ -63,6 +61,7 @@ type BeaconBlock struct { Attestations []AttestationInfo FeeRecipient common.Address ExecutionBlockNumber uint64 + Withdrawals []WithdrawalInfo } type BeaconBlockHeader struct { Slot uint64 @@ -140,12 +139,15 @@ type Client interface { GetBeaconBlock(blockId string) (BeaconBlock, bool, error) GetBeaconBlockHeader(blockId string) (BeaconBlockHeader, bool, error) GetBeaconHead() (BeaconHead, error) + GetBeaconState(slot uint64) (*eth2.BeaconStateDeneb, error) GetValidatorStatusByIndex(index string, opts *ValidatorStatusOptions) (ValidatorStatus, error) GetValidatorStatus(pubkey types.ValidatorPubkey, opts 
*ValidatorStatusOptions) (ValidatorStatus, error) GetValidatorStatuses(pubkeys []types.ValidatorPubkey, opts *ValidatorStatusOptions) (map[types.ValidatorPubkey]ValidatorStatus, error) GetValidatorIndex(pubkey types.ValidatorPubkey) (string, error) GetValidatorSyncDuties(indices []string, epoch uint64) (map[string]bool, error) GetValidatorProposerDuties(indices []string, epoch uint64) (map[string]uint64, error) + GetValidatorBalances(indices []string, opts *ValidatorStatusOptions) (map[string]*big.Int, error) + GetValidatorBalancesSafe(indices []string, opts *ValidatorStatusOptions) (map[string]*big.Int, error) GetDomainData(domainType []byte, epoch uint64, useGenesisFork bool) ([]byte, error) ExitValidator(validatorIndex string, epoch uint64, signature types.ValidatorSignature) error Close() error diff --git a/shared/services/beacon/client/std-http-client.go b/shared/services/beacon/client/std-http-client.go index a8729ec21..dda58958e 100644 --- a/shared/services/beacon/client/std-http-client.go +++ b/shared/services/beacon/client/std-http-client.go @@ -5,10 +5,13 @@ import ( "encoding/hex" "fmt" "io" + "math/big" "net/http" + "slices" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -19,14 +22,16 @@ import ( "golang.org/x/sync/errgroup" "github.com/rocket-pool/smartnode/shared/services/beacon" + beacontypes "github.com/rocket-pool/smartnode/shared/types/eth2" "github.com/rocket-pool/smartnode/shared/utils/eth2" hexutil "github.com/rocket-pool/smartnode/shared/utils/hex" ) // Config const ( - RequestUrlFormat = "%s%s" - RequestContentType = "application/json" + RequestUrlFormat = "%s%s" + RequestJsonContentType = "application/json" + RequestSSZContentType = "application/octet-stream" RequestSyncStatusPath = "/eth/v1/node/syncing" RequestEth2ConfigPath = "/eth/v1/config/spec" @@ -36,10 +41,12 @@ const ( RequestFinalityCheckpointsPath = "/eth/v1/beacon/states/%s/finality_checkpoints" RequestForkPath = "/eth/v1/beacon/states/%s/fork" RequestValidatorsPath = "/eth/v1/beacon/states/%s/validators" + RequestValidatorBalancesPath = "/eth/v1/beacon/states/%s/validator_balances" RequestVoluntaryExitPath = "/eth/v1/beacon/pool/voluntary_exits" RequestAttestationsPath = "/eth/v1/beacon/blocks/%s/attestations" RequestBeaconBlockPath = "/eth/v2/beacon/blocks/%s" RequestBeaconBlockHeaderPath = "/eth/v1/beacon/headers/%s" + RequestBeaconStatePath = "/eth/v2/debug/beacon/states/%d" RequestValidatorSyncDuties = "/eth/v1/validator/duties/sync/%s" RequestValidatorProposerDuties = "/eth/v1/validator/duties/proposer/%s" RequestWithdrawalCredentialsChangePath = "/eth/v1/beacon/pool/bls_to_execution_changes" @@ -90,9 +97,18 @@ func (c *StandardHttpClient) GetSyncStatus() (beacon.SyncStatus, error) { } +var eth2ConfigCache atomic.Pointer[beacon.Eth2Config] + // Get the eth2 config +// cache it for future requests func (c *StandardHttpClient) GetEth2Config() (beacon.Eth2Config, error) { + // Check the cache + cached := eth2ConfigCache.Load() + if cached != nil { + return *cached, nil + } + // Data var wg errgroup.Group var eth2Config Eth2ConfigResponse @@ -117,8 +133,8 @@ func (c *StandardHttpClient) GetEth2Config() (beacon.Eth2Config, error) { return beacon.Eth2Config{}, err } - // Return response - return beacon.Eth2Config{ + // Save the result + out := beacon.Eth2Config{ GenesisForkVersion: genesis.Data.GenesisForkVersion, GenesisValidatorsRoot: genesis.Data.GenesisValidatorsRoot, GenesisEpoch: 0, @@ -127,8 +143,11 @@ func (c *StandardHttpClient) GetEth2Config() 
(beacon.Eth2Config, error) { SlotsPerEpoch: uint64(eth2Config.Data.SlotsPerEpoch), SecondsPerEpoch: uint64(eth2Config.Data.SecondsPerSlot * eth2Config.Data.SlotsPerEpoch), EpochsPerSyncCommitteePeriod: uint64(eth2Config.Data.EpochsPerSyncCommitteePeriod), - }, nil + } + eth2ConfigCache.Store(&out) + // Return + return out, nil } // Get the eth2 deposit contract info @@ -231,6 +250,114 @@ func (c *StandardHttpClient) getValidatorStatus(pubkeyOrIndex string, opts *beac } +// Get multiple validators' balances +func (c *StandardHttpClient) GetValidatorBalances(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + + // Get state ID + var stateId string + if opts == nil { + stateId = "head" + } else if opts.Slot != nil { + stateId = strconv.FormatInt(int64(*opts.Slot), 10) + } else if opts.Epoch != nil { + + // Get eth2 config + eth2Config, err := c.getEth2Config() + if err != nil { + return nil, err + } + + // Get slot number + slot := *opts.Epoch * uint64(eth2Config.Data.SlotsPerEpoch) + stateId = strconv.FormatInt(int64(slot), 10) + + } else { + return nil, fmt.Errorf("must specify a slot or epoch when calling GetValidatorBalances") + } + + count := len(indices) + data := make(map[string]*big.Int, count) + for i := 0; i < count; i += MaxRequestValidatorsCount { + i := i + max := i + MaxRequestValidatorsCount + if max > count { + max = count + } + + // Get & add validators + batch := indices[i:max] + balances, err := c.getValidatorBalances(stateId, batch) + if err != nil { + return nil, fmt.Errorf("error getting validator balances: %w", err) + } + for _, balance := range balances.Data { + b, ok := big.NewInt(0).SetString(balance.Balance, 10) + if !ok { + return nil, fmt.Errorf("invalid balance: %s", balance.Balance) + } + // Beacon clients return Gwei, but we want wei + b.Mul(b, big.NewInt(1e9)) + + data[balance.Index] = b + } + } + + // Return + return data, nil +} + +// GetValidatorBalancesSafe returns the balances of the validators +// In order to avoid thrashing the bn, when opts.Slot is provided, +// we will preflight the balance query with a sync query, and ensure that the +// bn has not entered optimistic sync due to being unable to provide forkchoice updates, +// and that the current head is a recent slot. +func (c *StandardHttpClient) GetValidatorBalancesSafe(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + // Filter out empty indices + indices = slices.DeleteFunc(indices, func(index string) bool { + return index == "" + }) + + beaconConfig, err := c.GetEth2Config() + if err != nil { + return nil, err + } + // Check the current head + safe := false + for i := 0; i < 30; i++ { + syncStatus, err := c.getSyncStatus() + if err != nil { + // If we get an error, wait and try again + time.Sleep(1 * time.Second) + continue + } + if syncStatus.Data.IsSyncing { + // If the bn is still syncing, wait and try again + time.Sleep(1 * time.Second) + continue + } + if syncStatus.Data.ELOffline { + // If the EL is offline, wait and try again + time.Sleep(1 * time.Second) + continue + } + // Check that the head is no more than 2 slots behind the current time.
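Both balance paths above scale the Beacon API's decimal Gwei strings to wei. The diff inlines that conversion; factored into a standalone helper it would look roughly like this (gweiStringToWei is an illustrative name, with fmt and math/big assumed imported):

    func gweiStringToWei(s string) (*big.Int, error) {
        // /eth/v1/beacon/states/{state}/validator_balances returns decimal Gwei strings
        gwei, ok := new(big.Int).SetString(s, 10)
        if !ok {
            return nil, fmt.Errorf("invalid balance: %s", s)
        }
        return gwei.Mul(gwei, big.NewInt(1e9)), nil // 1 Gwei = 1e9 wei
    }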
+ if beaconConfig.GetSlotTime(uint64(syncStatus.Data.HeadSlot)).Add(2 * time.Second * time.Duration(beaconConfig.SecondsPerSlot)).Before(time.Now()) { + // If the head is too far behind, wait and try again + time.Sleep(1 * time.Second) + continue + } + + safe = true + break + } + if !safe { + return nil, fmt.Errorf("bn is not in sync after 30 seconds") + } + + // Get the balances + return c.GetValidatorBalances(indices, opts) +} + // Get multiple validators' statuses func (c *StandardHttpClient) GetValidatorStatuses(pubkeys []types.ValidatorPubkey, opts *beacon.ValidatorStatusOptions) (map[types.ValidatorPubkey]beacon.ValidatorStatus, error) { @@ -526,6 +653,7 @@ func (c *StandardHttpClient) GetBeaconBlock(blockId string) (beacon.BeaconBlock, } // Add attestation info + beaconBlock.Attestations = make([]beacon.AttestationInfo, 0, len(block.Data.Message.Body.Attestations)) for i, attestation := range block.Data.Message.Body.Attestations { bitString := hexutil.RemovePrefix(attestation.AggregationBits) info := beacon.AttestationInfo{ @@ -539,6 +667,22 @@ func (c *StandardHttpClient) GetBeaconBlock(blockId string) (beacon.BeaconBlock, beaconBlock.Attestations = append(beaconBlock.Attestations, info) } + // Add withdrawals + beaconBlock.Withdrawals = make([]beacon.WithdrawalInfo, 0, len(block.Data.Message.Body.ExecutionPayload.Withdrawals)) + for _, withdrawal := range block.Data.Message.Body.ExecutionPayload.Withdrawals { + amount, ok := new(big.Int).SetString(withdrawal.Amount, 10) + if !ok { + return beacon.BeaconBlock{}, false, fmt.Errorf("Error decoding withdrawal amount for address %s in block %s: %s", withdrawal.Address, blockId, withdrawal.Amount) + } + // amount is in Gwei, but we want wei + amount.Mul(amount, big.NewInt(1e9)) + beaconBlock.Withdrawals = append(beaconBlock.Withdrawals, beacon.WithdrawalInfo{ + ValidatorIndex: withdrawal.ValidatorIndex, + Address: common.BytesToAddress(withdrawal.Address), + Amount: amount, + }) + } + return beaconBlock, true, nil } @@ -676,6 +820,26 @@ func (c *StandardHttpClient) getFork(stateId string) (ForkResponse, error) { return fork, nil } +// Get validator balances +func (c *StandardHttpClient) getValidatorBalances(stateId string, indices []string) (ValidatorBalancesResponse, error) { + var query string + if len(indices) > 0 { + query = fmt.Sprintf("?id=%s", strings.Join(indices, ",")) + } + responseBody, status, err := c.getRequest(fmt.Sprintf(RequestValidatorBalancesPath, stateId) + query) + if err != nil { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not get validator balances: %w", err) + } + if status != http.StatusOK { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not get validator balances: HTTP status %d; response body: '%s'", status, string(responseBody)) + } + var balances ValidatorBalancesResponse + if err := json.Unmarshal(responseBody, &balances); err != nil { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not decode validator balances: %w", err) + } + return balances, nil +} + // Get validators func (c *StandardHttpClient) getValidators(stateId string, pubkeys []string) (ValidatorsResponse, error) { var query string @@ -813,6 +977,23 @@ func (c *StandardHttpClient) getBeaconBlock(blockId string) (BeaconBlockResponse return beaconBlock, true, nil } +// Get the Beacon state for a slot +func (c *StandardHttpClient) GetBeaconState(slot uint64) (*beacontypes.BeaconStateDeneb, error) { + responseBody, status, err := c.getRequestWithContentType(fmt.Sprintf(RequestBeaconStatePath, slot),
RequestSSZContentType) + if err != nil { + return nil, fmt.Errorf("Could not get beacon state data: %w", err) + } + if status != http.StatusOK { + return nil, fmt.Errorf("Could not get beacon state data: HTTP status %d; response body: '%s'", status, string(responseBody)) + } + var beaconState beacontypes.BeaconStateDeneb + if err := beaconState.UnmarshalSSZ(responseBody); err != nil { + return nil, fmt.Errorf("Could not decode beacon state data: %w", err) + } + + return &beaconState, nil +} + // Get the specified beacon block header func (c *StandardHttpClient) getBeaconBlockHeader(blockId string) (BeaconBlockHeaderResponse, bool, error) { responseBody, status, err := c.getRequest(fmt.Sprintf(RequestBeaconBlockHeaderPath, blockId)) @@ -917,9 +1098,21 @@ func (c *StandardHttpClient) postWithdrawalCredentialsChange(request BLSToExecut // Make a GET request but do not read its body yet (allows buffered decoding) func (c *StandardHttpClient) getRequestReader(requestPath string) (io.ReadCloser, int, error) { + return c.getRequestReaderWithContentType(requestPath, RequestJsonContentType) +} +// Make a GET request but do not read its body yet (allows buffered decoding) +func (c *StandardHttpClient) getRequestReaderWithContentType(requestPath string, contentType string) (io.ReadCloser, int, error) { // Send request - response, err := http.Get(fmt.Sprintf(RequestUrlFormat, c.providerAddress, requestPath)) + request, err := http.NewRequest("GET", fmt.Sprintf(RequestUrlFormat, c.providerAddress, requestPath), nil) + if err != nil { + return nil, 0, err + } + request.Header.Set("Accept", contentType) + + client := http.Client{} + + response, err := client.Do(request) if err != nil { return nil, 0, err } @@ -929,9 +1122,14 @@ func (c *StandardHttpClient) getRequestReader(requestPath string) (io.ReadCloser // Make a GET request to the beacon node and read the body of the response func (c *StandardHttpClient) getRequest(requestPath string) ([]byte, int, error) { + return c.getRequestWithContentType(requestPath, RequestJsonContentType) +} + +// Make a GET request to the beacon node and read the body of the response +func (c *StandardHttpClient) getRequestWithContentType(requestPath string, contentType string) ([]byte, int, error) { // Send request - reader, status, err := c.getRequestReader(requestPath) + reader, status, err := c.getRequestReaderWithContentType(requestPath, contentType) if err != nil { return []byte{}, 0, err } @@ -960,7 +1158,7 @@ func (c *StandardHttpClient) postRequest(requestPath string, requestBody interfa requestBodyReader := bytes.NewReader(requestBodyBytes) // Send request - response, err := http.Post(fmt.Sprintf(RequestUrlFormat, c.providerAddress, requestPath), RequestContentType, requestBodyReader) + response, err := http.Post(fmt.Sprintf(RequestUrlFormat, c.providerAddress, requestPath), RequestJsonContentType, requestBodyReader) if err != nil { return []byte{}, 0, err } diff --git a/shared/services/beacon/client/types.go b/shared/services/beacon/client/types.go index e14d0073a..00882fe0a 100644 --- a/shared/services/beacon/client/types.go +++ b/shared/services/beacon/client/types.go @@ -32,9 +32,11 @@ type BLSToExecutionChangeRequest struct { // Response types type SyncStatusResponse struct { Data struct { - IsSyncing bool `json:"is_syncing"` HeadSlot uinteger `json:"head_slot"` SyncDistance uinteger `json:"sync_distance"` + IsSyncing bool `json:"is_syncing"` + IsOptimistic bool `json:"is_optimistic"` + ELOffline bool `json:"el_offline"` } `json:"data"` } type 
Eth2ConfigResponse struct { @@ -94,8 +96,9 @@ type BeaconBlockResponse struct { } `json:"eth1_data"` Attestations []Attestation `json:"attestations"` ExecutionPayload *struct { - FeeRecipient byteArray `json:"fee_recipient"` - BlockNumber uinteger `json:"block_number"` + FeeRecipient byteArray `json:"fee_recipient"` + BlockNumber uinteger `json:"block_number"` + Withdrawals []Withdrawal `json:"withdrawals"` } `json:"execution_payload"` } `json:"body"` } `json:"message"` @@ -114,6 +117,12 @@ type BeaconBlockHeaderResponse struct { } `json:"header"` } `json:"data"` } +type ValidatorBalancesResponse struct { + Data []struct { + Index string `json:"index"` + Balance string `json:"balance"` + } `json:"data"` +} type ValidatorsResponse struct { Data []Validator `json:"data"` } @@ -159,6 +168,13 @@ type Attestation struct { } `json:"data"` } +type Withdrawal struct { + Index string `json:"index"` + ValidatorIndex string `json:"validator_index"` + Address byteArray `json:"address"` + Amount string `json:"amount"` +} + // Unsigned integer type type uinteger uint64 diff --git a/shared/services/beacon/config.go b/shared/services/beacon/config.go new file mode 100644 index 000000000..fd0299fa5 --- /dev/null +++ b/shared/services/beacon/config.go @@ -0,0 +1,126 @@ +package beacon + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type Eth2Config struct { + GenesisForkVersion []byte `json:"genesis_fork_version"` + GenesisValidatorsRoot []byte `json:"genesis_validators_root"` + GenesisEpoch uint64 `json:"genesis_epoch"` + GenesisTime uint64 `json:"genesis_time"` + SecondsPerSlot uint64 `json:"seconds_per_slot"` + SlotsPerEpoch uint64 `json:"slots_per_epoch"` + SecondsPerEpoch uint64 `json:"seconds_per_epoch"` + EpochsPerSyncCommitteePeriod uint64 `json:"epochs_per_sync_committee_period"` +} + +func (c *Eth2Config) MarshalJSON() ([]byte, error) { + // GenesisForkVersion and GenesisValidatorsRoot are returned as hex strings with 0x prefixes. + // The other fields are returned as uint64s. + type Alias Eth2Config + return json.Marshal(&struct { + GenesisForkVersion string `json:"genesis_fork_version"` + GenesisValidatorsRoot string `json:"genesis_validators_root"` + *Alias + }{ + GenesisForkVersion: hexutil.Encode(c.GenesisForkVersion), + GenesisValidatorsRoot: hexutil.Encode(c.GenesisValidatorsRoot), + Alias: (*Alias)(c), + }) +} + +func (c *Eth2Config) UnmarshalJSON(data []byte) error { + type Alias Eth2Config + aux := &struct { + GenesisForkVersion string `json:"genesis_fork_version"` + GenesisValidatorsRoot string `json:"genesis_validators_root"` + *Alias + }{ + Alias: (*Alias)(c), + } + + err := json.Unmarshal(data, &aux) + if err != nil { + return err + } + + c.GenesisForkVersion, err = hexutil.Decode(aux.GenesisForkVersion) + if err != nil { + return err + } + c.GenesisValidatorsRoot, err = hexutil.Decode(aux.GenesisValidatorsRoot) + if err != nil { + return err + } + return nil +} + +// GetSlotTime returns the time of a given slot for the network described by Eth2Config. 
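Concretely, using the fixture values from the new config_test.go below (GenesisEpoch 10, GenesisTime 10000, SecondsPerSlot 4, SlotsPerEpoch 32), the slot arithmetic works out as follows; this is an illustrative example assuming a _test file in package beacon with fmt imported, not part of the patch:

    func ExampleEth2Config_slotMath() {
        cfg := Eth2Config{GenesisEpoch: 10, GenesisTime: 10000, SecondsPerSlot: 4, SlotsPerEpoch: 32}
        // Genesis slot is 10*32 = 320 at t = 10000; slot 330 is ten slots later.
        fmt.Println(cfg.GetSlotTime(330).Unix()) // 10000 + 10*4 = 10040
        // t = 10041 falls between slot 330 (10040) and slot 331 (10044), so round up.
        fmt.Println(cfg.FirstSlotAtLeast(10041))
        // Output:
        // 10040
        // 331
    }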
+func (c *Eth2Config) GetSlotTime(slot uint64) time.Time { + // In the interest of keeping this pure, we'll just return genesis time for slots before genesis + if slot <= c.GenesisEpoch*c.SlotsPerEpoch { + return time.Unix(int64(c.GenesisTime), 0) + } + // Genesis is slot 0 on mainnet, so we can subtract it safely + slotsSinceGenesis := slot - (c.GenesisEpoch * c.SlotsPerEpoch) + return time.Unix(int64(slotsSinceGenesis*c.SecondsPerSlot+c.GenesisTime), 0) +} + +// FirstSlotAtLeast returns the first slot with a timestamp greater than or equal to t +func (c *Eth2Config) FirstSlotAtLeast(t int64) uint64 { + if t <= 0 { + return c.GenesisEpoch * c.SlotsPerEpoch + } + + if uint64(t) <= c.GenesisTime { + return c.GenesisEpoch * c.SlotsPerEpoch + } + + secondsSinceGenesis := uint64(t) - c.GenesisTime + + var slotsSinceGenesis uint64 + // Avoid float error triggering ceil on equality with a modulo check + if secondsSinceGenesis%c.SecondsPerSlot == 0 { + slotsSinceGenesis = secondsSinceGenesis / c.SecondsPerSlot + } else { + // There must be a remainder + slotsSinceGenesis = secondsSinceGenesis/c.SecondsPerSlot + 1 + } + return c.GenesisEpoch*c.SlotsPerEpoch + slotsSinceGenesis +} + +func (c *Eth2Config) SlotToEpoch(slot uint64) uint64 { + return slot / c.SlotsPerEpoch +} + +func (c *Eth2Config) EpochToSlot(epoch uint64) uint64 { + return epoch * c.SlotsPerEpoch +} + +func (c *Eth2Config) SlotOfEpoch(epoch uint64, slot uint64) (uint64, error) { + if slot > c.SlotsPerEpoch-1 { + return 0, fmt.Errorf("slot %d is not in range 0 - %d", slot, c.SlotsPerEpoch-1) + } + return epoch*c.SlotsPerEpoch + slot, nil +} + +func (c *Eth2Config) LastSlotOfEpoch(epoch uint64) uint64 { + out, err := c.SlotOfEpoch(epoch, c.SlotsPerEpoch-1) + if err != nil { + panic("SlotOfEpoch should never return an error when passed SlotsPerEpoch - 1") + } + return out +} + +func (c *Eth2Config) FirstSlotOfEpoch(epoch uint64) uint64 { + out, err := c.SlotOfEpoch(epoch, 0) + if err != nil { + panic("SlotOfEpoch should never return an error when passed 0") + } + return out +} diff --git a/shared/services/beacon/config_test.go b/shared/services/beacon/config_test.go new file mode 100644 index 000000000..e0af148de --- /dev/null +++ b/shared/services/beacon/config_test.go @@ -0,0 +1,110 @@ +package beacon + +import ( + "slices" + "testing" + "time" +) + +var config = &Eth2Config{ + GenesisEpoch: 10, + GenesisTime: 10000, + SecondsPerSlot: 4, + SlotsPerEpoch: 32, + SecondsPerEpoch: 32 * 4, +} + +func TestGetSlotTime(t *testing.T) { + genesis := config.GetSlotTime(0) + if !genesis.Equal(time.Unix(int64(config.GenesisTime), 0)) { + t.Fatalf("slot 0 should be at genesis (%d) but was at %s", config.GenesisTime, genesis) + } + + slotPlusTen := config.GenesisEpoch*config.SlotsPerEpoch + 10 + slotPlusTenTime := config.GetSlotTime(slotPlusTen) + expectedTime := time.Unix(int64(config.SecondsPerSlot*10+config.GenesisTime), 0) + if !slotPlusTenTime.Equal(expectedTime) { + t.Fatalf("slot +10 should be at %s but was at %s", expectedTime, slotPlusTenTime) + } +} + +func TestFirstSlotAtLeast(t *testing.T) { + genesis := config.FirstSlotAtLeast(30) + if genesis != config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatalf("should have gotten the genesis slot (%d), instead got %d", config.GenesisEpoch*config.SlotsPerEpoch, genesis) + } + + // Whole multiple + slots := uint64(9000000) + st := config.GenesisTime + config.SecondsPerSlot*slots + result := config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch { + 
t.Fatal("Whole number seconds shouldn't round up") + } + + // Partial multiple rounds up + st = config.GenesisTime + config.SecondsPerSlot*slots - config.SecondsPerSlot/2 + result = config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatal("Whole number seconds shouldn't round up") + } + + // Smallest fractional amount rounds up + st = config.GenesisTime + config.SecondsPerSlot*slots - config.SecondsPerSlot + 1 + result = config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatal("Whole number seconds shouldn't round up") + } +} + +func TestMarshalJSON(t *testing.T) { + config := &Eth2Config{ + GenesisForkVersion: []byte{0x00, 0x00, 0x00, 0x08}, + GenesisValidatorsRoot: []byte{0xfe, 0x44, 0x33, 0x22}, + GenesisEpoch: 10, + GenesisTime: 10000, + SecondsPerSlot: 4, + SlotsPerEpoch: 32, + SecondsPerEpoch: 32 * 4, + EpochsPerSyncCommitteePeriod: 256, + } + + json, err := config.MarshalJSON() + if err != nil { + t.Fatalf("error marshalling config: %v", err) + } + + unmarshalled := &Eth2Config{} + err = unmarshalled.UnmarshalJSON(json) + if err != nil { + t.Fatalf("error unmarshalling config: %v", err) + } + + if !slices.Equal(unmarshalled.GenesisForkVersion, config.GenesisForkVersion) { + t.Fatalf("genesis fork version should be %v, instead got %v", config.GenesisForkVersion, unmarshalled.GenesisForkVersion) + } + + if !slices.Equal(unmarshalled.GenesisValidatorsRoot, config.GenesisValidatorsRoot) { + t.Fatalf("genesis validators root should be %v, instead got %v", config.GenesisValidatorsRoot, unmarshalled.GenesisValidatorsRoot) + } + + if unmarshalled.GenesisEpoch != config.GenesisEpoch { + t.Fatalf("genesis epoch should be %v, instead got %v", config.GenesisEpoch, unmarshalled.GenesisEpoch) + } + + if unmarshalled.GenesisTime != config.GenesisTime { + t.Fatalf("genesis time should be %v, instead got %v", config.GenesisTime, unmarshalled.GenesisTime) + } + + if unmarshalled.SecondsPerSlot != config.SecondsPerSlot { + t.Fatalf("seconds per slot should be %v, instead got %v", config.SecondsPerSlot, unmarshalled.SecondsPerSlot) + } + + if unmarshalled.SlotsPerEpoch != config.SlotsPerEpoch { + t.Fatalf("slots per epoch should be %v, instead got %v", config.SlotsPerEpoch, unmarshalled.SlotsPerEpoch) + } + + if unmarshalled.EpochsPerSyncCommitteePeriod != config.EpochsPerSyncCommitteePeriod { + t.Fatalf("epochs per sync committee period should be %v, instead got %v", config.EpochsPerSyncCommitteePeriod, unmarshalled.EpochsPerSyncCommitteePeriod) + } +} diff --git a/shared/services/config/consensus-common-config.go b/shared/services/config/consensus-common-config.go index f1c225573..6c311a05c 100644 --- a/shared/services/config/consensus-common-config.go +++ b/shared/services/config/consensus-common-config.go @@ -32,6 +32,9 @@ type ConsensusCommonConfig struct { // The checkpoint sync URL if used CheckpointSyncProvider config.Parameter `yaml:"checkpointSyncProvider,omitempty"` + // The suggested block gas limit + SuggestedBlockGasLimit config.Parameter `yaml:"suggestedBlockGasLimit,ommitempty"` + // The port to use for gossip traffic P2pPort config.Parameter `yaml:"p2pPort,omitempty"` @@ -77,6 +80,17 @@ func NewConsensusCommonConfig(cfg *RocketPoolConfig) *ConsensusCommonConfig { OverwriteOnUpgrade: false, }, + SuggestedBlockGasLimit: config.Parameter{ + ID: "suggestedBlockGasLimit", + Name: "Suggested Block Gas Limit", + Description: "The block gas limit that should be used for externally built 
blocks.", + Type: config.ParameterType_Uint, + Default: map[config.Network]interface{}{config.Network_All: defaultSuggestedBlockLimit}, + AffectsContainers: []config.ContainerID{config.ContainerID_Eth2, config.ContainerID_Validator}, + CanBeBlank: false, + OverwriteOnUpgrade: false, + }, + P2pPort: config.Parameter{ ID: P2pPortID, Name: "P2P Port", @@ -130,6 +144,7 @@ func (cfg *ConsensusCommonConfig) GetParameters() []*config.Parameter { &cfg.Graffiti, &cfg.CheckpointSyncProvider, &cfg.P2pPort, + &cfg.SuggestedBlockGasLimit, &cfg.ApiPort, &cfg.OpenApiPort, &cfg.DoppelgangerDetection, diff --git a/shared/services/config/execution-common-config.go b/shared/services/config/execution-common-config.go index 4b31c48bd..9da3ed493 100644 --- a/shared/services/config/execution-common-config.go +++ b/shared/services/config/execution-common-config.go @@ -12,11 +12,12 @@ const ( ecOpenRpcPortsID string = "openRpcPorts" // Defaults - defaultEcP2pPort uint16 = 30303 - defaultEcHttpPort uint16 = 8545 - defaultEcWsPort uint16 = 8546 - defaultEcEnginePort uint16 = 8551 - defaultOpenEcApiPort string = string(config.RPC_Closed) + defaultSuggestedBlockLimit uint64 = 30000000 + defaultEcP2pPort uint16 = 30303 + defaultEcHttpPort uint16 = 8545 + defaultEcWsPort uint16 = 8546 + defaultEcEnginePort uint16 = 8551 + defaultOpenEcApiPort string = string(config.RPC_Closed) ) // Configuration for the Execution client @@ -38,6 +39,9 @@ type ExecutionCommonConfig struct { // P2P traffic port P2pPort config.Parameter `yaml:"p2pPort,omitempty"` + // The suggested block gas limit + SuggestedBlockGasLimit config.Parameter `yaml:"suggestedBlockGasLimit,ommitempty"` + // Label for Ethstats EthstatsLabel config.Parameter `yaml:"ethstatsLabel,omitempty"` @@ -97,10 +101,21 @@ func NewExecutionCommonConfig(cfg *RocketPoolConfig) *ExecutionCommonConfig { Options: rpcPortModes, }, + SuggestedBlockGasLimit: config.Parameter{ + ID: "suggestedBlockGasLimit", + Name: "Suggested Block Gas Limit", + Description: "The block gas limit that should be used for locally built blocks.", + Type: config.ParameterType_Uint, + Default: map[config.Network]interface{}{config.Network_All: defaultSuggestedBlockLimit}, + AffectsContainers: []config.ContainerID{config.ContainerID_Eth1}, + CanBeBlank: false, + OverwriteOnUpgrade: false, + }, + P2pPort: config.Parameter{ ID: "p2pPort", Name: "P2P Port", - Description: "The port Geth should use for P2P (blockchain) traffic to communicate with other nodes.", + Description: "The port the Execution Client should use for P2P (blockchain) traffic to communicate with other nodes.", Type: config.ParameterType_Uint16, Default: map[config.Network]interface{}{config.Network_All: defaultEcP2pPort}, AffectsContainers: []config.ContainerID{config.ContainerID_Eth1}, @@ -139,6 +154,7 @@ func (cfg *ExecutionCommonConfig) GetParameters() []*config.Parameter { &cfg.WsPort, &cfg.EnginePort, &cfg.OpenRpcPorts, + &cfg.SuggestedBlockGasLimit, &cfg.P2pPort, &cfg.EthstatsLabel, &cfg.EthstatsLogin, diff --git a/shared/services/config/lighthouse-config.go b/shared/services/config/lighthouse-config.go index d21cc49c0..63aaf0fbb 100644 --- a/shared/services/config/lighthouse-config.go +++ b/shared/services/config/lighthouse-config.go @@ -5,8 +5,8 @@ import ( ) const ( - lighthouseTagPortableTest string = "sigp/lighthouse:v5.3.0" - lighthouseTagPortableProd string = "sigp/lighthouse:v5.3.0" + lighthouseTagPortableTest string = "sigp/lighthouse:v6.0.1" + lighthouseTagPortableProd string = "sigp/lighthouse:v6.0.1" defaultLhMaxPeers 
uint16 = 100 ) diff --git a/shared/services/config/lodestar-config.go b/shared/services/config/lodestar-config.go index 8a2cc65c9..83b42f846 100644 --- a/shared/services/config/lodestar-config.go +++ b/shared/services/config/lodestar-config.go @@ -5,8 +5,8 @@ import ( ) const ( - lodestarTagTest string = "chainsafe/lodestar:v1.23.0" - lodestarTagProd string = "chainsafe/lodestar:v1.23.0" + lodestarTagTest string = "chainsafe/lodestar:v1.25.0" + lodestarTagProd string = "chainsafe/lodestar:v1.25.0" defaultLodestarMaxPeers uint16 = 100 ) diff --git a/shared/services/config/nethermind-params.go b/shared/services/config/nethermind-params.go index 44431738a..1e6794817 100644 --- a/shared/services/config/nethermind-params.go +++ b/shared/services/config/nethermind-params.go @@ -9,8 +9,8 @@ import ( // Constants const ( - nethermindTagProd string = "nethermind/nethermind:1.29.1" - nethermindTagTest string = "nethermind/nethermind:1.29.1" + nethermindTagProd string = "nethermind/nethermind:1.30.1" + nethermindTagTest string = "nethermind/nethermind:1.30.1" nethermindEventLogInterval int = 1000 nethermindStopSignal string = "SIGTERM" ) diff --git a/shared/services/config/nimbus-config.go b/shared/services/config/nimbus-config.go index b4615adf8..36d4e8163 100644 --- a/shared/services/config/nimbus-config.go +++ b/shared/services/config/nimbus-config.go @@ -8,12 +8,12 @@ import ( const ( // Testnet - nimbusBnTagTest string = "statusim/nimbus-eth2:multiarch-v24.10.0" - nimbusVcTagTest string = "statusim/nimbus-validator-client:multiarch-v24.10.0" + nimbusBnTagTest string = "statusim/nimbus-eth2:multiarch-v24.12.0" + nimbusVcTagTest string = "statusim/nimbus-validator-client:multiarch-v24.12.0" // Mainnet - nimbusBnTagProd string = "statusim/nimbus-eth2:multiarch-v24.10.0" - nimbusVcTagProd string = "statusim/nimbus-validator-client:multiarch-v24.10.0" + nimbusBnTagProd string = "statusim/nimbus-eth2:multiarch-v24.12.0" + nimbusVcTagProd string = "statusim/nimbus-validator-client:multiarch-v24.12.0" defaultNimbusMaxPeersArm uint16 = 100 defaultNimbusMaxPeersAmd uint16 = 160 diff --git a/shared/services/config/reth-params.go b/shared/services/config/reth-params.go index c8254cc45..4c44896a0 100644 --- a/shared/services/config/reth-params.go +++ b/shared/services/config/reth-params.go @@ -9,8 +9,8 @@ import ( // Constants const ( - rethTagProd string = "ghcr.io/paradigmxyz/reth:v1.1.2" - rethTagTest string = "ghcr.io/paradigmxyz/reth:v1.1.2" + rethTagProd string = "ghcr.io/paradigmxyz/reth:v1.1.3" + rethTagTest string = "ghcr.io/paradigmxyz/reth:v1.1.3" rethEventLogInterval int = 1000 rethStopSignal string = "SIGTERM" ) diff --git a/shared/services/config/smartnode-config.go b/shared/services/config/smartnode-config.go index f04aaf3ec..c9915d26c 100644 --- a/shared/services/config/smartnode-config.go +++ b/shared/services/config/smartnode-config.go @@ -18,8 +18,8 @@ const ( NetworkID string = "network" ProjectNameID string = "projectName" SnapshotID string = "rocketpool-dao.eth" - RewardsTreeFilenameFormat string = "rp-rewards-%s-%d.json" - MinipoolPerformanceFilenameFormat string = "rp-minipool-performance-%s-%d.json" + rewardsTreeFilenameFormat string = "rp-rewards-%s-%d%s" + minipoolPerformanceFilenameFormat string = "rp-minipool-performance-%s-%d%s" RewardsTreeIpfsExtension string = ".zst" RewardsTreesFolder string = "rewards-trees" ChecksumTableFilename string = "checksums.sha384" @@ -42,6 +42,19 @@ const ( WatchtowerPrioFeeDefault uint64 = 3 ) +type RewardsExtension string + +const ( + 
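// File extensions for the serialized forms of a rewards tree, appended to the filename formats above +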
RewardsExtensionJSON RewardsExtension = ".json" + RewardsExtensionSSZ RewardsExtension = ".ssz" +) + +// Contract addresses for multicall / network state manager +type StateManagerContracts struct { + Multicaller common.Address + BalanceBatcher common.Address +} + // Configuration for the Smartnode type SmartnodeConfig struct { Title string `yaml:"-"` @@ -95,18 +108,6 @@ type SmartnodeConfig struct { // Manual override for the watchtower's priority fee WatchtowerPrioFeeOverride config.Parameter `yaml:"watchtowerPrioFeeOverride,omitempty"` - // The toggle for rolling records - UseRollingRecords config.Parameter `yaml:"useRollingRecords,omitempty"` - - // The rolling record checkpoint interval - RecordCheckpointInterval config.Parameter `yaml:"recordCheckpointInterval,omitempty"` - - // The checkpoint retention limit - CheckpointRetentionLimit config.Parameter `yaml:"checkpointRetentionLimit,omitempty"` - - // The path of the records folder where snapshots of rolling record info is stored during a rewards interval - RecordsPath config.Parameter `yaml:"recordsPath,omitempty"` - // The toggle for enabling pDAO proposal verification duties VerifyProposals config.Parameter `yaml:"verifyProposals,omitempty"` @@ -135,9 +136,6 @@ type SmartnodeConfig struct { // The contract address of the RPL token rplTokenAddress map[config.Network]string `yaml:"-"` - // The contract address for Snapshot delegation - snapshotDelegationAddress map[config.Network]string `yaml:"-"` - // The Snapshot API domain snapshotApiDomain map[config.Network]string `yaml:"-"` @@ -417,50 +415,6 @@ func NewSmartnodeConfig(cfg *RocketPoolConfig) *SmartnodeConfig { OverwriteOnUpgrade: true, }, - UseRollingRecords: config.Parameter{ - ID: "useRollingRecords", - Name: "Use Rolling Records", - Description: "[orange]**WARNING: EXPERIMENTAL**\n\n[white]Enable this to use the new rolling records feature, which stores attestation records for the entire Rocket Pool network in real time instead of collecting them all after a rewards period during tree generation.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Bool, - Default: map[config.Network]interface{}{config.Network_All: false}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - RecordCheckpointInterval: config.Parameter{ - ID: "recordCheckpointInterval", - Name: "Record Checkpoint Interval", - Description: "The number of epochs that should pass before saving a new rolling record checkpoint. Used if Rolling Records is enabled.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Uint, - Default: map[config.Network]interface{}{config.Network_All: uint64(45)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - CheckpointRetentionLimit: config.Parameter{ - ID: "checkpointRetentionLimit", - Name: "Checkpoint Retention Limit", - Description: "The number of checkpoint files to save on-disk before pruning old ones. 
Used if Rolling Records is enabled.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Uint, - Default: map[config.Network]interface{}{config.Network_All: uint64(200)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - RecordsPath: config.Parameter{ - ID: "recordsPath", - Name: "Records Path", - Description: "The path of the folder to store rolling record checkpoints in during a rewards interval. Used if Rolling Records is enabled.\n\nOnly useful if you're an Oracle DAO member, or if you generate your own rewards trees.", - Type: config.ParameterType_String, - Default: map[config.Network]interface{}{config.Network_All: getDefaultRecordsDir(cfg)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - txWatchUrl: map[config.Network]string{ config.Network_Mainnet: "https://etherscan.io/tx", config.Network_Devnet: "https://holesky.etherscan.io/tx", @@ -569,12 +523,6 @@ func NewSmartnodeConfig(cfg *RocketPoolConfig) *SmartnodeConfig { config.Network_Holesky: "0x9294Fc6F03c64Cc217f5BE8697EA3Ed2De77e2F8", }, - snapshotDelegationAddress: map[config.Network]string{ - config.Network_Mainnet: "0x469788fE6E9E9681C6ebF3bF78e7Fd26Fc015446", - config.Network_Devnet: "", - config.Network_Holesky: "", - }, - snapshotApiDomain: map[config.Network]string{ config.Network_Mainnet: "hub.snapshot.org", config.Network_Devnet: "", @@ -693,10 +641,6 @@ func (cfg *SmartnodeConfig) GetParameters() []*config.Parameter { &cfg.ArchiveECUrl, &cfg.WatchtowerMaxFeeOverride, &cfg.WatchtowerPrioFeeOverride, - &cfg.UseRollingRecords, - &cfg.RecordCheckpointInterval, - &cfg.CheckpointRetentionLimit, - &cfg.RecordsPath, } } @@ -802,10 +746,6 @@ func (cfg *SmartnodeConfig) GetRplTokenAddress() string { return cfg.rplTokenAddress[cfg.Network.Value.(config.Network)] } -func (cfg *SmartnodeConfig) GetSnapshotDelegationAddress() string { - return cfg.snapshotDelegationAddress[cfg.Network.Value.(config.Network)] -} - func (cfg *SmartnodeConfig) GetSmartnodeContainerTag() string { return smartnodeTag } @@ -844,27 +784,45 @@ func (cfg *SmartnodeConfig) GetRethAddress() common.Address { } func getDefaultDataDir(config *RocketPoolConfig) string { + if config == nil { + // Handle tests. Eventually we'll refactor so this isn't necessary. 
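+ // A nil config has no RocketPoolDirectory to join, so fall back to an empty path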
+ return "" + } return filepath.Join(config.RocketPoolDirectory, "data") } -func getDefaultRecordsDir(config *RocketPoolConfig) string { - return filepath.Join(getDefaultDataDir(config), "records") -} - -func (cfg *SmartnodeConfig) GetRewardsTreePath(interval uint64, daemon bool) string { +func (cfg *SmartnodeConfig) GetRewardsTreeDirectory(daemon bool) string { if daemon && !cfg.parent.IsNativeMode { - return filepath.Join(DaemonDataPath, RewardsTreesFolder, fmt.Sprintf(RewardsTreeFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) + return filepath.Join(DaemonDataPath, RewardsTreesFolder) } - return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder, fmt.Sprintf(RewardsTreeFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) + return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder) } -func (cfg *SmartnodeConfig) GetMinipoolPerformancePath(interval uint64, daemon bool) string { - if daemon && !cfg.parent.IsNativeMode { - return filepath.Join(DaemonDataPath, RewardsTreesFolder, fmt.Sprintf(MinipoolPerformanceFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) - } +func (cfg *SmartnodeConfig) formatRewardsFilename(f string, interval uint64, extension RewardsExtension) string { + return fmt.Sprintf(f, string(cfg.Network.Value.(config.Network)), interval, string(extension)) +} + +func (cfg *SmartnodeConfig) GetRewardsTreeFilename(interval uint64, extension RewardsExtension) string { + return cfg.formatRewardsFilename(rewardsTreeFilenameFormat, interval, extension) +} + +func (cfg *SmartnodeConfig) GetMinipoolPerformanceFilename(interval uint64) string { + return cfg.formatRewardsFilename(minipoolPerformanceFilenameFormat, interval, RewardsExtensionJSON) +} + +func (cfg *SmartnodeConfig) GetRewardsTreePath(interval uint64, daemon bool, extension RewardsExtension) string { + return filepath.Join( + cfg.GetRewardsTreeDirectory(daemon), + cfg.GetRewardsTreeFilename(interval, extension), + ) +} - return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder, fmt.Sprintf(MinipoolPerformanceFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) +func (cfg *SmartnodeConfig) GetMinipoolPerformancePath(interval uint64, daemon bool) string { + return filepath.Join( + cfg.GetRewardsTreeDirectory(daemon), + cfg.GetMinipoolPerformanceFilename(interval), + ) } func (cfg *SmartnodeConfig) GetRegenerateRewardsTreeRequestPath(interval uint64, daemon bool) string { @@ -987,6 +945,14 @@ func (cfg *SmartnodeConfig) GetBalanceBatcherAddress() string { return cfg.balancebatcherAddress[cfg.Network.Value.(config.Network)] } +// Utility function to get the state manager contracts +func (cfg *SmartnodeConfig) GetStateManagerContracts() StateManagerContracts { + return StateManagerContracts{ + Multicaller: common.HexToAddress(cfg.GetMulticallAddress()), + BalanceBatcher: common.HexToAddress(cfg.GetBalanceBatcherAddress()), + } +} + func (cfg *SmartnodeConfig) GetFlashbotsProtectUrl() string { return cfg.flashbotsProtectUrl[cfg.Network.Value.(config.Network)] } diff --git a/shared/services/config/teku-config.go b/shared/services/config/teku-config.go index 77f29c759..8a2b09c21 100644 --- a/shared/services/config/teku-config.go +++ b/shared/services/config/teku-config.go @@ -6,8 +6,8 @@ import ( ) const ( - tekuTagTest string = "consensys/teku:24.10.3" - tekuTagProd string = "consensys/teku:24.10.3" + tekuTagTest string = "consensys/teku:25.1.0" + tekuTagProd string = "consensys/teku:25.1.0" defaultTekuMaxPeers uint16 = 
100 ) diff --git a/shared/services/contracts/snapshot-delegation.go b/shared/services/contracts/snapshot-delegation.go deleted file mode 100644 index a62359068..000000000 --- a/shared/services/contracts/snapshot-delegation.go +++ /dev/null @@ -1,577 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// SnapshotDelegationMetaData contains all meta data concerning the SnapshotDelegation contract. -var SnapshotDelegationMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"delegator\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"delegate\",\"type\":\"address\"}],\"name\":\"ClearDelegate\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"delegator\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"delegate\",\"type\":\"address\"}],\"name\":\"SetDelegate\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"clearDelegate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"delegation\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"delegate\",\"type\":\"address\"}],\"name\":\"setDelegate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", -} - -// SnapshotDelegationABI is the input ABI used to generate the binding from. -// Deprecated: Use SnapshotDelegationMetaData.ABI instead. -var SnapshotDelegationABI = SnapshotDelegationMetaData.ABI - -// SnapshotDelegation is an auto generated Go binding around an Ethereum contract. -type SnapshotDelegation struct { - SnapshotDelegationCaller // Read-only binding to the contract - SnapshotDelegationTransactor // Write-only binding to the contract - SnapshotDelegationFilterer // Log filterer for contract events -} - -// SnapshotDelegationCaller is an auto generated read-only Go binding around an Ethereum contract. -type SnapshotDelegationCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SnapshotDelegationTransactor is an auto generated write-only Go binding around an Ethereum contract. 
-type SnapshotDelegationTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SnapshotDelegationFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type SnapshotDelegationFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SnapshotDelegationSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type SnapshotDelegationSession struct { - Contract *SnapshotDelegation // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SnapshotDelegationCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type SnapshotDelegationCallerSession struct { - Contract *SnapshotDelegationCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// SnapshotDelegationTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type SnapshotDelegationTransactorSession struct { - Contract *SnapshotDelegationTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SnapshotDelegationRaw is an auto generated low-level Go binding around an Ethereum contract. -type SnapshotDelegationRaw struct { - Contract *SnapshotDelegation // Generic contract binding to access the raw methods on -} - -// SnapshotDelegationCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type SnapshotDelegationCallerRaw struct { - Contract *SnapshotDelegationCaller // Generic read-only contract binding to access the raw methods on -} - -// SnapshotDelegationTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type SnapshotDelegationTransactorRaw struct { - Contract *SnapshotDelegationTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewSnapshotDelegation creates a new instance of SnapshotDelegation, bound to a specific deployed contract. -func NewSnapshotDelegation(address common.Address, backend bind.ContractBackend) (*SnapshotDelegation, error) { - contract, err := bindSnapshotDelegation(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &SnapshotDelegation{SnapshotDelegationCaller: SnapshotDelegationCaller{contract: contract}, SnapshotDelegationTransactor: SnapshotDelegationTransactor{contract: contract}, SnapshotDelegationFilterer: SnapshotDelegationFilterer{contract: contract}}, nil -} - -// NewSnapshotDelegationCaller creates a new read-only instance of SnapshotDelegation, bound to a specific deployed contract. -func NewSnapshotDelegationCaller(address common.Address, caller bind.ContractCaller) (*SnapshotDelegationCaller, error) { - contract, err := bindSnapshotDelegation(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &SnapshotDelegationCaller{contract: contract}, nil -} - -// NewSnapshotDelegationTransactor creates a new write-only instance of SnapshotDelegation, bound to a specific deployed contract. 
-func NewSnapshotDelegationTransactor(address common.Address, transactor bind.ContractTransactor) (*SnapshotDelegationTransactor, error) { - contract, err := bindSnapshotDelegation(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &SnapshotDelegationTransactor{contract: contract}, nil -} - -// NewSnapshotDelegationFilterer creates a new log filterer instance of SnapshotDelegation, bound to a specific deployed contract. -func NewSnapshotDelegationFilterer(address common.Address, filterer bind.ContractFilterer) (*SnapshotDelegationFilterer, error) { - contract, err := bindSnapshotDelegation(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &SnapshotDelegationFilterer{contract: contract}, nil -} - -// bindSnapshotDelegation binds a generic wrapper to an already deployed contract. -func bindSnapshotDelegation(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(SnapshotDelegationABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_SnapshotDelegation *SnapshotDelegationRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _SnapshotDelegation.Contract.SnapshotDelegationCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_SnapshotDelegation *SnapshotDelegationRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.SnapshotDelegationTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_SnapshotDelegation *SnapshotDelegationRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.SnapshotDelegationTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_SnapshotDelegation *SnapshotDelegationCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _SnapshotDelegation.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_SnapshotDelegation *SnapshotDelegationTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. 
-func (_SnapshotDelegation *SnapshotDelegationTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.contract.Transact(opts, method, params...) -} - -// Delegation is a free data retrieval call binding the contract method 0x74c6c454. -// -// Solidity: function delegation(address , bytes32 ) view returns(address) -func (_SnapshotDelegation *SnapshotDelegationCaller) Delegation(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte) (common.Address, error) { - var out []interface{} - err := _SnapshotDelegation.contract.Call(opts, &out, "delegation", arg0, arg1) - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// Delegation is a free data retrieval call binding the contract method 0x74c6c454. -// -// Solidity: function delegation(address , bytes32 ) view returns(address) -func (_SnapshotDelegation *SnapshotDelegationSession) Delegation(arg0 common.Address, arg1 [32]byte) (common.Address, error) { - return _SnapshotDelegation.Contract.Delegation(&_SnapshotDelegation.CallOpts, arg0, arg1) -} - -// Delegation is a free data retrieval call binding the contract method 0x74c6c454. -// -// Solidity: function delegation(address , bytes32 ) view returns(address) -func (_SnapshotDelegation *SnapshotDelegationCallerSession) Delegation(arg0 common.Address, arg1 [32]byte) (common.Address, error) { - return _SnapshotDelegation.Contract.Delegation(&_SnapshotDelegation.CallOpts, arg0, arg1) -} - -// ClearDelegate is a paid mutator transaction binding the contract method 0xf0bedbe2. -// -// Solidity: function clearDelegate(bytes32 id) returns() -func (_SnapshotDelegation *SnapshotDelegationTransactor) ClearDelegate(opts *bind.TransactOpts, id [32]byte) (*types.Transaction, error) { - return _SnapshotDelegation.contract.Transact(opts, "clearDelegate", id) -} - -// ClearDelegate is a paid mutator transaction binding the contract method 0xf0bedbe2. -// -// Solidity: function clearDelegate(bytes32 id) returns() -func (_SnapshotDelegation *SnapshotDelegationSession) ClearDelegate(id [32]byte) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.ClearDelegate(&_SnapshotDelegation.TransactOpts, id) -} - -// ClearDelegate is a paid mutator transaction binding the contract method 0xf0bedbe2. -// -// Solidity: function clearDelegate(bytes32 id) returns() -func (_SnapshotDelegation *SnapshotDelegationTransactorSession) ClearDelegate(id [32]byte) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.ClearDelegate(&_SnapshotDelegation.TransactOpts, id) -} - -// SetDelegate is a paid mutator transaction binding the contract method 0xbd86e508. -// -// Solidity: function setDelegate(bytes32 id, address delegate) returns() -func (_SnapshotDelegation *SnapshotDelegationTransactor) SetDelegate(opts *bind.TransactOpts, id [32]byte, delegate common.Address) (*types.Transaction, error) { - return _SnapshotDelegation.contract.Transact(opts, "setDelegate", id, delegate) -} - -// SetDelegate is a paid mutator transaction binding the contract method 0xbd86e508. 
-// -// Solidity: function setDelegate(bytes32 id, address delegate) returns() -func (_SnapshotDelegation *SnapshotDelegationSession) SetDelegate(id [32]byte, delegate common.Address) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.SetDelegate(&_SnapshotDelegation.TransactOpts, id, delegate) -} - -// SetDelegate is a paid mutator transaction binding the contract method 0xbd86e508. -// -// Solidity: function setDelegate(bytes32 id, address delegate) returns() -func (_SnapshotDelegation *SnapshotDelegationTransactorSession) SetDelegate(id [32]byte, delegate common.Address) (*types.Transaction, error) { - return _SnapshotDelegation.Contract.SetDelegate(&_SnapshotDelegation.TransactOpts, id, delegate) -} - -// SnapshotDelegationClearDelegateIterator is returned from FilterClearDelegate and is used to iterate over the raw logs and unpacked data for ClearDelegate events raised by the SnapshotDelegation contract. -type SnapshotDelegationClearDelegateIterator struct { - Event *SnapshotDelegationClearDelegate // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *SnapshotDelegationClearDelegateIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SnapshotDelegationClearDelegate) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SnapshotDelegationClearDelegate) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SnapshotDelegationClearDelegateIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SnapshotDelegationClearDelegateIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SnapshotDelegationClearDelegate represents a ClearDelegate event raised by the SnapshotDelegation contract. -type SnapshotDelegationClearDelegate struct { - Delegator common.Address - Id [32]byte - Delegate common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterClearDelegate is a free log retrieval operation binding the contract event 0x9c4f00c4291262731946e308dc2979a56bd22cce8f95906b975065e96cd5a064. 
-// -// Solidity: event ClearDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) FilterClearDelegate(opts *bind.FilterOpts, delegator []common.Address, id [][32]byte, delegate []common.Address) (*SnapshotDelegationClearDelegateIterator, error) { - - var delegatorRule []interface{} - for _, delegatorItem := range delegator { - delegatorRule = append(delegatorRule, delegatorItem) - } - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var delegateRule []interface{} - for _, delegateItem := range delegate { - delegateRule = append(delegateRule, delegateItem) - } - - logs, sub, err := _SnapshotDelegation.contract.FilterLogs(opts, "ClearDelegate", delegatorRule, idRule, delegateRule) - if err != nil { - return nil, err - } - return &SnapshotDelegationClearDelegateIterator{contract: _SnapshotDelegation.contract, event: "ClearDelegate", logs: logs, sub: sub}, nil -} - -// WatchClearDelegate is a free log subscription operation binding the contract event 0x9c4f00c4291262731946e308dc2979a56bd22cce8f95906b975065e96cd5a064. -// -// Solidity: event ClearDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) WatchClearDelegate(opts *bind.WatchOpts, sink chan<- *SnapshotDelegationClearDelegate, delegator []common.Address, id [][32]byte, delegate []common.Address) (event.Subscription, error) { - - var delegatorRule []interface{} - for _, delegatorItem := range delegator { - delegatorRule = append(delegatorRule, delegatorItem) - } - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var delegateRule []interface{} - for _, delegateItem := range delegate { - delegateRule = append(delegateRule, delegateItem) - } - - logs, sub, err := _SnapshotDelegation.contract.WatchLogs(opts, "ClearDelegate", delegatorRule, idRule, delegateRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SnapshotDelegationClearDelegate) - if err := _SnapshotDelegation.contract.UnpackLog(event, "ClearDelegate", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseClearDelegate is a log parse operation binding the contract event 0x9c4f00c4291262731946e308dc2979a56bd22cce8f95906b975065e96cd5a064. -// -// Solidity: event ClearDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) ParseClearDelegate(log types.Log) (*SnapshotDelegationClearDelegate, error) { - event := new(SnapshotDelegationClearDelegate) - if err := _SnapshotDelegation.contract.UnpackLog(event, "ClearDelegate", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SnapshotDelegationSetDelegateIterator is returned from FilterSetDelegate and is used to iterate over the raw logs and unpacked data for SetDelegate events raised by the SnapshotDelegation contract. 
-type SnapshotDelegationSetDelegateIterator struct { - Event *SnapshotDelegationSetDelegate // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *SnapshotDelegationSetDelegateIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SnapshotDelegationSetDelegate) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SnapshotDelegationSetDelegate) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SnapshotDelegationSetDelegateIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SnapshotDelegationSetDelegateIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SnapshotDelegationSetDelegate represents a SetDelegate event raised by the SnapshotDelegation contract. -type SnapshotDelegationSetDelegate struct { - Delegator common.Address - Id [32]byte - Delegate common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterSetDelegate is a free log retrieval operation binding the contract event 0xa9a7fd460f56bddb880a465a9c3e9730389c70bc53108148f16d55a87a6c468e. 
-// -// Solidity: event SetDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) FilterSetDelegate(opts *bind.FilterOpts, delegator []common.Address, id [][32]byte, delegate []common.Address) (*SnapshotDelegationSetDelegateIterator, error) { - - var delegatorRule []interface{} - for _, delegatorItem := range delegator { - delegatorRule = append(delegatorRule, delegatorItem) - } - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var delegateRule []interface{} - for _, delegateItem := range delegate { - delegateRule = append(delegateRule, delegateItem) - } - - logs, sub, err := _SnapshotDelegation.contract.FilterLogs(opts, "SetDelegate", delegatorRule, idRule, delegateRule) - if err != nil { - return nil, err - } - return &SnapshotDelegationSetDelegateIterator{contract: _SnapshotDelegation.contract, event: "SetDelegate", logs: logs, sub: sub}, nil -} - -// WatchSetDelegate is a free log subscription operation binding the contract event 0xa9a7fd460f56bddb880a465a9c3e9730389c70bc53108148f16d55a87a6c468e. -// -// Solidity: event SetDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) WatchSetDelegate(opts *bind.WatchOpts, sink chan<- *SnapshotDelegationSetDelegate, delegator []common.Address, id [][32]byte, delegate []common.Address) (event.Subscription, error) { - - var delegatorRule []interface{} - for _, delegatorItem := range delegator { - delegatorRule = append(delegatorRule, delegatorItem) - } - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var delegateRule []interface{} - for _, delegateItem := range delegate { - delegateRule = append(delegateRule, delegateItem) - } - - logs, sub, err := _SnapshotDelegation.contract.WatchLogs(opts, "SetDelegate", delegatorRule, idRule, delegateRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SnapshotDelegationSetDelegate) - if err := _SnapshotDelegation.contract.UnpackLog(event, "SetDelegate", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseSetDelegate is a log parse operation binding the contract event 0xa9a7fd460f56bddb880a465a9c3e9730389c70bc53108148f16d55a87a6c468e. 
-// -// Solidity: event SetDelegate(address indexed delegator, bytes32 indexed id, address indexed delegate) -func (_SnapshotDelegation *SnapshotDelegationFilterer) ParseSetDelegate(log types.Log) (*SnapshotDelegationSetDelegate, error) { - event := new(SnapshotDelegationSetDelegate) - if err := _SnapshotDelegation.contract.UnpackLog(event, "SetDelegate", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/shared/services/proposals/proposal-manager.go b/shared/services/proposals/proposal-manager.go index 13ab78429..8fe4f4d57 100644 --- a/shared/services/proposals/proposal-manager.go +++ b/shared/services/proposals/proposal-manager.go @@ -43,10 +43,7 @@ func NewProposalManager(log *log.ColorLogger, cfg *config.RocketPoolConfig, rp * return nil, fmt.Errorf("error creating node tree manager: %w", err) } - stateMgr, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, log) - if err != nil { - return nil, fmt.Errorf("error creating network state manager: %w", err) - } + stateMgr := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, log) logPrefix := "[PDAO Proposals]" return &ProposalManager{ diff --git a/shared/services/rewards/execution-client.go b/shared/services/rewards/execution-client.go new file mode 100644 index 000000000..418f825e0 --- /dev/null +++ b/shared/services/rewards/execution-client.go @@ -0,0 +1,76 @@ +package rewards + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/rocket-pool/rocketpool-go/settings/trustednode" +) + +// Interface assertion +var _ RewardsExecutionClient = &defaultRewardsExecutionClient{} + +// An implementation of RewardsExecutionClient that uses +// rocketpool-go to access chain data. +// +// Importantly, this struct wraps rocketpool.RocketPool and passes it +// to the old-fashioned rocketpool-go getters that take it as an argument, +// but it also fulfills the requirements of an interface used for dependency injection +// in tests.
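+// Construct one with NewRewardsExecutionClient; the wrapped *rocketpool.RocketPool remains available via Client().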
+type defaultRewardsExecutionClient struct { + *rocketpool.RocketPool +} + +func NewRewardsExecutionClient(rp *rocketpool.RocketPool) *defaultRewardsExecutionClient { + out := new(defaultRewardsExecutionClient) + out.RocketPool = rp + return out +} + +func (client *defaultRewardsExecutionClient) GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) { + return trustednode.GetNetworkEnabled(client.RocketPool, networkId, opts) +} + +func (client *defaultRewardsExecutionClient) HeaderByNumber(ctx context.Context, block *big.Int) (*ethtypes.Header, error) { + return client.RocketPool.Client.HeaderByNumber(ctx, block) +} + +func (client *defaultRewardsExecutionClient) GetRewardsEvent(index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) { + return rewards.GetRewardsEvent(client.RocketPool, index, rocketRewardsPoolAddresses, opts) +} + +func (client *defaultRewardsExecutionClient) GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { + + found, event, err := client.GetRewardsEvent(interval, previousRewardsPoolAddresses, opts) + if err != nil { + return rewards.RewardsEvent{}, fmt.Errorf("error getting rewards event for interval %d: %w", interval, err) + } + if !found { + return rewards.RewardsEvent{}, fmt.Errorf("interval %d event not found", interval) + } + + return event, nil + +} + +func (client *defaultRewardsExecutionClient) GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) { + return client.RocketPool.GetRewardIndex(opts) +} + +func (client *defaultRewardsExecutionClient) GetContract(contractName string, opts *bind.CallOpts) (*rocketpool.Contract, error) { + return client.RocketPool.GetContract(contractName, opts) +} + +func (client *defaultRewardsExecutionClient) BalanceAt(ctx context.Context, address common.Address, blockNumber *big.Int) (*big.Int, error) { + return client.RocketPool.Client.BalanceAt(ctx, address, blockNumber) +} + +func (client *defaultRewardsExecutionClient) Client() *rocketpool.RocketPool { + return client.RocketPool +} diff --git a/shared/services/rewards/fees/fees.go b/shared/services/rewards/fees/fees.go new file mode 100644 index 000000000..9cda3e9d4 --- /dev/null +++ b/shared/services/rewards/fees/fees.go @@ -0,0 +1,29 @@ +package fees + +import ( + "math/big" +) + +var oneEth = big.NewInt(1000000000000000000) +var tenEth = big.NewInt(0).Mul(oneEth, big.NewInt(10)) +var pointOhFourEth = big.NewInt(40000000000000000) +var pointOneEth = big.NewInt(0).Div(oneEth, big.NewInt(10)) +var sixteenEth = big.NewInt(0).Mul(oneEth, big.NewInt(16)) + +func GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth *big.Int) *big.Int { + if bond.Cmp(sixteenEth) >= 0 { + return fee + } + // fee = max(fee, 0.10 Eth + (0.04 Eth * min(10 Eth, percentOfBorrowedETH) / 10 Eth)) + _min := big.NewInt(0).Set(tenEth) + if _min.Cmp(percentOfBorrowedEth) > 0 { + _min.Set(percentOfBorrowedEth) + } + dividend := _min.Mul(_min, pointOhFourEth) + divResult := dividend.Div(dividend, tenEth) + feeWithBonus := divResult.Add(divResult, pointOneEth) + if fee.Cmp(feeWithBonus) >= 0 { + return fee + } + return feeWithBonus +} diff --git a/shared/services/rewards/files.go b/shared/services/rewards/files.go index 93893662d..10f9905bc 100644 --- a/shared/services/rewards/files.go +++ b/shared/services/rewards/files.go @@ -43,25 +43,35 @@ func ReadLocalMinipoolPerformanceFile(path string) (*LocalMinipoolPerformanceFil } // Interface for 
local rewards or minipool performance files -type ILocalFile interface { +type ISerializable interface { // Converts the underlying interface to a byte slice Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) } -// A wrapper around ILocalFile representing a local rewards file or minipool performance file. +// A wrapper around ISerializable representing a local rewards file or minipool performance file. // Can be used with anything that can be serialized to bytes or parsed from bytes. -type LocalFile[T ILocalFile] struct { +type LocalFile[T ISerializable] struct { f T fullPath string } +type ILocalFile interface { + ISerializable + Write() ([]byte, error) + WriteSSZ() ([]byte, error) + Path() string + FileName() string + CreateCompressedFileAndCid() (string, cid.Cid, error) +} + // Type aliases type LocalRewardsFile = LocalFile[IRewardsFile] type LocalMinipoolPerformanceFile = LocalFile[IMinipoolPerformanceFile] // NewLocalFile creates the wrapper, but doesn't write to disk. // This should be used when generating new trees / performance files. -func NewLocalFile[T ILocalFile](ilf T, fullpath string) *LocalFile[T] { +func NewLocalFile[T ISerializable](ilf T, fullpath string) *LocalFile[T] { return &LocalFile[T]{ f: ilf, fullPath: fullpath, @@ -78,18 +88,45 @@ func (lf *LocalFile[T]) Serialize() ([]byte, error) { return lf.f.Serialize() } +// Converts the underlying interface to a byte slice by calling its SerializeSSZ function +func (lf *LocalFile[T]) SerializeSSZ() ([]byte, error) { + return lf.f.SerializeSSZ() +} + // Serializes the file and writes it to disk -func (lf *LocalFile[T]) Write() error { +func (lf *LocalFile[T]) Write() ([]byte, error) { data, err := lf.Serialize() if err != nil { - return fmt.Errorf("error serializing file: %w", err) + return nil, fmt.Errorf("error serializing file: %w", err) + } + + err = os.WriteFile(lf.fullPath, data, 0644) + if err != nil { + return nil, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + } + return data, nil +} + +// Serializes the file to SSZ and writes it to disk +func (lf *LocalFile[T]) WriteSSZ() ([]byte, error) { + data, err := lf.SerializeSSZ() + if err != nil { + return nil, fmt.Errorf("error serializing file: %w", err) } err = os.WriteFile(lf.fullPath, data, 0644) if err != nil { - return fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + return nil, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) } - return nil + return data, nil +} + +func (lf *LocalFile[T]) Path() string { + return lf.fullPath +} + +func (lf *LocalFile[T]) FileName() string { + return filepath.Base(lf.Path()) } // Computes the CID that would be used if we compressed the file with zst, @@ -98,11 +135,11 @@ func (lf *LocalFile[T]) Write() error { // // N.B.
 // N.B. This function will also save the compressed file to disk so it can
 // later be uploaded to ipfs
-func (lf *LocalFile[T]) CreateCompressedFileAndCid() (cid.Cid, error) {
+func (lf *LocalFile[T]) CreateCompressedFileAndCid() (string, cid.Cid, error) {
 	// Serialize
 	data, err := lf.Serialize()
 	if err != nil {
-		return cid.Cid{}, fmt.Errorf("error serializing file: %w", err)
+		return "", cid.Cid{}, fmt.Errorf("error serializing file: %w", err)
 	}
 
 	// Compress
@@ -112,14 +149,132 @@
 	filename := lf.fullPath + config.RewardsTreeIpfsExtension
 	c, err := singleFileDirIPFSCid(compressedBytes, filepath.Base(filename))
 	if err != nil {
-		return cid.Cid{}, fmt.Errorf("error calculating CID: %w", err)
+		return filename, cid.Cid{}, fmt.Errorf("error calculating CID: %w", err)
 	}
 
 	// Write to disk
 	// Take care to write to `filename` since it has the .zst extension added
 	err = os.WriteFile(filename, compressedBytes, 0644)
 	if err != nil {
-		return cid.Cid{}, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err)
+		return filename, cid.Cid{}, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err)
+	}
+	return filename, c, nil
+}
+
+// Saves all rewards artifacts, including ssz if the rewards file is at least v3.
+// If nodeTrusted is true, zstd compressed copies will also be saved, with the cid of the
+// compressed minipool perf file added to the rewards file before the latter is compressed.
+//
+// If the rewards file is at least v3, the cid of the uncompressed ssz file is returned for consensus.
+// Otherwise, the cid of the compressed json rewards file is returned for consensus.
+// Thus, this function is only suitable for ruleset v9+; earlier rulesets should use saveJSONArtifacts instead.
+func saveRewardsArtifacts(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) {
+	if treeResult.RewardsFile.GetRewardsFileVersion() < rewardsFileVersionThree {
+		return saveJSONArtifacts(smartnode, treeResult, nodeTrusted)
+	}
+
+	return saveArtifactsImpl(smartnode, treeResult, nodeTrusted, true)
+}
+
+// Saves JSON artifacts from tree generation.
+// If nodeTrusted is true, zstd compressed copies will also be saved, with the cid of the
+// compressed minipool perf file added to the rewards file before the latter is compressed.
+//
+// Returns the cid of the compressed rewards file, a map containing all the other cids, and an error.
+func saveJSONArtifacts(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) {
+	return saveArtifactsImpl(smartnode, treeResult, nodeTrusted, false)
+}
+
+// Saves JSON artifacts and optionally compressed + ssz artifacts.
+// If includeSSZ is true, the primary cid is the uncompressed rewards ssz.
+// Otherwise, it is the compressed rewards json.
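For orientation (an editor's note, not part of the diff): when nodeTrusted and includeSSZ are both true, the cid map built below ends up with six entries, an uncompressed and a .zst-compressed cid for each of the performance json, the rewards json, and the rewards ssz; the primary cid used for consensus is the uncompressed ssz when includeSSZ is true, and the compressed rewards json otherwise.

+func saveArtifactsImpl(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool, includeSSZ bool) (cid.Cid, map[string]cid.Cid, error) {
+	rewardsFile := treeResult.RewardsFile
+	currentIndex := rewardsFile.GetIndex()
+
+	var primaryCid *cid.Cid
+	out := make(map[string]cid.Cid, 4)
+
+	files := []ILocalFile{
+		// Do not reorder!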
+		// i == 0 - minipool performance file
+		NewLocalFile[IMinipoolPerformanceFile](
+			treeResult.MinipoolPerformanceFile,
+			smartnode.GetMinipoolPerformancePath(currentIndex, true),
+		),
+		// i == 1 - rewards file
+		NewLocalFile[IRewardsFile](
+			rewardsFile,
+			smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionJSON),
+		),
+	}
+
+	// Only include ssz for supported versions
+	includeSSZ = includeSSZ && rewardsFile.GetRewardsFileVersion() >= minRewardsFileVersionSSZ
+
+	if includeSSZ {
+		files = append(
+			files,
+			// i == 2 - ssz rewards file
+			NewLocalFile[IRewardsFile](
+				rewardsFile,
+				smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionSSZ),
+			),
+		)
+	}
+
+	for i, f := range files {
+		var data []byte
+		var err error
+
+		if includeSSZ && i == 2 {
+			data, err = f.WriteSSZ()
+		} else {
+			data, err = f.Write()
+		}
+		if err != nil {
+			return cid.Cid{}, nil, fmt.Errorf("error saving %s: %w", f.Path(), err)
+		}
+
+		uncompressedCid, err := singleFileDirIPFSCid(data, f.FileName())
+		if err != nil {
+			return cid.Cid{}, nil, fmt.Errorf("error calculating cid for saved file %s: %w", f.Path(), err)
+		}
+		out[f.FileName()] = uncompressedCid
+
+		if !nodeTrusted {
+			// For some reason we didn't simply omit this in the past, so for consistency, keep setting it.
+			rewardsFile.SetMinipoolPerformanceFileCID("---")
+			// No compressed copy is made on this path, so fall back to the uncompressed
+			// consensus artifact for the primary cid instead of leaving it nil
+			if (includeSSZ && i == 2) || (!includeSSZ && i == 1) {
+				primaryCid = &uncompressedCid
+			}
+			// Non-oDAO nodes only need the uncompressed files
+			continue
+		}
+
+		// Save compressed versions
+		compressedFilePath, compressedCid, err := f.CreateCompressedFileAndCid()
+		if err != nil {
+			return cid.Cid{}, nil, fmt.Errorf("error compressing file %s: %w", f.Path(), err)
+		}
+		out[filepath.Base(compressedFilePath)] = compressedCid
+
+		// Note the performance cid in the rewards file
+		if i == 0 {
+			rewardsFile.SetMinipoolPerformanceFileCID(compressedCid.String())
+		}
+
+		// Note the primary cid for artifacts used for consensus
+		if !includeSSZ {
+			// JSON rewards file
+			if i == 1 {
+				primaryCid = &compressedCid
+			}
+		} else {
+			// SSZ rewards file
+			if i == 2 {
+				// Consensus is on the uncompressed cid when using ssz
+				primaryCid = &uncompressedCid
+			}
+		}
+	}
 
-	return c, nil
+	return *primaryCid, out, nil
 }
diff --git a/shared/services/rewards/files_test.go b/shared/services/rewards/files_test.go
index e5914d034..7139099d9 100644
--- a/shared/services/rewards/files_test.go
+++ b/shared/services/rewards/files_test.go
@@ -1,10 +1,12 @@
 package rewards
 
 import (
+	"bytes"
 	"encoding/hex"
 	"fmt"
 	"os"
 	"path"
+	"path/filepath"
 	"testing"
 )
@@ -17,7 +19,7 @@ func TestFilesFromTree(t *testing.T) {
 			RewardsFileVersion: 3,
 			RulesetVersion:     8,
 		},
-		MinipoolPerformanceFile: MinipoolPerformanceFile_v3{
+		MinipoolPerformanceFile: MinipoolPerformanceFile_v2{
 			RewardsFileVersion: 3,
 			RulesetVersion:     8,
 		},
@@ -28,21 +30,35 @@ func TestFilesFromTree(t *testing.T) {
 		path.Join(dir, "rewards.json"),
 	)
 
-	err := localRewardsFile.Write()
+	rewardsFileBytes, err := localRewardsFile.Write()
 	if err != nil {
 		t.Fatal(err)
 	}
+	if rewardsFileBytes == nil {
+		t.Fatal("Write() should have returned serialized data")
+	}
+	directBytes, _ := f.Serialize()
+	if !bytes.Equal(directBytes, rewardsFileBytes) {
+		t.Fatal("Write() returned something different than Serialize()")
+	}
 
-	minipoolPerformanceFile := localRewardsFile.Impl().GetMinipoolPerformanceFile()
+	minipoolPerformanceFile := &f.MinipoolPerformanceFile
 	localMinipoolPerformanceFile := NewLocalFile[IMinipoolPerformanceFile](
 		minipoolPerformanceFile,
 		path.Join(dir, "performance.json"),
 	)
 
-	err = localMinipoolPerformanceFile.Write()
+
miniPerfFileBytes, err := localMinipoolPerformanceFile.Write() if err != nil { t.Fatal(err) } + if miniPerfFileBytes == nil { + t.Fatal("Write() should have returned serialized data") + } + directBytes, _ = minipoolPerformanceFile.Serialize() + if !bytes.Equal(directBytes, miniPerfFileBytes) { + t.Fatal("Write() returned something different than Serialize()") + } // Check that the file can be parsed localRewardsFile, err = ReadLocalRewardsFile(path.Join(dir, "rewards.json")) @@ -50,10 +66,10 @@ func TestFilesFromTree(t *testing.T) { t.Fatal(err) } - if localRewardsFile.Impl().GetHeader().RulesetVersion != f.RewardsFileHeader.RulesetVersion { + if localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion != f.RewardsFileHeader.RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetHeader().RulesetVersion, + localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion, f.RewardsFileHeader.RulesetVersion, ) } @@ -74,7 +90,7 @@ func TestCompressionAndCids(t *testing.T) { RewardsFileVersion: 3, RulesetVersion: 8, }, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ + MinipoolPerformanceFile: MinipoolPerformanceFile_v2{ RewardsFileVersion: 3, RulesetVersion: 9, }, @@ -85,21 +101,27 @@ func TestCompressionAndCids(t *testing.T) { path.Join(dir, "rewards.json"), ) - minipoolPerformanceFile := localRewardsFile.Impl().GetMinipoolPerformanceFile() + minipoolPerformanceFile := &f.MinipoolPerformanceFile localMinipoolPerformanceFile := NewLocalFile[IMinipoolPerformanceFile]( minipoolPerformanceFile, path.Join(dir, "performance.json"), ) - rewardsCid, err := localRewardsFile.CreateCompressedFileAndCid() + returnedFilename, rewardsCid, err := localRewardsFile.CreateCompressedFileAndCid() if err != nil { t.Fatal(err) } + if filepath.Base(returnedFilename) != "rewards.json.zst" { + t.Fatalf("Unexpected filename: %s", returnedFilename) + } - performanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() + returnedFilename, performanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() if err != nil { t.Fatal(err) } + if filepath.Base(returnedFilename) != "performance.json.zst" { + t.Fatalf("Unexpected filename: %s", returnedFilename) + } // Check that compressed files were written to disk and their cids match what was returned by CompressedCid compressedRewardsBytes, err := os.ReadFile(path.Join(dir, "rewards.json.zst")) @@ -151,21 +173,21 @@ func TestCompressionAndCids(t *testing.T) { } // Make sure values were preserved in the round trip - if localRewardsFile.Impl().GetHeader().RulesetVersion != parsedRewards.GetHeader().RulesetVersion { + if localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion != parsedRewards.(*RewardsFile_v3).RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetHeader().RulesetVersion, - parsedRewards.GetHeader().RulesetVersion, + localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion, + parsedRewards.(*RewardsFile_v3).RulesetVersion, ) } - if localRewardsFile.Impl().GetMinipoolPerformanceFile().(*MinipoolPerformanceFile_v3).RulesetVersion != - parsedPerformance.(*MinipoolPerformanceFile_v3).RulesetVersion { + if minipoolPerformanceFile.RulesetVersion != + parsedPerformance.(*MinipoolPerformanceFile_v2).RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetMinipoolPerformanceFile().(*MinipoolPerformanceFile_v3).RulesetVersion, - 
parsedPerformance.(*MinipoolPerformanceFile_v3).RulesetVersion, + minipoolPerformanceFile.RulesetVersion, + parsedPerformance.(*MinipoolPerformanceFile_v2).RulesetVersion, ) } } diff --git a/shared/services/rewards/generator-impl-v8-rolling.go b/shared/services/rewards/generator-impl-v8-rolling.go deleted file mode 100644 index a9d88fbce..000000000 --- a/shared/services/rewards/generator-impl-v8-rolling.go +++ /dev/null @@ -1,807 +0,0 @@ -package rewards - -import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "sort" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/rocket-pool/rocketpool-go/rocketpool" - tnsettings "github.com/rocket-pool/rocketpool-go/settings/trustednode" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/config" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" - "github.com/wealdtech/go-merkletree" - "github.com/wealdtech/go-merkletree/keccak256" -) - -// Implementation for tree generator ruleset v8 with rolling record support -type treeGeneratorImpl_v8_rolling struct { - networkState *state.NetworkState - rewardsFile *RewardsFile_v3 - elSnapshotHeader *types.Header - log *log.ColorLogger - logPrefix string - rp *rocketpool.RocketPool - cfg *config.RocketPoolConfig - bc beacon.Client - opts *bind.CallOpts - smoothingPoolBalance *big.Int - intervalDutiesInfo *IntervalDutiesInfo - slotsPerEpoch uint64 - validatorIndexMap map[string]*MinipoolInfo - elStartTime time.Time - elEndTime time.Time - validNetworkCache map[uint64]bool - epsilon *big.Int - intervalSeconds *big.Int - beaconConfig beacon.Eth2Config - rollingRecord *RollingRecord - nodeDetails map[common.Address]*NodeSmoothingDetails -} - -// Create a new tree generator -func newTreeGeneratorImpl_v8_rolling(log *log.ColorLogger, logPrefix string, index uint64, startTime time.Time, endTime time.Time, consensusBlock uint64, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState, rollingRecord *RollingRecord) *treeGeneratorImpl_v8_rolling { - return &treeGeneratorImpl_v8_rolling{ - rewardsFile: &RewardsFile_v3{ - RewardsFileHeader: &RewardsFileHeader{ - RewardsFileVersion: 3, - RulesetVersion: 8, - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - IntervalsPassed: intervalsPassed, - InvalidNetworkNodes: map[common.Address]uint64{}, - TotalRewards: &TotalRewards{ - ProtocolDaoRpl: NewQuotedBigInt(0), - TotalCollateralRpl: NewQuotedBigInt(0), - TotalOracleDaoRpl: NewQuotedBigInt(0), - TotalSmoothingPoolEth: NewQuotedBigInt(0), - PoolStakerSmoothingPoolEth: NewQuotedBigInt(0), - NodeOperatorSmoothingPoolEth: NewQuotedBigInt(0), - }, - NetworkRewards: map[uint64]*NetworkRewardsInfo{}, - }, - NodeRewards: map[common.Address]*NodeRewardsInfo_v3{}, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v3{}, - }, - }, - validatorIndexMap: map[string]*MinipoolInfo{}, - elSnapshotHeader: elSnapshotHeader, - log: log, - logPrefix: logPrefix, - networkState: 
state, - rollingRecord: rollingRecord, - } -} - -// Get the version of the ruleset used by this generator -func (r *treeGeneratorImpl_v8_rolling) getRulesetVersion() uint64 { - return r.rewardsFile.RulesetVersion -} - -func (r *treeGeneratorImpl_v8_rolling) generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) { - - r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) - - // Provision some struct params - r.rp = rp - r.cfg = cfg - r.bc = bc - r.validNetworkCache = map[uint64]bool{ - 0: true, - } - - // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) - r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network - r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion - - // Get the Beacon config - r.beaconConfig = r.networkState.BeaconConfig - r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch - - // Set the EL client call opts - r.opts = &bind.CallOpts{ - BlockNumber: r.elSnapshotHeader.Number, - } - - r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) - - // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation - nodeCount := len(r.networkState.NodeDetails) - minipoolCount := len(r.networkState.MinipoolDetails) - if nodeCount > minipoolCount { - r.epsilon = big.NewInt(int64(nodeCount)) - } else { - r.epsilon = big.NewInt(int64(minipoolCount)) - } - - // Calculate the RPL rewards - err := r.calculateRplRewards() - if err != nil { - return nil, fmt.Errorf("error calculating RPL rewards: %w", err) - } - - // Calculate the ETH rewards - err = r.calculateEthRewards(true) - if err != nil { - return nil, fmt.Errorf("error calculating ETH rewards: %w", err) - } - - // Calculate the network reward map and the totals - r.updateNetworksAndTotals() - - // Generate the Merkle Tree - err = r.generateMerkleTree() - if err != nil { - return nil, fmt.Errorf("error generating Merkle tree: %w", err) - } - - // Sort all of the missed attestations so the files are always generated in the same state - for _, minipoolInfo := range r.rewardsFile.MinipoolPerformanceFile.MinipoolPerformance { - sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { - return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] - }) - } - - return r.rewardsFile, nil - -} - -// Quickly calculates an approximate of the staker's share of the smoothing pool balance without processing Beacon performance -// Used for approximate returns in the rETH ratio update -func (r *treeGeneratorImpl_v8_rolling) approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) { - r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) - - r.rp = rp - r.cfg = cfg - r.bc = bc - r.validNetworkCache = map[uint64]bool{ - 0: true, - } - - // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) - r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network - r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion - - // Get the Beacon config - r.beaconConfig = 
r.networkState.BeaconConfig - r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch - - // Set the EL client call opts - r.opts = &bind.CallOpts{ - BlockNumber: r.elSnapshotHeader.Number, - } - - r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) - - // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation - nodeCount := len(r.networkState.NodeDetails) - minipoolCount := len(r.networkState.MinipoolDetails) - if nodeCount > minipoolCount { - r.epsilon = big.NewInt(int64(nodeCount)) - } else { - r.epsilon = big.NewInt(int64(minipoolCount)) - } - - // Calculate the ETH rewards - err := r.calculateEthRewards(false) - if err != nil { - return nil, fmt.Errorf("error calculating ETH rewards: %w", err) - } - - return &r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int, nil -} - -// Generates a merkle tree from the provided rewards map -func (r *treeGeneratorImpl_v8_rolling) generateMerkleTree() error { - - // Generate the leaf data for each node - totalData := make([][]byte, 0, len(r.rewardsFile.NodeRewards)) - for address, rewardsForNode := range r.rewardsFile.NodeRewards { - // Ignore nodes that didn't receive any rewards - if rewardsForNode.CollateralRpl.Cmp(common.Big0) == 0 && rewardsForNode.OracleDaoRpl.Cmp(common.Big0) == 0 && rewardsForNode.SmoothingPoolEth.Cmp(common.Big0) == 0 { - continue - } - - // Node data is address[20] :: network[32] :: RPL[32] :: ETH[32] - nodeData := make([]byte, 0, 20+32*3) - - // Node address - addressBytes := address.Bytes() - nodeData = append(nodeData, addressBytes...) - - // Node network - network := big.NewInt(0).SetUint64(rewardsForNode.RewardNetwork) - networkBytes := make([]byte, 32) - network.FillBytes(networkBytes) - nodeData = append(nodeData, networkBytes...) - - // RPL rewards - rplRewards := big.NewInt(0) - rplRewards.Add(&rewardsForNode.CollateralRpl.Int, &rewardsForNode.OracleDaoRpl.Int) - rplRewardsBytes := make([]byte, 32) - rplRewards.FillBytes(rplRewardsBytes) - nodeData = append(nodeData, rplRewardsBytes...) - - // ETH rewards - ethRewardsBytes := make([]byte, 32) - rewardsForNode.SmoothingPoolEth.FillBytes(ethRewardsBytes) - nodeData = append(nodeData, ethRewardsBytes...) 
- - // Assign it to the node rewards tracker and add it to the leaf data slice - rewardsForNode.MerkleData = nodeData - totalData = append(totalData, nodeData) - } - - // Generate the tree - tree, err := merkletree.NewUsing(totalData, keccak256.New(), false, true) - if err != nil { - return fmt.Errorf("error generating Merkle Tree: %w", err) - } - - // Generate the proofs for each node - for address, rewardsForNode := range r.rewardsFile.NodeRewards { - // Get the proof - proof, err := tree.GenerateProof(rewardsForNode.MerkleData, 0) - if err != nil { - return fmt.Errorf("error generating proof for node %s: %w", address.Hex(), err) - } - - // Convert the proof into hex strings - proofStrings := make([]string, len(proof.Hashes)) - for i, hash := range proof.Hashes { - proofStrings[i] = fmt.Sprintf("0x%s", hex.EncodeToString(hash)) - } - - // Assign the hex strings to the node rewards struct - rewardsForNode.MerkleProof = proofStrings - } - - r.rewardsFile.MerkleTree = tree - r.rewardsFile.MerkleRoot = common.BytesToHash(tree.Root()).Hex() - return nil - -} - -// Calculates the per-network distribution amounts and the total reward amounts -func (r *treeGeneratorImpl_v8_rolling) updateNetworksAndTotals() { - - // Get the highest network index with valid rewards - highestNetworkIndex := uint64(0) - for network := range r.rewardsFile.NetworkRewards { - if network > highestNetworkIndex { - highestNetworkIndex = network - } - } - - // Create the map for each network, including unused ones - for network := uint64(0); network <= highestNetworkIndex; network++ { - _, exists := r.rewardsFile.NetworkRewards[network] - if !exists { - rewardsForNetwork := &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[network] = rewardsForNetwork - } - } - -} - -func (r *treeGeneratorImpl_v8_rolling) calculateNodeRplRewards( - collateralRewards *big.Int, - nodeEffectiveStake *big.Int, - totalEffectiveRplStake *big.Int, - nodeWeight *big.Int, - totalNodeWeight *big.Int, -) *big.Int { - - if nodeEffectiveStake.Sign() <= 0 || nodeWeight.Sign() <= 0 { - return big.NewInt(0) - } - - // C is in the closed range [1, 6] - // C := min(6, interval - 18 + 1) - c := int64(6) - interval := int64(r.networkState.NetworkDetails.RewardIndex) - - if c > (interval - 18 + 1) { - c = interval - 18 + 1 - } - - if c <= 0 { - c = 1 - } - - bigC := big.NewInt(c) - - // (collateralRewards * C * nodeWeight / (totalNodeWeight * 6)) + (collateralRewards * (6 - C) * nodeEffectiveStake / (totalEffectiveRplStake * 6)) - // First, (collateralRewards * C * nodeWeight / (totalNodeWeight * 6)) - rpip30Rewards := big.NewInt(0).Mul(collateralRewards, nodeWeight) - rpip30Rewards.Mul(rpip30Rewards, bigC) - rpip30Rewards.Quo(rpip30Rewards, big.NewInt(0).Mul(totalNodeWeight, six)) - - // Once C hits 6 we can exit early as an optimization - if c == 6 { - return rpip30Rewards - } - - // Second, (collateralRewards * (6 - C) * nodeEffectiveStake / (totalEffectiveRplStake * 6)) - oldRewards := big.NewInt(6) - oldRewards.Sub(oldRewards, bigC) - oldRewards.Mul(oldRewards, collateralRewards) - oldRewards.Mul(oldRewards, nodeEffectiveStake) - oldRewards.Quo(oldRewards, big.NewInt(0).Mul(totalEffectiveRplStake, six)) - - // Add them together - return rpip30Rewards.Add(rpip30Rewards, oldRewards) -} - -// Calculates the RPL rewards for the given interval -func (r *treeGeneratorImpl_v8_rolling) calculateRplRewards() error { - pendingRewards := 
r.networkState.NetworkDetails.PendingRPLRewards - r.log.Printlnf("%s Pending RPL rewards: %s (%.3f)", r.logPrefix, pendingRewards.String(), eth.WeiToEth(pendingRewards)) - if pendingRewards.Cmp(common.Big0) == 0 { - return fmt.Errorf("there are no pending RPL rewards, so this interval cannot be used for rewards submission") - } - - // Get baseline Protocol DAO rewards - pDaoPercent := r.networkState.NetworkDetails.ProtocolDaoRewardsPercent - pDaoRewards := NewQuotedBigInt(0) - pDaoRewards.Mul(pendingRewards, pDaoPercent) - pDaoRewards.Div(&pDaoRewards.Int, eth.EthToWei(1)) - r.log.Printlnf("%s Expected Protocol DAO rewards: %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(&pDaoRewards.Int)) - - // Get node operator rewards - nodeOpPercent := r.networkState.NetworkDetails.NodeOperatorRewardsPercent - totalNodeRewards := big.NewInt(0) - totalNodeRewards.Mul(pendingRewards, nodeOpPercent) - totalNodeRewards.Div(totalNodeRewards, eth.EthToWei(1)) - r.log.Printlnf("%s Approx. total collateral RPL rewards: %s (%.3f)", r.logPrefix, totalNodeRewards.String(), eth.WeiToEth(totalNodeRewards)) - - // Calculate the effective stake of each node, scaling by their participation in this interval - // Before entering this function, make sure to hard-code MaxCollateralFraction to 1.5 eth (150% in wei), to comply with RPIP-30. - // Do it here, as the network state value will still be used for vote power, so doing it upstream is likely to introduce more issues. - // Doing it here also ensures that v1-7 continue to run correctly on networks other than mainnet where the max collateral fraction may not have always been 150%. - r.networkState.NetworkDetails.MaxCollateralFraction = big.NewInt(1.5e18) // 1.5 eth is 150% in wei - trueNodeEffectiveStakes, totalNodeEffectiveStake, err := r.networkState.CalculateTrueEffectiveStakes(true, true) - if err != nil { - return fmt.Errorf("error calculating effective RPL stakes: %w", err) - } - - // Calculate the RPIP-30 weight of each node, scaling by their participation in this interval - nodeWeights, totalNodeWeight, err := r.networkState.CalculateNodeWeights() - if err != nil { - return fmt.Errorf("error calculating node weights: %w", err) - } - - // Operate normally if any node has rewards - if totalNodeEffectiveStake.Sign() > 0 && totalNodeWeight.Sign() > 0 { - // Make sure to record totalNodeWeight in the rewards file - quotedTotalNodeWeight := NewQuotedBigInt(0) - quotedTotalNodeWeight.Set(totalNodeWeight) - r.rewardsFile.TotalRewards.TotalNodeWeight = quotedTotalNodeWeight - - r.log.Printlnf("%s Calculating individual collateral rewards...", r.logPrefix) - for i, nodeDetails := range r.networkState.NodeDetails { - // Get how much RPL goes to this node - nodeRplRewards := r.calculateNodeRplRewards( - totalNodeRewards, - trueNodeEffectiveStakes[nodeDetails.NodeAddress], - totalNodeEffectiveStake, - nodeWeights[nodeDetails.NodeAddress], - totalNodeWeight, - ) - - // If there are pending rewards, add it to the map - if nodeRplRewards.Sign() == 1 { - rewardsForNode, exists := r.rewardsFile.NodeRewards[nodeDetails.NodeAddress] - if !exists { - // Get the network the rewards should go to - network := r.networkState.NodeDetails[i].RewardNetwork.Uint64() - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeDetails.NodeAddress] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: 
NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[nodeDetails.NodeAddress] = rewardsForNode - } - rewardsForNode.CollateralRpl.Add(&rewardsForNode.CollateralRpl.Int, nodeRplRewards) - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.CollateralRpl.Add(&rewardsForNetwork.CollateralRpl.Int, nodeRplRewards) - } - } - - // Sanity check to make sure we arrived at the correct total - delta := big.NewInt(0) - totalCalculatedNodeRewards := big.NewInt(0) - for _, networkRewards := range r.rewardsFile.NetworkRewards { - totalCalculatedNodeRewards.Add(totalCalculatedNodeRewards, &networkRewards.CollateralRpl.Int) - } - delta.Sub(totalNodeRewards, totalCalculatedNodeRewards).Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return fmt.Errorf("error calculating collateral RPL: total was %s, but expected %s; error was too large", totalCalculatedNodeRewards.String(), totalNodeRewards.String()) - } - r.rewardsFile.TotalRewards.TotalCollateralRpl.Int = *totalCalculatedNodeRewards - r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedNodeRewards.String(), delta.String()) - pDaoRewards.Sub(pendingRewards, totalCalculatedNodeRewards) - } else { - // In this situation, none of the nodes in the network had eligible rewards so send it all to the pDAO - pDaoRewards.Add(&pDaoRewards.Int, totalNodeRewards) - r.log.Printlnf("%s None of the nodes were eligible for collateral rewards, sending everything to the pDAO; now at %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(&pDaoRewards.Int)) - } - - // Handle Oracle DAO rewards - oDaoPercent := r.networkState.NetworkDetails.TrustedNodeOperatorRewardsPercent - totalODaoRewards := big.NewInt(0) - totalODaoRewards.Mul(pendingRewards, oDaoPercent) - totalODaoRewards.Div(totalODaoRewards, eth.EthToWei(1)) - r.log.Printlnf("%s Total Oracle DAO RPL rewards: %s (%.3f)", r.logPrefix, totalODaoRewards.String(), eth.WeiToEth(totalODaoRewards)) - - oDaoDetails := r.networkState.OracleDaoMemberDetails - - // Calculate the true effective time of each oDAO node based on their participation in this interval - totalODaoNodeTime := big.NewInt(0) - trueODaoNodeTimes := map[common.Address]*big.Int{} - for _, details := range oDaoDetails { - // Get the timestamp of the node joining the oDAO - joinTime := details.JoinedTime - - // Get the actual effective time, scaled based on participation - intervalDuration := r.networkState.NetworkDetails.IntervalDuration - intervalDurationBig := big.NewInt(int64(intervalDuration.Seconds())) - participationTime := big.NewInt(0).Set(intervalDurationBig) - snapshotBlockTime := time.Unix(int64(r.elSnapshotHeader.Time), 0) - eligibleDuration := snapshotBlockTime.Sub(joinTime) - if eligibleDuration < intervalDuration { - participationTime = big.NewInt(int64(eligibleDuration.Seconds())) - } - trueODaoNodeTimes[details.Address] = participationTime - - // Add it to the total - totalODaoNodeTime.Add(totalODaoNodeTime, participationTime) - } - - for _, details := range oDaoDetails { - address := details.Address - - // Calculate the oDAO rewards for the node: (participation time) * (total oDAO 
rewards) / (total participation time) - individualOdaoRewards := big.NewInt(0) - individualOdaoRewards.Mul(trueODaoNodeTimes[address], totalODaoRewards) - individualOdaoRewards.Div(individualOdaoRewards, totalODaoNodeTime) - - rewardsForNode, exists := r.rewardsFile.NodeRewards[address] - if !exists { - // Get the network the rewards should go to - network := r.networkState.NodeDetailsByAddress[address].RewardNetwork.Uint64() - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[address] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[address] = rewardsForNode - - } - rewardsForNode.OracleDaoRpl.Add(&rewardsForNode.OracleDaoRpl.Int, individualOdaoRewards) - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.OracleDaoRpl.Add(&rewardsForNetwork.OracleDaoRpl.Int, individualOdaoRewards) - } - - // Sanity check to make sure we arrived at the correct total - totalCalculatedOdaoRewards := big.NewInt(0) - delta := big.NewInt(0) - for _, networkRewards := range r.rewardsFile.NetworkRewards { - totalCalculatedOdaoRewards.Add(totalCalculatedOdaoRewards, &networkRewards.OracleDaoRpl.Int) - } - delta.Sub(totalODaoRewards, totalCalculatedOdaoRewards).Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return fmt.Errorf("error calculating ODao RPL: total was %s, but expected %s; error was too large", totalCalculatedOdaoRewards.String(), totalODaoRewards.String()) - } - r.rewardsFile.TotalRewards.TotalOracleDaoRpl.Int = *totalCalculatedOdaoRewards - r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedOdaoRewards.String(), delta.String()) - - // Get actual protocol DAO rewards - pDaoRewards.Sub(&pDaoRewards.Int, totalCalculatedOdaoRewards) - r.rewardsFile.TotalRewards.ProtocolDaoRpl = pDaoRewards - r.log.Printlnf("%s Actual Protocol DAO rewards: %s to account for truncation", r.logPrefix, pDaoRewards.String()) - - return nil - -} - -// Calculates the ETH rewards for the given interval -func (r *treeGeneratorImpl_v8_rolling) calculateEthRewards(checkBeaconPerformance bool) error { - - // Get the Smoothing Pool contract's balance - r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance - r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) - - // Ignore the ETH calculation if there are no rewards - if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { - return nil - } - - if r.rewardsFile.Index == 0 { - // This is the first interval, Smoothing Pool rewards are ignored on the first interval since it doesn't have a discrete start time - return nil - } - - // Get the EL block for the start of this interval - startElBlockHeader, err := r.getStartBlocksForInterval() - if err != nil { - return err - } - - r.elStartTime = time.Unix(int64(startElBlockHeader.Time), 0) - r.elEndTime = time.Unix(int64(r.elSnapshotHeader.Time), 0) - 
r.intervalSeconds = big.NewInt(int64(r.elEndTime.Sub(r.elStartTime) / time.Second)) - - // Process the attestation performance for each minipool during this interval - r.intervalDutiesInfo = &IntervalDutiesInfo{ - Index: r.rewardsFile.Index, - Slots: map[uint64]*SlotInfo{}, - } - - // Determine how much ETH each node gets and how much the pool stakers get - poolStakerETH, nodeOpEth, err := r.calculateNodeRewards() - if err != nil { - return err - } - - // Update the rewards maps - for nodeAddress, nodeInfo := range r.nodeDetails { - if nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { - rewardsForNode, exists := r.rewardsFile.NodeRewards[nodeAddress] - if !exists { - network := nodeInfo.RewardsNetwork - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeAddress] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[nodeAddress] = rewardsForNode - } - rewardsForNode.SmoothingPoolEth.Add(&rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) - - // Add minipool rewards to the JSON - for _, minipoolInfo := range nodeInfo.Minipools { - successfulAttestations := uint64(minipoolInfo.AttestationCount) - missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v3{ - Pubkey: minipoolInfo.ValidatorPubkey.Hex(), - SuccessfulAttestations: successfulAttestations, - MissedAttestations: missingAttestations, - AttestationScore: &QuotedBigInt{Int: minipoolInfo.AttestationScore.Int}, - EthEarned: &QuotedBigInt{Int: *minipoolInfo.MinipoolShare}, - MissingAttestationSlots: []uint64{}, - } - if successfulAttestations+missingAttestations == 0 { - // Don't include minipools that have zero attestations - continue - } - for slot := range minipoolInfo.MissingAttestationSlots { - performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) - } - r.rewardsFile.MinipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance - } - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.SmoothingPoolEth.Add(&rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) - } - } - - // Set the totals - r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int = *poolStakerETH - r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Int = *nodeOpEth - r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Int = *r.smoothingPoolBalance - return nil - -} - -// Calculate the distribution of Smoothing Pool ETH to each node -func (r *treeGeneratorImpl_v8_rolling) calculateNodeRewards() (*big.Int, *big.Int, error) { - - // Get the list of cheaters - cheaters := r.getCheaters() - - // Get the latest scores from the rolling record - minipools, totalScore, attestationCount := r.rollingRecord.GetScores(cheaters) - - // If there weren't any successful attestations, everything goes to the pool stakers - if totalScore.Cmp(common.Big0) == 0 || attestationCount == 0 { - 
r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... sending the whole smoothing pool balance to the pool stakers.", totalScore.String(), attestationCount) - return r.smoothingPoolBalance, big.NewInt(0), nil - } - - totalEthForMinipools := big.NewInt(0) - totalNodeOpShare := big.NewInt(0) - totalNodeOpShare.Mul(r.smoothingPoolBalance, totalScore) - totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(attestationCount))) - totalNodeOpShare.Div(totalNodeOpShare, eth.EthToWei(1)) - - r.nodeDetails = map[common.Address]*NodeSmoothingDetails{} - for _, minipool := range minipools { - // Get the node amount - nodeInfo, exists := r.nodeDetails[minipool.NodeAddress] - if !exists { - nodeInfo = &NodeSmoothingDetails{ - Minipools: []*MinipoolInfo{}, - SmoothingPoolEth: big.NewInt(0), - RewardsNetwork: r.networkState.NodeDetailsByAddress[minipool.NodeAddress].RewardNetwork.Uint64(), - } - r.nodeDetails[minipool.NodeAddress] = nodeInfo - } - nodeInfo.Minipools = append(nodeInfo.Minipools, minipool) - - // Add the minipool's score to the total node score - minipoolEth := big.NewInt(0).Set(totalNodeOpShare) - minipoolEth.Mul(minipoolEth, &minipool.AttestationScore.Int) - minipoolEth.Div(minipoolEth, totalScore) - minipool.MinipoolShare = minipoolEth - nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) - } - - // Add the node amounts to the total - for _, nodeInfo := range r.nodeDetails { - totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) - } - - // This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShare but this accounts for any cumulative floating point errors - truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) - - // Sanity check to make sure we arrived at the correct total - delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare) - delta.Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) - } - - // Calculate the staking pool share and the node op share - poolStakerShare := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare) - - r.log.Printlnf("%s Pool staker ETH: %s (%.3f)", r.logPrefix, poolStakerShare.String(), eth.WeiToEth(poolStakerShare)) - r.log.Printlnf("%s Node Op ETH: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare)) - r.log.Printlnf("%s Calculated NO ETH: %s (error = %s wei)", r.logPrefix, totalEthForMinipools.String(), delta.String()) - r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String()) - - return truePoolStakerAmount, totalEthForMinipools, nil - -} - -// Validates that the provided network is legal -func (r *treeGeneratorImpl_v8_rolling) validateNetwork(network uint64) (bool, error) { - valid, exists := r.validNetworkCache[network] - if !exists { - var err error - valid, err = tnsettings.GetNetworkEnabled(r.rp, big.NewInt(int64(network)), r.opts) - if err != nil { - return false, err - } - r.validNetworkCache[network] = valid - } - - return valid, nil -} - -// Gets the EL header for the given interval's start block -func (r *treeGeneratorImpl_v8_rolling) getStartBlocksForInterval() (*types.Header, error) { - // Get the Beacon block for the start slot of the record - r.rewardsFile.ConsensusStartBlock = 
r.rollingRecord.StartSlot - r.rewardsFile.MinipoolPerformanceFile.ConsensusStartBlock = r.rollingRecord.StartSlot - beaconBlock, exists, err := r.bc.GetBeaconBlock(fmt.Sprint(r.rollingRecord.StartSlot)) - if err != nil { - return nil, fmt.Errorf("error verifying block from previous interval: %w", err) - } - if !exists { - return nil, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", r.rollingRecord.StartSlot) - } - - // Get the EL block for that Beacon block - elBlockNumber := beaconBlock.ExecutionBlockNumber - r.rewardsFile.ExecutionStartBlock = elBlockNumber - r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock - startElHeader, err := r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) - if err != nil { - return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) - } - - return startElHeader, nil -} - -// Detect and flag any cheaters -func (r *treeGeneratorImpl_v8_rolling) getCheaters() map[common.Address]bool { - cheatingNodes := map[common.Address]bool{} - three := big.NewInt(3) - - for _, nd := range r.networkState.NodeDetails { - for _, mpd := range r.networkState.MinipoolDetailsByNode[nd.NodeAddress] { - if mpd.PenaltyCount.Cmp(three) >= 0 { - // If any minipool has 3+ penalties, ban the entire node - cheatingNodes[nd.NodeAddress] = true - break - } - } - } - - return cheatingNodes -} diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index 171758b35..84384cd1f 100644 --- a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -10,9 +10,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ipfs/go-cid" "github.com/rocket-pool/rocketpool-go/rewards" - "github.com/rocket-pool/rocketpool-go/rocketpool" - tnsettings "github.com/rocket-pool/rocketpool-go/settings/trustednode" rptypes "github.com/rocket-pool/rocketpool-go/types" "github.com/rocket-pool/rocketpool-go/utils/eth" rpstate "github.com/rocket-pool/rocketpool-go/utils/state" @@ -27,30 +26,31 @@ var six = big.NewInt(6) // Implementation for tree generator ruleset v8 type treeGeneratorImpl_v8 struct { - networkState *state.NetworkState - rewardsFile *RewardsFile_v3 - elSnapshotHeader *types.Header - log *log.ColorLogger - logPrefix string - rp *rocketpool.RocketPool - cfg *config.RocketPoolConfig - bc beacon.Client - opts *bind.CallOpts - nodeDetails []*NodeSmoothingDetails - smoothingPoolBalance *big.Int - intervalDutiesInfo *IntervalDutiesInfo - slotsPerEpoch uint64 - validatorIndexMap map[string]*MinipoolInfo - elStartTime time.Time - elEndTime time.Time - validNetworkCache map[uint64]bool - epsilon *big.Int - intervalSeconds *big.Int - beaconConfig beacon.Eth2Config - validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus - totalAttestationScore *big.Int - successfulAttestations uint64 - genesisTime time.Time + networkState *state.NetworkState + rewardsFile *RewardsFile_v3 + elSnapshotHeader *types.Header + log *log.ColorLogger + logPrefix string + rp RewardsExecutionClient + previousRewardsPoolAddresses []common.Address + bc RewardsBeaconClient + opts *bind.CallOpts + nodeDetails []*NodeSmoothingDetails + smoothingPoolBalance 
*big.Int + intervalDutiesInfo *IntervalDutiesInfo + slotsPerEpoch uint64 + validatorIndexMap map[string]*MinipoolInfo + elStartTime time.Time + elEndTime time.Time + validNetworkCache map[uint64]bool + epsilon *big.Int + intervalSeconds *big.Int + beaconConfig beacon.Eth2Config + validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus + totalAttestationScore *big.Int + successfulAttestations uint64 + genesisTime time.Time + invalidNetworkNodes map[common.Address]uint64 } // Create a new tree generator @@ -58,15 +58,14 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 return &treeGeneratorImpl_v8{ rewardsFile: &RewardsFile_v3{ RewardsFileHeader: &RewardsFileHeader{ - RewardsFileVersion: 3, - RulesetVersion: 8, - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - IntervalsPassed: intervalsPassed, - InvalidNetworkNodes: map[common.Address]uint64{}, + RewardsFileVersion: 3, + RulesetVersion: 8, + Index: index, + StartTime: startTime.UTC(), + EndTime: endTime.UTC(), + ConsensusEndBlock: consensusBlock, + ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), + IntervalsPassed: intervalsPassed, TotalRewards: &TotalRewards{ ProtocolDaoRpl: NewQuotedBigInt(0), TotalCollateralRpl: NewQuotedBigInt(0), @@ -77,14 +76,14 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 }, NetworkRewards: map[uint64]*NetworkRewardsInfo{}, }, - NodeRewards: map[common.Address]*NodeRewardsInfo_v3{}, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ + NodeRewards: map[common.Address]*NodeRewardsInfo_v2{}, + MinipoolPerformanceFile: MinipoolPerformanceFile_v2{ Index: index, StartTime: startTime.UTC(), EndTime: endTime.UTC(), ConsensusEndBlock: consensusBlock, ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v3{}, + MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, }, }, validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, @@ -94,6 +93,7 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 logPrefix: logPrefix, totalAttestationScore: big.NewInt(0), networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, } } @@ -102,20 +102,20 @@ func (r *treeGeneratorImpl_v8) getRulesetVersion() uint64 { return r.rewardsFile.RulesetVersion } -func (r *treeGeneratorImpl_v8) generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) { +func (r *treeGeneratorImpl_v8) generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) { r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) // Provision some struct params r.rp = rp - r.cfg = cfg + r.previousRewardsPoolAddresses = previousRewardsPoolAddresses r.bc = bc r.validNetworkCache = map[uint64]bool{ 0: true, } // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) + r.rewardsFile.Network = networkName r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion @@ -157,7 +157,7 @@ func (r *treeGeneratorImpl_v8) 
generateTree(rp *rocketpool.RocketPool, cfg *conf r.updateNetworksAndTotals() // Generate the Merkle Tree - err = r.rewardsFile.generateMerkleTree() + err = r.rewardsFile.GenerateMerkleTree() if err != nil { return nil, fmt.Errorf("error generating Merkle tree: %w", err) } @@ -169,24 +169,27 @@ func (r *treeGeneratorImpl_v8) generateTree(rp *rocketpool.RocketPool, cfg *conf }) } - return r.rewardsFile, nil + return &GenerateTreeResult{ + RewardsFile: r.rewardsFile, + InvalidNetworkNodes: r.invalidNetworkNodes, + MinipoolPerformanceFile: &r.rewardsFile.MinipoolPerformanceFile, + }, nil } // Quickly calculates an approximate of the staker's share of the smoothing pool balance without processing Beacon performance // Used for approximate returns in the rETH ratio update -func (r *treeGeneratorImpl_v8) approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) { +func (r *treeGeneratorImpl_v8) approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) { r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) r.rp = rp - r.cfg = cfg r.bc = bc r.validNetworkCache = map[uint64]bool{ 0: true, } // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) + r.rewardsFile.Network = networkName r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion @@ -363,11 +366,11 @@ func (r *treeGeneratorImpl_v8) calculateRplRewards() error { return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeDetails.NodeAddress] = network + r.invalidNetworkNodes[nodeDetails.NodeAddress] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: NewQuotedBigInt(0), @@ -458,11 +461,11 @@ func (r *treeGeneratorImpl_v8) calculateRplRewards() error { return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[address] = network + r.invalidNetworkNodes[address] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: NewQuotedBigInt(0), @@ -530,7 +533,7 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) // Get the start time of this interval based on the event from the previous one //previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, r.opts) // This is immutable so querying at the head is fine and mitigates issues around calls for pruned EL state - previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, nil) + previousIntervalEvent, err := r.rp.GetRewardSnapshotEvent(r.previousRewardsPoolAddresses, r.rewardsFile.Index-1, nil) if err != nil { return err } @@ -613,11 +616,11 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeInfo.Address] = network + r.invalidNetworkNodes[nodeInfo.Address] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: 
NewQuotedBigInt(0),
@@ -631,7 +634,7 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool)
 				for _, minipoolInfo := range nodeInfo.Minipools {
 					successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations))
 					missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots))
-					performance := &SmoothingPoolMinipoolPerformance_v3{
+					performance := &SmoothingPoolMinipoolPerformance_v2{
 						Pubkey:                  minipoolInfo.ValidatorPubkey.Hex(),
 						SuccessfulAttestations:  successfulAttestations,
 						MissedAttestations:      missingAttestations,
@@ -1048,6 +1051,7 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error {
 				Minipools:        []*MinipoolInfo{},
 				SmoothingPoolEth: big.NewInt(0),
 				RewardsNetwork:   nativeNodeDetails.RewardNetwork.Uint64(),
+				RplStake:         nativeNodeDetails.RplStake,
 			}
 			nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState
@@ -1102,6 +1106,12 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error {
 		}
 	}
 
+	// Populate the eligible borrowed ETH field for all nodes
+	for _, nodeDetails := range r.nodeDetails {
+		nnd := r.networkState.NodeDetailsByAddress[nodeDetails.Address]
+		nodeDetails.EligibleBorrowedEth = r.networkState.GetEligibleBorrowedEth(nnd)
+	}
+
 	return nil
 }
@@ -1111,7 +1121,7 @@ func (r *treeGeneratorImpl_v8) validateNetwork(network uint64) (bool, error) {
 	valid, exists := r.validNetworkCache[network]
 	if !exists {
 		var err error
-		valid, err = tnsettings.GetNetworkEnabled(r.rp, big.NewInt(int64(network)), r.opts)
+		valid, err = r.rp.GetNetworkEnabled(big.NewInt(int64(network)), r.opts)
 		if err != nil {
 			return false, err
 		}
@@ -1158,7 +1168,7 @@ func (r *treeGeneratorImpl_v8) getStartBlocksForInterval(previousIntervalEvent r
 		// We are pre-merge, so get the first block after the one from the previous interval
 		r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1
 		r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock
-		startElHeader, err = r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock)))
+		startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock)))
 		if err != nil {
 			return nil, fmt.Errorf("error getting EL start block %d: %w", r.rewardsFile.ExecutionStartBlock, err)
 		}
@@ -1166,7 +1176,7 @@
 		// We are post-merge, so get the EL block corresponding to the BC block
 		r.rewardsFile.ExecutionStartBlock = elBlockNumber
 		r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock
-		startElHeader, err = r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber)))
+		startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber)))
 		if err != nil {
 			return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err)
 		}
@@ -1200,3 +1210,7 @@ func (r *treeGeneratorImpl_v8) getMinipoolBondAndNodeFee(details *rpstate.Native
 	return currentBond, currentFee
 }
+
+func (r *treeGeneratorImpl_v8) saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) {
+	return saveJSONArtifacts(smartnode, treeResult, nodeTrusted)
+}
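Swapping the concrete *rocketpool.RocketPool for the narrow RewardsExecutionClient interface (implemented in production by defaultRewardsExecutionClient earlier in this PR) is what lets the generators run against a test double. A minimal sketch of such a double (an editor's illustration, not part of the diff; fakeExecutionClient is hypothetical and the exact interface surface is inferred from the methods shown above):

package rewards

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
)

// fakeExecutionClient stubs the parts of RewardsExecutionClient that
// validateNetwork and getStartBlocksForInterval exercise.
type fakeExecutionClient struct {
	headers         map[uint64]*ethtypes.Header
	enabledNetworks map[uint64]bool
}

func (f *fakeExecutionClient) GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) {
	return f.enabledNetworks[networkId.Uint64()], nil
}

func (f *fakeExecutionClient) HeaderByNumber(ctx context.Context, block *big.Int) (*ethtypes.Header, error) {
	h, ok := f.headers[block.Uint64()]
	if !ok {
		return nil, fmt.Errorf("no canned header for block %d", block.Uint64())
	}
	return h, nil
}

// The remaining methods shown earlier (GetRewardsEvent, GetRewardSnapshotEvent,
// GetRewardIndex, GetContract, BalanceAt, Client) would return canned values
// the same way before the fake satisfies the full interface.

diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go
new file mode 100644
index 000000000..d3d7f0880
--- /dev/null
+++ 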
b/shared/services/rewards/generator-impl-v9-v10.go
@@ -0,0 +1,1313 @@
+package rewards
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ipfs/go-cid"
+	"github.com/rocket-pool/rocketpool-go/rewards"
+	rptypes "github.com/rocket-pool/rocketpool-go/types"
+	"github.com/rocket-pool/rocketpool-go/utils/eth"
+	"github.com/rocket-pool/smartnode/shared/services/beacon"
+	"github.com/rocket-pool/smartnode/shared/services/config"
+	"github.com/rocket-pool/smartnode/shared/services/rewards/fees"
+	"github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types"
+	sszbig "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big"
+	"github.com/rocket-pool/smartnode/shared/services/state"
+	"github.com/rocket-pool/smartnode/shared/utils/log"
+	"golang.org/x/sync/errgroup"
+)
+
+// Compile-time assertion that SSZFile_v1 implements IRewardsFile
+var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil)
+
+// Implementation for tree generator rulesets v9 and v10
+type treeGeneratorImpl_v9_v10 struct {
+	networkState                 *state.NetworkState
+	rewardsFile                  *ssz_types.SSZFile_v1
+	elSnapshotHeader             *types.Header
+	snapshotEnd                  *SnapshotEnd
+	log                          *log.ColorLogger
+	logPrefix                    string
+	rp                           RewardsExecutionClient
+	previousRewardsPoolAddresses []common.Address
+	bc                           RewardsBeaconClient
+	opts                         *bind.CallOpts
+	nodeDetails                  []*NodeSmoothingDetails
+	smoothingPoolBalance         *big.Int
+	intervalDutiesInfo           *IntervalDutiesInfo
+	slotsPerEpoch                uint64
+	validatorIndexMap            map[string]*MinipoolInfo
+	elStartTime                  time.Time
+	elEndTime                    time.Time
+	validNetworkCache            map[uint64]bool
+	epsilon                      *big.Int
+	intervalSeconds              *big.Int
+	beaconConfig                 beacon.Eth2Config
+	validatorStatusMap           map[rptypes.ValidatorPubkey]beacon.ValidatorStatus
+	totalAttestationScore        *big.Int
+	successfulAttestations       uint64
+	genesisTime                  time.Time
+	invalidNetworkNodes          map[common.Address]uint64
+	minipoolPerformanceFile      *MinipoolPerformanceFile_v2
+	nodeRewards                  map[common.Address]*ssz_types.NodeReward
+	networkRewards               map[ssz_types.Layer]*ssz_types.NetworkReward
+
+	// Fields for RPIP-62 bonus calculations
+	// Withdrawals made by a minipool's validator.
+ minipoolWithdrawals map[common.Address]*big.Int +} + +// Create a new tree generator +func newTreeGeneratorImpl_v9_v10(rulesetVersion uint64, log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v9_v10 { + return &treeGeneratorImpl_v9_v10{ + rewardsFile: &ssz_types.SSZFile_v1{ + RewardsFileVersion: 3, + RulesetVersion: rulesetVersion, + Index: index, + IntervalsPassed: intervalsPassed, + TotalRewards: &ssz_types.TotalRewards{ + ProtocolDaoRpl: sszbig.NewUint256(0), + TotalCollateralRpl: sszbig.NewUint256(0), + TotalOracleDaoRpl: sszbig.NewUint256(0), + TotalSmoothingPoolEth: sszbig.NewUint256(0), + PoolStakerSmoothingPoolEth: sszbig.NewUint256(0), + NodeOperatorSmoothingPoolEth: sszbig.NewUint256(0), + TotalNodeWeight: sszbig.NewUint256(0), + }, + NetworkRewards: ssz_types.NetworkRewards{}, + NodeRewards: ssz_types.NodeRewards{}, + }, + validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, + validatorIndexMap: map[string]*MinipoolInfo{}, + elSnapshotHeader: elSnapshotHeader, + snapshotEnd: snapshotEnd, + log: log, + logPrefix: logPrefix, + totalAttestationScore: big.NewInt(0), + networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, + minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ + Index: index, + MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + }, + nodeRewards: map[common.Address]*ssz_types.NodeReward{}, + networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, + minipoolWithdrawals: map[common.Address]*big.Int{}, + } +} + +// Get the version of the ruleset used by this generator +func (r *treeGeneratorImpl_v9_v10) getRulesetVersion() uint64 { + return r.rewardsFile.RulesetVersion +} + +func (r *treeGeneratorImpl_v9_v10) generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) { + + r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) + + // Provision some struct params + r.rp = rp + r.previousRewardsPoolAddresses = previousRewardsPoolAddresses + r.bc = bc + r.validNetworkCache = map[uint64]bool{ + 0: true, + } + + // Set the network name + r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) + r.minipoolPerformanceFile.Network = networkName + r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + + // Get the Beacon config + r.beaconConfig = r.networkState.BeaconConfig + r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch + r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0) + + // Set the EL client call opts + r.opts = &bind.CallOpts{ + BlockNumber: r.elSnapshotHeader.Number, + } + + r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) + + // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation + nodeCount := len(r.networkState.NodeDetails) + minipoolCount := len(r.networkState.MinipoolDetails) + if nodeCount > minipoolCount { + r.epsilon = big.NewInt(int64(nodeCount)) + } else { + r.epsilon = big.NewInt(int64(minipoolCount)) + } + + // Calculate the RPL rewards + err := r.calculateRplRewards() + if err != nil { + return nil, fmt.Errorf("error calculating RPL rewards: %w", err) + } + + // Calculate the ETH 
rewards
+	err = r.calculateEthRewards(true)
+	if err != nil {
+		return nil, fmt.Errorf("error calculating ETH rewards: %w", err)
+	}
+
+	// Sort and assign the maps to the ssz file lists
+	for nodeAddress, nodeReward := range r.nodeRewards {
+		copy(nodeReward.Address[:], nodeAddress[:])
+		r.rewardsFile.NodeRewards = append(r.rewardsFile.NodeRewards, nodeReward)
+	}
+
+	for layer, networkReward := range r.networkRewards {
+		networkReward.Network = layer
+		r.rewardsFile.NetworkRewards = append(r.rewardsFile.NetworkRewards, networkReward)
+	}
+
+	// Generate the Merkle Tree
+	err = r.rewardsFile.GenerateMerkleTree()
+	if err != nil {
+		return nil, fmt.Errorf("error generating Merkle tree: %w", err)
+	}
+
+	// Sort all of the missed attestations so the files are always generated in the same state
+	for _, minipoolInfo := range r.minipoolPerformanceFile.MinipoolPerformance {
+		sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool {
+			return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j]
+		})
+	}
+
+	return &GenerateTreeResult{
+		RewardsFile:             r.rewardsFile,
+		InvalidNetworkNodes:     r.invalidNetworkNodes,
+		MinipoolPerformanceFile: r.minipoolPerformanceFile,
+	}, nil
+
+}
+
+// Quickly calculates an approximation of the staker's share of the smoothing pool balance without processing Beacon performance
+// Used for approximate returns in the rETH ratio update
+func (r *treeGeneratorImpl_v9_v10) approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) {
+	r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion)
+
+	r.rp = rp
+	r.bc = bc
+	r.validNetworkCache = map[uint64]bool{
+		0: true,
+	}
+
+	// Set the network name
+	r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName)
+	r.minipoolPerformanceFile.Network = networkName
+	r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion
+	r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion
+
+	// Get the Beacon config
+	r.beaconConfig = r.networkState.BeaconConfig
+	r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch
+	r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0)
+
+	// Set the EL client call opts
+	r.opts = &bind.CallOpts{
+		BlockNumber: r.elSnapshotHeader.Number,
+	}
+
+	r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails))
+
+	// Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation
+	nodeCount := len(r.networkState.NodeDetails)
+	minipoolCount := len(r.networkState.MinipoolDetails)
+	if nodeCount > minipoolCount {
+		r.epsilon = big.NewInt(int64(nodeCount))
+	} else {
+		r.epsilon = big.NewInt(int64(minipoolCount))
+	}
+
+	// Calculate the ETH rewards
+	err := r.calculateEthRewards(false)
+	if err != nil {
+		return nil, fmt.Errorf("error calculating ETH rewards: %w", err)
+	}
+
+	return r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int, nil
+}
+
+func (r *treeGeneratorImpl_v9_v10) calculateNodeRplRewards(
+	collateralRewards *big.Int,
+	nodeWeight *big.Int,
+	totalNodeWeight *big.Int,
+) *big.Int {
+
+	if nodeWeight.Sign() <= 0 {
+		return big.NewInt(0)
+	}
+
+	// (collateralRewards * nodeWeight / totalNodeWeight)
+	rpip30Rewards := big.NewInt(0).Mul(collateralRewards, nodeWeight)
+	rpip30Rewards.Quo(rpip30Rewards, totalNodeWeight)
+
+	return rpip30Rewards
+}
+
+// Calculates the RPL rewards for the given interval
+func 
(r *treeGeneratorImpl_v9_v10) calculateRplRewards() error { + pendingRewards := r.networkState.NetworkDetails.PendingRPLRewards + r.log.Printlnf("%s Pending RPL rewards: %s (%.3f)", r.logPrefix, pendingRewards.String(), eth.WeiToEth(pendingRewards)) + if pendingRewards.Cmp(common.Big0) == 0 { + return fmt.Errorf("there are no pending RPL rewards, so this interval cannot be used for rewards submission") + } + + // Get baseline Protocol DAO rewards + pDaoPercent := r.networkState.NetworkDetails.ProtocolDaoRewardsPercent + pDaoRewards := big.NewInt(0) + pDaoRewards.Mul(pendingRewards, pDaoPercent) + pDaoRewards.Div(pDaoRewards, oneEth) + r.log.Printlnf("%s Expected Protocol DAO rewards: %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + + // Get node operator rewards + nodeOpPercent := r.networkState.NetworkDetails.NodeOperatorRewardsPercent + totalNodeRewards := big.NewInt(0) + totalNodeRewards.Mul(pendingRewards, nodeOpPercent) + totalNodeRewards.Div(totalNodeRewards, oneEth) + r.log.Printlnf("%s Approx. total collateral RPL rewards: %s (%.3f)", r.logPrefix, totalNodeRewards.String(), eth.WeiToEth(totalNodeRewards)) + + // Calculate the RPIP-30 weight of each node, scaling by their participation in this interval + nodeWeights, totalNodeWeight, err := r.networkState.CalculateNodeWeights() + if err != nil { + return fmt.Errorf("error calculating node weights: %w", err) + } + + // Operate normally if any node has rewards + if totalNodeWeight.Sign() > 0 { + // Make sure to record totalNodeWeight in the rewards file + r.rewardsFile.TotalRewards.TotalNodeWeight.Set(totalNodeWeight) + + r.log.Printlnf("%s Calculating individual collateral rewards...", r.logPrefix) + for i, nodeDetails := range r.networkState.NodeDetails { + // Get how much RPL goes to this node + nodeRplRewards := r.calculateNodeRplRewards( + totalNodeRewards, + nodeWeights[nodeDetails.NodeAddress], + totalNodeWeight, + ) + + // If there are pending rewards, add it to the map + if nodeRplRewards.Sign() == 1 { + rewardsForNode, exists := r.nodeRewards[nodeDetails.NodeAddress] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetails[i].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(nodeDetails.NodeAddress.Bytes()), + ) + r.nodeRewards[nodeDetails.NodeAddress] = rewardsForNode + } + rewardsForNode.CollateralRpl.Add(rewardsForNode.CollateralRpl.Int, nodeRplRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.CollateralRpl.Int.Add(rewardsForNetwork.CollateralRpl.Int, nodeRplRewards) + } + } + + // Sanity check to make sure we arrived at the correct total + delta := big.NewInt(0) + totalCalculatedNodeRewards := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedNodeRewards.Add(totalCalculatedNodeRewards, networkRewards.CollateralRpl.Int) + } + delta.Sub(totalNodeRewards, totalCalculatedNodeRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating collateral RPL: total was %s, but expected %s; error was too large", totalCalculatedNodeRewards.String(), 
totalNodeRewards.String()) + } + r.rewardsFile.TotalRewards.TotalCollateralRpl.Int.Set(totalCalculatedNodeRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedNodeRewards.String(), delta.String()) + pDaoRewards.Sub(pendingRewards, totalCalculatedNodeRewards) + } else { + // In this situation, none of the nodes in the network had eligible rewards so send it all to the pDAO + pDaoRewards.Add(pDaoRewards, totalNodeRewards) + r.log.Printlnf("%s None of the nodes were eligible for collateral rewards, sending everything to the pDAO; now at %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + } + + // Handle Oracle DAO rewards + oDaoPercent := r.networkState.NetworkDetails.TrustedNodeOperatorRewardsPercent + totalODaoRewards := big.NewInt(0) + totalODaoRewards.Mul(pendingRewards, oDaoPercent) + totalODaoRewards.Div(totalODaoRewards, oneEth) + r.log.Printlnf("%s Total Oracle DAO RPL rewards: %s (%.3f)", r.logPrefix, totalODaoRewards.String(), eth.WeiToEth(totalODaoRewards)) + + oDaoDetails := r.networkState.OracleDaoMemberDetails + + // Calculate the true effective time of each oDAO node based on their participation in this interval + totalODaoNodeTime := big.NewInt(0) + trueODaoNodeTimes := map[common.Address]*big.Int{} + for _, details := range oDaoDetails { + // Get the timestamp of the node joining the oDAO + joinTime := details.JoinedTime + + // Get the actual effective time, scaled based on participation + intervalDuration := r.networkState.NetworkDetails.IntervalDuration + intervalDurationBig := big.NewInt(int64(intervalDuration.Seconds())) + participationTime := big.NewInt(0).Set(intervalDurationBig) + snapshotBlockTime := time.Unix(int64(r.elSnapshotHeader.Time), 0) + eligibleDuration := snapshotBlockTime.Sub(joinTime) + if eligibleDuration < intervalDuration { + participationTime = big.NewInt(int64(eligibleDuration.Seconds())) + } + trueODaoNodeTimes[details.Address] = participationTime + + // Add it to the total + totalODaoNodeTime.Add(totalODaoNodeTime, participationTime) + } + + for _, details := range oDaoDetails { + address := details.Address + + // Calculate the oDAO rewards for the node: (participation time) * (total oDAO rewards) / (total participation time) + individualOdaoRewards := big.NewInt(0) + individualOdaoRewards.Mul(trueODaoNodeTimes[address], totalODaoRewards) + individualOdaoRewards.Div(individualOdaoRewards, totalODaoNodeTime) + + rewardsForNode, exists := r.nodeRewards[address] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetailsByAddress[address].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(address.Bytes()), + ) + r.nodeRewards[address] = rewardsForNode + + } + rewardsForNode.OracleDaoRpl.Add(rewardsForNode.OracleDaoRpl.Int, individualOdaoRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.OracleDaoRpl.Add(rewardsForNetwork.OracleDaoRpl.Int, individualOdaoRewards) + } + + // Sanity check to make sure we arrived at the correct total + 
totalCalculatedOdaoRewards := big.NewInt(0) + delta := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedOdaoRewards.Add(totalCalculatedOdaoRewards, networkRewards.OracleDaoRpl.Int) + } + delta.Sub(totalODaoRewards, totalCalculatedOdaoRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating ODao RPL: total was %s, but expected %s; error was too large", totalCalculatedOdaoRewards.String(), totalODaoRewards.String()) + } + r.rewardsFile.TotalRewards.TotalOracleDaoRpl.Int.Set(totalCalculatedOdaoRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedOdaoRewards.String(), delta.String()) + + // Get actual protocol DAO rewards + pDaoRewards.Sub(pDaoRewards, totalCalculatedOdaoRewards) + r.rewardsFile.TotalRewards.ProtocolDaoRpl = sszbig.NewUint256(0) + r.rewardsFile.TotalRewards.ProtocolDaoRpl.Set(pDaoRewards) + r.log.Printlnf("%s Actual Protocol DAO rewards: %s to account for truncation", r.logPrefix, pDaoRewards.String()) + + // Print total node weight + r.log.Printlnf("%s Total Node Weight: %s", r.logPrefix, totalNodeWeight) + + return nil + +} + +// Calculates the ETH rewards for the given interval +func (r *treeGeneratorImpl_v9_v10) calculateEthRewards(checkBeaconPerformance bool) error { + + // Get the Smoothing Pool contract's balance + r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance + r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + + // Ignore the ETH calculation if there are no rewards + if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { + return nil + } + + if r.rewardsFile.Index == 0 { + // This is the first interval, Smoothing Pool rewards are ignored on the first interval since it doesn't have a discrete start time + return nil + } + + // Get the start time of this interval based on the event from the previous one + //previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, r.opts) // This is immutable so querying at the head is fine and mitigates issues around calls for pruned EL state + previousIntervalEvent, err := r.rp.GetRewardSnapshotEvent(r.previousRewardsPoolAddresses, r.rewardsFile.Index-1, r.opts) + if err != nil { + return err + } + startElBlockHeader, err := r.getBlocksAndTimesForInterval(previousIntervalEvent) + if err != nil { + return err + } + + r.elStartTime = time.Unix(int64(startElBlockHeader.Time), 0) + r.elEndTime = time.Unix(int64(r.elSnapshotHeader.Time), 0) + r.intervalSeconds = big.NewInt(int64(r.elEndTime.Sub(r.elStartTime) / time.Second)) + + // Get the details for nodes eligible for Smoothing Pool rewards + // This should be all of the eth1 calls, so do them all at the start of Smoothing Pool calculation to prevent the need for an archive node during normal operations + err = r.getSmoothingPoolNodeDetails() + if err != nil { + return err + } + eligible := 0 + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible { + eligible++ + } + } + r.log.Printlnf("%s %d / %d nodes were eligible for Smoothing Pool rewards", r.logPrefix, eligible, len(r.nodeDetails)) + + // Process the attestation performance for each minipool during this interval + r.intervalDutiesInfo = &IntervalDutiesInfo{ + Index: r.rewardsFile.Index, + Slots: map[uint64]*SlotInfo{}, + } + if checkBeaconPerformance { + err = r.processAttestationsBalancesAndWithdrawalsForInterval() + if err != nil { + return err + } + } else { 
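+		// (This branch is only taken when approximating the staker's share, via calculateEthRewards(false).)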
+ // Attestation processing is disabled, just give each minipool 1 good attestation and complete slot activity so they're all scored the same + // Used for approximating rETH's share during balances calculation + validatorReq := big.NewInt(0).Set(thirtyTwoEth) + for _, nodeInfo := range r.nodeDetails { + // Check if the node is currently opted in for simplicity + if nodeInfo.IsEligible && nodeInfo.IsOptedIn && r.elEndTime.After(nodeInfo.OptInTime) { + eligibleBorrowedEth := nodeInfo.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeInfo.RplStake) + for _, minipool := range nodeInfo.Minipools { + minipool.CompletedAttestations = map[uint64]bool{0: true} + + // Make up an attestation + details := r.networkState.MinipoolDetailsByAddress[minipool.Address] + bond, fee := details.GetMinipoolBondAndNodeFee(r.elEndTime) + if r.rewardsFile.RulesetVersion >= 10 { + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + } + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, validatorReq) // Divide by 32 to get the bond as a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the minipool's score and the total score + minipool.AttestationScore.Add(&minipool.AttestationScore.Int, minipoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) + + r.successfulAttestations++ + } + } + } + } + + // Determine how much ETH each node gets and how much the pool stakers get + poolStakerETH, nodeOpEth, bonusScalar, err := r.calculateNodeRewards() + if err != nil { + return err + } + if r.rewardsFile.RulesetVersion >= 10 { + r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(bonusScalar) + } + + // Update the rewards maps + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible && nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { + rewardsForNode, exists := r.nodeRewards[nodeInfo.Address] + if !exists { + network := nodeInfo.RewardsNetwork + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[nodeInfo.Address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(nodeInfo.Address.Bytes()), + ) + r.nodeRewards[nodeInfo.Address] = rewardsForNode + } + rewardsForNode.SmoothingPoolEth.Add(rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + + // Add minipool rewards to the JSON + for _, minipoolInfo := range nodeInfo.Minipools { + successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) + missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) + performance := &SmoothingPoolMinipoolPerformance_v2{ + Pubkey: minipoolInfo.ValidatorPubkey.Hex(), + SuccessfulAttestations: successfulAttestations, + MissedAttestations: missingAttestations, + AttestationScore: minipoolInfo.AttestationScore, + EthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolShare), + BonusEthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolBonus), + ConsensusIncome: minipoolInfo.ConsensusIncome, + EffectiveCommission: QuotedBigIntFromBigInt(minipoolInfo.TotalFee), + MissingAttestationSlots: []uint64{}, + } + if successfulAttestations+missingAttestations == 0 { + // Don't include minipools that have zero attestations + continue + } + for slot := range 
minipoolInfo.MissingAttestationSlots { + performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) + } + r.minipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance + } + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.SmoothingPoolEth.Add(rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + } + } + + // Set the totals + r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(poolStakerETH) + r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeOpEth) + r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Set(r.smoothingPoolBalance) + return nil + +} + +var oneEth = big.NewInt(1000000000000000000) +var eightEth = big.NewInt(0).Mul(oneEth, big.NewInt(8)) +var fourteenPercentEth = big.NewInt(14e16) +var thirtyTwoEth = big.NewInt(0).Mul(oneEth, big.NewInt(32)) + +func (r *treeGeneratorImpl_v9_v10) calculateNodeBonuses() (*big.Int, error) { + totalConsensusBonus := big.NewInt(0) + for _, nsd := range r.nodeDetails { + if !nsd.IsEligible { + continue + } + + nodeDetails := r.networkState.NodeDetailsByAddress[nsd.Address] + eligible, _, eligibleEnd := nodeDetails.IsEligibleForBonuses(r.elStartTime, r.elEndTime) + if !eligible { + continue + } + + // Get the nodeDetails from the network state + eligibleBorrowedEth := nsd.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nsd.RplStake) + for _, mpd := range nsd.Minipools { + mpi := r.networkState.MinipoolDetailsByAddress[mpd.Address] + if !mpi.IsEligibleForBonuses(eligibleEnd) { + continue + } + bond, fee := mpi.GetMinipoolBondAndNodeFee(eligibleEnd) + feeWithBonus := fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + if fee.Cmp(feeWithBonus) >= 0 { + // This minipool won't get any bonuses, so skip it + continue + } + // This minipool will get a bonus + // It is safe to populate the optional fields from here on. 
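+			// (The optional fields are TotalFee, ConsensusIncome, and MinipoolBonus, set below only for minipools that earn a bonus.)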
+
+			fee = feeWithBonus
+			// Save fee as totalFee for the Minipool
+			mpd.TotalFee = fee
+
+			// Total fee for a minipool with a bonus shall never exceed 14%
+			if fee.Cmp(fourteenPercentEth) > 0 {
+				r.log.Printlnf("WARNING: Minipool %s has a fee of %s, which is greater than the maximum allowed of 14%%", mpd.Address.Hex(), fee.String())
+				r.log.Printlnf("WARNING: Aborting.")
+				return nil, fmt.Errorf("minipool %s has a fee of %s, which is greater than the maximum allowed of 14%%", mpd.Address.Hex(), fee.String())
+			}
+			bonusFee := big.NewInt(0).Set(fee)
+			bonusFee.Sub(bonusFee, mpi.NodeFee)
+			withdrawalTotal := r.minipoolWithdrawals[mpd.Address]
+			if withdrawalTotal == nil {
+				withdrawalTotal = big.NewInt(0)
+			}
+			consensusIncome := big.NewInt(0).Set(withdrawalTotal)
+			mpd.ConsensusIncome = &QuotedBigInt{Int: *(big.NewInt(0).Set(consensusIncome))}
+			bonusShare := bonusFee.Mul(bonusFee, big.NewInt(0).Sub(thirtyTwoEth, mpi.NodeDepositBalance))
+			bonusShare.Div(bonusShare, thirtyTwoEth)
+			minipoolBonus := consensusIncome.Mul(consensusIncome, bonusShare)
+			minipoolBonus.Div(minipoolBonus, oneEth)
+			if minipoolBonus.Sign() == -1 {
+				minipoolBonus = big.NewInt(0)
+			}
+			mpd.MinipoolBonus = minipoolBonus
+			totalConsensusBonus.Add(totalConsensusBonus, minipoolBonus)
+			nsd.BonusEth.Add(nsd.BonusEth, minipoolBonus)
+		}
+	}
+	return totalConsensusBonus, nil
+}
+
+// Calculate the distribution of Smoothing Pool ETH to each node
+func (r *treeGeneratorImpl_v9_v10) calculateNodeRewards() (*big.Int, *big.Int, *big.Int, error) {
+	var err error
+	bonusScalar := big.NewInt(0).Set(oneEth)
+
+	// If there weren't any successful attestations, everything goes to the pool stakers
+	if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 {
+		r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... 
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) + return r.smoothingPoolBalance, big.NewInt(0), bonusScalar, nil + } + + // Calculate the minipool bonuses + isEligibleInterval := true // TODO - check on-chain for saturn 1 + var totalConsensusBonus *big.Int + if r.rewardsFile.RulesetVersion >= 10 && isEligibleInterval { + totalConsensusBonus, err = r.calculateNodeBonuses() + if err != nil { + return nil, nil, nil, err + } + } + + totalEthForMinipools := big.NewInt(0) + totalNodeOpShare := big.NewInt(0) + totalNodeOpShare.Mul(r.smoothingPoolBalance, r.totalAttestationScore) + totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(r.successfulAttestations))) + totalNodeOpShare.Div(totalNodeOpShare, oneEth) + + for _, nodeInfo := range r.nodeDetails { + nodeInfo.SmoothingPoolEth = big.NewInt(0) + if !nodeInfo.IsEligible { + continue + } + for _, minipool := range nodeInfo.Minipools { + if len(minipool.CompletedAttestations)+len(minipool.MissingAttestationSlots) == 0 || !minipool.WasActive { + // Ignore minipools that weren't active for the interval + minipool.WasActive = false + minipool.MinipoolShare = big.NewInt(0) + continue + } + + minipoolEth := big.NewInt(0).Set(totalNodeOpShare) + minipoolEth.Mul(minipoolEth, &minipool.AttestationScore.Int) + minipoolEth.Div(minipoolEth, r.totalAttestationScore) + minipool.MinipoolShare = minipoolEth + nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) + } + totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) + } + + if r.rewardsFile.RulesetVersion >= 10 { + remainingBalance := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + if remainingBalance.Cmp(totalConsensusBonus) < 0 { + r.log.Printlnf("WARNING: Remaining balance is less than total consensus bonus... 
Balance = %s, total consensus bonus = %s", remainingBalance.String(), totalConsensusBonus.String())
+			// Scale bonuses down to fit the remaining balance
+			bonusScalar.Div(big.NewInt(0).Mul(remainingBalance, oneEth), totalConsensusBonus)
+			for _, nsd := range r.nodeDetails {
+				nsd.BonusEth.Mul(nsd.BonusEth, remainingBalance)
+				nsd.BonusEth.Div(nsd.BonusEth, totalConsensusBonus)
+				// Calculate the reduced bonus for each minipool
+				// Because of integer division, this will be less than the actual bonus by up to 1 wei
+				for _, mpd := range nsd.Minipools {
+					if mpd.MinipoolBonus == nil {
+						continue
+					}
+					mpd.MinipoolBonus.Mul(mpd.MinipoolBonus, remainingBalance)
+					mpd.MinipoolBonus.Div(mpd.MinipoolBonus, totalConsensusBonus)
+				}
+			}
+		}
+	}
+
+	// Sanity check the totalNodeOpShare before bonuses are awarded
+	delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare)
+	delta.Abs(delta)
+	if delta.Cmp(r.epsilon) == 1 {
+		return nil, nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String())
+	}
+
+	// Finally, award the bonuses
+	if r.rewardsFile.RulesetVersion >= 10 {
+		for _, nsd := range r.nodeDetails {
+			nsd.SmoothingPoolEth.Add(nsd.SmoothingPoolEth, nsd.BonusEth)
+			totalEthForMinipools.Add(totalEthForMinipools, nsd.BonusEth)
+		}
+	}
+
+	// This is how much actually goes to the pool stakers - it should ideally equal the pool staker share, but this accounts for any cumulative truncation from integer division
+	truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools)
+
+	// Calculate the staking pool share and the node op share
+	poolStakerShareBeforeBonuses := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare)
+
+	r.log.Printlnf("%s Pool staker ETH before bonuses: %s (%.3f)", r.logPrefix, poolStakerShareBeforeBonuses.String(), eth.WeiToEth(poolStakerShareBeforeBonuses))
+	r.log.Printlnf("%s Pool staker ETH after bonuses: %s (%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount))
+	r.log.Printlnf("%s Node Op ETH before bonuses: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare))
+	r.log.Printlnf("%s Node Op ETH after bonuses: %s (%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools))
+	r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String())
+	r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String())
+
+	return truePoolStakerAmount, totalEthForMinipools, bonusScalar, nil
+
+}
+
+// Processes the attestations, balances, and withdrawals for a range of epochs
+func (r *treeGeneratorImpl_v9_v10) processAttestationsBalancesAndWithdrawalsForInterval() error {
+
+	startEpoch := r.rewardsFile.ConsensusStartBlock / r.beaconConfig.SlotsPerEpoch
+	endEpoch := r.rewardsFile.ConsensusEndBlock / r.beaconConfig.SlotsPerEpoch
+
+	// Determine the validator indices of each minipool
+	err := r.createMinipoolIndexMap()
+	if err != nil {
+		return err
+	}
+
+	// Check all of the attestations for each epoch
+	r.log.Printlnf("%s Checking participation of %d minipools for epochs %d to %d", r.logPrefix, len(r.validatorIndexMap), startEpoch, endEpoch)
+	r.log.Printlnf("%s NOTE: this will take a long time, progress is reported every 100 epochs", r.logPrefix)
+
+	epochsDone := 0
+	reportStartTime := time.Now()
+	for epoch := startEpoch; epoch < endEpoch+1; epoch++ {
+		if epochsDone == 100 {
+			timeTaken := 
time.Since(reportStartTime) + r.log.Printlnf("%s On Epoch %d of %d (%.2f%%)... (%s so far)", r.logPrefix, epoch, endEpoch, float64(epoch-startEpoch)/float64(endEpoch-startEpoch)*100.0, timeTaken) + epochsDone = 0 + } + + err := r.processEpoch(true, epoch) + if err != nil { + return err + } + + epochsDone++ + } + + // Check the epoch after the end of the interval for any lingering attestations + epoch := endEpoch + 1 + err = r.processEpoch(false, epoch) + if err != nil { + return err + } + + r.log.Printlnf("%s Finished participation check (total time = %s)", r.logPrefix, time.Since(reportStartTime)) + return nil + +} + +// Process an epoch, optionally getting the duties for all eligible minipools in it and checking each one's attestation performance +func (r *treeGeneratorImpl_v9_v10) processEpoch(duringInterval bool, epoch uint64) error { + + // Get the committee info and attestation records for this epoch + var committeeData beacon.Committees + attestationsPerSlot := make([][]beacon.AttestationInfo, r.slotsPerEpoch) + var wg errgroup.Group + + if duringInterval { + wg.Go(func() error { + var err error + committeeData, err = r.bc.GetCommitteesForEpoch(&epoch) + return err + }) + } + + withdrawalsLock := &sync.Mutex{} + for i := uint64(0); i < r.slotsPerEpoch; i++ { + // Get the beacon block for this slot + i := i + slot := epoch*r.slotsPerEpoch + i + slotTime := r.networkState.BeaconConfig.GetSlotTime(slot) + wg.Go(func() error { + beaconBlock, found, err := r.bc.GetBeaconBlock(fmt.Sprint(slot)) + if err != nil { + return err + } + if found { + attestationsPerSlot[i] = beaconBlock.Attestations + } + + // If we don't need withdrawal amounts because we're using ruleset 9, + // return early + if r.rewardsFile.RulesetVersion < 10 || !duringInterval { + return nil + } + + for _, withdrawal := range beaconBlock.Withdrawals { + // Ignore non-RP validators + mpi, exists := r.validatorIndexMap[withdrawal.ValidatorIndex] + if !exists { + continue + } + nnd := r.networkState.NodeDetailsByAddress[mpi.NodeAddress] + nmd := r.networkState.MinipoolDetailsByAddress[mpi.Address] + + // Check that the node is opted into the SP during this slot + if !nnd.WasOptedInAt(slotTime) { + continue + } + + // Check that the minipool's bond is eligible for bonuses at this slot + if eligible := nmd.IsEligibleForBonuses(slotTime); !eligible { + continue + } + + // If the withdrawal is in or after the minipool's withdrawable epoch, adjust it. 
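+					// (A withdrawal at or past the withdrawable epoch is a full withdrawal, so the 32 ETH
+					// principal is deducted below and the result floored at zero; only the excess counts
+					// as consensus income.)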
+					withdrawalAmount := withdrawal.Amount
+					validatorInfo := r.networkState.ValidatorDetails[mpi.ValidatorPubkey]
+					if slot >= r.networkState.BeaconConfig.FirstSlotOfEpoch(validatorInfo.WithdrawableEpoch) {
+						// Subtract 32 ETH from the withdrawal amount
+						withdrawalAmount = big.NewInt(0).Sub(withdrawalAmount, thirtyTwoEth)
+						// max(withdrawalAmount, 0)
+						if withdrawalAmount.Sign() < 0 {
+							withdrawalAmount.SetInt64(0)
+						}
+					}
+
+					// Create the minipool's withdrawal sum big.Int if it doesn't exist
+					withdrawalsLock.Lock()
+					if r.minipoolWithdrawals[mpi.Address] == nil {
+						r.minipoolWithdrawals[mpi.Address] = big.NewInt(0)
+					}
+					// Add the withdrawal amount
+					r.minipoolWithdrawals[mpi.Address].Add(r.minipoolWithdrawals[mpi.Address], withdrawalAmount)
+					withdrawalsLock.Unlock()
+				}
+				return nil
+			})
+	}
+	err := wg.Wait()
+	// Return preallocated memory to the pool if it exists
+	if committeeData != nil {
+		defer committeeData.Release()
+	}
+	if err != nil {
+		return fmt.Errorf("error getting committee and attestation records for epoch %d: %w", epoch, err)
+	}
+
+	if duringInterval {
+		// Get all of the expected duties for the epoch
+		err = r.getDutiesForEpoch(committeeData)
+		if err != nil {
+			return fmt.Errorf("error getting duties for epoch %d: %w", epoch, err)
+		}
+	}
+
+	// Process all of the slots in the epoch
+	for i := uint64(0); i < r.slotsPerEpoch; i++ {
+		inclusionSlot := epoch*r.slotsPerEpoch + i
+		attestations := attestationsPerSlot[i]
+		if len(attestations) > 0 {
+			r.checkAttestations(attestations, inclusionSlot)
+		}
+	}
+
+	return nil
+
+}
+
+func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.AttestationInfo, inclusionSlot uint64) error {
+
+	// Go through the attestations for the block
+	for _, attestation := range attestations {
+		// Get the RP committees for this attestation's slot and index
+		slotInfo, exists := r.intervalDutiesInfo.Slots[attestation.SlotIndex]
+		if !exists {
+			continue
+		}
+		// Ignore attestations delayed by more than 32 slots
+		if inclusionSlot-attestation.SlotIndex > r.beaconConfig.SlotsPerEpoch {
+			continue
+		}
+		rpCommittee, exists := slotInfo.Committees[attestation.CommitteeIndex]
+		if !exists {
+			continue
+		}
+		blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex))
+
+		// Check if each RP validator attested successfully
+		for position, validator := range rpCommittee.Positions {
+			if !attestation.AggregationBits.BitAt(uint64(position)) {
+				continue
+			}
+
+			// This was seen, so remove it from the missing attestations and add it to the completed ones
+			delete(rpCommittee.Positions, position)
+			if len(rpCommittee.Positions) == 0 {
+				delete(slotInfo.Committees, attestation.CommitteeIndex)
+			}
+			if len(slotInfo.Committees) == 0 {
+				delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex)
+			}
+			delete(validator.MissingAttestationSlots, attestation.SlotIndex)
+
+			// Check if this minipool was opted into the SP for this block
+			nodeDetails := r.nodeDetails[validator.NodeIndex]
+			if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) {
+				// Not opted in
+				continue
+			}
+
+			eligibleBorrowedEth := nodeDetails.EligibleBorrowedEth
+			_, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake)
+
+			// Mark this duty as completed
+			validator.CompletedAttestations[attestation.SlotIndex] = true
+
+			// Get the pseudoscore for this attestation
+			details := r.networkState.MinipoolDetailsByAddress[validator.Address]
+			bond, fee := details.GetMinipoolBondAndNodeFee(blockTime)
+
+			if r.rewardsFile.RulesetVersion >= 10 {
+				fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth)
+			}
+
+			minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee
+			minipoolScore.Mul(minipoolScore, bond)          // Multiply by bond
+			minipoolScore.Div(minipoolScore, thirtyTwoEth)  // Divide by 32 to get the bond as a fraction of a total validator
+			minipoolScore.Add(minipoolScore, fee)           // Total = fee + (bond/32)(1 - fee)
+
+			// Add it to the minipool's score and the total score
+			validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore)
+			r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore)
+			r.successfulAttestations++
+		}
+	}
+
+	return nil
+
+}
+
+// Maps out the attestation duties for the given epoch
+func (r *treeGeneratorImpl_v9_v10) getDutiesForEpoch(committees beacon.Committees) error {
+
+	// Crawl the committees
+	for idx := 0; idx < committees.Count(); idx++ {
+		slotIndex := committees.Slot(idx)
+		if slotIndex < r.rewardsFile.ConsensusStartBlock || slotIndex > r.rewardsFile.ConsensusEndBlock {
+			// Ignore slots that are out of bounds
+			continue
+		}
+		blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*slotIndex))
+		committeeIndex := committees.Index(idx)
+
+		// Check if there are any RP validators in this committee
+		rpValidators := map[int]*MinipoolInfo{}
+		for position, validator := range committees.Validators(idx) {
+			minipoolInfo, exists := r.validatorIndexMap[validator]
+			if !exists {
+				// This isn't an RP validator, so ignore it
+				continue
+			}
+
+			// Check if this minipool was opted into the SP for this block
+			nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress]
+			isOptedIn := nodeDetails.SmoothingPoolRegistrationState
+			spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0)
+			if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it
+				(!isOptedIn && spRegistrationTime.Sub(blockTime) < 0) { // If this block occurred after the node opted out, ignore it
+				continue
+			}
+
+			// Check if this minipool was in the `staking` state during this time
+			mpd := r.networkState.MinipoolDetailsByAddress[minipoolInfo.Address]
+			statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0)
+			if mpd.Status != rptypes.Staking || blockTime.Sub(statusChangeTime) < 0 {
+				continue
+			}
+
+			// This was a legal RP validator opted into the SP during this slot so add it
+			rpValidators[position] = minipoolInfo
+			minipoolInfo.MissingAttestationSlots[slotIndex] = true
+		}
+
+		// If there are some RP validators, add this committee to the map
+		if len(rpValidators) > 0 {
+			slotInfo, exists := r.intervalDutiesInfo.Slots[slotIndex]
+			if !exists {
+				slotInfo = &SlotInfo{
+					Index:      slotIndex,
+					Committees: map[uint64]*CommitteeInfo{},
+				}
+				r.intervalDutiesInfo.Slots[slotIndex] = slotInfo
+			}
+			slotInfo.Committees[committeeIndex] = &CommitteeInfo{
+				Index:     committeeIndex,
+				Positions: rpValidators,
+			}
+		}
+	}
+
+	return nil
+
+}
+
+// Maps all minipools to their validator indices and creates a map of indices to minipool info
+func (r *treeGeneratorImpl_v9_v10) createMinipoolIndexMap() error {
+
+	// Get the status for all uncached minipool validators and add them to the cache
+	r.validatorIndexMap = map[string]*MinipoolInfo{}
+	for _, details := range r.nodeDetails {
+		if details.IsEligible {
+			for _, 
minipoolInfo := range details.Minipools { + status, exists := r.networkState.ValidatorDetails[minipoolInfo.ValidatorPubkey] + if !exists { + // Remove minipools that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (pubkey %s) didn't exist at this slot; removing it", minipoolInfo.Address.Hex(), minipoolInfo.ValidatorPubkey.Hex()) + minipoolInfo.WasActive = false + } else { + switch status.Status { + case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: + // Remove minipools that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (index %s, pubkey %s) was in state %s; removing it", minipoolInfo.Address.Hex(), status.Index, minipoolInfo.ValidatorPubkey.Hex(), string(status.Status)) + minipoolInfo.WasActive = false + default: + // Get the validator index + minipoolInfo.ValidatorIndex = status.Index + r.validatorIndexMap[minipoolInfo.ValidatorIndex] = minipoolInfo + + // Get the validator's activation start and end slots + startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch + endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch + + // Verify this minipool has already started + if status.ActivationEpoch == FarEpoch { + //r.log.Printlnf("NOTE: minipool %s hasn't been scheduled for activation yet; removing it", minipoolInfo.Address.Hex()) + minipoolInfo.WasActive = false + continue + } else if startSlot > r.rewardsFile.ConsensusEndBlock { + //r.log.Printlnf("NOTE: minipool %s activates on slot %d which is after interval end %d; removing it", minipoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) + minipoolInfo.WasActive = false + } + + // Check if the minipool exited before this interval + if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { + //r.log.Printlnf("NOTE: minipool %s exited on slot %d which was before interval start %d; removing it", minipoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) + minipoolInfo.WasActive = false + continue + } + } + } + } + } + } + + return nil + +} + +var farFutureTimestamp int64 = 1000000000000000000 // Far into the future +var farPastTimestamp int64 = 0 + +// Get the details for every node that was opted into the Smoothing Pool for at least some portion of this interval +func (r *treeGeneratorImpl_v9_v10) getSmoothingPoolNodeDetails() error { + + // For each NO, get their opt-in status and time of last change in batches + r.log.Printlnf("%s Getting details of nodes for Smoothing Pool calculation...", r.logPrefix) + nodeCount := uint64(len(r.networkState.NodeDetails)) + r.nodeDetails = make([]*NodeSmoothingDetails, nodeCount) + for batchStartIndex := uint64(0); batchStartIndex < nodeCount; batchStartIndex += SmoothingPoolDetailsBatchSize { + + // Get batch start & end index + iterationStartIndex := batchStartIndex + iterationEndIndex := batchStartIndex + SmoothingPoolDetailsBatchSize + if iterationEndIndex > nodeCount { + iterationEndIndex = nodeCount + } + + // Load details + var wg errgroup.Group + for iterationIndex := iterationStartIndex; iterationIndex < iterationEndIndex; iterationIndex++ { + iterationIndex := iterationIndex + wg.Go(func() error { + nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] + nodeDetails := &NodeSmoothingDetails{ + Address: nativeNodeDetails.NodeAddress, + Minipools: []*MinipoolInfo{}, + SmoothingPoolEth: big.NewInt(0), + BonusEth: big.NewInt(0), + RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(), + 
RplStake: nativeNodeDetails.RplStake, + } + + nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState + statusChangeTimeBig := nativeNodeDetails.SmoothingPoolRegistrationChanged + statusChangeTime := time.Unix(statusChangeTimeBig.Int64(), 0) + + if nodeDetails.IsOptedIn { + nodeDetails.OptInTime = statusChangeTime + nodeDetails.OptOutTime = time.Unix(farFutureTimestamp, 0) + } else { + nodeDetails.OptOutTime = statusChangeTime + nodeDetails.OptInTime = time.Unix(farPastTimestamp, 0) + } + + // Get the details for each minipool in the node + for _, mpd := range r.networkState.MinipoolDetailsByNode[nodeDetails.Address] { + if mpd.Exists && mpd.Status == rptypes.Staking { + nativeMinipoolDetails := r.networkState.MinipoolDetailsByAddress[mpd.MinipoolAddress] + penaltyCount := nativeMinipoolDetails.PenaltyCount.Uint64() + if penaltyCount >= 3 { + // This node is a cheater + nodeDetails.IsEligible = false + nodeDetails.Minipools = []*MinipoolInfo{} + r.nodeDetails[iterationIndex] = nodeDetails + return nil + } + + // This minipool is below the penalty count, so include it + nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ + Address: mpd.MinipoolAddress, + ValidatorPubkey: mpd.Pubkey, + NodeAddress: nodeDetails.Address, + NodeIndex: iterationIndex, + Fee: nativeMinipoolDetails.NodeFee, + //MissedAttestations: 0, + //GoodAttestations: 0, + MissingAttestationSlots: map[uint64]bool{}, + CompletedAttestations: map[uint64]bool{}, + WasActive: true, + AttestationScore: NewQuotedBigInt(0), + NodeOperatorBond: nativeMinipoolDetails.NodeDepositBalance, + }) + } + } + + nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 + r.nodeDetails[iterationIndex] = nodeDetails + return nil + }) + } + if err := wg.Wait(); err != nil { + return err + } + } + + // Populate the eligible borrowed ETH field for all nodes + for _, nodeDetails := range r.nodeDetails { + nnd := r.networkState.NodeDetailsByAddress[nodeDetails.Address] + nodeDetails.EligibleBorrowedEth = r.networkState.GetEligibleBorrowedEth(nnd) + } + + return nil + +} + +// Validates that the provided network is legal +func (r *treeGeneratorImpl_v9_v10) validateNetwork(network uint64) (bool, error) { + valid, exists := r.validNetworkCache[network] + if !exists { + var err error + valid, err = r.rp.GetNetworkEnabled(big.NewInt(int64(network)), r.opts) + if err != nil { + return false, err + } + r.validNetworkCache[network] = valid + } + + return valid, nil +} + +// Gets the start blocks for the given interval +func (r *treeGeneratorImpl_v9_v10) getBlocksAndTimesForInterval(previousIntervalEvent rewards.RewardsEvent) (*types.Header, error) { + // Sanity check to confirm the BN can access the block from the previous interval + _, exists, err := r.bc.GetBeaconBlock(previousIntervalEvent.ConsensusBlock.String()) + if err != nil { + return nil, fmt.Errorf("error verifying block from previous interval: %w", err) + } + if !exists { + return nil, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", previousIntervalEvent.ConsensusBlock.Uint64()) + } + + previousEpoch := previousIntervalEvent.ConsensusBlock.Uint64() / r.beaconConfig.SlotsPerEpoch + nextEpoch := previousEpoch + 1 + + consensusStartSlot := nextEpoch * r.beaconConfig.SlotsPerEpoch + startTime := r.beaconConfig.GetSlotTime(consensusStartSlot) + endTime := 
r.beaconConfig.GetSlotTime(r.snapshotEnd.Slot) + + r.rewardsFile.StartTime = startTime + r.minipoolPerformanceFile.StartTime = startTime + + r.rewardsFile.EndTime = endTime + r.minipoolPerformanceFile.EndTime = endTime + + r.rewardsFile.ConsensusStartBlock = nextEpoch * r.beaconConfig.SlotsPerEpoch + r.minipoolPerformanceFile.ConsensusStartBlock = r.rewardsFile.ConsensusStartBlock + + r.rewardsFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + r.minipoolPerformanceFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + + r.rewardsFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + r.minipoolPerformanceFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + + // Get the first block that isn't missing + var elBlockNumber uint64 + for { + beaconBlock, exists, err := r.bc.GetBeaconBlock(fmt.Sprint(r.rewardsFile.ConsensusStartBlock)) + if err != nil { + return nil, fmt.Errorf("error getting EL data for BC slot %d: %w", r.rewardsFile.ConsensusStartBlock, err) + } + if !exists { + r.rewardsFile.ConsensusStartBlock++ + r.minipoolPerformanceFile.ConsensusStartBlock++ + } else { + elBlockNumber = beaconBlock.ExecutionBlockNumber + break + } + } + + var startElHeader *types.Header + if elBlockNumber == 0 { + // We are pre-merge, so get the first block after the one from the previous interval + r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1 + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) + if err != nil { + return nil, fmt.Errorf("error getting EL start block %d: %w", r.rewardsFile.ExecutionStartBlock, err) + } + } else { + // We are post-merge, so get the EL block corresponding to the BC block + r.rewardsFile.ExecutionStartBlock = elBlockNumber + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) + if err != nil { + return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) + } + } + + return startElHeader, nil +} + +func (r *treeGeneratorImpl_v9_v10) saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return saveRewardsArtifacts(smartnode, treeResult, nodeTrusted) +} diff --git a/shared/services/rewards/generator-v8_test.go b/shared/services/rewards/generator-v8_test.go new file mode 100644 index 000000000..5dd52d226 --- /dev/null +++ b/shared/services/rewards/generator-v8_test.go @@ -0,0 +1,153 @@ +package rewards + +import ( + "fmt" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/services/state" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +type v8Test struct { + *testing.T + rp *test.MockRocketPool + bc *test.MockBeaconClient +} + +func (t *v8Test) saveArtifacts(prefix string, result *GenerateTreeResult) { + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("artifacts-%s", t.Name())) + t.failIf(err) + rewardsLocalFile := LocalFile[IRewardsFile]{ + fullPath: filepath.Join(tmpDir, 
fmt.Sprintf("%s-rewards.json", prefix)), + f: result.RewardsFile, + } + performanceLocalFile := LocalFile[IMinipoolPerformanceFile]{ + fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-minipool-performance.json", prefix)), + f: result.MinipoolPerformanceFile, + } + _, err = rewardsLocalFile.Write() + t.failIf(err) + _, err = performanceLocalFile.Write() + t.failIf(err) + + t.Logf("wrote artifacts to %s\n", tmpDir) +} + +func newV8Test(t *testing.T, index uint64) *v8Test { + rp := test.NewMockRocketPool(t, index) + out := &v8Test{ + T: t, + rp: rp, + bc: test.NewMockBeaconClient(t), + } + return out +} + +func (t *v8Test) failIf(err error) { + if err != nil { + t.Fatalf(err.Error()) + } +} + +func (t *v8Test) SetMinipoolPerformance(canonicalMinipoolPerformance IMinipoolPerformanceFile, networkState *state.NetworkState) { + addresses := canonicalMinipoolPerformance.GetMinipoolAddresses() + for _, address := range addresses { + + // Get the minipool's performance + perf, ok := canonicalMinipoolPerformance.GetSmoothingPoolPerformance(address) + if !ok { + t.Fatalf("Minipool %s not found in canonical minipool performance, despite being listed as present", address.Hex()) + } + missedSlots := perf.GetMissingAttestationSlots() + pubkey, err := perf.GetPubkey() + + // Get the minipool's validator index + validatorStatus := networkState.ValidatorDetails[pubkey] + + if err != nil { + t.Fatalf("Minipool %s pubkey could not be parsed: %s", address.Hex(), err.Error()) + } + t.bc.SetMinipoolPerformance(validatorStatus.Index, missedSlots) + } +} + +// TestV8Mainnet builds a tree using serialized state for a mainnet interval that used v8 +// and checks that the resulting artifacts match their canonical values. +func TestV8Mainnet(tt *testing.T) { + state := assets.GetMainnet20RewardsState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + canonical, err := DeserializeRewardsFile(assets.GetMainnet20RewardsJSON()) + t.failIf(err) + + canonicalPerformance, err := DeserializeMinipoolPerformanceFile(assets.GetMainnet20MinipoolPerformanceJSON()) + t.failIf(err) + + t.Logf("pending rpl rewards: %s", state.NetworkDetails.PendingRPLRewards.String()) + + t.bc.SetState(state) + + // Some interval info needed for mocks + consensusStartBlock := canonical.GetConsensusStartBlock() + executionStartBlock := canonical.GetExecutionStartBlock() + consensusEndBlock := canonical.GetConsensusEndBlock() + + // Create a new treeGeneratorImpl_v8 + logger := log.NewColorLogger(color.Faint) + generator := newTreeGeneratorImpl_v8( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + canonical.GetStartTime(), + canonical.GetEndTime(), + consensusEndBlock, + &types.Header{ + Number: big.NewInt(int64(canonical.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + canonical.GetIntervalsPassed(), + state, + ) + + // Load the mock up + t.rp.SetRewardSnapshotEvent(assets.GetRewardSnapshotEventInterval19()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(canonical.GetStartTime().Unix())}) + + // Set the critical duties slots + t.bc.SetCriticalDutiesSlots(assets.GetMainnet20CriticalDutiesSlots()) + + // Set the minipool performance + t.SetMinipoolPerformance(canonicalPerformance, state) + + artifacts, err := 
generator.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + // Save the artifacts if verbose mode is enabled + if testing.Verbose() { + t.saveArtifacts("", artifacts) + } + + t.Logf("merkle root: %s\n", artifacts.RewardsFile.GetMerkleRoot()) + if artifacts.RewardsFile.GetMerkleRoot() != canonical.GetMerkleRoot() { + t.Fatalf("Merkle root does not match %s", canonical.GetMerkleRoot()) + } else { + t.Logf("merkle root matches %s", canonical.GetMerkleRoot()) + } +} diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index cdcc9d8a3..e0ba76630 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -6,8 +6,9 @@ import ( "slices" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/ipfs/go-cid" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" "github.com/rocket-pool/smartnode/shared/services/state" @@ -46,38 +47,85 @@ const ( // HoleskyV7Interval uint64 = 0 // Mainnet intervals - MainnetV8Interval uint64 = 18 - + MainnetV8Interval uint64 = 18 + MainnetV9Interval uint64 = 29 + MainnetV10Interval uint64 = 30 // Devnet intervals // Holesky intervals - HoleskyV8Interval uint64 = 93 + HoleskyV8Interval uint64 = 93 + HoleskyV9Interval uint64 = 276 + HoleskyV10Interval uint64 = 277 ) +func GetMainnetRulesetVersion(interval uint64) uint64 { + if interval >= MainnetV10Interval { + return 10 + } + if interval >= MainnetV9Interval { + return 9 + } + return 8 +} + +func GetHoleskyRulesetVersion(interval uint64) uint64 { + if interval >= HoleskyV10Interval { + return 10 + } + if interval >= HoleskyV9Interval { + return 9 + } + return 8 +} + +func GetRulesetVersion(network cfgtypes.Network, interval uint64) uint64 { + switch network { + case cfgtypes.Network_Mainnet: + return GetMainnetRulesetVersion(interval) + case cfgtypes.Network_Holesky: + return GetHoleskyRulesetVersion(interval) + case cfgtypes.Network_Devnet: + return 10 + default: + return 10 + } +} + type TreeGenerator struct { rewardsIntervalInfos map[uint64]rewardsIntervalInfo logger *log.ColorLogger logPrefix string - rp *rocketpool.RocketPool + rp RewardsExecutionClient cfg *config.RocketPoolConfig bc beacon.Client index uint64 startTime time.Time endTime time.Time - consensusBlock uint64 + snapshotEnd *SnapshotEnd elSnapshotHeader *types.Header intervalsPassed uint64 generatorImpl treeGeneratorImpl approximatorImpl treeGeneratorImpl } +type SnapshotEnd struct { + // Slot is the last slot of the interval + Slot uint64 + // ConsensusBlock is the last non-missed slot of the interval + ConsensusBlock uint64 + // ExecutionBlock is the EL block number of ConsensusBlock + ExecutionBlock uint64 +} + type treeGeneratorImpl interface { - generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) - approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) + generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) + approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) getRulesetVersion() uint64 + // Returns the primary artifact cid for consensus, all cids of all files 
in a map, and any potential errors + saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) } -func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client, index uint64, startTime time.Time, endTime time.Time, consensusBlock uint64, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState, rollingRecord *RollingRecord) (*TreeGenerator, error) { +func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecutionClient, cfg *config.RocketPoolConfig, bc beacon.Client, index uint64, startTime time.Time, endTime time.Time, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) (*TreeGenerator, error) { t := &TreeGenerator{ logger: logger, logPrefix: logPrefix, @@ -87,21 +135,34 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool. index: index, startTime: startTime, endTime: endTime, - consensusBlock: consensusBlock, + snapshotEnd: snapshotEnd, elSnapshotHeader: elSnapshotHeader, intervalsPassed: intervalsPassed, } + // v10 + v10_generator := newTreeGeneratorImpl_v9_v10(10, t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state) + + // v9 + v9_generator := newTreeGeneratorImpl_v9_v10(9, t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state) + // v8 - var v8_generator treeGeneratorImpl - if rollingRecord == nil { - v8_generator = newTreeGeneratorImpl_v8(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.consensusBlock, t.elSnapshotHeader, t.intervalsPassed, state) - } else { - v8_generator = newTreeGeneratorImpl_v8_rolling(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.consensusBlock, t.elSnapshotHeader, t.intervalsPassed, state, rollingRecord) - } + v8_generator := newTreeGeneratorImpl_v8(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.snapshotEnd.ConsensusBlock, t.elSnapshotHeader, t.intervalsPassed, state) // Create the interval wrappers rewardsIntervalInfos := []rewardsIntervalInfo{ + { + rewardsRulesetVersion: 10, + mainnetStartInterval: MainnetV10Interval, + holeskyStartInterval: HoleskyV10Interval, + generator: v10_generator, + }, + { + rewardsRulesetVersion: 9, + mainnetStartInterval: MainnetV9Interval, + holeskyStartInterval: HoleskyV9Interval, + generator: v9_generator, + }, { rewardsRulesetVersion: 8, mainnetStartInterval: MainnetV8Interval, @@ -167,12 +228,18 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool. 
return t, nil } -func (t *TreeGenerator) GenerateTree() (IRewardsFile, error) { - return t.generatorImpl.generateTree(t.rp, t.cfg, t.bc) +type GenerateTreeResult struct { + RewardsFile IRewardsFile + MinipoolPerformanceFile IMinipoolPerformanceFile + InvalidNetworkNodes map[common.Address]uint64 +} + +func (t *TreeGenerator) GenerateTree() (*GenerateTreeResult, error) { + return t.generatorImpl.generateTree(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.cfg.Smartnode.GetPreviousRewardsPoolAddresses(), t.bc) } func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPool() (*big.Int, error) { - return t.approximatorImpl.approximateStakerShareOfSmoothingPool(t.rp, t.cfg, t.bc) + return t.approximatorImpl.approximateStakerShareOfSmoothingPool(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.bc) } func (t *TreeGenerator) GetGeneratorRulesetVersion() uint64 { @@ -183,13 +250,18 @@ func (t *TreeGenerator) GetApproximatorRulesetVersion() uint64 { return t.approximatorImpl.getRulesetVersion() } -func (t *TreeGenerator) GenerateTreeWithRuleset(ruleset uint64) (IRewardsFile, error) { +func (t *TreeGenerator) GenerateTreeWithRuleset(ruleset uint64) (*GenerateTreeResult, error) { info, exists := t.rewardsIntervalInfos[ruleset] if !exists { return nil, fmt.Errorf("ruleset v%d does not exist", ruleset) } - return info.generator.generateTree(t.rp, t.cfg, t.bc) + return info.generator.generateTree( + t.rp, + fmt.Sprint(t.cfg.Smartnode.Network.Value), + t.cfg.Smartnode.GetPreviousRewardsPoolAddresses(), + t.bc, + ) } func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPoolWithRuleset(ruleset uint64) (*big.Int, error) { @@ -198,5 +270,9 @@ func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPoolWithRuleset(ruleset return nil, fmt.Errorf("ruleset v%d does not exist", ruleset) } - return info.generator.approximateStakerShareOfSmoothingPool(t.rp, t.cfg, t.bc) + return info.generator.approximateStakerShareOfSmoothingPool(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.bc) +} + +func (t *TreeGenerator) SaveFiles(treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return t.generatorImpl.saveFiles(t.cfg.Smartnode, treeResult, nodeTrusted) } diff --git a/shared/services/rewards/mock_test.go b/shared/services/rewards/mock_test.go new file mode 100644 index 000000000..e24d5739d --- /dev/null +++ b/shared/services/rewards/mock_test.go @@ -0,0 +1,317 @@ +package rewards + +// This file contains treegen tests which use mock history. +// These mocks are faster to process than real history, and are useful for +// testing new features and refactoring. 
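+// The newV8Test helper used throughout this file provides stub execution (t.rp) and beacon (t.bc) clients that each test populates from a mock history.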
+ +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +func TestMockIntervalDefaultsTreegenv8v9(tt *testing.T) { + history := test.NewDefaultMockHistory() + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + generator := newTreeGeneratorImpl_v8( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + history.GetStartTime(), + history.GetEndTime(), + consensusEndBlock, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + v8Artifacts, err := generator.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v8", v8Artifacts) + } + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 9, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v9Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v9", v9Artifacts) + } + + // Validate individual node details in the rewards file + rewardsFile := v8Artifacts.RewardsFile + nodeSummary := history.GetNodeSummary() + + singleEightEthNodes := nodeSummary["single_eight_eth"] + singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) 
{ + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] + singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.SmoothingPoolRegistrationState { + if node.Class == "single_eight_eth_sp" { + expectedEthAmount.SetString("1354725546842756912", 10) + } else { + // 16-eth minipools earn more eth! A bit less than double. 
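+ // (A rough sanity check, assuming the default mock node fee of 10%: an 8 ETH bond keeps 8/32 + 10%*24/32 = 32.5% of each attestation reward, while a 16 ETH bond keeps 16/32 + 10%*16/32 = 55%, about a 1.69x ratio.)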
+ expectedEthAmount.SetString("2292612463887742467", 10) + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingInNodesSP := append( + nodeSummary["single_eight_eth_opted_in_quarter"], + nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + ) + for _, node := range optingInNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1019397441188609162", 10) + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("1725134131242261659", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingOutNodesSP := append( + nodeSummary["single_eight_eth_opted_out_three_quarters"], + nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + ) + for _, node := range optingOutNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1005984316962443252", 10) + } else { + // 16-eth minipools earn more 
eth! A bit less than double. + expectedEthAmount.SetString("1702434997936442426", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + bondReductionNode := nodeSummary["single_bond_reduction"] + for _, node := range bondReductionNode { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Nodes that bond reduce are treated as having their new bond for the full interval, + // when it comes to RPL rewards. + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got reduced ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount, _ := big.NewInt(0).SetString("1922203879488237721", 10) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + noMinipoolsNodes := nodeSummary["no_minipools"] + for _, node := range noMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + // Validate merkle root + v8MerkleRoot := v8Artifacts.RewardsFile.GetMerkleRoot() + v9MerkleRoot := v9Artifacts.RewardsFile.GetMerkleRoot() + + if !strings.EqualFold(v8MerkleRoot, v9MerkleRoot) { + t.Fatalf("Merkle root does not match %s != %s", v8MerkleRoot, v9MerkleRoot) + } else { + t.Logf("v8/v9 Merkle root matches %s", v8MerkleRoot) + } + + // Expected merkle root: + // 0x9915d949936995f9045d26c3ef919194445377e83f1be2da47d181ee9ce705d8 + // + // If this does not match, it implies either you updated the set of default mock nodes, + // or you introduced a regression in treegen. + // DO NOT update this value unless you know what you are doing. 
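+ // (If the check below fails, re-running with go test -v makes the saveArtifacts calls above write the generated files out for inspection.)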
+ expectedMerkleRoot := "0x9915d949936995f9045d26c3ef919194445377e83f1be2da47d181ee9ce705d8" + if !strings.EqualFold(v8MerkleRoot, expectedMerkleRoot) { + t.Fatalf("Merkle root does not match expected value %s != %s", v8MerkleRoot, expectedMerkleRoot) + } else { + t.Logf("Merkle root matches expected value %s", expectedMerkleRoot) + } +} diff --git a/shared/services/rewards/mock_v10_test.go b/shared/services/rewards/mock_v10_test.go new file mode 100644 index 000000000..ce933590b --- /dev/null +++ b/shared/services/rewards/mock_v10_test.go @@ -0,0 +1,864 @@ +package rewards + +// This file contains treegen tests which use mock history. +// These mocks are faster to process than real history, and are useful for +// testing new features and refactoring. + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/rocketpool-go/utils/eth" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { + + history := test.NewDefaultMockHistory() + // Add a node which is earning some bonus commission + node := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + node.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, node) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + // Set some custom balances for the validators that opt in and out of smoothing pool + nodeSummary := history.GetNodeSummary() + customBalanceNodes := nodeSummary["single_eight_eth_opted_in_quarter"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_eight_eth_opted_out_three_quarters"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_bond_reduction"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.5) + } + + history.SetWithdrawals(t.bc) + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name()+"-stateless", + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: 
executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Validate individual node details in the rewards file + rewardsFile := v10Artifacts.RewardsFile + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + + singleEightEthNodes := nodeSummary["single_eight_eth"] + singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Logf("Node %+v", node) + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] + singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.SmoothingPoolRegistrationState { + if node.Class == "single_eight_eth_sp" { + expectedEthAmount.SetString("1450562599049128367", 10) + // There should be a bonus for these nodes' minipools + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // 8 eth minipools with 10% collateral earn 14% commission overall. + // They earned 10% on 24/32 of the 1 eth of consensus rewards already, which is 0.075 eth. + // Their bonus is therefore 4/10 of 0.075 eth, which is 0.03 eth. 
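+ // As a worked check of the comment above: bonus = 1 ETH * (24/32) * (0.14 - 0.10) = 0.03 ETH = 30000000000000000 wei.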
+ expectedBonusEthEarned, _ := big.NewInt(0).SetString("30000000000000000", 10) + if minipoolPerf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, eightEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("2200871632329635499", 10) + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // The 16 eth minipools earn 10% on 24/32. + expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, sixteenEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + // 16 eth minipools earn no bonus. 
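+ // (The v10 bonus commission only applies to 8 ETH bonds, hence the zero-bonus assertion below.)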
+ if minipoolPerf.GetBonusEthEarned().Sign() != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != 0", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String()) + } + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingInNodesSP := append( + nodeSummary["single_eight_eth_opted_in_quarter"], + nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + ) + for _, node := range optingInNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minipool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1091438193343898573", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. 
+ expectedEthAmount.SetString("1656101426307448494", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingOutNodesSP := append( + nodeSummary["single_eight_eth_opted_out_three_quarters"], + nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + ) + for _, node := range optingOutNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1077373217115689381", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("1634310618066561014", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + bondReductionNode := nodeSummary["single_bond_reduction"] + for _, node := range bondReductionNode { + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Nodes that bond reduce are treated as having their new bond for the full interval, + // when it comes to RPL rewards. 
+ expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got reduced ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount, _ := big.NewInt(0).SetString("1920903328050713153", 10) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // And a reduced bonus + expectedBonusEthEarned, _ := big.NewInt(0).SetString("15000000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + noMinipoolsNodes := nodeSummary["no_minipools"] + for _, node := range noMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + // Validate merkle root + v10MerkleRoot := v10Artifacts.RewardsFile.GetMerkleRoot() + + // Expected merkle root: + // 0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b + // + // If this does not match, it implies either you updated the set of default mock nodes, + // or you introduced a regression in treegen. + // DO NOT update this value unless you know what you are doing. 
+ expectedMerkleRoot := "0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b" + if !strings.EqualFold(v10MerkleRoot, expectedMerkleRoot) { + t.Fatalf("Merkle root does not match expected value %s != %s", v10MerkleRoot, expectedMerkleRoot) + } else { + t.Logf("Merkle root matches expected value %s", expectedMerkleRoot) + } +} + +func TestInsufficientEthForBonuseses(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 20, + }) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + // Ovewrite the SP balance to a value under the bonus commission + history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1000) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + history.SetWithdrawals(t.bc) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + if ethOne.Uint64() != 169+416 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 169+416) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + if ethTwo.Uint64() != 177+237 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %d", ethTwo.String(), 177+237) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool 
performance not found") + } + if perfOne.GetBonusEthEarned().Uint64() != 416 { + t.Fatalf("Node one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 416) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 237 { + t.Fatalf("Node two bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 237) + } +} + +func TestMockNoRPLRewards(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 2, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + nodeTwo.Minipools[1].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Give all three minipools 1 ETH of consensus income + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[1].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + 
ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one is not a SP, so it should have 0 ETH + if ethOne.Uint64() != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 0) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("32575000000000000000", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + _, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if ok { + t.Fatalf("Node one minipool performance should not be found") + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + perfThree, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[1].Address) + if !ok { + t.Fatalf("Node two minipool two performance not found") + } + if perfThree.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool two bonus does not match expected value: %s != %d", perfThree.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. 
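+ // (5% + 5% = 10%, i.e. 100000000000000000 as a fraction of 1e18, which is what the assertion below checks.)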
+ if perfThree.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool two effective commission does not match expected value: %s != %d", perfThree.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockOptedOutAndThenBondReduced(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes, one that opts out of the smoothing pool mid-interval and one that stays in + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + // Opted out 1/4 of the way through the interval + nodeOne.SmoothingPoolRegistrationChanged = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/4)) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Bond reduced 1/2 of the way through the interval + nodeOne.Minipools[0].LastBondReductionTime = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2)) + nodeOne.Minipools[0].LastBondReductionPrevValue = big.NewInt(0).Set(sixteenEth) + nodeOne.Minipools[0].LastBondReductionPrevNodeFee, _ = big.NewInt(0).SetString("140000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + 
if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one was in the SP so it should have some ETH, but no bonuses + expectedEthOne, _ := big.NewInt(0).SetString("11309523809523809523", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("26089087301587301587", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 0 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 0) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockWithdrawableEpoch(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Withdrawable epoch half way through the interval + nodeTwo.Minipools[0].WithdrawableEpoch = history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2 + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) 
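+ // The withdrawals configured below credit nodeTwo's minipool with 33 ETH in total (0.5 + 32.5). Since that includes the exited validator's 32 ETH principal, only about 1 ETH should register as consensus income, as asserted at the end of this test.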
+ + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Add a withdrawal in the epoch after the interval ends + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Withdraw 0.5 eth at the start of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(5e17)) + // Withdraw 32.5 eth at the end of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch-1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(0).Mul(big.NewInt(325), big.NewInt(1e17))) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + expectedEthOne, _ := big.NewInt(0).SetString("21920833333333333333", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("10654166666666666666", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 37500000000000000) + } + if perfOne.GetEffectiveCommission().Uint64() 
!= 100000000000000000 { + t.Fatalf("Node one minipool one effective commission does not match expected value: %s != %d", perfOne.GetEffectiveCommission().String(), 100000000000000000) + } + if perfOne.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node one minipool one consensus income does not match expected value: %s != %d", perfOne.GetConsensusIncome().String(), 1000000000000000000) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + if perfTwo.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node two minipool one consensus income does not match expected value: %s != %d", perfTwo.GetConsensusIncome().String(), 1000000000000000000) + } +} diff --git a/shared/services/rewards/record-file-info.go b/shared/services/rewards/record-file-info.go deleted file mode 100644 index 815be6bd8..000000000 --- a/shared/services/rewards/record-file-info.go +++ /dev/null @@ -1,10 +0,0 @@ -package rewards - -// Information about a saved rolling record -type RecordFileInfo struct { - StartSlot uint64 `json:"startSlot"` - EndSlot uint64 `json:"endSlot"` - Filename string `json:"filename"` - Version int `json:"version"` - Checksum [48]byte `json:"checksum"` -} diff --git a/shared/services/rewards/rewards-file-v1.go b/shared/services/rewards/rewards-file-v1.go index 378a6a2ea..d0d0fbb0a 100644 --- a/shared/services/rewards/rewards-file-v1.go +++ b/shared/services/rewards/rewards-file-v1.go @@ -14,7 +14,6 @@ import ( "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information type MinipoolPerformanceFile_v1 struct { Index uint64 `json:"index"` Network string `json:"network"` @@ -32,6 +31,10 @@ func (f *MinipoolPerformanceFile_v1) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *MinipoolPerformanceFile_v1) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for minipool performance files") +} + // Serialize a minipool performance file into bytes designed for human readability func (f *MinipoolPerformanceFile_v1) SerializeHuman() ([]byte, error) { return json.MarshalIndent(f, "", "\t") @@ -88,6 +91,18 @@ func (p *SmoothingPoolMinipoolPerformance_v1) GetMissingAttestationSlots() []uin func (p *SmoothingPoolMinipoolPerformance_v1) GetEthEarned() *big.Int { return eth.EthToWei(p.EthEarned) } +func (p *SmoothingPoolMinipoolPerformance_v1) GetBonusEthEarned() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetEffectiveCommission() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetConsensusIncome() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetAttestationScore() *big.Int { + return big.NewInt(0) +} // Node operator rewards type NodeRewardsInfo_v1 struct { @@ -100,21 +115,13 @@ type NodeRewardsInfo_v1 struct { MerkleProof []string `json:"merkleProof"` } -func (i *NodeRewardsInfo_v1) GetRewardNetwork() uint64 { - return i.RewardNetwork -} -func (i 
// Node operator rewards type NodeRewardsInfo_v1 struct { @@ -100,21 +115,13 @@ type NodeRewardsInfo_v1 struct { MerkleProof []string `json:"merkleProof"` } -func (i *NodeRewardsInfo_v1) GetRewardNetwork() uint64 { - return i.RewardNetwork -} -func (i *NodeRewardsInfo_v1) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl -} -func (i *NodeRewardsInfo_v1) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl -} -func (i *NodeRewardsInfo_v1) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth -} -func (n *NodeRewardsInfo_v1) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { +func (f *RewardsFile_v1) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { proof = append(proof, common.HexToHash(proofLevel)) } return proof, nil @@ -132,14 +139,98 @@ func (f *RewardsFile_v1) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *RewardsFile_v1) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for rewards file v1") +} + // Deserialize a rewards file from bytes func (f *RewardsFile_v1) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) } -// Get the rewards file's header -func (f *RewardsFile_v1) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the rewards file version +func (f *RewardsFile_v1) GetRewardsFileVersion() uint64 { + return rewardsFileVersionOne +} + +// Get the rewards file index +func (f *RewardsFile_v1) GetIndex() uint64 { + return f.RewardsFileHeader.Index +} + +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v1) GetTotalNodeWeight() *big.Int { + return nil +} + +// Get the merkle root +func (f *RewardsFile_v1) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot +} + +// Get network rewards for a specific network +func (f *RewardsFile_v1) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] +} + +// Get the number of intervals that have passed +func (f *RewardsFile_v1) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed +} + +// Get the total RPL sent to the pDAO +func (f *RewardsFile_v1) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int +} + +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v1) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int +} + +// Get the total Eth sent to pool stakers from the SP +func (f *RewardsFile_v1) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +// Get the total RPL sent to stakers +func (f *RewardsFile_v1) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int +} + +// Get the total smoothing pool eth sent to node operators +func (f *RewardsFile_v1) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +// Get the execution start block +func (f *RewardsFile_v1) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock +} + +// Get the consensus start block +func (f *RewardsFile_v1) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock +} + +// Get the execution end block +func (f *RewardsFile_v1) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock +} + +// Get the consensus end block +func (f *RewardsFile_v1) GetConsensusEndBlock() uint64 {
+ return f.RewardsFileHeader.ConsensusEndBlock +} + +// Get the start time +func (f *RewardsFile_v1) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime +} + +// Get the end time +func (f *RewardsFile_v1) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -154,15 +245,71 @@ func (f *RewardsFile_v1) GetNodeAddresses() []common.Address { return addresses } -// Get info about a node's rewards -func (f *RewardsFile_v1) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v1) getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v1, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v1) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v1) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +} + +func (f *RewardsFile_v1) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v1) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +// Getters for network info +func (f *RewardsFile_v1) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v1) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v1) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v1) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -171,7 +318,7 @@ func (f *RewardsFile_v1) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v1) generateMerkleTree() error { +func (f *RewardsFile_v1) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rewards-file-v2.go b/shared/services/rewards/rewards-file-v2.go index ae5b2f460..09bfa69cb 100644 --- a/shared/services/rewards/rewards-file-v2.go +++ b/shared/services/rewards/rewards-file-v2.go @@ -13,9 +13,8 @@ import ( "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information type MinipoolPerformanceFile_v2 struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` + RewardsFileVersion uint64 `json:"rewardsFileVersion"` RulesetVersion uint64 `json:"rulesetVersion"` Index uint64 `json:"index"` Network string `json:"network"` @@ -26,6 +25,7 @@ type MinipoolPerformanceFile_v2 struct { ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` 
ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` MinipoolPerformance map[common.Address]*SmoothingPoolMinipoolPerformance_v2 `json:"minipoolPerformance"` + BonusScalar *QuotedBigInt `json:"bonusScalar,omitempty"` } // Serialize a minipool performance file into bytes @@ -33,6 +33,10 @@ func (f *MinipoolPerformanceFile_v2) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *MinipoolPerformanceFile_v2) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for minipool performance files") +} + // Serialize a minipool performance file into bytes designed for human readability func (f *MinipoolPerformanceFile_v2) SerializeHuman() ([]byte, error) { return json.MarshalIndent(f, "", "\t") @@ -69,6 +73,9 @@ type SmoothingPoolMinipoolPerformance_v2 struct { AttestationScore *QuotedBigInt `json:"attestationScore"` MissingAttestationSlots []uint64 `json:"missingAttestationSlots"` EthEarned *QuotedBigInt `json:"ethEarned"` + ConsensusIncome *QuotedBigInt `json:"consensusIncome,omitempty"` + BonusEthEarned *QuotedBigInt `json:"bonusEthEarned,omitempty"` + EffectiveCommission *QuotedBigInt `json:"effectiveCommission,omitempty"` } func (p *SmoothingPoolMinipoolPerformance_v2) GetPubkey() (types.ValidatorPubkey, error) { @@ -86,6 +93,27 @@ func (p *SmoothingPoolMinipoolPerformance_v2) GetMissingAttestationSlots() []uin func (p *SmoothingPoolMinipoolPerformance_v2) GetEthEarned() *big.Int { return &p.EthEarned.Int } +func (p *SmoothingPoolMinipoolPerformance_v2) GetBonusEthEarned() *big.Int { + if p.BonusEthEarned == nil { + return big.NewInt(0) + } + return &p.BonusEthEarned.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetEffectiveCommission() *big.Int { + if p.EffectiveCommission == nil { + return big.NewInt(0) + } + return &p.EffectiveCommission.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetConsensusIncome() *big.Int { + if p.ConsensusIncome == nil { + return big.NewInt(0) + } + return &p.ConsensusIncome.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetAttestationScore() *big.Int { + return &p.AttestationScore.Int +} // Node operator rewards type NodeRewardsInfo_v2 struct { @@ -97,21 +125,13 @@ type NodeRewardsInfo_v2 struct { MerkleProof []string `json:"merkleProof"` } -func (i *NodeRewardsInfo_v2) GetRewardNetwork() uint64 { - return i.RewardNetwork -} -func (i *NodeRewardsInfo_v2) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl -} -func (i *NodeRewardsInfo_v2) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl -} -func (i *NodeRewardsInfo_v2) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth -} -func (n *NodeRewardsInfo_v2) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { +func (f *RewardsFile_v2) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { proof = append(proof, common.HexToHash(proofLevel)) } return proof, nil @@ -129,14 +149,98 @@ func (f *RewardsFile_v2) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *RewardsFile_v2) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for rewards file v2") +} + // Deserialize a rewards file from bytes func (f *RewardsFile_v2) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) } -// Get the rewards file's header -func (f 
*RewardsFile_v2) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the rewards file version +func (f *RewardsFile_v2) GetRewardsFileVersion() uint64 { + return rewardsFileVersionTwo +} + +// Get the rewards file index +func (f *RewardsFile_v2) GetIndex() uint64 { + return f.RewardsFileHeader.Index +} + +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v2) GetTotalNodeWeight() *big.Int { + return nil +} + +// Get the merkle root +func (f *RewardsFile_v2) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot +} + +// Get network rewards for a specific network +func (f *RewardsFile_v2) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] +} + +// Get the number of intervals that have passed +func (f *RewardsFile_v2) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed +} + +// Get the total RPL sent to the pDAO +func (f *RewardsFile_v2) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int +} + +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v2) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int +} + +// Get the total Eth sent to pool stakers from the SP +func (f *RewardsFile_v2) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +// Get the total RPL sent to stakers +func (f *RewardsFile_v2) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int +} + +// Get the total smoothing pool eth sent to node operators +func (f *RewardsFile_v2) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +// Get the execution start block +func (f *RewardsFile_v2) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock +} + +// Get the consensus start block +func (f *RewardsFile_v2) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock +} + +// Get the execution end block +func (f *RewardsFile_v2) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock +} + +// Get the consensus end block +func (f *RewardsFile_v2) GetConsensusEndBlock() uint64 { + return f.RewardsFileHeader.ConsensusEndBlock +} + +// Get the start time +func (f *RewardsFile_v2) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime +} + +// Get the end time +func (f *RewardsFile_v2) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -151,15 +255,71 @@ func (f *RewardsFile_v2) GetNodeAddresses() []common.Address { return addresses } -// Get info about a node's rewards -func (f *RewardsFile_v2) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v2) getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v2, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v2) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v2) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +}
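A note on the accessor pattern here: the per-node getters that follow return big.NewInt(0) when an address has no entry, so HasRewardsFor is what distinguishes "absent from the file" from "earned zero". A small sketch of safe consumption (hypothetical helper, not part of this diff):

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// Returns the node's Smoothing Pool ETH plus a presence flag; the getter's
// zero value alone cannot distinguish a missing node from zero rewards.
func nodeSmoothingPoolEth(f *RewardsFile_v2, addr common.Address) (*big.Int, bool) {
	if !f.HasRewardsFor(addr) {
		return nil, false
	}
	return f.GetNodeSmoothingPoolEth(addr), true
}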
+ +func (f *RewardsFile_v2) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v2) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v2) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +// Getters for network info +func (f *RewardsFile_v2) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v2) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v2) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v2) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -168,7 +328,7 @@ func (f *RewardsFile_v2) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v2) generateMerkleTree() error { +func (f *RewardsFile_v2) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rewards-file-v3.go b/shared/services/rewards/rewards-file-v3.go index efeb8f56c..231badc45 100644 --- a/shared/services/rewards/rewards-file-v3.go +++ b/shared/services/rewards/rewards-file-v3.go @@ -8,135 +8,129 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/goccy/go-json" - "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types" "github.com/wealdtech/go-merkletree" "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information -type MinipoolPerformanceFile_v3 struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` - RulesetVersion uint64 `json:"rulesetVersion"` - Index uint64 `json:"index"` - Network string `json:"network"` - StartTime time.Time `json:"startTime,omitempty"` - EndTime time.Time `json:"endTime,omitempty"` - ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` - ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` - ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` - ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` - MinipoolPerformance map[common.Address]*SmoothingPoolMinipoolPerformance_v3 `json:"minipoolPerformance"` -} - -// Serialize a minipool performance file into bytes -func (f *MinipoolPerformanceFile_v3) Serialize() ([]byte, error) { +// JSON struct for a complete rewards file +type RewardsFile_v3 struct { + *RewardsFileHeader + NodeRewards map[common.Address]*NodeRewardsInfo_v2 `json:"nodeRewards"` + MinipoolPerformanceFile MinipoolPerformanceFile_v2 `json:"-"` +} + +// Serialize a rewards file into bytes +func (f *RewardsFile_v3) Serialize() ([]byte, error) { return json.Marshal(f) } -// Serialize a minipool performance file into bytes designed for human readability -func (f *MinipoolPerformanceFile_v3) SerializeHuman() ([]byte, error) { - return
json.MarshalIndent(f, "", "\t") +// Serialize as SSZ +func (f *RewardsFile_v3) SerializeSSZ() ([]byte, error) { + // In order to avoid multiple code paths, we won't bother making a RewardsFile_v3 <-> SSZFile_v1 function + // Instead, we can serialize json, parse to SSZFile_v1, and then serialize that as SSZ + data, err := f.Serialize() + if err != nil { + return nil, fmt.Errorf("error converting RewardsFile v3 to json so it could be parsed as SSZFile_v1: %w", err) + } + + s := &ssz_types.SSZFile_v1{} + err = json.Unmarshal(data, s) + if err != nil { + return nil, fmt.Errorf("error parsing RewardsFile v3 json as SSZFile_v1: %w", err) + } + + return s.SerializeSSZ() } -// Deserialize a minipool performance file from bytes -func (f *MinipoolPerformanceFile_v3) Deserialize(bytes []byte) error { +// Deserialize a rewards file from bytes +func (f *RewardsFile_v3) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) }
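The SerializeSSZ implementation above deliberately round-trips through JSON (serialize, re-parse as SSZFile_v1, then emit SSZ) instead of maintaining a second conversion path. A hedged usage sketch, with hypothetical file paths and helper name, showing how a caller might persist both encodings of a v3 file:

import "os"

// Writes the canonical JSON artifact, then the SSZ artifact produced by the
// JSON -> SSZFile_v1 -> SSZ round-trip shown above.
func writeArtifacts(f *RewardsFile_v3, jsonPath, sszPath string) error {
	jsonBytes, err := f.Serialize()
	if err != nil {
		return err
	}
	if err := os.WriteFile(jsonPath, jsonBytes, 0644); err != nil {
		return err
	}
	sszBytes, err := f.SerializeSSZ()
	if err != nil {
		return err
	}
	return os.WriteFile(sszPath, sszBytes, 0644)
}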
-// Get all of the minipool addresses with rewards in this file -// NOTE: the order of minipool addresses is not guaranteed to be stable, so don't rely on it -func (f *MinipoolPerformanceFile_v3) GetMinipoolAddresses() []common.Address { - addresses := make([]common.Address, len(f.MinipoolPerformance)) - i := 0 - for address := range f.MinipoolPerformance { - addresses[i] = address - i++ - } - return addresses +// Get the rewards file version +func (f *RewardsFile_v3) GetRewardsFileVersion() uint64 { + return rewardsFileVersionThree } -// Get a minipool's smoothing pool performance if it was present -func (f *MinipoolPerformanceFile_v3) GetSmoothingPoolPerformance(minipoolAddress common.Address) (ISmoothingPoolMinipoolPerformance, bool) { - perf, exists := f.MinipoolPerformance[minipoolAddress] - return perf, exists +// Get the rewards file index +func (f *RewardsFile_v3) GetIndex() uint64 { + return f.RewardsFileHeader.Index } -// Minipool stats -type SmoothingPoolMinipoolPerformance_v3 struct { - Pubkey string `json:"pubkey"` - SuccessfulAttestations uint64 `json:"successfulAttestations"` - MissedAttestations uint64 `json:"missedAttestations"` - AttestationScore *QuotedBigInt `json:"attestationScore"` - MissingAttestationSlots []uint64 `json:"missingAttestationSlots"` - EthEarned *QuotedBigInt `json:"ethEarned"` +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v3) GetTotalNodeWeight() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalNodeWeight.Int } -func (p *SmoothingPoolMinipoolPerformance_v3) GetPubkey() (types.ValidatorPubkey, error) { - return types.HexToValidatorPubkey(p.Pubkey) -} -func (p *SmoothingPoolMinipoolPerformance_v3) GetSuccessfulAttestationCount() uint64 { - return p.SuccessfulAttestations +// Get the merkle root +func (f *RewardsFile_v3) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot } -func (p *SmoothingPoolMinipoolPerformance_v3) GetMissedAttestationCount() uint64 { - return p.MissedAttestations + +// Get network rewards for a specific network +func (f *RewardsFile_v3) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] } -func (p *SmoothingPoolMinipoolPerformance_v3) GetMissingAttestationSlots() []uint64 { - return p.MissingAttestationSlots + +// Get the number of intervals that have passed +func (f *RewardsFile_v3) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed } -func (p *SmoothingPoolMinipoolPerformance_v3) GetEthEarned() *big.Int { - return &p.EthEarned.Int + +// Get the total RPL sent to the pDAO +func (f *RewardsFile_v3) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int } -// Node operator rewards -type NodeRewardsInfo_v3 struct { - RewardNetwork uint64 `json:"rewardNetwork"` - CollateralRpl *QuotedBigInt `json:"collateralRpl"` - OracleDaoRpl *QuotedBigInt `json:"oracleDaoRpl"` - SmoothingPoolEth *QuotedBigInt `json:"smoothingPoolEth"` - MerkleData []byte `json:"-"` - MerkleProof []string `json:"merkleProof"` +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v3) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int } -func (i *NodeRewardsInfo_v3) GetRewardNetwork() uint64 { - return i.RewardNetwork +// Get the total Eth sent to pool stakers from the SP +func (f *RewardsFile_v3) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int } -func (i *NodeRewardsInfo_v3) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl + +// Get the total RPL sent to stakers +func (f *RewardsFile_v3) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int } -func (i *NodeRewardsInfo_v3) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl + +// Get the total smoothing pool eth sent to node operators +func (f *RewardsFile_v3) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int } -func (i *NodeRewardsInfo_v3) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth + +// Get the execution end block +func (f *RewardsFile_v3) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock } -func (n *NodeRewardsInfo_v3) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { - proof = append(proof, common.HexToHash(proofLevel)) - } - return proof, nil + +// Get the consensus end block +func (f *RewardsFile_v3) GetConsensusEndBlock() uint64 { + return f.RewardsFileHeader.ConsensusEndBlock } -// JSON struct for a complete rewards file -type RewardsFile_v3 struct { - *RewardsFileHeader - NodeRewards map[common.Address]*NodeRewardsInfo_v3 `json:"nodeRewards"` - MinipoolPerformanceFile MinipoolPerformanceFile_v3 `json:"-"` +// Get the execution start block +func (f *RewardsFile_v3) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock } -// Serialize a rewards file into bytes -func (f *RewardsFile_v3) Serialize() ([]byte, error) { - return json.Marshal(f) +// Get the consensus start block +func (f *RewardsFile_v3) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock } -// Deserialize a rewards file from bytes -func (f *RewardsFile_v3) Deserialize(bytes []byte) error { - return json.Unmarshal(bytes, &f) +// Get the start time +func (f *RewardsFile_v3) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime } -// Get the rewards file's header -func (f *RewardsFile_v3) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the end time +func (f *RewardsFile_v3) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -151,15 +145,83 @@ func (f *RewardsFile_v3) GetNodeAddresses() []common.Address { return addresses } -// Get info about a node's rewards -func (f *RewardsFile_v3) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v3)
getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v2, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v3) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v3) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +} + +func (f *RewardsFile_v3) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v3) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v3) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +func (f *RewardsFile_v3) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { + proof = append(proof, common.HexToHash(proofLevel)) + } + return proof, nil +} + +// Getters for network info +func (f *RewardsFile_v3) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v3) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v3) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v3) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -168,7 +230,7 @@ func (f *RewardsFile_v3) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v3) generateMerkleTree() error { +func (f *RewardsFile_v3) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rolling-manager.go b/shared/services/rewards/rolling-manager.go deleted file mode 100644 index 79bbdd7c2..000000000 --- a/shared/services/rewards/rolling-manager.go +++ /dev/null @@ -1,645 +0,0 @@ -package rewards - -import ( - "bytes" - "crypto/sha512" - "encoding/hex" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/blang/semver/v4" - "github.com/klauspost/compress/zstd" - rprewards "github.com/rocket-pool/rocketpool-go/rewards" - "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/config" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" -) - -const ( - recordsFilenameFormat string = "%d-%d.json.zst" - recordsFilenamePattern string = "(?P\\d+)\\-(?P\\d+)\\.json\\.zst" - latestCompatibleVersionString string = 
"1.11.0-dev" -) - -// Manager for RollingRecords -type RollingRecordManager struct { - Record *RollingRecord - LatestFinalizedEpoch uint64 - ExpectedBalancesBlock uint64 - ExpectedRewardsIntervalBlock uint64 - - log *log.ColorLogger - errLog *log.ColorLogger - logPrefix string - cfg *config.RocketPoolConfig - rp *rocketpool.RocketPool - bc beacon.Client - mgr *state.NetworkStateManager - startSlot uint64 - nextEpochToSave uint64 - - beaconCfg beacon.Eth2Config - genesisTime time.Time - compressor *zstd.Encoder - decompressor *zstd.Decoder - recordsFilenameRegex *regexp.Regexp -} - -// Creates a new manager for rolling records. -func NewRollingRecordManager(log *log.ColorLogger, errLog *log.ColorLogger, cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, bc beacon.Client, mgr *state.NetworkStateManager, startSlot uint64, beaconCfg beacon.Eth2Config, rewardsInterval uint64) (*RollingRecordManager, error) { - // Get the Beacon genesis time - genesisTime := time.Unix(int64(beaconCfg.GenesisTime), 0) - - // Create the zstd compressor and decompressor - encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression)) - if err != nil { - return nil, fmt.Errorf("error creating zstd compressor for rolling record manager: %w", err) - } - decoder, err := zstd.NewReader(nil) - if err != nil { - return nil, fmt.Errorf("error creating zstd decompressor for rolling record manager: %w", err) - } - - // Create the records filename regex - recordsFilenameRegex := regexp.MustCompile(recordsFilenamePattern) - - // Make the records folder if it doesn't exist - recordsPath := cfg.Smartnode.GetRecordsPath() - fileInfo, err := os.Stat(recordsPath) - if os.IsNotExist(err) { - err2 := os.MkdirAll(recordsPath, 0755) - if err2 != nil { - return nil, fmt.Errorf("error creating rolling records folder: %w", err) - } - } else if err != nil { - return nil, fmt.Errorf("error checking rolling records folder: %w", err) - } else if !fileInfo.IsDir() { - return nil, fmt.Errorf("rolling records folder location exists (%s), but is not a folder", recordsPath) - } - - logPrefix := "[Rolling Record]" - log.Printlnf("%s Created Rolling Record manager for start slot %d.", logPrefix, startSlot) - return &RollingRecordManager{ - Record: NewRollingRecord(log, logPrefix, bc, startSlot, &beaconCfg, rewardsInterval), - - log: log, - errLog: errLog, - logPrefix: logPrefix, - cfg: cfg, - rp: rp, - bc: bc, - mgr: mgr, - startSlot: startSlot, - beaconCfg: beaconCfg, - genesisTime: genesisTime, - compressor: encoder, - decompressor: decoder, - recordsFilenameRegex: recordsFilenameRegex, - }, nil -} - -// Generate a new record for the provided slot using the latest viable saved record -func (r *RollingRecordManager) GenerateRecordForState(state *state.NetworkState) (*RollingRecord, error) { - // Load the latest viable record - slot := state.BeaconSlotNumber - rewardsInterval := state.NetworkDetails.RewardIndex - record, err := r.LoadBestRecordFromDisk(r.startSlot, slot, rewardsInterval) - if err != nil { - return nil, fmt.Errorf("error loading best record for slot %d: %w", slot, err) - } - - if record.LastDutiesSlot == slot { - // Already have a full snapshot so we don't have to do anything - r.log.Printf("%s Loaded record was already up-to-date for slot %d.", r.logPrefix, slot) - return record, nil - } else if record.LastDutiesSlot > slot { - // This should never happen but sanity check it anyway - return nil, fmt.Errorf("loaded record has duties completed for slot %d, which is too far forward (targeting slot %d)", 
record.LastDutiesSlot, slot) - } - - // Update to the target slot - err = r.UpdateRecordToState(state, slot) - if err != nil { - return nil, fmt.Errorf("error updating record to slot %d: %w", slot, err) - } - - return record, nil -} - -// Save the rolling record to a file and update the record info catalog -func (r *RollingRecordManager) SaveRecordToFile(record *RollingRecord) error { - - // Serialize the record - bytes, err := record.Serialize() - if err != nil { - return fmt.Errorf("error saving rolling record: %w", err) - } - - // Compress the record - compressedBytes := r.compressor.EncodeAll(bytes, make([]byte, 0, len(bytes))) - - // Get the record filename - slot := record.LastDutiesSlot - epoch := record.LastDutiesSlot / r.beaconCfg.SlotsPerEpoch - recordsPath := r.cfg.Smartnode.GetRecordsPath() - filename := filepath.Join(recordsPath, fmt.Sprintf(recordsFilenameFormat, slot, epoch)) - - // Write it to a file - err = os.WriteFile(filename, compressedBytes, 0664) - if err != nil { - return fmt.Errorf("error writing file [%s]: %w", filename, err) - } - - // Compute the SHA384 hash to act as a checksum - checksum := sha512.Sum384(compressedBytes) - - // Load the existing checksum table - _, lines, err := r.parseChecksumFile() - if err != nil { - return fmt.Errorf("error parsing checkpoint file: %w", err) - } - if lines == nil { - lines = []string{} - } - - // Add the new record checksum - baseFilename := filepath.Base(filename) - checksumLine := fmt.Sprintf("%s %s", hex.EncodeToString(checksum[:]), baseFilename) - - // Sort the lines by their slot - err = r.sortChecksumEntries(lines) - if err != nil { - return fmt.Errorf("error sorting checkpoint file entries: %w", err) - } - - overwritten := false - for i, line := range lines { - if strings.HasSuffix(line, baseFilename) { - // If there is already a line with the filename, overwrite it - lines[i] = checksumLine - overwritten = true - break - } - } - if !overwritten { - // If there's no existing lines, add this to the end - lines = append(lines, checksumLine) - } - - // Get the number of lines to write - checkpointRetentionLimit := r.cfg.Smartnode.CheckpointRetentionLimit.Value.(uint64) - var newLines []string - if len(lines) > int(checkpointRetentionLimit) { - numberOfNewLines := int(checkpointRetentionLimit) - cullCount := len(lines) - numberOfNewLines - - // Remove old lines and delete the corresponding files that shouldn't be retained - for i := 0; i < cullCount; i++ { - line := lines[i] - - // Extract the filename - elems := strings.Split(line, " ") - if len(elems) != 2 { - return fmt.Errorf("error parsing checkpoint line (%s): expected 2 elements, but got %d", line, len(elems)) - } - filename := elems[1] - fullFilename := filepath.Join(recordsPath, filename) - - // Delete the file if it exists - _, err := os.Stat(fullFilename) - if os.IsNotExist(err) { - r.log.Printlnf("%s NOTE: tried removing checkpoint file [%s] based on the retention limit, but it didn't exist.", r.logPrefix, filename) - continue - } - err = os.Remove(fullFilename) - if err != nil { - return fmt.Errorf("error deleting file [%s]: %w", fullFilename, err) - } - - r.log.Printlnf("%s Removed checkpoint file [%s] based on the retention limit.", r.logPrefix, filename) - } - - // Store the rest - newLines = make([]string, numberOfNewLines) - for i := cullCount; i <= numberOfNewLines; i++ { - newLines[i-cullCount] = lines[i] - } - } else { - newLines = lines - } - - fileContents := strings.Join(newLines, "\n") - checksumBytes := []byte(fileContents) - - // Save the new 
file - checksumFilename := filepath.Join(recordsPath, config.ChecksumTableFilename) - err = os.WriteFile(checksumFilename, checksumBytes, 0644) - if err != nil { - return fmt.Errorf("error writing checksum file after culling: %w", err) - } - - return nil -} - -// Load the most recent appropriate rolling record from disk, using the checksum table as an index -func (r *RollingRecordManager) LoadBestRecordFromDisk(startSlot uint64, targetSlot uint64, rewardsInterval uint64) (*RollingRecord, error) { - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - latestCompatibleVersion, err := semver.New(latestCompatibleVersionString) - if err != nil { - return nil, fmt.Errorf("error parsing latest compatible version string [%s]: %w", latestCompatibleVersionString, err) - } - - // Parse the checksum file - exists, lines, err := r.parseChecksumFile() - if err != nil { - return nil, fmt.Errorf("error parsing checkpoint file: %w", err) - } - if !exists { - // There isn't a checksum file so start over - r.log.Printlnf("%s Checksum file not found, creating a new record from the start of the interval.", r.logPrefix) - record := NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, rewardsInterval) - r.Record = record - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - return record, nil - } - - // Iterate over each file, counting backwards from the bottom - recordsPath := r.cfg.Smartnode.GetRecordsPath() - for i := len(lines) - 1; i >= 0; i-- { - line := lines[i] - - // Extract the checksum, filename, and slot number - checksumString, filename, slot, err := r.parseChecksumEntry(line) - if err != nil { - return nil, err - } - - // Check if the slot was too far into the future - if slot > targetSlot { - r.log.Printlnf("%s File [%s] was too far into the future, trying an older one...", r.logPrefix, filename) - continue - } - - // Check if it was too far into the past - if slot < startSlot { - r.log.Printlnf("%s File [%s] was too old (generated before the target start slot), none of the remaining records can be used.", r.logPrefix, filename) - break - } - - // Make sure the checksum parses properly - checksum, err := hex.DecodeString(checksumString) - if err != nil { - return nil, fmt.Errorf("error scanning checkpoint line (%s): checksum (%s) could not be parsed", line, checksumString) - } - - // Try to load it - fullFilename := filepath.Join(recordsPath, filename) - record, err := r.loadRecordFromFile(fullFilename, checksum) - if err != nil { - r.log.Printlnf("%s WARNING: error loading record from file [%s]: %s... 
attempting previous file", r.logPrefix, fullFilename, err.Error()) - continue - } - - // Check if it was for the proper interval - if record.RewardsInterval != rewardsInterval { - r.log.Printlnf("%s File [%s] was for rewards interval %d instead of %d so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, record.RewardsInterval, rewardsInterval) - continue - } - - // Check if it has the proper start slot - if record.StartSlot != startSlot { - r.log.Printlnf("%s File [%s] started on slot %d instead of %d so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, record.StartSlot, startSlot) - continue - } - - // Check if it's using a compatible version - recordVersionString := record.SmartnodeVersion - if recordVersionString == "" { - recordVersionString = "1.10.0" // First release without version info - } - recordVersion, err := semver.New(recordVersionString) - if err != nil { - r.log.Printlnf("%s Failed to parse the version info for file [%s] so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename) - continue - } - if recordVersion.LT(*latestCompatibleVersion) { - r.log.Printlnf("%s File [%s] was made with Smartnode v%s which is not compatible (lowest compatible = v%s) so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, recordVersionString, latestCompatibleVersionString) - continue - } - - epoch := slot / r.beaconCfg.SlotsPerEpoch - r.log.Printlnf("%s Loaded file [%s] which ended on slot %d (epoch %d) for rewards interval %d.", r.logPrefix, filename, slot, epoch, record.RewardsInterval) - r.Record = record - r.nextEpochToSave = record.LastDutiesSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - return record, nil - - } - - // If we got here then none of the saved files worked so we have to make a new record - r.log.Printlnf("%s None of the saved record checkpoint files were eligible for use, creating a new record from the start of the interval.", r.logPrefix) - record := NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, rewardsInterval) - r.Record = record - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - return record, nil - -} - -// Updates the manager's record to the provided state, retrying upon errors until success -func (r *RollingRecordManager) UpdateRecordToState(state *state.NetworkState, latestFinalizedSlot uint64) error { - err := r.updateImpl(state, latestFinalizedSlot) - if err != nil { - // Revert to the latest saved state - r.log.Printlnf("%s WARNING: failed to update rolling record to slot %d, block %d: %s", r.logPrefix, state.BeaconSlotNumber, state.ElBlockNumber, err.Error()) - r.log.Printlnf("%s Reverting to the last saved checkpoint to prevent corruption...", r.logPrefix) - _, err2 := r.LoadBestRecordFromDisk(r.startSlot, latestFinalizedSlot, r.Record.RewardsInterval) - if err2 != nil { - return fmt.Errorf("error loading last best checkpoint: %w", err) - } - - // Try again - r.log.Printlnf("%s Successfully reverted to the last saved state.", r.logPrefix) - return err - } - - return nil -} - -// Updates the manager's record to the provided state -func (r *RollingRecordManager) updateImpl(state *state.NetworkState, latestFinalizedSlot uint64) error { - var err error - r.log.Printlnf("Updating record to target slot %d...", latestFinalizedSlot) - - // Create a new record if the current one is for the previous rewards interval - if r.Record.RewardsInterval < state.NetworkDetails.RewardIndex { - err := 
r.createNewRecord(state) - if err != nil { - return fmt.Errorf("error creating new record: %w", err) - } - } - - // Get the state for the target slot - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - finalTarget := latestFinalizedSlot - finalizedState := state - if finalTarget != state.BeaconSlotNumber { - finalizedState, err = r.mgr.GetStateForSlot(finalTarget) - if err != nil { - return fmt.Errorf("error getting state for latest finalized slot (%d): %w", finalTarget, err) - } - } - - // Break the routine into chunks so it can be saved if necessary - nextStartSlot := r.Record.LastDutiesSlot + 1 - if r.Record.LastDutiesSlot == 0 { - nextStartSlot = r.startSlot - } - - nextStartEpoch := nextStartSlot / r.beaconCfg.SlotsPerEpoch - finalEpoch := finalTarget / r.beaconCfg.SlotsPerEpoch - - nextTargetEpoch := finalEpoch - if nextTargetEpoch > r.nextEpochToSave { - // Make a stop at the next required checkpoint so it can be saved - nextTargetEpoch = r.nextEpochToSave - } - nextTargetSlot := (nextTargetEpoch+1)*r.beaconCfg.SlotsPerEpoch - 1 // Target is the last slot of the epoch - if nextTargetSlot > finalTarget { - nextTargetSlot = finalTarget - } - totalSlots := float64(finalTarget - nextStartSlot + 1) - initialSlot := nextStartSlot - - r.log.Printlnf("%s Collecting records from slot %d (epoch %d) to slot %d (epoch %d).", r.logPrefix, nextStartSlot, nextStartEpoch, finalTarget, finalEpoch) - startTime := time.Now() - for { - if nextStartSlot > finalTarget { - break - } - - // Update the record to the target state - err = r.Record.UpdateToSlot(nextTargetSlot, finalizedState) - if err != nil { - return fmt.Errorf("error updating rolling record to slot %d, block %d: %w", state.BeaconSlotNumber, state.ElBlockNumber, err) - } - slotsProcessed := nextTargetSlot - initialSlot + 1 - r.log.Printf("%s (%.2f%%) Updated from slot %d (epoch %d) to slot %d (epoch %d)... 
(%s so far) ", r.logPrefix, float64(slotsProcessed)/totalSlots*100.0, nextStartSlot, nextStartEpoch, nextTargetSlot, nextTargetEpoch, time.Since(startTime)) - - // Save if required - if nextTargetEpoch == r.nextEpochToSave { - err = r.SaveRecordToFile(r.Record) - if err != nil { - return fmt.Errorf("error saving record: %w", err) - } - r.log.Printlnf("%s Saved record checkpoint.", r.logPrefix) - r.nextEpochToSave += recordCheckpointInterval // Set the next epoch to save 1 checkpoint in the future - } - - nextStartSlot = nextTargetSlot + 1 - nextStartEpoch = nextStartSlot / r.beaconCfg.SlotsPerEpoch - nextTargetEpoch = finalEpoch - if nextTargetEpoch > r.nextEpochToSave { - // Make a stop at the next required checkpoint so it can be saved - nextTargetEpoch = r.nextEpochToSave - } - nextTargetSlot = (nextTargetEpoch+1)*r.beaconCfg.SlotsPerEpoch - 1 // Target is the last slot of the epoch - if nextTargetSlot > finalTarget { - nextTargetSlot = finalTarget - } - } - - // Log the update - startEpoch := r.Record.StartSlot / r.beaconCfg.SlotsPerEpoch - currentEpoch := r.Record.LastDutiesSlot / r.beaconCfg.SlotsPerEpoch - r.log.Printlnf("%s Record update complete (slot %d-%d, epoch %d-%d).", r.logPrefix, r.Record.StartSlot, r.Record.LastDutiesSlot, startEpoch, currentEpoch) - - return nil -} - -// Prepares the record for a rewards interval report -func (r *RollingRecordManager) PrepareRecordForReport(state *state.NetworkState) error { - rewardsSlot := state.BeaconSlotNumber - - // Check if the current record has gone past the requested slot or if it can be updated / used - if rewardsSlot < r.Record.LastDutiesSlot { - r.log.Printlnf("%s Current record has extended too far (need slot %d, but record has processed slot %d)... reverting to a previous checkpoint.", r.logPrefix, rewardsSlot, r.Record.LastDutiesSlot) - - newRecord, err := r.GenerateRecordForState(state) - if err != nil { - return fmt.Errorf("error creating record for rewards slot: %w", err) - } - - r.Record = newRecord - } else { - r.log.Printlnf("%s Current record can be used (need slot %d, record has only processed slot %d), updating to target slot.", r.logPrefix, rewardsSlot, r.Record.LastDutiesSlot) - err := r.UpdateRecordToState(state, rewardsSlot) - if err != nil { - return fmt.Errorf("error updating record to rewards slot: %w", err) - } - } - - return nil -} - -// Get the slot number from a record filename -func (r *RollingRecordManager) getSlotFromFilename(filename string) (uint64, error) { - matches := r.recordsFilenameRegex.FindStringSubmatch(filename) - if matches == nil { - return 0, fmt.Errorf("filename (%s) did not match the expected format", filename) - } - slotIndex := r.recordsFilenameRegex.SubexpIndex("slot") - if slotIndex == -1 { - return 0, fmt.Errorf("slot number not found in filename (%s)", filename) - } - slotString := matches[slotIndex] - slot, err := strconv.ParseUint(slotString, 10, 64) - if err != nil { - return 0, fmt.Errorf("slot (%s) could not be parsed to a number", slotString) - } - - return slot, nil -} - -// Load a record from a file, making sure its contents match the provided checksum -func (r *RollingRecordManager) loadRecordFromFile(filename string, expectedChecksum []byte) (*RollingRecord, error) { - // Read the file - compressedBytes, err := os.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading file: %w", err) - } - - // Calculate the hash and validate it - checksum := sha512.Sum384(compressedBytes) - if !bytes.Equal(expectedChecksum, checksum[:]) { - expectedString := 
hex.EncodeToString(expectedChecksum) - actualString := hex.EncodeToString(checksum[:]) - return nil, fmt.Errorf("checksum mismatch (expected %s, but it was %s)", expectedString, actualString) - } - - // Decompress it - bytes, err := r.decompressor.DecodeAll(compressedBytes, []byte{}) - if err != nil { - return nil, fmt.Errorf("error decompressing data: %w", err) - } - - // Create a new record from the data - return DeserializeRollingRecord(r.log, r.logPrefix, r.bc, &r.beaconCfg, bytes) -} - -// Get the lines from the checksum file -func (r *RollingRecordManager) parseChecksumFile() (bool, []string, error) { - // Get the checksum filename - recordsPath := r.cfg.Smartnode.GetRecordsPath() - checksumFilename := filepath.Join(recordsPath, config.ChecksumTableFilename) - - // Check if the file exists - _, err := os.Stat(checksumFilename) - if os.IsNotExist(err) { - return false, nil, nil - } - - // Open the checksum file - checksumTable, err := os.ReadFile(checksumFilename) - if err != nil { - return false, nil, fmt.Errorf("error loading checksum table (%s): %w", checksumFilename, err) - } - - // Parse out each line - originalLines := strings.Split(string(checksumTable), "\n") - - // Remove empty lines - lines := make([]string, 0, len(originalLines)) - for _, line := range originalLines { - trimmedLine := strings.TrimSpace(line) - if trimmedLine != "" { - lines = append(lines, line) - } - } - - return true, lines, nil -} - -// Sort the checksum file entries by their slot -func (r *RollingRecordManager) sortChecksumEntries(lines []string) error { - var sortErr error - sort.Slice(lines, func(i int, j int) bool { - _, _, firstSlot, err := r.parseChecksumEntry(lines[i]) - if err != nil && sortErr == nil { - sortErr = err - return false - } - - _, _, secondSlot, err := r.parseChecksumEntry(lines[j]) - if err != nil && sortErr == nil { - sortErr = err - return false - } - - return firstSlot < secondSlot - }) - return sortErr -} - -// Get the checksum, the filename, and the slot number from a checksum entry. 
-func (r *RollingRecordManager) parseChecksumEntry(line string) (string, string, uint64, error) { - // Extract the checksum and filename - elems := strings.Split(line, " ") - if len(elems) != 2 { - return "", "", 0, fmt.Errorf("error parsing checkpoint line (%s): expected 2 elements, but got %d", line, len(elems)) - } - checksumString := elems[0] - filename := elems[1] - - // Extract the slot number for this file - slot, err := r.getSlotFromFilename(filename) - if err != nil { - return "", "", 0, fmt.Errorf("error scanning checkpoint line (%s): %w", line, err) - } - - return checksumString, filename, slot, nil -} - -// Creates a new record -func (r *RollingRecordManager) createNewRecord(state *state.NetworkState) error { - // Get the current interval index - currentIndexBig, err := rprewards.GetRewardIndex(r.rp, nil) - if err != nil { - return fmt.Errorf("error getting rewards index: %w", err) - } - currentIndex := currentIndexBig.Uint64() - - // Get the previous RocketRewardsPool addresses - prevAddresses := r.cfg.Smartnode.GetPreviousRewardsPoolAddresses() - - // Get the last rewards event and starting epoch - found, event, err := rprewards.GetRewardsEvent(r.rp, currentIndex-1, prevAddresses, nil) - if err != nil { - return fmt.Errorf("error getting event for rewards interval %d: %w", currentIndex-1, err) - } - if !found { - return fmt.Errorf("event for rewards interval %d not found", currentIndex-1) - } - - // Get the start slot of the current interval - startSlot, err := GetStartSlotForInterval(event, r.bc, r.beaconCfg) - if err != nil { - return fmt.Errorf("error getting start slot for interval %d: %w", currentIndex, err) - } - newEpoch := startSlot / r.beaconCfg.SlotsPerEpoch - - // Create a new record for the start slot - r.log.Printlnf("%s Current record is for interval %d which has passed, creating a new record for interval %d starting on slot %d (epoch %d).", r.logPrefix, r.Record.RewardsInterval, state.NetworkDetails.RewardIndex, startSlot, newEpoch) - r.Record = NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, state.NetworkDetails.RewardIndex) - r.startSlot = startSlot - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - - return nil -} diff --git a/shared/services/rewards/rolling-record.go b/shared/services/rewards/rolling-record.go deleted file mode 100644 index 544d2aebc..000000000 --- a/shared/services/rewards/rolling-record.go +++ /dev/null @@ -1,398 +0,0 @@ -package rewards - -import ( - "fmt" - "math/big" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/goccy/go-json" - "github.com/rocket-pool/rocketpool-go/types" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "github.com/rocket-pool/smartnode/shared" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" - "golang.org/x/sync/errgroup" -) - -const ( - threadLimit int = 12 -) - -type RollingRecord struct { - StartSlot uint64 `json:"startSlot"` - LastDutiesSlot uint64 `json:"lastDutiesSlot"` - ValidatorIndexMap map[string]*MinipoolInfo `json:"validatorIndexMap"` - RewardsInterval uint64 `json:"rewardsInterval"` - SmartnodeVersion string `json:"smartnodeVersion,omitempty"` - - // Private fields - bc beacon.Client `json:"-"` - beaconConfig *beacon.Eth2Config `json:"-"` - genesisTime time.Time `json:"-"` - log *log.ColorLogger `json:"-"` - 
logPrefix string `json:"-"` - intervalDutiesInfo *IntervalDutiesInfo `json:"-"` - - // Constants for convenience - one *big.Int `json:"-"` - validatorReq *big.Int `json:"-"` -} - -// Create a new rolling record wrapper -func NewRollingRecord(log *log.ColorLogger, logPrefix string, bc beacon.Client, startSlot uint64, beaconConfig *beacon.Eth2Config, rewardsInterval uint64) *RollingRecord { - return &RollingRecord{ - StartSlot: startSlot, - LastDutiesSlot: 0, - ValidatorIndexMap: map[string]*MinipoolInfo{}, - RewardsInterval: rewardsInterval, - SmartnodeVersion: shared.RocketPoolVersion, - - bc: bc, - beaconConfig: beaconConfig, - genesisTime: time.Unix(int64(beaconConfig.GenesisTime), 0), - log: log, - logPrefix: logPrefix, - intervalDutiesInfo: &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - }, - - one: eth.EthToWei(1), - validatorReq: eth.EthToWei(32), - } -} - -// Load an existing record from serialized JSON data -func DeserializeRollingRecord(log *log.ColorLogger, logPrefix string, bc beacon.Client, beaconConfig *beacon.Eth2Config, bytes []byte) (*RollingRecord, error) { - record := &RollingRecord{ - bc: bc, - beaconConfig: beaconConfig, - genesisTime: time.Unix(int64(beaconConfig.GenesisTime), 0), - log: log, - logPrefix: logPrefix, - intervalDutiesInfo: &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - }, - - one: eth.EthToWei(1), - validatorReq: eth.EthToWei(32), - } - - err := json.Unmarshal(bytes, &record) - if err != nil { - return nil, fmt.Errorf("error deserializing record: %w", err) - } - - return record, nil -} - -// Update the record to the requested slot, using the provided state as a reference. -// Requires the epoch *after* the requested slot to be finalized so it can accurately count attestations. -func (r *RollingRecord) UpdateToSlot(slot uint64, state *state.NetworkState) error { - - // Get the slot to start processing from - startSlot := r.LastDutiesSlot + 1 - if r.LastDutiesSlot == 0 { - startSlot = r.StartSlot - } - startEpoch := startSlot / r.beaconConfig.SlotsPerEpoch - - // Get the epoch for the state - stateEpoch := slot / r.beaconConfig.SlotsPerEpoch - - //r.log.Printlnf("%s Updating rolling record from slot %d (epoch %d) to %d (epoch %d).", r.logPrefix, startSlot, startEpoch, slot, stateEpoch) - //start := time.Now() - - // Update the validator indices and flag any cheating nodes - r.updateValidatorIndices(state) - - // Process every epoch from the start to the current one - for epoch := startEpoch; epoch <= stateEpoch; epoch++ { - - // Retrieve the duties for the epoch - this won't get duties higher than the given state - err := r.getDutiesForEpoch(epoch, slot, state) - if err != nil { - return fmt.Errorf("error getting duties for epoch %d: %w", epoch, err) - } - - // Process the epoch's attestation submissions - err = r.processAttestationsInEpoch(epoch, state) - if err != nil { - return fmt.Errorf("error processing attestations in epoch %d: %w", epoch, err) - } - - } - - // Process the epoch after the last one to check for late attestations / attestations of the last slot - err := r.processAttestationsInEpoch(stateEpoch+1, state) - if err != nil { - return fmt.Errorf("error processing attestations in epoch %d: %w", stateEpoch+1, err) - } - - // Clear the duties cache since it's not required anymore - r.intervalDutiesInfo = &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - } - - return nil -} - -// Get the minipool scores, along with the cumulative total score and count - ignores minipools that belonged to cheaters -func (r *RollingRecord) 
GetScores(cheatingNodes map[common.Address]bool) ([]*MinipoolInfo, *big.Int, uint64) { - // Create a slice of minipools with legal (non-cheater) scores - minipoolInfos := make([]*MinipoolInfo, 0, len(r.ValidatorIndexMap)) - - // TODO: return a new slice of minipool infos that ignores all cheaters - totalScore := big.NewInt(0) - totalCount := uint64(0) - for _, mpInfo := range r.ValidatorIndexMap { - - // Ignore nodes that cheated - if cheatingNodes[mpInfo.NodeAddress] { - continue - } - - totalScore.Add(totalScore, &mpInfo.AttestationScore.Int) - totalCount += uint64(mpInfo.AttestationCount) - minipoolInfos = append(minipoolInfos, mpInfo) - } - - return minipoolInfos, totalScore, totalCount -} - -// Serialize the current record into a byte array -func (r *RollingRecord) Serialize() ([]byte, error) { - // Clone the record - clone := &RollingRecord{ - StartSlot: r.StartSlot, - LastDutiesSlot: r.LastDutiesSlot, - RewardsInterval: r.RewardsInterval, - SmartnodeVersion: r.SmartnodeVersion, - ValidatorIndexMap: map[string]*MinipoolInfo{}, - } - - // Remove minipool perf records with zero attestations from the serialization - for pubkey, mp := range r.ValidatorIndexMap { - if mp.AttestationCount > 0 || len(mp.MissingAttestationSlots) > 0 { - clone.ValidatorIndexMap[pubkey] = mp - } - } - - // Serialize as JSON - bytes, err := json.Marshal(clone) - if err != nil { - return nil, fmt.Errorf("error serializing rolling record: %w", err) - } - - return bytes, nil -} - -// Update the validator index map with any new validators on Beacon -func (r *RollingRecord) updateValidatorIndices(state *state.NetworkState) { - // NOTE: this has to go through every index each time in order to handle out-of-order validators - // or invalid validators that got created on the testnet with broken deposits - for i := 0; i < len(state.MinipoolDetails); i++ { - mpd := state.MinipoolDetails[i] - pubkey := mpd.Pubkey - - validator, exists := state.ValidatorDetails[pubkey] - if !exists { - // Hit a validator that doesn't exist on Beacon yet - continue - } - - _, exists = r.ValidatorIndexMap[validator.Index] - if !exists && mpd.Status == types.Staking { - // Validator exists and is staking but it hasn't been recorded yet, add it to the map and update the latest index so we don't remap stuff we've already seen - minipoolInfo := &MinipoolInfo{ - Address: mpd.MinipoolAddress, - ValidatorPubkey: mpd.Pubkey, - ValidatorIndex: validator.Index, - NodeAddress: mpd.NodeAddress, - MissingAttestationSlots: map[uint64]bool{}, - AttestationScore: NewQuotedBigInt(0), - } - r.ValidatorIndexMap[validator.Index] = minipoolInfo - } - } -} - -// Get the attestation duties for the given epoch, up to (and including) the provided end slot -func (r *RollingRecord) getDutiesForEpoch(epoch uint64, endSlot uint64, state *state.NetworkState) error { - - lastSlotInEpoch := (epoch+1)*r.beaconConfig.SlotsPerEpoch - 1 - - if r.LastDutiesSlot >= lastSlotInEpoch { - // Already collected the duties for this epoch - r.log.Printlnf("%s All duties were already collected for epoch %d, skipping...", r.logPrefix, epoch) - return nil - } - - // Get the attestation committees for the epoch - committees, err := r.bc.GetCommitteesForEpoch(&epoch) - if err != nil { - return fmt.Errorf("error getting committees for epoch %d: %w", epoch, err) - } - defer committees.Release() - - // Crawl the committees - for idx := 0; idx < committees.Count(); idx++ { - slotIndex := committees.Slot(idx) - if slotIndex < r.StartSlot || slotIndex > endSlot { - // Ignore slots that are out of 
bounds - continue - } - if slotIndex <= r.LastDutiesSlot { - // Ignore slots that have already been processed - continue - } - blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*slotIndex)) - committeeIndex := committees.Index(idx) - - // Check if there are any RP validators in this committee - rpValidators := map[int]*MinipoolInfo{} - for position, validator := range committees.Validators(idx) { - mpInfo, exists := r.ValidatorIndexMap[validator] - if !exists { - // This isn't an RP validator, so ignore it - continue - } - - // Check if this minipool was opted into the SP for this block - nodeDetails := state.NodeDetailsByAddress[mpInfo.NodeAddress] - isOptedIn := nodeDetails.SmoothingPoolRegistrationState - spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) - if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it - (!isOptedIn && spRegistrationTime.Sub(blockTime) < 0) { // If this block occurred after the node opted out, ignore it - continue - } - - // Check if this minipool was in the `staking` state during this time - mpd := state.MinipoolDetailsByAddress[mpInfo.Address] - statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0) - if mpd.Status != types.Staking || blockTime.Sub(statusChangeTime) < 0 { - continue - } - - // This was a legal RP validator opted into the SP during this slot so add it - rpValidators[position] = mpInfo - mpInfo.MissingAttestationSlots[slotIndex] = true - } - - // If there are some RP validators, add this committee to the map - if len(rpValidators) > 0 { - slotInfo, exists := r.intervalDutiesInfo.Slots[slotIndex] - if !exists { - slotInfo = &SlotInfo{ - Index: slotIndex, - Committees: map[uint64]*CommitteeInfo{}, - } - r.intervalDutiesInfo.Slots[slotIndex] = slotInfo - } - slotInfo.Committees[committeeIndex] = &CommitteeInfo{ - Index: committeeIndex, - Positions: rpValidators, - } - } - } - - // Set the last slot duties were collected for - the minimum of the last slot in the epoch and the target state slot - r.LastDutiesSlot = lastSlotInEpoch - if endSlot < lastSlotInEpoch { - r.LastDutiesSlot = endSlot - } - return nil - -} - -// Process the attestations proposed within the given epoch against the existing record, using -// the provided state for EL <-> CL mapping -func (r *RollingRecord) processAttestationsInEpoch(epoch uint64, state *state.NetworkState) error { - - slotsPerEpoch := r.beaconConfig.SlotsPerEpoch - var wg errgroup.Group - wg.SetLimit(threadLimit) - attestationsPerSlot := make([][]beacon.AttestationInfo, r.beaconConfig.SlotsPerEpoch) - - // Get the attestation records for this epoch - for i := uint64(0); i < slotsPerEpoch; i++ { - i := i - slot := epoch*slotsPerEpoch + i - wg.Go(func() error { - attestations, found, err := r.bc.GetAttestations(fmt.Sprint(slot)) - if err != nil { - return fmt.Errorf("error getting attestations for slot %d: %w", slot, err) - } - if found { - attestationsPerSlot[i] = attestations - } else { - attestationsPerSlot[i] = []beacon.AttestationInfo{} - } - - return nil - }) - } - - err := wg.Wait() - if err != nil { - return fmt.Errorf("error getting attestation records for epoch %d: %w", epoch, err) - } - - // Process all of the slots in the epoch - for i, attestations := range attestationsPerSlot { - if len(attestations) > 0 { - // Process these attestations - slot := epoch*slotsPerEpoch + uint64(i) - r.processAttestationsInSlot(slot, attestations, state) - } - } - - return nil 
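The epoch fetch above uses a bounded errgroup fan-out: one goroutine per slot, capped by threadLimit, each writing only to its own slice index so no mutex is needed. A minimal self-contained sketch of the same pattern, with the fetch callback and limit as stand-ins rather than the real beacon-client call:

    package example

    import "golang.org/x/sync/errgroup"

    // fetchAll mirrors the fan-out in processAttestationsInEpoch: errors
    // propagate through wg.Wait(), and per-index writes keep it race-free.
    func fetchAll(slots []uint64, limit int, fetch func(uint64) ([]string, error)) ([][]string, error) {
        var wg errgroup.Group
        wg.SetLimit(limit)
        out := make([][]string, len(slots))
        for i, s := range slots {
            i, s := i, s // per-iteration copies; required before Go 1.22
            wg.Go(func() error {
                res, err := fetch(s)
                if err != nil {
                    return err
                }
                out[i] = res
                return nil
            })
        }
        return out, wg.Wait()
    }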
- -} - -// Process all of the attestations for a given slot -func (r *RollingRecord) processAttestationsInSlot(inclusionSlot uint64, attestations []beacon.AttestationInfo, state *state.NetworkState) { - - // Go through the attestations for the block - for _, attestation := range attestations { - - // Get the RP committees for this attestation's slot and index - slotInfo, exists := r.intervalDutiesInfo.Slots[attestation.SlotIndex] - if exists && inclusionSlot-attestation.SlotIndex <= r.beaconConfig.SlotsPerEpoch { // Ignore attestations delayed by more than 32 slots - rpCommittee, exists := slotInfo.Committees[attestation.CommitteeIndex] - if exists { - blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*attestation.SlotIndex)) - - // Check if each RP validator attested successfully - for position, validator := range rpCommittee.Positions { - if attestation.AggregationBits.BitAt(uint64(position)) { - // This was seen, so remove it from the missing attestations - delete(rpCommittee.Positions, position) - if len(rpCommittee.Positions) == 0 { - delete(slotInfo.Committees, attestation.CommitteeIndex) - } - if len(slotInfo.Committees) == 0 { - delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex) - } - delete(validator.MissingAttestationSlots, attestation.SlotIndex) - - // Get the pseudoscore for this attestation - details := state.MinipoolDetailsByAddress[validator.Address] - bond, fee := getMinipoolBondAndNodeFee(details, blockTime) - minipoolScore := big.NewInt(0).Sub(r.one, fee) // 1 - fee - minipoolScore.Mul(minipoolScore, bond) // Multiply by bond - minipoolScore.Div(minipoolScore, r.validatorReq) // Divide by 32 to get the bond as a fraction of a total validator - minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) - - // Add it to the minipool's score - validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) - validator.AttestationCount++ - } - } - } - } - } - -} diff --git a/shared/services/rewards/ssz_types/big/uint256.go b/shared/services/rewards/ssz_types/big/uint256.go new file mode 100644 index 000000000..0c05e82a7 --- /dev/null +++ b/shared/services/rewards/ssz_types/big/uint256.go @@ -0,0 +1,105 @@ +package big + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + + ssz "github.com/ferranbt/fastssz" + "github.com/holiman/uint256" +) + +var Overflow = errors.New("uint256 overflow") +var Negative = errors.New("uint256 can't be negative before serializing") + +// Wraps big.Int but will be checked for sign/overflow when serializing SSZ +type Uint256 struct { + *big.Int +} + +func NewUint256(i int64) Uint256 { + return Uint256{big.NewInt(i)} +} + +func (u *Uint256) SizeSSZ() (size int) { + return 32 +} + +func (u *Uint256) ToUint256() (*uint256.Int, error) { + // Check sign + if u.Sign() < 0 { + return nil, Negative + } + + s, overflow := uint256.FromBig(u.Int) + if overflow { + return nil, Overflow + } + return s, nil +} + +func (u *Uint256) MarshalSSZTo(buf []byte) ([]byte, error) { + s, err := u.ToUint256() + if err != nil { + return nil, err + } + + bytes, err := s.MarshalSSZ() + if err != nil { + return nil, err + } + return append(buf, bytes...), nil +} + +func (u *Uint256) HashTreeRootWith(hh ssz.HashWalker) (err error) { + bytes := make([]byte, 32) + bytes, err = u.MarshalSSZTo(bytes) + if err != nil { + return + } + + hh.AppendBytes32(bytes) + return +} + +func (u *Uint256) UnmarshalSSZ(buf []byte) error { + repr := uint256.NewInt(0) + err := repr.UnmarshalSSZ(buf) + if err != nil 
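To make the pseudoscore above concrete: assuming r.one is 1 ETH in wei and r.validatorReq is 32 ETH in wei (which is what the inline comments imply), an 8 ETH bond at a 10% node fee earns fee + (bond/32)(1 - fee) = 0.1 + 0.25 * 0.9 = 0.325 in 18-decimal fixed point. A worked sketch using only math/big:

    // Worked example of the attestation score; all values are wei-scaled.
    one := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil) // 1e18 == "1.0"
    validatorReq := new(big.Int).Mul(big.NewInt(32), one)        // 32 ETH
    bond := new(big.Int).Mul(big.NewInt(8), one)                 // 8 ETH bond
    fee := new(big.Int).Div(one, big.NewInt(10))                 // 10% fee

    score := new(big.Int).Sub(one, fee) // 1 - fee        = 0.9e18
    score.Mul(score, bond)              // * bond         = 7.2e36
    score.Div(score, validatorReq)      // / 32 ETH       = 0.225e18
    score.Add(score, fee)               // + fee          = 0.325e18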
{ + return err + } + u.Int = repr.ToBig() + return nil +} + +func (u *Uint256) String() string { + return u.Int.String() +} + +func (u *Uint256) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + u.Int = big.NewInt(0) + return u.Int.UnmarshalJSON([]byte(s)) +} + +func (u *Uint256) MarshalJSON() ([]byte, error) { + s, err := u.Int.MarshalJSON() + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("\"%s\"", s)), nil +} + +func (u *Uint256) Bytes32() ([32]byte, error) { + s, err := u.ToUint256() + if err != nil { + return [32]byte{}, err + } + + return s.Bytes32(), nil +} diff --git a/shared/services/rewards/ssz_types/encoding.go b/shared/services/rewards/ssz_types/encoding.go new file mode 100644 index 000000000..d99385074 --- /dev/null +++ b/shared/services/rewards/ssz_types/encoding.go @@ -0,0 +1,681 @@ +// Code generated by fastssz. DO NOT EDIT. +// Hash: c302f5cab9af79d858415e7e5bc2002568baf2333120ecc30517636a1b041db6 +// Version: 0.1.3 +package ssz_types + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the SSZFile_v1 object +func (s *SSZFile_v1) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SSZFile_v1 object to a target array +func (s *SSZFile_v1) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(356) + + // Field (0) 'Magic' + dst = append(dst, s.Magic[:]...) + + // Field (1) 'RewardsFileVersion' + dst = ssz.MarshalUint64(dst, s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + dst = ssz.MarshalUint64(dst, s.RulesetVersion) + + // Field (3) 'Network' + dst = ssz.MarshalUint64(dst, uint64(s.Network)) + + // Field (4) 'Index' + dst = ssz.MarshalUint64(dst, s.Index) + + // Field (5) 'StartTime' + dst = ssz.MarshalTime(dst, s.StartTime) + + // Field (6) 'EndTime' + dst = ssz.MarshalTime(dst, s.EndTime) + + // Field (7) 'ConsensusStartBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionEndBlock) + + // Field (11) 'IntervalsPassed' + dst = ssz.MarshalUint64(dst, s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + dst = append(dst, s.MerkleRoot[:]...) 
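For readers checking the generated offsets: the 356-byte fixed region decomposes as 4 bytes of Magic, eleven 8-byte fields (RewardsFileVersion through IntervalsPassed, with StartTime and EndTime marshaled as 8-byte Unix times), the 32-byte MerkleRoot, the 224-byte fixed-size TotalRewards container, and two 4-byte offsets for the variable-length NetworkRewards and NodeRewards lists: 4 + 88 + 32 + 224 + 8 = 356.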
+ + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if dst, err = s.TotalRewards.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (14) 'NetworkRewards' + dst = ssz.WriteOffset(dst, offset) + offset += len(s.NetworkRewards) * 104 + + // Offset (15) 'NodeRewards' + dst = ssz.WriteOffset(dst, offset) + offset += len(s.NodeRewards) * 124 + + // Field (14) 'NetworkRewards' + if size := len(s.NetworkRewards); size > 128 { + err = ssz.ErrListTooBigFn("SSZFile_v1.NetworkRewards", size, 128) + return + } + for ii := 0; ii < len(s.NetworkRewards); ii++ { + if dst, err = s.NetworkRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (15) 'NodeRewards' + if size := len(s.NodeRewards); size > 9223372036854775807 { + err = ssz.ErrListTooBigFn("SSZFile_v1.NodeRewards", size, 9223372036854775807) + return + } + for ii := 0; ii < len(s.NodeRewards); ii++ { + if dst, err = s.NodeRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + return +} + +// UnmarshalSSZ ssz unmarshals the SSZFile_v1 object +func (s *SSZFile_v1) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 356 { + return ssz.ErrSize + } + + tail := buf + var o14, o15 uint64 + + // Field (0) 'Magic' + copy(s.Magic[:], buf[0:4]) + + // Field (1) 'RewardsFileVersion' + s.RewardsFileVersion = ssz.UnmarshallUint64(buf[4:12]) + + // Field (2) 'RulesetVersion' + s.RulesetVersion = ssz.UnmarshallUint64(buf[12:20]) + + // Field (3) 'Network' + s.Network = Network(ssz.UnmarshallUint64(buf[20:28])) + + // Field (4) 'Index' + s.Index = ssz.UnmarshallUint64(buf[28:36]) + + // Field (5) 'StartTime' + s.StartTime = ssz.UnmarshalTime(buf[36:44]) + + // Field (6) 'EndTime' + s.EndTime = ssz.UnmarshalTime(buf[44:52]) + + // Field (7) 'ConsensusStartBlock' + s.ConsensusStartBlock = ssz.UnmarshallUint64(buf[52:60]) + + // Field (8) 'ConsensusEndBlock' + s.ConsensusEndBlock = ssz.UnmarshallUint64(buf[60:68]) + + // Field (9) 'ExecutionStartBlock' + s.ExecutionStartBlock = ssz.UnmarshallUint64(buf[68:76]) + + // Field (10) 'ExecutionEndBlock' + s.ExecutionEndBlock = ssz.UnmarshallUint64(buf[76:84]) + + // Field (11) 'IntervalsPassed' + s.IntervalsPassed = ssz.UnmarshallUint64(buf[84:92]) + + // Field (12) 'MerkleRoot' + copy(s.MerkleRoot[:], buf[92:124]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if err = s.TotalRewards.UnmarshalSSZ(buf[124:348]); err != nil { + return err + } + + // Offset (14) 'NetworkRewards' + if o14 = ssz.ReadOffset(buf[348:352]); o14 > size { + return ssz.ErrOffset + } + + if o14 < 356 { + return ssz.ErrInvalidVariableOffset + } + + // Offset (15) 'NodeRewards' + if o15 = ssz.ReadOffset(buf[352:356]); o15 > size || o14 > o15 { + return ssz.ErrOffset + } + + // Field (14) 'NetworkRewards' + { + buf = tail[o14:o15] + num, err := ssz.DivideInt2(len(buf), 104, 128) + if err != nil { + return err + } + s.NetworkRewards = make([]*NetworkReward, num) + for ii := 0; ii < num; ii++ { + if s.NetworkRewards[ii] == nil { + s.NetworkRewards[ii] = new(NetworkReward) + } + if err = s.NetworkRewards[ii].UnmarshalSSZ(buf[ii*104 : (ii+1)*104]); err != nil { + return err + } + } + } + + // Field (15) 'NodeRewards' + { + buf = tail[o15:] + num, err := ssz.DivideInt2(len(buf), 124, 9223372036854775807) + if err != nil { + return err + } + s.NodeRewards = make([]*NodeReward, num) + for ii := 0; ii < num; ii++ { + if s.NodeRewards[ii] == nil { + s.NodeRewards[ii] = 
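The per-element strides in the decoder follow the same arithmetic: a NetworkReward occupies 8 (Network) + 3 x 32 (the three Uint256 amounts) = 104 bytes, and a NodeReward occupies 20 (Address) + 8 (Network) + 3 x 32 = 124 bytes, so DivideInt2 recovers the element counts by dividing the variable-region byte spans by 104 and 124 respectively.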
new(NodeReward) + } + if err = s.NodeRewards[ii].UnmarshalSSZ(buf[ii*124 : (ii+1)*124]); err != nil { + return err + } + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SSZFile_v1 object +func (s *SSZFile_v1) SizeSSZ() (size int) { + size = 356 + + // Field (14) 'NetworkRewards' + size += len(s.NetworkRewards) * 104 + + // Field (15) 'NodeRewards' + size += len(s.NodeRewards) * 124 + + return +} + +// HashTreeRoot ssz hashes the SSZFile_v1 object +func (s *SSZFile_v1) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SSZFile_v1 object with a hasher +func (s *SSZFile_v1) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Magic' + hh.PutBytes(s.Magic[:]) + + // Field (1) 'RewardsFileVersion' + hh.PutUint64(s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + hh.PutUint64(s.RulesetVersion) + + // Field (3) 'Network' + hh.PutUint64(uint64(s.Network)) + + // Field (4) 'Index' + hh.PutUint64(s.Index) + + // Field (5) 'StartTime' + hh.PutUint64(uint64(s.StartTime.Unix())) + + // Field (6) 'EndTime' + hh.PutUint64(uint64(s.EndTime.Unix())) + + // Field (7) 'ConsensusStartBlock' + hh.PutUint64(s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + hh.PutUint64(s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + hh.PutUint64(s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + hh.PutUint64(s.ExecutionEndBlock) + + // Field (11) 'IntervalsPassed' + hh.PutUint64(s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + hh.PutBytes(s.MerkleRoot[:]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if err = s.TotalRewards.HashTreeRootWith(hh); err != nil { + return + } + + // Field (14) 'NetworkRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NetworkRewards)) + if num > 128 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NetworkRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 128) + } + + // Field (15) 'NodeRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NodeRewards)) + if num > 9223372036854775807 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NodeRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 9223372036854775807) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SSZFile_v1 object +func (s *SSZFile_v1) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the TotalRewards object +func (t *TotalRewards) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(t) +} + +// MarshalSSZTo ssz marshals the TotalRewards object to a target array +func (t *TotalRewards) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'ProtocolDaoRpl' + if dst, err = t.ProtocolDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if dst, err = t.TotalCollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if dst, err = t.TotalOracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if dst, err = t.TotalSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if dst, err = t.PoolStakerSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field 
(5) 'NodeOperatorSmoothingPoolEth' + if dst, err = t.NodeOperatorSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if dst, err = t.TotalNodeWeight.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the TotalRewards object +func (t *TotalRewards) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 224 { + return ssz.ErrSize + } + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.UnmarshalSSZ(buf[0:32]); err != nil { + return err + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.UnmarshalSSZ(buf[32:64]); err != nil { + return err + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.UnmarshalSSZ(buf[64:96]); err != nil { + return err + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.UnmarshalSSZ(buf[96:128]); err != nil { + return err + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.UnmarshalSSZ(buf[128:160]); err != nil { + return err + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.UnmarshalSSZ(buf[160:192]); err != nil { + return err + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.UnmarshalSSZ(buf[192:224]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the TotalRewards object +func (t *TotalRewards) SizeSSZ() (size int) { + size = 224 + return +} + +// HashTreeRoot ssz hashes the TotalRewards object +func (t *TotalRewards) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(t) +} + +// HashTreeRootWith ssz hashes the TotalRewards object with a hasher +func (t *TotalRewards) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the TotalRewards object +func (t *TotalRewards) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(t) +} + +// MarshalSSZ ssz marshals the NetworkReward object +func (n *NetworkReward) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(n) +} + +// MarshalSSZTo ssz marshals the NetworkReward object to a target array +func (n *NetworkReward) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Network' + dst = ssz.MarshalUint64(dst, uint64(n.Network)) + + // Field (1) 'CollateralRpl' + if dst, err = n.CollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'OracleDaoRpl' + if dst, err = n.OracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'SmoothingPoolEth' + if dst, err = 
n.SmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the NetworkReward object +func (n *NetworkReward) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 104 { + return ssz.ErrSize + } + + // Field (0) 'Network' + n.Network = Layer(ssz.UnmarshallUint64(buf[0:8])) + + // Field (1) 'CollateralRpl' + if err = n.CollateralRpl.UnmarshalSSZ(buf[8:40]); err != nil { + return err + } + + // Field (2) 'OracleDaoRpl' + if err = n.OracleDaoRpl.UnmarshalSSZ(buf[40:72]); err != nil { + return err + } + + // Field (3) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.UnmarshalSSZ(buf[72:104]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the NetworkReward object +func (n *NetworkReward) SizeSSZ() (size int) { + size = 104 + return +} + +// HashTreeRoot ssz hashes the NetworkReward object +func (n *NetworkReward) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(n) +} + +// HashTreeRootWith ssz hashes the NetworkReward object with a hasher +func (n *NetworkReward) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Network' + hh.PutUint64(uint64(n.Network)) + + // Field (1) 'CollateralRpl' + if err = n.CollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'OracleDaoRpl' + if err = n.OracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the NetworkReward object +func (n *NetworkReward) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(n) +} + +// MarshalSSZ ssz marshals the NodeReward object +func (n *NodeReward) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(n) +} + +// MarshalSSZTo ssz marshals the NodeReward object to a target array +func (n *NodeReward) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Address' + dst = append(dst, n.Address[:]...) 
+ + // Field (1) 'Network' + dst = ssz.MarshalUint64(dst, uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if dst, err = n.CollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if dst, err = n.OracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if dst, err = n.SmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the NodeReward object +func (n *NodeReward) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 124 { + return ssz.ErrSize + } + + // Field (0) 'Address' + copy(n.Address[:], buf[0:20]) + + // Field (1) 'Network' + n.Network = Layer(ssz.UnmarshallUint64(buf[20:28])) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.UnmarshalSSZ(buf[28:60]); err != nil { + return err + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.UnmarshalSSZ(buf[60:92]); err != nil { + return err + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.UnmarshalSSZ(buf[92:124]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the NodeReward object +func (n *NodeReward) SizeSSZ() (size int) { + size = 124 + return +} + +// HashTreeRoot ssz hashes the NodeReward object +func (n *NodeReward) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(n) +} + +// HashTreeRootWith ssz hashes the NodeReward object with a hasher +func (n *NodeReward) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Address' + hh.PutBytes(n.Address[:]) + + // Field (1) 'Network' + hh.PutUint64(uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the NodeReward object +func (n *NodeReward) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(n) +} diff --git a/shared/services/rewards/ssz_types/gen.sh b/shared/services/rewards/ssz_types/gen.sh new file mode 100755 index 000000000..0e575bea7 --- /dev/null +++ b/shared/services/rewards/ssz_types/gen.sh @@ -0,0 +1,3 @@ +#!/bin/bash +rm -fr encoding.go +sszgen --path . -objs SSZFile_v1 -output encoding.go -include big/ diff --git a/shared/services/rewards/ssz_types/json.go b/shared/services/rewards/ssz_types/json.go new file mode 100644 index 000000000..150004cda --- /dev/null +++ b/shared/services/rewards/ssz_types/json.go @@ -0,0 +1,214 @@ +package ssz_types + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var networkMap = map[string]Network{ + "mainnet": 1, + "holesky": 17000, +} + +// internal use only +type sszfile_v1_alias SSZFile_v1 + +// This custom unmarshaler avoids creating a landmine where the user +// may forget to call NewSSZFile_v1 before unmarshaling into the result, +// which would cause the Magic header to be unset. 
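A minimal usage sketch of the behavior that comment describes, where ssz_types is the package above and data is any rewards-file JSON:

    var f ssz_types.SSZFile_v1 // zero value; Magic starts unset
    if err := json.Unmarshal(data, &f); err != nil {
        // a parse failure or a failed f.Verify() both land here
    }
    // On success f.Magic holds the RPRT magic and the file is already verified,
    // even though NewSSZFile_v1 was never called.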
+func (f *SSZFile_v1) UnmarshalJSON(data []byte) error { + // Disposable type without a custom unmarshal + var alias sszfile_v1_alias + err := json.Unmarshal(data, &alias) + if err != nil { + return err + } + *f = SSZFile_v1(alias) + + // After unmarshaling, set the magic header + f.Magic = Magic + + // Verify legitimacy of the file + return f.Verify() +} + +// When writing JSON, we need to compute the merkle tree to populate the proofs +func (f *SSZFile_v1) MarshalJSON() ([]byte, error) { + if err := f.Verify(); err != nil { + return nil, fmt.Errorf("error verifying ssz while serializing json: %w", err) + } + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error getting proofs: %w", err) + } + + for _, nr := range f.NodeRewards { + proof, ok := proofs[nr.Address] + if !ok { + return nil, fmt.Errorf("error getting proof for node %s", nr.Address) + } + nr.MerkleProof = proof + } + + var alias sszfile_v1_alias + alias = sszfile_v1_alias(*f) + return json.Marshal(&alias) +} + +func (h *Hash) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + s = strings.TrimPrefix(s, "0x") + out, err := hex.DecodeString(s) + if err != nil { + return err + } + + if len(out) != 32 { + return fmt.Errorf("merkle root %s wrong size- must be 32 bytes", s) + } + + copy((*[32]byte)(h)[:], out) + return nil +} + +func (h Hash) MarshalJSON() ([]byte, error) { + return []byte(`"` + h.String() + `"`), nil +} + +func NetworkFromString(s string) (Network, bool) { + n, ok := networkMap[s] + return n, ok +} + +func (n *Network) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + id, ok := NetworkFromString(s) + if ok { + *n = Network(id) + return nil + } + + // If the network string doesn't match known values, try to treat it as an integer + u, err := strconv.ParseUint(s, 10, 64) + if err == nil { + *n = Network(u) + return nil + } + + // If the network string isn't an integer, use UINT64_MAX + *n = Network(math.MaxUint64) + return nil +} + +func (n Network) MarshalJSON() ([]byte, error) { + id := n + for k, v := range networkMap { + if v == id { + return json.Marshal(k) + } + } + + // If the network id isn't in the map, serialize it as a string + return json.Marshal(strconv.FormatUint(uint64(id), 10)) +} + +func (n *NetworkRewards) UnmarshalJSON(data []byte) error { + // Network Rewards is a slice, but represented as a map in the json. + var m map[string]json.RawMessage + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + *n = make(NetworkRewards, 0, len(m)) + for k, v := range m { + networkId, err := strconv.ParseUint(k, 10, 64) + if err != nil { + return err + } + networkReward := new(NetworkReward) + networkReward.Network = networkId + + err = json.Unmarshal(v, networkReward) + if err != nil { + return err + } + *n = append(*n, networkReward) + } + + sort.Sort(*n) + return nil +} + +func (n NetworkRewards) MarshalJSON() ([]byte, error) { + // Network Rewards is a slice, but represented as a map in the json. 
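// For instance (illustrative values mirroring the test fixture later in this
// change), two entries for chain ids 0 and 1 round-trip as:
//   {"0":{"collateralRpl":"200","oracleDaoRpl":"300","smoothingPoolEth":"400"},
//    "1":{"collateralRpl":"500","oracleDaoRpl":"600","smoothingPoolEth":"700"}}
// with the chain id carried only in the map key, since Network is tagged json:"-".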
+ m := make(map[string]*NetworkReward, len(n)) + // Make sure we sort, first + sort.Sort(n) + for _, nr := range n { + m[strconv.FormatUint(nr.Network, 10)] = nr + } + + // Serialize the map + return json.Marshal(m) +} + +func (n *NodeRewards) UnmarshalJSON(data []byte) error { + var m map[string]json.RawMessage + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + *n = make(NodeRewards, 0, len(m)) + for k, v := range m { + s := strings.TrimPrefix(k, "0x") + addr, err := hex.DecodeString(s) + if err != nil { + return err + } + + if len(addr) != 20 { + return fmt.Errorf("address %s wrong size- must be 20 bytes", s) + } + + nodeReward := new(NodeReward) + copy(nodeReward.Address[:], addr) + err = json.Unmarshal(v, nodeReward) + if err != nil { + return err + } + *n = append(*n, nodeReward) + } + + sort.Sort(*n) + return nil +} + +func (n NodeRewards) MarshalJSON() ([]byte, error) { + // Node Rewards is a slice, but represented as a map in the json. + m := make(map[string]*NodeReward, len(n)) + // Make sure we sort, first + sort.Sort(n) + for _, nr := range n { + m[nr.Address.String()] = nr + } + + // Serialize the map + return json.Marshal(m) +} diff --git a/shared/services/rewards/ssz_types/rewards-file-v4.go b/shared/services/rewards/ssz_types/rewards-file-v4.go new file mode 100644 index 000000000..2f4e7c63c --- /dev/null +++ b/shared/services/rewards/ssz_types/rewards-file-v4.go @@ -0,0 +1,591 @@ +package ssz_types + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + stdbig "math/big" + "slices" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" + "github.com/wealdtech/go-merkletree" + "github.com/wealdtech/go-merkletree/keccak256" +) + +type Format = uint + +const ( + FormatJSON = iota + FormatSSZ +) + +var Magic [4]byte = [4]byte{0x52, 0x50, 0x52, 0x54} + +type Address [20]byte +type Hash [32]byte +type NetworkRewards []*NetworkReward +type NodeRewards []*NodeReward + +// Network corresponds to the top-level Network field, where 1 means mainnet +type Network uint64 + +// Layer corresponds to rewards-level Network fields, where 0 means layer 1... +// Using an alias of uint64 helps serve as documentation rather than function +type Layer = uint64 + +type MerkleProof []Hash + +type SSZFile_v1 struct { + // Fields specific to ssz encoding are first + + // A magic header. Four bytes. Helps immediately verify what follows is a rewards tree. + // 0x52505254 - it's RPRT in ASCII and easy to recognize + Magic [4]byte `ssz-size:"4" json:"-"` + // Version is first- parsers can check the first 12 bytes of the file to make sure they're + // parsing a rewards tree and it is a version they know how to parse. 
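As the comment below suggests, a consumer can sniff those first 12 bytes before committing to a full parse. A sketch using only the standard library (bytes and encoding/binary), relying on SSZ integers being little-endian:

    func sniffHeader(buf []byte) (version uint64, ok bool) {
        if len(buf) < 12 || !bytes.Equal(buf[:4], Magic[:]) {
            return 0, false // not an RPRT rewards tree
        }
        return binary.LittleEndian.Uint64(buf[4:12]), true
    }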
+ RewardsFileVersion uint64 `json:"rewardsFileVersion"` + + // Next, we need fields for the rest of the RewardsFileHeader + + // RulesetVersion is the version of the ruleset used to generate the tree, e.g., v9 for the first + // ruleset to use ssz + RulesetVersion uint64 `json:"rulesetVersion"` + // Network is the chain id for which the tree is generated + Network Network `json:"network"` + // Index is the rewards interval index + Index uint64 `json:"index"` + // StartTime is the time of the first slot of the interval + StartTime time.Time `json:"startTime"` + // EndTime is the time of the last slot of the interval + EndTime time.Time `json:"endTime"` + // ConsensusStartBlock is the first non-empty slot of the interval + ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` + // ConsensusEndBlock is the last non-empty slot of the interval + ConsensusEndBlock uint64 `json:"consensusEndBlock"` + // ExecutionStartBlock is the execution block number included in ConsensusStartBlock + ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` + // ExecutionEndBlock is the execution block number included in ConsensusEndBlock + ExecutionEndBlock uint64 `json:"executionEndBlock"` + // IntervalsPassed is the number of rewards intervals contained in this tree + IntervalsPassed uint64 `json:"intervalsPassed"` + // MerkleRoot is the root of the merkle tree of all the nodes in this tree. + MerkleRoot Hash `ssz-size:"32" json:"merkleRoot,omitempty"` + // TotalRewards is aggregate data on the rewards this tree contains + TotalRewards *TotalRewards `json:"totalRewards"` + // NetworkRewards is the destinations and aggregate amounts for each network + // this tree distributes to. + // Must be sorted by Chain ID ascending + NetworkRewards NetworkRewards `ssz-max:"128" json:"networkRewards"` + + // Finally, the actual per-node objects that get merkle-ized + + // NodeRewards are the objects that make up the merkle tree. + // Must be sorted by Node Address ascending + NodeRewards NodeRewards `ssz-max:"9223372036854775807" json:"nodeRewards"` + + merkleProofs map[Address]MerkleProof `ssz:"-" json:"-"` +} + +func NewSSZFile_v1() *SSZFile_v1 { + return &SSZFile_v1{ + Magic: Magic, + } +} + +// Check if the NodeRewards field respects unique constraints +func (f *SSZFile_v1) nodeRewardsUnique() bool { + m := make(map[Address]any, len(f.NodeRewards)) + + for _, nr := range f.NodeRewards { + _, found := m[nr.Address] + if found { + return false + } + m[nr.Address] = struct{}{} + } + + return true +} + +// Check if the NetworkRewards field respects unique constraints +func (f *SSZFile_v1) networkRewardsUnique() bool { + m := make(map[uint64]any, len(f.NetworkRewards)) + + for _, nr := range f.NetworkRewards { + _, found := m[nr.Network] + if found { + return false + } + m[nr.Network] = struct{}{} + } + + return true +} + +// Verify checks that the arrays in the file are appropriately sorted and that +// the merkle proof, if present, matches. 
+func (f *SSZFile_v1) Verify() error { + if !sort.IsSorted(f.NodeRewards) { + return errors.New("ssz file node rewards out of order") + } + + if !sort.IsSorted(f.NetworkRewards) { + return errors.New("ssz file network rewards out of order") + } + + if !f.nodeRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NodeRewards field") + } + + if !f.networkRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NetworkRewards field") + } + + if f.TotalRewards == nil { + return errors.New("missing required field TotalRewards") + } + + if _, err := f.Proofs(); err != nil { + return err + } + + return nil +} + +// Minipool Performance CID is deprecated, but we must implement this for the interface +func (f *SSZFile_v1) SetMinipoolPerformanceFileCID(cid string) { +} + +// The "normal" serialize() call is expected to be JSON by ISerializable in files.go +func (f *SSZFile_v1) Serialize() ([]byte, error) { + return json.Marshal(f) +} + +// Write as SSZ +func (f *SSZFile_v1) SerializeSSZ() ([]byte, error) { + return f.FinalizeSSZ() +} + +func (f *SSZFile_v1) GenerateMerkleTree() error { + _, err := f.Proofs() + return err +} + +// Marshal wrapper that adds the magic header if absent and sets or validates the merkle root +func (f *SSZFile_v1) FinalizeSSZ() ([]byte, error) { + return f.FinalizeSSZTo(make([]byte, 0, f.SizeSSZ())) +} + +func (f *SSZFile_v1) FinalizeSSZTo(buf []byte) ([]byte, error) { + copy(f.Magic[:], Magic[:]) + if err := f.Verify(); err != nil { + return nil, err + } + + return f.MarshalSSZTo(buf) +} + +// Parsing wrapper that adds verification to the merkle root and magic header +func ParseSSZFile(buf []byte) (*SSZFile_v1, error) { + if !bytes.HasPrefix(buf, Magic[:]) { + return nil, errors.New("magic header not found in reward ssz file") + } + + f := &SSZFile_v1{} + if err := f.UnmarshalSSZ(buf); err != nil { + return nil, err + } + + if err := f.Verify(); err != nil { + return nil, err + } + + return f, nil +} + +// This getter lazy-computes the proofs and caches them on the file +func (f *SSZFile_v1) Proofs() (map[Address]MerkleProof, error) { + if f.merkleProofs != nil { + return f.merkleProofs, nil + } + + sort.Sort(f.NodeRewards) + sort.Sort(f.NetworkRewards) + + nodeDataMap := make(map[Address][]byte, len(f.NodeRewards)) + treeData := make([][]byte, 0, len(f.NodeRewards)) + for _, nr := range f.NodeRewards { + // 20 bytes for address, 32 each for network/rpl/eth + address := nr.Address + network := uint256.NewInt(nr.Network).Bytes32() + rpl := stdbig.NewInt(0) + rpl.Add(rpl, nr.CollateralRpl.Int) + rpl.Add(rpl, nr.OracleDaoRpl.Int) + rplBytes := make([]byte, 32) + rplBytes = rpl.FillBytes(rplBytes) + eth, err := nr.SmoothingPoolEth.Bytes32() + if err != nil { + return nil, fmt.Errorf("error converting big.Int to uint256 byte slice: %w", err) + } + + const dataSize = 20 + 32*3 + nodeData := make([]byte, dataSize) + copy(nodeData[0:20], address[:]) + copy(nodeData[20:20+32], network[:]) + copy(nodeData[20+32:20+32*2], rplBytes[:]) + copy(nodeData[20+32*2:20+32*3], eth[:]) + + treeData = append(treeData, nodeData) + nodeDataMap[nr.Address] = nodeData + } + + tree, err := merkletree.NewUsing(treeData, keccak256.New(), false, true) + if err != nil { + return nil, fmt.Errorf("error generating Merkle Tree: %w", err) + } + + // Generate the proofs + out := make(map[Address]MerkleProof) + f.merkleProofs = out + for address, nodeData := range nodeDataMap { + proof, err := tree.GenerateProof(nodeData, 0) + if err != nil { + return nil, 
fmt.Errorf("error generating proof for node 0x%s: %w", hex.EncodeToString(address[:]), err) + } + + // Store the proof in the result map + out[address] = make([]Hash, len(proof.Hashes)) + for i, hash := range proof.Hashes { + out[address][i] = Hash{} + copy(out[address][i][:], hash) + } + } + + // Populate missing proofs at node level + for _, nr := range f.NodeRewards { + if nr.MerkleProof == nil { + nr.MerkleProof = out[nr.Address] + } + } + + // Finally, set the root. If it's already set, and differs, return an error. + root := Hash{} + copy(root[:], tree.Root()) + if bytes.Count(f.MerkleRoot[:], []byte{0x00}) >= 32 { + f.MerkleRoot = root + return out, nil + } + + if !bytes.Equal(f.MerkleRoot[:], root[:]) { + return nil, fmt.Errorf("generated root %s mismatch against existing root %s", root, f.MerkleRoot) + } + + // The existing root matches the calculated root + return out, nil +} + +type TotalRewards struct { + // Total amount of RPL sent to the pDAO + ProtocolDaoRpl big.Uint256 `ssz-size:"32" json:"protocolDaoRpl"` + // Total amount of RPL sent to Node Operators + TotalCollateralRpl big.Uint256 `ssz-size:"32" json:"totalCollateralRpl"` + // Total amount of RPL sent to the oDAO + TotalOracleDaoRpl big.Uint256 `ssz-size:"32" json:"totalOracleDaoRpl"` + // Total amount of ETH in the Smoothing Pool + TotalSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"totalSmoothingPoolEth"` + // Total amount of Eth sent to the rETH contract + PoolStakerSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"poolStakerSmoothingPoolEth"` + // Total amount of Eth sent to Node Operators in the Smoothing Pool + NodeOperatorSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"nodeOperatorSmoothingPoolEth"` + // Total Node Weight as defined by RPIP-30 + TotalNodeWeight big.Uint256 `ssz-size:"32" json:"totalNodeWeight,omitempty"` +} + +type NetworkReward struct { + // Chain ID (key) + Network Layer `json:"-"` + + // Amount of RPL sent to the network for Node Operators + CollateralRpl big.Uint256 `ssz-size:"32" json:"collateralRpl"` + // Amount of RPL sent to the network for oDAO members + OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` + // Amount of Eth sent to the network for Node Operators + SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` +} + +func NewNetworkReward(network Layer) *NetworkReward { + return &NetworkReward{ + Network: network, + CollateralRpl: big.NewUint256(0), + OracleDaoRpl: big.NewUint256(0), + SmoothingPoolEth: big.NewUint256(0), + } +} + +// NetworkRewards should implement sort.Interface to make it easier to sort. 
+func (n NetworkRewards) Len() int { + return len(n) +} + +func (n NetworkRewards) Less(i, j int) bool { + return n[i].Network < n[j].Network +} + +func (n NetworkRewards) Swap(i, j int) { + tmp := n[i] + n[i] = n[j] + n[j] = tmp +} + +type NodeReward struct { + // Address of the Node (key) + Address Address `ssz-size:"20" json:"-"` + + // Chain ID on which the Node will claim + Network Layer `json:"rewardNetwork"` + // Amount of staking RPL earned by the Node + CollateralRpl big.Uint256 `ssz-size:"32" json:"collateralRpl"` + // Amount of oDAO RPL earned by the Node + OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` + // Amount of Smoothing Pool ETH earned by the Node + SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` + // Merkle proof for the node claim, sorted with the Merkle root last + MerkleProof MerkleProof `ssz:"-" json:"merkleProof"` +} + +func NewNodeReward(network Layer, address Address) *NodeReward { + return &NodeReward{ + Address: address, + Network: network, + CollateralRpl: big.NewUint256(0), + OracleDaoRpl: big.NewUint256(0), + SmoothingPoolEth: big.NewUint256(0), + } +} + +// NodeRewards should implement sort.Interface to make it easier to sort. +func (n NodeRewards) Len() int { + return len(n) +} + +func (n NodeRewards) Less(i, j int) bool { + ia := n[i].Address + ja := n[j].Address + + if bytes.Compare(ia[:], ja[:]) < 0 { + return true + } + + return false +} + +func (n NodeRewards) Swap(i, j int) { + tmp := n[i] + n[i] = n[j] + n[j] = tmp +} + +func (n NodeRewards) Find(addr Address) *NodeReward { + idx := slices.IndexFunc(n, func(nr *NodeReward) bool { + return bytes.Equal(nr.Address[:], addr[:]) + }) + if idx == -1 { + return nil + } + return n[idx] +} + +func AddressFromBytes(b []byte) Address { + out := Address{} + copy(out[:], b) + return out +} + +// Functions to implement IRewardsFile +func (f *SSZFile_v1) Deserialize(data []byte) error { + if bytes.HasPrefix(data, Magic[:]) { + if err := f.UnmarshalSSZ(data); err != nil { + return err + } + + return f.Verify() + } + + return json.Unmarshal(data, f) +} + +func (f *SSZFile_v1) GetIndex() uint64 { + return f.Index +} + +func (f *SSZFile_v1) GetMerkleRoot() string { + return f.MerkleRoot.String() +} + +func (f *SSZFile_v1) GetNodeAddresses() []common.Address { + out := make([]common.Address, 0, len(f.NodeRewards)) + + for _, nr := range f.NodeRewards { + out = append(out, common.BytesToAddress(nr.Address[:])) + } + return out +} + +func (f *SSZFile_v1) GetConsensusStartBlock() uint64 { + return f.ConsensusStartBlock +} + +func (f *SSZFile_v1) GetExecutionStartBlock() uint64 { + return f.ExecutionStartBlock +} + +func (f *SSZFile_v1) GetConsensusEndBlock() uint64 { + return f.ConsensusEndBlock +} + +func (f *SSZFile_v1) GetExecutionEndBlock() uint64 { + return f.ExecutionEndBlock +} + +func (f *SSZFile_v1) GetStartTime() time.Time { + return f.StartTime +} + +func (f *SSZFile_v1) GetEndTime() time.Time { + return f.EndTime +} + +func (f *SSZFile_v1) GetIntervalsPassed() uint64 { + return f.IntervalsPassed +} + +func (f *SSZFile_v1) GetMerkleProof(address common.Address) ([]common.Hash, error) { + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error while calculating proof for %s: %w", address.String(), err) + } + + var nativeAddress Address + copy(nativeAddress[:], address[:]) + nativeProofs := proofs[nativeAddress] + out := make([]common.Hash, 0, len(nativeProofs)) + for _, p := range nativeProofs { + var h common.Hash + copy(h[:], p[:]) + out = append(out, h) + 
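Since Deserialize above dispatches on the magic prefix, a single call site can accept either encoding; a small usage sketch, with data holding either the RPRT-prefixed SSZ bytes or the JSON form:

    f := new(SSZFile_v1)
    if err := f.Deserialize(data); err != nil {
        // handle parse or verification failure
    }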
} + + return out, nil +} + +func (f *SSZFile_v1) getRewardsForNetwork(network uint64) *NetworkReward { + for _, nr := range f.NetworkRewards { + if nr.Network == network { + return nr + } + } + return nil +} + +func (f *SSZFile_v1) HasRewardsForNetwork(network uint64) bool { + return f.getRewardsForNetwork(network) != nil +} + +func (f *SSZFile_v1) GetNetworkCollateralRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v1) GetNetworkOracleDaoRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetNetworkSmoothingPoolEth(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v1) getNodeRewards(addr common.Address) *NodeReward { + var nativeAddress Address + copy(nativeAddress[:], addr[:]) + return f.NodeRewards.Find(nativeAddress) +} + +func (f *SSZFile_v1) HasRewardsFor(addr common.Address) bool { + return f.getNodeRewards(addr) != nil +} + +func (f *SSZFile_v1) GetNodeCollateralRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v1) GetNodeOracleDaoRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetRewardsFileVersion() uint64 { + return f.RewardsFileVersion +} + +func (f *SSZFile_v1) GetTotalCollateralRpl() *stdbig.Int { + return f.TotalRewards.TotalCollateralRpl.Int +} + +func (f *SSZFile_v1) GetTotalNodeOperatorSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetTotalNodeWeight() *stdbig.Int { + return f.TotalRewards.TotalNodeWeight.Int +} + +func (f *SSZFile_v1) GetTotalOracleDaoRpl() *stdbig.Int { + return f.TotalRewards.TotalOracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetTotalPoolStakerSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetTotalProtocolDaoRpl() *stdbig.Int { + return f.TotalRewards.ProtocolDaoRpl.Int +} diff --git a/shared/services/rewards/ssz_types/ssz_test.go b/shared/services/rewards/ssz_types/ssz_test.go new file mode 100644 index 000000000..1a3aee1a7 --- /dev/null +++ b/shared/services/rewards/ssz_types/ssz_test.go @@ -0,0 +1,284 @@ +package ssz_types + +import ( + "bytes" + "encoding/hex" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" +) + +func sampleFile() *SSZFile_v1 { + out := NewSSZFile_v1() + + out.RewardsFileVersion = 10 + out.RulesetVersion = 4 + out.Network = 17000 + out.Index = 11 + out.StartTime = time.Now().Add(time.Hour * -24) + out.EndTime = time.Now() + out.ConsensusStartBlock = 128 + out.ConsensusEndBlock = 256 + out.ExecutionStartBlock = 1024 + out.ExecutionEndBlock = 1280 + out.IntervalsPassed = 1 + _, _ = hex.Decode(out.MerkleRoot[:], []byte("ac9ddbc55a8cd92612b86866de955f0bb99dd51e1447767afc610b13a5063546")) + out.TotalRewards = 
&TotalRewards{ + ProtocolDaoRpl: big.NewUint256(1000), + TotalCollateralRpl: big.NewUint256(2000), + TotalOracleDaoRpl: big.NewUint256(3000), + TotalSmoothingPoolEth: big.NewUint256(4000), + PoolStakerSmoothingPoolEth: big.NewUint256(5000), + NodeOperatorSmoothingPoolEth: big.NewUint256(6000), + TotalNodeWeight: big.NewUint256(7000), + } + out.NetworkRewards = NetworkRewards{ + &NetworkReward{ + Network: 0, + CollateralRpl: big.NewUint256(200), + OracleDaoRpl: big.NewUint256(300), + SmoothingPoolEth: big.NewUint256(400), + }, + &NetworkReward{ + Network: 1, + CollateralRpl: big.NewUint256(500), + OracleDaoRpl: big.NewUint256(600), + SmoothingPoolEth: big.NewUint256(700), + }, + } + out.NodeRewards = NodeRewards{ + &NodeReward{ + Address: Address{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + Network: 0, + CollateralRpl: big.NewUint256(10), + OracleDaoRpl: big.NewUint256(20), + SmoothingPoolEth: big.NewUint256(30), + }, + &NodeReward{ + Address: Address{0x01, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + Network: 1, + CollateralRpl: big.NewUint256(10), + OracleDaoRpl: big.NewUint256(20), + SmoothingPoolEth: big.NewUint256(30), + }, + } + + return out +} + +func fatalIf(t *testing.T, err error) { + t.Helper() + if err == nil { + return + } + t.Fatal(err) +} + +func TestSSZFileRoundTrip(t *testing.T) { + f := sampleFile() + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.FinalizeSSZ() + fatalIf(t, err) + + f, err = ParseSSZFile(data) + fatalIf(t, err) + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileJSONRoundTrip(t *testing.T) { + f := sampleFile() + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.MarshalJSON() + fatalIf(t, err) + + f = &SSZFile_v1{} + fatalIf(t, f.UnmarshalJSON(data)) + + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileDuplicateNodeRewards(t *testing.T) { + f := sampleFile() + f.NodeRewards = append(f.NodeRewards, f.NodeRewards[1]) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to duplicate entries") + } + if !strings.Contains(err.Error(), "duplicate entries") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileDuplicateNetworkRewards(t *testing.T) { + f := sampleFile() + f.NetworkRewards = append(f.NetworkRewards, f.NetworkRewards[1]) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to duplicate entries") + } + if !strings.Contains(err.Error(), "duplicate entries") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileOutOfOrderNodeRewards(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NodeRewards) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to sorting") + } + if !strings.Contains(err.Error(), "out of order") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileOutOfOrderNetworkRewards(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NetworkRewards) + err := f.Verify() + if err == nil { + 
t.Fatal("expected error due to sorting") + } + if !strings.Contains(err.Error(), "out of order") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileMissingTotalRewards(t *testing.T) { + f := sampleFile() + f.TotalRewards = nil + err := f.Verify() + if err == nil { + t.Fatal("expected error due to missing field") + } + if !strings.Contains(err.Error(), "missing required field TotalRewards") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileUnknownNetwork(t *testing.T) { + f := sampleFile() + f.Network = 3 + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.MarshalJSON() + fatalIf(t, err) + + f = &SSZFile_v1{} + fatalIf(t, f.UnmarshalJSON(data)) + + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileNoMagic(t *testing.T) { + f := sampleFile() + copy(f.Magic[:], []byte{0x00, 0x01, 0x02, 0x03}) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to missing magic header") + } + if !strings.Contains(err.Error(), "magic header not found") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileBadRoot(t *testing.T) { + f := sampleFile() + copy(f.MerkleRoot[:], []byte{0x00, 0x01, 0x02, 0x03}) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to mangled MerkleRoot") + } + if !strings.Contains(err.Error(), "mismatch against existing root") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileCalculateRoot(t *testing.T) { + f := sampleFile() + _, _ = hex.Decode(f.MerkleRoot[:], []byte("0000000000000000000000000000000000000000000000000000000000000000")) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + fatalIf(t, err) + + // Make sure the root is now set + if bytes.Count(f.MerkleRoot[:], []byte{0x00}) >= 32 { + t.Fatal("Expected ParseSSZFile to set the missing root") + } +} + +func TestSSZFileFinalizeFail(t *testing.T) { + f := sampleFile() + copy(f.MerkleRoot[:], []byte{0x00, 0x01, 0x02, 0x03}) + _, err := f.FinalizeSSZ() + if err == nil { + t.Fatal("expected error due to mangled MerkleRoot") + } + if !strings.Contains(err.Error(), "mismatch against existing root") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileTruncatedError(t *testing.T) { + f := sampleFile() + data, err := f.FinalizeSSZ() + data = data[:10] + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to mangled file bytes") + } + if !strings.Contains(err.Error(), "incorrect size") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileSorting(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NetworkRewards) + slices.Reverse(f.NodeRewards) + sort.Sort(f.NetworkRewards) + if !sort.IsSorted(f.NetworkRewards) { + t.Fatal("sorting NetworkRewards failed") + } + sort.Sort(f.NodeRewards) + if !sort.IsSorted(f.NodeRewards) { + t.Fatal("sorting NodeRewards failed") + } + +} diff --git a/shared/services/rewards/ssz_types/string.go b/shared/services/rewards/ssz_types/string.go new file mode 100644 index 000000000..0a164243c --- /dev/null +++ b/shared/services/rewards/ssz_types/string.go @@ -0,0 +1,13 @@ +package ssz_types + +import ( + "encoding/hex" +) + +func 
(h Hash) String() string { + return "0x" + hex.EncodeToString(h[:]) +} + +func (a Address) String() string { + return "0x" + hex.EncodeToString(a[:]) +} diff --git a/shared/services/rewards/test/assets/assets.go b/shared/services/rewards/test/assets/assets.go new file mode 100644 index 000000000..7faf79d73 --- /dev/null +++ b/shared/services/rewards/test/assets/assets.go @@ -0,0 +1,142 @@ +package assets + +import ( + "bytes" + "compress/gzip" + _ "embed" + "encoding/json" + "io" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +const Mainnet20ELHeaderTime = 1710394571 + +//go:embed rp-rewards-mainnet-20.json.gz +var mainnet20RewardsJSONGZ []byte +var mainnet20RewardsJSON []byte + +func GetMainnet20RewardsJSON() []byte { + if mainnet20RewardsJSON != nil { + return mainnet20RewardsJSON + } + + gz, err := gzip.NewReader(bytes.NewBuffer(mainnet20RewardsJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + mainnet20RewardsJSON, err = io.ReadAll(gz) + if err != nil { + panic(err) + } + return mainnet20RewardsJSON +} + +//go:embed rp-minipool-performance-mainnet-20.json.gz +var Mainnet20MinipoolPerformanceJSONGZ []byte +var Mainnet20MinipoolPerformanceJSON []byte + +func GetMainnet20MinipoolPerformanceJSON() []byte { + if Mainnet20MinipoolPerformanceJSON != nil { + return Mainnet20MinipoolPerformanceJSON + } + + gz, err := gzip.NewReader(bytes.NewBuffer(Mainnet20MinipoolPerformanceJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + Mainnet20MinipoolPerformanceJSON, err = io.ReadAll(gz) + if err != nil { + panic(err) + } + return Mainnet20MinipoolPerformanceJSON +} + +//go:embed rp-network-state-mainnet-20.json.gz +var Mainnet20NetworkStateJSONGZ []byte + +var mainnet20RewardsState *state.NetworkState + +func GetMainnet20RewardsState() *state.NetworkState { + if mainnet20RewardsState != nil { + return mainnet20RewardsState + } + + // GUnzip the embedded bytes + gz, err := gzip.NewReader(bytes.NewBuffer(Mainnet20NetworkStateJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + + // Create a JSON decoder + dec := json.NewDecoder(gz) + + // Decode the JSON + result := state.NetworkState{} + err = dec.Decode(&result) + if err != nil { + panic(err) + } + + // Memoize the result + mainnet20RewardsState = &result + + return mainnet20RewardsState +} + +func GetRewardSnapshotEventInterval19() rewards.RewardsEvent { + var rewardSnapshotEventInterval19 = rewards.RewardsEvent{ + Index: big.NewInt(19), + ExecutionBlock: big.NewInt(19231284), + ConsensusBlock: big.NewInt(8429279), + MerkleRoot: common.HexToHash("0x35d1be64d49aa71dc5b5ea13dd6f91d8613c81aef2593796d6dee599cd228aea"), + MerkleTreeCID: "bafybeiazkzsqe7molppbhbxg2khdgocrip36eoezroa7anbe53za7mxjpq", + IntervalsPassed: big.NewInt(1), + TreasuryRPL: big.NewInt(0), // Set below + TrustedNodeRPL: []*big.Int{}, // XXX Not set, but probably not needed + NodeRPL: []*big.Int{}, // XXX Not set, but probably not needed + NodeETH: []*big.Int{}, // XXX Not set, but probably not needed + UserETH: big.NewInt(0), // XXX Not set, but probably not needed + IntervalStartTime: time.Unix(1705556139, 0), + IntervalEndTime: time.Unix(1707975339, 0), + SubmissionTime: time.Unix(1707976475, 0), + } + // Note: big.Int.SetString only recognizes a "0x" prefix when base is 0, so pass bare hex digits with base 16 + rewardSnapshotEventInterval19.TreasuryRPL.SetString("0000000000000000000000000000000000000000000000f0a1e7585cd758ffe2", 16) + return rewardSnapshotEventInterval19 +} + +//go:embed 
rp-network-critical-duties-mainnet-20.json.gz +var mainnet20CriticalDutiesSlotsGZ []byte +var mainnet20CriticalDutiesSlots *state.CriticalDutiesSlots + +func GetMainnet20CriticalDutiesSlots() *state.CriticalDutiesSlots { + if mainnet20CriticalDutiesSlots != nil { + return mainnet20CriticalDutiesSlots + } + + jsonReader, err := gzip.NewReader(bytes.NewBuffer(mainnet20CriticalDutiesSlotsGZ)) + if err != nil { + panic(err) + } + defer jsonReader.Close() + + // Create a JSON decoder + dec := json.NewDecoder(jsonReader) + + // Decode the JSON + result := state.CriticalDutiesSlots{} + err = dec.Decode(&result) + if err != nil { + panic(err) + } + + mainnet20CriticalDutiesSlots = &result + return mainnet20CriticalDutiesSlots +} diff --git a/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz new file mode 100644 index 000000000..1497ec0cf Binary files /dev/null and b/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz new file mode 100644 index 000000000..dbb233c4a Binary files /dev/null and b/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz new file mode 100644 index 000000000..ce385c4a9 Binary files /dev/null and b/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz new file mode 100644 index 000000000..16004330a Binary files /dev/null and b/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/beacon.go b/shared/services/rewards/test/beacon.go new file mode 100644 index 000000000..b30925cdc --- /dev/null +++ b/shared/services/rewards/test/beacon.go @@ -0,0 +1,446 @@ +package test + +import ( + "errors" + "fmt" + "math/big" + "strconv" + "testing" + + "github.com/prysmaticlabs/go-bitfield" + "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +type epoch uint64 +type slot uint64 +type validatorIndex string +type validatorIndexToCommitteeIndexMap map[validatorIndex]uint +type criticalDutiesSlotMap map[validatorIndex]map[slot]interface{} + +func (v *validatorIndexToCommitteeIndexMap) set(vI validatorIndex, i uint) { + if *v == nil { + *v = make(validatorIndexToCommitteeIndexMap) + } + (*v)[vI] = i +} + +type missedDutiesMap map[epoch]map[slot][]validatorIndex + +func (missedDuties *missedDutiesMap) add(s slot, validator validatorIndex) { + if *missedDuties == nil { + *missedDuties = make(missedDutiesMap) + } + e := epoch(s / 32) + _, ok := (*missedDuties)[e] + + if !ok { + (*missedDuties)[e] = make(map[slot][]validatorIndex) + } + _, ok = (*missedDuties)[e][s] + if !ok { + (*missedDuties)[e][s] = make([]validatorIndex, 0) + } + (*missedDuties)[e][s] = append((*missedDuties)[e][s], validator) +} + +func (missedDuties *missedDutiesMap) getCount(s slot) uint { + e := epoch(s / 32) + if _, ok := (*missedDuties)[e]; !ok { + return 0 + } + if _, 
ok := (*missedDuties)[e][s]; !ok { + return 0 + } + return uint(len((*missedDuties)[e][s])) +} + +type missedEpochsMap map[validatorIndex]map[epoch]interface{} + +func (missedEpochs *missedEpochsMap) set(v validatorIndex, s slot) { + e := epoch(s / 32) + if *missedEpochs == nil { + *missedEpochs = make(missedEpochsMap) + } + _, ok := (*missedEpochs)[v] + if !ok { + (*missedEpochs)[v] = make(map[epoch]interface{}) + } + (*missedEpochs)[v][e] = struct{}{} +} + +func (missedEpochs *missedEpochsMap) validatorMissedEpoch(v validatorIndex, e epoch) bool { + if _, ok := (*missedEpochs)[v]; !ok { + return false + } + _, ok := (*missedEpochs)[v][e] + return ok +} + +type MockBeaconClient struct { + state *state.NetworkState + + t *testing.T + blocks map[string]beacon.BeaconBlock + + // A map of epoch -> slot -> validator indices for missed duties + missedDuties missedDutiesMap + + // A map of validator -> epoch -> {} + // that tracks which epochs a validator has missed duties in + missedEpochs missedEpochsMap + + // Count of validators + validatorCount uint + + // A map of validator index -> order in the list + validatorIndices validatorIndexToCommitteeIndexMap + + // A map of validator index to pubkey + validatorPubkeys map[validatorIndex]types.ValidatorPubkey + + // A map of validator index to critical duties slots + criticalDutiesSlots criticalDutiesSlotMap + + // A map of validator index to withdrawals + withdrawals map[slot]map[validatorIndex]*big.Int +} + +func (m *MockBeaconClient) SetState(state *state.NetworkState) { + m.state = state + if m.validatorPubkeys == nil { + m.validatorPubkeys = make(map[validatorIndex]types.ValidatorPubkey) + } + for _, v := range state.ValidatorDetails { + if _, ok := m.validatorPubkeys[validatorIndex(v.Index)]; ok { + m.t.Fatalf("Validator %s already set", v.Index) + } + m.validatorPubkeys[validatorIndex(v.Index)] = v.Pubkey + } +} + +type mockBeaconCommitteeSlot struct { + validators []string +} + +type MockBeaconCommittees struct { + slots []mockBeaconCommitteeSlot + epoch epoch +} + +func NewMockBeaconClient(t *testing.T) *MockBeaconClient { + return &MockBeaconClient{t: t} +} + +func (bc *MockBeaconClient) GetBeaconBlock(s string) (beacon.BeaconBlock, bool, error) { + attestations, _, err := bc.GetAttestations(s) + if err != nil { + return beacon.BeaconBlock{}, false, err + } + sInt, err := strconv.ParseUint(s, 10, 64) + if err != nil { + panic(err) + } + withdrawalMap := bc.withdrawals[slot(sInt)] + var out beacon.BeaconBlock + if block, ok := bc.blocks[s]; ok { + out = block + out.Attestations = attestations + } + + // Withdrawals + out.Withdrawals = make([]beacon.WithdrawalInfo, 0, len(withdrawalMap)) + for validatorIndex, amount := range withdrawalMap { + out.Withdrawals = append(out.Withdrawals, beacon.WithdrawalInfo{ + ValidatorIndex: string(validatorIndex), + Amount: amount, + }) + } + out.Attestations = attestations + + return out, true, nil +} + +func (bc *MockBeaconClient) SetBeaconBlock(slot string, block beacon.BeaconBlock) { + if bc.blocks == nil { + bc.blocks = make(map[string]beacon.BeaconBlock) + } + bc.blocks[slot] = block +} + +func (bc *MockBeaconClient) SetCriticalDutiesSlots(criticalDutiesSlots *state.CriticalDutiesSlots) { + if bc.criticalDutiesSlots == nil { + bc.criticalDutiesSlots = make(criticalDutiesSlotMap) + } + for _validator, slots := range criticalDutiesSlots.CriticalDuties { + validator := validatorIndex(_validator) + if bc.criticalDutiesSlots[validator] == nil { + bc.criticalDutiesSlots[validator] = 
make(map[slot]interface{}) + } + for _, _slot := range slots { + s := slot(_slot) + bc.criticalDutiesSlots[validator][s] = struct{}{} + } + } +} + +func (bc *MockBeaconClient) isValidatorActive(validator validatorIndex, e epoch) (bool, error) { + // Get the pubkey + validatorPubkey, ok := bc.validatorPubkeys[validator] + if !ok { + return false, fmt.Errorf("validator %s not found", validator) + } + validatorDetails, ok := bc.state.ValidatorDetails[validatorPubkey] + if !ok { + return false, fmt.Errorf("validator %s not found", validatorPubkey) + } + // Validators are assigned duties in the epoch they are activated + // but not in the epoch they exit + return validatorDetails.ActivationEpoch <= uint64(e) && (validatorDetails.ExitEpoch == 0 || uint64(e) < validatorDetails.ExitEpoch), nil +} + +func (bc *MockBeaconClient) GetCommitteesForEpoch(_epoch *uint64) (beacon.Committees, error) { + + out := &MockBeaconCommittees{} + out.epoch = epoch(*_epoch) + + // First find validators that must be assigned to specific slots + var missedDutiesValidators map[slot][]validatorIndex + missedDutiesValidators = bc.missedDuties[out.epoch] + + // Keep track of validators that have been assigned to a slot + assignedValidators := make(map[string]interface{}) + + out.slots = make([]mockBeaconCommitteeSlot, 32) + for s := out.epoch * 32; s < out.epoch*32+32; s++ { + idx := s - out.epoch*32 + out.slots[idx].validators = make([]string, 0, len(bc.validatorIndices)/32) + + // Assign validators that missed duties for this slot + for _, validator := range missedDutiesValidators[slot(s)] { + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + } + for _, validator := range out.slots[idx].validators { + assignedValidators[validator] = struct{}{} + } + } + + // Assign the remaining validators based on total order / critical duties + for validator, _ := range bc.validatorIndices { + if _, ok := assignedValidators[string(validator)]; ok { + continue + } + + // If the validator was not active, skip it + active, err := bc.isValidatorActive(validator, out.epoch) + if err != nil { + return nil, err + } + if !active { + continue + } + + // If the validator has critical duties for this slot, assign it + if _, ok := bc.criticalDutiesSlots[validator]; ok { + assigned := false + for s, _ := range bc.criticalDutiesSlots[validator] { + if bc.state.BeaconConfig.SlotToEpoch(uint64(s)) == uint64(out.epoch) { + idx := s % 32 + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + assigned = true + break + } + } + if assigned { + continue + } + } + + // The validator was not assigned to a slot, neither by missing duties nor critical duties + // Assign it to a pseudorandom slot + idx := validator.Mod32() + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + } + + return out, nil +} + +func (v validatorIndex) Mod32() uint { + vInt, err := strconv.ParseUint(string(v), 10, 64) + if err != nil { + panic(err) + } + return uint(vInt % 32) +} + +func (bc *MockBeaconClient) GetAttestations(_slot string) ([]beacon.AttestationInfo, bool, error) { + + slotNative, err := strconv.ParseUint(_slot, 10, 64) + if err != nil { + bc.t.Fatalf("Invalid slot: %s", _slot) + } + s := slot(slotNative) + + // Report attestations for the previous slot + s -= 16 + + // Get the epoch of the previous slot + e := epoch(s / 32) + + // The length of the bitlist is the number of validators that missed duties + // for the slot, plus the number of validators whose mod 32 is the same as 
the slot, + // unless that validator has missed duties in the same epoch. + // + // However, a validator can be both in the set of validators that missed duties for the slot + // and the set of validators whose mod 32 is the same as the slot, so we have to be careful + // to not double count them. + slotMod32 := s % 32 + var bitlistLength uint + // Add the number of validators that missed duties for the slot + bitlistLength = bc.missedDuties.getCount(s) + + for index, _ := range bc.validatorIndices { + // Don't count validators that have misses anywhere in this epoch + if bc.missedEpochs.validatorMissedEpoch(index, e) { + // This validator either missed this slot and was already counted, + // or missed a different slot in the same epoch, and shouldn't be counted + continue + } + + active, err := bc.isValidatorActive(index, e) + if err != nil { + bc.t.Fatalf("Error checking if validator %s is active: %v", index, err) + } + if !active { + continue + } + + // Don't count validators with critical duties in this epoch unless the duty is in slot s + if duties, ok := bc.criticalDutiesSlots[index]; ok { + // The validator has some critical duties + if _, ok := duties[s]; ok { + // The duty is in slot s, so count it + bitlistLength++ + } else { + // Check if any duties are in the same epoch + foundDuty := false + for criticalDutySlot, _ := range duties { + if bc.state.BeaconConfig.SlotToEpoch(uint64(criticalDutySlot)) == uint64(e) { + foundDuty = true + break + } + } + if foundDuty { + continue + } + } + } + + // This validator was assigned to this slot and did not miss duties. + validatorIndexMod32 := index.Mod32() + if validatorIndexMod32 == uint(slotMod32) { + bitlistLength++ + } + } + + bl := bitfield.NewBitlist(uint64(bitlistLength)) + // Include all validators + bl = bl.Not() + // Exclude validators that need to miss duties on the previous slot + if _, ok := bc.missedDuties[e]; ok { + if _, ok := bc.missedDuties[e][s]; ok { + numMissed := len(bc.missedDuties[e][s]) + for i := 0; i < numMissed; i++ { + bl.SetBitAt(uint64(i), false) + } + } + } + out := []beacon.AttestationInfo{ + { + AggregationBits: bl, + SlotIndex: uint64(s), + CommitteeIndex: 0, + }, + } + return out, true, nil +} + +// Count returns the number of committees in the response +func (mbc *MockBeaconCommittees) Count() int { + return len(mbc.slots) +} + +// Index returns the index of the committee at the provided offset +func (mbc *MockBeaconCommittees) Index(index int) uint64 { + return 0 +} + +// Slot returns the slot of the committee at the provided offset +func (mbc *MockBeaconCommittees) Slot(index int) uint64 { + return uint64(mbc.epoch)*32 + uint64(index) +} + +// Validators returns the list of validators of the committee at +// the provided offset +func (mbc *MockBeaconCommittees) Validators(index int) []string { + return mbc.slots[index].validators +} + +// Release is a no-op +func (mbc *MockBeaconCommittees) Release() { +} + +// SetMinipoolPerformance notes the minipool's performance +// to be mocked in the response to GetAttestations +func (bc *MockBeaconClient) SetMinipoolPerformance(index string, missedSlots []uint64) { + + // For each missed slot, add it to the inner map of slot to validator indices + for _, s := range missedSlots { + bc.missedDuties.add(slot(s), validatorIndex(index)) + + // Add to missedEpochs + bc.missedEpochs.set(validatorIndex(index), slot(s)) + } + + // A map of true validator index -> committee index + if _, ok := bc.validatorIndices[validatorIndex(index)]; ok { + bc.t.Fatalf("Validator 
%s already set", index) + } + bc.validatorIndices.set(validatorIndex(index), bc.validatorCount) + bc.validatorCount++ +} + +func (bc *MockBeaconClient) GetEth2Config() (beacon.Eth2Config, error) { + return bc.state.BeaconConfig, nil +} + +func (bc *MockBeaconClient) GetBeaconHead() (beacon.BeaconHead, error) { + // Tell the tests that the beacon head is far enough ahead that the target slot + // is in an epoch that has a finalized epoch right after it. + out := beacon.BeaconHead{ + Epoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 3, + JustifiedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 2, + PreviousJustifiedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 1, + FinalizedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 1, + } + return out, nil +} + +func (bc *MockBeaconClient) GetStateForSlot(slot uint64) (*state.NetworkState, error) { + if slot == bc.state.BeaconSlotNumber { + return bc.state, nil + } + return nil, errors.New("not implemented") +} + +func (bc *MockBeaconClient) AddWithdrawal(s uint64, index string, amount *big.Int) { + if bc.withdrawals == nil { + bc.withdrawals = make(map[slot]map[validatorIndex]*big.Int) + } + ss := slot(s) + if bc.withdrawals[ss] == nil { + bc.withdrawals[ss] = make(map[validatorIndex]*big.Int) + } + bc.withdrawals[ss][validatorIndex(index)] = amount +} diff --git a/shared/services/rewards/test/mock.go b/shared/services/rewards/test/mock.go new file mode 100644 index 000000000..02d195cc6 --- /dev/null +++ b/shared/services/rewards/test/mock.go @@ -0,0 +1,740 @@ +package test + +import ( + "math/big" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/common" + rprewards "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/rocketpool-go/utils/eth" + rpstate "github.com/rocket-pool/rocketpool-go/utils/state" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/fees" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +const FarFutureEpoch uint64 = 0xffffffffffffffff + +// This file contains structs useful for quickly creating mock histories for testing. 
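// Editor's sketch (not part of the original change): one assumed way the
// helpers in this file fit together with the mock beacon client in a
// treegen test. It builds the default history, derives the end-of-interval
// NetworkState, and points a MockBeaconClient at it; a "testing" import is
// assumed for the enclosing test file.
//
//	func wireMockHistory(t *testing.T) {
//		history := NewDefaultMockHistory()        // epochs 100-200, stock node set
//		endState := history.GetEndNetworkState()  // state as of the final slot
//		bc := NewMockBeaconClient(t)
//		bc.SetState(endState)      // registers every validator pubkey
//		history.SetWithdrawals(bc) // schedules SP / opted-out withdrawals
//		_ = history.GetNodeSummary() // nodes grouped by Class for assertions
//	}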
+ +func (h *MockHistory) GetValidatorIndex() string { + u, err := strconv.ParseUint(h.lastValidatorIndex, 10, 64) + if err != nil { + panic(err) + } + h.lastValidatorIndex = strconv.FormatUint(u+1, 10) + return h.lastValidatorIndex +} + +func (h *MockHistory) GetValidatorPubkey() types.ValidatorPubkey { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastValidatorPubkey.Bytes()), big.NewInt(1)) + h.lastValidatorPubkey = types.BytesToValidatorPubkey(next.Bytes()) + return h.lastValidatorPubkey +} + +func (h *MockHistory) GetMinipoolAddress() common.Address { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastMinipoolAddress.Bytes()), big.NewInt(1)) + h.lastMinipoolAddress = common.BigToAddress(next) + return h.lastMinipoolAddress +} + +func (h *MockHistory) GetNodeAddress() common.Address { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastNodeAddress.Bytes()), big.NewInt(1)) + h.lastNodeAddress = common.BigToAddress(next) + return h.lastNodeAddress +} + +var oneEth = big.NewInt(1000000000000000000) +var thirtyTwoEth = big.NewInt(0).Mul(oneEth, big.NewInt(32)) + +func (h *MockHistory) GetMinipoolAttestationScoreAndCount(address common.Address, state *state.NetworkState) (*big.Int, uint64) { + out := big.NewInt(0) + mpi := state.MinipoolDetailsByAddress[address] + nodeDetails := state.NodeDetailsByAddress[mpi.NodeAddress] + + // Check every slot in the history + count := uint64(0) + for slot := h.GetConsensusStartBlock(); slot <= h.GetConsensusEndBlock(); slot++ { + // Get the time at the slot + blockTime := h.BeaconConfig.GetSlotTime(slot) + // Check the status of the minipool at this time + if mpi.Status != types.Staking { + continue + } + if mpi.Finalised { + continue + } + // Check if the minipool was opted in at this time + if !nodeDetails.WasOptedInAt(blockTime) { + continue + } + pubkey := mpi.Pubkey + validator := state.ValidatorDetails[pubkey] + // Check if the validator was exited before this slot + if validator.ExitEpoch <= h.BeaconConfig.SlotToEpoch(slot) { + continue + } + index := validator.Index + indexInt, _ := strconv.ParseUint(index, 10, 64) + // Count the attestation if index%32 == slot%32 + if indexInt%32 == uint64(slot%32) { + count++ + + bond, fee := mpi.GetMinipoolBondAndNodeFee(blockTime) + // Give the minipool a score according to its fee + eligibleBorrowedEth := state.GetEligibleBorrowedEth(nodeDetails) + _, percentOfBorrowedEth := state.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake) + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + out.Add(out, minipoolScore) + } + } + return out, count +} + +type MockMinipool struct { + Address common.Address + Pubkey types.ValidatorPubkey + Status types.MinipoolStatus + StatusBlock *big.Int + StatusTime time.Time + Finalised bool + NodeFee *big.Int + NodeDepositBalance *big.Int + NodeAddress common.Address + + LastBondReductionTime time.Time + LastBondReductionPrevValue *big.Int + LastBondReductionPrevNodeFee *big.Int + + ValidatorIndex string + + // Withdrawal amount to add to the minipool during its SP period + SPWithdrawals *big.Int + + // Withdrawal amount to add to the minipool during its regular period + OptedOutWithdrawals *big.Int + 
+ // The epoch after which the minipool is withdrawable. + // Defaults to FAR_FUTURE_EPOCH. + WithdrawableEpoch uint64 + + Notes []string +} + +type BondSize *big.Int + +var ( + BondSizeEightEth = BondSize(eth.EthToWei(8)) + BondSizeSixteenEth = BondSize(eth.EthToWei(16)) + _bondSizeThirtyTwoEth = BondSize(eth.EthToWei(32)) +) + +func (h *MockHistory) GetNewDefaultMockMinipool(bondSize BondSize) *MockMinipool { + if (*big.Int)(_bondSizeThirtyTwoEth).Cmp(bondSize) <= 0 { + panic("Bond size must be less than 32 ether") + } + + out := &MockMinipool{ + Address: h.GetMinipoolAddress(), + Pubkey: h.GetValidatorPubkey(), + // By default, staked since always + Status: types.Staking, + StatusBlock: big.NewInt(0), + StatusTime: time.Unix(DefaultMockHistoryGenesis, 0), + // Default to 10% to make math simpler. Aka 0.1 ether + NodeFee: big.NewInt(100000000000000000), + NodeDepositBalance: big.NewInt(0).Set(bondSize), + ValidatorIndex: h.GetValidatorIndex(), + // Default to 1 ETH of SP withdrawals + SPWithdrawals: big.NewInt(1e18), + } + + return out +} + +type MockNode struct { + Address common.Address + RegistrationTime time.Time + RplStake *big.Int + SmoothingPoolRegistrationState bool + SmoothingPoolRegistrationChanged time.Time + + IsOdao bool + JoinedOdaoAt time.Time + + bondedEth *big.Int + borrowedEth *big.Int + Minipools []*MockMinipool + + Notes string + Class string +} + +func (n *MockNode) AddMinipool(minipool *MockMinipool) { + minipool.NodeAddress = n.Address + n.bondedEth.Add(n.bondedEth, minipool.NodeDepositBalance) + borrowedEth := big.NewInt(0).Sub((*big.Int)(_bondSizeThirtyTwoEth), minipool.NodeDepositBalance) + n.borrowedEth.Add(n.borrowedEth, borrowedEth) + + n.Minipools = append(n.Minipools, minipool) +} + +func (h *MockHistory) SetWithdrawals(mockBeaconClient *MockBeaconClient) { + for _, node := range h.Nodes { + var slotWhileIn uint64 + // Get a slot inside the node's SP period + if node.SmoothingPoolRegistrationState { + // Use the last slot of the SP period + slotWhileIn = h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) + } else { + // Get the opt-out time and ensure the node was opted in for at least 1 slot + optedOut := node.SmoothingPoolRegistrationChanged + if optedOut.Unix() != 0 { + slotWhileIn = h.BeaconConfig.FirstSlotAtLeast(optedOut.Unix()) - 1 + if slotWhileIn < h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) { + slotWhileIn = 0 + } + } + } + + var slotWhileOut uint64 + if !node.SmoothingPoolRegistrationState { + slotWhileOut = h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) + } else { + // Get the opt-in time and ensure the node was opted out for at least 1 slot + optedIn := node.SmoothingPoolRegistrationChanged + if optedIn.Unix() != 0 { + slotWhileOut = h.BeaconConfig.FirstSlotAtLeast(optedIn.Unix()) - 1 + if slotWhileOut < h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) { + slotWhileOut = 0 + } + } + } + + for _, minipool := range node.Minipools { + if minipool.SPWithdrawals != nil && minipool.SPWithdrawals.Sign() > 0 { + if slotWhileIn == 0 { + panic("minipool has sp withdrawals but node was never in the sp") + } + mockBeaconClient.AddWithdrawal(slotWhileIn, minipool.ValidatorIndex, minipool.SPWithdrawals) + } + if minipool.OptedOutWithdrawals != nil && minipool.OptedOutWithdrawals.Sign() > 0 { + if slotWhileOut == 0 { + panic("minipool has opted out withdrawals but node was never opted out of the sp") + } + mockBeaconClient.AddWithdrawal(slotWhileOut, minipool.ValidatorIndex, minipool.OptedOutWithdrawals) + } + } + } +} + +type NewMockNodeParams struct { + SmoothingPool 
bool + EightEthMinipools int + SixteenEthMinipools int + CollateralRpl int64 +} + +func (h *MockHistory) GetNewDefaultMockNode(params *NewMockNodeParams) *MockNode { + if params == nil { + // Inefficient, but nice code follows. + params = &NewMockNodeParams{} + } + + out := &MockNode{ + Address: h.GetNodeAddress(), + RegistrationTime: time.Unix(DefaultMockHistoryGenesis, 0), + RplStake: big.NewInt(0), + SmoothingPoolRegistrationState: params.SmoothingPool, + SmoothingPoolRegistrationChanged: time.Unix(0, 0), + + borrowedEth: big.NewInt(0), + bondedEth: big.NewInt(0), + } + + for i := 0; i < params.EightEthMinipools; i++ { + out.AddMinipool(h.GetNewDefaultMockMinipool(BondSizeEightEth)) + } + + for i := 0; i < params.SixteenEthMinipools; i++ { + out.AddMinipool(h.GetNewDefaultMockMinipool(BondSizeSixteenEth)) + } + + out.RplStake = big.NewInt(params.CollateralRpl) + out.RplStake.Mul(out.RplStake, eth.EthToWei(1)) + + // Opt nodes in an epoch before the start of the interval + if params.SmoothingPool { + out.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch - 1)) + } + + return out +} + +// Returns a list of nodes with various attributes: +// some will have active minipools, some will not; +// some will be under- or over-collateralized; +// some will have opted in or out during the interval; +// some will have had their bond reduced during the interval. +func (h *MockHistory) GetDefaultMockNodes() []*MockNode { + nodes := []*MockNode{} + + // Create 10 nodes with one 8-eth minipool each and 10 RPL staked + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Notes = "Regular node with one regular 8-eth minipool" + node.Class = "single_eight_eth" + node.Minipools[0].SPWithdrawals = nil + node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) + nodes = append(nodes, node) + } + + // Create 10 more of the same, but in the SP + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Notes = "Smoothing pool node with one regular 8-eth minipool" + node.Class = "single_eight_eth_sp" + nodes = append(nodes, node) + } + + // Create 20 as above, but with 16-eth minipools + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + CollateralRpl: 10, + }) + node.Notes = "Regular node with one regular 16-eth minipool" + node.Class = "single_sixteen_eth" + node.Minipools[0].SPWithdrawals = nil + node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) + nodes = append(nodes, node) + } + + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Notes = "Smoothing pool node with one regular 16-eth minipool" + node.Class = "single_sixteen_eth_sp" + nodes = append(nodes, node) + } + + // Add a node that opts in a quarter of the way through the interval + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 8-eth that opts in 1/4 of the way through the interval" + node.Class = "single_eight_eth_opted_in_quarter" + nodes = append(nodes, node) + + // Add a node that opts 
in a quarter of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 16-eth that opts in 1/4 of the way through the interval" + node.Class = "single_sixteen_eth_opted_in_quarter" + nodes = append(nodes, node) + + // Add a node that opts out three quarters of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: false, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + 3*(h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 8-eth that opts out 3/4 of the way through the interval" + node.Class = "single_eight_eth_opted_out_three_quarters" + nodes = append(nodes, node) + + // Add a node that opts out three quarters of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: false, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + 3*(h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 16-eth that opts out 3/4 of the way through the interval" + node.Class = "single_sixteen_eth_opted_out_three_quarters" + nodes = append(nodes, node) + + // Add a node that does a bond reduction halfway through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Minipools[0].LastBondReductionTime = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/2)) + node.Minipools[0].LastBondReductionPrevValue = big.NewInt(0).Mul(big.NewInt(16), eth.EthToWei(1)) + // Say it was 20% for fun + node.Minipools[0].LastBondReductionPrevNodeFee, _ = big.NewInt(0).SetString("200000000000000000", 10) + node.Notes = "Node with one 16-eth that does a bond reduction to 8 eth halfway through the interval" + node.Class = "single_bond_reduction" + nodes = append(nodes, node) + + // Add a node with no minipools + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + // Give it collateral so we can test that it's ignored despite having collateral + CollateralRpl: 10, + }) + node.Notes = "Node with no minipools but RPL collateral" + node.Class = "no_minipools" + nodes = append(nodes, node) + + // Add a node with a pending minipool + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Minipools[0].Status = types.Prelaunch + node.Minipools[0].SPWithdrawals = nil + node.Notes = "Node with one 8-eth minipool that is pending" + node.Class = "single_eight_eth_pending" + nodes = append(nodes, node) + + // Add a node with a single staking minipool that is finalized + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Minipools[0].Finalised = true + node.Minipools[0].SPWithdrawals = nil + node.Notes = "Node with one 8-eth minipool that is finalized" + node.Class = "single_eight_eth_finalized" + nodes = append(nodes, node) + + // Finally, create two odao nodes to share the juicy odao rewards + odaoNodes := h.GetDefaultMockODAONodes() + nodes = 
append(nodes, odaoNodes...) + + return nodes +} + +func (h *MockHistory) GetDefaultMockODAONodes() []*MockNode { + odaoNodes := []*MockNode{ + h.GetNewDefaultMockNode(nil), + h.GetNewDefaultMockNode(nil), + } + for _, node := range odaoNodes { + node.IsOdao = true + node.Class = "odao" + } + return odaoNodes +} + +const DefaultMockHistoryGenesis = 1577836800 + +type MockHistory struct { + StartEpoch uint64 + EndEpoch uint64 + BlockOffset uint64 + BeaconConfig beacon.Eth2Config + + // Network details for the final slot + NetworkDetails *rpstate.NetworkDetails + + Nodes []*MockNode + + // Various offsets to create unique number spaces for each key type + lastNodeAddress common.Address + lastMinipoolAddress common.Address + lastValidatorPubkey types.ValidatorPubkey + lastValidatorIndex string +} + +func NewDefaultMockHistoryNoNodes() *MockHistory { + out := &MockHistory{ + StartEpoch: 100, + EndEpoch: 200, + BlockOffset: 100000, + BeaconConfig: beacon.Eth2Config{ + GenesisEpoch: 0, + // 2020-01-01 midnight UTC for simplicity + GenesisTime: DefaultMockHistoryGenesis, + SlotsPerEpoch: 32, + SecondsPerSlot: 12, + SecondsPerEpoch: 12 * 32, + }, + + NetworkDetails: &rpstate.NetworkDetails{ + // Defaults to 0.24 ether, so 10 RPL is 2.4 ether and a leb8 with 10 RPL is 10% collateralized + RplPrice: big.NewInt(240000000000000000), + // Defaults to 10% aka 0.1 ether + MinCollateralFraction: big.NewInt(100000000000000000), + // Defaults to 60% to mimic current withdrawal limits + MaxCollateralFraction: big.NewInt(600000000000000000), + // Defaults to 100 epochs + IntervalDuration: 100 * 32 * 12 * time.Second, + // Defaults to genesis plus 100 epochs + IntervalStart: time.Unix(DefaultMockHistoryGenesis, 0).Add(100 * 32 * 12 * time.Second), + // Defaults to 0.7 ether to match mainnet + NodeOperatorRewardsPercent: big.NewInt(700000000000000000), + // Defaults to 0.015 ether to match mainnet as of 2024-10-08 + TrustedNodeOperatorRewardsPercent: big.NewInt(15000000000000000), + // Defaults to 1 - 0.7 - 0.015 ether to round out to 100% + ProtocolDaoRewardsPercent: big.NewInt(285000000000000000), + // Defaults to 70,000 ether of RPL to approximate 1/13th of 5% of 18m + PendingRPLRewards: big.NewInt(0).Mul(big.NewInt(70000), big.NewInt(1000000000000000000)), + // RewardIndex defaults to 40000 to prevent a test tree from being taken seriously + RewardIndex: 40000, + // Put 100 ether in the smoothing pool + SmoothingPoolBalance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(1000000000000000000)), + + // The rest of the fields seem unimportant and are left empty + }, + lastNodeAddress: common.BigToAddress(big.NewInt(2000)), + lastMinipoolAddress: common.BigToAddress(big.NewInt(30000)), + lastValidatorPubkey: types.BytesToValidatorPubkey(big.NewInt(600000).Bytes()), + lastValidatorIndex: "0", + } + return out +} + +func NewDefaultMockHistory() *MockHistory { + out := NewDefaultMockHistoryNoNodes() + out.Nodes = out.GetDefaultMockNodes() + return out +} + +func (h *MockHistory) GetEndNetworkState() *state.NetworkState { + out := &state.NetworkState{ + // EL block number is the final slot's block, which is the last slot of the last epoch + // plus the offset + ElBlockNumber: h.BlockOffset + h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch), + BeaconSlotNumber: h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch), + BeaconConfig: h.BeaconConfig, + NetworkDetails: h.NetworkDetails, + NodeDetails: []rpstate.NativeNodeDetails{}, + NodeDetailsByAddress: make(map[common.Address]*rpstate.NativeNodeDetails), + MinipoolDetails: 
[]rpstate.NativeMinipoolDetails{}, + MinipoolDetailsByAddress: make(map[common.Address]*rpstate.NativeMinipoolDetails), + MinipoolDetailsByNode: make(map[common.Address][]*rpstate.NativeMinipoolDetails), + ValidatorDetails: make(state.ValidatorDetailsMap), + OracleDaoMemberDetails: []rpstate.OracleDaoMemberDetails{}, + ProtocolDaoProposalDetails: nil, + } + + // Add nodes + for _, node := range h.Nodes { + // Calculate the node's effective RPL stake + // If it's below 10% of borrowed eth per the network details, it's 0 + rplStake := node.RplStake + rplPrice := h.NetworkDetails.RplPrice + // Calculate the minimum RPL stake according to the network details + minRplStake := big.NewInt(0).Mul(node.borrowedEth, h.NetworkDetails.MinCollateralFraction) + // minRplStake is now the minimum RPL stake in eth value measured in wei squared + // divide by the price to get the minimum RPL stake in RPL + minRplStake.Div(minRplStake, rplPrice) + + // Same for max + maxRplStake := big.NewInt(0).Mul(node.borrowedEth, h.NetworkDetails.MaxCollateralFraction) + maxRplStake.Div(maxRplStake, rplPrice) + + // Eth matching limit is rpl stake times the price divided by the collateral fraction + ethMatchingLimit := big.NewInt(0).Mul(node.RplStake, rplPrice) + ethMatchingLimit.Div(ethMatchingLimit, h.NetworkDetails.MinCollateralFraction) + collateralisationRatio := big.NewInt(0) + if node.borrowedEth.Sign() > 0 { + collateralisationRatio.Div(node.bondedEth, big.NewInt(0).Add(big.NewInt(0).Mul(node.bondedEth, eth.EthToWei(1)), node.borrowedEth)) + } + + // Create the node details + details := rpstate.NativeNodeDetails{ + Exists: true, + RegistrationTime: big.NewInt(node.RegistrationTime.Unix()), + TimezoneLocation: "UTC", + RewardNetwork: big.NewInt(0), + RplStake: node.RplStake, + EffectiveRPLStake: rplStake, + MinimumRPLStake: minRplStake, + MaximumRPLStake: maxRplStake, + EthMatched: node.borrowedEth, + EthMatchedLimit: ethMatchingLimit, + MinipoolCount: big.NewInt(int64(len(node.Minipools))), + // Empty node wallet + BalanceETH: big.NewInt(0), + BalanceRETH: big.NewInt(0), + BalanceRPL: big.NewInt(0), + BalanceOldRPL: big.NewInt(0), + DepositCreditBalance: big.NewInt(0), + DistributorBalance: big.NewInt(0), + DistributorBalanceUserETH: big.NewInt(0), + DistributorBalanceNodeETH: big.NewInt(0), + WithdrawalAddress: node.Address, + PendingWithdrawalAddress: common.Address{}, + SmoothingPoolRegistrationState: node.SmoothingPoolRegistrationState, + SmoothingPoolRegistrationChanged: big.NewInt(node.SmoothingPoolRegistrationChanged.Unix()), + NodeAddress: node.Address, + + AverageNodeFee: big.NewInt(0), // Populated by CalculateAverageFeeAndDistributorShares + + // Ratio of bonded to bonded plus borrowed + CollateralisationRatio: collateralisationRatio, + } + + out.NodeDetails = append(out.NodeDetails, details) + ptr := &out.NodeDetails[len(out.NodeDetails)-1] + out.NodeDetailsByAddress[node.Address] = ptr + + // Add minipools + for _, minipool := range node.Minipools { + minipoolDetails := rpstate.NativeMinipoolDetails{ + Exists: true, + MinipoolAddress: minipool.Address, + Pubkey: minipool.Pubkey, + StatusRaw: uint8(minipool.Status), + StatusBlock: minipool.StatusBlock, + StatusTime: big.NewInt(minipool.StatusTime.Unix()), + Finalised: minipool.Finalised, + NodeFee: minipool.NodeFee, + NodeDepositBalance: minipool.NodeDepositBalance, + NodeDepositAssigned: true, + UserDepositBalance: big.NewInt(0).Sub(_bondSizeThirtyTwoEth, minipool.NodeDepositBalance), + UserDepositAssigned: true, + UserDepositAssignedTime: 
big.NewInt(h.BeaconConfig.GetSlotTime(minipool.StatusBlock.Uint64() - h.BlockOffset).Unix()), + NodeAddress: minipool.NodeAddress, + Balance: big.NewInt(0), + DistributableBalance: big.NewInt(0), + NodeShareOfBalance: big.NewInt(0), + UserShareOfBalance: big.NewInt(0), + NodeRefundBalance: big.NewInt(0), + PenaltyCount: big.NewInt(0), + PenaltyRate: big.NewInt(0), + WithdrawalCredentials: common.Hash{}, + Status: minipool.Status, + DepositType: types.Variable, + + LastBondReductionTime: big.NewInt(minipool.LastBondReductionTime.Unix()), + LastBondReductionPrevValue: minipool.LastBondReductionPrevValue, + LastBondReductionPrevNodeFee: minipool.LastBondReductionPrevNodeFee, + } + out.MinipoolDetails = append(out.MinipoolDetails, minipoolDetails) + minipoolPtr := &out.MinipoolDetails[len(out.MinipoolDetails)-1] + out.MinipoolDetailsByAddress[minipool.Address] = minipoolPtr + out.MinipoolDetailsByNode[minipool.NodeAddress] = append(out.MinipoolDetailsByNode[minipool.NodeAddress], minipoolPtr) + + // Finally, populate the ValidatorDetails map + pubkey := minipool.Pubkey + withdrawableEpoch := FarFutureEpoch + exitEpoch := FarFutureEpoch + if minipool.WithdrawableEpoch != 0 { + withdrawableEpoch = minipool.WithdrawableEpoch + exitEpoch = minipool.WithdrawableEpoch - 1 + } + details := beacon.ValidatorStatus{ + Pubkey: minipool.Pubkey, + Index: minipool.ValidatorIndex, + WithdrawalCredentials: common.Hash{}, + Balance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + EffectiveBalance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + Slashed: false, + ActivationEligibilityEpoch: 0, + ActivationEpoch: 0, + ExitEpoch: exitEpoch, + WithdrawableEpoch: withdrawableEpoch, + Exists: true, + } + if minipool.Status == types.Staking { + details.Status = beacon.ValidatorState_ActiveOngoing + } + if minipool.Finalised { + details.Status = beacon.ValidatorState_WithdrawalDone + } + out.ValidatorDetails[pubkey] = details + } + + // Calculate the AverageNodeFee and DistributorShares + ptr.CalculateAverageFeeAndDistributorShares(out.MinipoolDetailsByNode[ptr.NodeAddress]) + + // Check if the node is an odao member + if node.IsOdao { + details := rpstate.OracleDaoMemberDetails{ + Address: node.Address, + Exists: true, + ID: node.Address.Hex(), + Url: "https://example.com", + JoinedTime: time.Unix(node.RegistrationTime.Unix(), 0), + LastProposalTime: time.Unix(node.RegistrationTime.Unix(), 0), + RPLBondAmount: node.RplStake, + } + out.OracleDaoMemberDetails = append(out.OracleDaoMemberDetails, details) + } + } + + return out +} + +// Boring derived data getters +func (h *MockHistory) GetConsensusStartBlock() uint64 { + return h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) +} + +func (h *MockHistory) GetExecutionStartBlock() uint64 { + return h.GetConsensusStartBlock() + h.BlockOffset +} + +func (h *MockHistory) GetConsensusEndBlock() uint64 { + return h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) +} + +func (h *MockHistory) GetExecutionEndBlock() uint64 { + return h.GetConsensusEndBlock() + h.BlockOffset +} + +func (h *MockHistory) GetStartTime() time.Time { + return h.BeaconConfig.GetSlotTime(h.GetConsensusStartBlock()) +} + +func (h *MockHistory) GetEndTime() time.Time { + return h.BeaconConfig.GetSlotTime(h.GetConsensusEndBlock()) +} + +func (h *MockHistory) GetPreviousRewardSnapshotEvent() rprewards.RewardsEvent { + intervalEpochLength := h.EndEpoch - h.StartEpoch + 1 + consensusEndBlock := h.BeaconConfig.LastSlotOfEpoch(h.StartEpoch - 1) + consensusStartBlock := consensusEndBlock - 
intervalEpochLength*h.BeaconConfig.SlotsPerEpoch + return rprewards.RewardsEvent{ + Index: big.NewInt(int64(h.NetworkDetails.RewardIndex - 1)), + ExecutionBlock: big.NewInt(int64(consensusEndBlock + h.BlockOffset)), + ConsensusBlock: big.NewInt(int64(consensusEndBlock)), + MerkleRoot: common.Hash{}, + MerkleTreeCID: "", + IntervalsPassed: big.NewInt(1), + TreasuryRPL: big.NewInt(0), + TrustedNodeRPL: []*big.Int{}, + NodeRPL: []*big.Int{}, + NodeETH: []*big.Int{}, + UserETH: big.NewInt(0), + IntervalStartTime: h.BeaconConfig.GetSlotTime(consensusStartBlock), + IntervalEndTime: h.BeaconConfig.GetSlotTime(consensusEndBlock), + SubmissionTime: h.BeaconConfig.GetSlotTime(consensusEndBlock), + } +} + +func (h *MockHistory) GetNodeSummary() map[string][]*MockNode { + out := make(map[string][]*MockNode) + for _, node := range h.Nodes { + out[node.Class] = append(out[node.Class], node) + } + return out +} diff --git a/shared/services/rewards/test/rocketpool.go b/shared/services/rewards/test/rocketpool.go new file mode 100644 index 000000000..5d123424d --- /dev/null +++ b/shared/services/rewards/test/rocketpool.go @@ -0,0 +1,89 @@ +package test + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/rocketpool" +) + +// MockRocketPool is an EC mock specifically for testing treegen. +// At a high level our approach is to provide two options to the tester: +// 1) Use a recording of request/response data from production to emulate a canonical tree +// 2) Allow for full response customization. +// +// The former is useful for ensuring that no regressions arise during refactors that should +// otherwise be nonfunctional, i.e., not impact the merkle root. +// +// The latter is useful to probe specific behaviors such as opt-in/opt-out eligibility timing, +// node weight, smoothing pool status, etc. +// +// Because recording responses ties the test to a specific version of the contracts and therefore +// the client-side code, the interface we need to mock should be as minimal as possible, and the +// recorded data should tightly match that interface. That is, no recorded response should encode +// something like the contract address the data are being requested from, but instead the high-level +// function name and arguments. 
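// Editor's sketch (assumed usage, not part of the original change) of
// option (2) above: seeding the mock with fully customized responses
// before handing it to treegen.
//
//	mock := NewMockRocketPool(t, 40000)
//	// Canned event returned for the previous interval lookup (index 39999).
//	mock.SetRewardSnapshotEvent(rewards.RewardsEvent{
//		Index:           big.NewInt(39999),
//		IntervalsPassed: big.NewInt(1),
//	})
//	// Canned EL header served when block times are resolved.
//	mock.SetHeaderByNumber(big.NewInt(1), &types.Header{
//		Number: big.NewInt(1),
//		Time:   1710394571,
//	})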
+type MockRocketPool struct { + RewardsIndex *big.Int + t *testing.T + rewardSnapshotEvents map[uint64]rewards.RewardsEvent + headers map[uint64]*types.Header +} + +func NewMockRocketPool(t *testing.T, index uint64) *MockRocketPool { + return &MockRocketPool{t: t, RewardsIndex: big.NewInt(int64(index))} +} + +func (mock *MockRocketPool) GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) { + return true, nil +} + +func (mock *MockRocketPool) HeaderByNumber(_ context.Context, number *big.Int) (*types.Header, error) { + if header, ok := mock.headers[number.Uint64()]; ok { + return header, nil + } + return nil, fmt.Errorf("header not found in mock for %d, please set it with SetHeaderByNumber", number.Uint64()) +} + +func (mock *MockRocketPool) SetHeaderByNumber(number *big.Int, header *types.Header) { + if mock.headers == nil { + mock.headers = make(map[uint64]*types.Header) + } + mock.headers[number.Uint64()] = header +} + +func (mock *MockRocketPool) GetRewardsEvent(index uint64, _ []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) { + + if event, ok := mock.rewardSnapshotEvents[index]; ok { + return true, event, nil + } + return false, rewards.RewardsEvent{}, nil +} + +func (mock *MockRocketPool) GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { + if event, ok := mock.rewardSnapshotEvents[interval]; ok { + return event, nil + } + return rewards.RewardsEvent{}, nil +} + +func (mock *MockRocketPool) SetRewardSnapshotEvent(event rewards.RewardsEvent) { + if mock.rewardSnapshotEvents == nil { + mock.rewardSnapshotEvents = make(map[uint64]rewards.RewardsEvent) + } + mock.rewardSnapshotEvents[event.Index.Uint64()] = event +} + +func (mock *MockRocketPool) GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) { + return mock.RewardsIndex, nil +} + +func (mock *MockRocketPool) Client() *rocketpool.RocketPool { + panic("not implemented") +} diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 97a279854..a2895c4b4 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -1,34 +1,64 @@ package rewards import ( + "context" "fmt" "math/big" "strings" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/wealdtech/go-merkletree" ) -type rewardsFileVersion uint64 - const ( FarEpoch uint64 = 18446744073709551615 ) const ( - rewardsFileVersionUnknown = iota + rewardsFileVersionUnknown uint64 = iota rewardsFileVersionOne rewardsFileVersionTwo rewardsFileVersionThree rewardsFileVersionMax = iota - 1 + + minRewardsFileVersionSSZ = rewardsFileVersionThree ) +// RewardsExecutionClient defines an interface +// that contains only the functions from rocketpool.RocketPool +// required for rewards generation. +// This facade makes it easier to perform dependency injection in tests. 
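// Editor's sketch (an assumption, not part of the original change): the
// compile-time assertions a test file could add so the mocks under
// shared/services/rewards/test keep satisfying these facades (assuming
// they can be imported here without a cycle):
//
//	var _ RewardsExecutionClient = (*test.MockRocketPool)(nil)
//	var _ RewardsBeaconClient = (*test.MockBeaconClient)(nil)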
+type RewardsExecutionClient interface { + GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) + HeaderByNumber(context.Context, *big.Int) (*ethtypes.Header, error) + GetRewardsEvent(index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) + GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) + GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) +} + +// RewardsBeaconClient defines an interface +// that contains only the functions from beacon.Client +// required for rewards generation. +// This facade makes it easier to perform dependency injection in tests. +type RewardsBeaconClient interface { + GetBeaconBlock(slot string) (beacon.BeaconBlock, bool, error) + GetCommitteesForEpoch(epoch *uint64) (beacon.Committees, error) + GetAttestations(slot string) ([]beacon.AttestationInfo, bool, error) + GetEth2Config() (beacon.Eth2Config, error) + GetBeaconHead() (beacon.BeaconHead, error) +} + // Interface for version-agnostic minipool performance type IMinipoolPerformanceFile interface { // Serialize a minipool performance file into bytes Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) // Serialize a minipool performance file into bytes designed for human readability SerializeHuman() ([]byte, error) @@ -48,28 +78,51 @@ type IRewardsFile interface { // Serialize a rewards file into bytes Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) // Deserialize a rewards file from bytes Deserialize([]byte) error - // Get the rewards file's header - GetHeader() *RewardsFileHeader + // Getters for general interval info + GetRewardsFileVersion() uint64 + GetIndex() uint64 + GetTotalNodeWeight() *big.Int + GetMerkleRoot() string + GetIntervalsPassed() uint64 + GetTotalProtocolDaoRpl() *big.Int + GetTotalOracleDaoRpl() *big.Int + GetTotalCollateralRpl() *big.Int + GetTotalNodeOperatorSmoothingPoolEth() *big.Int + GetTotalPoolStakerSmoothingPoolEth() *big.Int + GetExecutionStartBlock() uint64 + GetConsensusStartBlock() uint64 + GetExecutionEndBlock() uint64 + GetConsensusEndBlock() uint64 + GetStartTime() time.Time + GetEndTime() time.Time // Get all of the node addresses with rewards in this file // NOTE: the order of node addresses is not guaranteed to be stable, so don't rely on it GetNodeAddresses() []common.Address - // Get info about a node's rewards - GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) + // Getters for info about a specific node's rewards + HasRewardsFor(common.Address) bool + GetNodeCollateralRpl(common.Address) *big.Int + GetNodeOracleDaoRpl(common.Address) *big.Int + GetNodeSmoothingPoolEth(common.Address) *big.Int + GetMerkleProof(common.Address) ([]common.Hash, error) - // Gets the minipool performance file corresponding to this rewards file - GetMinipoolPerformanceFile() IMinipoolPerformanceFile + // Getters for network info + HasRewardsForNetwork(network uint64) bool + GetNetworkCollateralRpl(network uint64) *big.Int + GetNetworkOracleDaoRpl(network uint64) *big.Int + GetNetworkSmoothingPoolEth(network uint64) *big.Int // Sets the CID of the minipool performance file corresponding to this rewards file SetMinipoolPerformanceFileCID(cid string) // Generate the Merkle Tree and its root from the rewards file's proofs - generateMerkleTree() error + GenerateMerkleTree() error } // Rewards per network @@ -97,26 +150,21 @@ type 
ISmoothingPoolMinipoolPerformance interface { GetMissedAttestationCount() uint64 GetMissingAttestationSlots() []uint64 GetEthEarned() *big.Int -} - -// Interface for version-agnostic node operator rewards -type INodeRewardsInfo interface { - GetRewardNetwork() uint64 - GetCollateralRpl() *QuotedBigInt - GetOracleDaoRpl() *QuotedBigInt - GetSmoothingPoolEth() *QuotedBigInt - GetMerkleProof() ([]common.Hash, error) + GetBonusEthEarned() *big.Int + GetEffectiveCommission() *big.Int + GetConsensusIncome() *big.Int + GetAttestationScore() *big.Int } // Small struct to test version information for rewards files during deserialization type VersionHeader struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion,omitempty"` + RewardsFileVersion uint64 `json:"rewardsFileVersion,omitempty"` } // General version-agnostic information about a rewards file type RewardsFileHeader struct { // Serialized fields - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` + RewardsFileVersion uint64 `json:"rewardsFileVersion"` RulesetVersion uint64 `json:"rulesetVersion,omitempty"` Index uint64 `json:"index"` Network string `json:"network"` @@ -133,8 +181,7 @@ type RewardsFileHeader struct { NetworkRewards map[uint64]*NetworkRewardsInfo `json:"networkRewards"` // Non-serialized fields - MerkleTree *merkletree.MerkleTree `json:"-"` - InvalidNetworkNodes map[common.Address]uint64 `json:"-"` + MerkleTree *merkletree.MerkleTree `json:"-"` } // Information about an interval @@ -153,7 +200,7 @@ type IntervalInfo struct { SmoothingPoolEthAmount *QuotedBigInt `json:"smoothingPoolEthAmount"` MerkleProof []common.Hash `json:"merkleProof"` - TotalNodeWeight *QuotedBigInt `json:"-"` + TotalNodeWeight *big.Int `json:"-"` } type MinipoolInfo struct { @@ -173,8 +220,14 @@ type MinipoolInfo struct { AttestationScore *QuotedBigInt `json:"attestationScore"` CompletedAttestations map[uint64]bool `json:"-"` AttestationCount int `json:"attestationCount"` + TotalFee *big.Int `json:"-"` + MinipoolBonus *big.Int `json:"-"` + NodeOperatorBond *big.Int `json:"-"` + ConsensusIncome *QuotedBigInt `json:"consensusIncome"` } +var sixteenEth = big.NewInt(0).Mul(oneEth, big.NewInt(16)) + type IntervalDutiesInfo struct { Index uint64 Slots map[uint64]*SlotInfo @@ -206,12 +259,26 @@ type NodeSmoothingDetails struct { // v2 Fields OptInTime time.Time OptOutTime time.Time + + // v10 Fields + BonusEth *big.Int + EligibleBorrowedEth *big.Int + RplStake *big.Int } type QuotedBigInt struct { big.Int } +func QuotedBigIntFromBigInt(x *big.Int) *QuotedBigInt { + if x == nil { + return nil + } + q := QuotedBigInt{} + q.Int = *big.NewInt(0).Set(x) + return &q +} + func NewQuotedBigInt(x int64) *QuotedBigInt { q := QuotedBigInt{} native := big.NewInt(x) @@ -279,7 +346,7 @@ func (versionHeader *VersionHeader) deserializeMinipoolPerformanceFile(bytes []b file := &MinipoolPerformanceFile_v2{} return file, file.Deserialize(bytes) case rewardsFileVersionThree: - file := &MinipoolPerformanceFile_v3{} + file := &MinipoolPerformanceFile_v2{} return file, file.Deserialize(bytes) } diff --git a/shared/services/rewards/utils.go b/shared/services/rewards/utils.go index 326ebd7e4..c8c27a752 100644 --- a/shared/services/rewards/utils.go +++ b/shared/services/rewards/utils.go @@ -1,10 +1,8 @@ package rewards import ( - "context" "fmt" "io" - "math" "math/big" "net/http" "os" @@ -14,14 +12,12 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/crypto" "github.com/goccy/go-json" "github.com/klauspost/compress/zstd" "github.com/mitchellh/go-homedir" "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/rocketpool-go/storage" rpstate "github.com/rocket-pool/rocketpool-go/utils/state" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" @@ -34,7 +30,7 @@ var zero *big.Int // Gets the intervals the node can claim and the intervals that have already been claimed func GetClaimStatus(rp *rocketpool.RocketPool, nodeAddress common.Address) (unclaimed []uint64, claimed []uint64, err error) { // Get the current interval - currentIndexBig, err := rewards.GetRewardIndex(rp, nil) + currentIndexBig, err := rp.GetRewardIndex(nil) if err != nil { return } @@ -88,8 +84,11 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.Index = interval var event rewards.RewardsEvent + previousRewardsPoolAddresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses() + // Get the event details for this interval - event, err = GetRewardSnapshotEvent(rp, cfg, interval, opts) + client := NewRewardsExecutionClient(rp) + event, err = client.GetRewardSnapshotEvent(previousRewardsPoolAddresses, interval, opts) if err != nil { return } @@ -101,7 +100,7 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.MerkleRoot = merkleRootCanon // Check if the tree file exists - info.TreeFilePath = cfg.Smartnode.GetRewardsTreePath(interval, true) + info.TreeFilePath = cfg.Smartnode.GetRewardsTreePath(interval, true, config.RewardsExtensionJSON) _, err = os.Stat(info.TreeFilePath) if os.IsNotExist(err) { info.TreeFileExists = false @@ -119,10 +118,10 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no proofWrapper := localRewardsFile.Impl() - info.TotalNodeWeight = proofWrapper.GetHeader().TotalRewards.TotalNodeWeight + info.TotalNodeWeight = proofWrapper.GetTotalNodeWeight() // Make sure the Merkle root has the expected value - merkleRootFromFile := common.HexToHash(proofWrapper.GetHeader().MerkleRoot) + merkleRootFromFile := common.HexToHash(proofWrapper.GetMerkleRoot()) if merkleRootCanon != merkleRootFromFile { info.MerkleRootValid = false return @@ -130,117 +129,25 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.MerkleRootValid = true // Get the rewards from it - rewards, exists := proofWrapper.GetNodeRewardsInfo(nodeAddress) - info.NodeExists = exists - if exists { - info.CollateralRplAmount = rewards.GetCollateralRpl() - info.ODaoRplAmount = rewards.GetOracleDaoRpl() - info.SmoothingPoolEthAmount = rewards.GetSmoothingPoolEth() - - var proof []common.Hash - proof, err = rewards.GetMerkleProof() - if err != nil { - err = fmt.Errorf("error deserializing merkle proof for %s, node %s: %w", info.TreeFilePath, nodeAddress.Hex(), err) - return - } - info.MerkleProof = proof - } - - return -} - -// Get the event for a rewards snapshot -func GetRewardSnapshotEvent(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { - - addresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses() - found, event, err := rewards.GetRewardsEvent(rp, interval, addresses, opts) - if err != nil { - return rewards.RewardsEvent{}, fmt.Errorf("error getting rewards event for interval %d: %w", interval, err) - } - if !found { - return 
rewards.RewardsEvent{}, fmt.Errorf("interval %d event not found", interval) - } - - return event, nil - -} - -// Get the number of the latest EL block that was created before the given timestamp -func GetELBlockHeaderForTime(targetTime time.Time, rp *rocketpool.RocketPool) (*types.Header, error) { - - // Get the latest block's timestamp - latestBlockHeader, err := rp.Client.HeaderByNumber(context.Background(), nil) - if err != nil { - return nil, fmt.Errorf("error getting latest block header: %w", err) + info.NodeExists = proofWrapper.HasRewardsFor(nodeAddress) + if !info.NodeExists { + return } - latestBlock := latestBlockHeader.Number + info.CollateralRplAmount = &QuotedBigInt{*proofWrapper.GetNodeCollateralRpl(nodeAddress)} + info.ODaoRplAmount = &QuotedBigInt{*proofWrapper.GetNodeOracleDaoRpl(nodeAddress)} + info.SmoothingPoolEthAmount = &QuotedBigInt{*proofWrapper.GetNodeSmoothingPoolEth(nodeAddress)} - // Get the block that Rocket Pool deployed to the chain on, use that as the search start - deployBlock, err := storage.GetDeployBlock(rp) - if err != nil { - return nil, fmt.Errorf("error getting Rocket Pool deployment block: %w", err) + proof, err := proofWrapper.GetMerkleProof(nodeAddress) + if proof == nil { + err = fmt.Errorf("error deserializing merkle proof for %s, node %s: no proof for this node found", info.TreeFilePath, nodeAddress.Hex()) + return } - - // Get half the distance between the protocol deployment and right now - delta := big.NewInt(0).Sub(latestBlock, deployBlock) - delta.Div(delta, big.NewInt(2)) - - // Start at the halfway point - candidateBlockNumber := big.NewInt(0).Sub(latestBlock, delta) - candidateBlock, err := rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) + err = fmt.Errorf("error deserializing merkle proof for %s, node %s: %w", info.TreeFilePath, nodeAddress.Hex(), err) } - bestBlock := candidateBlock - pivotSize := candidateBlock.Number.Uint64() - minimumDistance := +math.Inf(1) - targetTimeUnix := float64(targetTime.Unix()) - - for { - // Get the distance from the candidate block to the target time - candidateTime := float64(candidateBlock.Time) - delta := targetTimeUnix - candidateTime - distance := math.Abs(delta) - - // If it's better, replace the best candidate with it - if distance < minimumDistance { - minimumDistance = distance - bestBlock = candidateBlock - } else if pivotSize == 1 { - // If the pivot is down to size 1 and we didn't find anything better after another iteration, this is the best block! 
- for candidateTime > targetTimeUnix { - // Get the previous block if this one happened after the target time - candidateBlockNumber.Sub(candidateBlockNumber, big.NewInt(1)) - candidateBlock, err = rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) - if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) - } - candidateTime = float64(candidateBlock.Time) - bestBlock = candidateBlock - } - return bestBlock, nil - } + info.MerkleProof = proof - // Iterate over the correct half, setting the pivot to the halfway point of that half (rounded up) - pivotSize = uint64(math.Ceil(float64(pivotSize) / 2)) - if delta < 0 { - // Go left - candidateBlockNumber.Sub(candidateBlockNumber, big.NewInt(int64(pivotSize))) - } else { - // Go right - candidateBlockNumber.Add(candidateBlockNumber, big.NewInt(int64(pivotSize))) - } - - // Clamp the new candidate to the latest block - if candidateBlockNumber.Uint64() > (latestBlock.Uint64() - 1) { - candidateBlockNumber.SetUint64(latestBlock.Uint64() - 1) - } - - candidateBlock, err = rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) - if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) - } - } + return } // Downloads the rewards file for this interval @@ -249,7 +156,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo expectedCid := i.CID expectedRoot := i.MerkleRoot // Determine file name and path - rewardsTreePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(interval, isDaemon)) + rewardsTreePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(interval, isDaemon, config.RewardsExtensionJSON)) if err != nil { return fmt.Errorf("error expanding rewards tree path: %w", err) } @@ -316,16 +223,13 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo } // Get the original merkle root - downloadedRoot := deserializedRewardsFile.GetHeader().MerkleRoot - - // Clear the merkle root so we have a safer comparison after calculating it again - deserializedRewardsFile.GetHeader().MerkleRoot = "" + downloadedRoot := deserializedRewardsFile.GetMerkleRoot() // Reconstruct the merkle tree from the file data, this should overwrite the stored Merkle Root with a new one - deserializedRewardsFile.generateMerkleTree() + deserializedRewardsFile.GenerateMerkleTree() // Get the resulting merkle root - calculatedRoot := deserializedRewardsFile.GetHeader().MerkleRoot + calculatedRoot := deserializedRewardsFile.GetMerkleRoot() // Compare the merkle roots to see if the original is correct if !strings.EqualFold(downloadedRoot, calculatedRoot) { @@ -342,7 +246,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo deserializedRewardsFile, rewardsTreePath, ) - err = localRewardsFile.Write() + _, err = localRewardsFile.Write() if err != nil { return fmt.Errorf("error saving interval %d file to %s: %w", interval, rewardsTreePath, err) } @@ -359,7 +263,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo } // Gets the start slot for the given interval -func GetStartSlotForInterval(previousIntervalEvent rewards.RewardsEvent, bc beacon.Client, beaconConfig beacon.Eth2Config) (uint64, error) { +func GetStartSlotForInterval(previousIntervalEvent rewards.RewardsEvent, bc RewardsBeaconClient, beaconConfig beacon.Eth2Config) (uint64, error) { // Get the chain head head, err := bc.GetBeaconHead() if err != nil { diff --git 
a/shared/services/rocketpool/client.go b/shared/services/rocketpool/client.go index c4ad7de97..c03525922 100644 --- a/shared/services/rocketpool/client.go +++ b/shared/services/rocketpool/client.go @@ -1203,12 +1203,11 @@ func (c *Client) deployTemplates(cfg *config.RocketPoolConfig, rocketpoolDir str } // Create the rewards file dir - rewardsFilePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(0, false)) + rewardsFileDir, err := homedir.Expand(cfg.Smartnode.GetRewardsTreeDirectory(false)) if err != nil { fmt.Printf("%sWARNING: Couldn't expand the rewards tree file directory (%s). You will not be able to view or claim your rewards until you create the folder manually.%s\n", colorYellow, err.Error(), colorReset) return deployedContainers, nil } - rewardsFileDir := filepath.Dir(rewardsFilePath) err = os.MkdirAll(rewardsFileDir, 0775) if err != nil { fmt.Printf("%sWARNING: Couldn't create the rewards tree file directory (%s). You will not be able to view or claim your rewards until you create the folder [%s] manually.%s\n", colorYellow, err.Error(), rewardsFileDir, colorReset) diff --git a/shared/services/rocketpool/minipool.go b/shared/services/rocketpool/minipool.go index e829fc963..6222b9752 100644 --- a/shared/services/rocketpool/minipool.go +++ b/shared/services/rocketpool/minipool.go @@ -521,8 +521,8 @@ func (c *Client) GetMinipoolRescueDissolvedDetailsForNode() (api.GetMinipoolResc } // Rescue a dissolved minipool by depositing ETH for it to the Beacon deposit contract -func (c *Client) RescueDissolvedMinipool(address common.Address, amount *big.Int) (api.RescueDissolvedMinipoolResponse, error) { - responseBytes, err := c.callAPI(fmt.Sprintf("minipool rescue-dissolved %s %s", address.Hex(), amount.String())) +func (c *Client) RescueDissolvedMinipool(address common.Address, amount *big.Int, submit bool) (api.RescueDissolvedMinipoolResponse, error) { + responseBytes, err := c.callAPI(fmt.Sprintf("minipool rescue-dissolved %s %s %t", address.Hex(), amount.String(), submit)) if err != nil { return api.RescueDissolvedMinipoolResponse{}, fmt.Errorf("Could not rescue dissolved minipool: %w", err) } diff --git a/shared/services/rocketpool/node.go b/shared/services/rocketpool/node.go index ce754cf1d..e74cdca81 100644 --- a/shared/services/rocketpool/node.go +++ b/shared/services/rocketpool/node.go @@ -744,70 +744,6 @@ func (c *Client) DepositContractInfo() (api.DepositContractInfoResponse, error) return response, nil } -// Estimate the gas required to set a voting snapshot delegate -func (c *Client) EstimateSetSnapshotDelegateGas(address common.Address) (api.EstimateSetSnapshotDelegateGasResponse, error) { - responseBytes, err := c.callAPI(fmt.Sprintf("node estimate-set-snapshot-delegate-gas %s", address.Hex())) - if err != nil { - return api.EstimateSetSnapshotDelegateGasResponse{}, fmt.Errorf("Could not get estimate-set-snapshot-delegate-gas response: %w", err) - } - var response api.EstimateSetSnapshotDelegateGasResponse - if err := json.Unmarshal(responseBytes, &response); err != nil { - return api.EstimateSetSnapshotDelegateGasResponse{}, fmt.Errorf("Could not decode estimate-set-snapshot-delegate-gas response: %w", err) - } - if response.Error != "" { - return api.EstimateSetSnapshotDelegateGasResponse{}, fmt.Errorf("Could not get estimate-set-snapshot-delegate-gas response: %s", response.Error) - } - return response, nil -} - -// Set a voting snapshot delegate for the node -func (c *Client) SetSnapshotDelegate(address common.Address) (api.SetSnapshotDelegateResponse, error) { 
- responseBytes, err := c.callAPI(fmt.Sprintf("node set-snapshot-delegate %s", address.Hex())) - if err != nil { - return api.SetSnapshotDelegateResponse{}, fmt.Errorf("Could not get set-snapshot-delegate response: %w", err) - } - var response api.SetSnapshotDelegateResponse - if err := json.Unmarshal(responseBytes, &response); err != nil { - return api.SetSnapshotDelegateResponse{}, fmt.Errorf("Could not decode set-snapshot-delegate response: %w", err) - } - if response.Error != "" { - return api.SetSnapshotDelegateResponse{}, fmt.Errorf("Could not get set-snapshot-delegate response: %s", response.Error) - } - return response, nil -} - -// Estimate the gas required to clear the node's voting snapshot delegate -func (c *Client) EstimateClearSnapshotDelegateGas() (api.EstimateClearSnapshotDelegateGasResponse, error) { - responseBytes, err := c.callAPI("node estimate-clear-snapshot-delegate-gas") - if err != nil { - return api.EstimateClearSnapshotDelegateGasResponse{}, fmt.Errorf("Could not get estimate-clear-snapshot-delegate-gas response: %w", err) - } - var response api.EstimateClearSnapshotDelegateGasResponse - if err := json.Unmarshal(responseBytes, &response); err != nil { - return api.EstimateClearSnapshotDelegateGasResponse{}, fmt.Errorf("Could not decode estimate-clear-snapshot-delegate-gas response: %w", err) - } - if response.Error != "" { - return api.EstimateClearSnapshotDelegateGasResponse{}, fmt.Errorf("Could not get estimate-clear-snapshot-delegate-gas response: %s", response.Error) - } - return response, nil -} - -// Clear the node's voting snapshot delegate -func (c *Client) ClearSnapshotDelegate() (api.ClearSnapshotDelegateResponse, error) { - responseBytes, err := c.callAPI("node clear-snapshot-delegate") - if err != nil { - return api.ClearSnapshotDelegateResponse{}, fmt.Errorf("Could not get clear-snapshot-delegate response: %w", err) - } - var response api.ClearSnapshotDelegateResponse - if err := json.Unmarshal(responseBytes, &response); err != nil { - return api.ClearSnapshotDelegateResponse{}, fmt.Errorf("Could not decode clear-snapshot-delegate response: %w", err) - } - if response.Error != "" { - return api.ClearSnapshotDelegateResponse{}, fmt.Errorf("Could not get clear-snapshot-delegate response: %s", response.Error) - } - return response, nil -} - // Get the initialization status of the fee distributor contract func (c *Client) IsFeeDistributorInitialized() (api.NodeIsFeeDistributorInitializedResponse, error) { responseBytes, err := c.callAPI("node is-fee-distributor-initialized") diff --git a/shared/services/services.go b/shared/services/services.go index 994cbb21e..0edb98b5f 100644 --- a/shared/services/services.go +++ b/shared/services/services.go @@ -39,7 +39,6 @@ var ( ecManager *ExecutionClientManager bcManager *BeaconClientManager rocketPool *rocketpool.RocketPool - snapshotDelegation *contracts.SnapshotDelegation rocketSignerRegistry *contracts.RocketSignerRegistry beaconClient beacon.Client docker *client.Client @@ -51,7 +50,6 @@ var ( initBCManager sync.Once initRocketPool sync.Once initOneInchOracle sync.Once - initSnapshotDelegation sync.Once initRocketSignerRegistry sync.Once initBeaconClient sync.Once initDocker sync.Once @@ -125,18 +123,6 @@ func GetRocketSignerRegistry(c *cli.Context) (*contracts.RocketSignerRegistry, e return getRocketSignerRegistry(cfg, ec) } -func GetSnapshotDelegation(c *cli.Context) (*contracts.SnapshotDelegation, error) { - cfg, err := getConfig(c) - if err != nil { - return nil, err - } - ec, err := getEthClient(c, cfg) 
- if err != nil { - return nil, err - } - return getSnapshotDelegation(cfg, ec) -} - func GetBeaconClient(c *cli.Context) (*BeaconClientManager, error) { cfg, err := getConfig(c) if err != nil { @@ -256,17 +242,6 @@ func getRocketSignerRegistry(cfg *config.RocketPoolConfig, client rocketpool.Exe return rocketSignerRegistry, err } -func getSnapshotDelegation(cfg *config.RocketPoolConfig, client rocketpool.ExecutionClient) (*contracts.SnapshotDelegation, error) { - var err error - initSnapshotDelegation.Do(func() { - address := cfg.Smartnode.GetSnapshotDelegationAddress() - if address != "" { - snapshotDelegation, err = contracts.NewSnapshotDelegation(common.HexToAddress(address), client) - } - }) - return snapshotDelegation, err -} - func getBeaconClient(c *cli.Context, cfg *config.RocketPoolConfig) (*BeaconClientManager, error) { var err error initBCManager.Do(func() { diff --git a/shared/services/state/cli/.gitignore b/shared/services/state/cli/.gitignore new file mode 100644 index 000000000..d369ba9e0 --- /dev/null +++ b/shared/services/state/cli/.gitignore @@ -0,0 +1,2 @@ +cli +*.json diff --git a/shared/services/state/cli/cli.go b/shared/services/state/cli/cli.go new file mode 100644 index 000000000..1099a31df --- /dev/null +++ b/shared/services/state/cli/cli.go @@ -0,0 +1,121 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/rocket-pool/smartnode/shared/services/beacon/client" + "github.com/rocket-pool/smartnode/shared/services/config" + "github.com/rocket-pool/smartnode/shared/services/state" + cfgtypes "github.com/rocket-pool/smartnode/shared/types/config" +) + +// A basic CLI tool which can be used to serialize NetworkState objects to files +// for future use. +// Accepts arguments for a beacon node URL, an execution node URL, and a slot number +// to get the state for. + +var bnFlag = flag.String("b", "http://localhost:5052", "The beacon node URL") +var elFlag = flag.String("e", "http://localhost:8545", "The execution node URL") +var slotFlag = flag.Uint64("slot", 0, "The slot number to get the state for") +var networkFlag = flag.String("network", "mainnet", "The network to get the state for, i.e. 'mainnet' or 'holesky'") +var prettyFlag = flag.Bool("p", false, "Pretty print the output") +var inputFlag = flag.Bool("i", false, "Parse a network state from stdin instead of retrieving it from the network") +var criticalDutiesSlotsFlag = flag.Bool("critical-duties-slots", false, "If passed, output a list of critical duties slots for the given state as if it were the final state in a 6300 epoch interval. 
This is output instead of the state JSON.")
+var criticalDutiesEpochCountFlag = flag.Uint64("critical-duties-epoch-count", 6300, "The number of epochs to consider when calculating critical duties")
+
+func main() {
+	flag.Parse()
+
+	sn := config.NewSmartnodeConfig(nil)
+	switch *networkFlag {
+	case "mainnet":
+		sn.Network.Value = cfgtypes.Network_Mainnet
+	case "holesky":
+		sn.Network.Value = cfgtypes.Network_Holesky
+	default:
+		fmt.Fprintf(os.Stderr, "Invalid network: %s\n", *networkFlag)
+		fmt.Fprintf(os.Stderr, "Valid networks are: mainnet, holesky\n")
+		os.Exit(1)
+	}
+
+	ec, err := ethclient.Dial(*elFlag)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error connecting to execution node: %v\n", err)
+		os.Exit(1)
+	}
+
+	contracts := sn.GetStateManagerContracts()
+	fmt.Fprintf(os.Stderr, "Contracts: %+v\n", contracts)
+
+	rocketStorage := sn.GetStorageAddress()
+
+	rp, err := rocketpool.NewRocketPool(ec, common.HexToAddress(rocketStorage))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error creating rocketpool: %v\n", err)
+		os.Exit(1)
+	}
+	bc := client.NewStandardHttpClient(*bnFlag)
+	sm := state.NewNetworkStateManager(rp, contracts, bc, nil)
+
+	var networkState *state.NetworkState
+
+	if *inputFlag {
+		decoder := json.NewDecoder(os.Stdin)
+		err := decoder.Decode(&networkState)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error decoding network state: %v\n", err)
+			os.Exit(1)
+		}
+	} else if *slotFlag == 0 {
+		fmt.Fprintf(os.Stderr, "Slot number not provided, defaulting to head slot.\n")
+		networkState, err = sm.GetHeadState()
+	} else {
+		networkState, err = sm.GetStateForSlot(*slotFlag)
+	}
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error getting network state: %v\n", err)
+		os.Exit(1)
+	}
+
+	if *criticalDutiesSlotsFlag {
+		criticalDutiesEpochs := state.NewCriticalDutiesEpochs(*criticalDutiesEpochCountFlag, networkState)
+		fmt.Fprintf(os.Stderr, "Critical duties epochs to check: %d\n", len(criticalDutiesEpochs.CriticalDuties))
+
+		criticalDutiesSlots, err := state.NewCriticalDutiesSlots(criticalDutiesEpochs, bc)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error creating critical duties slots: %v\n", err)
+			os.Exit(1)
+		}
+
+		// Serialize the critical duties slots to stdout
+		encoder := json.NewEncoder(os.Stdout)
+		if *prettyFlag {
+			encoder.SetIndent("", " ")
+		}
+		err = encoder.Encode(criticalDutiesSlots)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error encoding critical duties slots: %v\n", err)
+			os.Exit(1)
+		}
+
+		os.Exit(0)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "Network state fetched, outputting to stdout\n")
+	encoder := json.NewEncoder(os.Stdout)
+	if *prettyFlag {
+		encoder.SetIndent("", " ")
+	}
+	err = encoder.Encode(networkState)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error encoding network state: %v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/shared/services/state/critical-duties-slots.go b/shared/services/state/critical-duties-slots.go
new file mode 100644
index 000000000..8a81661eb
--- /dev/null
+++ b/shared/services/state/critical-duties-slots.go
@@ -0,0 +1,103 @@
+package state
+
+import (
+	"github.com/rocket-pool/smartnode/shared/services/beacon"
+)
+
+type CriticalDutiesEpochs struct {
+	// Map of epoch uint64 to a list of validator indices
+	CriticalDuties map[uint64][]string
+}
+
+type CriticalDutiesSlots struct {
+	// Map of validator index to a list of critical duties slots
+	CriticalDuties map[string][]uint64
+}
+
+// Gets the critical duties epochs for a given state as if it were the final state in an interval spanning the given number of epochs
+func
NewCriticalDutiesEpochs(epochs uint64, state *NetworkState) *CriticalDutiesEpochs { + criticalDuties := &CriticalDutiesEpochs{ + CriticalDuties: make(map[uint64][]string), + } + + endSlot := state.BeaconSlotNumber + endEpoch := state.BeaconConfig.SlotToEpoch(endSlot) + // Coerce endSlot to the last slot of the epoch + endSlot = state.BeaconConfig.LastSlotOfEpoch(endEpoch) + // Get the start epoch. Since the end epoch is the last inclusive epoch, we need to subtract 1 from the start epoch + startEpoch := endEpoch - epochs - 1 + + // Check for bond reductions first + for _, minipool := range state.MinipoolDetails { + lastReductionSlot := state.BeaconConfig.FirstSlotAtLeast(minipool.LastBondReductionTime.Int64()) + lastReductionEpoch := state.BeaconConfig.SlotToEpoch(lastReductionSlot) + if lastReductionEpoch < startEpoch { + continue + } + + if lastReductionEpoch > endEpoch { + continue + } + + pubkey := minipool.Pubkey + validatorIndex := state.ValidatorDetails[pubkey].Index + criticalDuties.CriticalDuties[lastReductionEpoch] = append(criticalDuties.CriticalDuties[lastReductionEpoch], validatorIndex) + } + + // Check for smoothing pool opt status changes next + for _, node := range state.NodeDetails { + lastOptStatusChange := state.BeaconConfig.FirstSlotAtLeast(node.SmoothingPoolRegistrationChanged.Int64()) + lastOptStatusChangeEpoch := state.BeaconConfig.SlotToEpoch(lastOptStatusChange) + if lastOptStatusChangeEpoch < startEpoch { + continue + } + + if lastOptStatusChangeEpoch > endEpoch { + continue + } + + // Flag every minipool for this node as having a critical duty + for _, minipool := range state.MinipoolDetailsByNode[node.NodeAddress] { + pubkey := minipool.Pubkey + validatorIndex := state.ValidatorDetails[pubkey].Index + criticalDuties.CriticalDuties[lastOptStatusChangeEpoch] = append(criticalDuties.CriticalDuties[lastOptStatusChangeEpoch], validatorIndex) + } + } + + return criticalDuties +} + +// For each validator in criticalDutiesEpochs, map the epochs to the slot the attestation duty assignment was for +func NewCriticalDutiesSlots(criticalDutiesEpochs *CriticalDutiesEpochs, bc beacon.Client) (*CriticalDutiesSlots, error) { + criticalDuties := &CriticalDutiesSlots{ + CriticalDuties: make(map[string][]uint64), + } + + for epoch, validatorIndices := range criticalDutiesEpochs.CriticalDuties { + // Create a set of validator indices to query when iterating committees + validatorIndicesSet := make(map[string]interface{}) + for _, validatorIndex := range validatorIndices { + validatorIndicesSet[validatorIndex] = struct{}{} + } + + // Get the beacon committee assignments for this epoch + // Rebind e to avoid using a pointer to the accumulator. 
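+		// (epoch here is the range loop variable; Go 1.21 and earlier reuse
+		// it on every iteration, so passing &epoch directly would hand every
+		// GetCommitteesForEpoch call a pointer to the same, constantly
+		// overwritten value. Copying it first gives each call a stable address.)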
+ e := epoch + committees, err := bc.GetCommitteesForEpoch(&e) + if err != nil { + return nil, err + } + + // Iterate over the committees and check if the validator indices are in the set + for i := 0; i < committees.Count(); i++ { + validators := committees.Validators(i) + for _, validator := range validators { + if _, ok := validatorIndicesSet[validator]; ok { + criticalDuties.CriticalDuties[validator] = append(criticalDuties.CriticalDuties[validator], committees.Slot(i)) + } + } + } + } + + return criticalDuties, nil +} diff --git a/shared/services/state/manager.go b/shared/services/state/manager.go index c3c8d523c..25c856fb9 100644 --- a/shared/services/state/manager.go +++ b/shared/services/state/manager.go @@ -10,46 +10,51 @@ import ( "github.com/rocket-pool/rocketpool-go/rocketpool" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" - cfgtypes "github.com/rocket-pool/smartnode/shared/types/config" "github.com/rocket-pool/smartnode/shared/utils/log" ) type NetworkStateManager struct { - cfg *config.RocketPoolConfig - rp *rocketpool.RocketPool - ec rocketpool.ExecutionClient - bc beacon.Client - log *log.ColorLogger - Config *config.RocketPoolConfig - Network cfgtypes.Network - ChainID uint - BeaconConfig beacon.Eth2Config + rp *rocketpool.RocketPool + bc beacon.Client + log *log.ColorLogger + + // Memoized Beacon config + beaconConfig *beacon.Eth2Config + + // Multicaller and batch balance contract addresses + contracts config.StateManagerContracts } // Create a new manager for the network state -func NewNetworkStateManager(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger) (*NetworkStateManager, error) { +func NewNetworkStateManager( + rp *rocketpool.RocketPool, + contracts config.StateManagerContracts, + bc beacon.Client, + log *log.ColorLogger, +) *NetworkStateManager { // Create the manager - m := &NetworkStateManager{ - cfg: cfg, - rp: rp, - ec: ec, - bc: bc, - log: log, - Config: cfg, - Network: cfg.Smartnode.Network.Value.(cfgtypes.Network), - ChainID: cfg.Smartnode.GetChainID(), + return &NetworkStateManager{ + rp: rp, + bc: bc, + log: log, + contracts: contracts, + } +} + +func (m *NetworkStateManager) getBeaconConfig() (*beacon.Eth2Config, error) { + if m.beaconConfig != nil { + return m.beaconConfig, nil } // Get the Beacon config info - var err error - m.BeaconConfig, err = m.bc.GetEth2Config() + beaconConfig, err := m.bc.GetEth2Config() if err != nil { return nil, err } + m.beaconConfig = &beaconConfig - return m, nil - + return m.beaconConfig, nil } // Get the state of the network using the latest Execution layer block @@ -86,27 +91,35 @@ func (m *NetworkStateManager) GetLatestBeaconBlock() (beacon.BeaconBlock, error) // Gets the latest valid finalized block func (m *NetworkStateManager) GetLatestFinalizedBeaconBlock() (beacon.BeaconBlock, error) { + beaconConfig, err := m.getBeaconConfig() + if err != nil { + return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon config: %w", err) + } head, err := m.bc.GetBeaconHead() if err != nil { return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon chain head: %w", err) } - targetSlot := head.FinalizedEpoch*m.BeaconConfig.SlotsPerEpoch + (m.BeaconConfig.SlotsPerEpoch - 1) + targetSlot := head.FinalizedEpoch*beaconConfig.SlotsPerEpoch + (beaconConfig.SlotsPerEpoch - 1) return m.GetLatestProposedBeaconBlock(targetSlot) } // Gets the Beacon slot for the latest execution layer 
block func (m *NetworkStateManager) GetHeadSlot() (uint64, error) { + beaconConfig, err := m.getBeaconConfig() + if err != nil { + return 0, fmt.Errorf("error getting Beacon config: %w", err) + } // Get the latest EL block - latestBlockHeader, err := m.ec.HeaderByNumber(context.Background(), nil) + latestBlockHeader, err := m.rp.Client.HeaderByNumber(context.Background(), nil) if err != nil { return 0, fmt.Errorf("error getting latest EL block: %w", err) } // Get the corresponding Beacon slot based on the timestamp latestBlockTime := time.Unix(int64(latestBlockHeader.Time), 0) - genesisTime := time.Unix(int64(m.BeaconConfig.GenesisTime), 0) + genesisTime := time.Unix(int64(beaconConfig.GenesisTime), 0) secondsSinceGenesis := uint64(latestBlockTime.Sub(genesisTime).Seconds()) - targetSlot := secondsSinceGenesis / m.BeaconConfig.SecondsPerSlot + targetSlot := secondsSinceGenesis / beaconConfig.SecondsPerSlot return targetSlot, nil } @@ -131,7 +144,11 @@ func (m *NetworkStateManager) GetLatestProposedBeaconBlock(targetSlot uint64) (b // Get the state of the network at the provided Beacon slot func (m *NetworkStateManager) getState(slotNumber uint64) (*NetworkState, error) { - state, err := CreateNetworkState(m.cfg, m.rp, m.ec, m.bc, m.log, slotNumber, m.BeaconConfig) + beaconConfig, err := m.getBeaconConfig() + if err != nil { + return nil, fmt.Errorf("error getting Beacon config: %w", err) + } + state, err := createNetworkState(m.contracts, m.rp, m.bc, m.log, slotNumber, beaconConfig) if err != nil { return nil, err } @@ -140,7 +157,11 @@ func (m *NetworkStateManager) getState(slotNumber uint64) (*NetworkState, error) // Get the state of the network for a specific node only at the provided Beacon slot func (m *NetworkStateManager) getStateForNode(nodeAddress common.Address, slotNumber uint64, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) { - state, totalEffectiveStake, err := CreateNetworkStateForNode(m.cfg, m.rp, m.ec, m.bc, m.log, slotNumber, m.BeaconConfig, nodeAddress, calculateTotalEffectiveStake) + beaconConfig, err := m.getBeaconConfig() + if err != nil { + return nil, nil, fmt.Errorf("error getting Beacon config: %w", err) + } + state, totalEffectiveStake, err := createNetworkStateForNode(m.contracts, m.rp, m.bc, m.log, slotNumber, beaconConfig, nodeAddress, calculateTotalEffectiveStake) if err != nil { return nil, nil, err } diff --git a/shared/services/state/network-state.go b/shared/services/state/network-state.go index fe77381e9..983bb66e4 100644 --- a/shared/services/state/network-state.go +++ b/shared/services/state/network-state.go @@ -1,6 +1,7 @@ package state import ( + "encoding/json" "fmt" "math/big" "time" @@ -31,44 +32,134 @@ var fifteenEth = big.NewInt(0).Mul(big.NewInt(15), oneEth) var _13_6137_Eth = big.NewInt(0).Mul(big.NewInt(136137), big.NewInt(1e14)) var _13_Eth = big.NewInt(0).Mul(big.NewInt(13), oneEth) +type ValidatorDetailsMap map[types.ValidatorPubkey]beacon.ValidatorStatus + +func (vdm ValidatorDetailsMap) MarshalJSON() ([]byte, error) { + // Marshal as a slice of ValidatorStatus + out := make([]beacon.ValidatorStatus, 0, len(vdm)) + for _, v := range vdm { + out = append(out, v) + } + return json.Marshal(out) +} + +func (vdm *ValidatorDetailsMap) UnmarshalJSON(data []byte) error { + // Unmarshal as a slice of ValidatorStatus + var inp []beacon.ValidatorStatus + err := json.Unmarshal(data, &inp) + if err != nil { + return err + } + + *vdm = make(ValidatorDetailsMap, len(inp)) + + // Convert back to a map + for _, v := range inp { + // 
Return an error if the pubkey is already in the map + if _, exists := (*vdm)[v.Pubkey]; exists { + return fmt.Errorf("duplicate validator details for pubkey %s", v.Pubkey.Hex()) + } + (*vdm)[v.Pubkey] = v + } + return nil +} + type NetworkState struct { // Network version // Block / slot for this state - ElBlockNumber uint64 - BeaconSlotNumber uint64 - BeaconConfig beacon.Eth2Config + ElBlockNumber uint64 `json:"el_block_number"` + BeaconSlotNumber uint64 `json:"beacon_slot_number"` + BeaconConfig beacon.Eth2Config `json:"beacon_config"` // Network details - NetworkDetails *rpstate.NetworkDetails + NetworkDetails *rpstate.NetworkDetails `json:"network_details"` // Node details - NodeDetails []rpstate.NativeNodeDetails - NodeDetailsByAddress map[common.Address]*rpstate.NativeNodeDetails + NodeDetails []rpstate.NativeNodeDetails `json:"node_details"` + // NodeDetailsByAddress is an index over NodeDetails and is ignored when marshaling to JSON + // it is rebuilt when unmarshaling from JSON. + NodeDetailsByAddress map[common.Address]*rpstate.NativeNodeDetails `json:"-"` // Minipool details - MinipoolDetails []rpstate.NativeMinipoolDetails - MinipoolDetailsByAddress map[common.Address]*rpstate.NativeMinipoolDetails - MinipoolDetailsByNode map[common.Address][]*rpstate.NativeMinipoolDetails + MinipoolDetails []rpstate.NativeMinipoolDetails `json:"minipool_details"` + // These next two fields are indexes over MinipoolDetails and are ignored when marshaling to JSON + // they are rebuilt when unmarshaling from JSON. + MinipoolDetailsByAddress map[common.Address]*rpstate.NativeMinipoolDetails `json:"-"` + MinipoolDetailsByNode map[common.Address][]*rpstate.NativeMinipoolDetails `json:"-"` // Validator details - ValidatorDetails map[types.ValidatorPubkey]beacon.ValidatorStatus + ValidatorDetails ValidatorDetailsMap `json:"validator_details"` // Oracle DAO details - OracleDaoMemberDetails []rpstate.OracleDaoMemberDetails + OracleDaoMemberDetails []rpstate.OracleDaoMemberDetails `json:"oracle_dao_member_details"` // Protocol DAO proposals - ProtocolDaoProposalDetails []protocol.ProtocolDaoProposalDetails + ProtocolDaoProposalDetails []protocol.ProtocolDaoProposalDetails `json:"protocol_dao_proposal_details,omitempty"` // Internal fields log *log.ColorLogger } +func (ns NetworkState) MarshalJSON() ([]byte, error) { + // No changes needed + type Alias NetworkState + a := (*Alias)(&ns) + return json.Marshal(a) +} + +func (ns *NetworkState) UnmarshalJSON(data []byte) error { + type Alias NetworkState + var a Alias + err := json.Unmarshal(data, &a) + if err != nil { + return err + } + *ns = NetworkState(a) + // Rebuild the node details by address index + ns.NodeDetailsByAddress = make(map[common.Address]*rpstate.NativeNodeDetails) + for i, details := range ns.NodeDetails { + if _, ok := ns.NodeDetailsByAddress[details.NodeAddress]; ok { + return fmt.Errorf("duplicate node details for address %s", details.NodeAddress.Hex()) + } + // N.B. 
&details is not the same as &ns.NodeDetails[i] + // &details is the address of the current element in the loop + // &ns.NodeDetails[i] is the address of the struct in the slice + ns.NodeDetailsByAddress[details.NodeAddress] = &ns.NodeDetails[i] + } + + // Rebuild the minipool details by address index + ns.MinipoolDetailsByAddress = make(map[common.Address]*rpstate.NativeMinipoolDetails) + for i, details := range ns.MinipoolDetails { + if _, ok := ns.MinipoolDetailsByAddress[details.MinipoolAddress]; ok { + return fmt.Errorf("duplicate minipool details for address %s", details.MinipoolAddress.Hex()) + } + + // N.B. &details is not the same as &ns.MinipoolDetails[i] + // &details is the address of the current element in the loop + // &ns.MinipoolDetails[i] is the address of the struct in the slice + ns.MinipoolDetailsByAddress[details.MinipoolAddress] = &ns.MinipoolDetails[i] + } + + // Rebuild the minipool details by node index + ns.MinipoolDetailsByNode = make(map[common.Address][]*rpstate.NativeMinipoolDetails) + for i, details := range ns.MinipoolDetails { + // See comments in above loops as to why we're using &ns.MinipoolDetails[i] + currentDetails := &ns.MinipoolDetails[i] + nodeList, exists := ns.MinipoolDetailsByNode[details.NodeAddress] + if !exists { + ns.MinipoolDetailsByNode[details.NodeAddress] = []*rpstate.NativeMinipoolDetails{currentDetails} + continue + } + // See comments in other loops + ns.MinipoolDetailsByNode[details.NodeAddress] = append(nodeList, currentDetails) + } + + return nil +} + // Creates a snapshot of the entire Rocket Pool network state, on both the Execution and Consensus layers -func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig beacon.Eth2Config) (*NetworkState, error) { - // Get the relevant network contracts - multicallerAddress := common.HexToAddress(cfg.Smartnode.GetMulticallAddress()) - balanceBatcherAddress := common.HexToAddress(cfg.Smartnode.GetBalanceBatcherAddress()) +func createNetworkState(batchContracts config.StateManagerContracts, rp *rocketpool.RocketPool, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig *beacon.Eth2Config) (*NetworkState, error) { // Get the execution block for the given slot beaconBlock, exists, err := bc.GetBeaconBlock(fmt.Sprintf("%d", slotNumber)) @@ -92,7 +183,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, MinipoolDetailsByNode: map[common.Address][]*rpstate.NativeMinipoolDetails{}, BeaconSlotNumber: slotNumber, ElBlockNumber: elBlockNumber, - BeaconConfig: beaconConfig, + BeaconConfig: *beaconConfig, log: log, } @@ -100,7 +191,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, start := time.Now() // Network contracts and details - contracts, err := rpstate.NewNetworkContracts(rp, multicallerAddress, balanceBatcherAddress, opts) + contracts, err := rpstate.NewNetworkContracts(rp, batchContracts.Multicaller, batchContracts.BalanceBatcher, opts) if err != nil { return nil, fmt.Errorf("error getting network contracts: %w", err) } @@ -149,7 +240,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, // Calculate avg node fees and distributor shares for _, details := range state.NodeDetails { - rpstate.CalculateAverageFeeAndDistributorShares(rp, contracts, details, state.MinipoolDetailsByNode[details.NodeAddress]) + 
details.CalculateAverageFeeAndDistributorShares(state.MinipoolDetailsByNode[details.NodeAddress]) } // Oracle DAO member details @@ -193,16 +284,12 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, // Creates a snapshot of the Rocket Pool network, but only for a single node // Also gets the total effective RPL stake of the network for convenience since this is required by several node routines -func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig beacon.Eth2Config, nodeAddress common.Address, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) { +func createNetworkStateForNode(batchContracts config.StateManagerContracts, rp *rocketpool.RocketPool, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig *beacon.Eth2Config, nodeAddress common.Address, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) { steps := 5 if calculateTotalEffectiveStake { steps++ } - // Get the relevant network contracts - multicallerAddress := common.HexToAddress(cfg.Smartnode.GetMulticallAddress()) - balanceBatcherAddress := common.HexToAddress(cfg.Smartnode.GetBalanceBatcherAddress()) - // Get the execution block for the given slot beaconBlock, exists, err := bc.GetBeaconBlock(fmt.Sprintf("%d", slotNumber)) if err != nil { @@ -225,7 +312,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock MinipoolDetailsByNode: map[common.Address][]*rpstate.NativeMinipoolDetails{}, BeaconSlotNumber: slotNumber, ElBlockNumber: elBlockNumber, - BeaconConfig: beaconConfig, + BeaconConfig: *beaconConfig, log: log, } @@ -233,7 +320,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock start := time.Now() // Network contracts and details - contracts, err := rpstate.NewNetworkContracts(rp, multicallerAddress, balanceBatcherAddress, opts) + contracts, err := rpstate.NewNetworkContracts(rp, batchContracts.Multicaller, batchContracts.BalanceBatcher, opts) if err != nil { return nil, nil, fmt.Errorf("error getting network contracts: %w", err) } @@ -283,7 +370,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock // Calculate avg node fees and distributor shares for _, details := range state.NodeDetails { - rpstate.CalculateAverageFeeAndDistributorShares(rp, contracts, details, state.MinipoolDetailsByNode[details.NodeAddress]) + details.CalculateAverageFeeAndDistributorShares(state.MinipoolDetailsByNode[details.NodeAddress]) } // Get the total network effective RPL stake @@ -340,7 +427,8 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock return state, totalEffectiveStake, nil } -func (s *NetworkState) GetNodeWeight(eligibleBorrowedEth *big.Int, nodeStake *big.Int) *big.Int { +func (s *NetworkState) GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth *big.Int, nodeStake *big.Int) (*big.Int, *big.Int) { + rplPrice := s.NetworkDetails.RplPrice // stakedRplValueInEth := nodeStake * ratio / 1 Eth @@ -348,11 +436,22 @@ func (s *NetworkState) GetNodeWeight(eligibleBorrowedEth *big.Int, nodeStake *bi stakedRplValueInEth.Mul(nodeStake, rplPrice) stakedRplValueInEth.Quo(stakedRplValueInEth, oneEth) + // Avoid division by zero + if eligibleBorrowedEth.Sign() == 0 { + return stakedRplValueInEth, big.NewInt(0) + } + // percentOfBorrowedEth := stakedRplValueInEth * 100 Eth / eligibleBorrowedEth 
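+	// For example (illustrative numbers only): nodeStake = 1000 RPL (1000e18)
+	// at rplPrice = 0.01 ETH (1e16) gives stakedRplValueInEth = 10e18 (10 ETH);
+	// with eligibleBorrowedEth = 48e18, percentOfBorrowedEth works out to
+	// 10e18 * 100e18 / 48e18, roughly 20.83e18, i.e. about 20.83% in
+	// 18-decimal fixed point.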
percentOfBorrowedEth := big.NewInt(0) percentOfBorrowedEth.Mul(stakedRplValueInEth, oneHundredEth) percentOfBorrowedEth.Quo(percentOfBorrowedEth, eligibleBorrowedEth) + return stakedRplValueInEth, percentOfBorrowedEth +} + +func (s *NetworkState) GetNodeWeight(eligibleBorrowedEth *big.Int, nodeStake *big.Int) *big.Int { + stakedRplValueInEth, percentOfBorrowedEth := s.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeStake) + // If at or under 15%, return 100 * stakedRplValueInEth if percentOfBorrowedEth.Cmp(fifteenEth) <= 0 { stakedRplValueInEth.Mul(stakedRplValueInEth, oneHundred) diff --git a/shared/services/state/utils.go b/shared/services/state/utils.go index b09b06c28..d7bb23dd0 100644 --- a/shared/services/state/utils.go +++ b/shared/services/state/utils.go @@ -7,31 +7,30 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/smartnode/shared/services/config" ) // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block -func GetClaimIntervalTime(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Duration, error) { +func GetClaimIntervalTime(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Duration, error) { return rewards.GetClaimIntervalTime(rp, opts) } // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block -func GetNodeOperatorRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { +func GetNodeOperatorRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { return rewards.GetNodeOperatorRewardsPercent(rp, opts) } // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block -func GetTrustedNodeOperatorRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { +func GetTrustedNodeOperatorRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { return rewards.GetTrustedNodeOperatorRewardsPercent(rp, opts) } // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block -func GetProtocolDaoRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { +func GetProtocolDaoRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { return rewards.GetProtocolDaoRewardsPercent(rp, opts) } // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block -func GetPendingRPLRewards(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { +func GetPendingRPLRewards(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { return rewards.GetPendingRPLRewards(rp, opts) } diff --git a/shared/types/api/node.go b/shared/types/api/node.go index e7613cfa0..7c98400d9 100644 --- a/shared/types/api/node.go +++ b/shared/types/api/node.go @@ -81,9 +81,11 @@ type NodeStatusResponse struct { ProposalVotes []SnapshotProposalVote `json:"proposalVotes"` ActiveSnapshotProposals []SnapshotProposal `json:"activeSnapshotProposals"` } `json:"snapshotResponse"` - Alerts []NodeAlert `json:"alerts"` - SignallingAddress common.Address 
`json:"signallingAddress"`
-	SignallingAddressFormatted string                     `json:"signallingAddressFormatted"`
+	Alerts                     []NodeAlert       `json:"alerts"`
+	SignallingAddress          common.Address    `json:"signallingAddress"`
+	SignallingAddressFormatted string            `json:"signallingAddressFormatted"`
+	Minipools                  []MinipoolDetails `json:"minipools"`
+	LatestDelegate             common.Address    `json:"latestDelegate"`
 }
 
 type NodeAlert struct {
@@ -497,30 +499,6 @@ type NodeSignResponse struct {
 	SignedData string `json:"signedData"`
 }
 
-type EstimateSetSnapshotDelegateGasResponse struct {
-	Status  string             `json:"status"`
-	Error   string             `json:"error"`
-	GasInfo rocketpool.GasInfo `json:"gasInfo"`
-}
-
-type SetSnapshotDelegateResponse struct {
-	Status string      `json:"status"`
-	Error  string      `json:"error"`
-	TxHash common.Hash `json:"txHash"`
-}
-
-type EstimateClearSnapshotDelegateGasResponse struct {
-	Status  string             `json:"status"`
-	Error   string             `json:"error"`
-	GasInfo rocketpool.GasInfo `json:"gasInfo"`
-}
-
-type ClearSnapshotDelegateResponse struct {
-	Status string      `json:"status"`
-	Error  string      `json:"error"`
-	TxHash common.Hash `json:"txHash"`
-}
-
 type NodeIsFeeDistributorInitializedResponse struct {
 	Status string `json:"status"`
 	Error  string `json:"error"`
diff --git a/shared/types/api/state.go b/shared/types/api/state.go
new file mode 100644
index 000000000..3274586f3
--- /dev/null
+++ b/shared/types/api/state.go
@@ -0,0 +1,5 @@
+package api
+
+type BeaconStateResponse struct {
+	Proof []string `json:"proof"`
+}
diff --git a/shared/types/eth2/state.go b/shared/types/eth2/state.go
new file mode 100644
index 000000000..963191f13
--- /dev/null
+++ b/shared/types/eth2/state.go
@@ -0,0 +1,255 @@
+package eth2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/bits"
+	"reflect"
+	"sync/atomic"
+)
+
+// Important indices for proof generation:
+// beaconStateDenebValidatorsIndex is the field offset of the Validators field in the BeaconStateDeneb struct
+const beaconStateDenebValidatorsIndex uint64 = 11
+
+// If this ever isn't a power of two, we need to round up to the next power of two
+const beaconStateValidatorsMaxLength uint64 = 1 << 40
+const beaconBlockHeaderStateRootGeneralizedIndex uint64 = 11 // Container with 5 fields, so gid 8 is the first field. We want the 4th field, so gid 8 + 3 = 11
+const beaconStateValidatorWithdrawalCredentialsPubkeyGeneralizedIndex uint64 = 4 // Container with 8 fields, so gid 8 is the first field.
We want the parent of 1st field, so gid 8 / 2 = 4 +// See https://github.com/ethereum/consensus-specs/blob/dev/ssz/merkle-proofs.md for general index calculation and helpers + +func getPowerOfTwoCeil(x uint64) uint64 { + // Base case + if x <= 1 { + return 1 + } + + // Check if already a power of two + if x&(x-1) == 0 { + return x + } + + // Find the most significant bit + msb := bits.Len64(x) - 1 + return 1 << (msb + 1) +} + +var beaconStateChunkSize atomic.Uint64 + +func getDenebStateChunkSize() uint64 { + // Use a static value to avoid multiple reflection calls + storedChunkSize := beaconStateChunkSize.Load() + if storedChunkSize == 0 { + s := reflect.TypeOf(BeaconStateDeneb{}).NumField() + beaconStateChunkSize.Store(uint64(s)) + storedChunkSize = uint64(s) + } + return storedChunkSize +} + +func getDenebGeneralizedIndexForValidators() uint64 { + // There's 28 fields, so rounding up to the next power of two is 32, a left-aligned node + // BeaconStateDenebValidatorsIndex is the 11th field, so its generalized index is 32 + 11 = 43 + return getPowerOfTwoCeil(getDenebStateChunkSize()) + beaconStateDenebValidatorsIndex +} + +func (state *BeaconStateDeneb) getGeneralizedIndexForValidator(index uint64) uint64 { + root := getDenebGeneralizedIndexForValidators() + + // Now, grab the validator index within the list + // `start` is `index * 32` and `pos` is `start / 32` so pos is just `index` + pos := index + baseIndex := uint64(2) // Lists have a base index of 2 + root = root*baseIndex*getPowerOfTwoCeil(beaconStateValidatorsMaxLength) + pos + + // root is now the generalized index for the validator + return root +} + +func (state *BeaconStateDeneb) validatorStateProof(index uint64) ([][]byte, error) { + + // Convert the state to a proof tree + root, err := state.GetTree() + if err != nil { + return nil, fmt.Errorf("could not get state tree: %w", err) + } + + // Find the validator's generalized index + generalizedIndex := state.getGeneralizedIndexForValidator(index) + + // Grab the proof for that index + proof, err := root.Prove(int(generalizedIndex)) + if err != nil { + return nil, fmt.Errorf("could not get proof for validator: %w", err) + } + + // Sanity check that the proof leaf matches the expected validator + validatorHashTreeRoot, err := state.Validators[index].HashTreeRoot() + if err != nil { + return nil, fmt.Errorf("could not get hash tree root for validator: %w", err) + } + if !bytes.Equal(proof.Leaf, validatorHashTreeRoot[:]) { + return nil, fmt.Errorf("proof leaf does not match expected validator") + } + + return proof.Hashes, nil + +} + +func (validator *Validator) validatorCredentialsPubkeyProof() ([][]byte, error) { + // Just get the portion of the proof for the validator's credentials. + generalizedIndex := beaconStateValidatorWithdrawalCredentialsPubkeyGeneralizedIndex + root, err := validator.GetTree() + if err != nil { + return nil, fmt.Errorf("could not get validator tree: %w", err) + } + proof, err := root.Prove(int(generalizedIndex)) + if err != nil { + return nil, fmt.Errorf("could not get proof for validator credentials: %w", err) + } + return proof.Hashes, nil +} + +// ValidatorCredentialsProof computes the merkle proof for a validator's credentials +// at a specific index in the validator registry. 
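+// The returned hashes are three proof segments chained together: the branch
+// for the pubkey/withdrawal_credentials parent node inside the Validator
+// container (gid 4, since the 8-field container's leaves start at gid 8),
+// the branch for that validator's leaf within the state's validator
+// registry, and the branch for state_root inside the BeaconBlockHeader,
+// letting a verifier walk from the credentials node all the way up to the
+// block header root that the EL proves against.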
+func (state *BeaconStateDeneb) ValidatorCredentialsProof(index uint64) ([][]byte, error) { + + if index >= uint64(len(state.Validators)) { + return nil, errors.New("validator index out of bounds") + } + + // Get the validator's credentials proof + credentialsProof, err := state.Validators[index].validatorCredentialsPubkeyProof() + if err != nil { + return nil, fmt.Errorf("could not get validator credentials proof: %w", err) + } + + stateProof, err := state.validatorStateProof(index) + if err != nil { + return nil, fmt.Errorf("could not get validator state proof: %w", err) + } + + // The EL proves against BeaconBlockHeader root, so we need to merge the state proof with that. + generalizedIndex := beaconBlockHeaderStateRootGeneralizedIndex + root, err := state.LatestBlockHeader.GetTree() + if err != nil { + return nil, fmt.Errorf("could not get block header tree: %w", err) + } + blockHeaderProof, err := root.Prove(int(generalizedIndex)) + if err != nil { + return nil, fmt.Errorf("could not get proof for block header: %w", err) + } + + out := append(credentialsProof, stateProof...) + out = append(out, blockHeaderProof.Hashes...) + + return out, nil +} + +// Taken from https://github.com/prysmaticlabs/prysm/blob/ac1717f1e44bd218b0bd3af0c4dec951c075f462/proto/prysm/v1alpha1/beacon_state.pb.go#L1574 +// Unexported fields stripped, as well as proto-related field tags. JSON and ssz-size tags are preserved, and nested types are replaced with local copies as well. +type BeaconStateDeneb struct { + GenesisTime uint64 `json:"genesis_time"` + GenesisValidatorsRoot []byte `json:"genesis_validators_root" ssz-size:"32"` + Slot uint64 `json:"slot"` + Fork *Fork `json:"fork"` + LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"` + BlockRoots [8192][32]byte `json:"block_roots" ssz-size:"8192,32"` + StateRoots [8192][32]byte `json:"state_roots" ssz-size:"8192,32"` + HistoricalRoots [][]byte `json:"historical_roots" ssz-max:"16777216" ssz-size:"?,32"` + Eth1Data *Eth1Data `json:"eth1_data"` + Eth1DataVotes []*Eth1Data `json:"eth1_data_votes" ssz-max:"2048"` + Eth1DepositIndex uint64 `json:"eth1_deposit_index"` + Validators []*Validator `json:"validators" ssz-max:"1099511627776"` + Balances []uint64 `json:"balances" ssz-max:"1099511627776"` + RandaoMixes [][]byte `json:"randao_mixes" ssz-size:"65536,32"` + Slashings []uint64 `json:"slashings" ssz-size:"8192"` + PreviousEpochParticipation []byte `json:"previous_epoch_participation" ssz-max:"1099511627776"` + CurrentEpochParticipation []byte `json:"current_epoch_participation" ssz-max:"1099511627776"` + JustificationBits [1]byte `json:"justification_bits" ssz-size:"1"` + PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"` + CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"` + FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"` + InactivityScores []uint64 `json:"inactivity_scores" ssz-max:"1099511627776"` + CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"` + NextSyncCommittee *SyncCommittee `json:"next_sync_committee"` + LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"` + NextWithdrawalIndex uint64 `json:"next_withdrawal_index"` + NextWithdrawalValidatorIndex uint64 `json:"next_withdrawal_validator_index"` + HistoricalSummaries []*HistoricalSummary `json:"historical_summaries" ssz-max:"16777216"` +} + +// Remaining types taken from 
https://github.com/ferranbt/fastssz/blob/03cd29050aa2555fd4abc29ace7c1fac8b8fb25e/spectests/structs.go + +// Per-Fork ExecutionPayloadHeaders + +type ExecutionPayloadHeaderDeneb struct { + ParentHash [32]byte `json:"parent_hash" ssz-size:"32"` + FeeRecipient [20]byte `json:"fee_recipient" ssz-size:"20"` + StateRoot [32]byte `json:"state_root" ssz-size:"32"` + ReceiptsRoot [32]byte `json:"receipts_root" ssz-size:"32"` + LogsBloom [256]byte `json:"logs_bloom" ssz-size:"256"` + PrevRandao [32]byte `json:"prev_randao" ssz-size:"32"` + BlockNumber uint64 `json:"block_number"` + GasLimit uint64 `json:"gas_limit"` + GasUsed uint64 `json:"gas_used"` + Timestamp uint64 `json:"timestamp"` + ExtraData []byte `json:"extra_data" ssz-max:"32"` + BaseFeePerGas Uint256 `json:"base_fee_per_gas" ssz-size:"32"` + BlockHash [32]byte `json:"block_hash" ssz-size:"32"` + TransactionsRoot [32]byte `json:"transactions_root" ssz-size:"32"` + WithdrawalRoot [32]byte `json:"withdrawals_root" ssz-size:"32"` + BlobGasUsed uint64 `json:"blob_gas_used"` + ExcessBlobGas uint64 `json:"excess_blob_gas"` +} + +// Generic types + +type Uint256 [32]byte + +type Fork struct { + PreviousVersion []byte `json:"previous_version" ssz-size:"4"` + CurrentVersion []byte `json:"current_version" ssz-size:"4"` + Epoch uint64 `json:"epoch"` +} + +type BeaconBlockHeader struct { + Slot uint64 `json:"slot"` + ProposerIndex uint64 `json:"proposer_index"` + ParentRoot []byte `json:"parent_root" ssz-size:"32"` + StateRoot []byte `json:"state_root" ssz-size:"32"` + BodyRoot []byte `json:"body_root" ssz-size:"32"` +} + +type Eth1Data struct { + DepositRoot []byte `json:"deposit_root" ssz-size:"32"` + DepositCount uint64 `json:"deposit_count"` + BlockHash []byte `json:"block_hash" ssz-size:"32"` +} + +type Validator struct { + Pubkey []byte `json:"pubkey" ssz-size:"48"` + WithdrawalCredentials []byte `json:"withdrawal_credentials" ssz-size:"32"` + EffectiveBalance uint64 `json:"effective_balance"` + Slashed bool `json:"slashed"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` + ActivationEpoch uint64 `json:"activation_epoch"` + ExitEpoch uint64 `json:"exit_epoch"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` +} + +type Checkpoint struct { + Epoch uint64 `json:"epoch"` + Root []byte `json:"root" ssz-size:"32"` +} + +type SyncCommittee struct { + PubKeys [][]byte `json:"pubkeys" ssz-size:"512,48"` + AggregatePubKey [48]byte `json:"aggregate_pubkey" ssz-size:"48"` +} + +type HistoricalSummary struct { + BlockSummaryRoot [32]byte `json:"block_summary_root" ssz-size:"32"` + StateSummaryRoot [32]byte `json:"state_summary_root" ssz-size:"32"` +} diff --git a/shared/types/eth2/state_encoding.go b/shared/types/eth2/state_encoding.go new file mode 100644 index 000000000..cf8a58177 --- /dev/null +++ b/shared/types/eth2/state_encoding.go @@ -0,0 +1,1823 @@ +// Code generated by fastssz. DO NOT EDIT. 
+// Hash: 8334e51dc7fef48f4bfcdf131ce2d53118dc8c0cba4ed032b43e8a8e83319297 +// Version: 0.1.3 +package eth2 + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the BeaconStateDeneb object +func (b *BeaconStateDeneb) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconStateDeneb object to a target array +func (b *BeaconStateDeneb) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(2736653) + + // Field (0) 'GenesisTime' + dst = ssz.MarshalUint64(dst, b.GenesisTime) + + // Field (1) 'GenesisValidatorsRoot' + if size := len(b.GenesisValidatorsRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.GenesisValidatorsRoot", size, 32) + return + } + dst = append(dst, b.GenesisValidatorsRoot...) + + // Field (2) 'Slot' + dst = ssz.MarshalUint64(dst, b.Slot) + + // Field (3) 'Fork' + if b.Fork == nil { + b.Fork = new(Fork) + } + if dst, err = b.Fork.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'LatestBlockHeader' + if b.LatestBlockHeader == nil { + b.LatestBlockHeader = new(BeaconBlockHeader) + } + if dst, err = b.LatestBlockHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (5) 'BlockRoots' + for ii := 0; ii < 8192; ii++ { + dst = append(dst, b.BlockRoots[ii][:]...) + } + + // Field (6) 'StateRoots' + for ii := 0; ii < 8192; ii++ { + dst = append(dst, b.StateRoots[ii][:]...) + } + + // Offset (7) 'HistoricalRoots' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.HistoricalRoots) * 32 + + // Field (8) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if dst, err = b.Eth1Data.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (9) 'Eth1DataVotes' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Eth1DataVotes) * 72 + + // Field (10) 'Eth1DepositIndex' + dst = ssz.MarshalUint64(dst, b.Eth1DepositIndex) + + // Offset (11) 'Validators' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Validators) * 121 + + // Offset (12) 'Balances' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Balances) * 8 + + // Field (13) 'RandaoMixes' + if size := len(b.RandaoMixes); size != 65536 { + err = ssz.ErrVectorLengthFn("BeaconStateDeneb.RandaoMixes", size, 65536) + return + } + for ii := 0; ii < 65536; ii++ { + if size := len(b.RandaoMixes[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.RandaoMixes[ii]", size, 32) + return + } + dst = append(dst, b.RandaoMixes[ii]...) + } + + // Field (14) 'Slashings' + if size := len(b.Slashings); size != 8192 { + err = ssz.ErrVectorLengthFn("BeaconStateDeneb.Slashings", size, 8192) + return + } + for ii := 0; ii < 8192; ii++ { + dst = ssz.MarshalUint64(dst, b.Slashings[ii]) + } + + // Offset (15) 'PreviousEpochParticipation' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.PreviousEpochParticipation) + + // Offset (16) 'CurrentEpochParticipation' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.CurrentEpochParticipation) + + // Field (17) 'JustificationBits' + dst = append(dst, b.JustificationBits[:]...) 
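+	// (Each ssz.WriteOffset call in this fixed section emits a 4-byte offset
+	// into the variable-length heap that begins after the 2736653-byte fixed
+	// prefix; the payload bytes themselves are appended further below, once
+	// all fixed fields have been written.)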
+ + // Field (18) 'PreviousJustifiedCheckpoint' + if b.PreviousJustifiedCheckpoint == nil { + b.PreviousJustifiedCheckpoint = new(Checkpoint) + } + if dst, err = b.PreviousJustifiedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if b.CurrentJustifiedCheckpoint == nil { + b.CurrentJustifiedCheckpoint = new(Checkpoint) + } + if dst, err = b.CurrentJustifiedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Field (20) 'FinalizedCheckpoint' + if b.FinalizedCheckpoint == nil { + b.FinalizedCheckpoint = new(Checkpoint) + } + if dst, err = b.FinalizedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (21) 'InactivityScores' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.InactivityScores) * 8 + + // Field (22) 'CurrentSyncCommittee' + if b.CurrentSyncCommittee == nil { + b.CurrentSyncCommittee = new(SyncCommittee) + } + if dst, err = b.CurrentSyncCommittee.MarshalSSZTo(dst); err != nil { + return + } + + // Field (23) 'NextSyncCommittee' + if b.NextSyncCommittee == nil { + b.NextSyncCommittee = new(SyncCommittee) + } + if dst, err = b.NextSyncCommittee.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (24) 'LatestExecutionPayloadHeader' + dst = ssz.WriteOffset(dst, offset) + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeaderDeneb) + } + offset += b.LatestExecutionPayloadHeader.SizeSSZ() + + // Field (25) 'NextWithdrawalIndex' + dst = ssz.MarshalUint64(dst, b.NextWithdrawalIndex) + + // Field (26) 'NextWithdrawalValidatorIndex' + dst = ssz.MarshalUint64(dst, b.NextWithdrawalValidatorIndex) + + // Offset (27) 'HistoricalSummaries' + dst = ssz.WriteOffset(dst, offset) + + // Field (7) 'HistoricalRoots' + if size := len(b.HistoricalRoots); size > 16777216 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.HistoricalRoots", size, 16777216) + return + } + for ii := 0; ii < len(b.HistoricalRoots); ii++ { + if size := len(b.HistoricalRoots[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.HistoricalRoots[ii]", size, 32) + return + } + dst = append(dst, b.HistoricalRoots[ii]...) + } + + // Field (9) 'Eth1DataVotes' + if size := len(b.Eth1DataVotes); size > 2048 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.Eth1DataVotes", size, 2048) + return + } + for ii := 0; ii < len(b.Eth1DataVotes); ii++ { + if dst, err = b.Eth1DataVotes[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (11) 'Validators' + if size := len(b.Validators); size > 1099511627776 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.Validators", size, 1099511627776) + return + } + for ii := 0; ii < len(b.Validators); ii++ { + if dst, err = b.Validators[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (12) 'Balances' + if size := len(b.Balances); size > 1099511627776 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.Balances", size, 1099511627776) + return + } + for ii := 0; ii < len(b.Balances); ii++ { + dst = ssz.MarshalUint64(dst, b.Balances[ii]) + } + + // Field (15) 'PreviousEpochParticipation' + if size := len(b.PreviousEpochParticipation); size > 1099511627776 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.PreviousEpochParticipation", size, 1099511627776) + return + } + dst = append(dst, b.PreviousEpochParticipation...) 
+ + // Field (16) 'CurrentEpochParticipation' + if size := len(b.CurrentEpochParticipation); size > 1099511627776 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.CurrentEpochParticipation", size, 1099511627776) + return + } + dst = append(dst, b.CurrentEpochParticipation...) + + // Field (21) 'InactivityScores' + if size := len(b.InactivityScores); size > 1099511627776 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.InactivityScores", size, 1099511627776) + return + } + for ii := 0; ii < len(b.InactivityScores); ii++ { + dst = ssz.MarshalUint64(dst, b.InactivityScores[ii]) + } + + // Field (24) 'LatestExecutionPayloadHeader' + if dst, err = b.LatestExecutionPayloadHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (27) 'HistoricalSummaries' + if size := len(b.HistoricalSummaries); size > 16777216 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.HistoricalSummaries", size, 16777216) + return + } + for ii := 0; ii < len(b.HistoricalSummaries); ii++ { + if dst, err = b.HistoricalSummaries[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconStateDeneb object +func (b *BeaconStateDeneb) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 2736653 { + return ssz.ErrSize + } + + tail := buf + var o7, o9, o11, o12, o15, o16, o21, o24, o27 uint64 + + // Field (0) 'GenesisTime' + b.GenesisTime = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'GenesisValidatorsRoot' + if cap(b.GenesisValidatorsRoot) == 0 { + b.GenesisValidatorsRoot = make([]byte, 0, len(buf[8:40])) + } + b.GenesisValidatorsRoot = append(b.GenesisValidatorsRoot, buf[8:40]...) + + // Field (2) 'Slot' + b.Slot = ssz.UnmarshallUint64(buf[40:48]) + + // Field (3) 'Fork' + if b.Fork == nil { + b.Fork = new(Fork) + } + if err = b.Fork.UnmarshalSSZ(buf[48:64]); err != nil { + return err + } + + // Field (4) 'LatestBlockHeader' + if b.LatestBlockHeader == nil { + b.LatestBlockHeader = new(BeaconBlockHeader) + } + if err = b.LatestBlockHeader.UnmarshalSSZ(buf[64:176]); err != nil { + return err + } + + // Field (5) 'BlockRoots' + + for ii := 0; ii < 8192; ii++ { + copy(b.BlockRoots[ii][:], buf[176:262320][ii*32:(ii+1)*32]) + } + + // Field (6) 'StateRoots' + + for ii := 0; ii < 8192; ii++ { + copy(b.StateRoots[ii][:], buf[262320:524464][ii*32:(ii+1)*32]) + } + + // Offset (7) 'HistoricalRoots' + if o7 = ssz.ReadOffset(buf[524464:524468]); o7 > size { + return ssz.ErrOffset + } + + if o7 != 2736653 { + return ssz.ErrInvalidVariableOffset + } + + // Field (8) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.UnmarshalSSZ(buf[524468:524540]); err != nil { + return err + } + + // Offset (9) 'Eth1DataVotes' + if o9 = ssz.ReadOffset(buf[524540:524544]); o9 > size || o7 > o9 { + return ssz.ErrOffset + } + + // Field (10) 'Eth1DepositIndex' + b.Eth1DepositIndex = ssz.UnmarshallUint64(buf[524544:524552]) + + // Offset (11) 'Validators' + if o11 = ssz.ReadOffset(buf[524552:524556]); o11 > size || o9 > o11 { + return ssz.ErrOffset + } + + // Offset (12) 'Balances' + if o12 = ssz.ReadOffset(buf[524556:524560]); o12 > size || o11 > o12 { + return ssz.ErrOffset + } + + // Field (13) 'RandaoMixes' + b.RandaoMixes = make([][]byte, 65536) + for ii := 0; ii < 65536; ii++ { + if cap(b.RandaoMixes[ii]) == 0 { + b.RandaoMixes[ii] = make([]byte, 0, len(buf[524560:2621712][ii*32:(ii+1)*32])) + } + b.RandaoMixes[ii] = append(b.RandaoMixes[ii], buf[524560:2621712][ii*32:(ii+1)*32]...) 
+ } + + // Field (14) 'Slashings' + b.Slashings = ssz.ExtendUint64(b.Slashings, 8192) + for ii := 0; ii < 8192; ii++ { + b.Slashings[ii] = ssz.UnmarshallUint64(buf[2621712:2687248][ii*8 : (ii+1)*8]) + } + + // Offset (15) 'PreviousEpochParticipation' + if o15 = ssz.ReadOffset(buf[2687248:2687252]); o15 > size || o12 > o15 { + return ssz.ErrOffset + } + + // Offset (16) 'CurrentEpochParticipation' + if o16 = ssz.ReadOffset(buf[2687252:2687256]); o16 > size || o15 > o16 { + return ssz.ErrOffset + } + + // Field (17) 'JustificationBits' + copy(b.JustificationBits[:], buf[2687256:2687257]) + + // Field (18) 'PreviousJustifiedCheckpoint' + if b.PreviousJustifiedCheckpoint == nil { + b.PreviousJustifiedCheckpoint = new(Checkpoint) + } + if err = b.PreviousJustifiedCheckpoint.UnmarshalSSZ(buf[2687257:2687297]); err != nil { + return err + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if b.CurrentJustifiedCheckpoint == nil { + b.CurrentJustifiedCheckpoint = new(Checkpoint) + } + if err = b.CurrentJustifiedCheckpoint.UnmarshalSSZ(buf[2687297:2687337]); err != nil { + return err + } + + // Field (20) 'FinalizedCheckpoint' + if b.FinalizedCheckpoint == nil { + b.FinalizedCheckpoint = new(Checkpoint) + } + if err = b.FinalizedCheckpoint.UnmarshalSSZ(buf[2687337:2687377]); err != nil { + return err + } + + // Offset (21) 'InactivityScores' + if o21 = ssz.ReadOffset(buf[2687377:2687381]); o21 > size || o16 > o21 { + return ssz.ErrOffset + } + + // Field (22) 'CurrentSyncCommittee' + if b.CurrentSyncCommittee == nil { + b.CurrentSyncCommittee = new(SyncCommittee) + } + if err = b.CurrentSyncCommittee.UnmarshalSSZ(buf[2687381:2712005]); err != nil { + return err + } + + // Field (23) 'NextSyncCommittee' + if b.NextSyncCommittee == nil { + b.NextSyncCommittee = new(SyncCommittee) + } + if err = b.NextSyncCommittee.UnmarshalSSZ(buf[2712005:2736629]); err != nil { + return err + } + + // Offset (24) 'LatestExecutionPayloadHeader' + if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 { + return ssz.ErrOffset + } + + // Field (25) 'NextWithdrawalIndex' + b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736633:2736641]) + + // Field (26) 'NextWithdrawalValidatorIndex' + b.NextWithdrawalValidatorIndex = ssz.UnmarshallUint64(buf[2736641:2736649]) + + // Offset (27) 'HistoricalSummaries' + if o27 = ssz.ReadOffset(buf[2736649:2736653]); o27 > size || o24 > o27 { + return ssz.ErrOffset + } + + // Field (7) 'HistoricalRoots' + { + buf = tail[o7:o9] + num, err := ssz.DivideInt2(len(buf), 32, 16777216) + if err != nil { + return err + } + b.HistoricalRoots = make([][]byte, num) + for ii := 0; ii < num; ii++ { + if cap(b.HistoricalRoots[ii]) == 0 { + b.HistoricalRoots[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32])) + } + b.HistoricalRoots[ii] = append(b.HistoricalRoots[ii], buf[ii*32:(ii+1)*32]...) 
+ } + } + + // Field (9) 'Eth1DataVotes' + { + buf = tail[o9:o11] + num, err := ssz.DivideInt2(len(buf), 72, 2048) + if err != nil { + return err + } + b.Eth1DataVotes = make([]*Eth1Data, num) + for ii := 0; ii < num; ii++ { + if b.Eth1DataVotes[ii] == nil { + b.Eth1DataVotes[ii] = new(Eth1Data) + } + if err = b.Eth1DataVotes[ii].UnmarshalSSZ(buf[ii*72 : (ii+1)*72]); err != nil { + return err + } + } + } + + // Field (11) 'Validators' + { + buf = tail[o11:o12] + num, err := ssz.DivideInt2(len(buf), 121, 1099511627776) + if err != nil { + return err + } + b.Validators = make([]*Validator, num) + for ii := 0; ii < num; ii++ { + if b.Validators[ii] == nil { + b.Validators[ii] = new(Validator) + } + if err = b.Validators[ii].UnmarshalSSZ(buf[ii*121 : (ii+1)*121]); err != nil { + return err + } + } + } + + // Field (12) 'Balances' + { + buf = tail[o12:o15] + num, err := ssz.DivideInt2(len(buf), 8, 1099511627776) + if err != nil { + return err + } + b.Balances = ssz.ExtendUint64(b.Balances, num) + for ii := 0; ii < num; ii++ { + b.Balances[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8]) + } + } + + // Field (15) 'PreviousEpochParticipation' + { + buf = tail[o15:o16] + if len(buf) > 1099511627776 { + return ssz.ErrBytesLength + } + if cap(b.PreviousEpochParticipation) == 0 { + b.PreviousEpochParticipation = make([]byte, 0, len(buf)) + } + b.PreviousEpochParticipation = append(b.PreviousEpochParticipation, buf...) + } + + // Field (16) 'CurrentEpochParticipation' + { + buf = tail[o16:o21] + if len(buf) > 1099511627776 { + return ssz.ErrBytesLength + } + if cap(b.CurrentEpochParticipation) == 0 { + b.CurrentEpochParticipation = make([]byte, 0, len(buf)) + } + b.CurrentEpochParticipation = append(b.CurrentEpochParticipation, buf...) + } + + // Field (21) 'InactivityScores' + { + buf = tail[o21:o24] + num, err := ssz.DivideInt2(len(buf), 8, 1099511627776) + if err != nil { + return err + } + b.InactivityScores = ssz.ExtendUint64(b.InactivityScores, num) + for ii := 0; ii < num; ii++ { + b.InactivityScores[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8]) + } + } + + // Field (24) 'LatestExecutionPayloadHeader' + { + buf = tail[o24:o27] + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeaderDeneb) + } + if err = b.LatestExecutionPayloadHeader.UnmarshalSSZ(buf); err != nil { + return err + } + } + + // Field (27) 'HistoricalSummaries' + { + buf = tail[o27:] + num, err := ssz.DivideInt2(len(buf), 64, 16777216) + if err != nil { + return err + } + b.HistoricalSummaries = make([]*HistoricalSummary, num) + for ii := 0; ii < num; ii++ { + if b.HistoricalSummaries[ii] == nil { + b.HistoricalSummaries[ii] = new(HistoricalSummary) + } + if err = b.HistoricalSummaries[ii].UnmarshalSSZ(buf[ii*64 : (ii+1)*64]); err != nil { + return err + } + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateDeneb object +func (b *BeaconStateDeneb) SizeSSZ() (size int) { + size = 2736653 + + // Field (7) 'HistoricalRoots' + size += len(b.HistoricalRoots) * 32 + + // Field (9) 'Eth1DataVotes' + size += len(b.Eth1DataVotes) * 72 + + // Field (11) 'Validators' + size += len(b.Validators) * 121 + + // Field (12) 'Balances' + size += len(b.Balances) * 8 + + // Field (15) 'PreviousEpochParticipation' + size += len(b.PreviousEpochParticipation) + + // Field (16) 'CurrentEpochParticipation' + size += len(b.CurrentEpochParticipation) + + // Field (21) 'InactivityScores' + size += len(b.InactivityScores) * 8 + + // Field (24) 
'LatestExecutionPayloadHeader' + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeaderDeneb) + } + size += b.LatestExecutionPayloadHeader.SizeSSZ() + + // Field (27) 'HistoricalSummaries' + size += len(b.HistoricalSummaries) * 64 + + return +} + +// HashTreeRoot ssz hashes the BeaconStateDeneb object +func (b *BeaconStateDeneb) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconStateDeneb object with a hasher +func (b *BeaconStateDeneb) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'GenesisTime' + hh.PutUint64(b.GenesisTime) + + // Field (1) 'GenesisValidatorsRoot' + if size := len(b.GenesisValidatorsRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconStateDeneb.GenesisValidatorsRoot", size, 32) + return + } + hh.PutBytes(b.GenesisValidatorsRoot) + + // Field (2) 'Slot' + hh.PutUint64(b.Slot) + + // Field (3) 'Fork' + if b.Fork == nil { + b.Fork = new(Fork) + } + if err = b.Fork.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'LatestBlockHeader' + if b.LatestBlockHeader == nil { + b.LatestBlockHeader = new(BeaconBlockHeader) + } + if err = b.LatestBlockHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'BlockRoots' + { + subIndx := hh.Index() + for _, i := range b.BlockRoots { + hh.Append(i[:]) + } + hh.Merkleize(subIndx) + } + + // Field (6) 'StateRoots' + { + subIndx := hh.Index() + for _, i := range b.StateRoots { + hh.Append(i[:]) + } + hh.Merkleize(subIndx) + } + + // Field (7) 'HistoricalRoots' + { + if size := len(b.HistoricalRoots); size > 16777216 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.HistoricalRoots", size, 16777216) + return + } + subIndx := hh.Index() + for _, i := range b.HistoricalRoots { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + numItems := uint64(len(b.HistoricalRoots)) + hh.MerkleizeWithMixin(subIndx, numItems, 16777216) + } + + // Field (8) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.HashTreeRootWith(hh); err != nil { + return + } + + // Field (9) 'Eth1DataVotes' + { + subIndx := hh.Index() + num := uint64(len(b.Eth1DataVotes)) + if num > 2048 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Eth1DataVotes { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 2048) + } + + // Field (10) 'Eth1DepositIndex' + hh.PutUint64(b.Eth1DepositIndex) + + // Field (11) 'Validators' + { + subIndx := hh.Index() + num := uint64(len(b.Validators)) + if num > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Validators { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 1099511627776) + } + + // Field (12) 'Balances' + { + if size := len(b.Balances); size > 1099511627776 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.Balances", size, 1099511627776) + return + } + subIndx := hh.Index() + for _, i := range b.Balances { + hh.AppendUint64(i) + } + hh.FillUpTo32() + numItems := uint64(len(b.Balances)) + hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(1099511627776, numItems, 8)) + } + + // Field (13) 'RandaoMixes' + { + if size := len(b.RandaoMixes); size != 65536 { + err = ssz.ErrVectorLengthFn("BeaconStateDeneb.RandaoMixes", size, 65536) + return + } + subIndx := hh.Index() + for _, i := range b.RandaoMixes { + if 
len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (14) 'Slashings' + { + if size := len(b.Slashings); size != 8192 { + err = ssz.ErrVectorLengthFn("BeaconStateDeneb.Slashings", size, 8192) + return + } + subIndx := hh.Index() + for _, i := range b.Slashings { + hh.AppendUint64(i) + } + hh.Merkleize(subIndx) + } + + // Field (15) 'PreviousEpochParticipation' + { + elemIndx := hh.Index() + byteLen := uint64(len(b.PreviousEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.Append(b.PreviousEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1099511627776+31)/32) + } + + // Field (16) 'CurrentEpochParticipation' + { + elemIndx := hh.Index() + byteLen := uint64(len(b.CurrentEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.Append(b.CurrentEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1099511627776+31)/32) + } + + // Field (17) 'JustificationBits' + hh.PutBytes(b.JustificationBits[:]) + + // Field (18) 'PreviousJustifiedCheckpoint' + if b.PreviousJustifiedCheckpoint == nil { + b.PreviousJustifiedCheckpoint = new(Checkpoint) + } + if err = b.PreviousJustifiedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if b.CurrentJustifiedCheckpoint == nil { + b.CurrentJustifiedCheckpoint = new(Checkpoint) + } + if err = b.CurrentJustifiedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (20) 'FinalizedCheckpoint' + if b.FinalizedCheckpoint == nil { + b.FinalizedCheckpoint = new(Checkpoint) + } + if err = b.FinalizedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (21) 'InactivityScores' + { + if size := len(b.InactivityScores); size > 1099511627776 { + err = ssz.ErrListTooBigFn("BeaconStateDeneb.InactivityScores", size, 1099511627776) + return + } + subIndx := hh.Index() + for _, i := range b.InactivityScores { + hh.AppendUint64(i) + } + hh.FillUpTo32() + numItems := uint64(len(b.InactivityScores)) + hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(1099511627776, numItems, 8)) + } + + // Field (22) 'CurrentSyncCommittee' + if b.CurrentSyncCommittee == nil { + b.CurrentSyncCommittee = new(SyncCommittee) + } + if err = b.CurrentSyncCommittee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (23) 'NextSyncCommittee' + if b.NextSyncCommittee == nil { + b.NextSyncCommittee = new(SyncCommittee) + } + if err = b.NextSyncCommittee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (24) 'LatestExecutionPayloadHeader' + if err = b.LatestExecutionPayloadHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (25) 'NextWithdrawalIndex' + hh.PutUint64(b.NextWithdrawalIndex) + + // Field (26) 'NextWithdrawalValidatorIndex' + hh.PutUint64(b.NextWithdrawalValidatorIndex) + + // Field (27) 'HistoricalSummaries' + { + subIndx := hh.Index() + num := uint64(len(b.HistoricalSummaries)) + if num > 16777216 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.HistoricalSummaries { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16777216) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the BeaconStateDeneb object +func (b *BeaconStateDeneb) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(b) +} + +// MarshalSSZ ssz marshals the ExecutionPayloadHeaderDeneb object +func (e 
*ExecutionPayloadHeaderDeneb) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the ExecutionPayloadHeaderDeneb object to a target array +func (e *ExecutionPayloadHeaderDeneb) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(584) + + // Field (0) 'ParentHash' + dst = append(dst, e.ParentHash[:]...) + + // Field (1) 'FeeRecipient' + dst = append(dst, e.FeeRecipient[:]...) + + // Field (2) 'StateRoot' + dst = append(dst, e.StateRoot[:]...) + + // Field (3) 'ReceiptsRoot' + dst = append(dst, e.ReceiptsRoot[:]...) + + // Field (4) 'LogsBloom' + dst = append(dst, e.LogsBloom[:]...) + + // Field (5) 'PrevRandao' + dst = append(dst, e.PrevRandao[:]...) + + // Field (6) 'BlockNumber' + dst = ssz.MarshalUint64(dst, e.BlockNumber) + + // Field (7) 'GasLimit' + dst = ssz.MarshalUint64(dst, e.GasLimit) + + // Field (8) 'GasUsed' + dst = ssz.MarshalUint64(dst, e.GasUsed) + + // Field (9) 'Timestamp' + dst = ssz.MarshalUint64(dst, e.Timestamp) + + // Offset (10) 'ExtraData' + dst = ssz.WriteOffset(dst, offset) + + // Field (11) 'BaseFeePerGas' + dst = append(dst, e.BaseFeePerGas[:]...) + + // Field (12) 'BlockHash' + dst = append(dst, e.BlockHash[:]...) + + // Field (13) 'TransactionsRoot' + dst = append(dst, e.TransactionsRoot[:]...) + + // Field (14) 'WithdrawalRoot' + dst = append(dst, e.WithdrawalRoot[:]...) + + // Field (15) 'BlobGasUsed' + dst = ssz.MarshalUint64(dst, e.BlobGasUsed) + + // Field (16) 'ExcessBlobGas' + dst = ssz.MarshalUint64(dst, e.ExcessBlobGas) + + // Field (10) 'ExtraData' + if size := len(e.ExtraData); size > 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayloadHeaderDeneb.ExtraData", size, 32) + return + } + dst = append(dst, e.ExtraData...) + + return +} + +// UnmarshalSSZ ssz unmarshals the ExecutionPayloadHeaderDeneb object +func (e *ExecutionPayloadHeaderDeneb) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 584 { + return ssz.ErrSize + } + + tail := buf + var o10 uint64 + + // Field (0) 'ParentHash' + copy(e.ParentHash[:], buf[0:32]) + + // Field (1) 'FeeRecipient' + copy(e.FeeRecipient[:], buf[32:52]) + + // Field (2) 'StateRoot' + copy(e.StateRoot[:], buf[52:84]) + + // Field (3) 'ReceiptsRoot' + copy(e.ReceiptsRoot[:], buf[84:116]) + + // Field (4) 'LogsBloom' + copy(e.LogsBloom[:], buf[116:372]) + + // Field (5) 'PrevRandao' + copy(e.PrevRandao[:], buf[372:404]) + + // Field (6) 'BlockNumber' + e.BlockNumber = ssz.UnmarshallUint64(buf[404:412]) + + // Field (7) 'GasLimit' + e.GasLimit = ssz.UnmarshallUint64(buf[412:420]) + + // Field (8) 'GasUsed' + e.GasUsed = ssz.UnmarshallUint64(buf[420:428]) + + // Field (9) 'Timestamp' + e.Timestamp = ssz.UnmarshallUint64(buf[428:436]) + + // Offset (10) 'ExtraData' + if o10 = ssz.ReadOffset(buf[436:440]); o10 > size { + return ssz.ErrOffset + } + + if o10 != 584 { + return ssz.ErrInvalidVariableOffset + } + + // Field (11) 'BaseFeePerGas' + copy(e.BaseFeePerGas[:], buf[440:472]) + + // Field (12) 'BlockHash' + copy(e.BlockHash[:], buf[472:504]) + + // Field (13) 'TransactionsRoot' + copy(e.TransactionsRoot[:], buf[504:536]) + + // Field (14) 'WithdrawalRoot' + copy(e.WithdrawalRoot[:], buf[536:568]) + + // Field (15) 'BlobGasUsed' + e.BlobGasUsed = ssz.UnmarshallUint64(buf[568:576]) + + // Field (16) 'ExcessBlobGas' + e.ExcessBlobGas = ssz.UnmarshallUint64(buf[576:584]) + + // Field (10) 'ExtraData' + { + buf = tail[o10:] + if len(buf) > 32 { + return ssz.ErrBytesLength + } + if cap(e.ExtraData) == 0 { + e.ExtraData = 
make([]byte, 0, len(buf)) + } + e.ExtraData = append(e.ExtraData, buf...) + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadHeaderDeneb object +func (e *ExecutionPayloadHeaderDeneb) SizeSSZ() (size int) { + size = 584 + + // Field (10) 'ExtraData' + size += len(e.ExtraData) + + return +} + +// HashTreeRoot ssz hashes the ExecutionPayloadHeaderDeneb object +func (e *ExecutionPayloadHeaderDeneb) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the ExecutionPayloadHeaderDeneb object with a hasher +func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ParentHash' + hh.PutBytes(e.ParentHash[:]) + + // Field (1) 'FeeRecipient' + hh.PutBytes(e.FeeRecipient[:]) + + // Field (2) 'StateRoot' + hh.PutBytes(e.StateRoot[:]) + + // Field (3) 'ReceiptsRoot' + hh.PutBytes(e.ReceiptsRoot[:]) + + // Field (4) 'LogsBloom' + hh.PutBytes(e.LogsBloom[:]) + + // Field (5) 'PrevRandao' + hh.PutBytes(e.PrevRandao[:]) + + // Field (6) 'BlockNumber' + hh.PutUint64(e.BlockNumber) + + // Field (7) 'GasLimit' + hh.PutUint64(e.GasLimit) + + // Field (8) 'GasUsed' + hh.PutUint64(e.GasUsed) + + // Field (9) 'Timestamp' + hh.PutUint64(e.Timestamp) + + // Field (10) 'ExtraData' + { + elemIndx := hh.Index() + byteLen := uint64(len(e.ExtraData)) + if byteLen > 32 { + err = ssz.ErrIncorrectListSize + return + } + hh.Append(e.ExtraData) + hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32) + } + + // Field (11) 'BaseFeePerGas' + hh.PutBytes(e.BaseFeePerGas[:]) + + // Field (12) 'BlockHash' + hh.PutBytes(e.BlockHash[:]) + + // Field (13) 'TransactionsRoot' + hh.PutBytes(e.TransactionsRoot[:]) + + // Field (14) 'WithdrawalRoot' + hh.PutBytes(e.WithdrawalRoot[:]) + + // Field (15) 'BlobGasUsed' + hh.PutUint64(e.BlobGasUsed) + + // Field (16) 'ExcessBlobGas' + hh.PutUint64(e.ExcessBlobGas) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the ExecutionPayloadHeaderDeneb object +func (e *ExecutionPayloadHeaderDeneb) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(e) +} + +// MarshalSSZ ssz marshals the Fork object +func (f *Fork) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(f) +} + +// MarshalSSZTo ssz marshals the Fork object to a target array +func (f *Fork) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'PreviousVersion' + if size := len(f.PreviousVersion); size != 4 { + err = ssz.ErrBytesLengthFn("Fork.PreviousVersion", size, 4) + return + } + dst = append(dst, f.PreviousVersion...) + + // Field (1) 'CurrentVersion' + if size := len(f.CurrentVersion); size != 4 { + err = ssz.ErrBytesLengthFn("Fork.CurrentVersion", size, 4) + return + } + dst = append(dst, f.CurrentVersion...) + + // Field (2) 'Epoch' + dst = ssz.MarshalUint64(dst, f.Epoch) + + return +} + +// UnmarshalSSZ ssz unmarshals the Fork object +func (f *Fork) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 16 { + return ssz.ErrSize + } + + // Field (0) 'PreviousVersion' + if cap(f.PreviousVersion) == 0 { + f.PreviousVersion = make([]byte, 0, len(buf[0:4])) + } + f.PreviousVersion = append(f.PreviousVersion, buf[0:4]...) + + // Field (1) 'CurrentVersion' + if cap(f.CurrentVersion) == 0 { + f.CurrentVersion = make([]byte, 0, len(buf[4:8])) + } + f.CurrentVersion = append(f.CurrentVersion, buf[4:8]...) 
+ + // Field (2) 'Epoch' + f.Epoch = ssz.UnmarshallUint64(buf[8:16]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Fork object +func (f *Fork) SizeSSZ() (size int) { + size = 16 + return +} + +// HashTreeRoot ssz hashes the Fork object +func (f *Fork) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(f) +} + +// HashTreeRootWith ssz hashes the Fork object with a hasher +func (f *Fork) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'PreviousVersion' + if size := len(f.PreviousVersion); size != 4 { + err = ssz.ErrBytesLengthFn("Fork.PreviousVersion", size, 4) + return + } + hh.PutBytes(f.PreviousVersion) + + // Field (1) 'CurrentVersion' + if size := len(f.CurrentVersion); size != 4 { + err = ssz.ErrBytesLengthFn("Fork.CurrentVersion", size, 4) + return + } + hh.PutBytes(f.CurrentVersion) + + // Field (2) 'Epoch' + hh.PutUint64(f.Epoch) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Fork object +func (f *Fork) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(f) +} + +// MarshalSSZ ssz marshals the BeaconBlockHeader object +func (b *BeaconBlockHeader) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBlockHeader object to a target array +func (b *BeaconBlockHeader) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Slot' + dst = ssz.MarshalUint64(dst, b.Slot) + + // Field (1) 'ProposerIndex' + dst = ssz.MarshalUint64(dst, b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.ParentRoot", size, 32) + return + } + dst = append(dst, b.ParentRoot...) + + // Field (3) 'StateRoot' + if size := len(b.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.StateRoot", size, 32) + return + } + dst = append(dst, b.StateRoot...) + + // Field (4) 'BodyRoot' + if size := len(b.BodyRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.BodyRoot", size, 32) + return + } + dst = append(dst, b.BodyRoot...) + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBlockHeader object +func (b *BeaconBlockHeader) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 112 { + return ssz.ErrSize + } + + // Field (0) 'Slot' + b.Slot = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'ProposerIndex' + b.ProposerIndex = ssz.UnmarshallUint64(buf[8:16]) + + // Field (2) 'ParentRoot' + if cap(b.ParentRoot) == 0 { + b.ParentRoot = make([]byte, 0, len(buf[16:48])) + } + b.ParentRoot = append(b.ParentRoot, buf[16:48]...) + + // Field (3) 'StateRoot' + if cap(b.StateRoot) == 0 { + b.StateRoot = make([]byte, 0, len(buf[48:80])) + } + b.StateRoot = append(b.StateRoot, buf[48:80]...) + + // Field (4) 'BodyRoot' + if cap(b.BodyRoot) == 0 { + b.BodyRoot = make([]byte, 0, len(buf[80:112])) + } + b.BodyRoot = append(b.BodyRoot, buf[80:112]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockHeader object +func (b *BeaconBlockHeader) SizeSSZ() (size int) { + size = 112 + return +} + +// HashTreeRoot ssz hashes the BeaconBlockHeader object +func (b *BeaconBlockHeader) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBlockHeader object with a hasher +func (b *BeaconBlockHeader) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Slot' + hh.PutUint64(b.Slot) + + // Field (1) 'ProposerIndex' + hh.PutUint64(b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.ParentRoot", size, 32) + return + } + hh.PutBytes(b.ParentRoot) + + // Field (3) 'StateRoot' + if size := len(b.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.StateRoot", size, 32) + return + } + hh.PutBytes(b.StateRoot) + + // Field (4) 'BodyRoot' + if size := len(b.BodyRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.BodyRoot", size, 32) + return + } + hh.PutBytes(b.BodyRoot) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the BeaconBlockHeader object +func (b *BeaconBlockHeader) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(b) +} + +// MarshalSSZ ssz marshals the Eth1Data object +func (e *Eth1Data) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the Eth1Data object to a target array +func (e *Eth1Data) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'DepositRoot' + if size := len(e.DepositRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.DepositRoot", size, 32) + return + } + dst = append(dst, e.DepositRoot...) + + // Field (1) 'DepositCount' + dst = ssz.MarshalUint64(dst, e.DepositCount) + + // Field (2) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.BlockHash", size, 32) + return + } + dst = append(dst, e.BlockHash...) + + return +} + +// UnmarshalSSZ ssz unmarshals the Eth1Data object +func (e *Eth1Data) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 72 { + return ssz.ErrSize + } + + // Field (0) 'DepositRoot' + if cap(e.DepositRoot) == 0 { + e.DepositRoot = make([]byte, 0, len(buf[0:32])) + } + e.DepositRoot = append(e.DepositRoot, buf[0:32]...) + + // Field (1) 'DepositCount' + e.DepositCount = ssz.UnmarshallUint64(buf[32:40]) + + // Field (2) 'BlockHash' + if cap(e.BlockHash) == 0 { + e.BlockHash = make([]byte, 0, len(buf[40:72])) + } + e.BlockHash = append(e.BlockHash, buf[40:72]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Eth1Data object +func (e *Eth1Data) SizeSSZ() (size int) { + size = 72 + return +} + +// HashTreeRoot ssz hashes the Eth1Data object +func (e *Eth1Data) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the Eth1Data object with a hasher +func (e *Eth1Data) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'DepositRoot' + if size := len(e.DepositRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.DepositRoot", size, 32) + return + } + hh.PutBytes(e.DepositRoot) + + // Field (1) 'DepositCount' + hh.PutUint64(e.DepositCount) + + // Field (2) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.BlockHash", size, 32) + return + } + hh.PutBytes(e.BlockHash) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Eth1Data object +func (e *Eth1Data) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(e) +} + +// MarshalSSZ ssz marshals the Validator object +func (v *Validator) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(v) +} + +// MarshalSSZTo ssz marshals the Validator object to a target array +func (v *Validator) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Pubkey' + if size := len(v.Pubkey); size != 48 { + err = ssz.ErrBytesLengthFn("Validator.Pubkey", size, 48) + return + } + dst = append(dst, v.Pubkey...) + + // Field (1) 'WithdrawalCredentials' + if size := len(v.WithdrawalCredentials); size != 32 { + err = ssz.ErrBytesLengthFn("Validator.WithdrawalCredentials", size, 32) + return + } + dst = append(dst, v.WithdrawalCredentials...) + + // Field (2) 'EffectiveBalance' + dst = ssz.MarshalUint64(dst, v.EffectiveBalance) + + // Field (3) 'Slashed' + dst = ssz.MarshalBool(dst, v.Slashed) + + // Field (4) 'ActivationEligibilityEpoch' + dst = ssz.MarshalUint64(dst, v.ActivationEligibilityEpoch) + + // Field (5) 'ActivationEpoch' + dst = ssz.MarshalUint64(dst, v.ActivationEpoch) + + // Field (6) 'ExitEpoch' + dst = ssz.MarshalUint64(dst, v.ExitEpoch) + + // Field (7) 'WithdrawableEpoch' + dst = ssz.MarshalUint64(dst, v.WithdrawableEpoch) + + return +} + +// UnmarshalSSZ ssz unmarshals the Validator object +func (v *Validator) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 121 { + return ssz.ErrSize + } + + // Field (0) 'Pubkey' + if cap(v.Pubkey) == 0 { + v.Pubkey = make([]byte, 0, len(buf[0:48])) + } + v.Pubkey = append(v.Pubkey, buf[0:48]...) + + // Field (1) 'WithdrawalCredentials' + if cap(v.WithdrawalCredentials) == 0 { + v.WithdrawalCredentials = make([]byte, 0, len(buf[48:80])) + } + v.WithdrawalCredentials = append(v.WithdrawalCredentials, buf[48:80]...) 
+ + // Field (2) 'EffectiveBalance' + v.EffectiveBalance = ssz.UnmarshallUint64(buf[80:88]) + + // Field (3) 'Slashed' + v.Slashed = ssz.UnmarshalBool(buf[88:89]) + + // Field (4) 'ActivationEligibilityEpoch' + v.ActivationEligibilityEpoch = ssz.UnmarshallUint64(buf[89:97]) + + // Field (5) 'ActivationEpoch' + v.ActivationEpoch = ssz.UnmarshallUint64(buf[97:105]) + + // Field (6) 'ExitEpoch' + v.ExitEpoch = ssz.UnmarshallUint64(buf[105:113]) + + // Field (7) 'WithdrawableEpoch' + v.WithdrawableEpoch = ssz.UnmarshallUint64(buf[113:121]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Validator object +func (v *Validator) SizeSSZ() (size int) { + size = 121 + return +} + +// HashTreeRoot ssz hashes the Validator object +func (v *Validator) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(v) +} + +// HashTreeRootWith ssz hashes the Validator object with a hasher +func (v *Validator) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Pubkey' + if size := len(v.Pubkey); size != 48 { + err = ssz.ErrBytesLengthFn("Validator.Pubkey", size, 48) + return + } + hh.PutBytes(v.Pubkey) + + // Field (1) 'WithdrawalCredentials' + if size := len(v.WithdrawalCredentials); size != 32 { + err = ssz.ErrBytesLengthFn("Validator.WithdrawalCredentials", size, 32) + return + } + hh.PutBytes(v.WithdrawalCredentials) + + // Field (2) 'EffectiveBalance' + hh.PutUint64(v.EffectiveBalance) + + // Field (3) 'Slashed' + hh.PutBool(v.Slashed) + + // Field (4) 'ActivationEligibilityEpoch' + hh.PutUint64(v.ActivationEligibilityEpoch) + + // Field (5) 'ActivationEpoch' + hh.PutUint64(v.ActivationEpoch) + + // Field (6) 'ExitEpoch' + hh.PutUint64(v.ExitEpoch) + + // Field (7) 'WithdrawableEpoch' + hh.PutUint64(v.WithdrawableEpoch) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Validator object +func (v *Validator) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(v) +} + +// MarshalSSZ ssz marshals the Checkpoint object +func (c *Checkpoint) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(c) +} + +// MarshalSSZTo ssz marshals the Checkpoint object to a target array +func (c *Checkpoint) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Epoch' + dst = ssz.MarshalUint64(dst, c.Epoch) + + // Field (1) 'Root' + if size := len(c.Root); size != 32 { + err = ssz.ErrBytesLengthFn("Checkpoint.Root", size, 32) + return + } + dst = append(dst, c.Root...) + + return +} + +// UnmarshalSSZ ssz unmarshals the Checkpoint object +func (c *Checkpoint) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 40 { + return ssz.ErrSize + } + + // Field (0) 'Epoch' + c.Epoch = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'Root' + if cap(c.Root) == 0 { + c.Root = make([]byte, 0, len(buf[8:40])) + } + c.Root = append(c.Root, buf[8:40]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Checkpoint object +func (c *Checkpoint) SizeSSZ() (size int) { + size = 40 + return +} + +// HashTreeRoot ssz hashes the Checkpoint object +func (c *Checkpoint) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(c) +} + +// HashTreeRootWith ssz hashes the Checkpoint object with a hasher +func (c *Checkpoint) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Epoch' + hh.PutUint64(c.Epoch) + + // Field (1) 'Root' + if size := len(c.Root); size != 32 { + err = ssz.ErrBytesLengthFn("Checkpoint.Root", size, 32) + return + } + hh.PutBytes(c.Root) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Checkpoint object +func (c *Checkpoint) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(c) +} + +// MarshalSSZ ssz marshals the SyncCommittee object +func (s *SyncCommittee) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SyncCommittee object to a target array +func (s *SyncCommittee) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'PubKeys' + if size := len(s.PubKeys); size != 512 { + err = ssz.ErrVectorLengthFn("SyncCommittee.PubKeys", size, 512) + return + } + for ii := 0; ii < 512; ii++ { + if size := len(s.PubKeys[ii]); size != 48 { + err = ssz.ErrBytesLengthFn("SyncCommittee.PubKeys[ii]", size, 48) + return + } + dst = append(dst, s.PubKeys[ii]...) + } + + // Field (1) 'AggregatePubKey' + dst = append(dst, s.AggregatePubKey[:]...) + + return +} + +// UnmarshalSSZ ssz unmarshals the SyncCommittee object +func (s *SyncCommittee) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 24624 { + return ssz.ErrSize + } + + // Field (0) 'PubKeys' + s.PubKeys = make([][]byte, 512) + for ii := 0; ii < 512; ii++ { + if cap(s.PubKeys[ii]) == 0 { + s.PubKeys[ii] = make([]byte, 0, len(buf[0:24576][ii*48:(ii+1)*48])) + } + s.PubKeys[ii] = append(s.PubKeys[ii], buf[0:24576][ii*48:(ii+1)*48]...) 
+ } + + // Field (1) 'AggregatePubKey' + copy(s.AggregatePubKey[:], buf[24576:24624]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SyncCommittee object +func (s *SyncCommittee) SizeSSZ() (size int) { + size = 24624 + return +} + +// HashTreeRoot ssz hashes the SyncCommittee object +func (s *SyncCommittee) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SyncCommittee object with a hasher +func (s *SyncCommittee) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'PubKeys' + { + if size := len(s.PubKeys); size != 512 { + err = ssz.ErrVectorLengthFn("SyncCommittee.PubKeys", size, 512) + return + } + subIndx := hh.Index() + for _, i := range s.PubKeys { + if len(i) != 48 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(i) + } + hh.Merkleize(subIndx) + } + + // Field (1) 'AggregatePubKey' + hh.PutBytes(s.AggregatePubKey[:]) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SyncCommittee object +func (s *SyncCommittee) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the HistoricalSummary object +func (h *HistoricalSummary) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(h) +} + +// MarshalSSZTo ssz marshals the HistoricalSummary object to a target array +func (h *HistoricalSummary) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'BlockSummaryRoot' + dst = append(dst, h.BlockSummaryRoot[:]...) + + // Field (1) 'StateSummaryRoot' + dst = append(dst, h.StateSummaryRoot[:]...) + + return +} + +// UnmarshalSSZ ssz unmarshals the HistoricalSummary object +func (h *HistoricalSummary) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 64 { + return ssz.ErrSize + } + + // Field (0) 'BlockSummaryRoot' + copy(h.BlockSummaryRoot[:], buf[0:32]) + + // Field (1) 'StateSummaryRoot' + copy(h.StateSummaryRoot[:], buf[32:64]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the HistoricalSummary object +func (h *HistoricalSummary) SizeSSZ() (size int) { + size = 64 + return +} + +// HashTreeRoot ssz hashes the HistoricalSummary object +func (h *HistoricalSummary) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(h) +} + +// HashTreeRootWith ssz hashes the HistoricalSummary object with a hasher +func (h *HistoricalSummary) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'BlockSummaryRoot' + hh.PutBytes(h.BlockSummaryRoot[:]) + + // Field (1) 'StateSummaryRoot' + hh.PutBytes(h.StateSummaryRoot[:]) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the HistoricalSummary object +func (h *HistoricalSummary) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(h) +} diff --git a/shared/types/eth2/types.go b/shared/types/eth2/types.go index 00a2b5e23..45b131f17 100644 --- a/shared/types/eth2/types.go +++ b/shared/types/eth2/types.go @@ -33,3 +33,4 @@ type WithdrawalCredentialsChange struct { FromBLSPubkey [48]byte `json:"from_bls_pubkey" ssz-size:"48"` ToExecutionAddress [20]byte `json:"to_execution_address" ssz-size:"20"` } + diff --git a/shared/types/eth2/types_encoding.go b/shared/types/eth2/types_encoding.go index 21dc8c9c0..df1a4892f 100644 --- a/shared/types/eth2/types_encoding.go +++ b/shared/types/eth2/types_encoding.go @@ -1,6 +1,6 @@ // Code generated by fastssz. DO NOT EDIT. 
-// Hash: de1de6c602b97e33f12faec5f30221b3b10016d756604ad75f71674ad81d10d0 -// Version: 0.1.2 +// Hash: 8334e51dc7fef48f4bfcdf131ce2d53118dc8c0cba4ed032b43e8a8e83319297 +// Version: 0.1.3 package eth2 import ( diff --git a/shared/utils/hex/hex.go b/shared/utils/hex/hex.go index 2cc4db6a8..f74308a23 100644 --- a/shared/utils/hex/hex.go +++ b/shared/utils/hex/hex.go @@ -1,5 +1,11 @@ package hex +import "encoding/hex" + +func EncodeToString(value []byte) string { + return AddPrefix(hex.EncodeToString(value)) +} + // Add a prefix to a hex string if not present func AddPrefix(value string) string { if len(value) < 2 || value[0:2] != "0x" { diff --git a/shared/version.go b/shared/version.go index a1d60767f..1f508fd1a 100644 --- a/shared/version.go +++ b/shared/version.go @@ -1,6 +1,6 @@ package shared -const RocketPoolVersion string = "1.14.1" +const RocketPoolVersion string = "1.15.0-dev" const Logo string = `______ _ _ ______ _ | ___ \ | | | | | ___ \ | | diff --git a/sszgen.sh b/sszgen.sh index 660a35ffb..10811c0c2 100755 --- a/sszgen.sh +++ b/sszgen.sh @@ -3,4 +3,4 @@ # Generates the ssz encoding methods for eth2 types with fastssz # Install sszgen with `go get github.com/ferranbt/fastssz/sszgen` rm -f ./shared/types/eth2/types_encoding.go -sszgen --path ./shared/types/eth2 \ No newline at end of file +sszgen --path ./shared/types/eth2 --exclude-objs Uint256
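
For reference, a minimal sketch (not part of this diff) of how the generated Deneb state codec is meant to be consumed: read raw SSZ bytes, decode with the generated UnmarshalSSZ, and re-derive the state root with HashTreeRoot, printing it via the new hex.EncodeToString helper. The module path github.com/rocket-pool/smartnode, the state.ssz input path, and the download hint in the comments are illustrative assumptions, not prescribed by this change.

// Minimal sketch (assumptions noted above): decode a Deneb beacon state with
// the generated codec and re-derive its hash tree root.
package main

import (
	"fmt"
	"os"

	"github.com/rocket-pool/smartnode/shared/types/eth2"
	"github.com/rocket-pool/smartnode/shared/utils/hex"
)

func main() {
	// Raw SSZ bytes of a Deneb BeaconState, e.g. fetched from a consensus
	// client's debug state endpoint with an SSZ (octet-stream) Accept header.
	// "state.ssz" is a placeholder path.
	buf, err := os.ReadFile("state.ssz")
	if err != nil {
		panic(err)
	}

	// UnmarshalSSZ rejects buffers shorter than the 2,736,653-byte fixed
	// section and validates every variable-field offset before copying data.
	state := new(eth2.BeaconStateDeneb)
	if err := state.UnmarshalSSZ(buf); err != nil {
		panic(err)
	}

	// HashTreeRoot merkleizes all 28 state fields; the result should match
	// the state root the beacon node reports for the same slot.
	root, err := state.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	fmt.Printf("slot %d, state root %s\n", state.Slot, hex.EncodeToString(root[:]))
}

On the sszgen.sh change: Uint256 is a plain [32]byte alias, so presumably --exclude-objs Uint256 keeps sszgen from emitting standalone codec methods for it, while the ssz-size:"32" tag on BaseFeePerGas still encodes it inline as a fixed 32-byte vector (as the generated `dst = append(dst, e.BaseFeePerGas[:]...)` shows).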