From d9300300874e8267355c164b44e0d48b7b901e76 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 30 Oct 2024 00:33:44 +0100 Subject: [PATCH 001/245] change(deps): Use ECC deps with activation height for NU6 (#8978) * Remove temporary dependency patches We need to enable the `legacy-api` feature of `incrementalmerkletree` to be able to serialize note commitment trees using an old serialization format for the `z_gettreestate` RPC. * Use spaces instead of a tab * Bump ECC deps to match `zcashd` --- Cargo.lock | 230 +++++++++++++++-------------------------------------- Cargo.toml | 34 +++----- 2 files changed, 76 insertions(+), 188 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14220da0ea0..c1fe19bc718 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -566,7 +566,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cef977c7f8e75aa81fc589064c121ab8d32448b7939d34d58df479aa93e65ea5" dependencies = [ - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", ] [[package]] @@ -1343,15 +1343,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "equihash" -version = "0.2.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" -dependencies = [ - "blake2b_simd", - "byteorder", -] - [[package]] name = "equivalent" version = "1.0.1" @@ -1387,14 +1378,6 @@ dependencies = [ "blake2b_simd", ] -[[package]] -name = "f4jumble" -version = "0.1.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" -dependencies = [ - "blake2b_simd", -] - [[package]] name = "fastrand" version = "2.1.1" @@ -2157,19 +2140,11 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "incrementalmerkletree" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75346da3bd8e3d8891d02508245ed2df34447ca6637e343829f8d08986e9cde2" 
-dependencies = [ - "either", -] - [[package]] name = "incrementalmerkletree" version = "0.7.0" -source = "git+https://github.com/zcash/incrementalmerkletree?rev=ffe4234788fd22662b937ba7c6ea01535fcc1293#ffe4234788fd22662b937ba7c6ea01535fcc1293" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d45063fbc4b0a37837f6bfe0445f269d13d730ad0aa3b5a7f74aa7bf27a0f4df" dependencies = [ "either", ] @@ -2876,8 +2851,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" -version = "0.9.1" -source = "git+https://github.com/zcash/orchard?rev=55fb089a335bbbc1cda186c706bc037073df8eb7#55fb089a335bbbc1cda186c706bc037073df8eb7" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f18e997fa121de5c73e95cdc7e8512ae43b7de38904aeea5e5713cc48f3c0ba" dependencies = [ "aes", "bitvec", @@ -2888,7 +2864,7 @@ dependencies = [ "halo2_gadgets", "halo2_proofs", "hex", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "lazy_static", "memuse", "nonempty", @@ -4006,8 +3982,9 @@ dependencies = [ [[package]] name = "sapling-crypto" -version = "0.2.0" -source = "git+https://github.com/zcash/sapling-crypto?rev=b1ad3694ee13a2fc5d291ad04721a6252da0993c#b1ad3694ee13a2fc5d291ad04721a6252da0993c" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfff8cfce16aeb38da50b8e2ed33c9018f30552beff2210c266662a021b17f38" dependencies = [ "aes", "bellman", @@ -4021,7 +3998,7 @@ dependencies = [ "fpe", "group", "hex", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "jubjub", "lazy_static", "memuse", @@ -4342,12 +4319,13 @@ dependencies = [ [[package]] name = "shardtree" -version = "0.4.0" -source = "git+https://github.com/zcash/incrementalmerkletree?rev=ffe4234788fd22662b937ba7c6ea01535fcc1293#ffe4234788fd22662b937ba7c6ea01535fcc1293" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b5f2390975ebfe8838f9e861f7a588123d49a7a7a0a08568ea831d8ad53fc9b4" dependencies = [ "bitflags 2.6.0", "either", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "tracing", ] @@ -5856,33 +5834,22 @@ checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" [[package]] name = "zcash_address" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d26f21381dc220836dd8d2a9a10dbe85928a26232b011bc6a42b611789b743" +checksum = "4ff95eac82f71286a79c750e674550d64fb2b7aadaef7b89286b2917f645457d" dependencies = [ "bech32", "bs58", - "f4jumble 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "f4jumble", "zcash_encoding", - "zcash_protocol 0.2.0", -] - -[[package]] -name = "zcash_address" -version = "0.5.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" -dependencies = [ - "bech32", - "bs58", - "f4jumble 0.1.0 (git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792)", - "zcash_encoding", - "zcash_protocol 0.3.0", + "zcash_protocol", ] [[package]] name = "zcash_client_backend" -version = "0.13.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbeeede366fdb642710d3c59fc2090489affd075f66db53ed11bb7138d2d0258" dependencies = [ "base64 0.22.1", "bech32", @@ -5892,7 +5859,7 @@ dependencies = [ "document-features", "group", "hex", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "memuse", "nom", "nonempty", @@ -5908,12 +5875,12 @@ dependencies = [ "tonic-build", "tracing", "which", - "zcash_address 0.5.0", + "zcash_address", "zcash_encoding", - "zcash_keys 0.3.0 
(git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792)", + "zcash_keys", "zcash_note_encryption", - "zcash_primitives 0.17.0", - "zcash_protocol 0.3.0", + "zcash_primitives", + "zcash_protocol", "zip32", "zip321", ] @@ -5921,7 +5888,8 @@ dependencies = [ [[package]] name = "zcash_encoding" version = "0.2.1" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052d8230202f0a018cd9b5d1b56b94cd25e18eccc2d8665073bcea8261ab87fc" dependencies = [ "byteorder", "nonempty", @@ -5930,7 +5898,8 @@ dependencies = [ [[package]] name = "zcash_history" version = "0.4.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fde17bf53792f9c756b313730da14880257d7661b5bfc69d0571c3a7c11a76d" dependencies = [ "blake2b_simd", "byteorder", @@ -5939,34 +5908,9 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712faf4070107ab0b2828d0eda6aeaf4c3cb02564109832d95b97ad3467c95a5" -dependencies = [ - "bech32", - "blake2b_simd", - "bls12_381", - "bs58", - "document-features", - "group", - "memuse", - "nonempty", - "rand_core 0.6.4", - "sapling-crypto", - "secrecy", - "subtle", - "tracing", - "zcash_address 0.4.0", - "zcash_encoding", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", - "zip32", -] - -[[package]] -name = "zcash_keys" -version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +checksum = "e8162c94957f1e379b8e2fb30f97b95cfa93ac9c6bc02895946ca6392d1abb81" dependencies = [ "bech32", 
"blake2b_simd", @@ -5981,10 +5925,10 @@ dependencies = [ "secrecy", "subtle", "tracing", - "zcash_address 0.5.0", + "zcash_address", "zcash_encoding", - "zcash_primitives 0.17.0", - "zcash_protocol 0.3.0", + "zcash_primitives", + "zcash_protocol", "zip32", ] @@ -6003,44 +5947,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.16.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f044bc9cf2887ec408196fbafb44749e5581f57cc18d8da7aabaeb60cc40c64" -dependencies = [ - "aes", - "blake2b_simd", - "bs58", - "byteorder", - "document-features", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ff", - "fpe", - "group", - "hex", - "incrementalmerkletree 0.6.0", - "jubjub", - "memuse", - "nonempty", - "orchard", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "sapling-crypto", - "sha2", - "subtle", - "tracing", - "zcash_address 0.4.0", - "zcash_encoding", - "zcash_note_encryption", - "zcash_protocol 0.2.0", - "zcash_spec", - "zip32", -] - -[[package]] -name = "zcash_primitives" -version = "0.17.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +checksum = "6ab47d526d7fd6f88b3a2854ad81b54757a80c2aeadd1d8b06f690556af9743c" dependencies = [ "aes", "bip32", @@ -6048,12 +5957,12 @@ dependencies = [ "bs58", "byteorder", "document-features", - "equihash 0.2.0 (git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792)", + "equihash", "ff", "fpe", "group", "hex", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "jubjub", "memuse", "nonempty", @@ -6067,18 +5976,19 @@ dependencies = [ "sha2", "subtle", "tracing", - "zcash_address 0.5.0", + "zcash_address", "zcash_encoding", "zcash_note_encryption", - "zcash_protocol 0.3.0", + "zcash_protocol", "zcash_spec", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.17.0" -source = 
"git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daba607872e60d91a09248d8e1ea3d6801c819fb80d67016d9de02d81323c10d" dependencies = [ "bellman", "blake2b_simd", @@ -6094,23 +6004,14 @@ dependencies = [ "sapling-crypto", "tracing", "xdg", - "zcash_primitives 0.17.0", + "zcash_primitives", ] [[package]] name = "zcash_protocol" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f35eac659fdbba614333d119217c5963c0d7cea43aee33176c4f2f95e5460d8d" -dependencies = [ - "document-features", - "memuse", -] - -[[package]] -name = "zcash_protocol" -version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +checksum = "6bc22b9155b2c7eb20105cd06de170d188c1bc86489b92aa3fda7b8da8d96acf" dependencies = [ "document-features", "memuse", @@ -6152,13 +6053,13 @@ dependencies = [ "criterion", "dirs", "ed25519-zebra", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash", "futures", "group", "halo2_proofs", "hex", "humantime", - "incrementalmerkletree 0.7.0", + "incrementalmerkletree", "itertools 0.13.0", "jubjub", "lazy_static", @@ -6190,13 +6091,13 @@ dependencies = [ "tracing", "uint 0.10.0", "x25519-dalek", - "zcash_address 0.5.0", + "zcash_address", "zcash_client_backend", "zcash_encoding", "zcash_history", "zcash_note_encryption", - "zcash_primitives 0.17.0", - "zcash_protocol 0.3.0", + "zcash_primitives", + "zcash_protocol", "zebra-test", ] @@ -6261,7 +6162,7 @@ dependencies = [ "tonic-build", "tonic-reflection", "tower 0.4.13", - "zcash_primitives 0.17.0", + "zcash_primitives", "zebra-chain", "zebra-node-services", "zebra-state", @@ -6350,8 +6251,8 @@ dependencies = [ "tonic-reflection", "tower 0.4.13", 
"tracing", - "zcash_address 0.5.0", - "zcash_primitives 0.17.0", + "zcash_address", + "zcash_primitives", "zebra-chain", "zebra-consensus", "zebra-network", @@ -6393,11 +6294,11 @@ dependencies = [ "tower 0.4.13", "tracing", "tracing-subscriber", - "zcash_address 0.5.0", + "zcash_address", "zcash_client_backend", - "zcash_keys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys", "zcash_note_encryption", - "zcash_primitives 0.17.0", + "zcash_primitives", "zebra-chain", "zebra-grpc", "zebra-node-services", @@ -6516,8 +6417,8 @@ dependencies = [ "tracing-error", "tracing-subscriber", "zcash_client_backend", - "zcash_primitives 0.17.0", - "zcash_protocol 0.3.0", + "zcash_primitives", + "zcash_protocol", "zebra-chain", "zebra-node-services", "zebra-rpc", @@ -6650,12 +6551,13 @@ dependencies = [ [[package]] name = "zip321" -version = "0.1.0" -source = "git+https://github.com/zcash/librustzcash.git?rev=1410f1449100a417bfbc4f6c7167aa9808e38792#1410f1449100a417bfbc4f6c7167aa9808e38792" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3e613defb0940acef1f54774b51c7f48f2fa705613dd800870dc69f35cd2ea" dependencies = [ "base64 0.22.1", "nom", "percent-encoding", - "zcash_address 0.5.0", - "zcash_protocol 0.3.0", + "zcash_address", + "zcash_protocol", ] diff --git a/Cargo.toml b/Cargo.toml index 976c259130d..c50c93ad414 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ members = [ "zebra-test", "zebra-utils", "zebra-scan", - "zebra-grpc", + "zebra-grpc", "tower-batch-control", "tower-fallback", ] @@ -22,17 +22,17 @@ resolver = "2" # `cargo release` settings [workspace.dependencies] -incrementalmerkletree = "0.7.0" -orchard = "0.9.0" -sapling-crypto = "0.2.0" -zcash_address = "0.5.0" -zcash_client_backend = "0.13.0" +incrementalmerkletree = { version = "0.7.0", features = ["legacy-api"] } +orchard = "0.10.0" +sapling-crypto = "0.3.0" +zcash_address = "0.6.0" +zcash_client_backend = "0.14.0" 
zcash_encoding = "0.2.1" zcash_history = "0.4.0" -zcash_keys = "0.3.0" -zcash_primitives = "0.17.0" -zcash_proofs = "0.17.0" -zcash_protocol = "0.3.0" +zcash_keys = "0.4.0" +zcash_primitives = "0.19.0" +zcash_proofs = "0.19.0" +zcash_protocol = "0.4.0" [workspace.metadata.release] @@ -103,17 +103,3 @@ panic = "abort" # - see https://doc.rust-lang.org/rustc/linker-plugin-lto.html#cc-code-as-a-dependency-in-rust lto = "thin" -# We can remove this patches after we get out of 2.0 release candidate and upgrade the ECC dependencies above. -# This revisions are at the commit just before setting mainnet activation heights. -[patch.crates-io] -zcash_address = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_client_backend = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_encoding = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_history = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_primitives = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_proofs = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -zcash_protocol = { git = "https://github.com/zcash/librustzcash.git", rev = "1410f1449100a417bfbc4f6c7167aa9808e38792" } -sapling-crypto = { git = "https://github.com/zcash/sapling-crypto", rev = "b1ad3694ee13a2fc5d291ad04721a6252da0993c" } -orchard = { git = "https://github.com/zcash/orchard", rev = "55fb089a335bbbc1cda186c706bc037073df8eb7" } -incrementalmerkletree = { git = "https://github.com/zcash/incrementalmerkletree", rev = "ffe4234788fd22662b937ba7c6ea01535fcc1293" } -shardtree = { git = "https://github.com/zcash/incrementalmerkletree", rev = "ffe4234788fd22662b937ba7c6ea01535fcc1293" } From 
fef500a72840d4b7c89d68e14980eeda43869873 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 30 Oct 2024 15:41:41 +0100 Subject: [PATCH 002/245] chore: Release v2.0.1 (#8979) * Run `cargo update` * chore: Release * Update `release-crates-dry-run.sh` * Update `ESTIMATED_RELEASE_HEIGHT` * Update `CHANGELOG.md` * Update `ESTIMATED_RELEASE_HEIGHT` --- .../scripts/release-crates-dry-run.sh | 4 +- CHANGELOG.md | 28 +++++++ Cargo.lock | 79 ++++++++++--------- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 6 +- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 6 +- zebra-consensus/Cargo.toml | 20 ++--- zebra-grpc/Cargo.toml | 6 +- zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 24 +++--- zebra-scan/Cargo.toml | 20 ++--- zebra-script/Cargo.toml | 6 +- zebra-state/Cargo.toml | 10 +-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +- zebrad/Cargo.toml | 30 +++---- zebrad/src/components/sync/end_of_support.rs | 2 +- 20 files changed, 149 insertions(+), 120 deletions(-) diff --git a/.github/workflows/scripts/release-crates-dry-run.sh b/.github/workflows/scripts/release-crates-dry-run.sh index 9935e23a947..32fc0e671c7 100755 --- a/.github/workflows/scripts/release-crates-dry-run.sh +++ b/.github/workflows/scripts/release-crates-dry-run.sh @@ -23,8 +23,8 @@ fi cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc patch # Due to a bug in cargo-release, we need to pass exact versions for alpha crates: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.10 -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.8 +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.11 +cargo release version --verbose --execute --no-confirm 
--allow-branch '*' --package zebra-grpc 0.1.0-alpha.9 # Update zebrad: cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch diff --git a/CHANGELOG.md b/CHANGELOG.md index e5565429f8a..17b898a38dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,34 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 2.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.1) - 2024-10-30 + +- Zebra now supports NU6 on Mainnet. This patch release updates dependencies + required for NU6. + +### Breaking Changes + +- The JSON RPC endpoint has cookie-based authentication enabled by default. + +### Added + +- NU6-related documentation + ([#8949](https://github.com/ZcashFoundation/zebra/pull/8949)) +- A cookie-based authentication system for the JSON RPC endpoint + ([#8900](https://github.com/ZcashFoundation/zebra/pull/8900), + [#8965](https://github.com/ZcashFoundation/zebra/pull/8965)) + +### Changed + +- Set the activation height of NU6 for Mainnet and bump Zebra's current network + protocol version + ([#8960](https://github.com/ZcashFoundation/zebra/pull/8960)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @oxarbitrage and @upbqdn. + ## [Zebra 2.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.0) - 2024-10-25 This release brings full support for NU6. 
diff --git a/Cargo.lock b/Cargo.lock index c1fe19bc718..c9e9dcbcb5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2041,7 +2041,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2064,9 +2064,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -2218,9 +2218,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" +checksum = "a1f72d3e19488cf7d8ea52d2fc0f8754fc933398b337cd3cbdb28aaeb35159ef" dependencies = [ "console", "lazy_static", @@ -2455,9 +2455,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -3406,7 +3406,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "socket2", "thiserror", "tokio", @@ -3423,7 +3423,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -3432,10 +3432,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = 
"e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -3716,9 +3717,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", @@ -3739,7 +3740,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -3864,9 +3865,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -3889,9 +3890,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -4088,7 +4089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00421ed8fa0c995f07cde48ba6c89e80f2b312f74ff637326f392fbfd23abe02" dependencies = [ "httpdate", - "reqwest 0.12.8", + "reqwest 0.12.9", "rustls 0.21.12", "sentry-backtrace", "sentry-contexts", @@ -4169,9 +4170,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = 
"f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -4187,9 +4188,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", @@ -4746,7 +4747,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -4940,7 +4941,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.17" +version = "0.2.41-beta.18" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4963,7 +4964,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.17" +version = "0.2.41-beta.18" dependencies = [ "futures-core", "pin-project", @@ -5274,7 +5275,7 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "url", "webpki-roots 0.26.6", @@ -6038,7 +6039,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "bitflags 2.6.0", "bitflags-serde-legacy", @@ -6103,7 +6104,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "bellman", "blake2b_simd", @@ -6149,7 +6150,7 @@ dependencies = [ [[package]] name = "zebra-grpc" -version = "0.1.0-alpha.8" +version = "0.1.0-alpha.9" dependencies = [ "color-eyre", "futures-util", @@ -6171,7 +6172,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "bitflags 2.6.0", "byteorder", @@ 
-6212,7 +6213,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "color-eyre", "jsonrpc-core", @@ -6225,7 +6226,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "base64 0.22.1", "chrono", @@ -6264,7 +6265,7 @@ dependencies = [ [[package]] name = "zebra-scan" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "bls12_381", "chrono", @@ -6310,7 +6311,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "hex", "lazy_static", @@ -6322,7 +6323,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "bincode", "chrono", @@ -6367,7 +6368,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "color-eyre", "futures", @@ -6395,7 +6396,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" dependencies = [ "color-eyre", "hex", @@ -6426,7 +6427,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "2.0.0" +version = "2.0.1" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 90491024df3..dbaf726cf9a 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -37,7 +37,7 @@ docker run -d --platform linux/amd64 \ ### Build it locally ```shell -git clone --depth 1 --branch v2.0.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v2.0.1 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . 
docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index 6648339f743..5903adf4337 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -76,7 +76,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v2.0.0 +git checkout v2.0.1 ``` 3. Build and Run `zebrad` @@ -89,7 +89,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.0.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.0.1 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 398a2baddbc..517c9cfca9c 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.17" +version = "0.2.41-beta.18" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -43,10 +43,10 @@ rand = "0.8.5" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.17" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.18" } tower-test = "0.4.0" -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 5919b1bc632..a3d504ed691 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.17" +version = "0.2.41-beta.18" authors = ["Zcash Foundation "] 
description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" @@ -24,4 +24,4 @@ tracing = "0.1.39" [dev-dependencies] tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 4ab99fd8857..b43e77f149a 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -145,7 +145,7 @@ proptest-derive = { version = "0.5.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42", optional = true } [dev-dependencies] # Benchmarks @@ -168,7 +168,7 @@ rand_chacha = "0.3.1" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } [[bench]] name = "block" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 2dd58ed562d..cf8424d1606 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -63,13 +63,13 @@ orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } 
wagyu-zcash-parameters = "0.2.0" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.17" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.17" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.18" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.18" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.41" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.42" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } @@ -94,6 +94,6 @@ tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.18" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index cc57da114a1..4f825686d52 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-grpc" -version = "0.1.0-alpha.8" +version = "0.1.0-alpha.9" authors = ["Zcash 
Foundation "] description = "Zebra gRPC interface" license = "MIT OR Apache-2.0" @@ -28,8 +28,8 @@ color-eyre = "0.6.3" zcash_primitives.workspace = true -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41", features = ["shielded-scan"] } -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.41" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.42" } [build-dependencies] tonic-build = "0.12.3" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 77eb565c0d1..e4967cc66f2 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["async-error"] } [dev-dependencies] proptest = "1.4.0" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 130f6da481f..cba315f1efa 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -37,7 +37,7 @@ rpc-client = [ shielded-scan = [] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.41" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.42" } # Optional 
dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 1562a77677e..85be248bc76 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -104,16 +104,16 @@ zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl proptest = { version = "1.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = [ "json-conversion", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.41" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.41" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = [ "rpc-client", ] } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.41" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.42" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } [build-dependencies] tonic-build = { version = "0.12.3", optional = true } @@ -126,17 +126,17 @@ proptest = "1.4.0" thiserror = "1.0.64" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = [ "proptest-impl", ] } -zebra-consensus = { path = "../zebra-consensus", version 
= "1.0.0-beta.41", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42", features = [ "proptest-impl", ] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.41", features = [ +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42", features = [ "proptest-impl", ] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41", features = [ +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = [ "proptest-impl", ] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.41" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 4c13ed02050..c939700727e 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-scan" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" authors = ["Zcash Foundation "] description = "Shielded transaction scanner for the Zcash blockchain" license = "MIT OR Apache-2.0" @@ -77,11 +77,11 @@ zcash_primitives.workspace = true zcash_address.workspace = true sapling-crypto.workspace = true -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["shielded-scan"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41", features = ["shielded-scan"] } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41", features = ["shielded-scan"] } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.8" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.41" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["shielded-scan"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.9" } 
+zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42" } chrono = { version = "0.4.38", default-features = false, features = ["clock", "std", "serde"] } @@ -96,7 +96,7 @@ jubjub = { version = "0.10.0", optional = true } rand = { version = "0.8.5", optional = true } zcash_note_encryption = { version = "0.4.0", optional = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.41", optional = true } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42", optional = true } # zebra-scanner binary dependencies tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } @@ -107,7 +107,7 @@ serde_json = "1.0.132" jsonrpc = { version = "0.18.0", optional = true } hex = { version = "0.4.3", optional = true } -zebrad = { path = "../zebrad", version = "2.0.0" } +zebrad = { path = "../zebrad", version = "2.0.1" } [dev-dependencies] insta = { version = "1.40.0", features = ["ron", "redactions"] } @@ -125,6 +125,6 @@ zcash_note_encryption = "0.4.0" toml = "0.8.19" tonic = "0.12.3" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.41" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 92fdf77077a..1f3050ca53a 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -16,11 +16,11 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.2.0" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41" } +zebra-chain = { path = 
"../zebra-chain", version = "1.0.0-beta.42" } thiserror = "1.0.64" [dev-dependencies] hex = "0.4.3" lazy_static = "1.4.0" -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.41" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 6bdfdaaeb66..55f4f2e1556 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -77,13 +77,13 @@ tracing = "0.1.39" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.132", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } @@ -108,5 +108,5 @@ jubjub = "0.10.0" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.41" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 
c430257f5ef..86daa264305 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 0c46395b072..e8f81cb088e 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.41" +version = "1.0.0-beta.42" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -94,11 +94,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.18" thiserror = "1.0.64" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.41", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.13.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 2bea0392f9d..768f68dfcb3 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "2.0.0" +version = "2.0.1" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -157,15 +157,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { 
path = "../zebra-chain", version = "1.0.0-beta.41" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.41" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.41" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.41", features = ["rpc-client"] } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.41" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["rpc-client"] } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.41", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.42", optional = true } abscissa_core = "0.7.0" clap = { version = "4.5.20", features = ["cargo"] } @@ -279,13 +279,13 @@ proptest-derive = "0.5.0" # enable span traces and track caller in tests color-eyre = { version = "0.6.3" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.41", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.41", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42", features = ["proptest-impl"] } 
+zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.41" } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.8" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.9" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). @@ -296,7 +296,7 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.8" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.41" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.42" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 0e54a978d04..36586678bdf 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_694_000; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_699_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. 
From 54fe39a91bac0c4cd6ed6e6affa8024378139d3c Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Wed, 30 Oct 2024 16:14:44 -0300 Subject: [PATCH 003/245] book: add section about private testnet testing (#8937) * book: add section about private testnet testing * Apply suggestions from code review Co-authored-by: Pili Guerra * Apply suggestions from code review Co-authored-by: Arya --------- Co-authored-by: Pili Guerra Co-authored-by: Arya --- book/src/SUMMARY.md | 1 + book/src/dev/private-testnet.md | 181 ++++++++++++++++++++++++++++++++ 2 files changed, 182 insertions(+) create mode 100644 book/src/dev/private-testnet.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 1ec8dc35d67..a7b018a2b9a 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -36,6 +36,7 @@ - [Generating Zebra Checkpoints](dev/zebra-checkpoints.md) - [Doing Mass Renames](dev/mass-renames.md) - [Updating the ECC dependencies](dev/ecc-updates.md) + - [Running a Private Testnet Test](dev/private-testnet.md) - [Zebra RFCs](dev/rfcs.md) - [Pipelinable Block Lookup](dev/rfcs/0001-pipelinable-block-lookup.md) - [Parallel Verification](dev/rfcs/0002-parallel-verification.md) diff --git a/book/src/dev/private-testnet.md b/book/src/dev/private-testnet.md new file mode 100644 index 00000000000..92386d2c47b --- /dev/null +++ b/book/src/dev/private-testnet.md @@ -0,0 +1,181 @@ +# Private Testnet Test + +The objective of a private Testnet test is to test Testnet activation of an upcoming +network upgrade in an isolated fashion, before the actual Testnet activation. +It is usually done using the current state of the existing Testnet. For NU6, it was done +by ZF and ECC engineers over a call. + +## Steps + +### Make Backup + +Make a backup of your current Testnet state. Rename/copy the `testnet` folder in +Zebra's state cache directory to the lowercase version of the configured network name, +or the default `unknowntestnet` if no network name is explicitly configured. 
+ +### Set Protocol Version + +Double check that Zebra has bumped its protocol version. + +### Set Up Lightwalletd Server + +It's a good idea to set up a lightwalletd server connected to your node, and +have a (Testnet) wallet connected to your lightwalletd server. + +### Connect to Peers + +Make sure everyone can connect to each other. You can **use Tailscale** to do +that. Everyone needs to send invites to everyone else. Note that being able to +access someone's node does not imply that they can access yours, it needs to be +enabled both ways. + +### Choose an Activation Height + +Choose an activation height with the other participants. It should be in +the near future, but with enough time for people to set things up; something +like 30 minutes in the future? + +### Ensure the Activation Height is Set in Code + +While Zebra allows creating a private Testnet in the config file, the height is +also set in some librustzcash crates. For this reason, someone will need to +**create a branch of librustzcash** with the chosen height set and you will need +to **change Zebra to use that**. However, double check if that's still +necessary. + +### Configure Zebra to use a custom testnet + +See sample config file below. The critical part is setting the activation +height. It is good to enable verbose logging to help debug things. Some of the +participants must enable mining also. It's not a huge deal to keep the DNS +seeders; the blockchain will fork when the activation happens and only the +participants will stay connected. On the other hand, if you want to ensure you +won't connect to anyone else, set `cache_dir = false` in the `[network]` section +and delete the peers file (`~/.cache/zebra/network/unknowntestnet.peers`). + +### Run Nodes + +Everyone runs their nodes, and checks if they connect to other nodes. You can use +e.g. 
`curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": +"getpeerinfo", "params": [] }' -H 'Content-Type: application/json' +http://127.0.0.1:8232` to check that. See "Getting Peers" section below. + +### Wait Until Activation Happens + +And monitor logs for behaviour. + +### Do Tests + +Do tests, including sending transactions if possible (which will require the +lightwalletd server). Check if whatever activated in the upgrade works. + + +## Zebra + +Relevant information about Zebra for the testing process. + +### Getting peers + +It seems Zebra is not very reliable at returning its currently connected peers; +you can use `getpeerinfo` RPC as above or check the peers file +(`~/.cache/zebra/network/unknowntestnet.peers`) if `cache_dir = true` in the +`[network]` section. You might want to sort this out before the next private +testnet test. + +### Unredact IPs + +Zebra redacts IPs when logging for privacy reasons. However, for a test like +this it can be annoying. You can disable that by editing `peer_addr.rs` +with something like + + +```diff +--- a/zebra-network/src/meta_addr/peer_addr.rs ++++ b/zebra-network/src/meta_addr/peer_addr.rs +@@ -30,7 +30,7 @@ impl fmt::Display for PeerSocketAddr { + let ip_version = if self.is_ipv4() { "v4" } else { "v6" }; + + // The port is usually not sensitive, and it's useful for debugging. +- f.pad(&format!("{}redacted:{}", ip_version, self.port())) ++ f.pad(&format!("{}:{}", self.ip(), self.port())) + } + } +``` + +### Sample config file + +Note: Zebra's db path will end in "unknowntestnet" instead of "testnet" with +this configuration. 
+ +``` +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true +miner_address = "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" +# if you want to enable mining, which also requires selecting the `internal-miner` compilation feature +internal_miner = true + +[network] +# This will save peers to a file. Take care that it also reads peers from it; +# if you want to be truly isolated and only connect to the other participants, +# either disable this or delete the peers file before starting. +cache_dir = true +crawl_new_peer_interval = "1m 1s" + +initial_mainnet_peers = [] + +initial_testnet_peers = [ + # List the other participant's Tailscale IPs here. + # You can also keep the default DNS seeders if you wish. + "100.10.0.1:18233", +] + +listen_addr = "0.0.0.0:18233" +max_connections_per_ip = 1 +network = "Testnet" +peerset_initial_target_size = 25 + +[network.testnet_parameters] + +[network.testnet_parameters.activation_heights] +BeforeOverwinter = 1 +Overwinter = 207_500 +Sapling = 280_000 +Blossom = 584_000 +Heartwood = 903_800 +Canopy = 1_028_500 +NU5 = 1_842_420 +NU6 = 2_969_920 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 +listen_addr = "127.0.0.1:8232" +indexer_listen_addr = "127.0.0.1:8231" + +[state] +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false +# This enables debug network logging. It can be useful but it's very verbose! 
+filter = 'info,zebra_network=debug' +``` \ No newline at end of file From 83921bc01a5ee8d262384cfed6fd87e0a94ffbc3 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 1 Nov 2024 05:23:42 -0400 Subject: [PATCH 004/245] Adds rust-toolchain.toml (#8985) --- rust-toolchain.toml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 rust-toolchain.toml diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 00000000000..292fe499e3b --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "stable" From 7b317113060db699f7ec0d32aeaffe4eeaca2f28 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 4 Nov 2024 10:44:58 +0000 Subject: [PATCH 005/245] ref: adjust GCP instances resources to better fit requirements (#8986) Previously, most of our deployed instances needed to sync the whole blockchain from genesis, but after implementing the mounting of cached states for the release instances, this is no longer required. Main changes: - Reduce the boot disk size for CD images to 10GB (in CI Zebra might need to rebuild based on test flags, requiring more disk space) - Use `pd-standard` instead of `pd-ssd` for the boot disk - Use `pd-balanced` instead of `pd-ssd` for the mounted disk (where most of the reads and writes happens) - Change our `GCP_SMALL_MACHINE` from `c2-standard-4` (vCPUs: 4, RAM: 16 GiB) to `c2d-standard-2` (vCPUs: 2, RAM: 8 GiB) - Keep long running tests `is_long_test` with `GCP_LARGE_MACHINE` (`c2d-standard-16`) and other with the new `GCP_SMALL_MACHINE` configuration (`c2d-standard-2`) --- .github/workflows/cd-deploy-nodes-gcp.yml | 12 ++++++------ .github/workflows/manual-zcashd-deploy.yml | 4 ++-- .../workflows/sub-deploy-integration-tests-gcp.yml | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 1333816530f..01bad9142c0 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ 
b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -276,7 +276,7 @@ jobs: - name: Create instance template for ${{ matrix.network }} run: | DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" - DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-ssd" + DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" elif [ ${{ inputs.no_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then @@ -287,8 +287,8 @@ jobs: fi gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --boot-disk-size 50GB \ - --boot-disk-type=pd-ssd \ + --boot-disk-size=10GB \ + --boot-disk-type=pd-standard \ --image-project=cos-cloud \ --image-family=cos-stable \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ @@ -388,7 +388,7 @@ jobs: - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad run: | DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" - DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-ssd" + DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" elif [ ${{ inputs.no_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then @@ -399,8 +399,8 @@ jobs: fi gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --boot-disk-size 50GB \ - --boot-disk-type=pd-ssd \ + --boot-disk-size=10GB \ + 
--boot-disk-type=pd-standard \ --image-project=cos-cloud \ --image-family=cos-stable \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 05872f2532d..dfde89b3e86 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -64,8 +64,8 @@ jobs: - name: Create instance template run: | gcloud compute instance-templates create-with-container zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --boot-disk-size 10GB \ - --boot-disk-type=pd-ssd \ + --boot-disk-size=10GB \ + --boot-disk-type=pd-standard \ --image-project=cos-cloud \ --image-family=cos-stable \ --container-stdin \ diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 7266f60ea54..26c13e6044c 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -188,18 +188,18 @@ jobs: shell: /usr/bin/bash -x {0} run: | NAME="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}" - DISK_PARAMS="size=400GB,type=pd-ssd,name=${NAME},device-name=${NAME}" + DISK_PARAMS="size=400GB,type=pd-balanced,name=${NAME},device-name=${NAME}" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" fi gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 50GB \ - --boot-disk-type pd-ssd \ + --boot-disk-size=50GB \ + --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ --create-disk="${DISK_PARAMS}" \ --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ + --machine-type ${{ inputs.is_long_test && vars.GCP_LARGE_MACHINE || vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ 
--scopes cloud-platform \ --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ From 75fae66dc6e4258fe8ac84a1e03117bdde7a3aff Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 4 Nov 2024 18:42:51 +0000 Subject: [PATCH 006/245] fix(ci): fail cache disk creation if no db version is found (#8987) In some cases Zebra logs might not output the database version, and thus we should avoid creating a disk without a version. Before this change, a disk was created without a db version number, just indicating a `-v-`, that caused other tests to fail as an actual version was not found in their regexes. --- .../workflows/sub-deploy-integration-tests-gcp.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 26c13e6044c..600153e6c32 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -503,18 +503,19 @@ jobs: ) if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then + # Check for new database creation + if echo "$DOCKER_LOGS" | grep -q "creating.new.database"; then + INITIAL_DISK_DB_VERSION="new" + else echo "Checked logs:" echo "" echo "$DOCKER_LOGS" echo "" - echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION" + echo "Missing initial disk database version in logs" # Fail the tests, because Zebra didn't log the initial disk database version, # or the regex in this step is wrong. - false + exit 1 fi - - if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then - INITIAL_DISK_DB_VERSION="new" else INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}" fi @@ -538,7 +539,7 @@ jobs: echo "Missing running database version in logs: $RUNNING_DB_VERSION" # Fail the tests, because Zebra didn't log the running database version, # or the regex in this step is wrong. 
- false + exit 1 fi RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}" From 6a825315855a108ec17f31bcff3b0bca65bf5dd9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 11:47:35 +0000 Subject: [PATCH 007/245] build(deps): bump the devops group across 1 directory with 5 updates (#8993) Bumps the devops group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [actions/checkout](https://github.com/actions/checkout) | `4.2.1` | `4.2.2` | | [google-github-actions/auth](https://github.com/google-github-actions/auth) | `2.1.6` | `2.1.7` | | [google-github-actions/setup-gcloud](https://github.com/google-github-actions/setup-gcloud) | `2.1.1` | `2.1.2` | | [jontze/action-mdbook](https://github.com/jontze/action-mdbook) | `3.0.0` | `3.0.1` | | [docker/scout-action](https://github.com/docker/scout-action) | `1.14.0` | `1.15.0` | Updates `actions/checkout` from 4.2.1 to 4.2.2 - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.2.1...v4.2.2) Updates `google-github-actions/auth` from 2.1.6 to 2.1.7 - [Release notes](https://github.com/google-github-actions/auth/releases) - [Changelog](https://github.com/google-github-actions/auth/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/auth/compare/v2.1.6...v2.1.7) Updates `google-github-actions/setup-gcloud` from 2.1.1 to 2.1.2 - [Release notes](https://github.com/google-github-actions/setup-gcloud/releases) - [Changelog](https://github.com/google-github-actions/setup-gcloud/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/setup-gcloud/compare/v2.1.1...v2.1.2) Updates `jontze/action-mdbook` from 3.0.0 to 3.0.1 - [Release notes](https://github.com/jontze/action-mdbook/releases) - 
[Changelog](https://github.com/jontze/action-mdbook/blob/master/CHANGELOG.md) - [Commits](https://github.com/jontze/action-mdbook/compare/v3.0.0...v3.0.1) Updates `docker/scout-action` from 1.14.0 to 1.15.0 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.14.0...v1.15.0) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: google-github-actions/auth dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: google-github-actions/setup-gcloud dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: jontze/action-mdbook dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: docker/scout-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cd-deploy-nodes-gcp.yml | 12 ++++++------ .../workflows/chore-delete-gcp-resources.yml | 10 +++++----- .github/workflows/ci-build-crates.patch.yml | 2 +- .github/workflows/ci-build-crates.yml | 4 ++-- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 10 +++++----- .github/workflows/ci-unit-tests-os.yml | 10 +++++----- .github/workflows/docs-deploy-firebase.yml | 10 +++++----- .../workflows/docs-dockerhub-description.yml | 2 +- .github/workflows/manual-zcashd-deploy.yml | 6 +++--- .github/workflows/release-crates-io.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 6 +++--- .../sub-deploy-integration-tests-gcp.yml | 18 +++++++++--------- .github/workflows/sub-find-cached-disks.yml | 6 +++--- .github/workflows/sub-test-zebra-config.yml | 2 +- 15 files changed, 51 insertions(+), 51 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 01bad9142c0..54259760227 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -242,7 +242,7 @@ jobs: if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false @@ -265,13 +265,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 - name: Create instance template for ${{ matrix.network }} run: | @@ -353,7 +353,7 @@ jobs: if: ${{ !failure() && 
github.event_name == 'workflow_dispatch' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false @@ -376,13 +376,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Create instance template from container image - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 4470d244029..b4e9eda2f64 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -39,20 +39,20 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Deletes all mainnet and testnet instances older than $DELETE_INSTANCE_DAYS days. 
# @@ -106,14 +106,14 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git a/.github/workflows/ci-build-crates.patch.yml b/.github/workflows/ci-build-crates.patch.yml index c4333a86aba..525904507e8 100644 --- a/.github/workflows/ci-build-crates.patch.yml +++ b/.github/workflows/ci-build-crates.patch.yml @@ -23,7 +23,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index 67404f75972..ca66a2bf2c0 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -60,7 +60,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 - uses: r7kamura/rust-problem-matchers@v1.5.0 # Setup Rust with stable toolchain and minimal profile @@ -122,7 +122,7 @@ jobs: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 057cc1cf916..3d1e3b946c5 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -69,7 +69,7 @@ jobs: runs-on: ubuntu-latest-xl steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false diff --git 
a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index b9966de9058..22ec5089c37 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -37,7 +37,7 @@ jobs: rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }} workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false fetch-depth: 0 @@ -69,7 +69,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false @@ -119,7 +119,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -149,7 +149,7 @@ jobs: needs: changed-files if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 - name: actionlint uses: reviewdog/action-actionlint@v1.48.0 with: @@ -166,7 +166,7 @@ jobs: runs-on: ubuntu-latest needs: changed-files steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 - uses: codespell-project/actions-codespell@v2.1 with: only_warn: 1 diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 7c194c51c5e..6fe7238bc29 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -94,7 +94,7 @@ jobs: rust: beta steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -183,7 +183,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -205,7 +205,7 @@ jobs: runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -248,7 +248,7 @@ jobs: continue-on-error: ${{ matrix.checks == 'advisories' }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -269,7 +269,7 @@ jobs: steps: - name: Checkout git repository - uses: actions/checkout@v4.2.1 + uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 1c8ce7fd773..72ffb5c6534 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -85,14 +85,14 @@ jobs: pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v4.2.1 + uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Setup mdBook - uses: jontze/action-mdbook@v3.0.0 + uses: jontze/action-mdbook@v3.0.1 with: token: ${{ secrets.GITHUB_TOKEN }} mdbook-version: '~0.4' @@ -106,7 +106,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' @@ -138,7 +138,7 @@ jobs: pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v4.2.1 + uses: actions/checkout@v4.2.2 with: persist-credentials: false @@ -164,7 +164,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' diff --git 
a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index b96a2e2fb1c..754208d6651 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -17,7 +17,7 @@ jobs: dockerHubDescription: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index dfde89b3e86..6553bdf5ba5 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -29,7 +29,7 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false @@ -52,13 +52,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Create instance template from container image - name: Create instance template diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 60db739b236..4f08917230f 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -70,7 +70,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Checkout git repository - uses: actions/checkout@v4.2.1 + uses: actions/checkout@v4.2.2 with: persist-credentials: false diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 11b6399d625..02444c6e427 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -80,7 +80,7 @@ jobs: env: 
DOCKER_BUILD_SUMMARY: ${{ vars.DOCKER_BUILD_SUMMARY }} steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.5.0 @@ -126,7 +126,7 @@ jobs: - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_ARTIFACTS_SA }}' @@ -193,7 +193,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.14.0 + uses: docker/scout-action@v1.15.0 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 600153e6c32..d121ee9ab20 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -140,7 +140,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false fetch-depth: '2' @@ -172,13 +172,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME env as the source image to populate the disk cached state @@ -390,7 +390,7 @@ jobs: contents: 
'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false fetch-depth: '2' @@ -434,13 +434,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, # and the empty string otherwise. @@ -688,7 +688,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false fetch-depth: '2' @@ -702,13 +702,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Deletes the instances that has been recently deployed in the actual commit after all # previous jobs have run, no matter the outcome of the job. 
diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index a71237887e2..9c2ee919d32 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -58,7 +58,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false fetch-depth: 0 @@ -66,13 +66,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.6 + uses: google-github-actions/auth@v2.1.7 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 + uses: google-github-actions/setup-gcloud@v2.1.2 # Performs formatting on disk name components. # diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 1f8c455b4b9..c0d2532b220 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -38,7 +38,7 @@ jobs: timeout-minutes: 30 runs-on: ubuntu-latest-m steps: - - uses: actions/checkout@v4.2.1 + - uses: actions/checkout@v4.2.2 with: persist-credentials: false From d7fbde3176416613b8710c0ffeb89501f62e86ab Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 5 Nov 2024 12:30:00 +0000 Subject: [PATCH 008/245] fix(actions): do not require the `get-disk-name` job for forks (#8988) --- .github/workflows/cd-deploy-nodes-gcp.patch-external.yml | 6 ++++++ .github/workflows/cd-deploy-nodes-gcp.patch.yml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 8381f0011a0..3b1e2c066cb 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -43,3 +43,9 @@ 
jobs: runs-on: ubuntu-latest steps: - run: 'echo "Skipping job on fork"' + + get-disk-name: + name: Get disk name + runs-on: ubuntu-latest + steps: + - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml index fb963ec3726..6f88cf2cfe2 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -52,3 +52,9 @@ jobs: runs-on: ubuntu-latest steps: - run: 'echo "No build required"' + + get-disk-name: + name: Get disk name + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' From c26c3f2be1213285706b2ba2ccbe5655353fb992 Mon Sep 17 00:00:00 2001 From: idky137 <150072198+idky137@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:15:27 +0000 Subject: [PATCH 009/245] add pub functionality for zaino (#8964) * add pub functionality for zaino * updated doc comment with review suggestion --- zebra-chain/src/parameters/network_upgrade.rs | 6 ++ zebra-rpc/src/methods.rs | 75 ++++++++++++++++++- zebra-rpc/src/methods/trees.rs | 41 +++++++++- 3 files changed, 116 insertions(+), 6 deletions(-) diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs index 57165d0c760..b08cfec520d 100644 --- a/zebra-chain/src/parameters/network_upgrade.rs +++ b/zebra-chain/src/parameters/network_upgrade.rs @@ -163,6 +163,12 @@ impl From for u32 { } } +impl From for ConsensusBranchId { + fn from(branch: u32) -> Self { + ConsensusBranchId(branch) + } +} + impl ToHex for &ConsensusBranchId { fn encode_hex>(&self) -> T { self.bytes_in_display_order().encode_hex() diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 8becc5bb79c..736021c7668 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -1510,11 +1510,23 @@ pub struct AddressBalance { /// A hex-encoded [`ConsensusBranchId`] string. 
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] -struct ConsensusBranchIdHex(#[serde(with = "hex")] ConsensusBranchId); +pub struct ConsensusBranchIdHex(#[serde(with = "hex")] ConsensusBranchId); + +impl ConsensusBranchIdHex { + /// Returns a new instance of ['ConsensusBranchIdHex']. + pub fn new(consensus_branch_id: u32) -> Self { + ConsensusBranchIdHex(consensus_branch_id.into()) + } + + /// Returns the value of the ['ConsensusBranchId']. + pub fn inner(&self) -> u32 { + self.0.into() + } +} /// Information about [`NetworkUpgrade`] activation. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] -struct NetworkUpgradeInfo { +pub struct NetworkUpgradeInfo { /// Name of upgrade, string. /// /// Ignored by lightwalletd, but useful for debugging. @@ -1528,9 +1540,29 @@ struct NetworkUpgradeInfo { status: NetworkUpgradeStatus, } +impl NetworkUpgradeInfo { + /// Constructs [`NetworkUpgradeInfo`] from its constituent parts. + pub fn from_parts( + name: NetworkUpgrade, + activation_height: Height, + status: NetworkUpgradeStatus, + ) -> Self { + Self { + name, + activation_height, + status, + } + } + + /// Returns the contents of ['NetworkUpgradeInfo']. + pub fn into_parts(self) -> (NetworkUpgrade, Height, NetworkUpgradeStatus) { + (self.name, self.activation_height, self.status) + } +} + /// The activation status of a [`NetworkUpgrade`]. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] -enum NetworkUpgradeStatus { +pub enum NetworkUpgradeStatus { /// The network upgrade is currently active. /// /// Includes all network upgrades that have previously activated, @@ -1551,7 +1583,7 @@ enum NetworkUpgradeStatus { /// /// These branch IDs are different when the next block is a network upgrade activation block. 
#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] -struct TipConsensusBranch { +pub struct TipConsensusBranch { /// Branch ID used to validate the current chain tip, big-endian, hex-encoded. #[serde(rename = "chaintip")] chain_tip: ConsensusBranchIdHex, @@ -1561,6 +1593,21 @@ struct TipConsensusBranch { next_block: ConsensusBranchIdHex, } +impl TipConsensusBranch { + /// Constructs [`TipConsensusBranch`] from its constituent parts. + pub fn from_parts(chain_tip: u32, next_block: u32) -> Self { + Self { + chain_tip: ConsensusBranchIdHex::new(chain_tip), + next_block: ConsensusBranchIdHex::new(next_block), + } + } + + /// Returns the contents of ['TipConsensusBranch']. + pub fn into_parts(self) -> (u32, u32) { + (self.chain_tip.inner(), self.next_block.inner()) + } +} + /// Response to a `sendrawtransaction` RPC request. /// /// Contains the hex-encoded hash of the sent transaction. @@ -1793,6 +1840,26 @@ impl Default for GetBlockTrees { } } +impl GetBlockTrees { + /// Constructs a new instance of ['GetBlockTrees']. + pub fn new(sapling: u64, orchard: u64) -> Self { + GetBlockTrees { + sapling: SaplingTrees { size: sapling }, + orchard: OrchardTrees { size: orchard }, + } + } + + /// Returns sapling data held by ['GetBlockTrees']. + pub fn sapling(self) -> u64 { + self.sapling.size + } + + /// Returns orchard data held by ['GetBlockTrees']. + pub fn orchard(self) -> u64 { + self.orchard.size + } +} + /// Sapling note commitment tree information. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] pub struct SaplingTrees { diff --git a/zebra-rpc/src/methods/trees.rs b/zebra-rpc/src/methods/trees.rs index 79059688a92..70838bb719e 100644 --- a/zebra-rpc/src/methods/trees.rs +++ b/zebra-rpc/src/methods/trees.rs @@ -105,6 +105,19 @@ impl GetTreestate { orchard, } } + + /// Returns the contents of ['GetTreeState']. 
+ pub fn into_parts(self) -> (Hash, Height, u32, Option>, Option>) { + ( + self.hash, + self.height, + self.time, + self.sapling + .map(|treestate| treestate.commitments.final_state), + self.orchard + .map(|treestate| treestate.commitments.final_state), + ) + } } impl Default for GetTreestate { @@ -123,12 +136,24 @@ impl Default for GetTreestate { /// /// [1]: https://zcash.github.io/rpc/z_gettreestate.html #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -struct Treestate> { +pub struct Treestate> { /// Contains an Orchard or Sapling serialized note commitment tree, /// hex-encoded. commitments: Commitments, } +impl> Treestate { + /// Returns a new instance of ['Treestate']. + pub fn new(commitments: Commitments) -> Self { + Treestate { commitments } + } + + /// Returns a reference to the commitments. + pub fn inner(&self) -> &Commitments { + &self.commitments + } +} + /// A wrapper that contains either an Orchard or Sapling note commitment tree. /// /// Note that in the original [`z_gettreestate`][1] RPC, [`Commitments`] also @@ -136,9 +161,21 @@ struct Treestate> { /// /// [1]: https://zcash.github.io/rpc/z_gettreestate.html #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -struct Commitments> { +pub struct Commitments> { /// Orchard or Sapling serialized note commitment tree, hex-encoded. #[serde(with = "hex")] #[serde(rename = "finalState")] final_state: Tree, } + +impl> Commitments { + /// Returns a new instance of ['Commitments']. + pub fn new(final_state: Tree) -> Self { + Commitments { final_state } + } + + /// Returns a reference to the final_state. 
+ pub fn inner(&self) -> &Tree { + &self.final_state + } +} From f919da3aca1ef11c6743d65ddb6b18223b5ec502 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Sat, 9 Nov 2024 00:03:39 +0000 Subject: [PATCH 010/245] chore(ci): do not default to `tracing` mode in jobs execution (#9004) --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index d121ee9ab20..af506c4de58 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -185,7 +185,6 @@ jobs: # if the test needs it. - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance - shell: /usr/bin/bash -x {0} run: | NAME="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}" DISK_PARAMS="size=400GB,type=pd-balanced,name=${NAME},device-name=${NAME}" @@ -256,7 +255,6 @@ jobs: # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test id: launch-test - shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -288,7 +286,6 @@ jobs: # Show debug logs if previous job failed - name: Show debug logs if previous job failed if: ${{ failure() }} - shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -315,7 +312,6 @@ jobs: # # Errors in the tests are caught by the final test status job. - name: Check startup logs for ${{ inputs.test_id }} - shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -344,7 +340,6 @@ jobs: # with that status. 
# (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) - name: Result of ${{ inputs.test_id }} test - shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -477,7 +472,6 @@ jobs: # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. - name: Get database versions from logs - shell: /usr/bin/bash -x {0} run: | INITIAL_DISK_DB_VERSION="" RUNNING_DB_VERSION="" @@ -568,7 +562,6 @@ # # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. - name: Get sync height from logs - shell: /usr/bin/bash -x {0} run: | SYNC_HEIGHT="" From edff643c088a5e864aea17bdc4b296428f36815f Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 11 Nov 2024 12:51:51 +0000 Subject: [PATCH 011/245] feat(actions): migrate Mergify to GitHub's Merge Queue (#9005) This is an initial step before completely removing Mergify, to keep the duality between the two to avoid downtime with queue management --- .../cd-deploy-nodes-gcp.patch-external.yml | 15 +++---- .github/workflows/cd-deploy-nodes-gcp.yml | 41 ++++++++++--------- .github/workflows/ci-build-crates.yml | 2 +- .github/workflows/ci-tests.patch-external.yml | 2 +- .github/workflows/ci-tests.yml | 5 ++- .github/workflows/ci-unit-tests-os.yml | 5 ++- .../docs-deploy-firebase.patch-external.yml | 2 +- .github/workflows/docs-deploy-firebase.yml | 4 +- .github/workflows/release-crates-io.yml | 2 +- .../sub-ci-integration-tests-gcp.yml | 10 ++--- .../workflows/sub-ci-unit-tests-docker.yml | 4 +- 11 files changed, 49 insertions(+), 43 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 3b1e2c066cb..0b0cdfa5018 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml
+++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -1,7 +1,7 @@ # Workflow patches for skipping Google Cloud CD deployments on PRs from external repositories. name: Deploy Nodes to GCP -# Run on PRs from external repositories, let them pass, and then Mergify will check them. +# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. # GitHub doesn't support filtering workflows by source branch names, so we have to do it for each # job. on: @@ -13,10 +13,16 @@ on: # `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) + get-disk-name: + name: Get disk name + if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} + runs-on: ubuntu-latest + steps: + - run: 'echo "Skipping job on fork"' + build: name: Build CD Docker / Build images # Only run on PRs from external repositories, skipping ZF branches and tags. - if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} runs-on: ubuntu-latest steps: - run: 'echo "Skipping job on fork"' @@ -44,8 +50,3 @@ jobs: steps: - run: 'echo "Skipping job on fork"' - get-disk-name: - name: Get disk name - runs-on: ubuntu-latest - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 54259760227..459f53b5f7b 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -1,6 +1,6 @@ # Google Cloud node deployments and tests that run when Rust code or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. -# (External PRs are tested/deployed by mergify.) +# (External PRs are tested/deployed by GitHub's Merge Queue.) # # 1. `versioning`: Extracts the major version from the release semver. 
Useful for segregating instances based on major versions. # 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git. @@ -27,6 +27,9 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} on: + merge_group: + types: [ checks_requested ] + workflow_dispatch: inputs: network: @@ -141,15 +144,29 @@ jobs: id: set run: echo "major_version=${{ steps.get.outputs.result }}" >> "$GITHUB_OUTPUT" + # Finds a cached state disk for zebra + # + # Passes the disk name to subsequent jobs using `cached_disk_name` output + # + get-disk-name: + name: Get disk name + uses: ./.github/workflows/sub-find-cached-disks.yml + # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. + # This workflow also runs on release tags, the event name check will run it on releases. + if: ${{ (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && !inputs.no_cached_disk }} + with: + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + disk_prefix: zebrad-cache + disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} + prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} + # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git # # The image will be commonly named `zebrad:` build: name: Build CD Docker - # Skip PRs from external repositories, let them pass, and then Mergify will check them. - # This workflow also runs on release tags, the event name check will run it on releases. 
- if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} + needs: get-disk-name uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile @@ -197,20 +214,6 @@ jobs: test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} - # Finds a cached state disk for zebra - # - # Passes the disk name to subsequent jobs using `cached_disk_name` output - # - get-disk-name: - name: Get disk name - uses: ./.github/workflows/sub-find-cached-disks.yml - if: ${{ !inputs.no_cached_disk }} - with: - network: ${{ inputs.network || vars.ZCASH_NETWORK }} - disk_prefix: zebrad-cache - disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} - prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} - # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. # @@ -422,7 +425,7 @@ jobs: # When a new job is added to this workflow, add it to this list. needs: [ versioning, build, deploy-nodes, deploy-instance ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index ca66a2bf2c0..4498fdec2c9 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -169,7 +169,7 @@ jobs: # When a new job is added to this workflow, add it to this list. 
needs: [ matrix, build ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ci-tests.patch-external.yml b/.github/workflows/ci-tests.patch-external.yml index 48a6ab667b9..8fef3e75889 100644 --- a/.github/workflows/ci-tests.patch-external.yml +++ b/.github/workflows/ci-tests.patch-external.yml @@ -1,7 +1,7 @@ # Workflow patches for skipping CI tests on PRs from external repositories name: Run tests -# Run on PRs from external repositories, let them pass, and then Mergify will check them. +# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. # GitHub doesn't support filtering workflows by source branch names, so we have to do it for each # job. on: diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml index 997bf25eb00..7f0a19dcc06 100644 --- a/.github/workflows/ci-tests.yml +++ b/.github/workflows/ci-tests.yml @@ -10,6 +10,9 @@ concurrency: cancel-in-progress: true on: + merge_group: + types: [checks_requested] + schedule: # Run this job every Friday at mid-day UTC # This is limited to the Zebra and lightwalletd Full Sync jobs @@ -119,7 +122,7 @@ jobs: # testnet when running the image. 
build: name: Build images - # Skip PRs from external repositories, let them pass, and then Mergify will check them + # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-build-docker-image.yml with: diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 6fe7238bc29..ec1b52fd5d8 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -14,6 +14,9 @@ concurrency: cancel-in-progress: true on: + merge_group: + types: [checks_requested] + workflow_dispatch: pull_request: @@ -305,7 +308,7 @@ jobs: # When a new job is added to this workflow, add it to this list. needs: [ test, install-from-lockfile-no-cache, check-cargo-lock, cargo-deny, unused-deps ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/docs-deploy-firebase.patch-external.yml b/.github/workflows/docs-deploy-firebase.patch-external.yml index 8478e4c2ded..3c6d9c16942 100644 --- a/.github/workflows/docs-deploy-firebase.patch-external.yml +++ b/.github/workflows/docs-deploy-firebase.patch-external.yml @@ -1,7 +1,7 @@ # Workflow patches for skipping Google Cloud docs updates on PRs from external repositories. name: Docs -# Run on PRs from external repositories, let them pass, and then Mergify will check them. +# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. 
# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each # job. on: diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 72ffb5c6534..38542be45e9 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -1,5 +1,5 @@ # Google Cloud docs updates that run when docs, Rust code, or dependencies are modified, -# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by mergify.) +# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by GitHub's Merge Queue.) # - Builds and deploys Zebra Book Docs using mdBook, setting up necessary tools and deploying to Firebase. # - Compiles and deploys external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase. @@ -74,7 +74,7 @@ env: jobs: build-docs-book: name: Build and Deploy Zebra Book Docs - # Skip PRs from external repositories, let them pass, and then Mergify will check them + # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} timeout-minutes: 5 runs-on: ubuntu-latest diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 4f08917230f..cbefc90151e 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -112,7 +112,7 @@ jobs: # When a new job is added to this workflow, add it to this list. needs: [ check-release ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) 
if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 3ff5ab1e79a..75de0bfda2a 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -1,5 +1,5 @@ # Google Cloud integration tests that run when Rust code or dependencies are modified, -# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.) +# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by GitHub's Merge Queue.) # # Specific conditions and dependencies are set for each job to ensure they are executed in the correct sequence and under the right circumstances. # Each test has a description of the conditions under which it runs. @@ -36,10 +36,6 @@ on: #! `sub-deploy-integration-tests-gcp.yml` workflow file as inputs. If modified in this file, they must #! also be updated in the `sub-deploy-integration-tests-gcp.yml` file. jobs: - # to also run a job on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - # Check if the cached state disks used by the tests are available for the default network. 
# # The default network is mainnet unless a manually triggered workflow or repository variable @@ -48,7 +44,7 @@ jobs: # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml get-available-disks: name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} - # Skip PRs from external repositories, let them pass, and then Mergify will check them + # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-find-cached-disks.yml with: @@ -554,7 +550,7 @@ jobs: scan-task-commands-test, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index 3f80d24ebbd..dfd8ac9812b 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -1,5 +1,5 @@ # Google Cloud unit tests that run when Rust code or dependencies are modified, -# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.) +# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by GitHub's Merge Queue.) # # This workflow is designed for running various unit tests within Docker containers. # Jobs: @@ -183,7 +183,7 @@ jobs: # Testnet jobs are not in this list, because we expect testnet to fail occasionally. 
needs: [ test-all, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. - # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()? if: failure() && github.event.pull_request == null runs-on: ubuntu-latest From 4eb285de50848f1a4dcebd0fbe353e4f150fd371 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:51:57 +0000 Subject: [PATCH 012/245] build(deps): bump rlespinasse/github-slug-action in the devops group (#9002) Bumps the devops group with 1 update: [rlespinasse/github-slug-action](https://github.com/rlespinasse/github-slug-action). Updates `rlespinasse/github-slug-action` from 4 to 5 - [Release notes](https://github.com/rlespinasse/github-slug-action/releases) - [Commits](https://github.com/rlespinasse/github-slug-action/compare/v4...v5) --- updated-dependencies: - dependency-name: rlespinasse/github-slug-action dependency-type: direct:production update-type: version-update:semver-major dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cd-deploy-nodes-gcp.yml | 4 ++-- .github/workflows/manual-zcashd-deploy.yml | 2 +- .github/workflows/release-crates-io.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- .github/workflows/sub-ci-unit-tests-docker.yml | 8 ++++---- .github/workflows/sub-deploy-integration-tests-gcp.yml | 6 +++--- .github/workflows/sub-test-zebra-config.yml | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 459f53b5f7b..6eb3f10e9d6 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -250,7 +250,7 @@ jobs: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -361,7 +361,7 @@ jobs: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 6553bdf5ba5..8fc5951d142 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index cbefc90151e..27b902d1729 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -75,7 +75,7 @@ jobs: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 
with: short-length: 7 diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 02444c6e427..ee95278b9cf 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -86,7 +86,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index dfd8ac9812b..475072e81c7 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -48,7 +48,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -89,7 +89,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -109,7 +109,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -129,7 +129,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index af506c4de58..05b2c42019f 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -147,7 +147,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: 
rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -392,7 +392,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 @@ -688,7 +688,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index c0d2532b220..41586052cd4 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -43,7 +43,7 @@ jobs: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + uses: rlespinasse/github-slug-action@v5 with: short-length: 7 From ac90773331b64ea016fb1a7bda87815847517ca8 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 14 Nov 2024 12:03:51 +0000 Subject: [PATCH 013/245] ref(mergify): use the the configuration format and keys (#9018) --- .github/mergify.yml | 65 +++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index a0a5b7e282d..7448075cd92 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -3,34 +3,42 @@ # This file can be edited and validated using: # https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/config-editor -queue_rules: - - name: urgent +# Provides a means to set configuration values that act as fallbacks +# for queue_rules and pull_request_rules +defaults: + actions: + squash: + # TODO: Adapt our PR template to use title+body + commit_message: all-commits + + queue_rule: # Allow to update/rebase the original pull request if possible to check its mergeability, # and it does not create a draft PR if not 
needed allow_inplace_checks: True - allow_checks_interruption: False speculative_checks: 1 - batch_size: 8 - # Wait a short time to embark hotfixes together in a merge train - batch_max_wait_time: "2 minutes" - conditions: + batch_size: 20 + # Wait for about 10% of the time it takes Rust PRs to run CI (~1h) + batch_max_wait_time: "10 minutes" + queue_conditions: # Mergify automatically applies status check, approval, and conversation rules, # which are the same as the GitHub main branch protection rules # https://docs.mergify.com/conditions/#about-branch-protection - base=main +# Allows to define the rules that reign over our merge queues +queue_rules: + - name: urgent + batch_size: 8 + # Wait a short time to embark hotfixes together in a merge train + batch_max_wait_time: "2 minutes" + - name: batched - allow_inplace_checks: True - allow_checks_interruption: True - speculative_checks: 1 - batch_size: 20 - # Wait for about 10% of the time it takes Rust PRs to run CI (3h) - batch_max_wait_time: "20 minutes" - conditions: - - base=main +# Rules that will determine which priority a pull request has when entering +# our merge queue +# # These rules are checked in order, the first one to be satisfied applies -pull_request_rules: +priority_rules: - name: move to urgent queue when CI passes with multiple reviews conditions: # This queue handles a PR if it: @@ -45,10 +53,8 @@ pull_request_rules: - -draft # does not include the do-not-merge label - label!=do-not-merge - actions: - queue: - name: urgent - method: squash + allow_checks_interruption: true + priority: high - name: move to urgent queue when CI passes with 1 review conditions: @@ -62,12 +68,9 @@ pull_request_rules: - base=main - -draft - label!=do-not-merge - actions: - queue: - name: urgent - method: squash + priority: high - - name: move to batched queue when CI passes with multiple reviews + - name: move to medium queue when CI passes with multiple reviews conditions: # This queue handles a PR if it: # has 
multiple approving reviewers @@ -77,12 +80,9 @@ pull_request_rules: - base=main - -draft - label!=do-not-merge - actions: - queue: - name: batched - method: squash + priority: medium - - name: move to batched queue when CI passes with 1 review + - name: move to low queue when CI passes with 1 review conditions: # This queue handles a PR if it: # has at least one approving reviewer (branch protection rule) @@ -93,7 +93,4 @@ pull_request_rules: - base=main - -draft - label!=do-not-merge - actions: - queue: - name: batched - method: squash + priority: low From 987a34a6e0a3434696bb91174325f074f9760ec3 Mon Sep 17 00:00:00 2001 From: pinglanlu Date: Thu, 14 Nov 2024 21:15:21 +0800 Subject: [PATCH 014/245] chore: remove redundant words in comment (#9015) Signed-off-by: pinglanlu --- book/src/user/shielded-scan-grpc-server.md | 2 +- zebra-network/src/meta_addr/tests/check.rs | 2 +- zebrad/src/components/mempool/tests/vector.rs | 2 +- zebrad/tests/acceptance.rs | 4 ++-- zebrad/tests/end_of_support.rs | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/book/src/user/shielded-scan-grpc-server.md b/book/src/user/shielded-scan-grpc-server.md index d08bc0df49e..327ec14f861 100644 --- a/book/src/user/shielded-scan-grpc-server.md +++ b/book/src/user/shielded-scan-grpc-server.md @@ -65,7 +65,7 @@ scanner.Scanner.GetResults scanner.Scanner.RegisterKeys ``` -To see the the request and response types for a method, for example the `GetResults` method, try: +To see the request and response types for a method, for example the `GetResults` method, try: ```bash diff --git a/zebra-network/src/meta_addr/tests/check.rs b/zebra-network/src/meta_addr/tests/check.rs index 234a2352567..f24c4a71fc5 100644 --- a/zebra-network/src/meta_addr/tests/check.rs +++ b/zebra-network/src/meta_addr/tests/check.rs @@ -67,7 +67,7 @@ pub(crate) fn sanitize_avoids_leaks(original: &MetaAddr, sanitized: &MetaAddr) { // check the other fields - // Sanitize to the the default state, even though it's 
not serialized + // Sanitize to the default state, even though it's not serialized assert_eq!(sanitized.last_connection_state, Default::default()); // Sanitize to known flags let sanitized_peer_services = diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index c285923fa7d..86848c8bae7 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -888,7 +888,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .await; // Push block 2 to the state. This will increase the tip height past the expected - // tip height that the the tx was verified at. + // tip height that the tx was verified at. state_service .ready() .await diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index cd3572ce3f2..b0949cd336d 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -2555,7 +2555,7 @@ async fn submit_block() -> Result<()> { common::get_block_template_rpcs::submit_block::run().await } -/// Check that the the end of support code is called at least once. +/// Check that the end of support code is called at least once. 
#[test] fn end_of_support_is_checked_at_start() -> Result<()> { let _init_guard = zebra_test::init(); @@ -3474,7 +3474,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { "invalid block with excessive coinbase output value should be rejected" ); - // Use an invalid coinbase transaction (with an output value less than than the `block_subsidy + miner_fees - expected_lockbox_funding_stream`) + // Use an invalid coinbase transaction (with an output value less than the `block_subsidy + miner_fees - expected_lockbox_funding_stream`) let network = base_network_params .clone() .with_post_nu6_funding_streams(ConfiguredFundingStreams { diff --git a/zebrad/tests/end_of_support.rs b/zebrad/tests/end_of_support.rs index 94c4d3cc967..9b28d63ee91 100644 --- a/zebrad/tests/end_of_support.rs +++ b/zebrad/tests/end_of_support.rs @@ -68,7 +68,7 @@ fn end_of_support_date() { )); } -/// Check that the the end of support task is working. +/// Check that the end of support task is working. 
#[tokio::test] #[tracing_test::traced_test] async fn end_of_support_task() -> Result<()> { From 7d37f7705992d4f2ba6b35b46037c0801707ba76 Mon Sep 17 00:00:00 2001 From: Cypher Pepe <125112044+cypherpepe@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:15:26 +0300 Subject: [PATCH 015/245] fix: typos correction docs (#9014) * typo 0005-treestate.md * typo xxxx-block-subsidy.md * typo continuous-delivery.md * typo docker.md * typo shielded-scan.md --- book/src/dev/continuous-delivery.md | 2 +- book/src/dev/rfcs/drafts/0005-treestate.md | 2 +- book/src/dev/rfcs/drafts/xxxx-block-subsidy.md | 2 +- book/src/user/docker.md | 2 +- book/src/user/shielded-scan.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/book/src/dev/continuous-delivery.md b/book/src/dev/continuous-delivery.md index 30c3ed7e86b..c977de01fc3 100644 --- a/book/src/dev/continuous-delivery.md +++ b/book/src/dev/continuous-delivery.md @@ -1,6 +1,6 @@ # Zebra Continuous Delivery -Zebra has an extension of it's continuous integration since it automatically deploys all +Zebra has an extension of its continuous integration since it automatically deploys all code changes to a testing and/or pre-production environment after each PR gets merged into the `main` branch, and on each Zebra `release`. diff --git a/book/src/dev/rfcs/drafts/0005-treestate.md b/book/src/dev/rfcs/drafts/0005-treestate.md index f8c83936da7..67ad62262e8 100644 --- a/book/src/dev/rfcs/drafts/0005-treestate.md +++ b/book/src/dev/rfcs/drafts/0005-treestate.md @@ -96,7 +96,7 @@ parsed and the notes for each tree collected in their appropriate positions, the root of each tree is computed. While the trees are being built, the respective block nullifier sets are updated in memory as note nullifiers are revealed. 
If the rest of the block is validated according to consensus rules, that root is -committed to its own datastructure via our state service (Sprout anchors, +committed to its own data structure via our state service (Sprout anchors, Sapling anchors). Sapling block validation includes comparing the specified FinalSaplingRoot in its block header to the root of the Sapling `NoteCommitment` tree that we have just computed to make sure they match. diff --git a/book/src/dev/rfcs/drafts/xxxx-block-subsidy.md b/book/src/dev/rfcs/drafts/xxxx-block-subsidy.md index 2ab752c4be6..838343ee77d 100644 --- a/book/src/dev/rfcs/drafts/xxxx-block-subsidy.md +++ b/book/src/dev/rfcs/drafts/xxxx-block-subsidy.md @@ -70,7 +70,7 @@ In Zebra the consensus related code lives in the `zebra-consensus` crate. The bl Inside `zebra-consensus/src/block/subsidy/` the following submodules will be created: - `general.rs`: General block reward functions and utilities. -- `founders_reward.rs`: Specific functions related to funders reward. +- `founders_reward.rs`: Specific functions related to founders reward. - `funding_streams.rs`: Specific functions for funding streams. In addition to calculations the block subsidy requires constants defined in the protocol. The implementation will also create additional constants, all of them will live at: diff --git a/book/src/user/docker.md b/book/src/user/docker.md index dbaf726cf9a..d77c234b537 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -177,6 +177,6 @@ Specific tests are defined in `docker/test.env` file and can be enabled by setti ## Registries -The images built by the Zebra team are all publicly hosted. Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml) (`zebrad-test`, `lighwalletd`) might be deleted on a scheduled basis. +The images built by the Zebra team are all publicly hosted. 
Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml) (`zebrad-test`, `lightwalletd`) might be deleted on a scheduled basis. We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zfnd-dev-zebra/us/zebra) to build external tools and test images. diff --git a/book/src/user/shielded-scan.md b/book/src/user/shielded-scan.md index dff3e599ed8..7358b65d782 100644 --- a/book/src/user/shielded-scan.md +++ b/book/src/user/shielded-scan.md @@ -100,4 +100,4 @@ ldb --db="$HOME/.cache/zebra/private-scan/v1/mainnet" --secondary_path= --column Some of the output will be markers the scanner uses to keep track of progress, however, some of them will be transactions found. -To lean more about how to filter the database please refer to [RocksDB Administration and Data Access Tool](https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool) +To learn more about how to filter the database please refer to [RocksDB Administration and Data Access Tool](https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool) From 2a6e184086adaaf640cf79f5ca46cb249baea318 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 18 Nov 2024 11:08:58 +0000 Subject: [PATCH 016/245] fix(mergify): remove deprecated `speculative_checks` option (#9033) While using Mergify's configuration checker, this issue was not raised, but it's now showing in some PR's summaries, as in https://github.com/ZcashFoundation/zebra/pull/9026/checks?check_run_id=33066692861 --- .github/mergify.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 7448075cd92..ebce6885ebf 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -3,6 +3,10 @@ # This file can be edited and validated using: # 
https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/config-editor +# Set the maximum number of PRs that can be checked in parallel in a queue +merge_queue: + max_parallel_checks: 5 + # Provides a means to set configuration values that act as fallbacks # for queue_rules and pull_request_rules defaults: @@ -15,7 +19,6 @@ defaults: # Allow to update/rebase the original pull request if possible to check its mergeability, # and it does not create a draft PR if not needed allow_inplace_checks: True - speculative_checks: 1 batch_size: 20 # Wait for about 10% of the time it takes Rust PRs to run CI (~1h) batch_max_wait_time: "10 minutes" From 77f14601c1f194ed1c80c4d3ed49f8eb0806a06a Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 18 Nov 2024 08:36:35 -0300 Subject: [PATCH 017/245] chore: update CHANGELOG to better convey the 2.0.0 issue (#9007) * chore: update CHANGELOG to better convey the 2.0.0 issue * Apply suggestions from code review Co-authored-by: Arya --------- Co-authored-by: Alfredo Garcia Co-authored-by: Arya --- CHANGELOG.md | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17b898a38dc..1a345971d3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,39 +8,41 @@ and this project adheres to [Semantic Versioning](https://semver.org). ## [Zebra 2.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.1) - 2024-10-30 - Zebra now supports NU6 on Mainnet. This patch release updates dependencies - required for NU6. + required for NU6. The 2.0.0 release was pointing to the incorrect dependencies + and would panic on NU6 activation. ### Breaking Changes - The JSON RPC endpoint has cookie-based authentication enabled by default. 
- -### Added - -- NU6-related documentation - ([#8949](https://github.com/ZcashFoundation/zebra/pull/8949)) -- A cookie-based authentication system for the JSON RPC endpoint - ([#8900](https://github.com/ZcashFoundation/zebra/pull/8900), - [#8965](https://github.com/ZcashFoundation/zebra/pull/8965)) + **If you rely on Zebra RPC, you will need to adjust your config.** The + simplest change is to disable authentication by adding `enable_cookie_auth = + false` to the `[rpc]` section of the Zebra config file; [refer to the + docs for more information](https://zebra.zfnd.org/user/lightwalletd.html#json-rpc) (this was added + in v2.0.0, but is being mentioned again here for clarity). ### Changed -- Set the activation height of NU6 for Mainnet and bump Zebra's current network - protocol version - ([#8960](https://github.com/ZcashFoundation/zebra/pull/8960)) +- Use ECC deps with activation height for NU6 + ([#8960](https://github.com/ZcashFoundation/zebra/pull/8978)) ### Contributors Thank you to everyone who contributed to this release, we couldn't make Zebra without you: @arya2, @gustavovalverde, @oxarbitrage and @upbqdn. -## [Zebra 2.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.0) - 2024-10-25 +## [Zebra 2.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.0) - 2024-10-25 - [YANKED] -This release brings full support for NU6. +This release was intended to support NU6 but was pointing to the wrong version +of dependencies which would make Zebra panic at NU6 activation. Use v2.0.1 instead. ### Breaking Changes - Zebra now supports NU6 on Mainnet. - The JSON RPC endpoint has a cookie-based authentication enabled by default. + **If you rely on Zebra RPC, you will need to adjust your config.** The + simplest change is to disable authentication by adding `enable_cookie_auth = + false` to the `[rpc]` section of the Zebra config file; [refer to the + docs](https://zebra.zfnd.org/user/lightwalletd.html#json-rpc). 
### Added @@ -109,7 +111,7 @@ by syncing Zebra from scratch, or by using the `copy-state` command to create a command, first make a copy Zebra's Testnet configuration with a different cache directory path, for example, if Zebra's configuration is at the default path, by running `cp ~/.config/zebrad.toml ./zebrad-copy-target.toml`, then opening the new configuration file and editing the `cache_dir` path in the `state` section. Once there's a copy of Zebra's configuration with the new state cache directory path, run: -`zebrad copy-state --target-config-path "./zebrad-copy-target.toml" --max-source-height "2975999"`, and then update the original +`zebrad copy-state --target-config-path "./zebrad-copy-target.toml" --max-source-height "2975999"`, and then update the original Zebra configuration to use the new state cache directory. ### Added @@ -155,7 +157,7 @@ Thank you to everyone who contributed to this release, we couldn't make Zebra wi - Support for custom Testnets and Regtest is greatly enhanced. - Windows is now back in the second tier of supported platforms. - The end-of-support time interval is set to match `zcashd`'s 16 weeks. -- The RPC serialization of empty treestates matches `zcashd`. +- The RPC serialization of empty treestates matches `zcashd`. ### Added @@ -221,11 +223,11 @@ Thank you to everyone who contributed to this release, we couldn't make Zebra wi ## [Zebra 1.6.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.6.1) - 2024-04-15 -This release adds an OpenAPI specification for Zebra's RPC methods and startup logs about Zebra's storage usage and other database information. +This release adds an OpenAPI specification for Zebra's RPC methods and startup logs about Zebra's storage usage and other database information. 
It also includes: - Bug fixes and improved error messages for some zebra-scan gRPC methods -- A performance improvement in Zebra's `getblock` RPC method +- A performance improvement in Zebra's `getblock` RPC method ### Added From 1dfac407510154b5128b08ea0863b7cba730950e Mon Sep 17 00:00:00 2001 From: Arya Date: Mon, 18 Nov 2024 07:16:21 -0500 Subject: [PATCH 018/245] add(mempool): Verify transactions with unmined inputs in the mempool (#8857) * Adds a parameter to `zebra_consensus::router::init()` for accepting a mempool setup argument, adds and uses an `init_test()` fn for passing a closed channel receiver in tests where no mempool service is needed in the transaction verifier. * Adds a `mempool` argument to the transaction::Verifier constructor (and a `new_for_tests()` constructor for convenience) * Removes `Clone` impl on `transaction::Verifier` to add mempool oneshot receiver, updates tests. * Adds TODOs * updates transaction verifier's poll_ready() method to setup the mempool service handle. * Updates VerifiedSet struct used in mempool storage * Updates mempool service and its `Storage` to use the updated `VerifiedSet` `transactions()` return type. * updates `created_outputs` when inserting or removing a transaction from the mempool's verified set * Adds a TODO, updates field docs * Updates `spent_utxos()` to query the mempool for unspent outputs * Adds `spent_mempool_outpoints` as a field on tx verifier mempool response * Updates mempool `Downloads` to return the spent_mempool_outpoints from the tx verifier response * Updates `Storage.insert()` to accept a list of spent mempool transaction outputs * Adds transaction dependencies when inserting a tx in `VerifiedSet` * polls mempool svc from tx verifier when a mempool tx that creates transparent outputs has been verified. 
adds a TODO for adding a `pending_outputs` field to the mempool Storage * Adds `pending_outputs` field on mempool Storage and responds to pending outputs requests when inserting new transactions into the mempool's verified set * replaces `UnminedTxId` type with `transaction::Hash` in mempool's verified set * prune pending outputs when rejecting and removing same effects. * Remove dependent transactions from verified set when removing a tx * updates tests * appeases clippy. * removes unused `len()` method * fixes doc links * Adds transaction dependencies to the `FullTransactions` response, let the caller handle it (required to avoid moving zip317 tx selection code to mempool) * updates block template construction to avoid including transactions unless their dependencies have already been added. * updates tests * Replaces placeholder setup channel with one that sends the mempool svc to the tx verifier, adds a timeout layer, adds a TODO about a concurrency bug * Use a single query to check for unspent outputs in the mempool * Updates `getblocktemplate` method to consider dependencies when sorting transactions for the final template * fixes clippy lints, removes unnecessary Option in UnspentOutput response variant * renames type alias and method, adds a TODO to use iteration instead of recursion * Adds mempool_removes_dependent_transactions() test * Updates Storage and VerifiedSet clear() methods to clear pending_outputs, created_outputs, and transaction_dependencies, adds TODO to use iteration instead of recursion. 
* removes outdated TODO * Adds a TODO for reporting queued transaction verification results from the mempool from the poll_ready() method * Adds `mempool_responds_to_await_output` test * updates mempool_responds_to_await_output test * Uses iteration instead of recursion in verified set's remove() method and zip317 mod's dependencies_depth() method * Adds a mempool_request_with_mempool_output_is_accepted test for the transaction verifier * Moves delay duration before polling the mempool to a constant, uses a shorter timeout for mempool output lookups, adds a `poll_count` to MockService, and updates `mempool_request_with_unmined_output_spends_is_accepted` to check that the transaction verifier polls the mempool after verifying a mempool transaction with transparent outputs * adds long_poll_input_mempool_tx_ids_are_sorted test * Adds a `excludes_tx_with_unselected_dependencies` test * Updates a TODO * moves `TransactionDependencies` struct to `zebra-node-services` * Updates `FullTransactions` response variant's `transaction_dependencies` type * updates zip317 transaction selection for block templates to include dependent transactions * Moves and refactors zip317 tx selection test to its own module, adds an `unmined_transactions_in_blocks()` method on network * Removes `unmined_transactions_in_blocks()` test utility fn from mempool Storage test module and replaces calls to it with calls to the new test method on Network * Fixes spelling mistake * Adds `includes_tx_with_selected_dependencies` test * fixes zip317 block construction issue * Fixes vectors test * Update zebra-node-services/src/mempool.rs * restores `tip_rejected_exact` type * updates affected tests * Documents the new argument in `Storage::insert()`, updates outdated comment * Update zebrad/src/components/mempool/storage/verified_set.rs * fixes potential issue with calling buffered mempool's poll_ready() method without calling it. 
* Avoids removing dependent transactions of transactions that have been mined onto the best chain. * Updates `spent_utxos()` method documentation * Avoids sorting getblocktemplate transactions in non-test compilations * documents PendingOutputs struct * Apply suggestions from code review Co-authored-by: Marek * cargo fmt * Applies suggestions from code review Avoids unnecessarily rejecting dependent transactions of randomly evicted mempool transactions. Updates `TransactionDependencies::remove_all()` to omit provided transaction id from the list of removed transaction ids. * Applies suggestions from code review. * Adds minor comments * Update zebrad/src/components/mempool/storage/verified_set.rs Co-authored-by: Marek * Remove an outdated comment (#9013) --------- Co-authored-by: Marek --- zebra-chain/src/tests/vectors.rs | 43 +++- zebra-consensus/src/block/tests.rs | 2 +- zebra-consensus/src/router.rs | 48 +++- zebra-consensus/src/router/tests.rs | 4 +- zebra-consensus/src/transaction.rs | 211 +++++++++++++--- zebra-consensus/src/transaction/tests.rs | 227 ++++++++++++++---- zebra-consensus/src/transaction/tests/prop.rs | 9 +- zebra-node-services/src/mempool.rs | 30 ++- .../src/mempool/transaction_dependencies.rs | 124 ++++++++++ zebra-rpc/src/methods.rs | 1 + .../src/methods/get_block_template_rpcs.rs | 28 ++- .../get_block_template.rs | 9 +- .../types/get_block_template.rs | 61 +++-- .../types/long_poll.rs | 33 ++- .../methods/get_block_template_rpcs/zip317.rs | 226 +++++++++++++++-- .../get_block_template_rpcs/zip317/tests.rs | 116 +++++++++ zebra-rpc/src/methods/tests/prop.rs | 1 + zebra-rpc/src/methods/tests/snapshot.rs | 1 + .../tests/snapshot/get_block_template_rpcs.rs | 9 +- zebra-rpc/src/methods/tests/vectors.rs | 41 +++- zebra-test/src/mock_service.rs | 23 +- zebrad/src/commands/start.rs | 6 + .../components/inbound/tests/fake_peer_set.rs | 19 +- zebrad/src/components/mempool.rs | 49 +++- zebrad/src/components/mempool/downloads.rs | 33 ++- 
.../src/components/mempool/pending_outputs.rs | 65 +++++ zebrad/src/components/mempool/storage.rs | 156 +++++++----- .../src/components/mempool/storage/tests.rs | 43 +--- .../components/mempool/storage/tests/prop.rs | 26 +- .../mempool/storage/tests/vectors.rs | 137 +++++++++-- .../mempool/storage/verified_set.rs | 209 ++++++++++------ zebrad/src/components/mempool/tests/prop.rs | 6 +- zebrad/src/components/mempool/tests/vector.rs | 153 +++++++++++- zebrad/tests/acceptance.rs | 12 +- 34 files changed, 1742 insertions(+), 419 deletions(-) create mode 100644 zebra-node-services/src/mempool/transaction_dependencies.rs create mode 100644 zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs create mode 100644 zebrad/src/components/mempool/pending_outputs.rs diff --git a/zebra-chain/src/tests/vectors.rs b/zebra-chain/src/tests/vectors.rs index deb2a507707..ef9d00f6d99 100644 --- a/zebra-chain/src/tests/vectors.rs +++ b/zebra-chain/src/tests/vectors.rs @@ -1,9 +1,15 @@ //! Network methods for fetching blockchain vectors. //! -use std::collections::BTreeMap; +use std::{collections::BTreeMap, ops::RangeBounds}; -use crate::{block::Block, parameters::Network, serialization::ZcashDeserializeInto}; +use crate::{ + amount::Amount, + block::Block, + parameters::Network, + serialization::ZcashDeserializeInto, + transaction::{UnminedTx, VerifiedUnminedTx}, +}; use zebra_test::vectors::{ BLOCK_MAINNET_1046400_BYTES, BLOCK_MAINNET_653599_BYTES, BLOCK_MAINNET_982681_BYTES, @@ -30,6 +36,39 @@ impl Network { } } + /// Returns iterator over verified unmined transactions in the provided block height range. + pub fn unmined_transactions_in_blocks( + &self, + block_height_range: impl RangeBounds, + ) -> impl DoubleEndedIterator { + let blocks = self.block_iter(); + + // Deserialize the blocks that are selected based on the specified `block_height_range`. 
+ let selected_blocks = blocks + .filter(move |(&height, _)| block_height_range.contains(&height)) + .map(|(_, block)| { + block + .zcash_deserialize_into::() + .expect("block test vector is structurally valid") + }); + + // Extract the transactions from the blocks and wrap each one as an unmined transaction. + // Use a fake zero miner fee and sigops, because we don't have the UTXOs to calculate + // the correct fee. + selected_blocks + .flat_map(|block| block.transactions) + .map(UnminedTx::from) + // Skip transactions that fail ZIP-317 mempool checks + .filter_map(|transaction| { + VerifiedUnminedTx::new( + transaction, + Amount::try_from(1_000_000).expect("invalid value"), + 0, + ) + .ok() + }) + } + /// Returns blocks indexed by height in a [`BTreeMap`]. /// /// Returns Mainnet blocks if `self` is set to Mainnet, and Testnet blocks otherwise. diff --git a/zebra-consensus/src/block/tests.rs b/zebra-consensus/src/block/tests.rs index e6eb6f2c4b9..eea5f40015e 100644 --- a/zebra-consensus/src/block/tests.rs +++ b/zebra-consensus/src/block/tests.rs @@ -137,7 +137,7 @@ async fn check_transcripts() -> Result<(), Report> { let network = Network::Mainnet; let state_service = zebra_state::init_test(&network); - let transaction = transaction::Verifier::new(&network, state_service.clone()); + let transaction = transaction::Verifier::new_for_tests(&network, state_service.clone()); let transaction = Buffer::new(BoxService::new(transaction), 1); let block_verifier = Buffer::new( SemanticBlockVerifier::new(&network, state_service.clone(), transaction), diff --git a/zebra-consensus/src/router.rs b/zebra-consensus/src/router.rs index ba42896e56f..38819d0b245 100644 --- a/zebra-consensus/src/router.rs +++ b/zebra-consensus/src/router.rs @@ -21,7 +21,7 @@ use std::{ use futures::{FutureExt, TryFutureExt}; use thiserror::Error; -use tokio::task::JoinHandle; +use tokio::{sync::oneshot, task::JoinHandle}; use tower::{buffer::Buffer, util::BoxService, Service, ServiceExt}; use 
tracing::{instrument, Instrument, Span}; @@ -30,6 +30,7 @@ use zebra_chain::{ parameters::Network, }; +use zebra_node_services::mempool; use zebra_state as zs; use crate::{ @@ -230,11 +231,12 @@ where /// Block and transaction verification requests should be wrapped in a timeout, /// so that out-of-order and invalid requests do not hang indefinitely. /// See the [`router`](`crate::router`) module documentation for details. -#[instrument(skip(state_service))] -pub async fn init( +#[instrument(skip(state_service, mempool))] +pub async fn init( config: Config, network: &Network, mut state_service: S, + mempool: oneshot::Receiver, ) -> ( Buffer, Request>, Buffer< @@ -247,6 +249,11 @@ pub async fn init( where S: Service + Send + Clone + 'static, S::Future: Send + 'static, + Mempool: Service + + Send + + Clone + + 'static, + Mempool::Future: Send + 'static, { // Give other tasks priority before spawning the checkpoint task. tokio::task::yield_now().await; @@ -333,7 +340,7 @@ where // transaction verification - let transaction = transaction::Verifier::new(network, state_service.clone()); + let transaction = transaction::Verifier::new(network, state_service.clone(), mempool); let transaction = Buffer::new(BoxService::new(transaction), VERIFIER_BUFFER_BOUND); // block verification @@ -397,3 +404,36 @@ pub struct BackgroundTaskHandles { /// Finishes when all the checkpoints are verified, or when the state tip is reached. pub state_checkpoint_verify_handle: JoinHandle<()>, } + +/// Calls [`init`] with a closed mempool setup channel for conciseness in tests. +/// +/// See [`init`] for more details. 
+#[cfg(any(test, feature = "proptest-impl"))] +pub async fn init_test( + config: Config, + network: &Network, + state_service: S, +) -> ( + Buffer, Request>, + Buffer< + BoxService, + transaction::Request, + >, + BackgroundTaskHandles, + Height, +) +where + S: Service + Send + Clone + 'static, + S::Future: Send + 'static, +{ + init( + config.clone(), + network, + state_service.clone(), + oneshot::channel::< + Buffer, mempool::Request>, + >() + .1, + ) + .await +} diff --git a/zebra-consensus/src/router/tests.rs b/zebra-consensus/src/router/tests.rs index 8fe304e3364..063cc7394cf 100644 --- a/zebra-consensus/src/router/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -68,7 +68,7 @@ async fn verifiers_from_network( _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = crate::router::init(Config::default(), &network, state_service.clone()).await; + ) = crate::router::init_test(Config::default(), &network, state_service.clone()).await; // We can drop the download task handle here, because: // - if the download task fails, the tests will panic, and @@ -169,7 +169,7 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = super::init(config.clone(), &network, zs::init_test(&network)).await; + ) = super::init_test(config.clone(), &network, zs::init_test(&network)).await; // Add a timeout layer let block_verifier_router = diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 1c303003615..aac77a055d6 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -6,6 +6,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, + time::Duration, }; use chrono::{DateTime, Utc}; @@ -13,7 +14,13 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; -use tower::{timeout::Timeout, Service, ServiceExt}; +use tokio::sync::oneshot; +use tower::{ + buffer::Buffer, + 
timeout::{error::Elapsed, Timeout}, + util::BoxService, + Service, ServiceExt, +}; use tracing::Instrument; use zebra_chain::{ @@ -26,9 +33,10 @@ use zebra_chain::{ transaction::{ self, HashType, SigHash, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx, }, - transparent::{self, OrderedUtxo}, + transparent, }; +use zebra_node_services::mempool; use zebra_script::CachedFfiTransaction; use zebra_state as zs; @@ -52,6 +60,23 @@ mod tests; /// chain in the correct order.) const UTXO_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(6 * 60); +/// A timeout applied to output lookup requests sent to the mempool. This is shorter than the +/// timeout for the state UTXO lookups because a block is likely to be mined every 75 seconds +/// after Blossom is active, changing the best chain tip and requiring re-verification of transactions +/// in the mempool. +/// +/// This is how long Zebra will wait for an output to be added to the mempool before verification +/// of the transaction that spends it will fail. +const MEMPOOL_OUTPUT_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); + +/// How long to wait after responding to a mempool request with a transaction that creates new +/// transparent outputs before polling the mempool service so that it will try adding the verified +/// transaction and responding to any potential `AwaitOutput` requests. +/// +/// This should be long enough for the mempool service's `Downloads` to finish processing the +/// response from the transaction verifier. +const POLL_MEMPOOL_DELAY: std::time::Duration = Duration::from_millis(50); + /// Asynchronous transaction verification. /// /// # Correctness @@ -59,24 +84,55 @@ const UTXO_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs( /// Transaction verification requests should be wrapped in a timeout, so that /// out-of-order and invalid requests do not hang indefinitely. 
See the [`router`](`crate::router`) /// module documentation for details. -#[derive(Debug, Clone)] -pub struct Verifier { +pub struct Verifier { network: Network, state: Timeout, + // TODO: Use an enum so that this can either be Pending(oneshot::Receiver) or Initialized(MempoolService) + mempool: Option>, script_verifier: script::Verifier, + mempool_setup_rx: oneshot::Receiver, } -impl Verifier +impl Verifier where ZS: Service + Send + Clone + 'static, ZS::Future: Send + 'static, + Mempool: Service + + Send + + Clone + + 'static, + Mempool::Future: Send + 'static, { /// Create a new transaction verifier. - pub fn new(network: &Network, state: ZS) -> Self { + pub fn new(network: &Network, state: ZS, mempool_setup_rx: oneshot::Receiver) -> Self { Self { network: network.clone(), state: Timeout::new(state, UTXO_LOOKUP_TIMEOUT), + mempool: None, script_verifier: script::Verifier, + mempool_setup_rx, + } + } +} + +impl + Verifier< + ZS, + Buffer, mempool::Request>, + > +where + ZS: Service + Send + Clone + 'static, + ZS::Future: Send + 'static, +{ + /// Create a new transaction verifier with a closed channel receiver for mempool setup for tests. + #[cfg(test)] + pub fn new_for_tests(network: &Network, state: ZS) -> Self { + Self { + network: network.clone(), + state: Timeout::new(state, UTXO_LOOKUP_TIMEOUT), + mempool: None, + script_verifier: script::Verifier, + mempool_setup_rx: oneshot::channel().1, } } } @@ -156,12 +212,24 @@ pub enum Response { /// [`Response::Mempool`] responses are uniquely identified by the /// [`UnminedTxId`] variant for their transaction version. transaction: VerifiedUnminedTx, + + /// A list of spent [`transparent::OutPoint`]s that were found in + /// the mempool's list of `created_outputs`. + /// + /// Used by the mempool to determine dependencies between transactions + /// in the mempool and to avoid adding transactions with missing spends + /// to its verified set. 
+ spent_mempool_outpoints: Vec, }, } +#[cfg(any(test, feature = "proptest-impl"))] impl From for Response { fn from(transaction: VerifiedUnminedTx) -> Self { - Response::Mempool { transaction } + Response::Mempool { + transaction, + spent_mempool_outpoints: Vec::new(), + } } } @@ -228,14 +296,6 @@ impl Request { } impl Response { - /// The verified mempool transaction, if this is a mempool response. - pub fn into_mempool_transaction(self) -> Option { - match self { - Response::Block { .. } => None, - Response::Mempool { transaction, .. } => Some(transaction), - } - } - /// The unmined transaction ID for the transaction in this response. pub fn tx_id(&self) -> UnminedTxId { match self { @@ -276,10 +336,15 @@ impl Response { } } -impl Service for Verifier +impl Service for Verifier where ZS: Service + Send + Clone + 'static, ZS::Future: Send + 'static, + Mempool: Service + + Send + + Clone + + 'static, + Mempool::Future: Send + 'static, { type Response = Response; type Error = TransactionError; @@ -287,6 +352,14 @@ where Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + // Note: The block verifier expects the transaction verifier to always be ready. + + if self.mempool.is_none() { + if let Ok(mempool) = self.mempool_setup_rx.try_recv() { + self.mempool = Some(Timeout::new(mempool, MEMPOOL_OUTPUT_LOOKUP_TIMEOUT)); + } + } + Poll::Ready(Ok(())) } @@ -295,6 +368,7 @@ where let script_verifier = self.script_verifier; let network = self.network.clone(); let state = self.state.clone(); + let mempool = self.mempool.clone(); let tx = req.transaction(); let tx_id = req.tx_id(); @@ -370,8 +444,8 @@ where // Load spent UTXOs from state. // The UTXOs are required for almost all the async checks. 
let load_spent_utxos_fut = - Self::spent_utxos(tx.clone(), req.known_utxos(), req.is_mempool(), state.clone()); - let (spent_utxos, spent_outputs) = load_spent_utxos_fut.await?; + Self::spent_utxos(tx.clone(), req.clone(), state.clone(), mempool.clone(),); + let (spent_utxos, spent_outputs, spent_mempool_outpoints) = load_spent_utxos_fut.await?; // WONTFIX: Return an error for Request::Block as well to replace this check in // the state once #2336 has been implemented? @@ -473,7 +547,22 @@ where ), legacy_sigop_count, )?; - Response::Mempool { transaction } + + if let Some(mut mempool) = mempool { + if !transaction.transaction.transaction.outputs().is_empty() { + tokio::spawn(async move { + tokio::time::sleep(POLL_MEMPOOL_DELAY).await; + let _ = mempool + .ready() + .await + .expect("mempool poll_ready() method should not return an error") + .call(mempool::Request::CheckForVerifiedTransactions) + .await; + }); + } + } + + Response::Mempool { transaction, spent_mempool_outpoints } }, }; @@ -488,10 +577,15 @@ where } } -impl Verifier +impl Verifier where ZS: Service + Send + Clone + 'static, ZS::Future: Send + 'static, + Mempool: Service + + Send + + Clone + + 'static, + Mempool::Future: Send + 'static, { /// Fetches the median-time-past of the *next* block after the best state tip. /// @@ -514,33 +608,42 @@ where } } - /// Wait for the UTXOs that are being spent by the given transaction. + /// Waits for the UTXOs that are being spent by the given transaction to arrive in + /// the state for [`Block`](Request::Block) requests. /// - /// `known_utxos` are additional UTXOs known at the time of validation (i.e. - /// from previous transactions in the block). + /// Looks up UTXOs that are being spent by the given transaction in the state or waits + /// for them to be added to the mempool for [`Mempool`](Request::Mempool) requests. 
/// - /// Returns a tuple with a OutPoint -> Utxo map, and a vector of Outputs - /// in the same order as the matching inputs in the transaction. + /// Returns a triple containing: + /// - `OutPoint` -> `Utxo` map, + /// - vec of `Output`s in the same order as the matching inputs in the `tx`, + /// - vec of `Outpoint`s spent by a mempool `tx` that were not found in the best chain's utxo set. async fn spent_utxos( tx: Arc, - known_utxos: Arc>, - is_mempool: bool, + req: Request, state: Timeout, + mempool: Option>, ) -> Result< ( HashMap, Vec, + Vec, ), TransactionError, > { + let is_mempool = req.is_mempool(); + // Additional UTXOs known at the time of validation, + // i.e., from previous transactions in the block. + let known_utxos = req.known_utxos(); + let inputs = tx.inputs(); let mut spent_utxos = HashMap::new(); let mut spent_outputs = Vec::new(); + let mut spent_mempool_outpoints = Vec::new(); + for input in inputs { if let transparent::Input::PrevOut { outpoint, .. } = input { tracing::trace!("awaiting outpoint lookup"); - // Currently, Zebra only supports known UTXOs in block transactions. - // But it might support them in the mempool in future. let utxo = if let Some(output) = known_utxos.get(outpoint) { tracing::trace!("UXTO in known_utxos, discarding query"); output.utxo.clone() @@ -548,11 +651,17 @@ where let query = state .clone() .oneshot(zs::Request::UnspentBestChainUtxo(*outpoint)); - if let zebra_state::Response::UnspentBestChainUtxo(utxo) = query.await? { - utxo.ok_or(TransactionError::TransparentInputNotFound)? - } else { + + let zebra_state::Response::UnspentBestChainUtxo(utxo) = query.await? 
else { unreachable!("UnspentBestChainUtxo always responds with Option") - } + }; + + let Some(utxo) = utxo else { + spent_mempool_outpoints.push(*outpoint); + continue; + }; + + utxo } else { let query = state .clone() @@ -570,7 +679,41 @@ where continue; } } - Ok((spent_utxos, spent_outputs)) + + if let Some(mempool) = mempool { + for &spent_mempool_outpoint in &spent_mempool_outpoints { + let query = mempool + .clone() + .oneshot(mempool::Request::AwaitOutput(spent_mempool_outpoint)); + + let output = match query.await { + Ok(mempool::Response::UnspentOutput(output)) => output, + Ok(_) => unreachable!("UnspentOutput always responds with UnspentOutput"), + Err(err) => { + return match err.downcast::() { + Ok(_) => Err(TransactionError::TransparentInputNotFound), + Err(err) => Err(err.into()), + }; + } + }; + + spent_outputs.push(output.clone()); + spent_utxos.insert( + spent_mempool_outpoint, + // Assume the Utxo height will be next height after the best chain tip height + // + // # Correctness + // + // If the tip height changes while an umined transaction is being verified, + // the transaction must be re-verified before being added to the mempool. 
+ transparent::Utxo::new(output, req.height(), false), + ); + } + } else if !spent_mempool_outpoints.is_empty() { + return Err(TransactionError::TransparentInputNotFound); + } + + Ok((spent_utxos, spent_outputs, spent_mempool_outpoints)) } /// Accepts `request`, a transaction verifier [`&Request`](Request), diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 0a4c21bb039..d42bbb8594c 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -7,7 +7,7 @@ use std::{collections::HashMap, sync::Arc}; use chrono::{DateTime, TimeZone, Utc}; use color_eyre::eyre::Report; use halo2::pasta::{group::ff::PrimeField, pallas}; -use tower::{service_fn, ServiceExt}; +use tower::{buffer::Buffer, service_fn, ServiceExt}; use zebra_chain::{ amount::{Amount, NonNegative}, @@ -28,10 +28,11 @@ use zebra_chain::{ transparent::{self, CoinbaseData}, }; +use zebra_node_services::mempool; use zebra_state::ValidateContextError; use zebra_test::mock_service::MockService; -use crate::error::TransactionError; +use crate::{error::TransactionError, transaction::POLL_MEMPOOL_DELAY}; use super::{check, Request, Verifier}; @@ -181,7 +182,7 @@ fn v5_transaction_with_no_inputs_fails_validation() { #[tokio::test] async fn mempool_request_with_missing_input_is_rejected() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let (height, tx) = transactions_from_blocks(zebra_test::vectors::MAINNET_BLOCKS.iter()) .find(|(_, tx)| !(tx.is_coinbase() || tx.inputs().is_empty())) @@ -230,7 +231,7 @@ async fn mempool_request_with_missing_input_is_rejected() { #[tokio::test] async fn mempool_request_with_present_input_is_accepted() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = 
Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -297,7 +298,7 @@ async fn mempool_request_with_present_input_is_accepted() { #[tokio::test] async fn mempool_request_with_invalid_lock_time_is_rejected() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -376,7 +377,7 @@ async fn mempool_request_with_invalid_lock_time_is_rejected() { #[tokio::test] async fn mempool_request_with_unlocked_lock_time_is_accepted() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -443,7 +444,7 @@ async fn mempool_request_with_unlocked_lock_time_is_accepted() { #[tokio::test] async fn mempool_request_with_lock_time_max_sequence_number_is_accepted() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -513,7 +514,7 @@ async fn mempool_request_with_lock_time_max_sequence_number_is_accepted() { #[tokio::test] async fn mempool_request_with_past_lock_time_is_accepted() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = 
NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -585,6 +586,123 @@ async fn mempool_request_with_past_lock_time_is_accepted() { ); } +#[tokio::test] +async fn mempool_request_with_unmined_output_spends_is_accepted() { + let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); + let mempool: MockService<_, _, _, _> = MockService::build().for_prop_tests(); + let (mempool_setup_tx, mempool_setup_rx) = tokio::sync::oneshot::channel(); + let verifier = Verifier::new(&Network::Mainnet, state.clone(), mempool_setup_rx); + mempool_setup_tx + .send(mempool.clone()) + .ok() + .expect("send should succeed"); + + let height = NetworkUpgrade::Canopy + .activation_height(&Network::Mainnet) + .expect("Canopy activation height is specified"); + let fund_height = (height - 1).expect("fake source fund block height is too small"); + let (input, output, known_utxos) = mock_transparent_transfer( + fund_height, + true, + 0, + Amount::try_from(10001).expect("invalid value"), + ); + + // Create a non-coinbase V4 tx with the last valid expiry height. + let tx = Transaction::V4 { + inputs: vec![input], + outputs: vec![output], + lock_time: LockTime::min_lock_time_timestamp(), + expiry_height: height, + joinsplit_data: None, + sapling_shielded_data: None, + }; + + let input_outpoint = match tx.inputs()[0] { + transparent::Input::PrevOut { outpoint, .. } => outpoint, + transparent::Input::Coinbase { .. 
} => panic!("requires a non-coinbase transaction"), + }; + + tokio::spawn(async move { + state + .expect_request(zebra_state::Request::BestChainNextMedianTimePast) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::BestChainNextMedianTimePast( + DateTime32::MAX, + )); + + state + .expect_request(zebra_state::Request::UnspentBestChainUtxo(input_outpoint)) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::UnspentBestChainUtxo(None)); + + state + .expect_request_that(|req| { + matches!( + req, + zebra_state::Request::CheckBestChainTipNullifiersAndAnchors(_) + ) + }) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors); + }); + + let mut mempool_clone = mempool.clone(); + tokio::spawn(async move { + mempool_clone + .expect_request(mempool::Request::AwaitOutput(input_outpoint)) + .await + .expect("verifier should call mock state service with correct request") + .respond(mempool::Response::UnspentOutput( + known_utxos + .get(&input_outpoint) + .expect("input outpoint should exist in known_utxos") + .utxo + .output + .clone(), + )); + }); + + let verifier_response = verifier + .oneshot(Request::Mempool { + transaction: tx.into(), + height, + }) + .await; + + assert!( + verifier_response.is_ok(), + "expected successful verification, got: {verifier_response:?}" + ); + + let crate::transaction::Response::Mempool { + transaction: _, + spent_mempool_outpoints, + } = verifier_response.expect("already checked that response is ok") + else { + panic!("unexpected response variant from transaction verifier for Mempool request") + }; + + assert_eq!( + spent_mempool_outpoints, + vec![input_outpoint], + "spent_mempool_outpoints in tx verifier response should match input_outpoint" + ); + + tokio::time::sleep(POLL_MEMPOOL_DELAY * 2).await; + assert_eq!( + 
mempool.poll_count(), + 2, + "the mempool service should have been polled twice, \ + first before being called with an AwaitOutput request, \ + then again shortly after a mempool transaction with transparent outputs \ + is successfully verified" + ); +} + /// Tests that calls to the transaction verifier with a mempool request that spends /// immature coinbase outputs will return an error. #[tokio::test] @@ -592,7 +710,7 @@ async fn mempool_request_with_immature_spend_is_rejected() { let _init_guard = zebra_test::init(); let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -695,7 +813,7 @@ async fn state_error_converted_correctly() { use zebra_state::DuplicateNullifierError; let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -856,7 +974,7 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { for network in Network::iter() { let state_service = service_fn(|_| async { unreachable!("Service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let transaction = fake_v5_transactions_for_network(&network, network.block_iter()) .next_back() @@ -903,7 +1021,7 @@ fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) let blocks = network.block_iter(); let state_service = service_fn(|_| async { unreachable!("Service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, 
state_service); let mut transaction = fake_v5_transactions_for_network(&network, blocks) .next_back() @@ -975,7 +1093,7 @@ async fn v4_transaction_with_transparent_transfer_is_accepted() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -998,7 +1116,7 @@ async fn v4_transaction_with_transparent_transfer_is_accepted() { async fn v4_transaction_with_last_valid_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); let block_height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -1045,7 +1163,7 @@ async fn v4_transaction_with_last_valid_expiry_height() { async fn v4_coinbase_transaction_with_low_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); let block_height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -1086,7 +1204,7 @@ async fn v4_coinbase_transaction_with_low_expiry_height() { async fn v4_transaction_with_too_low_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); let block_height = NetworkUpgrade::Canopy .activation_height(&Network::Mainnet) @@ -1138,7 +1256,7 @@ async fn v4_transaction_with_too_low_expiry_height() { async fn v4_transaction_with_exceeding_expiry_height() { let state_service = 
service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); let block_height = block::Height::MAX; @@ -1189,7 +1307,7 @@ async fn v4_transaction_with_exceeding_expiry_height() { async fn v4_coinbase_transaction_with_exceeding_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); // Use an arbitrary pre-NU5 block height. // It can't be NU5-onward because the expiry height limit is not enforced @@ -1265,7 +1383,7 @@ async fn v4_coinbase_transaction_is_accepted() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1320,7 +1438,7 @@ async fn v4_transaction_with_transparent_transfer_is_rejected_by_the_script() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1375,7 +1493,7 @@ async fn v4_transaction_with_conflicting_transparent_spend_is_rejected() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1446,7 +1564,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected let state_service = service_fn(|_| async { 
unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1522,7 +1640,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1581,7 +1699,7 @@ async fn v5_transaction_with_transparent_transfer_is_accepted() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1605,7 +1723,7 @@ async fn v5_transaction_with_last_valid_expiry_height() { let network = Network::new_default_testnet(); let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let block_height = NetworkUpgrade::Nu5 .activation_height(&network) @@ -1651,7 +1769,8 @@ async fn v5_coinbase_transaction_expiry_height() { let network = Network::new_default_testnet(); let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); + let verifier = Buffer::new(verifier, 10); let block_height = NetworkUpgrade::Nu5 .activation_height(&network) @@ -1701,7 +1820,11 @@ async fn v5_coinbase_transaction_expiry_height() { height: block_height, time: DateTime::::MAX_UTC, }) - .await; + .await + .map_err(|err| { + 
*err.downcast() + .expect("error type should be TransactionError") + }); assert_eq!( result, @@ -1726,7 +1849,11 @@ async fn v5_coinbase_transaction_expiry_height() { height: block_height, time: DateTime::::MAX_UTC, }) - .await; + .await + .map_err(|err| { + *err.downcast() + .expect("error type should be TransactionError") + }); assert_eq!( result, @@ -1768,7 +1895,7 @@ async fn v5_transaction_with_too_low_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let block_height = NetworkUpgrade::Nu5 .activation_height(&network) @@ -1820,7 +1947,7 @@ async fn v5_transaction_with_too_low_expiry_height() { async fn v5_transaction_with_exceeding_expiry_height() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&Network::Mainnet, state_service); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); let block_height = block::Height::MAX; @@ -1898,7 +2025,7 @@ async fn v5_coinbase_transaction_is_accepted() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -1955,7 +2082,7 @@ async fn v5_transaction_with_transparent_transfer_is_rejected_by_the_script() { let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -2012,7 +2139,7 @@ async fn v5_transaction_with_conflicting_transparent_spend_is_rejected() { let state_service = service_fn(|_| async { 
unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); let result = verifier .oneshot(Request::Block { @@ -2055,11 +2182,10 @@ fn v4_with_signed_sprout_transfer_is_accepted() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction, known_utxos: Arc::new(HashMap::new()), @@ -2135,7 +2261,8 @@ async fn v4_with_joinsplit_is_rejected_for_modification( // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called.") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); + let verifier = Buffer::new(verifier, 10); // Test the transaction verifier. 
// @@ -2154,7 +2281,11 @@ async fn v4_with_joinsplit_is_rejected_for_modification( height, time: DateTime::::MAX_UTC, }) - .await; + .await + .map_err(|err| { + *err.downcast() + .expect("error type should be TransactionError") + }); if result == expected_error || i >= 100 { break result; @@ -2186,11 +2317,10 @@ fn v4_with_sapling_spends() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction, known_utxos: Arc::new(HashMap::new()), @@ -2229,11 +2359,10 @@ fn v4_with_duplicate_sapling_spends() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction, known_utxos: Arc::new(HashMap::new()), @@ -2274,11 +2403,10 @@ fn v4_with_sapling_outputs_and_no_spends() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction, known_utxos: Arc::new(HashMap::new()), @@ -2323,11 +2451,10 @@ fn v5_with_sapling_spends() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let 
result = verifier - .clone() .oneshot(Request::Block { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), @@ -2367,11 +2494,10 @@ fn v5_with_duplicate_sapling_spends() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), @@ -2430,11 +2556,10 @@ fn v5_with_duplicate_orchard_action() { // Initialize the verifier let state_service = service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new(&network, state_service); + let verifier = Verifier::new_for_tests(&network, state_service); // Test the transaction verifier let result = verifier - .clone() .oneshot(Request::Block { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), @@ -2933,7 +3058,7 @@ fn shielded_outputs_are_not_decryptable_for_fake_v5_blocks() { #[tokio::test] async fn mempool_zip317_error() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Nu5 .activation_height(&Network::Mainnet) @@ -3005,7 +3130,7 @@ async fn mempool_zip317_error() { #[tokio::test] async fn mempool_zip317_ok() { let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new(&Network::Mainnet, state.clone()); + let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); let height = NetworkUpgrade::Nu5 .activation_height(&Network::Mainnet) diff --git a/zebra-consensus/src/transaction/tests/prop.rs 
b/zebra-consensus/src/transaction/tests/prop.rs index f45b4731de0..856742e5d74 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -4,7 +4,7 @@ use std::{collections::HashMap, sync::Arc}; use chrono::{DateTime, Duration, Utc}; use proptest::{collection::vec, prelude::*}; -use tower::ServiceExt; +use tower::{buffer::Buffer, ServiceExt}; use zebra_chain::{ amount::Amount, @@ -450,7 +450,8 @@ fn validate( // Initialize the verifier let state_service = tower::service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = transaction::Verifier::new(&network, state_service); + let verifier = transaction::Verifier::new_for_tests(&network, state_service); + let verifier = Buffer::new(verifier, 10); // Test the transaction verifier verifier @@ -462,5 +463,9 @@ fn validate( time: block_time, }) .await + .map_err(|err| { + *err.downcast() + .expect("error type should be TransactionError") + }) }) } diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index fbaaf029c75..10f51cf4a30 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -5,7 +5,10 @@ use std::collections::HashSet; use tokio::sync::oneshot; -use zebra_chain::transaction::{self, UnminedTx, UnminedTxId}; +use zebra_chain::{ + transaction::{self, UnminedTx, UnminedTxId}, + transparent, +}; #[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::transaction::VerifiedUnminedTx; @@ -14,6 +17,10 @@ use crate::BoxError; mod gossip; +mod transaction_dependencies; + +pub use transaction_dependencies::TransactionDependencies; + pub use self::gossip::Gossip; /// A mempool service request. @@ -39,6 +46,21 @@ pub enum Request { /// the [`AuthDigest`](zebra_chain::transaction::AuthDigest). 
TransactionsByMinedId(HashSet), + /// Request a [`transparent::Output`] identified by the given [`OutPoint`](transparent::OutPoint), + /// waiting until it becomes available if it is unknown. + /// + /// This request is purely informational, and there are no guarantees about + /// whether the UTXO remains unspent or is on the best chain, or any chain. + /// Its purpose is to allow orphaned mempool transaction verification. + /// + /// # Correctness + /// + /// Output requests should be wrapped in a timeout, so that + /// out-of-order and invalid requests do not hang indefinitely. + /// + /// Outdated requests are pruned on a regular basis. + AwaitOutput(transparent::OutPoint), + /// Get all the [`VerifiedUnminedTx`] in the mempool. /// /// Equivalent to `TransactionsById(TransactionIds)`, @@ -99,6 +121,9 @@ pub enum Response { /// different transactions with different mined IDs. Transactions(Vec), + /// Response to [`Request::AwaitOutput`] with the transparent output + UnspentOutput(transparent::Output), + /// Returns all [`VerifiedUnminedTx`] in the mempool. // // TODO: make the Transactions response return VerifiedUnminedTx, @@ -108,6 +133,9 @@ pub enum Response { /// All [`VerifiedUnminedTx`]s in the mempool transactions: Vec, + /// All transaction dependencies in the mempool + transaction_dependencies: TransactionDependencies, + /// Last seen chain tip hash by mempool service last_seen_tip_hash: zebra_chain::block::Hash, }, diff --git a/zebra-node-services/src/mempool/transaction_dependencies.rs b/zebra-node-services/src/mempool/transaction_dependencies.rs new file mode 100644 index 00000000000..2b333060b77 --- /dev/null +++ b/zebra-node-services/src/mempool/transaction_dependencies.rs @@ -0,0 +1,124 @@ +//! Representation of mempool transactions' dependencies on other transactions in the mempool. 
+ +use std::collections::{HashMap, HashSet}; + +use zebra_chain::{transaction, transparent}; + +/// Representation of mempool transactions' dependencies on other transactions in the mempool. +#[derive(Default, Debug, Clone)] +pub struct TransactionDependencies { + /// Lists of mempool transaction ids that create UTXOs spent by + /// a mempool transaction. Used during block template construction + /// to exclude transactions from block templates unless all of the + /// transactions they depend on have been included. + dependencies: HashMap>, + + /// Lists of transaction ids in the mempool that spend UTXOs created + /// by a transaction in the mempool, e.g. tx1 -> set(tx2, tx3, tx4) where + /// tx2, tx3, and tx4 spend outputs created by tx1. + dependents: HashMap>, +} + +impl TransactionDependencies { + /// Adds a transaction that spends outputs created by other transactions in the mempool + /// as a dependent of those transactions, and adds the transactions that created the outputs + /// spent by the dependent transaction as dependencies of the dependent transaction. + /// + /// # Correctness + /// + /// It's the caller's responsibility to ensure that there are no cyclical dependencies. + /// + /// The transaction verifier will wait until the spent output of a transaction has been added to the verified set, + /// so its `AwaitOutput` requests will timeout if there is a cyclical dependency. + pub fn add( + &mut self, + dependent: transaction::Hash, + spent_mempool_outpoints: Vec, + ) { + for &spent_mempool_outpoint in &spent_mempool_outpoints { + self.dependents + .entry(spent_mempool_outpoint.hash) + .or_default() + .insert(dependent); + } + + // Only add an entries to `dependencies` for transactions that spend unmined outputs so it + // can be used to handle transactions with dependencies differently during block production. 
+ if !spent_mempool_outpoints.is_empty() { + self.dependencies.insert( + dependent, + spent_mempool_outpoints + .into_iter() + .map(|outpoint| outpoint.hash) + .collect(), + ); + } + } + + /// Removes all dependents for a list of mined transaction ids and removes the mined transaction ids + /// from the dependencies of their dependents. + pub fn clear_mined_dependencies(&mut self, mined_ids: &HashSet) { + for mined_tx_id in mined_ids { + for dependent_id in self.dependents.remove(mined_tx_id).unwrap_or_default() { + let Some(dependencies) = self.dependencies.get_mut(&dependent_id) else { + // TODO: Move this struct to zebra-chain and log a warning here. + continue; + }; + + // TODO: Move this struct to zebra-chain and log a warning here if the dependency was not found. + let _ = dependencies.remove(&dependent_id); + } + } + } + + /// Removes the hash of a transaction in the mempool and the hashes of any transactions + /// that are tracked as being directly or indirectly dependent on that transaction from + /// this [`TransactionDependencies`]. + /// + /// Returns a list of transaction hashes that were being tracked as dependents of the + /// provided transaction hash. + pub fn remove_all(&mut self, &tx_hash: &transaction::Hash) -> HashSet { + let mut all_dependents = HashSet::new(); + let mut current_level_dependents: HashSet<_> = [tx_hash].into(); + + while !current_level_dependents.is_empty() { + current_level_dependents = current_level_dependents + .iter() + .flat_map(|dep| { + self.dependencies.remove(dep); + self.dependents.remove(dep).unwrap_or_default() + }) + .collect(); + + all_dependents.extend(¤t_level_dependents); + } + + all_dependents + } + + /// Returns a list of hashes of transactions that directly depend on the transaction for `tx_hash`. 
+ pub fn direct_dependents(&self, tx_hash: &transaction::Hash) -> HashSet { + self.dependents.get(tx_hash).cloned().unwrap_or_default() + } + + /// Returns a list of hashes of transactions that are direct dependencies of the transaction for `tx_hash`. + pub fn direct_dependencies(&self, tx_hash: &transaction::Hash) -> HashSet { + self.dependencies.get(tx_hash).cloned().unwrap_or_default() + } + + /// Clear the maps of transaction dependencies. + pub fn clear(&mut self) { + self.dependencies.clear(); + self.dependents.clear(); + } + + /// Returns the map of transaction's dependencies + pub fn dependencies(&self) -> &HashMap> { + &self.dependencies + } + + /// Returns the map of transaction's dependents + pub fn dependents(&self) -> &HashMap> { + &self.dependents + } +} diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 736021c7668..268676beb27 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -965,6 +965,7 @@ where #[cfg(feature = "getblocktemplate-rpcs")] mempool::Response::FullTransactions { mut transactions, + transaction_dependencies: _, last_seen_tip_hash: _, } => { // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 2d50552cfec..aed926b3635 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -648,7 +648,13 @@ where // The loop returns the server long poll ID, // which should be different to the client long poll ID. - let (server_long_poll_id, chain_tip_and_local_time, mempool_txs, submit_old) = loop { + let ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ) = loop { // Check if we are synced to the tip. // The result of this check can change during long polling. 
// @@ -688,12 +694,13 @@ where // // Optional TODO: // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some(mempool_txs) = fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. - .or_else(|| client_long_poll_id.is_none().then(Vec::new)) + let Some((mempool_txs, mempool_tx_deps)) = + fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. + .or_else(|| client_long_poll_id.is_none().then(Default::default)) else { continue; }; @@ -728,6 +735,7 @@ where server_long_poll_id, chain_tip_and_local_time, mempool_txs, + mempool_tx_deps, submit_old, ); } @@ -888,15 +896,15 @@ where next_block_height, &miner_address, mempool_txs, + mempool_tx_deps, debug_like_zcashd, extra_coinbase_data.clone(), - ) - .await; + ); tracing::debug!( selected_mempool_tx_hashes = ?mempool_txs .iter() - .map(|tx| tx.transaction.id.mined_id()) + .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) .collect::>(), "selected transactions for the template from the mempool" ); diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 8e9578180be..7ab1a48e20a 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -22,7 +22,7 @@ use zebra_chain::{ use zebra_consensus::{ block_subsidy, funding_stream_address, funding_stream_values, 
miner_subsidy, }; -use zebra_node_services::mempool; +use zebra_node_services::mempool::{self, TransactionDependencies}; use zebra_state::GetBlockTemplateChainInfo; use crate::methods::{ @@ -253,7 +253,7 @@ where pub async fn fetch_mempool_transactions( mempool: Mempool, chain_tip_hash: block::Hash, -) -> Result>> +) -> Result, TransactionDependencies)>> where Mempool: Service< mempool::Request, @@ -271,8 +271,11 @@ where data: None, })?; + // TODO: Order transactions in block templates based on their dependencies + let mempool::Response::FullTransactions { transactions, + transaction_dependencies, last_seen_tip_hash, } = response else { @@ -280,7 +283,7 @@ where }; // Check that the mempool and state were in sync when we made the requests - Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions)) + Ok((last_seen_tip_hash == chain_tip_hash).then_some((transactions, transaction_dependencies))) } // - Response processing diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs index d7c31e11a81..879425bb667 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs @@ -35,6 +35,11 @@ pub mod proposal; pub use parameters::{GetBlockTemplateCapability, GetBlockTemplateRequestMode, JsonParameters}; pub use proposal::{proposal_block_from_template, ProposalResponse}; +/// An alias to indicate that a usize value represents the depth of in-block dependencies of a transaction. +/// +/// See the `dependencies_depth()` function in [`zip317`](super::super::zip317) for more details. +pub type InBlockTxDependenciesDepth = usize; + /// A serialized `getblocktemplate` RPC response in template mode. 
#[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetBlockTemplate { @@ -227,7 +232,8 @@ impl GetBlockTemplate { miner_address: &transparent::Address, chain_tip_and_local_time: &GetBlockTemplateChainInfo, long_poll_id: LongPollId, - mempool_txs: Vec, + #[cfg(not(test))] mempool_txs: Vec, + #[cfg(test)] mempool_txs: Vec<(InBlockTxDependenciesDepth, VerifiedUnminedTx)>, submit_old: Option, like_zcashd: bool, extra_coinbase_data: Vec, @@ -237,28 +243,45 @@ impl GetBlockTemplate { (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); // Convert transactions into TransactionTemplates - let mut mempool_txs_with_templates: Vec<( - TransactionTemplate, - VerifiedUnminedTx, - )> = mempool_txs - .into_iter() - .map(|tx| ((&tx).into(), tx)) - .collect(); + #[cfg(not(test))] + let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = + mempool_txs.into_iter().map(|tx| ((&tx).into(), tx)).unzip(); // Transaction selection returns transactions in an arbitrary order, // but Zebra's snapshot tests expect the same order every time. - if like_zcashd { - // Sort in serialized data order, excluding the length byte. - // `zcashd` sometimes seems to do this, but other times the order is arbitrary. - mempool_txs_with_templates.sort_by_key(|(tx_template, _tx)| tx_template.data.clone()); - } else { - // Sort by hash, this is faster. + // + // # Correctness + // + // Transactions that spend outputs created in the same block must appear + // after the transactions that create those outputs. + #[cfg(test)] + let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = { + let mut mempool_txs_with_templates: Vec<( + InBlockTxDependenciesDepth, + TransactionTemplate, + VerifiedUnminedTx, + )> = mempool_txs + .into_iter() + .map(|(min_tx_index, tx)| (min_tx_index, (&tx).into(), tx)) + .collect(); + + if like_zcashd { + // Sort in serialized data order, excluding the length byte. 
+ // `zcashd` sometimes seems to do this, but other times the order is arbitrary. + mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { + (*min_tx_index, tx_template.data.clone()) + }); + } else { + // Sort by hash, this is faster. + mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { + (*min_tx_index, tx_template.hash.bytes_in_display_order()) + }); + } mempool_txs_with_templates - .sort_by_key(|(tx_template, _tx)| tx_template.hash.bytes_in_display_order()); - } - - let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = - mempool_txs_with_templates.into_iter().unzip(); + .into_iter() + .map(|(_, template, tx)| (template, tx)) + .unzip() + }; // Generate the coinbase transaction and default roots // diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs index 8817a8c12c0..08439df2fcf 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs @@ -71,14 +71,18 @@ impl LongPollInput { max_time: DateTime32, mempool_tx_ids: impl IntoIterator, ) -> Self { - let mempool_transaction_mined_ids = + let mut tx_mined_ids: Vec = mempool_tx_ids.into_iter().map(|id| id.mined_id()).collect(); + // The mempool returns unordered transactions, we need to sort them here so + // that the longpollid doesn't change unexpectedly. + tx_mined_ids.sort(); + LongPollInput { tip_height, tip_hash, max_time, - mempool_transaction_mined_ids, + mempool_transaction_mined_ids: tx_mined_ids.into(), } } @@ -293,3 +297,28 @@ impl TryFrom for LongPollId { s.parse() } } + +/// Check that [`LongPollInput::new`] will sort mempool transaction ids. 
+/// +/// The mempool does not currently guarantee the order in which it will return transactions and +/// may return the same items in a different order, while the long poll id should be the same if +/// its other components are equal and no transactions have been added or removed in the mempool. +#[test] +fn long_poll_input_mempool_tx_ids_are_sorted() { + let mempool_tx_ids = || { + (0..10) + .map(|i| transaction::Hash::from([i; 32])) + .map(UnminedTxId::Legacy) + }; + + assert_eq!( + LongPollInput::new(Height::MIN, Default::default(), 0.into(), mempool_tx_ids()), + LongPollInput::new( + Height::MIN, + Default::default(), + 0.into(), + mempool_tx_ids().rev() + ), + "long poll input should sort mempool tx ids" + ); +} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs b/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs index 3f0979dc266..75ae9575d62 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs @@ -6,6 +6,8 @@ //! > when computing `size_target`, since there is no consensus requirement for this to be //! > exactly the same between implementations. 
+use std::collections::{HashMap, HashSet}; + use rand::{ distributions::{Distribution, WeightedIndex}, prelude::thread_rng, @@ -15,15 +17,30 @@ use zebra_chain::{ amount::NegativeOrZero, block::{Height, MAX_BLOCK_BYTES}, parameters::Network, - transaction::{zip317::BLOCK_UNPAID_ACTION_LIMIT, VerifiedUnminedTx}, + transaction::{self, zip317::BLOCK_UNPAID_ACTION_LIMIT, VerifiedUnminedTx}, transparent, }; use zebra_consensus::MAX_BLOCK_SIGOPS; +use zebra_node_services::mempool::TransactionDependencies; use crate::methods::get_block_template_rpcs::{ get_block_template::generate_coinbase_transaction, types::transaction::TransactionTemplate, }; +#[cfg(test)] +use super::get_block_template::InBlockTxDependenciesDepth; + +#[cfg(test)] +mod tests; + +/// Used in the return type of [`select_mempool_transactions()`] for test compilations. +#[cfg(test)] +type SelectedMempoolTx = (InBlockTxDependenciesDepth, VerifiedUnminedTx); + +/// Used in the return type of [`select_mempool_transactions()`] for non-test compilations. +#[cfg(not(test))] +type SelectedMempoolTx = VerifiedUnminedTx; + /// Selects mempool transactions for block production according to [ZIP-317], /// using a fake coinbase transaction and the mempool. /// @@ -36,14 +53,15 @@ use crate::methods::get_block_template_rpcs::{ /// Returns selected transactions from `mempool_txs`. /// /// [ZIP-317]: https://zips.z.cash/zip-0317#block-production -pub async fn select_mempool_transactions( +pub fn select_mempool_transactions( network: &Network, next_block_height: Height, miner_address: &transparent::Address, mempool_txs: Vec, + mempool_tx_deps: TransactionDependencies, like_zcashd: bool, extra_coinbase_data: Vec, -) -> Vec { +) -> Vec { // Use a fake coinbase transaction to break the dependency between transaction // selection, the miner fee, and the fee payment in the coinbase transaction. 
let fake_coinbase_tx = fake_coinbase_transaction( @@ -54,9 +72,16 @@ pub async fn select_mempool_transactions( extra_coinbase_data, ); + let tx_dependencies = mempool_tx_deps.dependencies(); + let (independent_mempool_txs, mut dependent_mempool_txs): (HashMap<_, _>, HashMap<_, _>) = + mempool_txs + .into_iter() + .map(|tx| (tx.transaction.id.mined_id(), tx)) + .partition(|(tx_id, _tx)| !tx_dependencies.contains_key(tx_id)); + // Setup the transaction lists. - let (mut conventional_fee_txs, mut low_fee_txs): (Vec<_>, Vec<_>) = mempool_txs - .into_iter() + let (mut conventional_fee_txs, mut low_fee_txs): (Vec<_>, Vec<_>) = independent_mempool_txs + .into_values() .partition(VerifiedUnminedTx::pays_conventional_fee); let mut selected_txs = Vec::new(); @@ -77,8 +102,10 @@ pub async fn select_mempool_transactions( while let Some(tx_weights) = conventional_fee_tx_weights { conventional_fee_tx_weights = checked_add_transaction_weighted_random( &mut conventional_fee_txs, + &mut dependent_mempool_txs, tx_weights, &mut selected_txs, + &mempool_tx_deps, &mut remaining_block_bytes, &mut remaining_block_sigops, // The number of unpaid actions is always zero for transactions that pay the @@ -93,8 +120,10 @@ pub async fn select_mempool_transactions( while let Some(tx_weights) = low_fee_tx_weights { low_fee_tx_weights = checked_add_transaction_weighted_random( &mut low_fee_txs, + &mut dependent_mempool_txs, tx_weights, &mut selected_txs, + &mempool_tx_deps, &mut remaining_block_bytes, &mut remaining_block_sigops, &mut remaining_block_unpaid_actions, @@ -158,6 +187,59 @@ fn setup_fee_weighted_index(transactions: &[VerifiedUnminedTx]) -> Option>, + selected_txs: &Vec, +) -> bool { + let Some(deps) = candidate_tx_deps else { + return true; + }; + + if selected_txs.len() < deps.len() { + return false; + } + + let mut num_available_deps = 0; + for tx in selected_txs { + #[cfg(test)] + let (_, tx) = tx; + if deps.contains(&tx.transaction.id.mined_id()) { + num_available_deps += 1; + } 
else { + continue; + } + + if num_available_deps == deps.len() { + return true; + } + } + + false +} + +/// Returns the depth of a transaction's dependencies in the block for a candidate +/// transaction with the provided dependencies. +#[cfg(test)] +fn dependencies_depth( + dependent_tx_id: &transaction::Hash, + mempool_tx_deps: &TransactionDependencies, +) -> InBlockTxDependenciesDepth { + let mut current_level = 0; + let mut current_level_deps = mempool_tx_deps.direct_dependencies(dependent_tx_id); + while !current_level_deps.is_empty() { + current_level += 1; + current_level_deps = current_level_deps + .iter() + .flat_map(|dep_id| mempool_tx_deps.direct_dependencies(dep_id)) + .collect(); + } + + current_level +} + /// Chooses a random transaction from `txs` using the weighted index `tx_weights`, /// and tries to add it to `selected_txs`. /// @@ -168,10 +250,14 @@ fn setup_fee_weighted_index(transactions: &[VerifiedUnminedTx]) -> Option, + dependent_txs: &mut HashMap, tx_weights: WeightedIndex, - selected_txs: &mut Vec, + selected_txs: &mut Vec, + mempool_tx_deps: &TransactionDependencies, remaining_block_bytes: &mut usize, remaining_block_sigops: &mut u64, remaining_block_unpaid_actions: &mut u32, @@ -181,30 +267,124 @@ fn checked_add_transaction_weighted_random( let (new_tx_weights, candidate_tx) = choose_transaction_weighted_random(candidate_txs, tx_weights); - // > If the block template with this transaction included - // > would be within the block size limit and block sigop limit, - // > and block_unpaid_actions <= block_unpaid_action_limit, - // > add the transaction to the block template - // - // Unpaid actions are always zero for transactions that pay the conventional fee, - // so the unpaid action check always passes for those transactions. 
- if candidate_tx.transaction.size <= *remaining_block_bytes - && candidate_tx.legacy_sigop_count <= *remaining_block_sigops - && candidate_tx.unpaid_actions <= *remaining_block_unpaid_actions - { - selected_txs.push(candidate_tx.clone()); + if !candidate_tx.try_update_block_template_limits( + remaining_block_bytes, + remaining_block_sigops, + remaining_block_unpaid_actions, + ) { + return new_tx_weights; + } - *remaining_block_bytes -= candidate_tx.transaction.size; - *remaining_block_sigops -= candidate_tx.legacy_sigop_count; + let tx_dependencies = mempool_tx_deps.dependencies(); + let selected_tx_id = &candidate_tx.transaction.id.mined_id(); + debug_assert!( + !tx_dependencies.contains_key(selected_tx_id), + "all candidate transactions should be independent" + ); - // Unpaid actions are always zero for transactions that pay the conventional fee, - // so this limit always remains the same after they are added. - *remaining_block_unpaid_actions -= candidate_tx.unpaid_actions; + #[cfg(not(test))] + selected_txs.push(candidate_tx); + + #[cfg(test)] + selected_txs.push((0, candidate_tx)); + + // Try adding any dependent transactions if all of their dependencies have been selected. + + let mut current_level_dependents = mempool_tx_deps.direct_dependents(selected_tx_id); + while !current_level_dependents.is_empty() { + let mut next_level_dependents = HashSet::new(); + + for dependent_tx_id in ¤t_level_dependents { + // ## Note + // + // A necessary condition for adding the dependent tx is that it spends unmined outputs coming only from + // the selected txs, which come from the mempool. If the tx also spends in-chain outputs, it won't + // be added. This behavior is not specified by consensus rules and can be changed at any time, + // meaning that such txs could be added. 
+ if has_direct_dependencies(tx_dependencies.get(dependent_tx_id), selected_txs) { + let Some(candidate_tx) = dependent_txs.remove(dependent_tx_id) else { + continue; + }; + + // Transactions that don't pay the conventional fee should not have + // the same probability of being included as their dependencies. + if !candidate_tx.pays_conventional_fee() { + continue; + } + + if !candidate_tx.try_update_block_template_limits( + remaining_block_bytes, + remaining_block_sigops, + remaining_block_unpaid_actions, + ) { + continue; + } + + #[cfg(not(test))] + selected_txs.push(candidate_tx); + + #[cfg(test)] + selected_txs.push(( + dependencies_depth(dependent_tx_id, mempool_tx_deps), + candidate_tx, + )); + + next_level_dependents.extend(mempool_tx_deps.direct_dependents(dependent_tx_id)); + } + } + + current_level_dependents = next_level_dependents; } new_tx_weights } +trait TryUpdateBlockLimits { + /// Checks if a transaction fits within the provided remaining block bytes, + /// sigops, and unpaid actions limits. + /// + /// Updates the limits and returns true if the transaction does fit, or + /// returns false otherwise. + fn try_update_block_template_limits( + &self, + remaining_block_bytes: &mut usize, + remaining_block_sigops: &mut u64, + remaining_block_unpaid_actions: &mut u32, + ) -> bool; +} + +impl TryUpdateBlockLimits for VerifiedUnminedTx { + fn try_update_block_template_limits( + &self, + remaining_block_bytes: &mut usize, + remaining_block_sigops: &mut u64, + remaining_block_unpaid_actions: &mut u32, + ) -> bool { + // > If the block template with this transaction included + // > would be within the block size limit and block sigop limit, + // > and block_unpaid_actions <= block_unpaid_action_limit, + // > add the transaction to the block template + // + // Unpaid actions are always zero for transactions that pay the conventional fee, + // so the unpaid action check always passes for those transactions. 
+ if self.transaction.size <= *remaining_block_bytes + && self.legacy_sigop_count <= *remaining_block_sigops + && self.unpaid_actions <= *remaining_block_unpaid_actions + { + *remaining_block_bytes -= self.transaction.size; + *remaining_block_sigops -= self.legacy_sigop_count; + + // Unpaid actions are always zero for transactions that pay the conventional fee, + // so this limit always remains the same after they are added. + *remaining_block_unpaid_actions -= self.unpaid_actions; + + true + } else { + false + } + } +} + /// Choose a transaction from `transactions`, using the previously set up `weighted_index`. /// /// If some transactions have not yet been chosen, returns the weighted index and the transaction. diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs b/zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs new file mode 100644 index 00000000000..a132d855937 --- /dev/null +++ b/zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs @@ -0,0 +1,116 @@ +//! Tests for ZIP-317 transaction selection for block template production + +use zebra_chain::{ + block::Height, + parameters::Network, + transaction, + transparent::{self, OutPoint}, +}; +use zebra_node_services::mempool::TransactionDependencies; + +use super::select_mempool_transactions; + +#[test] +fn excludes_tx_with_unselected_dependencies() { + let network = Network::Mainnet; + let next_block_height = Height(1_000_000); + let miner_address = transparent::Address::from_pub_key_hash(network.kind(), [0; 20]); + let unmined_tx = network + .unmined_transactions_in_blocks(..) 
+ .next() + .expect("should not be empty"); + + let mut mempool_tx_deps = TransactionDependencies::default(); + mempool_tx_deps.add( + unmined_tx.transaction.id.mined_id(), + vec![OutPoint::from_usize(transaction::Hash([0; 32]), 0)], + ); + + let like_zcashd = true; + let extra_coinbase_data = Vec::new(); + + assert_eq!( + select_mempool_transactions( + &network, + next_block_height, + &miner_address, + vec![unmined_tx], + mempool_tx_deps, + like_zcashd, + extra_coinbase_data, + ), + vec![], + "should not select any transactions when dependencies are unavailable" + ); +} + +#[test] +fn includes_tx_with_selected_dependencies() { + let network = Network::Mainnet; + let next_block_height = Height(1_000_000); + let miner_address = transparent::Address::from_pub_key_hash(network.kind(), [0; 20]); + let unmined_txs: Vec<_> = network.unmined_transactions_in_blocks(..).take(3).collect(); + + let dependent_tx1 = unmined_txs.first().expect("should have 3 txns"); + let dependent_tx2 = unmined_txs.get(1).expect("should have 3 txns"); + let independent_tx_id = unmined_txs + .get(2) + .expect("should have 3 txns") + .transaction + .id + .mined_id(); + + let mut mempool_tx_deps = TransactionDependencies::default(); + mempool_tx_deps.add( + dependent_tx1.transaction.id.mined_id(), + vec![OutPoint::from_usize(independent_tx_id, 0)], + ); + mempool_tx_deps.add( + dependent_tx2.transaction.id.mined_id(), + vec![ + OutPoint::from_usize(independent_tx_id, 0), + OutPoint::from_usize(transaction::Hash([0; 32]), 0), + ], + ); + + let like_zcashd = true; + let extra_coinbase_data = Vec::new(); + + let selected_txs = select_mempool_transactions( + &network, + next_block_height, + &miner_address, + unmined_txs.clone(), + mempool_tx_deps.clone(), + like_zcashd, + extra_coinbase_data, + ); + + assert_eq!( + selected_txs.len(), + 2, + "should select the independent transaction and 1 of the dependent txs, selected: {selected_txs:?}" + ); + + let selected_tx_by_id = |id| { + selected_txs + 
.iter() + .find(|(_, tx)| tx.transaction.id.mined_id() == id) + }; + + let (dependency_depth, _) = + selected_tx_by_id(independent_tx_id).expect("should select the independent tx"); + + assert_eq!( + *dependency_depth, 0, + "should return a dependency depth of 0 for the independent tx" + ); + + let (dependency_depth, _) = selected_tx_by_id(dependent_tx1.transaction.id.mined_id()) + .expect("should select dependent_tx1"); + + assert_eq!( + *dependency_depth, 1, + "should return a dependency depth of 1 for the dependent tx" + ); +} diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 409a6aefe52..726ddca159a 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -424,6 +424,7 @@ proptest! { .await? .respond(mempool::Response::FullTransactions { transactions, + transaction_dependencies: Default::default(), last_seen_tip_hash: [0; 32].into(), }); diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index f4d7804088e..c0cda974ede 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -356,6 +356,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { .map(|responder| { responder.respond(mempool::Response::FullTransactions { transactions: vec![], + transaction_dependencies: Default::default(), last_seen_tip_hash: blocks[blocks.len() - 1].hash(), }); }); diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 8afb7dd312d..b2e012c7bcd 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -86,8 +86,12 @@ pub async fn test_responses( _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), network, state.clone()) - 
.await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + network, + state.clone(), + ) + .await; let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -261,6 +265,7 @@ pub async fn test_responses( .await .respond(mempool::Response::FullTransactions { transactions: vec![], + transaction_dependencies: Default::default(), // tip hash needs to match chain info for long poll requests last_seen_tip_hash: fake_tip_hash, }); diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 5b5a21e23d0..b82ac588d5c 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -921,8 +921,12 @@ async fn rpc_getblockcount() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &Mainnet, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &Mainnet, + state.clone(), + ) + .await; // Init RPC let get_block_template_rpc = GetBlockTemplateRpcImpl::new( @@ -966,8 +970,12 @@ async fn rpc_getblockcount_empty_state() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &Mainnet, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &Mainnet, + state.clone(), + ) + .await; // Init RPC let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( @@ -1013,8 +1021,12 @@ async fn rpc_getpeerinfo() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &network, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &network, + state.clone(), + ) + .await; let 
mock_peer_address = zebra_network::types::MetaAddr::new_initial_peer( std::net::SocketAddr::new( @@ -1083,8 +1095,12 @@ async fn rpc_getblockhash() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &Mainnet, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &Mainnet, + state.clone(), + ) + .await; // Init RPC let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( @@ -1348,6 +1364,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .await .respond(mempool::Response::FullTransactions { transactions, + transaction_dependencies: Default::default(), last_seen_tip_hash, }); } @@ -1569,8 +1586,12 @@ async fn rpc_submitblock_errors() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &Mainnet, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &Mainnet, + state.clone(), + ) + .await; // Init RPC let get_block_template_rpc = GetBlockTemplateRpcImpl::new( diff --git a/zebra-test/src/mock_service.rs b/zebra-test/src/mock_service.rs index 7ab0d1f613b..cf5c2da0db7 100644 --- a/zebra-test/src/mock_service.rs +++ b/zebra-test/src/mock_service.rs @@ -43,7 +43,10 @@ use std::{ fmt::Debug, marker::PhantomData, - sync::Arc, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, task::{Context, Poll}, time::Duration, }; @@ -111,6 +114,7 @@ type ProxyItem = pub struct MockService { receiver: broadcast::Receiver>, sender: broadcast::Sender>, + poll_count: Arc, max_request_delay: Duration, _assertion_type: PhantomData, } @@ -155,6 +159,7 @@ where type Future = BoxFuture<'static, Result>; fn poll_ready(&mut self, _context: &mut Context) -> Poll> { + self.poll_count.fetch_add(1, Ordering::SeqCst); Poll::Ready(Ok(())) } @@ -271,6 
+276,7 @@ impl MockServiceBuilder { MockService { receiver, sender, + poll_count: Arc::new(AtomicUsize::new(0)), max_request_delay: self.max_request_delay.unwrap_or(DEFAULT_MAX_REQUEST_DELAY), _assertion_type: PhantomData, } @@ -454,6 +460,13 @@ impl MockService usize { + self.poll_count.load(Ordering::SeqCst) + } } /// Implementation of [`MockService`] methods that use [`mod@proptest`] assertions. @@ -667,6 +680,13 @@ impl MockService usize { + self.poll_count.load(Ordering::SeqCst) + } } /// Code that is independent of the assertions used in [`MockService`]. @@ -708,6 +728,7 @@ impl Clone MockService { receiver: self.sender.subscribe(), sender: self.sender.clone(), + poll_count: self.poll_count.clone(), max_request_delay: self.max_request_delay, _assertion_type: PhantomData, } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 887f1cc0242..2f8a1563b8a 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -179,11 +179,13 @@ impl StartCmd { .await; info!("initializing verifiers"); + let (tx_verifier_setup_tx, tx_verifier_setup_rx) = oneshot::channel(); let (block_verifier_router, tx_verifier, consensus_task_handles, max_checkpoint_height) = zebra_consensus::router::init( config.consensus.clone(), &config.network.network, state.clone(), + tx_verifier_setup_rx, ) .await; @@ -212,6 +214,10 @@ impl StartCmd { .buffer(mempool::downloads::MAX_INBOUND_CONCURRENCY) .service(mempool); + if tx_verifier_setup_tx.send(mempool.clone()).is_err() { + warn!("error setting up the transaction verifier with a handle to the mempool service"); + }; + info!("fully initializing inbound peer request handler"); // Fully start the inbound service as soon as possible let setup_data = InboundSetupData { diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 3ca30c5759a..176ec8c1c57 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ 
b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -31,8 +31,8 @@ use crate::{ components::{ inbound::{downloads::MAX_INBOUND_CONCURRENCY, Inbound, InboundSetupData}, mempool::{ - gossip_mempool_transaction_id, unmined_transactions_in_blocks, Config as MempoolConfig, - Mempool, MempoolError, SameEffectsChainRejectionError, UnboxMempoolError, + gossip_mempool_transaction_id, Config as MempoolConfig, Mempool, MempoolError, + SameEffectsChainRejectionError, UnboxMempoolError, }, sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, }, @@ -785,7 +785,7 @@ async fn caches_getaddr_response() { _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( + ) = zebra_consensus::router::init_test( consensus_config.clone(), &network, state_service.clone(), @@ -894,8 +894,12 @@ async fn setup( // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. let (block_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - zebra_consensus::router::init(consensus_config.clone(), &network, state_service.clone()) - .await; + zebra_consensus::router::init_test( + consensus_config.clone(), + &network, + state_service.clone(), + ) + .await; let mut peer_set = MockService::build() .with_max_request_delay(MAX_PEER_SET_REQUEST_DELAY) @@ -1050,14 +1054,15 @@ fn add_some_stuff_to_mempool( network: Network, ) -> Vec { // get the genesis block coinbase transaction from the Zcash blockchain. - let genesis_transactions: Vec<_> = unmined_transactions_in_blocks(..=0, &network) + let genesis_transactions: Vec<_> = network + .unmined_transactions_in_blocks(..=0) .take(1) .collect(); // Insert the genesis block coinbase transaction into the mempool storage. 
mempool_service .storage() - .insert(genesis_transactions[0].clone()) + .insert(genesis_transactions[0].clone(), Vec::new()) .unwrap(); genesis_transactions diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 05732ddaac2..b94ad0b09b8 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -50,6 +50,7 @@ mod crawler; pub mod downloads; mod error; pub mod gossip; +mod pending_outputs; mod queue_checker; mod storage; @@ -68,7 +69,7 @@ pub use storage::{ }; #[cfg(test)] -pub use self::{storage::tests::unmined_transactions_in_blocks, tests::UnboxMempoolError}; +pub use self::tests::UnboxMempoolError; use downloads::{ Downloads as TxDownloads, TRANSACTION_DOWNLOAD_TIMEOUT, TRANSACTION_VERIFY_TIMEOUT, @@ -132,7 +133,10 @@ impl ActiveState { } => { let mut transactions = Vec::new(); - let storage = storage.transactions().map(|tx| tx.clone().into()); + let storage = storage + .transactions() + .values() + .map(|tx| tx.transaction.clone().into()); transactions.extend(storage); let pending = tx_downloads.transaction_requests().cloned(); @@ -387,10 +391,11 @@ impl Mempool { /// Remove expired transaction ids from a given list of inserted ones. 
fn remove_expired_from_peer_list( send_to_peers_ids: &HashSet, - expired_transactions: &HashSet, + expired_transactions: &HashSet, ) -> HashSet { send_to_peers_ids - .difference(expired_transactions) + .iter() + .filter(|id| !expired_transactions.contains(&id.mined_id())) .copied() .collect() } @@ -585,7 +590,7 @@ impl Service for Mempool { pin!(tx_downloads.timeout(RATE_LIMIT_DELAY)).poll_next(cx) { match r { - Ok(Ok((tx, expected_tip_height))) => { + Ok(Ok((tx, spent_mempool_outpoints, expected_tip_height))) => { // # Correctness: // // It's okay to use tip height here instead of the tip hash since @@ -593,7 +598,7 @@ impl Service for Mempool { // the best chain changes (which is the only way to stay at the same height), and the // mempool re-verifies all pending tx_downloads when there's a `TipAction::Reset`. if best_tip_height == expected_tip_height { - let insert_result = storage.insert(tx.clone()); + let insert_result = storage.insert(tx.clone(), spent_mempool_outpoints); tracing::trace!( ?insert_result, @@ -612,11 +617,11 @@ impl Service for Mempool { .download_if_needed_and_verify(tx.transaction.into(), None); } } - Ok(Err((txid, error))) => { - tracing::debug!(?txid, ?error, "mempool transaction failed to verify"); + Ok(Err((tx_id, error))) => { + tracing::debug!(?tx_id, ?error, "mempool transaction failed to verify"); metrics::counter!("mempool.failed.verify.tasks.total", "reason" => error.to_string()).increment(1); - storage.reject_if_needed(txid, error); + storage.reject_if_needed(tx_id, error); } Err(_elapsed) => { // A timeout happens when the stream hangs waiting for another service, @@ -638,6 +643,7 @@ impl Service for Mempool { // with the same mined IDs as recently mined transactions. 
let mined_ids = block.transaction_hashes.iter().cloned().collect(); tx_downloads.cancel(&mined_ids); + storage.clear_mined_dependencies(&mined_ids); storage.reject_and_remove_same_effects(&mined_ids, block.transactions); // Clear any transaction rejections if they might have become valid after @@ -728,16 +734,32 @@ impl Service for Mempool { async move { Ok(Response::Transactions(res)) }.boxed() } + Request::AwaitOutput(outpoint) => { + trace!(?req, "got mempool request"); + + let response_fut = storage.pending_outputs.queue(outpoint); + + if let Some(output) = storage.created_output(&outpoint) { + storage.pending_outputs.respond(&outpoint, output) + } + + trace!("answered mempool request"); + + response_fut.boxed() + } + #[cfg(feature = "getblocktemplate-rpcs")] Request::FullTransactions => { trace!(?req, "got mempool request"); - let transactions: Vec<_> = storage.full_transactions().cloned().collect(); + let transactions: Vec<_> = storage.transactions().values().cloned().collect(); + let transaction_dependencies = storage.transaction_dependencies().clone(); trace!(?req, transactions_count = ?transactions.len(), "answered mempool request"); let response = Response::FullTransactions { transactions, + transaction_dependencies, last_seen_tip_hash: *last_seen_tip_hash, }; @@ -806,6 +828,13 @@ impl Service for Mempool { Request::TransactionsById(_) => Response::Transactions(Default::default()), Request::TransactionsByMinedId(_) => Response::Transactions(Default::default()), + Request::AwaitOutput(_) => { + return async move { + Err("mempool is not active: wait for Zebra to sync to the tip".into()) + } + .boxed() + } + #[cfg(feature = "getblocktemplate-rpcs")] Request::FullTransactions => { return async move { diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index eeda6bd9567..45fd44a7c05 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -47,6 +47,7 @@ use 
tracing_futures::Instrument; use zebra_chain::{ block::Height, transaction::{self, UnminedTxId, VerifiedUnminedTx}, + transparent, }; use zebra_consensus::transaction as tx; use zebra_network as zn; @@ -153,7 +154,11 @@ where pending: FuturesUnordered< JoinHandle< Result< - (VerifiedUnminedTx, Option), + ( + VerifiedUnminedTx, + Vec, + Option, + ), (TransactionDownloadVerifyError, UnminedTxId), >, >, @@ -173,8 +178,14 @@ where ZS: Service + Send + Clone + 'static, ZS::Future: Send, { - type Item = - Result<(VerifiedUnminedTx, Option), (UnminedTxId, TransactionDownloadVerifyError)>; + type Item = Result< + ( + VerifiedUnminedTx, + Vec, + Option, + ), + (UnminedTxId, TransactionDownloadVerifyError), + >; fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = self.project(); @@ -189,9 +200,9 @@ where // TODO: this would be cleaner with poll_map (#2693) if let Some(join_result) = ready!(this.pending.poll_next(cx)) { match join_result.expect("transaction download and verify tasks must not panic") { - Ok((tx, tip_height)) => { + Ok((tx, spent_mempool_outpoints, tip_height)) => { this.cancel_handles.remove(&tx.transaction.id); - Poll::Ready(Some(Ok((tx, tip_height)))) + Poll::Ready(Some(Ok((tx, spent_mempool_outpoints, tip_height)))) } Err((e, hash)) => { this.cancel_handles.remove(&hash); @@ -347,8 +358,11 @@ where height: next_height, }) .map_ok(|rsp| { - (rsp.into_mempool_transaction() - .expect("unexpected non-mempool response to mempool request"), tip_height) + let tx::Response::Mempool { transaction, spent_mempool_outpoints } = rsp else { + panic!("unexpected non-mempool response to mempool request") + }; + + (transaction, spent_mempool_outpoints, tip_height) }) .await; @@ -357,12 +371,12 @@ where result.map_err(|e| TransactionDownloadVerifyError::Invalid(e.into())) } - .map_ok(|(tx, tip_height)| { + .map_ok(|(tx, spent_mempool_outpoints, tip_height)| { metrics::counter!( "mempool.verified.transactions.total", "version" => format!("{}", 
tx.transaction.transaction.version()), ).increment(1); - (tx, tip_height) + (tx, spent_mempool_outpoints, tip_height) }) // Tack the hash onto the error so we can remove the cancel handle // on failure as well as on success. @@ -387,6 +401,7 @@ where }; // Send the result to responder channel if one was provided. + // TODO: Wait until transactions are added to the verified set before sending an Ok to `rsp_tx`. if let Some(rsp_tx) = rsp_tx { let _ = rsp_tx.send( result diff --git a/zebrad/src/components/mempool/pending_outputs.rs b/zebrad/src/components/mempool/pending_outputs.rs new file mode 100644 index 00000000000..495613019cc --- /dev/null +++ b/zebrad/src/components/mempool/pending_outputs.rs @@ -0,0 +1,65 @@ +//! Pending [`transparent::Output`] tracker for [`AwaitOutput` requests](zebra_node_services::mempool::Request::AwaitOutput). + +use std::{collections::HashMap, future::Future}; + +use tokio::sync::broadcast; + +use tower::BoxError; +use zebra_chain::transparent; + +use zebra_node_services::mempool::Response; + +/// Pending [`transparent::Output`] tracker for handling the mempool's +/// [`AwaitOutput` requests](zebra_node_services::mempool::Request::AwaitOutput). +#[derive(Debug, Default)] +pub struct PendingOutputs(HashMap>); + +impl PendingOutputs { + /// Returns a future that will resolve to the `transparent::Output` pointed + /// to by the given `transparent::OutPoint` when it is available. + pub fn queue( + &mut self, + outpoint: transparent::OutPoint, + ) -> impl Future> { + let mut receiver = self + .0 + .entry(outpoint) + .or_insert_with(|| { + let (sender, _) = broadcast::channel(1); + sender + }) + .subscribe(); + + async move { + receiver + .recv() + .await + .map(Response::UnspentOutput) + .map_err(BoxError::from) + } + } + + /// Notify all requests waiting for the [`transparent::Output`] pointed to by + /// the given [`transparent::OutPoint`] that the [`transparent::Output`] has + /// arrived. 
+ #[inline] + pub fn respond(&mut self, outpoint: &transparent::OutPoint, output: transparent::Output) { + if let Some(sender) = self.0.remove(outpoint) { + // Adding the outpoint as a field lets us cross-reference + // with the trace of the verification that made the request. + tracing::trace!(?outpoint, "found pending mempool output"); + let _ = sender.send(output); + } + } + + /// Scan the set of waiting Output requests for channels where all receivers + /// have been dropped and remove the corresponding sender. + pub fn prune(&mut self) { + self.0.retain(|_, chan| chan.receiver_count() > 0); + } + + /// Clears the inner [`HashMap`] of queued pending output requests. + pub fn clear(&mut self) { + self.0.clear(); + } +} diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index d380efb84aa..ce6f09cf1d6 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -16,12 +16,17 @@ use std::{ use thiserror::Error; -use zebra_chain::transaction::{ - self, Hash, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx, +use zebra_chain::{ + transaction::{self, Hash, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx}, + transparent, }; +use zebra_node_services::mempool::TransactionDependencies; use self::{eviction_list::EvictionList, verified_set::VerifiedSet}; -use super::{config, downloads::TransactionDownloadVerifyError, MempoolError}; +use super::{ + config, downloads::TransactionDownloadVerifyError, pending_outputs::PendingOutputs, + MempoolError, +}; #[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; @@ -67,6 +72,12 @@ pub enum SameEffectsTipRejectionError { its inputs" )] SpendConflict, + + #[error( + "transaction rejected because it spends missing outputs from \ + another transaction in the mempool" + )] + MissingOutput, } /// Transactions rejected based only on their effects (spends, outputs, transaction header). 
@@ -116,6 +127,9 @@ pub struct Storage { /// The set of verified transactions in the mempool. verified: VerifiedSet, + /// The set of outpoints with pending requests for their associated transparent::Output. + pub(super) pending_outputs: PendingOutputs, + /// The set of transactions rejected due to bad authorizations, or for other /// reasons, and their rejection reasons. These rejections only apply to the /// current tip. @@ -165,6 +179,7 @@ impl Storage { tx_cost_limit: config.tx_cost_limit, eviction_memory_time: config.eviction_memory_time, verified: Default::default(), + pending_outputs: Default::default(), tip_rejected_exact: Default::default(), tip_rejected_same_effects: Default::default(), chain_rejected_same_effects: Default::default(), @@ -173,6 +188,10 @@ impl Storage { /// Insert a [`VerifiedUnminedTx`] into the mempool, caching any rejections. /// + /// Accepts the [`VerifiedUnminedTx`] being inserted and `spent_mempool_outpoints`, + /// a list of transparent inputs of the provided [`VerifiedUnminedTx`] that were found + /// as newly created transparent outputs in the mempool during transaction verification. + /// /// Returns an error if the mempool's verified transactions or rejection caches /// prevent this transaction from being inserted. /// These errors should not be propagated to peers, because the transactions are valid. @@ -180,14 +199,19 @@ impl Storage { /// If inserting this transaction evicts other transactions, they will be tracked /// as [`SameEffectsChainRejectionError::RandomlyEvicted`]. #[allow(clippy::unwrap_in_result)] - pub fn insert(&mut self, tx: VerifiedUnminedTx) -> Result { + pub fn insert( + &mut self, + tx: VerifiedUnminedTx, + spent_mempool_outpoints: Vec, + ) -> Result { // # Security // // This method must call `reject`, rather than modifying the rejection lists directly. 
- let tx_id = tx.transaction.id; + let unmined_tx_id = tx.transaction.id; + let tx_id = unmined_tx_id.mined_id(); // First, check if we have a cached rejection for this transaction. - if let Some(error) = self.rejection_error(&tx_id) { + if let Some(error) = self.rejection_error(&unmined_tx_id) { tracing::trace!( ?tx_id, ?error, @@ -213,8 +237,11 @@ impl Storage { } // Then, we try to insert into the pool. If this fails the transaction is rejected. - let mut result = Ok(tx_id); - if let Err(rejection_error) = self.verified.insert(tx) { + let mut result = Ok(unmined_tx_id); + if let Err(rejection_error) = + self.verified + .insert(tx, spent_mempool_outpoints, &mut self.pending_outputs) + { tracing::debug!( ?tx_id, ?rejection_error, @@ -223,7 +250,7 @@ impl Storage { ); // We could return here, but we still want to check the mempool size - self.reject(tx_id, rejection_error.clone().into()); + self.reject(unmined_tx_id, rejection_error.clone().into()); result = Err(rejection_error.into()); } @@ -256,8 +283,7 @@ impl Storage { ); // If this transaction gets evicted, set its result to the same error - // (we could return here, but we still want to check the mempool size) - if victim_tx.transaction.id == tx_id { + if victim_tx.transaction.id == unmined_tx_id { result = Err(SameEffectsChainRejectionError::RandomlyEvicted.into()); } } @@ -285,6 +311,11 @@ impl Storage { .remove_all_that(|tx| exact_wtxids.contains(&tx.transaction.id)) } + /// Clears a list of mined transaction ids from the verified set's tracked transaction dependencies. + pub fn clear_mined_dependencies(&mut self, mined_ids: &HashSet) { + self.verified.clear_mined_dependencies(mined_ids); + } + /// Reject and remove transactions from the mempool via non-malleable [`transaction::Hash`]. /// - For v5 transactions, transactions are matched by TXID, /// using only the non-malleable transaction ID. @@ -293,6 +324,7 @@ impl Storage { /// - Returns the number of transactions which were removed. 
/// - Removes from the 'verified' set, if present. /// Maintains the order in which the other unmined transactions have been inserted into the mempool. + /// - Prunes `pending_outputs` of any closed channels. /// /// Reject and remove transactions from the mempool that contain any spent outpoints or revealed /// nullifiers from the passed in `transactions`. @@ -327,23 +359,21 @@ impl Storage { let duplicate_spend_ids: HashSet<_> = self .verified .transactions() - .filter_map(|tx| { - (tx.transaction - .spent_outpoints() + .values() + .map(|tx| (tx.transaction.id, &tx.transaction.transaction)) + .filter_map(|(tx_id, tx)| { + (tx.spent_outpoints() .any(|outpoint| spent_outpoints.contains(&outpoint)) || tx - .transaction .sprout_nullifiers() .any(|nullifier| sprout_nullifiers.contains(nullifier)) || tx - .transaction .sapling_nullifiers() .any(|nullifier| sapling_nullifiers.contains(nullifier)) || tx - .transaction .orchard_nullifiers() .any(|nullifier| orchard_nullifiers.contains(nullifier))) - .then_some(tx.id) + .then_some(tx_id) }) .collect(); @@ -367,6 +397,8 @@ impl Storage { ); } + self.pending_outputs.prune(); + num_removed_mined + num_removed_duplicate_spend } @@ -375,6 +407,7 @@ impl Storage { pub fn clear(&mut self) { self.verified.clear(); self.tip_rejected_exact.clear(); + self.pending_outputs.clear(); self.tip_rejected_same_effects.clear(); self.chain_rejected_same_effects.clear(); self.update_rejected_metrics(); @@ -407,24 +440,26 @@ impl Storage { /// Returns the set of [`UnminedTxId`]s in the mempool. pub fn tx_ids(&self) -> impl Iterator + '_ { - self.verified.transactions().map(|tx| tx.id) - } - - /// Returns an iterator over the [`UnminedTx`]s in the mempool. 
- // - // TODO: make the transactions() method return VerifiedUnminedTx, - // and remove the full_transactions() method - pub fn transactions(&self) -> impl Iterator { - self.verified.transactions() + self.transactions().values().map(|tx| tx.transaction.id) } - /// Returns an iterator over the [`VerifiedUnminedTx`] in the set. + /// Returns a reference to the [`HashMap`] of [`VerifiedUnminedTx`]s in the verified set. /// /// Each [`VerifiedUnminedTx`] contains an [`UnminedTx`], /// and adds extra fields from the transaction verifier result. - #[allow(dead_code)] - pub fn full_transactions(&self) -> impl Iterator + '_ { - self.verified.full_transactions() + pub fn transactions(&self) -> &HashMap { + self.verified.transactions() + } + + /// Returns a reference to the [`TransactionDependencies`] in the verified set. + pub fn transaction_dependencies(&self) -> &TransactionDependencies { + self.verified.transaction_dependencies() + } + + /// Returns a [`transparent::Output`] created by a mempool transaction for the provided + /// [`transparent::OutPoint`] if one exists, or None otherwise. + pub fn created_output(&self, outpoint: &transparent::OutPoint) -> Option { + self.verified.created_output(outpoint) } /// Returns the number of transactions in the mempool. 
@@ -455,9 +490,11 @@ impl Storage { &self, tx_ids: HashSet, ) -> impl Iterator { - self.verified - .transactions() - .filter(move |tx| tx_ids.contains(&tx.id)) + tx_ids.into_iter().filter_map(|tx_id| { + self.transactions() + .get(&tx_id.mined_id()) + .map(|tx| &tx.transaction) + }) } /// Returns the set of [`UnminedTx`]es with matching [`transaction::Hash`]es @@ -471,7 +508,9 @@ impl Storage { ) -> impl Iterator { self.verified .transactions() - .filter(move |tx| tx_ids.contains(&tx.id.mined_id())) + .iter() + .filter(move |(tx_id, _)| tx_ids.contains(tx_id)) + .map(|(_, tx)| &tx.transaction) } /// Returns `true` if a transaction exactly matching an [`UnminedTxId`] is in @@ -479,8 +518,8 @@ impl Storage { /// /// This matches the exact transaction, with identical blockchain effects, /// signatures, and proofs. - pub fn contains_transaction_exact(&self, txid: &UnminedTxId) -> bool { - self.verified.transactions().any(|tx| &tx.id == txid) + pub fn contains_transaction_exact(&self, tx_id: &transaction::Hash) -> bool { + self.verified.contains(tx_id) } /// Returns the number of rejected [`UnminedTxId`]s or [`transaction::Hash`]es. @@ -498,13 +537,13 @@ impl Storage { } /// Add a transaction to the rejected list for the given reason. 
- pub fn reject(&mut self, txid: UnminedTxId, reason: RejectionError) { + pub fn reject(&mut self, tx_id: UnminedTxId, reason: RejectionError) { match reason { RejectionError::ExactTip(e) => { - self.tip_rejected_exact.insert(txid, e); + self.tip_rejected_exact.insert(tx_id, e); } RejectionError::SameEffectsTip(e) => { - self.tip_rejected_same_effects.insert(txid.mined_id(), e); + self.tip_rejected_same_effects.insert(tx_id.mined_id(), e); } RejectionError::SameEffectsChain(e) => { let eviction_memory_time = self.eviction_memory_time; @@ -513,7 +552,7 @@ impl Storage { .or_insert_with(|| { EvictionList::new(MAX_EVICTION_MEMORY_ENTRIES, eviction_memory_time) }) - .insert(txid.mined_id()); + .insert(tx_id.mined_id()); } } self.limit_rejection_list_memory(); @@ -565,7 +604,7 @@ impl Storage { /// Add a transaction that failed download and verification to the rejected list /// if needed, depending on the reason for the failure. - pub fn reject_if_needed(&mut self, txid: UnminedTxId, e: TransactionDownloadVerifyError) { + pub fn reject_if_needed(&mut self, tx_id: UnminedTxId, e: TransactionDownloadVerifyError) { match e { // Rejecting a transaction already in state would speed up further // download attempts without checking the state. However it would @@ -588,7 +627,7 @@ impl Storage { // Consensus verification failed. Reject transaction to avoid // having to download and verify it again just for it to fail again. 
TransactionDownloadVerifyError::Invalid(e) => { - self.reject(txid, ExactTipRejectionError::FailedVerification(e).into()) + self.reject(tx_id, ExactTipRejectionError::FailedVerification(e).into()) } } } @@ -605,31 +644,32 @@ impl Storage { pub fn remove_expired_transactions( &mut self, tip_height: zebra_chain::block::Height, - ) -> HashSet { - let mut txid_set = HashSet::new(); - // we need a separate set, since reject() takes the original unmined ID, - // then extracts the mined ID out of it - let mut unmined_id_set = HashSet::new(); - - for t in self.transactions() { - if let Some(expiry_height) = t.transaction.expiry_height() { + ) -> HashSet { + let mut tx_ids = HashSet::new(); + + for (&tx_id, tx) in self.transactions() { + if let Some(expiry_height) = tx.transaction.transaction.expiry_height() { if tip_height >= expiry_height { - txid_set.insert(t.id.mined_id()); - unmined_id_set.insert(t.id); + tx_ids.insert(tx_id); } } } // expiry height is effecting data, so we match by non-malleable TXID self.verified - .remove_all_that(|tx| txid_set.contains(&tx.transaction.id.mined_id())); + .remove_all_that(|tx| tx_ids.contains(&tx.transaction.id.mined_id())); // also reject it - for id in unmined_id_set.iter() { - self.reject(*id, SameEffectsChainRejectionError::Expired.into()); + for &id in &tx_ids { + self.reject( + // It's okay to omit the auth digest here as we know that `reject()` will always + // use mined ids for `SameEffectsChainRejectionError`s. + UnminedTxId::Legacy(id), + SameEffectsChainRejectionError::Expired.into(), + ); } - unmined_id_set + tx_ids } /// Check if transaction should be downloaded and/or verified. @@ -638,7 +678,7 @@ impl Storage { /// then it shouldn't be downloaded/verified. pub fn should_download_or_verify(&mut self, txid: UnminedTxId) -> Result<(), MempoolError> { // Check if the transaction is already in the mempool. 
- if self.contains_transaction_exact(&txid) { + if self.contains_transaction_exact(&txid.mined_id()) { return Err(MempoolError::InMempool); } if let Some(error) = self.rejection_error(&txid) { diff --git a/zebrad/src/components/mempool/storage/tests.rs b/zebrad/src/components/mempool/storage/tests.rs index e47808a3860..197b706d2a4 100644 --- a/zebrad/src/components/mempool/storage/tests.rs +++ b/zebrad/src/components/mempool/storage/tests.rs @@ -1,45 +1,4 @@ -//! Tests and test utility functions for mempool storage. - -use std::ops::RangeBounds; - -use zebra_chain::{ - amount::Amount, - block::Block, - parameters::Network, - serialization::ZcashDeserializeInto, - transaction::{UnminedTx, VerifiedUnminedTx}, -}; +//! Tests for mempool storage. mod prop; mod vectors; - -pub fn unmined_transactions_in_blocks( - block_height_range: impl RangeBounds, - network: &Network, -) -> impl DoubleEndedIterator { - let blocks = network.block_iter(); - - // Deserialize the blocks that are selected based on the specified `block_height_range`. - let selected_blocks = blocks - .filter(move |(&height, _)| block_height_range.contains(&height)) - .map(|(_, block)| { - block - .zcash_deserialize_into::() - .expect("block test vector is structurally valid") - }); - - // Extract the transactions from the blocks and wrap each one as an unmined transaction. - // Use a fake zero miner fee and sigops, because we don't have the UTXOs to calculate - // the correct fee. 
- selected_blocks - .flat_map(|block| block.transactions) - .map(UnminedTx::from) - .map(|transaction| { - VerifiedUnminedTx::new( - transaction, - Amount::try_from(1_000_000).expect("invalid value"), - 0, - ) - .expect("verification should pass") - }) -} diff --git a/zebrad/src/components/mempool/storage/tests/prop.rs b/zebrad/src/components/mempool/storage/tests/prop.rs index eca65935acb..398ba0925f9 100644 --- a/zebrad/src/components/mempool/storage/tests/prop.rs +++ b/zebrad/src/components/mempool/storage/tests/prop.rs @@ -72,7 +72,7 @@ proptest! { for (transaction_to_accept, transaction_to_reject) in input_permutations { let id_to_accept = transaction_to_accept.transaction.id; - prop_assert_eq!(storage.insert(transaction_to_accept), Ok(id_to_accept)); + prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new()), Ok(id_to_accept)); // Make unique IDs by converting the index to bytes, and writing it to each ID let unique_ids = (0..MAX_EVICTION_MEMORY_ENTRIES as u32).map(move |index| { @@ -96,7 +96,7 @@ proptest! { // - transaction_to_accept, or // - a rejection from rejections prop_assert_eq!( - storage.insert(transaction_to_reject), + storage.insert(transaction_to_reject, Vec::new()), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); @@ -147,13 +147,13 @@ proptest! { if i < transactions.len() - 1 { // The initial transactions should be successful prop_assert_eq!( - storage.insert(transaction.clone()), + storage.insert(transaction.clone(), Vec::new()), Ok(tx_id) ); } else { // The final transaction will cause a random eviction, // which might return an error if this transaction is chosen - let result = storage.insert(transaction.clone()); + let result = storage.insert(transaction.clone(), Vec::new()); if result.is_ok() { prop_assert_eq!( @@ -281,10 +281,10 @@ proptest! 
{ let id_to_accept = transaction_to_accept.transaction.id; let id_to_reject = transaction_to_reject.transaction.id; - prop_assert_eq!(storage.insert(transaction_to_accept), Ok(id_to_accept)); + prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new()), Ok(id_to_accept)); prop_assert_eq!( - storage.insert(transaction_to_reject), + storage.insert(transaction_to_reject, Vec::new()), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); @@ -332,19 +332,19 @@ proptest! { let id_to_reject = transaction_to_reject.transaction.id; prop_assert_eq!( - storage.insert(first_transaction_to_accept), + storage.insert(first_transaction_to_accept, Vec::new()), Ok(first_id_to_accept) ); prop_assert_eq!( - storage.insert(transaction_to_reject), + storage.insert(transaction_to_reject, Vec::new()), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); prop_assert!(storage.contains_rejected(&id_to_reject)); prop_assert_eq!( - storage.insert(second_transaction_to_accept), + storage.insert(second_transaction_to_accept, Vec::new()), Ok(second_id_to_accept) ); @@ -371,13 +371,13 @@ proptest! { .filter_map(|transaction| { let id = transaction.transaction.id; - storage.insert(transaction.clone()).ok().map(|_| id) + storage.insert(transaction.clone(), Vec::new()).ok().map(|_| id) }) .collect(); // Check that the inserted transactions are still there. for transaction_id in &inserted_transactions { - prop_assert!(storage.contains_transaction_exact(transaction_id)); + prop_assert!(storage.contains_transaction_exact(&transaction_id.mined_id())); } // Remove some transactions. @@ -399,14 +399,14 @@ proptest! 
{ let removed_transactions = input.removed_transaction_ids(); for removed_transaction_id in &removed_transactions { - prop_assert!(!storage.contains_transaction_exact(removed_transaction_id)); + prop_assert!(!storage.contains_transaction_exact(&removed_transaction_id.mined_id())); } // Check that the remaining transactions are still in the storage. let remaining_transactions = inserted_transactions.difference(&removed_transactions); for remaining_transaction_id in remaining_transactions { - prop_assert!(storage.contains_transaction_exact(remaining_transaction_id)); + prop_assert!(storage.contains_transaction_exact(&remaining_transaction_id.mined_id())); } } } diff --git a/zebrad/src/components/mempool/storage/tests/vectors.rs b/zebrad/src/components/mempool/storage/tests/vectors.rs index 5b60c133e95..30ce35bb832 100644 --- a/zebrad/src/components/mempool/storage/tests/vectors.rs +++ b/zebrad/src/components/mempool/storage/tests/vectors.rs @@ -4,15 +4,14 @@ use std::iter; use color_eyre::eyre::Result; +use transparent::OutPoint; use zebra_chain::{ amount::Amount, block::{Block, Height}, parameters::Network, }; -use crate::components::mempool::{ - storage::tests::unmined_transactions_in_blocks, storage::*, Mempool, -}; +use crate::components::mempool::{storage::*, Mempool}; /// Eviction memory time used for tests. Most tests won't care about this /// so we use a large enough value that will never be reached in the tests. @@ -35,22 +34,23 @@ fn mempool_storage_crud_exact_mainnet() { }); // Get one (1) unmined transaction - let unmined_tx = unmined_transactions_in_blocks(.., &network) + let unmined_tx = network + .unmined_transactions_in_blocks(..) .next() .expect("at least one unmined transaction"); // Insert unmined tx into the mempool. - let _ = storage.insert(unmined_tx.clone()); + let _ = storage.insert(unmined_tx.clone(), Vec::new()); // Check that it is in the mempool, and not rejected. 
- assert!(storage.contains_transaction_exact(&unmined_tx.transaction.id)); + assert!(storage.contains_transaction_exact(&unmined_tx.transaction.id.mined_id())); // Remove tx let removal_count = storage.remove_exact(&iter::once(unmined_tx.transaction.id).collect()); // Check that it is /not/ in the mempool. assert_eq!(removal_count, 1); - assert!(!storage.contains_transaction_exact(&unmined_tx.transaction.id)); + assert!(!storage.contains_transaction_exact(&unmined_tx.transaction.id.mined_id())); } #[test] @@ -69,7 +69,7 @@ fn mempool_storage_basic() -> Result<()> { fn mempool_storage_basic_for_network(network: Network) -> Result<()> { // Get transactions from the first 10 blocks of the Zcash blockchain - let unmined_transactions: Vec<_> = unmined_transactions_in_blocks(..=10, &network).collect(); + let unmined_transactions: Vec<_> = network.unmined_transactions_in_blocks(..=10).collect(); assert!( MEMPOOL_TX_COUNT < unmined_transactions.len(), @@ -94,7 +94,7 @@ fn mempool_storage_basic_for_network(network: Network) -> Result<()> { let mut maybe_inserted_transactions = Vec::new(); let mut some_rejected_transactions = Vec::new(); for unmined_transaction in unmined_transactions.clone() { - let result = storage.insert(unmined_transaction.clone()); + let result = storage.insert(unmined_transaction.clone(), Vec::new()); match result { Ok(_) => { // While the transaction was inserted here, it can be rejected later. @@ -124,7 +124,7 @@ fn mempool_storage_basic_for_network(network: Network) -> Result<()> { // Test if rejected transactions were actually rejected. 
for tx in some_rejected_transactions.iter() { - assert!(!storage.contains_transaction_exact(&tx.transaction.id)); + assert!(!storage.contains_transaction_exact(&tx.transaction.id.mined_id())); } // Query all the ids we have for rejected, get back `total - MEMPOOL_SIZE` @@ -162,15 +162,16 @@ fn mempool_storage_crud_same_effects_mainnet() { }); // Get one (1) unmined transaction - let unmined_tx_1 = unmined_transactions_in_blocks(.., &network) + let unmined_tx_1 = network + .unmined_transactions_in_blocks(..) .next() .expect("at least one unmined transaction"); // Insert unmined tx into the mempool. - let _ = storage.insert(unmined_tx_1.clone()); + let _ = storage.insert(unmined_tx_1.clone(), Vec::new()); // Check that it is in the mempool, and not rejected. - assert!(storage.contains_transaction_exact(&unmined_tx_1.transaction.id)); + assert!(storage.contains_transaction_exact(&unmined_tx_1.transaction.id.mined_id())); // Reject and remove mined tx let removal_count = storage.reject_and_remove_same_effects( @@ -180,7 +181,7 @@ fn mempool_storage_crud_same_effects_mainnet() { // Check that it is /not/ in the mempool as a verified transaction. assert_eq!(removal_count, 1); - assert!(!storage.contains_transaction_exact(&unmined_tx_1.transaction.id)); + assert!(!storage.contains_transaction_exact(&unmined_tx_1.transaction.id.mined_id())); // Check that it's rejection is cached in the chain_rejected_same_effects' `Mined` eviction list. assert_eq!( @@ -188,12 +189,13 @@ fn mempool_storage_crud_same_effects_mainnet() { Some(SameEffectsChainRejectionError::Mined.into()) ); assert_eq!( - storage.insert(unmined_tx_1), + storage.insert(unmined_tx_1, Vec::new()), Err(SameEffectsChainRejectionError::Mined.into()) ); // Get a different unmined transaction - let unmined_tx_2 = unmined_transactions_in_blocks(1.., &network) + let unmined_tx_2 = network + .unmined_transactions_in_blocks(1..) 
.find(|tx| { tx.transaction .transaction @@ -205,12 +207,12 @@ fn mempool_storage_crud_same_effects_mainnet() { // Insert unmined tx into the mempool. assert_eq!( - storage.insert(unmined_tx_2.clone()), + storage.insert(unmined_tx_2.clone(), Vec::new()), Ok(unmined_tx_2.transaction.id) ); // Check that it is in the mempool, and not rejected. - assert!(storage.contains_transaction_exact(&unmined_tx_2.transaction.id)); + assert!(storage.contains_transaction_exact(&unmined_tx_2.transaction.id.mined_id())); // Reject and remove duplicate spend tx let removal_count = storage.reject_and_remove_same_effects( @@ -220,7 +222,7 @@ fn mempool_storage_crud_same_effects_mainnet() { // Check that it is /not/ in the mempool as a verified transaction. assert_eq!(removal_count, 1); - assert!(!storage.contains_transaction_exact(&unmined_tx_2.transaction.id)); + assert!(!storage.contains_transaction_exact(&unmined_tx_2.transaction.id.mined_id())); // Check that it's rejection is cached in the chain_rejected_same_effects' `SpendConflict` eviction list. 
assert_eq!( @@ -228,7 +230,7 @@ fn mempool_storage_crud_same_effects_mainnet() { Some(SameEffectsChainRejectionError::DuplicateSpend.into()) ); assert_eq!( - storage.insert(unmined_tx_2), + storage.insert(unmined_tx_2, Vec::new()), Err(SameEffectsChainRejectionError::DuplicateSpend.into()) ); } @@ -269,6 +271,7 @@ fn mempool_expired_basic_for_network(network: Network) -> Result<()> { 0, ) .expect("verification should pass"), + Vec::new(), )?; assert_eq!(storage.transaction_count(), 1); @@ -280,7 +283,7 @@ fn mempool_expired_basic_for_network(network: Network) -> Result<()> { // remove_expired_transactions() will return what was removed let expired = storage.remove_expired_transactions(Height(1)); - assert!(expired.contains(&tx_id)); + assert!(expired.contains(&tx_id.mined_id())); let everything_in_mempool: HashSet = storage.tx_ids().collect(); assert_eq!(everything_in_mempool.len(), 0); @@ -290,3 +293,95 @@ fn mempool_expired_basic_for_network(network: Network) -> Result<()> { Ok(()) } + +/// Check that the transaction dependencies are updated when transactions with spent mempool outputs +/// are inserted into storage, and that the `Storage.remove()` method also removes any transactions +/// that directly or indirectly spend outputs of a removed transaction. +#[test] +fn mempool_removes_dependent_transactions() -> Result<()> { + let network = Network::Mainnet; + + // Create an empty storage + let mut storage: Storage = Storage::new(&config::Config { + tx_cost_limit: 160_000_000, + eviction_memory_time: EVICTION_MEMORY_TIME, + ..Default::default() + }); + + let unmined_txs_with_transparent_outputs = || { + network + .unmined_transactions_in_blocks(..) 
+ .filter(|tx| !tx.transaction.transaction.outputs().is_empty()) + }; + + let mut fake_spent_outpoints: Vec = Vec::new(); + let mut expected_transaction_dependencies = HashMap::new(); + let mut expected_transaction_dependents = HashMap::new(); + for unmined_tx in unmined_txs_with_transparent_outputs() { + let tx_id = unmined_tx.transaction.id.mined_id(); + let num_outputs = unmined_tx.transaction.transaction.outputs().len(); + + if let Some(&fake_spent_outpoint) = fake_spent_outpoints.first() { + expected_transaction_dependencies + .insert(tx_id, [fake_spent_outpoint.hash].into_iter().collect()); + expected_transaction_dependents + .insert(fake_spent_outpoint.hash, [tx_id].into_iter().collect()); + } + + storage + .insert(unmined_tx.clone(), fake_spent_outpoints) + .expect("should insert transaction"); + + // Add up to 5 of this transaction's outputs as fake spent outpoints for the next transaction + fake_spent_outpoints = (0..num_outputs.min(5)) + .map(|i| OutPoint::from_usize(tx_id, i)) + .collect(); + } + + assert_eq!( + storage.transaction_dependencies().dependencies().len(), + unmined_txs_with_transparent_outputs() + .count() + .checked_sub(1) + .expect("at least one unmined transaction with transparent outputs"), + "should have an entry all inserted txns except the first one" + ); + + assert_eq!( + storage.transaction_dependencies().dependencies(), + &expected_transaction_dependencies, + "should have expected transaction dependencies" + ); + + assert_eq!( + storage.transaction_dependencies().dependents(), + &expected_transaction_dependents, + "should have expected transaction dependents" + ); + + // Remove the first transaction and check that everything in storage is emptied. 
+ let first_tx = unmined_txs_with_transparent_outputs() + .next() + .expect("at least one unmined transaction with transparent outputs"); + + let expected_num_removed = storage.transaction_count(); + let num_removed = storage.remove_exact(&[first_tx.transaction.id].into_iter().collect()); + + assert_eq!( + num_removed, expected_num_removed, + "remove_exact should total storage transaction count" + ); + + assert!( + storage.transaction_dependencies().dependencies().is_empty(), + "tx deps should be empty" + ); + + assert_eq!( + storage.transaction_count(), + 0, + "verified set should be empty" + ); + + Ok(()) +} diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index a9c850b4ef8..7cd82fb0be4 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -2,15 +2,18 @@ use std::{ borrow::Cow, - collections::{HashSet, VecDeque}, + collections::{HashMap, HashSet}, hash::Hash, }; use zebra_chain::{ orchard, sapling, sprout, - transaction::{Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx}, + transaction::{self, UnminedTx, VerifiedUnminedTx}, transparent, }; +use zebra_node_services::mempool::TransactionDependencies; + +use crate::components::mempool::pending_outputs::PendingOutputs; use super::super::SameEffectsTipRejectionError; @@ -23,6 +26,8 @@ use zebra_chain::transaction::MEMPOOL_TRANSACTION_COST_THRESHOLD; /// This also caches the all the spent outputs from the transactions in the mempool. 
The spent /// outputs include: /// +/// - the dependencies of transactions that spent the outputs of other transactions in the mempool +/// - the outputs of transactions in the mempool /// - the transparent outpoints spent by transactions in the mempool /// - the Sprout nullifiers revealed by transactions in the mempool /// - the Sapling nullifiers revealed by transactions in the mempool @@ -30,7 +35,16 @@ use zebra_chain::transaction::MEMPOOL_TRANSACTION_COST_THRESHOLD; #[derive(Default)] pub struct VerifiedSet { /// The set of verified transactions in the mempool. - transactions: VecDeque, + transactions: HashMap, + + /// A map of dependencies between transactions in the mempool that + /// spend or create outputs of other transactions in the mempool. + transaction_dependencies: TransactionDependencies, + + /// The [`transparent::Output`]s created by verified transactions in the mempool. + /// + /// These outputs may be spent by other transactions in the mempool. + created_outputs: HashMap, /// The total size of the transactions in the mempool if they were /// serialized. @@ -60,20 +74,20 @@ impl Drop for VerifiedSet { } impl VerifiedSet { - /// Returns an iterator over the [`UnminedTx`] in the set. - // - // TODO: make the transactions() method return VerifiedUnminedTx, - // and remove the full_transactions() method - pub fn transactions(&self) -> impl Iterator + '_ { - self.transactions.iter().map(|tx| &tx.transaction) + /// Returns a reference to the [`HashMap`] of [`VerifiedUnminedTx`]s in the set. + pub fn transactions(&self) -> &HashMap { + &self.transactions } - /// Returns an iterator over the [`VerifiedUnminedTx`] in the set. - /// - /// Each [`VerifiedUnminedTx`] contains an [`UnminedTx`], - /// and adds extra fields from the transaction verifier result. - pub fn full_transactions(&self) -> impl Iterator + '_ { - self.transactions.iter() + /// Returns a reference to the [`TransactionDependencies`] in the set. 
+ pub fn transaction_dependencies(&self) -> &TransactionDependencies { + &self.transaction_dependencies + } + + /// Returns a [`transparent::Output`] created by a mempool transaction for the provided + /// [`transparent::OutPoint`] if one exists, or None otherwise. + pub fn created_output(&self, outpoint: &transparent::OutPoint) -> Option { + self.created_outputs.get(outpoint).cloned() } /// Returns the number of verified transactions in the set. @@ -97,9 +111,9 @@ impl VerifiedSet { } /// Returns `true` if the set of verified transactions contains the transaction with the - /// specified [`UnminedTxId`]. - pub fn contains(&self, id: &UnminedTxId) -> bool { - self.transactions.iter().any(|tx| &tx.transaction.id == id) + /// specified [`transaction::Hash`]. + pub fn contains(&self, id: &transaction::Hash) -> bool { + self.transactions.contains_key(id) } /// Clear the set of verified transactions. @@ -107,10 +121,12 @@ impl VerifiedSet { /// Also clears all internal caches. pub fn clear(&mut self) { self.transactions.clear(); + self.transaction_dependencies.clear(); self.spent_outpoints.clear(); self.sprout_nullifiers.clear(); self.sapling_nullifiers.clear(); self.orchard_nullifiers.clear(); + self.created_outputs.clear(); self.transactions_serialized_size = 0; self.total_cost = 0; self.update_metrics(); @@ -126,22 +142,49 @@ impl VerifiedSet { pub fn insert( &mut self, transaction: VerifiedUnminedTx, + spent_mempool_outpoints: Vec, + pending_outputs: &mut PendingOutputs, ) -> Result<(), SameEffectsTipRejectionError> { if self.has_spend_conflicts(&transaction.transaction) { return Err(SameEffectsTipRejectionError::SpendConflict); } - self.cache_outputs_from(&transaction.transaction.transaction); + // This likely only needs to check that the transaction hash of the outpoint is still in the mempool, + // but it's likely rare that a transaction spends multiple transparent outputs of + // a single transaction in practice. 
+ for outpoint in &spent_mempool_outpoints { + if !self.created_outputs.contains_key(outpoint) { + return Err(SameEffectsTipRejectionError::MissingOutput); + } + } + + let tx_id = transaction.transaction.id.mined_id(); + self.transaction_dependencies + .add(tx_id, spent_mempool_outpoints); + + // Inserts the transaction's outputs into the internal caches and responds to pending output requests. + let tx = &transaction.transaction.transaction; + for (index, output) in tx.outputs().iter().cloned().enumerate() { + let outpoint = transparent::OutPoint::from_usize(tx_id, index); + self.created_outputs.insert(outpoint, output.clone()); + pending_outputs.respond(&outpoint, output) + } + self.spent_outpoints.extend(tx.spent_outpoints()); + self.sprout_nullifiers.extend(tx.sprout_nullifiers()); + self.sapling_nullifiers.extend(tx.sapling_nullifiers()); + self.orchard_nullifiers.extend(tx.orchard_nullifiers()); + self.transactions_serialized_size += transaction.transaction.size; self.total_cost += transaction.cost(); - self.transactions.push_front(transaction); + self.transactions.insert(tx_id, transaction); self.update_metrics(); Ok(()) } - /// Evict one transaction from the set, returns the victim transaction. + /// Evict one transaction and any transactions that directly or indirectly depend on + /// its outputs from the set, returns the victim transaction and any dependent transactions. /// /// Removes a transaction with probability in direct proportion to the /// eviction weight, as per [ZIP-401]. @@ -159,72 +202,90 @@ impl VerifiedSet { /// to 20,000 (mempooltxcostlimit/min(cost)), so the actual cost shouldn't /// be too bad. /// + /// This function is equivalent to `EvictTransaction` in [ZIP-401]. 
+ /// /// [ZIP-401]: https://zips.z.cash/zip-0401 #[allow(clippy::unwrap_in_result)] pub fn evict_one(&mut self) -> Option { - if self.transactions.is_empty() { - None - } else { - use rand::distributions::{Distribution, WeightedIndex}; - use rand::prelude::thread_rng; - - let weights: Vec = self - .transactions - .iter() - .map(|tx| tx.clone().eviction_weight()) - .collect(); - - let dist = WeightedIndex::new(weights) - .expect("there is at least one weight, all weights are non-negative, and the total is positive"); - - Some(self.remove(dist.sample(&mut thread_rng()))) - } + use rand::distributions::{Distribution, WeightedIndex}; + use rand::prelude::thread_rng; + + let (keys, weights): (Vec, Vec) = self + .transactions + .iter() + .map(|(&tx_id, tx)| (tx_id, tx.eviction_weight())) + .unzip(); + + let dist = WeightedIndex::new(weights).expect( + "there is at least one weight, all weights are non-negative, and the total is positive", + ); + + let key_to_remove = keys + .get(dist.sample(&mut thread_rng())) + .expect("should have a key at every index in the distribution"); + + // Removes the randomly selected transaction and all of its dependents from the set, + // then returns just the randomly selected transaction + self.remove(key_to_remove).pop() + } + + /// Clears a list of mined transaction ids from the lists of dependencies for + /// any other transactions in the mempool and removes their dependents. + pub fn clear_mined_dependencies(&mut self, mined_ids: &HashSet) { + self.transaction_dependencies + .clear_mined_dependencies(mined_ids); } /// Removes all transactions in the set that match the `predicate`. /// /// Returns the amount of transactions removed. pub fn remove_all_that(&mut self, predicate: impl Fn(&VerifiedUnminedTx) -> bool) -> usize { - // Clippy suggests to remove the `collect` and the `into_iter` further down. However, it is - // unable to detect that when that is done, there is a borrow conflict. 
What happens is the - // iterator borrows `self.transactions` immutably, but it also need to be borrowed mutably - // in order to remove the transactions while traversing the iterator. - #[allow(clippy::needless_collect)] - let indices_to_remove: Vec<_> = self + let keys_to_remove: Vec<_> = self .transactions .iter() - .enumerate() - .filter(|(_, tx)| predicate(tx)) - .map(|(index, _)| index) + .filter_map(|(&tx_id, tx)| predicate(tx).then_some(tx_id)) .collect(); - let removed_count = indices_to_remove.len(); + let mut removed_count = 0; - // Correctness: remove indexes in reverse order, - // so earlier indexes still correspond to the same transactions - for index_to_remove in indices_to_remove.into_iter().rev() { - self.remove(index_to_remove); + for key_to_remove in keys_to_remove { + removed_count += self.remove(&key_to_remove).len(); } removed_count } - /// Removes a transaction from the set. + /// Accepts a transaction id for a transaction to remove from the verified set. /// - /// Also removes its outputs from the internal caches. - fn remove(&mut self, transaction_index: usize) -> VerifiedUnminedTx { - let removed_tx = self - .transactions - .remove(transaction_index) - .expect("invalid transaction index"); - - self.transactions_serialized_size -= removed_tx.transaction.size; - self.total_cost -= removed_tx.cost(); - self.remove_outputs(&removed_tx.transaction); + /// Removes the transaction and any transactions that directly or indirectly + /// depend on it from the set. + /// + /// Returns a list of transactions that have been removed with the target transaction + /// as the last item. + /// + /// Also removes the outputs of any removed transactions from the internal caches. 
+ fn remove(&mut self, key_to_remove: &transaction::Hash) -> Vec { + let removed_transactions: Vec<_> = self + .transaction_dependencies + .remove_all(key_to_remove) + .iter() + .chain(std::iter::once(key_to_remove)) + .map(|key_to_remove| { + let removed_tx = self + .transactions + .remove(key_to_remove) + .expect("invalid transaction key"); + + self.transactions_serialized_size -= removed_tx.transaction.size; + self.total_cost -= removed_tx.cost(); + self.remove_outputs(&removed_tx.transaction); + + removed_tx + }) + .collect(); self.update_metrics(); - - removed_tx + removed_transactions } /// Returns `true` if the given `transaction` has any spend conflicts with transactions in the @@ -241,18 +302,18 @@ impl VerifiedSet { || Self::has_conflicts(&self.orchard_nullifiers, tx.orchard_nullifiers().copied()) } - /// Inserts the transaction's outputs into the internal caches. - fn cache_outputs_from(&mut self, tx: &Transaction) { - self.spent_outpoints.extend(tx.spent_outpoints()); - self.sprout_nullifiers.extend(tx.sprout_nullifiers()); - self.sapling_nullifiers.extend(tx.sapling_nullifiers()); - self.orchard_nullifiers.extend(tx.orchard_nullifiers()); - } - /// Removes the tracked transaction outputs from the mempool. 
fn remove_outputs(&mut self, unmined_tx: &UnminedTx) { let tx = &unmined_tx.transaction; + for index in 0..tx.outputs().len() { + self.created_outputs + .remove(&transparent::OutPoint::from_usize( + unmined_tx.id.mined_id(), + index, + )); + } + let spent_outpoints = tx.spent_outpoints().map(Cow::Owned); let sprout_nullifiers = tx.sprout_nullifiers().map(Cow::Borrowed); let sapling_nullifiers = tx.sapling_nullifiers().map(Cow::Borrowed); @@ -308,7 +369,7 @@ impl VerifiedSet { let mut size_with_weight_gt2 = 0; let mut size_with_weight_gt3 = 0; - for entry in self.full_transactions() { + for entry in self.transactions().values() { paid_actions += entry.conventional_actions - entry.unpaid_actions; if entry.fee_weight_ratio > 3.0 { diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index 9f05b79d567..e12b205e34c 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -74,7 +74,7 @@ proptest! { // Insert a dummy transaction. mempool .storage() - .insert(transaction.0) + .insert(transaction.0, Vec::new()) .expect("Inserting a transaction should succeed"); // The first call to `poll_ready` shouldn't clear the storage yet. @@ -148,7 +148,7 @@ proptest! { // Insert the dummy transaction into the mempool. mempool .storage() - .insert(transaction.0.clone()) + .insert(transaction.0.clone(), Vec::new()) .expect("Inserting a transaction should succeed"); // Set the new chain tip. @@ -205,7 +205,7 @@ proptest! { // Insert a dummy transaction. mempool .storage() - .insert(transaction) + .insert(transaction, Vec::new()) .expect("Inserting a transaction should succeed"); // The first call to `poll_ready` shouldn't clear the storage yet. 
diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 86848c8bae7..1b87097aaf1 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -1,6 +1,6 @@ //! Fixed test vectors for the mempool. -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use color_eyre::Report; use tokio::time::{self, timeout}; @@ -8,7 +8,7 @@ use tower::{ServiceBuilder, ServiceExt}; use zebra_chain::{ amount::Amount, block::Block, fmt::humantime_seconds, parameters::Network, - serialization::ZcashDeserializeInto, transaction::VerifiedUnminedTx, + serialization::ZcashDeserializeInto, transaction::VerifiedUnminedTx, transparent::OutPoint, }; use zebra_consensus::transaction as tx; use zebra_state::{Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; @@ -42,7 +42,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> { let network = Network::Mainnet; // get the genesis block transactions from the Zcash blockchain. - let mut unmined_transactions = unmined_transactions_in_blocks(1..=10, &network); + let mut unmined_transactions = network.unmined_transactions_in_blocks(1..=10); let genesis_transaction = unmined_transactions .next() .expect("Missing genesis transaction"); @@ -61,7 +61,9 @@ async fn mempool_service_basic_single() -> Result<(), Report> { // Insert the genesis block coinbase transaction into the mempool storage. let mut inserted_ids = HashSet::new(); - service.storage().insert(genesis_transaction.clone())?; + service + .storage() + .insert(genesis_transaction.clone(), Vec::new())?; inserted_ids.insert(genesis_transaction.transaction.id); // Test `Request::TransactionIds` @@ -131,7 +133,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> { inserted_ids.insert(tx.transaction.id); // Error must be ignored because a insert can trigger an eviction and // an error is returned if the transaction being inserted in chosen. 
- let _ = service.storage().insert(tx.clone()); + let _ = service.storage().insert(tx.clone(), Vec::new()); } // Test `Request::RejectedTransactionIds` @@ -185,7 +187,7 @@ async fn mempool_queue_single() -> Result<(), Report> { let network = Network::Mainnet; // Get transactions to use in the test - let unmined_transactions = unmined_transactions_in_blocks(1..=10, &network); + let unmined_transactions = network.unmined_transactions_in_blocks(1..=10); let mut transactions = unmined_transactions.collect::>(); // Split unmined_transactions into: // [transactions..., new_tx] @@ -212,7 +214,7 @@ async fn mempool_queue_single() -> Result<(), Report> { for tx in transactions.iter() { // Error must be ignored because a insert can trigger an eviction and // an error is returned if the transaction being inserted in chosen. - let _ = service.storage().insert(tx.clone()); + let _ = service.storage().insert(tx.clone(), Vec::new()); } // Test `Request::Queue` for a new transaction @@ -278,7 +280,7 @@ async fn mempool_service_disabled() -> Result<(), Report> { setup(&network, u64::MAX, true).await; // get the genesis block transactions from the Zcash blockchain. - let mut unmined_transactions = unmined_transactions_in_blocks(1..=10, &network); + let mut unmined_transactions = network.unmined_transactions_in_blocks(1..=10); let genesis_transaction = unmined_transactions .next() .expect("Missing genesis transaction"); @@ -293,7 +295,9 @@ async fn mempool_service_disabled() -> Result<(), Report> { assert!(service.is_enabled()); // Insert the genesis block coinbase transaction into the mempool storage. - service.storage().insert(genesis_transaction.clone())?; + service + .storage() + .insert(genesis_transaction.clone(), Vec::new())?; // Test if the mempool answers correctly (i.e. 
is enabled) let response = service @@ -614,7 +618,7 @@ async fn mempool_failed_verification_is_rejected() -> Result<(), Report> { ) = setup(&network, u64::MAX, true).await; // Get transactions to use in the test - let mut unmined_transactions = unmined_transactions_in_blocks(1..=2, &network); + let mut unmined_transactions = network.unmined_transactions_in_blocks(1..=2); let rejected_tx = unmined_transactions.next().unwrap().clone(); // Enable the mempool @@ -689,7 +693,7 @@ async fn mempool_failed_download_is_not_rejected() -> Result<(), Report> { ) = setup(&network, u64::MAX, true).await; // Get transactions to use in the test - let mut unmined_transactions = unmined_transactions_in_blocks(1..=2, &network); + let mut unmined_transactions = network.unmined_transactions_in_blocks(1..=2); let rejected_valid_tx = unmined_transactions.next().unwrap().clone(); // Enable the mempool @@ -918,6 +922,129 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { Ok(()) } +/// Checks that the mempool service responds to AwaitOutput requests after verifying transactions +/// that create those outputs, or immediately if the outputs had been created by transaction that +/// are already in the mempool. 
+#[tokio::test(flavor = "multi_thread")] +async fn mempool_responds_to_await_output() -> Result<(), Report> { + let network = Network::Mainnet; + + let ( + mut mempool, + _peer_set, + _state_service, + _chain_tip_change, + mut tx_verifier, + mut recent_syncs, + ) = setup(&network, u64::MAX, true).await; + mempool.enable(&mut recent_syncs).await; + + let verified_unmined_tx = network + .unmined_transactions_in_blocks(1..=10) + .find(|tx| !tx.transaction.transaction.outputs().is_empty()) + .expect("should have at least 1 tx with transparent outputs"); + + let unmined_tx = verified_unmined_tx.transaction.clone(); + let output_index = 0; + let outpoint = OutPoint::from_usize(unmined_tx.id.mined_id(), output_index); + let expected_output = unmined_tx + .transaction + .outputs() + .get(output_index) + .expect("already checked that tx has outputs") + .clone(); + + // Call mempool with an AwaitOutput request + + let request = Request::AwaitOutput(outpoint); + let await_output_response_fut = mempool.ready().await.unwrap().call(request); + + // Queue the transaction with the pending output to be added to the mempool + + let request = Request::Queue(vec![Gossip::Tx(unmined_tx)]); + let queue_response_fut = mempool.ready().await.unwrap().call(request); + let mock_verify_tx_fut = tx_verifier.expect_request_that(|_| true).map(|responder| { + responder.respond(transaction::Response::Mempool { + transaction: verified_unmined_tx, + spent_mempool_outpoints: Vec::new(), + }); + }); + + let (response, _) = futures::join!(queue_response_fut, mock_verify_tx_fut); + let Response::Queued(mut results) = response.expect("response should be Ok") else { + panic!("wrong response from mempool to Queued request"); + }; + + let result_rx = results.remove(0).expect("should pass initial checks"); + assert!(results.is_empty(), "should have 1 result for 1 queued tx"); + + tokio::time::timeout(Duration::from_secs(10), result_rx) + .await + .expect("should not time out") + .expect("mempool tx 
verification result channel should not be closed") + .expect("mocked verification should be successful"); + + // Wait for next steps in mempool's Downloads to finish + // TODO: Move this and the `ready().await` below above waiting for the mempool verification result above after + // waiting to respond with a transaction's verification result until after it's been inserted into the mempool. + tokio::time::sleep(Duration::from_secs(1)).await; + + mempool + .ready() + .await + .expect("polling mempool should succeed"); + + assert_eq!( + mempool.storage().transaction_count(), + 1, + "should have 1 transaction in mempool's verified set" + ); + + assert_eq!( + mempool.storage().created_output(&outpoint), + Some(expected_output.clone()), + "created output should match expected output" + ); + + // Check that the AwaitOutput request has been responded to after the relevant tx was added to the verified set + + let response_fut = tokio::time::timeout(Duration::from_secs(30), await_output_response_fut); + let response = response_fut + .await + .expect("should not time out") + .expect("should not return RecvError"); + + let Response::UnspentOutput(response) = response else { + panic!("wrong response from mempool to AwaitOutput request"); + }; + + assert_eq!( + response, expected_output, + "AwaitOutput response should match expected output" + ); + + // Check that the mempool responds to AwaitOutput requests correctly when the outpoint is already in its `created_outputs` collection too. 
+ + let request = Request::AwaitOutput(outpoint); + let await_output_response_fut = mempool.ready().await.unwrap().call(request); + let response_fut = tokio::time::timeout(Duration::from_secs(30), await_output_response_fut); + let response = response_fut + .await + .expect("should not time out") + .expect("should not return RecvError"); + + let Response::UnspentOutput(response) = response else { + panic!("wrong response from mempool to AwaitOutput request"); + }; + + assert_eq!( + response, expected_output, + "AwaitOutput response should match expected output" + ); + + Ok(()) +} + /// Create a new [`Mempool`] instance using mocked services. async fn setup( network: &Network, @@ -943,7 +1070,7 @@ async fn setup( let (sync_status, recent_syncs) = SyncStatus::new(); - let (mempool, _mempool_transaction_receiver) = Mempool::new( + let (mempool, mut mempool_transaction_receiver) = Mempool::new( &mempool::Config { tx_cost_limit, ..Default::default() @@ -956,6 +1083,8 @@ async fn setup( chain_tip_change.clone(), ); + tokio::spawn(async move { while mempool_transaction_receiver.recv().await.is_ok() {} }); + if should_commit_genesis_block { let genesis_block: Arc = zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES .zcash_deserialize_into() diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index b0949cd336d..1a8cefbe0b2 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -2914,7 +2914,8 @@ async fn validate_regtest_genesis_block() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &network, state).await; + ) = zebra_consensus::router::init_test(zebra_consensus::Config::default(), &network, state) + .await; let genesis_hash = block_verifier_router .oneshot(zebra_consensus::Request::Commit(regtest_genesis_block())) @@ -3314,8 +3315,12 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { _transaction_verifier, 
_parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init(zebra_consensus::Config::default(), &network, state.clone()) - .await; + ) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + &network, + state.clone(), + ) + .await; tracing::info!("started state service and block verifier, committing Regtest genesis block"); @@ -3348,6 +3353,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { .await .respond(mempool::Response::FullTransactions { transactions: vec![], + transaction_dependencies: Default::default(), // tip hash needs to match chain info for long poll requests last_seen_tip_hash: genesis_hash, }); From 1e46914131e31a3c0178e20d808bbd6abcfb533c Mon Sep 17 00:00:00 2001 From: Arya Date: Mon, 18 Nov 2024 07:16:27 -0500 Subject: [PATCH 019/245] add(rpc): Adds `getblockheader` RPC method (#8967) * Adds getblockheader RPC method * Updates snapshots, adds hash/height/next_block_hash fields to verbose response * updates getblock snapshot * updates getblockheader response type to hex-encode fields, adds ToHex impl for sapling::tree::Root, adds snapshot and vector tests for new RPC method, adds snapshots. * rustfmt * fixes snapshots, matches zcashd more closely * fixes vectors test * updates lwd failure messages (probably doesn't matter, but seems better to handle it now than risk debugging it later) * fixes getblock_rpc test, fixes/reverses finalsaplingroot field byte-order. * fixes vector test, addresses remaining differences with zcashd (except the `chainwork` field), updates snapshots, and avoids a possible panic when there's a chain reorg between state queries. 
* Adds a comment noting that the `relative_to_network()` method was copied from zcashd * Apply suggestions from code review Co-authored-by: Alfredo Garcia --------- Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/sapling/tree.rs | 20 ++ zebra-chain/src/work/difficulty.rs | 24 ++ zebra-chain/src/work/equihash.rs | 22 +- zebra-rpc/src/methods.rs | 239 ++++++++++++++++-- zebra-rpc/src/methods/tests/prop.rs | 25 +- zebra-rpc/src/methods/tests/snapshot.rs | 37 +++ .../get_block_header_hash@mainnet_10.snap | 5 + .../get_block_header_hash@testnet_10.snap | 5 + ..._block_header_hash_verbose@mainnet_10.snap | 19 ++ ..._block_header_hash_verbose@testnet_10.snap | 19 ++ .../get_block_header_height@mainnet_10.snap | 5 + .../get_block_header_height@testnet_10.snap | 5 + ...lock_header_height_verbose@mainnet_10.snap | 19 ++ ...lock_header_height_verbose@testnet_10.snap | 19 ++ ...k_verbose_hash_verbosity_2@mainnet_10.snap | 1 + ...k_verbose_hash_verbosity_2@testnet_10.snap | 1 + zebra-rpc/src/methods/tests/vectors.rs | 105 +++++++- zebra-state/src/request.rs | 9 + zebra-state/src/response.rs | 34 ++- zebra-state/src/service.rs | 36 ++- zebrad/tests/common/failure_messages.rs | 2 + 21 files changed, 609 insertions(+), 42 deletions(-) create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_height@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_height@testnet_10.snap create mode 100644 
zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index 519d7deeeb9..a532f7dfcda 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -147,6 +147,26 @@ impl TryFrom<[u8; 32]> for Root { } } +impl ToHex for &Root { + fn encode_hex>(&self) -> T { + <[u8; 32]>::from(*self).encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + <[u8; 32]>::from(*self).encode_hex_upper() + } +} + +impl ToHex for Root { + fn encode_hex>(&self) -> T { + (&self).encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + (&self).encode_hex_upper() + } +} + impl ZcashSerialize for Root { fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { writer.write_all(&<[u8; 32]>::from(*self)[..])?; diff --git a/zebra-chain/src/work/difficulty.rs b/zebra-chain/src/work/difficulty.rs index 20fc638453e..cda4fa926ff 100644 --- a/zebra-chain/src/work/difficulty.rs +++ b/zebra-chain/src/work/difficulty.rs @@ -290,6 +290,30 @@ impl CompactDifficulty { Ok(difficulty) } + + /// Returns a floating-point number representing a difficulty as a multiple + /// of the minimum difficulty for the provided network. + // Copied from + // TODO: Explain here what this ported code is doing and why, request help to do so with the ECC team. + pub fn relative_to_network(&self, network: &Network) -> f64 { + let network_difficulty = network.target_difficulty_limit().to_compact(); + + let [mut n_shift, ..] = self.0.to_be_bytes(); + let [n_shift_amount, ..] 
= network_difficulty.0.to_be_bytes(); + let mut d_diff = f64::from(network_difficulty.0 << 8) / f64::from(self.0 << 8); + + while n_shift < n_shift_amount { + d_diff *= 256.0; + n_shift += 1; + } + + while n_shift > n_shift_amount { + d_diff /= 256.0; + n_shift -= 1; + } + + d_diff + } } impl fmt::Debug for CompactDifficulty { diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index f79b9212e53..35aa39f849b 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -2,6 +2,7 @@ use std::{fmt, io}; +use hex::ToHex; use serde_big_array::BigArray; use crate::{ @@ -112,7 +113,6 @@ impl Solution { } /// Returns a [`Solution`] of `[0; SOLUTION_SIZE]` to be used in block proposals. - #[cfg(feature = "getblocktemplate-rpcs")] pub fn for_proposal() -> Self { // TODO: Accept network as an argument, and if it's Regtest, return the shorter null solution. Self::Common([0; SOLUTION_SIZE]) @@ -195,3 +195,23 @@ impl ZcashDeserialize for Solution { Self::from_bytes(&solution) } } + +impl ToHex for &Solution { + fn encode_hex>(&self) -> T { + self.value().encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + self.value().encode_hex_upper() + } +} + +impl ToHex for Solution { + fn encode_hex>(&self) -> T { + (&self).encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + (&self).encode_hex_upper() + } +} diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 268676beb27..cb01ca8bbd3 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -11,6 +11,7 @@ use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; use futures::{stream::FuturesOrdered, FutureExt, StreamExt, TryFutureExt}; use hex::{FromHex, ToHex}; +use hex_data::HexData; use indexmap::IndexMap; use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; use jsonrpc_derive::rpc; @@ -23,10 +24,14 @@ use zebra_chain::{ block::{self, Height, SerializedBlock}, chain_tip::{ChainTip, NetworkChainTipHeightEstimator}, 
parameters::{ConsensusBranchId, Network, NetworkUpgrade}, - serialization::ZcashDeserialize, + serialization::{ZcashDeserialize, ZcashSerialize}, subtree::NoteCommitmentSubtreeIndex, transaction::{self, SerializedTransaction, Transaction, UnminedTx}, transparent::{self, Address}, + work::{ + difficulty::{CompactDifficulty, ExpandedDifficulty}, + equihash::Solution, + }, }; use zebra_node_services::mempool; use zebra_state::{HashOrHeight, MinedTx, OutputIndex, OutputLocation, TransactionLocation}; @@ -166,6 +171,23 @@ pub trait Rpc { verbosity: Option, ) -> BoxFuture>; + /// Returns the requested block header by hash or height, as a [`GetBlockHeader`] JSON string. + /// + /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. + /// - `verbose`: (bool, optional, default=false, example=true) false for hex encoded data, true for a json object + #[rpc(name = "getblockheader")] + fn get_block_header( + &self, + hash_or_height: String, + verbose: Option, + ) -> BoxFuture>; + /// Returns the hash of the current best blockchain tip block, as a [`GetBlockHash`] JSON string. /// /// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) @@ -548,13 +570,11 @@ where .await .map_server_error()?; - let zebra_state::ReadResponse::BlockHeader(block_header) = response else { + let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { unreachable!("unmatched response to a BlockHeader request") }; - let tip_block_time = block_header - .ok_or_server_error("unexpectedly could not read best chain tip block header")? 
- .time; + let tip_block_time = header.time; let now = Utc::now(); let zebra_estimated_height = @@ -792,10 +812,6 @@ where } }; - // TODO: look up the height if we only have a hash, - // this needs a new state request for the height -> hash index - let height = hash_or_height.height(); - // # Concurrency // // We look up by block hash so the hash, transaction IDs, and confirmations @@ -873,21 +889,18 @@ where _ => unreachable!("unmatched response to a depth request"), }; - let time = if should_read_block_header { + let (time, height) = if should_read_block_header { let block_header_response = futs.next().await.expect("`futs` should not be empty"); match block_header_response.map_server_error()? { - zebra_state::ReadResponse::BlockHeader(header) => Some( - header - .ok_or_server_error("Block not found")? - .time - .timestamp(), - ), + zebra_state::ReadResponse::BlockHeader { header, height, .. } => { + (Some(header.time.timestamp()), Some(height)) + } _ => unreachable!("unmatched response to a BlockHeader request"), } } else { - None + (None, hash_or_height.height()) }; let sapling = SaplingTrees { @@ -919,6 +932,103 @@ where .boxed() } + fn get_block_header( + &self, + hash_or_height: String, + verbose: Option, + ) -> BoxFuture> { + let state = self.state.clone(); + let verbose = verbose.unwrap_or(true); + let network = self.network.clone(); + + async move { + let hash_or_height: HashOrHeight = hash_or_height.parse().map_server_error()?; + let zebra_state::ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash, + } = state + .clone() + .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) + .await + .map_server_error()? 
+ else { + panic!("unexpected response to BlockHeader request") + }; + + let response = if !verbose { + GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_server_error()?)) + } else { + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state + .clone() + .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) + .await + .map_server_error()? + else { + panic!("unexpected response to SaplingTree request") + }; + + // This could be `None` if there's a chain reorg between state queries. + let sapling_tree = + sapling_tree.ok_or_server_error("missing sapling tree for block")?; + + let zebra_state::ReadResponse::Depth(depth) = state + .clone() + .oneshot(zebra_state::ReadRequest::Depth(hash)) + .await + .map_server_error()? + else { + panic!("unexpected response to SaplingTree request") + }; + + // From + // TODO: Deduplicate const definition, consider refactoring this to avoid duplicate logic + const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; + + // Confirmations are one more than the depth. + // Depth is limited by height, so it will never overflow an i64. 
+ let confirmations = depth + .map(|depth| i64::from(depth) + 1) + .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); + + let mut nonce = *header.nonce; + nonce.reverse(); + + let final_sapling_root: [u8; 32] = if sapling_tree.position().is_some() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] + }; + + let difficulty = header.difficulty_threshold.relative_to_network(&network); + + let block_header = GetBlockHeaderObject { + hash: GetBlockHash(hash), + confirmations, + height, + version: header.version, + merkle_root: header.merkle_root, + final_sapling_root, + time: header.time.timestamp(), + nonce, + solution: header.solution, + bits: header.difficulty_threshold, + difficulty, + previous_block_hash: GetBlockHash(header.previous_block_hash), + next_block_hash: next_block_hash.map(GetBlockHash), + }; + + GetBlockHeader::Object(Box::new(block_header)) + }; + + Ok(response) + } + .boxed() + } + fn get_best_block_hash(&self) -> Result { self.latest_chain_tip .best_tip_hash() @@ -1671,6 +1781,101 @@ impl Default for GetBlock { } } +/// Response to a `getblockheader` RPC request. +/// +/// See the notes for the [`Rpc::get_block_header`] method. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[serde(untagged)] +pub enum GetBlockHeader { + /// The request block header, hex-encoded. + Raw(hex_data::HexData), + + /// The block header object. + Object(Box), +} + +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +/// Verbose response to a `getblockheader` RPC request. +/// +/// See the notes for the [`Rpc::get_block_header`] method. +pub struct GetBlockHeaderObject { + /// The hash of the requested block. + pub hash: GetBlockHash, + + /// The number of confirmations of this block in the best chain, + /// or -1 if it is not in the best chain. + pub confirmations: i64, + + /// The height of the requested block. + pub height: Height, + + /// The version field of the requested block. 
+ pub version: u32, + + /// The merkle root of the requesteed block. + #[serde(with = "hex", rename = "merkleroot")] + pub merkle_root: block::merkle::Root, + + /// The root of the Sapling commitment tree after applying this block. + #[serde(with = "hex", rename = "finalsaplingroot")] + pub final_sapling_root: [u8; 32], + + /// The block time of the requested block header in non-leap seconds since Jan 1 1970 GMT. + pub time: i64, + + /// The nonce of the requested block header. + #[serde(with = "hex")] + pub nonce: [u8; 32], + + /// The Equihash solution in the requested block header. + #[serde(with = "hex")] + solution: Solution, + + /// The difficulty threshold of the requested block header displayed in compact form. + #[serde(with = "hex")] + pub bits: CompactDifficulty, + + /// Floating point number that represents the difficulty limit for this block as a multiple + /// of the minimum difficulty for the network. + pub difficulty: f64, + + /// The previous block hash of the requested block header. + #[serde(rename = "previousblockhash")] + pub previous_block_hash: GetBlockHash, + + /// The next block hash after the requested block header. 
+ #[serde(rename = "nextblockhash", skip_serializing_if = "Option::is_none")] + pub next_block_hash: Option, +} + +impl Default for GetBlockHeader { + fn default() -> Self { + GetBlockHeader::Object(Box::default()) + } +} + +impl Default for GetBlockHeaderObject { + fn default() -> Self { + let difficulty: ExpandedDifficulty = zebra_chain::work::difficulty::U256::one().into(); + + GetBlockHeaderObject { + hash: GetBlockHash::default(), + confirmations: 0, + height: Height::MIN, + version: 4, + merkle_root: block::merkle::Root([0; 32]), + final_sapling_root: Default::default(), + time: 0, + nonce: [0; 32], + solution: Solution::for_proposal(), + bits: difficulty.to_compact(), + difficulty: 1.0, + previous_block_hash: Default::default(), + next_block_hash: Default::default(), + } + } +} + /// Response to a `getbestblockhash` and `getblockhash` RPC request. /// /// Contains the hex-encoded hash of the requested block. diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 726ddca159a..9435a68ac7e 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -664,16 +664,21 @@ proptest! 
{ .expect_request(zebra_state::ReadRequest::BlockHeader(block_hash.into())) .await .expect("getblockchaininfo should call mock state service with correct request") - .respond(zebra_state::ReadResponse::BlockHeader(Some(Arc::new(block::Header { - time: block_time, - version: Default::default(), - previous_block_hash: Default::default(), - merkle_root: Default::default(), - commitment_bytes: Default::default(), - difficulty_threshold: Default::default(), - nonce: Default::default(), - solution: Default::default() - })))); + .respond(zebra_state::ReadResponse::BlockHeader { + header: Arc::new(block::Header { + time: block_time, + version: Default::default(), + previous_block_hash: Default::default(), + merkle_root: Default::default(), + commitment_bytes: Default::default(), + difficulty_threshold: Default::default(), + nonce: Default::default(), + solution: Default::default() + }), + hash: block::Hash::from([0; 32]), + height: Height::MIN, + next_block_hash: None, + }); } }; diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index c0cda974ede..02c633af260 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -338,6 +338,34 @@ async fn test_rpc_response_data_for_network(network: &Network) { .expect("We should have a GetBlock struct"); snapshot_rpc_getblock_verbose("hash_verbosity_default", get_block, &settings); + // `getblockheader(hash, verbose = false)` + let get_block_header = rpc + .get_block_header(block_hash.to_string(), Some(false)) + .await + .expect("We should have a GetBlock struct"); + snapshot_rpc_getblockheader("hash", get_block_header, &settings); + + // `getblockheader(height, verbose = false)` + let get_block_header = rpc + .get_block_header(BLOCK_HEIGHT.to_string(), Some(false)) + .await + .expect("We should have a GetBlock struct"); + snapshot_rpc_getblockheader("height", get_block_header, &settings); + + // `getblockheader(hash, verbose = true)` + let 
get_block_header = rpc + .get_block_header(block_hash.to_string(), Some(true)) + .await + .expect("We should have a GetBlock struct"); + snapshot_rpc_getblockheader("hash_verbose", get_block_header, &settings); + + // `getblockheader(height, verbose = true)` where verbose is the default value. + let get_block_header = rpc + .get_block_header(BLOCK_HEIGHT.to_string(), None) + .await + .expect("We should have a GetBlock struct"); + snapshot_rpc_getblockheader("height_verbose", get_block_header, &settings); + // `getbestblockhash` let get_best_block_hash = rpc .get_best_block_hash() @@ -609,6 +637,15 @@ fn snapshot_rpc_getblock_verbose( settings.bind(|| insta::assert_json_snapshot!(format!("get_block_verbose_{variant}"), block)); } +/// Check valid `getblockheader` response using `cargo insta`. +fn snapshot_rpc_getblockheader( + variant: &'static str, + block: GetBlockHeader, + settings: &insta::Settings, +) { + settings.bind(|| insta::assert_json_snapshot!(format!("get_block_header_{variant}"), block)); +} + /// Check invalid height `getblock` response using `cargo insta`. 
fn snapshot_rpc_getblock_invalid( variant: &'static str, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@mainnet_10.snap new file mode 100644 index 00000000000..5ab74801c1e --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@mainnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- +"0400000008ce3d9731b000c08338455c8a4a6bd05da16e26b11daa1b917184ece80f04000946edb9c083c9942d92305444527765fad789c438c717783276a9f7fbf61b850000000000000000000000000000000000000000000000000000000000000000ac7a1358ffff071f7534e8cf161ff2e49d54bdb3bfbcde8cdbf2fc5963c9ec7d86aed4a67e975790fd4005002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c2
68ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d9713eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@testnet_10.snap new file mode 100644 index 00000000000..7942eddd2e2 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash@testnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- 
+"04000000382c4a332661c7ed0671f32a34d724619f086c61873bce7c99859dd9920aa605755f7c7d27a811596e9fae6dd30ca45be86e901d499909de35b6ff1f699f7ef30000000000000000000000000000000000000000000000000000000000000000e9851358ffff0720000056c2264c31261d597c6fcea7c5e00160cf6be1cd89ca96a0389473e50000fd40050053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95
d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe13e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap new file mode 100644 index 00000000000..723bf78e62e --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap @@ -0,0 +1,19 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- +{ + "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", + "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" +} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap new file mode 100644 index 00000000000..0d76afbbb96 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap @@ -0,0 +1,19 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- +{ + "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", + "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" +} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@mainnet_10.snap new file mode 100644 index 00000000000..5ab74801c1e --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@mainnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- 
+"0400000008ce3d9731b000c08338455c8a4a6bd05da16e26b11daa1b917184ece80f04000946edb9c083c9942d92305444527765fad789c438c717783276a9f7fbf61b850000000000000000000000000000000000000000000000000000000000000000ac7a1358ffff071f7534e8cf161ff2e49d54bdb3bfbcde8cdbf2fc5963c9ec7d86aed4a67e975790fd4005002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc
737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d9713eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@testnet_10.snap new file mode 100644 index 00000000000..7942eddd2e2 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height@testnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- 
+"04000000382c4a332661c7ed0671f32a34d724619f086c61873bce7c99859dd9920aa605755f7c7d27a811596e9fae6dd30ca45be86e901d499909de35b6ff1f699f7ef30000000000000000000000000000000000000000000000000000000000000000e9851358ffff0720000056c2264c31261d597c6fcea7c5e00160cf6be1cd89ca96a0389473e50000fd40050053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95
d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe13e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap new file mode 100644 index 00000000000..723bf78e62e --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap @@ -0,0 +1,19 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- +{ + "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", + "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" +} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap new file mode 100644 index 00000000000..0d76afbbb96 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap @@ -0,0 +1,19 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block +--- +{ + "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", + "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" +} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 3bcf968bed5..f18b879f6b3 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -5,6 +5,7 @@ expression: block { "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, + "height": 1, "time": 1477671596, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 7ea021d3382..013a4c09b23 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap 
@@ -5,6 +5,7 @@ expression: block { "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, + "height": 1, "time": 1477674473, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index b82ac588d5c..d5a923516c0 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -209,7 +209,7 @@ async fn rpc_getblock() { GetBlock::Object { hash: GetBlockHash(block.hash()), confirmations: (blocks.len() - i).try_into().expect("valid i64"), - height: None, + height: Some(Height(i as u32)), time: Some(block.header.time.timestamp()), tx: block .transactions @@ -374,6 +374,109 @@ async fn rpc_getblock_missing_error() { assert!(rpc_tx_queue_task_result.is_none()); } +#[tokio::test(flavor = "multi_thread")] +async fn rpc_getblockheader() { + let _init_guard = zebra_test::init(); + + // Create a continuous chain of mainnet blocks from genesis + let blocks: Vec> = zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS + .iter() + .map(|(_height, block_bytes)| block_bytes.zcash_deserialize_into().unwrap()) + .collect(); + + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + // Create a populated state service + let (_state, read_state, latest_chain_tip, _chain_tip_change) = + zebra_state::populated_state(blocks.clone(), &Mainnet).await; + + // Init RPC + let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", + "RPC test", + Mainnet, + false, + true, + Buffer::new(mempool.clone(), 1), + read_state.clone(), + latest_chain_tip, + ); + + // Make height calls with verbose=false and check response + for (i, block) in blocks.iter().enumerate() { + let expected_result = GetBlockHeader::Raw(HexData( + block + .header + .clone() + .zcash_serialize_to_vec() + .expect("test block header should serialize"), + )); + + let hash = block.hash(); + let height = 
Height(i as u32); + + for hash_or_height in [HashOrHeight::from(height), hash.into()] { + let get_block_header = rpc + .get_block_header(hash_or_height.to_string(), Some(false)) + .await + .expect("we should have a GetBlockHeader struct"); + assert_eq!(get_block_header, expected_result); + } + + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = read_state + .clone() + .oneshot(zebra_state::ReadRequest::SaplingTree(height.into())) + .await + .expect("should have sapling tree for block hash") + else { + panic!("unexpected response to SaplingTree request") + }; + + let mut expected_nonce = *block.header.nonce; + expected_nonce.reverse(); + let sapling_tree = sapling_tree.expect("should always have sapling root"); + let expected_final_sapling_root: [u8; 32] = if sapling_tree.position().is_some() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] + }; + + let expected_result = GetBlockHeader::Object(Box::new(GetBlockHeaderObject { + hash: GetBlockHash(hash), + confirmations: 11 - i as i64, + height, + version: 4, + merkle_root: block.header.merkle_root, + final_sapling_root: expected_final_sapling_root, + time: block.header.time.timestamp(), + nonce: expected_nonce, + solution: block.header.solution, + bits: block.header.difficulty_threshold, + difficulty: block + .header + .difficulty_threshold + .relative_to_network(&Mainnet), + previous_block_hash: GetBlockHash(block.header.previous_block_hash), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + })); + + for hash_or_height in [HashOrHeight::from(Height(i as u32)), block.hash().into()] { + let get_block_header = rpc + .get_block_header(hash_or_height.to_string(), Some(true)) + .await + .expect("we should have a GetBlockHeader struct"); + assert_eq!(get_block_header, expected_result); + } + } + + mempool.expect_no_requests().await; + + // The queue task should continue without errors or panics + let rpc_tx_queue_task_result = 
rpc_tx_queue_task_handle.now_or_never(); + assert!(rpc_tx_queue_task_result.is_none()); +} + #[tokio::test(flavor = "multi_thread")] async fn rpc_getbestblockhash() { let _init_guard = zebra_test::init(); diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 56be011d48e..5894b7da55a 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -90,6 +90,15 @@ impl HashOrHeight { } } +impl std::fmt::Display for HashOrHeight { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HashOrHeight::Hash(hash) => write!(f, "{hash}"), + HashOrHeight::Height(height) => write!(f, "{}", height.0), + } + } +} + impl From for HashOrHeight { fn from(hash: block::Hash) -> Self { Self::Hash(hash) diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 77c252b0c75..daa2fbe2829 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -52,7 +52,16 @@ pub enum Response { Block(Option>), /// The response to a `BlockHeader` request. - BlockHeader(Option>), + BlockHeader { + /// The header of the requested block + header: Arc, + /// The hash of the requested block + hash: block::Hash, + /// The height of the requested block + height: block::Height, + /// The hash of the next block after the requested block + next_block_hash: Option, + }, /// The response to a `AwaitUtxo` request, from any non-finalized chains, finalized chain, /// pending unverified blocks, or blocks received after the request was sent. @@ -147,7 +156,16 @@ pub enum ReadResponse { Block(Option>), /// The response to a `BlockHeader` request. 
- BlockHeader(Option>), + BlockHeader { + /// The header of the requested block + header: Arc, + /// The hash of the requested block + hash: block::Hash, + /// The height of the requested block + height: block::Height, + /// The hash of the next block after the requested block + next_block_hash: Option, + }, /// Response to [`ReadRequest::Transaction`] with the specified transaction. Transaction(Option), @@ -287,7 +305,17 @@ impl TryFrom for Response { ReadResponse::BlockHash(hash) => Ok(Response::BlockHash(hash)), ReadResponse::Block(block) => Ok(Response::Block(block)), - ReadResponse::BlockHeader(header) => Ok(Response::BlockHeader(header)), + ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash + } => Ok(Response::BlockHeader { + header, + hash, + height, + next_block_hash + }), ReadResponse::Transaction(tx_info) => { Ok(Response::Transaction(tx_info.map(|tx_info| tx_info.tx))) } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index adc61f887ae..be3a78f0772 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1301,20 +1301,36 @@ impl Service for ReadStateService { tokio::task::spawn_blocking(move || { span.in_scope(move || { - let header = state.non_finalized_state_receiver.with_watch_data( - |non_finalized_state| { - read::block_header( - non_finalized_state.best_chain(), - &state.db, - hash_or_height, - ) - }, - ); + let best_chain = state.latest_best_chain(); + + let height = hash_or_height + .height_or_else(|hash| { + read::find::height_by_hash(best_chain.clone(), &state.db, hash) + }) + .ok_or_else(|| BoxError::from("block hash or height not found"))?; + + let hash = hash_or_height + .hash_or_else(|height| { + read::find::hash_by_height(best_chain.clone(), &state.db, height) + }) + .ok_or_else(|| BoxError::from("block hash or height not found"))?; + + let next_height = height.next()?; + let next_block_hash = + read::find::hash_by_height(best_chain.clone(), &state.db, next_height); + + let 
header = read::block_header(best_chain, &state.db, height.into()) + .ok_or_else(|| BoxError::from("block hash or height not found"))?; // The work is done in the future. timer.finish(module_path!(), line!(), "ReadRequest::Block"); - Ok(ReadResponse::BlockHeader(header)) + Ok(ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash, + }) }) }) .wait_for_panics() diff --git a/zebrad/tests/common/failure_messages.rs b/zebrad/tests/common/failure_messages.rs index 2550f2d21cf..6d41ab024bb 100644 --- a/zebrad/tests/common/failure_messages.rs +++ b/zebrad/tests/common/failure_messages.rs @@ -81,6 +81,8 @@ pub const LIGHTWALLETD_FAILURE_MESSAGES: &[&str] = &[ "error with", // Block error messages "error requesting block: 0: Block not found", + // This shouldn't happen unless lwd starts calling getblock with `verbosity = 2` + "error requesting block: 0: block hash or height not found", "error zcashd getblock rpc", "received overlong message", "received unexpected height block", From 17648cc6e1ec8c4608b05c6d713ff83a20bc4a04 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 19 Nov 2024 06:25:37 -0300 Subject: [PATCH 020/245] remove `scan-start-where-left-test` from CI (#9026) --- .../sub-ci-integration-tests-gcp.yml | 24 ------------------- docker/entrypoint.sh | 6 ----- 2 files changed, 30 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 75de0bfda2a..d4e4bd506d4 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -480,29 +480,6 @@ jobs: saves_to_disk: false secrets: inherit - # Test that the scanner can continue scanning where it was left when zebrad restarts. - # - # Runs: - # - after every PR is merged to `main` - # - on every PR update - # - # If the state version has changed, waits for the new cached states to be created. 
- # Otherwise, if the state rebuild was skipped, runs immediately after the build job. - scan-start-where-left-test: - name: Scan starts where left - needs: [test-full-sync, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - with: - app_name: zebra-scan - test_id: scan-start-where-left - test_description: Test that the scanner can continue scanning where it was left when zebrad restarts. - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" - needs_zebra_state: true - needs_lwd_state: false - saves_to_disk: true - secrets: inherit - # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running. # # Runs: @@ -546,7 +523,6 @@ jobs: lightwalletd-grpc-test, get-block-template-test, submit-block-test, - scan-start-where-left-test, scan-task-commands-test, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. 
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index d71be57805d..9c1165c54a3 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -74,7 +74,6 @@ fi : "${TEST_LWD_TRANSACTIONS:=}" : "${TEST_GET_BLOCK_TEMPLATE:=}" : "${TEST_SUBMIT_BLOCK:=}" -: "${TEST_SCAN_START_WHERE_LEFT:=}" : "${ENTRYPOINT_FEATURES:=}" : "${TEST_SCAN_TASK_COMMANDS:=}" @@ -340,11 +339,6 @@ case "$1" in check_directory_files "${ZEBRA_CACHED_STATE_DIR}" run_cargo_test "${ENTRYPOINT_FEATURES}" "submit_block" - elif [[ "${TEST_SCAN_START_WHERE_LEFT}" -eq "1" ]]; then - # Test that the scanner can continue scanning where it was left when zebra-scanner restarts. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - exec cargo test --locked --release --features "zebra-test" --package zebra-scan -- --nocapture --include-ignored scan_start_where_left - elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then # Test that the scan task commands are working. check_directory_files "${ZEBRA_CACHED_STATE_DIR}" From 3983428ac4dcb7e181896529963575ec702acc10 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 19 Nov 2024 11:41:57 +0000 Subject: [PATCH 021/245] fix(ci): run most lighwalletd tests correctly (#9038) A LWD test was expecting the `ZEBRA_TEST_LIGHTWALLETD` to be set, but this variable is needed for all LWD tests and not specifically for `lightwalletd_integration`. We had to rename this variable on a buggy `elif` statement in our Docker entrypoint. This was avoiding most LWD tests to run correctly. 
--- .github/workflows/sub-ci-unit-tests-docker.yml | 2 +- docker/entrypoint.sh | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index 475072e81c7..dd6c89a5a75 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -138,7 +138,7 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 -e TEST_LWD_INTEGRATION=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 9c1165c54a3..dc1dbc121cf 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -59,7 +59,7 @@ fi : "${RUN_ALL_EXPERIMENTAL_TESTS:=}" : "${TEST_FAKE_ACTIVATION_HEIGHTS:=}" : "${TEST_ZEBRA_EMPTY_SYNC:=}" -: "${ZEBRA_TEST_LIGHTWALLETD:=}" +: "${TEST_LWD_INTEGRATION:=}" : "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES:=}" : "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES:=}" : "${TEST_DISK_REBUILD:=}" @@ -239,10 +239,6 @@ case "$1" in # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. run_cargo_test "${ENTRYPOINT_FEATURES}" "sync_large_checkpoints_" - elif [[ "${ZEBRA_TEST_LIGHTWALLETD}" -eq "1" ]]; then - # Test launching lightwalletd with an empty lightwalletd and Zebra state. - run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_integration" - elif [[ -n "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES}" ]]; then # Run a Zebra full sync test on mainnet. 
run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_mainnet" @@ -303,6 +299,10 @@ case "$1" in # Since these tests use the same cached state, a state problem in the first test can fail the second test. run_cargo_test "${ENTRYPOINT_FEATURES}" "--test-threads" "1" "fully_synced_rpc_" + elif [[ "${TEST_LWD_INTEGRATION}" -eq "1" ]]; then + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_integration" + elif [[ "${TEST_LWD_FULL_SYNC}" -eq "1" ]]; then # Starting at a cached Zebra tip, run a lightwalletd sync to tip. check_directory_files "${ZEBRA_CACHED_STATE_DIR}" From 5f2f97209e96d7e3c9fa3d47f72e5aa134c877fd Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 22 Nov 2024 13:15:01 +0100 Subject: [PATCH 022/245] fix(test): Update the reference Sapling treestate (#9051) * Update the reference Sapling treestate Zebra's treestate serialization differs from `zcashd` in the following way: `zcashd` omits the serialization of empty trailing ommers, while Zebra doesn't. This means that `zcashd` serializes the Sapling treestate for height 419_201 as 019eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d9310002000150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d Whereas Zebra serializes it as 019eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d931001f000150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d0000000000000000000000000000000000000000000000000000000000 Serialization Format ==================== The serialized treestate consists of optional, hex-encoded, 32-byte hashes. If the hash is not present, it is serialized as byte 0, i.e., `0x00`. If the hash is present, it is prefixed by byte 1, i.e. `0x01`. The first two hashes in the serialized treestate are the treestate's left and right leaves. These are followed by the serialized length of the vector of ommers. This length is serialized as 1, 3, 5, or 9 bytes. 
If the length is less than 253, it is serialized as a single byte. The length is then followed by the serialized ommers. We can now parse the first string, produced by `zcashd`: - `0119eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d931` is the serialized left leaf, - `00` is the serialized right leaf, - `02` is the serialized length of the vector of ommers, - `00` is the serialized first ommer, - `0150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d` is the serialized second ommer. And the second one, produced by Zebra: - `0119eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d931` is the serialized left leaf, - `00` is the serialized right leaf, - `1f` is the serialized length of the vector of ommers, - `00` is the serialized first ommer, - `0150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d` is the serialized second ommer - `0000000000000000000000000000000000000000000000000000000000` are the remaining 29 serialized ommers. Note that both serializations represent the same treestate. 
* Remove a new line char --- zebra-test/src/vectors/sapling-treestate-main-0-419-201.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-test/src/vectors/sapling-treestate-main-0-419-201.txt b/zebra-test/src/vectors/sapling-treestate-main-0-419-201.txt index c6cd1b532c0..7ce9416012a 100644 --- a/zebra-test/src/vectors/sapling-treestate-main-0-419-201.txt +++ b/zebra-test/src/vectors/sapling-treestate-main-0-419-201.txt @@ -1 +1 @@ -019eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d9310002000150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d \ No newline at end of file +019eb30778ddeea84c72e69e07a1689f3c8def3dc0a1939f0edcbe47279069d931001f000150715810d52caf35471d10feb487213fbd95ff209122225b7b65d27a7fb1a44d0000000000000000000000000000000000000000000000000000000000 \ No newline at end of file From 4f0746a613fb893990f042b46b24989c5be76926 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Fri, 22 Nov 2024 09:15:06 -0300 Subject: [PATCH 023/245] fix links in release templates (#9050) --- .github/ISSUE_TEMPLATE/release.md | 2 +- .../release-checklist.md | 19 +++++++++++++------ book/src/SUMMARY.md | 1 + 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 0e2ee30b7b1..080bc385c20 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -16,7 +16,7 @@ They can be skipped for urgent releases. To check consensus correctness, we want to test that the state format is valid after a full sync. (Format upgrades are tested in CI on each PR.) 
-- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or +- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml?query=event%3Aschedule) since the last state change, or - [ ] Start a manual workflow run with a Zebra and `lightwalletd` full sync. State format changes can be made in `zebra-state` or `zebra-chain`. The state format can be changed by data that is sent to the state, data created within the state using `zebra-chain`, or serialization formats in `zebra-state` or `zebra-chain`. diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 5445834df3e..8679a37154f 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -9,7 +9,7 @@ assignees: '' # Prepare for the Release -- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or start a manual full sync. +- [ ] Make sure there has been [at least one successful full sync test in the main branch](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml?query=branch%3Amain) since the last state change, or start a manual full sync. - [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged. (See the release ticket checklist for details) @@ -57,7 +57,13 @@ fastmod --fixed-strings '1.58' '1.65' - [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. 
- [ ] Mark all non-release PRs with `do-not-merge`, because Mergify checks approved PRs against every commit, even when a queue is frozen. +- [ ] Add the `A-release` tag to the release pull request in order for the `check_no_git_refs_in_cargo_lock` to run. +## Zebra git sources dependencies + +- [ ] Ensure the `check_no_git_refs_in_cargo_lock` check passes. + +This check runs automatically on pull requests with the `A-release` label. It must pass for crates to be published to crates.io. If the check fails, you should either halt the release process or proceed with the understanding that the crates will not be published on crates.io. # Update Versions and End of Support @@ -76,7 +82,7 @@ Zebra's Rust API doesn't have any support or stability guarantees, so we keep al ### Update Crate Versions -If you're publishing crates for the first time, [log in to crates.io](https://github.com/ZcashFoundation/zebra/blob/doc-crate-own/book/src/dev/crate-owners.md#logging-in-to-cratesio), +If you're publishing crates for the first time, [log in to crates.io](https://zebra.zfnd.org/dev/crate-owners.html#logging-in-to-cratesio), and make sure you're a member of owners group. Check that the release will work: @@ -103,7 +109,7 @@ Crate publishing is [automatically checked in CI](https://github.com/ZcashFounda ## Update End of Support The end of support height is calculated from the current blockchain height: -- [ ] Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool. +- [ ] Find where the Zcash blockchain tip is now by using a [Zcash Block Explorer](https://mainnet.zcashexplorer.app/) or other tool. - [ ] Replace `ESTIMATED_RELEASE_HEIGHT` in [`end_of_support.rs`](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs) with the height you estimate the release will be tagged.
@@ -141,8 +147,7 @@ The end of support height is calculated from the current blockchain height: ## Test the Pre-Release - [ ] Wait until the Docker binaries have been built on `main`, and the quick tests have passed: - - [ ] [ci-unit-tests-docker.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-docker.yml?query=branch%3Amain) - - [ ] [ci-integration-tests-gcp.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain) + - [ ] [ci-tests.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml?query=branch%3Amain) - [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease) ## Publish Release @@ -151,7 +156,7 @@ The end of support height is calculated from the current blockchain height: ## Publish Crates -- [ ] [Run `cargo login`](https://github.com/ZcashFoundation/zebra/blob/doc-crate-own/book/src/dev/crate-owners.md#logging-in-to-cratesio) +- [ ] [Run `cargo login`](https://zebra.zfnd.org/dev/crate-owners.html#logging-in-to-cratesio) - [ ] Run `cargo clean` in the zebra repo (optional) - [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` - [ ] Check that Zebra can be installed from `crates.io`: @@ -159,7 +164,9 @@ The end of support height is calculated from the current blockchain height: and put the output in a comment on the PR. ## Publish Docker Images + - [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml?query=event%3Arelease). +- [ ] Wait for the new tag in the [dockerhub zebra space](https://hub.docker.com/r/zfnd/zebra/tags) - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. 
- [ ] Remove `do-not-merge` from the PRs you added it to diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index a7b018a2b9a..9f8715a8806 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -37,6 +37,7 @@ - [Doing Mass Renames](dev/mass-renames.md) - [Updating the ECC dependencies](dev/ecc-updates.md) - [Running a Private Testnet Test](dev/private-testnet.md) + - [Zebra crates](dev/crate-owners.md) - [Zebra RFCs](dev/rfcs.md) - [Pipelinable Block Lookup](dev/rfcs/0001-pipelinable-block-lookup.md) - [Parallel Verification](dev/rfcs/0002-parallel-verification.md) From 802a2433857647d5696bd88b02eed6fdf4d4fb95 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 27 Nov 2024 15:49:02 +0000 Subject: [PATCH 024/245] fix(mergify): align `build` job name across workflows (#9055) --- .github/workflows/ci-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml index 7f0a19dcc06..517ba4151f2 100644 --- a/.github/workflows/ci-tests.yml +++ b/.github/workflows/ci-tests.yml @@ -121,7 +121,7 @@ jobs: # workflow or repository variable is configured differently. Testnet jobs change that config to # testnet when running the image. 
build: - name: Build images + name: Build CI Docker # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-build-docker-image.yml From 0af07426134986f14ac202765358de522ef7cc23 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 27 Nov 2024 12:49:07 -0300 Subject: [PATCH 025/245] upgrade min protocol versions for all network kinds (#9058) --- zebra-network/src/constants.rs | 8 ++++---- zebra-network/src/peer_set/set/tests/vectors.rs | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index a116fd63018..acab966f40c 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -396,14 +396,14 @@ lazy_static! { /// /// The minimum network protocol version typically changes after Mainnet and /// Testnet network upgrades. - // TODO: Change `Nu5` to `Nu6` after NU6 activation. + // TODO: Change `Nu6` to `Nu7` after NU7 activation. 
// TODO: Move the value here to a field on `testnet::Parameters` (#8367) pub static ref INITIAL_MIN_NETWORK_PROTOCOL_VERSION: HashMap = { let mut hash_map = HashMap::new(); - hash_map.insert(NetworkKind::Mainnet, Version::min_specified_for_upgrade(&Mainnet, Nu5)); - hash_map.insert(NetworkKind::Testnet, Version::min_specified_for_upgrade(&Network::new_default_testnet(), Nu5)); - hash_map.insert(NetworkKind::Regtest, Version::min_specified_for_upgrade(&Network::new_regtest(None, None), Nu5)); + hash_map.insert(NetworkKind::Mainnet, Version::min_specified_for_upgrade(&Mainnet, Nu6)); + hash_map.insert(NetworkKind::Testnet, Version::min_specified_for_upgrade(&Network::new_default_testnet(), Nu6)); + hash_map.insert(NetworkKind::Regtest, Version::min_specified_for_upgrade(&Network::new_regtest(None, None), Nu6)); hash_map }; diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index 0a5d3d34eaf..9f8fbe1f136 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -26,7 +26,7 @@ fn peer_set_ready_single_connection() { let peer_versions = PeerVersions { peer_versions: vec![Version::min_specified_for_upgrade( &Network::Mainnet, - NetworkUpgrade::Nu5, + NetworkUpgrade::Nu6, )], }; @@ -118,7 +118,7 @@ fn peer_set_ready_single_connection() { #[test] fn peer_set_ready_multiple_connections() { // Use three peers with the same version - let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: vec![peer_version, peer_version, peer_version], }; @@ -182,7 +182,7 @@ fn peer_set_rejects_connections_past_per_ip_limit() { const NUM_PEER_VERSIONS: usize = crate::constants::DEFAULT_MAX_CONNS_PER_IP + 1; // Use three peers with the same version - let peer_version = 
Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: [peer_version; NUM_PEER_VERSIONS].into_iter().collect(), }; @@ -232,7 +232,7 @@ fn peer_set_route_inv_empty_registry() { let test_hash = block::Hash([0; 32]); // Use two peers with the same version - let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: vec![peer_version, peer_version], }; @@ -315,7 +315,7 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { let test_change = InventoryStatus::new_available(test_inv, test_peer); // Use two peers with the same version - let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: vec![peer_version, peer_version], }; @@ -423,7 +423,7 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { let test_change = InventoryStatus::new_missing(test_inv, test_peer); // Use two peers with the same version - let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: vec![peer_version, peer_version], }; @@ -525,7 +525,7 @@ fn peer_set_route_inv_all_missing_fail() { let test_change = InventoryStatus::new_missing(test_inv, test_peer); // Use one peer - let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, NetworkUpgrade::Nu5); + let peer_version = Version::min_specified_for_upgrade(&Network::Mainnet, 
NetworkUpgrade::Nu6); let peer_versions = PeerVersions { peer_versions: vec![peer_version], }; From 8cfb61f52c0948cab9e57df7222244799de379e5 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 27 Nov 2024 10:49:12 -0500 Subject: [PATCH 026/245] add(ci): Check that dependencies have all been published to crates.io on release PRs (#8992) * Adds a test to check for crates in the Cargo.lock file that are being pulled in from a git source. * add `check_no_git_refs_in_cargo_lock` to CI * try skip instead of exclude --------- Co-authored-by: Alfredo Garcia --- .github/workflows/sub-ci-unit-tests-docker.yml | 9 +++++++++ docker/entrypoint.sh | 8 ++++++-- zebrad/tests/acceptance.rs | 12 ++++++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index dd6c89a5a75..da69d12e286 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -197,3 +197,12 @@ jobs: # If there is already an open issue with this label, any failures become comments on that issue. always-create-new-issue: false github-token: ${{ secrets.GITHUB_TOKEN }} + + run-check-no-git-refs: + if: contains(github.event.pull_request.labels.*.name, 'A-release') + runs-on: ubuntu-latest + steps: + - name: Run check_no_git_refs_in_cargo_lock + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty -e NETWORK -e RUN_CHECK_NO_GIT_REFS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index dc1dbc121cf..ccd09f43c33 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -224,12 +224,16 @@ case "$1" in if [[ "${RUN_ALL_TESTS}" -eq "1" ]]; then # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. 
# If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock elif [[ "${RUN_ALL_EXPERIMENTAL_TESTS}" -eq "1" ]]; then # Run unit, basic acceptance tests, and ignored tests with experimental features. # If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES_EXPERIMENTAL}" --workspace -- --nocapture --include-ignored + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES_EXPERIMENTAL}" --workspace -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock + + elif [[ "${RUN_CHECK_NO_GIT_REFS}" -eq "1" ]]; then + # Run the check_no_git_refs_in_cargo_lock test. + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored check_no_git_refs_in_cargo_lock elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then # Run state tests with fake activation heights. diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 1a8cefbe0b2..3dfc959eb58 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3538,3 +3538,15 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { Ok(()) } + +/// Check that Zebra does not depend on any crates from git sources. 
+#[test] +#[ignore] +fn check_no_git_refs_in_cargo_lock() { + let cargo_lock_contents = + fs::read_to_string("../Cargo.lock").expect("should have Cargo.lock file in root dir"); + + if cargo_lock_contents.contains(r#"source = "git+"#) { + panic!("Cargo.lock includes git sources") + } +} From e15184d39bf4e480cc68bf3d9d2a7e9dbb4d92b8 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 29 Nov 2024 18:15:20 -0500 Subject: [PATCH 027/245] chore: Fix clippy lints (#9062) * Allows non-local impls for abscissa derive macros and fixes other clippy lints. * Fixes formatting --- tower-fallback/tests/fallback.rs | 2 ++ zebra-chain/src/chain_tip.rs | 2 +- zebra-chain/src/orchard/shielded_data.rs | 2 +- zebra-chain/src/serialization/tests.rs | 2 ++ zebra-chain/src/transaction.rs | 2 +- zebra-chain/src/transaction/serialize.rs | 2 +- zebra-network/src/protocol/external/codec.rs | 4 +++- zebra-scan/src/service.rs | 24 +++++++++---------- zebra-scan/src/storage/db/sapling.rs | 4 ++-- zebra-scan/tests/scan_task_commands.rs | 4 +++- .../service/finalized_state/column_family.rs | 14 +++++------ .../service/finalized_state/disk_format.rs | 2 +- .../src/service/non_finalized_state/chain.rs | 2 +- zebrad/src/commands.rs | 2 ++ zebrad/src/components/metrics.rs | 2 ++ zebrad/src/components/tokio.rs | 2 ++ zebrad/src/components/tracing/endpoint.rs | 2 ++ 17 files changed, 45 insertions(+), 29 deletions(-) diff --git a/tower-fallback/tests/fallback.rs b/tower-fallback/tests/fallback.rs index 8b60481d7b8..486dfb4a47e 100644 --- a/tower-fallback/tests/fallback.rs +++ b/tower-fallback/tests/fallback.rs @@ -1,3 +1,5 @@ +//! 
Tests for tower-fallback + use tower::{service_fn, Service, ServiceExt}; use tower_fallback::Fallback; diff --git a/zebra-chain/src/chain_tip.rs b/zebra-chain/src/chain_tip.rs index 04e98ecbff7..5428ec58d13 100644 --- a/zebra-chain/src/chain_tip.rs +++ b/zebra-chain/src/chain_tip.rs @@ -139,7 +139,7 @@ impl<'f> BestTipChanged<'f> { } } -impl<'f> Future for BestTipChanged<'f> { +impl Future for BestTipChanged<'_> { type Output = Result<(), BoxError>; fn poll( diff --git a/zebra-chain/src/orchard/shielded_data.rs b/zebra-chain/src/orchard/shielded_data.rs index 5347919cd01..6dc7f38784a 100644 --- a/zebra-chain/src/orchard/shielded_data.rs +++ b/zebra-chain/src/orchard/shielded_data.rs @@ -270,6 +270,6 @@ impl ZcashDeserialize for Flags { // the reserved bits 2..7 of the flagsOrchard field MUST be zero." // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus Flags::from_bits(reader.read_u8()?) - .ok_or_else(|| SerializationError::Parse("invalid reserved orchard flags")) + .ok_or(SerializationError::Parse("invalid reserved orchard flags")) } } diff --git a/zebra-chain/src/serialization/tests.rs b/zebra-chain/src/serialization/tests.rs index a7ac2ac35a7..762df3c751b 100644 --- a/zebra-chain/src/serialization/tests.rs +++ b/zebra-chain/src/serialization/tests.rs @@ -1,2 +1,4 @@ +//! Serialization tests. 
+ mod preallocate; mod prop; diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 3df3edc8d53..96b2378e273 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -231,7 +231,7 @@ impl Transaction { &'a self, branch_id: ConsensusBranchId, all_previous_outputs: &'a [transparent::Output], - ) -> sighash::SigHasher { + ) -> sighash::SigHasher<'a> { sighash::SigHasher::new(self, branch_id, all_previous_outputs) } diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index 0e583efc5bf..47d1a4e4ad8 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -889,7 +889,7 @@ impl ZcashDeserialize for Transaction { // Convert it to a NetworkUpgrade let network_upgrade = NetworkUpgrade::from_branch_id(limited_reader.read_u32::()?) - .ok_or_else(|| { + .ok_or({ SerializationError::Parse( "expected a valid network upgrade from the consensus branch id", ) diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index 1c99b33621f..1df72aecaef 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -508,7 +508,9 @@ impl Codec { timestamp: Utc .timestamp_opt(reader.read_i64::()?, 0) .single() - .ok_or_else(|| Error::Parse("version timestamp is out of range for DateTime"))?, + .ok_or(Error::Parse( + "version timestamp is out of range for DateTime", + ))?, address_recv: AddrInVersion::zcash_deserialize(&mut reader)?, address_from: AddrInVersion::zcash_deserialize(&mut reader)?, nonce: Nonce(reader.read_u64::()?), diff --git a/zebra-scan/src/service.rs b/zebra-scan/src/service.rs index 200160ab3ae..ad4318b72b2 100644 --- a/zebra-scan/src/service.rs +++ b/zebra-scan/src/service.rs @@ -93,18 +93,18 @@ impl Service for ScanService { Request::Info => { let db = self.db.clone(); - return async move { + async move { Ok(Response::Info { 
min_sapling_birthday_height: db.network().sapling_activation_height(), }) } - .boxed(); + .boxed() } Request::RegisterKeys(keys) => { let mut scan_task = self.scan_task.clone(); - return async move { + async move { let newly_registered_keys = scan_task.register_keys(keys)?.await?; if !newly_registered_keys.is_empty() { Ok(Response::RegisteredKeys(newly_registered_keys)) @@ -113,14 +113,14 @@ impl Service for ScanService { are valid Sapling extended full viewing keys".into()) } } - .boxed(); + .boxed() } Request::DeleteKeys(keys) => { let mut db = self.db.clone(); let mut scan_task = self.scan_task.clone(); - return async move { + async move { // Wait for a message to confirm that the scan task has removed the key up to `DELETE_KEY_TIMEOUT` let remove_keys_result = tokio::time::timeout( DELETE_KEY_TIMEOUT, @@ -141,13 +141,13 @@ impl Service for ScanService { Ok(Response::DeletedKeys) } - .boxed(); + .boxed() } Request::Results(keys) => { let db = self.db.clone(); - return async move { + async move { let mut final_result = BTreeMap::new(); for key in keys { let db = db.clone(); @@ -168,26 +168,26 @@ impl Service for ScanService { Ok(Response::Results(final_result)) } - .boxed(); + .boxed() } Request::SubscribeResults(keys) => { let mut scan_task = self.scan_task.clone(); - return async move { + async move { let results_receiver = scan_task.subscribe(keys)?.await.map_err(|_| { "scan task dropped responder, check that keys are registered" })?; Ok(Response::SubscribeResults(results_receiver)) } - .boxed(); + .boxed() } Request::ClearResults(keys) => { let mut db = self.db.clone(); - return async move { + async move { // Clear results from db for the provided `keys` tokio::task::spawn_blocking(move || { db.delete_sapling_results(keys); @@ -196,7 +196,7 @@ impl Service for ScanService { Ok(Response::ClearedResults) } - .boxed(); + .boxed() } } } diff --git a/zebra-scan/src/storage/db/sapling.rs b/zebra-scan/src/storage/db/sapling.rs index b3c7b870b42..a51ea861eab 100644 
--- a/zebra-scan/src/storage/db/sapling.rs +++ b/zebra-scan/src/storage/db/sapling.rs @@ -265,7 +265,7 @@ trait InsertSaplingHeight { fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self; } -impl<'cf> InsertSaplingHeight for WriteSaplingTxIdsBatch<'cf> { +impl InsertSaplingHeight for WriteSaplingTxIdsBatch<'_> { /// Insert sapling height with no results. /// /// If a result already exists for the coinbase transaction at that height, @@ -283,7 +283,7 @@ trait DeleteSaplingKeys { fn delete_sapling_keys(self, sapling_key: Vec) -> Self; } -impl<'cf> DeleteSaplingKeys for WriteSaplingTxIdsBatch<'cf> { +impl DeleteSaplingKeys for WriteSaplingTxIdsBatch<'_> { /// Delete sapling keys and their results. fn delete_sapling_keys(mut self, sapling_keys: Vec) -> Self { for key in &sapling_keys { diff --git a/zebra-scan/tests/scan_task_commands.rs b/zebra-scan/tests/scan_task_commands.rs index 20bbcca190d..20c4edfe757 100644 --- a/zebra-scan/tests/scan_task_commands.rs +++ b/zebra-scan/tests/scan_task_commands.rs @@ -5,7 +5,7 @@ //! //! export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" //! cargo test scan_task_commands --features="shielded-scan" -- --ignored --nocapture -#![allow(dead_code)] +#![allow(dead_code, non_local_definitions)] use std::{fs, time::Duration}; @@ -26,6 +26,7 @@ use zebra_scan::{ use zebra_state::{ChainTipChange, LatestChainTip}; +/// Boxed state service. pub type BoxStateService = BoxService; @@ -162,6 +163,7 @@ pub(crate) async fn run() -> Result<()> { Ok(()) } +/// Starts the state service with the provided cache directory. 
pub async fn start_state_service_with_cache_dir( network: &Network, cache_dir: impl Into, diff --git a/zebra-state/src/service/finalized_state/column_family.rs b/zebra-state/src/service/finalized_state/column_family.rs index 2befbf131e2..dbd017835fb 100644 --- a/zebra-state/src/service/finalized_state/column_family.rs +++ b/zebra-state/src/service/finalized_state/column_family.rs @@ -63,7 +63,7 @@ where batch: Batch, } -impl<'cf, Key, Value> Debug for TypedColumnFamily<'cf, Key, Value> +impl Debug for TypedColumnFamily<'_, Key, Value> where Key: IntoDisk + FromDisk + Debug, Value: IntoDisk + FromDisk, @@ -80,7 +80,7 @@ where } } -impl<'cf, Key, Value> PartialEq for TypedColumnFamily<'cf, Key, Value> +impl PartialEq for TypedColumnFamily<'_, Key, Value> where Key: IntoDisk + FromDisk + Debug, Value: IntoDisk + FromDisk, @@ -90,7 +90,7 @@ where } } -impl<'cf, Key, Value> Eq for TypedColumnFamily<'cf, Key, Value> +impl Eq for TypedColumnFamily<'_, Key, Value> where Key: IntoDisk + FromDisk + Debug, Value: IntoDisk + FromDisk, @@ -243,7 +243,7 @@ where } } -impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value> +impl TypedColumnFamily<'_, Key, Value> where Key: IntoDisk + FromDisk + Debug + Ord, Value: IntoDisk + FromDisk, @@ -259,7 +259,7 @@ where } } -impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value> +impl TypedColumnFamily<'_, Key, Value> where Key: IntoDisk + FromDisk + Debug + Hash + Eq, Value: IntoDisk + FromDisk, @@ -275,7 +275,7 @@ where } } -impl<'cf, Key, Value, Batch> WriteTypedBatch<'cf, Key, Value, Batch> +impl WriteTypedBatch<'_, Key, Value, Batch> where Key: IntoDisk + FromDisk + Debug, Value: IntoDisk + FromDisk, @@ -312,7 +312,7 @@ where } // Writing a batch to the database requires an owned batch. 
-impl<'cf, Key, Value> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch> +impl WriteTypedBatch<'_, Key, Value, DiskWriteBatch> where Key: IntoDisk + FromDisk + Debug, Value: IntoDisk + FromDisk, diff --git a/zebra-state/src/service/finalized_state/disk_format.rs b/zebra-state/src/service/finalized_state/disk_format.rs index 0ce04431e54..459c370aaed 100644 --- a/zebra-state/src/service/finalized_state/disk_format.rs +++ b/zebra-state/src/service/finalized_state/disk_format.rs @@ -68,7 +68,7 @@ pub trait FromDisk: Sized { // Generic serialization impls -impl<'a, T> IntoDisk for &'a T +impl IntoDisk for &T where T: IntoDisk, { diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 12ee0528776..eb00fbda3a5 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -1272,7 +1272,7 @@ impl Chain { pub fn partial_transparent_indexes<'a>( &'a self, addresses: &'a HashSet, - ) -> impl Iterator { + ) -> impl Iterator { addresses .iter() .flat_map(|address| self.partial_transparent_transfers.get(address)) diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index 9751f8693e1..92c0619e6d3 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -1,5 +1,7 @@ //! Zebrad Subcommands +#![allow(non_local_definitions)] + use std::path::PathBuf; use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; diff --git a/zebrad/src/components/metrics.rs b/zebrad/src/components/metrics.rs index 5527d37747c..462eeb335d2 100644 --- a/zebrad/src/components/metrics.rs +++ b/zebrad/src/components/metrics.rs @@ -1,5 +1,7 @@ //! An HTTP endpoint for metrics collection. 
+#![allow(non_local_definitions)] + use std::net::SocketAddr; use abscissa_core::{Component, FrameworkError}; diff --git a/zebrad/src/components/tokio.rs b/zebrad/src/components/tokio.rs index 7478022dda1..dab20d5352f 100644 --- a/zebrad/src/components/tokio.rs +++ b/zebrad/src/components/tokio.rs @@ -7,6 +7,8 @@ //! The rayon thread pool is used for: //! - long-running CPU-bound tasks like cryptography, via [`rayon::spawn_fifo`]. +#![allow(non_local_definitions)] + use std::{future::Future, time::Duration}; use abscissa_core::{Component, FrameworkError, Shutdown}; diff --git a/zebrad/src/components/tracing/endpoint.rs b/zebrad/src/components/tracing/endpoint.rs index b5cb4a9da37..80631ac41bb 100644 --- a/zebrad/src/components/tracing/endpoint.rs +++ b/zebrad/src/components/tracing/endpoint.rs @@ -1,5 +1,7 @@ //! An HTTP endpoint for dynamically setting tracing filters. +#![allow(non_local_definitions)] + use std::net::SocketAddr; use abscissa_core::{Component, FrameworkError}; From d1ae441ab93317d147f7de60002f2a2d6b962312 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 2 Dec 2024 11:58:34 -0300 Subject: [PATCH 028/245] change(rpc): Update `getblock` RPC to more closely match zcashd (#9006) * rpc: align getblock with zcashd behaviour * Removes handling for verbosity = 3 in getblock method, adds finalorchardroot field, removes unnecessary state request. 
(#9008) * align final(sapling|orchard)root with zcashd behaviour * fix test * Apply suggestions from code review Co-authored-by: Alfredo Garcia * restore getblock docs; remove unneeded TODOs * Update zebra-rpc/src/methods.rs Co-authored-by: Arya * get rif of cloning self (#9044) --------- Co-authored-by: Arya Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-rpc/src/constants.rs | 4 + zebra-rpc/src/methods.rs | 331 +++++++++++------- ...k_verbose_hash_verbosity_1@mainnet_10.snap | 13 +- ...k_verbose_hash_verbosity_1@testnet_10.snap | 13 +- ...k_verbose_hash_verbosity_2@mainnet_10.snap | 13 +- ...k_verbose_hash_verbosity_2@testnet_10.snap | 13 +- ...ose_hash_verbosity_default@mainnet_10.snap | 13 +- ...ose_hash_verbosity_default@testnet_10.snap | 13 +- ...verbose_height_verbosity_1@mainnet_10.snap | 12 +- ...verbose_height_verbosity_1@testnet_10.snap | 12 +- ...verbose_height_verbosity_2@mainnet_10.snap | 13 +- ...verbose_height_verbosity_2@testnet_10.snap | 13 +- ...e_height_verbosity_default@mainnet_10.snap | 12 +- ...e_height_verbosity_default@testnet_10.snap | 12 +- zebra-rpc/src/methods/tests/vectors.rs | 160 ++++++++- zebra-utils/zcash-rpc-diff | 13 +- 16 files changed, 511 insertions(+), 149 deletions(-) diff --git a/zebra-rpc/src/constants.rs b/zebra-rpc/src/constants.rs index e8be508595f..14f89df6618 100644 --- a/zebra-rpc/src/constants.rs +++ b/zebra-rpc/src/constants.rs @@ -11,6 +11,10 @@ use jsonrpc_core::{Error, ErrorCode}; /// pub const INVALID_PARAMETERS_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-1); +/// The RPC error code used by `zcashd` for missing blocks, when looked up +/// by hash. +pub const INVALID_ADDRESS_OR_KEY_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-5); + /// The RPC error code used by `zcashd` for missing blocks. 
/// /// `lightwalletd` expects error code `-8` when a block is not found: diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index cb01ca8bbd3..52f28d606b3 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -37,7 +37,9 @@ use zebra_node_services::mempool; use zebra_state::{HashOrHeight, MinedTx, OutputIndex, OutputLocation, TransactionLocation}; use crate::{ - constants::{INVALID_PARAMETERS_ERROR_CODE, MISSING_BLOCK_ERROR_CODE}, + constants::{ + INVALID_ADDRESS_OR_KEY_ERROR_CODE, INVALID_PARAMETERS_ERROR_CODE, MISSING_BLOCK_ERROR_CODE, + }, methods::trees::{GetSubtrees, GetTreestate, SubtreeRpcData}, queue::Queue, }; @@ -145,7 +147,8 @@ pub trait Rpc { /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. /// If the block is not in Zebra's state, returns - /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + /// [error code `-8`.](https://github.com/zcash/zcash/issues/5758) if a height was + /// passed or -5 if a hash was passed. /// /// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html) /// method: post @@ -158,12 +161,14 @@ pub trait Rpc { /// /// # Notes /// - /// With verbosity=1, [`lightwalletd` only reads the `tx` field of the - /// result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152), - /// and other clients only read the `hash` and `confirmations` fields, - /// so we only return a few fields for now. + /// Zebra previously partially supported verbosity=1 by returning only the + /// fields required by lightwalletd ([`lightwalletd` only reads the `tx` + /// field of the result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152)). + /// That verbosity level was migrated to "3"; so while lightwalletd will + /// still work by using verbosity=1, it will sync faster if it is changed to + /// use verbosity=3. 
/// - /// `lightwalletd` and mining clients also do not use verbosity=2, so we don't support it. + /// The undocumented `chainwork` field is not returned. #[rpc(name = "getblock")] fn get_block( &self, @@ -172,6 +177,9 @@ pub trait Rpc { ) -> BoxFuture>; /// Returns the requested block header by hash or height, as a [`GetBlockHeader`] JSON string. + /// If the block is not in Zebra's state, + /// returns [error code `-8`.](https://github.com/zcash/zcash/issues/5758) + /// if a height was passed or -5 if a hash was passed. /// /// zcashd reference: [`getblockheader`](https://zcash.github.io/rpc/getblockheader.html) /// method: post @@ -181,6 +189,10 @@ pub trait Rpc { /// /// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned. /// - `verbose`: (bool, optional, default=false, example=true) false for hex encoded data, true for a json object + /// + /// # Notes + /// + /// The undocumented `chainwork` field is not returned. #[rpc(name = "getblockheader")] fn get_block_header( &self, @@ -738,6 +750,15 @@ where let mut state = self.state.clone(); let verbosity = verbosity.unwrap_or(DEFAULT_GETBLOCK_VERBOSITY); + let network = self.network.clone(); + let original_hash_or_height = hash_or_height.clone(); + + // If verbosity requires a call to `get_block_header`, resolve it here + let get_block_header_future = if matches!(verbosity, 1 | 2) { + Some(self.get_block_header(original_hash_or_height.clone(), Some(true))) + } else { + None + }; async move { let hash_or_height: HashOrHeight = hash_or_height.parse().map_server_error()?; @@ -765,58 +786,36 @@ where }), _ => unreachable!("unmatched response to a block request"), } - } else if verbosity == 1 || verbosity == 2 { - // # Performance - // - // This RPC is used in `lightwalletd`'s initial sync of 2 million blocks, - // so it needs to load all its fields very efficiently. 
- // - // Currently, we get the block hash and transaction IDs from indexes, - // which is much more efficient than loading all the block data, - // then hashing the block header and all the transactions. + } else if let Some(get_block_header_future) = get_block_header_future { + let get_block_header_result: Result = get_block_header_future.await; - // Get the block hash from the height -> hash index, if needed - // - // # Concurrency - // - // For consistency, this lookup must be performed first, then all the other - // lookups must be based on the hash. - // - // All possible responses are valid, even if the best chain changes. Clients - // must be able to handle chain forks, including a hash for a block that is - // later discovered to be on a side chain. - - let should_read_block_header = verbosity == 2; - - let hash = match hash_or_height { - HashOrHeight::Hash(hash) => hash, - HashOrHeight::Height(height) => { - let request = zebra_state::ReadRequest::BestChainBlockHash(height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_server_error()?; - - match response { - zebra_state::ReadResponse::BlockHash(Some(hash)) => hash, - zebra_state::ReadResponse::BlockHash(None) => { - return Err(Error { - code: MISSING_BLOCK_ERROR_CODE, - message: "block height not in best chain".to_string(), - data: None, - }) - } - _ => unreachable!("unmatched response to a block hash request"), - } - } + let GetBlockHeader::Object(block_header) = get_block_header_result? else { + panic!("must return Object") }; + let GetBlockHeaderObject { + hash, + confirmations, + height, + version, + merkle_root, + final_sapling_root, + sapling_tree_size, + time, + nonce, + solution, + bits, + difficulty, + previous_block_hash, + next_block_hash, + } = *block_header; + // # Concurrency // // We look up by block hash so the hash, transaction IDs, and confirmations // are consistent. 
- let mut requests = vec![ + let hash_or_height = hash.0.into(); + let requests = vec![ // Get transaction IDs from the transaction index by block hash // // # Concurrency @@ -824,27 +823,11 @@ where // A block's transaction IDs are never modified, so all possible responses are // valid. Clients that query block heights must be able to handle chain forks, // including getting transaction IDs from any chain fork. - zebra_state::ReadRequest::TransactionIdsForBlock(hash.into()), - // Sapling trees - zebra_state::ReadRequest::SaplingTree(hash.into()), + zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), // Orchard trees - zebra_state::ReadRequest::OrchardTree(hash.into()), - // Get block confirmations from the block height index - // - // # Concurrency - // - // All possible responses are valid, even if a block is added to the chain, or - // the best chain changes. Clients must be able to handle chain forks, including - // different confirmation values before or after added blocks, and switching - // between -1 and multiple different confirmation values. - zebra_state::ReadRequest::Depth(hash), + zebra_state::ReadRequest::OrchardTree(hash_or_height), ]; - if should_read_block_header { - // Block header - requests.push(zebra_state::ReadRequest::BlockHeader(hash.into())) - } - let mut futs = FuturesOrdered::new(); for request in requests { @@ -861,65 +844,55 @@ where _ => unreachable!("unmatched response to a transaction_ids_for_block request"), }; - let sapling_tree_response = futs.next().await.expect("`futs` should not be empty"); - let sapling_note_commitment_tree_count = - match sapling_tree_response.map_server_error()? 
{ - zebra_state::ReadResponse::SaplingTree(Some(nct)) => nct.count(), - zebra_state::ReadResponse::SaplingTree(None) => 0, - _ => unreachable!("unmatched response to a SaplingTree request"), - }; - let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); - let orchard_note_commitment_tree_count = - match orchard_tree_response.map_server_error()? { - zebra_state::ReadResponse::OrchardTree(Some(nct)) => nct.count(), - zebra_state::ReadResponse::OrchardTree(None) => 0, - _ => unreachable!("unmatched response to a OrchardTree request"), - }; - - // From - const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; - - let depth_response = futs.next().await.expect("`futs` should not be empty"); - let confirmations = match depth_response.map_server_error()? { - // Confirmations are one more than the depth. - // Depth is limited by height, so it will never overflow an i64. - zebra_state::ReadResponse::Depth(Some(depth)) => i64::from(depth) + 1, - zebra_state::ReadResponse::Depth(None) => NOT_IN_BEST_CHAIN_CONFIRMATIONS, - _ => unreachable!("unmatched response to a depth request"), + let zebra_state::ReadResponse::OrchardTree(orchard_tree) = + orchard_tree_response.map_server_error()? + else { + unreachable!("unmatched response to a OrchardTree request"); }; - let (time, height) = if should_read_block_header { - let block_header_response = - futs.next().await.expect("`futs` should not be empty"); + let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); + + // This could be `None` if there's a chain reorg between state queries. + let orchard_tree = + orchard_tree.ok_or_server_error("missing orchard tree for block")?; - match block_header_response.map_server_error()? { - zebra_state::ReadResponse::BlockHeader { header, height, .. 
} => { - (Some(header.time.timestamp()), Some(height)) - } - _ => unreachable!("unmatched response to a BlockHeader request"), + let final_orchard_root = match nu5_activation { + Some(activation_height) if height >= activation_height => { + Some(orchard_tree.root().into()) } - } else { - (None, hash_or_height.height()) + _other => None, }; let sapling = SaplingTrees { - size: sapling_note_commitment_tree_count, + size: sapling_tree_size, }; + let orchard_tree_size = orchard_tree.count(); let orchard = OrchardTrees { - size: orchard_note_commitment_tree_count, + size: orchard_tree_size, }; let trees = GetBlockTrees { sapling, orchard }; Ok(GetBlock::Object { - hash: GetBlockHash(hash), + hash, confirmations, - height, - time, + height: Some(height), + version: Some(version), + merkle_root: Some(merkle_root), + time: Some(time), + nonce: Some(nonce), + solution: Some(solution), + bits: Some(bits), + difficulty: Some(difficulty), tx, trees, + size: None, + final_sapling_root: Some(final_sapling_root), + final_orchard_root, + previous_block_hash: Some(previous_block_hash), + next_block_hash, }) } else { Err(Error { @@ -952,7 +925,18 @@ where .clone() .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) .await - .map_server_error()? + .map_err(|_| Error { + // Compatibility with zcashd. Note that since this function + // is reused by getblock(), we return the errors expected + // by it (they differ whether a hash or a height was passed) + code: if hash_or_height.hash().is_some() { + INVALID_ADDRESS_OR_KEY_ERROR_CODE + } else { + MISSING_BLOCK_ERROR_CODE + }, + message: "block height not in best chain".to_string(), + data: None, + })? 
else { panic!("unexpected response to BlockHeader request") }; @@ -995,13 +979,16 @@ where let mut nonce = *header.nonce; nonce.reverse(); - let final_sapling_root: [u8; 32] = if sapling_tree.position().is_some() { - let mut root: [u8; 32] = sapling_tree.root().into(); - root.reverse(); - root - } else { - [0; 32] - }; + let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); + let sapling_tree_size = sapling_tree.count(); + let final_sapling_root: [u8; 32] = + if sapling_activation.is_some() && height >= sapling_activation.unwrap() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] + }; let difficulty = header.difficulty_threshold.relative_to_network(&network); @@ -1012,6 +999,7 @@ where version: header.version, merkle_root: header.merkle_root, final_sapling_root, + sapling_tree_size, time: header.time.timestamp(), nonce, solution: header.solution, @@ -1736,8 +1724,9 @@ impl Default for SentTransactionHash { /// Response to a `getblock` RPC request. /// /// See the notes for the [`Rpc::get_block`] method. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] +#[allow(clippy::large_enum_variant)] //TODO: create a struct for the Object and Box it pub enum GetBlock { /// The request block, hex-encoded. Raw(#[serde(with = "hex")] SerializedBlock), @@ -1750,21 +1739,84 @@ pub enum GetBlock { /// or -1 if it is not in the best chain. confirmations: i64, + /// The block size. TODO: fill it + #[serde(skip_serializing_if = "Option::is_none")] + size: Option, + /// The height of the requested block. #[serde(skip_serializing_if = "Option::is_none")] height: Option, - /// The height of the requested block. + /// The version field of the requested block. #[serde(skip_serializing_if = "Option::is_none")] - time: Option, + version: Option, + /// The merkle root of the requested block. 
+ #[serde(with = "opthex", rename = "merkleroot")] + #[serde(skip_serializing_if = "Option::is_none")] + merkle_root: Option, + + // `blockcommitments` would be here. Undocumented. TODO: decide if we want to support it + // `authdataroot` would be here. Undocumented. TODO: decide if we want to support it + // + /// The root of the Sapling commitment tree after applying this block. + #[serde(with = "opthex", rename = "finalsaplingroot")] + #[serde(skip_serializing_if = "Option::is_none")] + final_sapling_root: Option<[u8; 32]>, + + /// The root of the Orchard commitment tree after applying this block. + #[serde(with = "opthex", rename = "finalorchardroot")] + #[serde(skip_serializing_if = "Option::is_none")] + final_orchard_root: Option<[u8; 32]>, + + // `chainhistoryroot` would be here. Undocumented. TODO: decide if we want to support it + // /// List of transaction IDs in block order, hex-encoded. // // TODO: use a typed Vec here + // TODO: support Objects tx: Vec, + /// The height of the requested block. + #[serde(skip_serializing_if = "Option::is_none")] + time: Option, + + /// The nonce of the requested block header. + #[serde(with = "opthex")] + #[serde(skip_serializing_if = "Option::is_none")] + nonce: Option<[u8; 32]>, + + /// The Equihash solution in the requested block header. + /// Note: presence of this field in getblock is not documented in zcashd. + #[serde(with = "opthex")] + #[serde(skip_serializing_if = "Option::is_none")] + solution: Option, + + /// The difficulty threshold of the requested block header displayed in compact form. + #[serde(with = "opthex")] + #[serde(skip_serializing_if = "Option::is_none")] + bits: Option, + + /// Floating point number that represents the difficulty limit for this block as a multiple + /// of the minimum difficulty for the network. + #[serde(skip_serializing_if = "Option::is_none")] + difficulty: Option, + + // `chainwork` would be here, but we don't plan on supporting it + // `anchor` would be here. Undocumented. 
TODO: decide if we want to support it + // `chainSupply` would be here, TODO: implement + // `valuePools` would be here, TODO: implement + // /// Information about the note commitment trees. trees: GetBlockTrees, + + /// The previous block hash of the requested block header. + #[serde(rename = "previousblockhash", skip_serializing_if = "Option::is_none")] + previous_block_hash: Option, + + /// The next block hash after the requested block header. + #[serde(rename = "nextblockhash", skip_serializing_if = "Option::is_none")] + next_block_hash: Option, }, } @@ -1777,6 +1829,17 @@ impl Default for GetBlock { time: None, tx: Vec::new(), trees: GetBlockTrees::default(), + size: None, + version: None, + merkle_root: None, + final_sapling_root: None, + final_orchard_root: None, + nonce: None, + bits: None, + difficulty: None, + previous_block_hash: None, + next_block_hash: None, + solution: None, } } } @@ -1820,6 +1883,11 @@ pub struct GetBlockHeaderObject { #[serde(with = "hex", rename = "finalsaplingroot")] pub final_sapling_root: [u8; 32], + /// The number of Sapling notes in the Sapling note commitment tree + /// after applying this block. Used by the `getblock` RPC method. + #[serde(skip)] + pub sapling_tree_size: u64, + /// The block time of the requested block header in non-leap seconds since Jan 1 1970 GMT. pub time: i64, @@ -1865,6 +1933,7 @@ impl Default for GetBlockHeaderObject { version: 4, merkle_root: block::merkle::Root([0; 32]), final_sapling_root: Default::default(), + sapling_tree_size: Default::default(), time: 0, nonce: [0; 32], solution: Solution::for_proposal(), @@ -2156,3 +2225,23 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result Ok(Height(sanitized_height)) } } + +/// A helper module to serialize `Option` as a hex string. 
+mod opthex { + use hex::ToHex; + use serde::Serializer; + + pub fn serialize(data: &Option, serializer: S) -> Result + where + S: Serializer, + T: ToHex, + { + match data { + Some(data) => { + let s = data.encode_hex::(); + serializer.serialize_str(&s) + } + None => serializer.serialize_none(), + } + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap index 6bed7d59cd2..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap @@ -5,8 +5,19 @@ expression: block { "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap index fe2c9527562..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap @@ -5,8 +5,19 @@ expression: block { "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index f18b879f6b3..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -6,9 +6,18 @@ expression: block "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, "height": 1, - "time": 1477671596, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 013a4c09b23..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -6,9 +6,18 @@ expression: block "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, "height": 1, - "time": 1477674473, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap index 6bed7d59cd2..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap @@ -5,8 +5,19 @@ expression: block { "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap index fe2c9527562..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap @@ -5,8 +5,19 @@ expression: block { "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, + "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap index 3d66b2dffa2..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap @@ -6,8 +6,18 @@ expression: block "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap index f79a4283b50..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap @@ -6,8 +6,18 @@ expression: block "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index f18b879f6b3..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -6,9 +6,18 @@ expression: block "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, "height": 1, - "time": 1477671596, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index 013a4c09b23..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -6,9 +6,18 @@ expression: block "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, "height": 1, - "time": 1477674473, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap index 3d66b2dffa2..93010ad42d4 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap @@ -6,8 +6,18 @@ expression: block "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, "height": 1, + "version": 4, + "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" ], - "trees": {} + "time": 1477671596, + "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", + "solution": 
"002b2ee0d2f5d0c1ebf5a265b6f5b428f2fdc9aaea07078a6c5cab4f1bbfcd56489863deae6ea3fd8d3d0762e8e5295ff2670c9e90d8e8c68a54a40927e82a65e1d44ced20d835818e172d7b7f5ffe0245d0c3860a3f11af5658d68b6a7253b4684ffef5242fefa77a0bfc3437e8d94df9dc57510f5a128e676dd9ddf23f0ef75b460090f507499585541ab53a470c547ea02723d3a979930941157792c4362e42d3b9faca342a5c05a56909b046b5e92e2870fca7c932ae2c2fdd97d75b6e0ecb501701c1250246093c73efc5ec2838aeb80b59577741aa5ccdf4a631b79f70fc419e28714fa22108d991c29052b2f5f72294c355b57504369313470ecdd8e0ae97fc48e243a38c2ee7315bb05b7de9602047e97449c81e46746513221738dc729d7077a1771cea858865d85261e71e82003ccfbba2416358f023251206d6ef4c5596bc35b2b5bce3e9351798aa2c9904723034e5815c7512d260cc957df5db6adf9ed7272483312d1e68c60955a944e713355089876a704aef06359238f6de5a618f7bd0b4552ba72d05a6165e582f62d55ff2e1b76991971689ba3bee16a520fd85380a6e5a31de4dd4654d561101ce0ca390862d5774921eae2c284008692e9e08562144e8aa1f399a9d3fab0c4559c1f12bc945e626f7a89668613e8829767f4116ee9a4f832cf7c3ade3a7aba8cb04de39edd94d0d05093ed642adf9fbd9d373a80832ffd1c62034e4341546b3515f0e42e6d8570393c6754be5cdb7753b4709527d3f164aebf3d315934f7b3736a1b31052f6cc5699758950331163b3df05b9772e9bf99c8c77f8960e10a15edb06200106f45742d740c422c86b7e4f5a52d3732aa79ee54cfc92f76e03c268ae226477c19924e733caf95b8f350233a5312f4ed349d3ad76f032358f83a6d0d6f83b2a456742aad7f3e615fa72286300f0ea1c9793831ef3a5a4ae08640a6e32f53d1cba0be284b25e923d0d110ba227e54725632efcbbe17c05a9cde976504f6aece0c461b562cfae1b85d5f6782ee27b3e332ac0775f681682ce524b32889f1dc4231226f1aada0703beaf8d41732c9647a0a940a86f8a1be7f239c44fcaa7ed7a055506bdbe1df848f9e047226bee1b6d788a03f6e352eead99b419cfc41741942dbeb7a5c55788d5a3e636d8aab7b36b4db71d16700373bbc1cdeba8f9b1db10bf39a621bc737ea4f4e333698d6e09b51ac7a97fb6fd117ccad1d6b6b3a7451699d5bfe448650396d7b58867b3b0872be13ad0b43da267df0ad77025155f04e20c56d6a9befb3e9c7d23b82cbf3a534295ebda540682cc81be9273781b92519c858f9c25294fbacf75c3b3c15bda6d36de1c83336f93e96910dbdcb190d6ef123c98565ff6df1e903f57d4e4df167ba6b829d6d97
13eb2126b0cf869940204137babcc6a1b7cb2f0b94318a7460e5d1a605c249bd2e72123ebad332332c18adcb285ed8874dbde084ebcd4f744465350d57110f037fffed1569d642c258749e65b0d13e117eaa37014a769b5ab479b7c77178880e77099f999abe712e543dbbf626ca9bcfddc42ff2f109d21c8bd464894e55ae504fdf81e1a7694180225da7dac8879abd1036cf26bb50532b8cf138b337a1a1bd1a43f8dd70b7399e2690c8e7a5a1fe099026b8f2a6f65fc0dbedda15ba65e0abd66c7176fb426980549892b4817de78e345a7aeab05744c3def4a2f283b4255b02c91c1af7354a368c67a11703c642a385c7453131ce3a78b24c5e22ab7e136a38498ce82082181884418cb4d6c2920f258a3ad20cfbe7104af1c6c6cb5e58bf29a9901721ad19c0a260cd09a3a772443a45aea4a5c439a95834ef5dc2e26343278947b7b796f796ae9bcadb29e2899a1d7313e6f7bfb6f8b", + "bits": "1f07ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08", + "nextblockhash": "0002a26c902619fc964443264feb16f1e3e2d71322fc53dcb81cc5d797e273ed" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap index f79a4283b50..5bd22590f1b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap @@ -6,8 +6,18 @@ expression: block "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, "height": 1, + "version": 4, + "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" ], - "trees": {} + "time": 1477674473, + "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", + "solution": 
"0053f4438864bc5d6dfc009d4bba545ac5e5feaaf46f9455b975b02115f842a966e26517ce678f1c074d09cc8d0049a190859eb505af5f3e760312fbbe54da115db2bc03c96408f39b679891790b539d2d9d17a801dc6af9af14ca3f6ba060edce2a1dd45aa45f11fe37dbaf1eb2647ae7c393f6680c3d5d7e53687e34530f48edf58924a04d3e0231c150b1c8218998f674bc171edd222bcb4ac4ba4ea52d7baa86399f371d5284043e1e166f9069dd0f2904ff94c7922a70fa7c660e0553cc40a20d9ee08eb3f47278485801ddae9c270411360773f0b74e03db2d92c50952c9bd4924bbca2a260e1235e99df51fe71e75744232f2d641ef94f394110a5ad05f51a057e4cb515b92c16cb1404a8cdcc43d4a4bb2caa54ca35dccf41aa7d832da65123b7029223c46ed2a13387d598d445435d3cb32fdad9e27672903864c90d86353b162033078327b5b7aaffc89b40096ae004f2d5c6bd2c99188574348518db66e9b6020f93f12ee1c06f7b00fe346fefceaffb1da9e3cdf08285057f549733eb10825737fcd1431bfdfb155f323f24e95a869212baacf445b30f2670206645779110e6547d5da90a5f2fe5151da911d5ecd5a833023661d1356b6c395d85968947678d53efd4db7b06f23b21125e74492644277ea0c1131b80d6a4e3e8093b82332556fbb3255a55ac3f0b7e4844c0e12bf577c37fd02323ae5ef4781772ed501d63b568032a3d31576c5104a48c01ac54f715286932351a8adc8cf2467a84a0572e99f366ee00f82c3735545fd4bb941d591ce70070425a81304272db89887949bc7dd8236bb7e82190f9815da938cd6e8fec7660e91354326a7a9bfe38120e97997fca3c289d54513ed00286c2b825fbe84f91a39528f335674b5e957425a6edfdd00f2feb2c2df575616197998c1e964e069875d4d934f419a9b02b100848d023b76d47bd4e284c3895ef9227a40d8ea8826e86c7155d6aa95b8f9175812523a32cd611efc700688e03f7c245c5bff01718281b5d75cefe8318b2c08962236b14a0bf79534c203df735fd9cced97cbae07c2b4ee9cda8c9993f3f6277ff3fec261fb94d3961c4befe4b0893dcf67b312c7d8d6ff7adc8539cb2b1d3534fccf109efddd07a9f1e77b94ab1e505b164221dca1c34621b1e9d234c31a032a401267d95f65b800d579a2482638dfeade804149c81e95d7ef5510ac0b6212231506b1c635a2e1d2f0c9712989f9f246762fadb4c55c20f707dcc0e510a33e9465fc5d5bdbfa524dab0d7a1c6a1baaa36869cf542aa2257c5c44ef07547a570343442c6091e13bc04d559dc0e6db5b001861914bf956816edce2a86b274bd97f27e2dbb08608c16a3e5d8595952faa91fb162d7fa6a7a47e849a1ad8fab3ba620ee3295a04fe1
3e5fb655ac92ae60d01020b8999526af8d56b28733e69c9ffb285de27c61edc0bf62261ac0787eff347d0fcd62257301ede9603106ea41650a3e3119bd5c4e86a7f6a3f00934f3a545f7f21d41699f3e35d38cf925a8bdaf2bf7eedea11c31c3d8bf6c527c77c6378281cdf02211a58fa5e46d28d7e7c5fb79d69b31703fd752395da115845952cf99aaeb2155c2ab951a69f67d938f223185567e52cfa3e57b62c790bf78674c4b02c12b7d3225fe8f705b408ba11c24245b3924482e2f3480994461b550641a88cd941d371139f3498afacdcba1249631402b20695760eaada5376e68df0e45139c410700effc9420dc3726515e7fcb3f349320f30511451964bd9b6530682efec65910ceb548aa2ab05ac3309e803161697213631ae8e13cc7d223ac28446c1bf94a19a8782ac16ff57df7ee4f10fb6e488c02c68d6b6dee6987f6d2c39227da366c59f54ff67e312ca530e7c467c3dc8", + "bits": "2007ffff", + "difficulty": 1.0, + "trees": {}, + "previousblockhash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", + "nextblockhash": "00f1a49e54553ac3ef735f2eb1d8247c9a87c22a47dbd7823ae70adcd6c21a18" } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index d5a923516c0..007a89ee893 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -55,6 +55,37 @@ async fn rpc_getinfo() { assert!(rpc_tx_queue_task_result.is_none()); } +// Helper function that returns the nonce and final sapling root of a given +// Block. 
+async fn get_block_data( + read_state: &ReadStateService, + block: Arc, + height: usize, +) -> ([u8; 32], [u8; 32]) { + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = read_state + .clone() + .oneshot(zebra_state::ReadRequest::SaplingTree(HashOrHeight::Height( + (height as u32).try_into().unwrap(), + ))) + .await + .expect("should have sapling tree for block hash") + else { + panic!("unexpected response to SaplingTree request") + }; + + let mut expected_nonce = *block.header.nonce; + expected_nonce.reverse(); + let sapling_tree = sapling_tree.expect("should always have sapling root"); + let expected_final_sapling_root: [u8; 32] = if sapling_tree.position().is_some() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] + }; + (expected_nonce, expected_final_sapling_root) +} + #[tokio::test(flavor = "multi_thread")] async fn rpc_getblock() { let _init_guard = zebra_test::init(); @@ -78,7 +109,7 @@ async fn rpc_getblock() { false, true, Buffer::new(mempool.clone(), 1), - read_state, + read_state.clone(), latest_chain_tip, ); @@ -132,19 +163,38 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { hash: GetBlockHash(block.hash()), confirmations: (blocks.len() - i).try_into().expect("valid i64"), height: Some(Height(i.try_into().expect("valid u32"))), - time: None, + time: Some(block.header.time.timestamp()), tx: block .transactions .iter() .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: Some(block.header.difficulty_threshold), + difficulty: Some( + block + .header + .difficulty_threshold + 
.relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -156,19 +206,38 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { hash: GetBlockHash(block.hash()), confirmations: (blocks.len() - i).try_into().expect("valid i64"), - height: None, - time: None, + height: Some(Height(i.try_into().expect("valid u32"))), + time: Some(block.header.time.timestamp()), tx: block .transactions .iter() .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: Some(block.header.difficulty_threshold), + difficulty: Some( + block + .header + .difficulty_threshold + .relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -180,6 +249,9 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { @@ -193,6 +265,22 @@ async fn rpc_getblock() { .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: Some(block.header.difficulty_threshold), + difficulty: Some( + 
block + .header + .difficulty_threshold + .relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -204,6 +292,9 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { @@ -217,6 +308,22 @@ async fn rpc_getblock() { .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: Some(block.header.difficulty_threshold), + difficulty: Some( + block + .header + .difficulty_threshold + .relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -228,19 +335,38 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { hash: GetBlockHash(block.hash()), confirmations: (blocks.len() - i).try_into().expect("valid i64"), height: Some(Height(i.try_into().expect("valid u32"))), - time: None, + time: Some(block.header.time.timestamp()), tx: block .transactions .iter() .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: 
Some(block.header.difficulty_threshold), + difficulty: Some( + block + .header + .difficulty_threshold + .relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -252,19 +378,38 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); + let (expected_nonce, expected_final_sapling_root) = + get_block_data(&read_state, block.clone(), i).await; + assert_eq!( get_block, GetBlock::Object { hash: GetBlockHash(block.hash()), confirmations: (blocks.len() - i).try_into().expect("valid i64"), - height: None, - time: None, + height: Some(Height(i.try_into().expect("valid u32"))), + time: Some(block.header.time.timestamp()), tx: block .transactions .iter() .map(|tx| tx.hash().encode_hex()) .collect(), trees, + size: None, + version: Some(block.header.version), + merkle_root: Some(block.header.merkle_root), + final_sapling_root: Some(expected_final_sapling_root), + final_orchard_root: None, + nonce: Some(expected_nonce), + bits: Some(block.header.difficulty_threshold), + difficulty: Some( + block + .header + .difficulty_threshold + .relative_to_network(&Mainnet) + ), + previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), + next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), + solution: Some(block.header.solution), } ); } @@ -449,6 +594,7 @@ async fn rpc_getblockheader() { version: 4, merkle_root: block.header.merkle_root, final_sapling_root: expected_final_sapling_root, + sapling_tree_size: sapling_tree.count(), time: block.header.time.timestamp(), nonce: expected_nonce, solution: block.header.solution, diff --git a/zebra-utils/zcash-rpc-diff b/zebra-utils/zcash-rpc-diff index 57cd5c42cca..8d1507f2dee 100755 --- a/zebra-utils/zcash-rpc-diff +++ b/zebra-utils/zcash-rpc-diff @@ -22,6 +22,9 @@ JQ="${JQ:-jq}" # - Use 
`-rpccookiefile=your/cookie/file` for a cookie file. # - Use `-rpcpassword=your-password` for a password. ZCASHD_EXTRA_ARGS="${ZCASHD_EXTRA_ARGS:-}" +# Zebrad authentication modes: +# - Use `-rpccookiefile=your/cookie/file` for a cookie file. +ZEBRAD_EXTRA_ARGS="${ZEBRAD_EXTRA_ARGS:-}" # We show this many lines of data, removing excess lines from the middle or end of the output. OUTPUT_DATA_LINE_LIMIT="${OUTPUT_DATA_LINE_LIMIT:-40}" # When checking different mempools, we show this many different transactions. @@ -49,7 +52,7 @@ ZEBRAD_RELEASE_INFO="$ZCASH_RPC_TMP_DIR/first-node-check-getinfo.json" ZCASHD_RELEASE_INFO="$ZCASH_RPC_TMP_DIR/second-node-check-getinfo.json" echo "Checking first node release info..." -$ZCASH_CLI -rpcport="$ZEBRAD_RPC_PORT" getinfo > "$ZEBRAD_RELEASE_INFO" +$ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" getinfo > "$ZEBRAD_RELEASE_INFO" ZEBRAD_NAME=$(cat "$ZEBRAD_RELEASE_INFO" | grep '"subversion"' | cut -d: -f2 | cut -d/ -f2 | \ tr 'A-Z' 'a-z' | sed 's/magicbean/zcashd/ ; s/zebra$/zebrad/') @@ -74,7 +77,7 @@ ZEBRAD_BLOCKCHAIN_INFO="$ZCASH_RPC_TMP_DIR/$ZEBRAD_NAME-check-getblockchaininfo. ZCASHD_BLOCKCHAIN_INFO="$ZCASH_RPC_TMP_DIR/$ZCASHD_NAME-check-getblockchaininfo.json" echo "Checking $ZEBRAD network and tip height..." -$ZCASH_CLI -rpcport="$ZEBRAD_RPC_PORT" getblockchaininfo > "$ZEBRAD_BLOCKCHAIN_INFO" +$ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" getblockchaininfo > "$ZEBRAD_BLOCKCHAIN_INFO" ZEBRAD_NET=$(cat "$ZEBRAD_BLOCKCHAIN_INFO" | grep '"chain"' | cut -d: -f2 | tr -d ' ,"') ZEBRAD_HEIGHT=$(cat "$ZEBRAD_BLOCKCHAIN_INFO" | grep '"blocks"' | cut -d: -f2 | tr -d ' ,"') @@ -109,7 +112,7 @@ echo "$@" echo echo "Querying $ZEBRAD $ZEBRAD_NET chain at height >=$ZEBRAD_HEIGHT..." -time $ZCASH_CLI -rpcport="$ZEBRAD_RPC_PORT" "$@" > "$ZEBRAD_RESPONSE" +time $ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" "$@" > "$ZEBRAD_RESPONSE" echo echo "Querying $ZCASHD $ZCASHD_NET chain at height >=$ZCASHD_HEIGHT..." 
@@ -166,7 +169,7 @@ echo "$@" echo echo "Querying $ZEBRAD $ZEBRAD_NET chain at height >=$ZEBRAD_HEIGHT..." -$ZCASH_CLI -rpcport="$ZEBRAD_RPC_PORT" "$@" > "$ZEBRAD_CHECK_RESPONSE" +$ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" "$@" > "$ZEBRAD_CHECK_RESPONSE" echo "Querying $ZCASHD $ZCASHD_NET chain at height >=$ZCASHD_HEIGHT..." $ZCASH_CLI $ZCASHD_EXTRA_ARGS "$@" > "$ZCASHD_CHECK_RESPONSE" @@ -257,7 +260,7 @@ if [ "$1" == "getrawmempool" ] && [ $CHECK_EXIT_STATUS != 0 ]; then for TRANSACTION_ID in $ZEBRAD_TRANSACTION_IDS; do TRANSACTION_HEX_FILE="$ZCASH_RPC_TMP_DIR/$ZEBRAD_NAME-$ZEBRAD_NET-$ZEBRAD_HEIGHT-$TRANSACTION_ID.json" - $ZCASH_CLI -rpcport="$ZEBRAD_RPC_PORT" getrawtransaction $TRANSACTION_ID 0 > $TRANSACTION_HEX_FILE + $ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" getrawtransaction $TRANSACTION_ID 0 > $TRANSACTION_HEX_FILE echo "## Displaying transaction $TRANSACTION_ID from zebrad (limited to ${OUTPUT_DATA_LINE_LIMIT} lines)" echo From 7561e1ef268b31c2872af3e09d4f3205abd94d0e Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 4 Dec 2024 08:53:06 -0500 Subject: [PATCH 029/245] change(mempool): Return verification result after attempting to insert transactions in the mempool (#9067) * change(mempool): Return verification result after attempting to insert transactions in the mempool (#8901) * respond with mempool verification result after a transaction has been inserted or has failed to be inserted into the mempool * returns mempool verification errors early, and fixes handling for cancellations or timeouts. * Adds a comment in test warning against code reuse with buffered services. 
* De-duplicates handling for timeout errors --- zebra-consensus/src/transaction.rs | 22 ++-- zebra-node-services/src/mempool.rs | 5 +- zebrad/src/components/mempool.rs | 20 +-- zebrad/src/components/mempool/downloads.rs | 116 +++++++++++------- zebrad/src/components/mempool/tests/vector.rs | 18 +-- 5 files changed, 108 insertions(+), 73 deletions(-) diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index aac77a055d6..a3729b0a280 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -549,17 +549,17 @@ where )?; if let Some(mut mempool) = mempool { - if !transaction.transaction.transaction.outputs().is_empty() { - tokio::spawn(async move { - tokio::time::sleep(POLL_MEMPOOL_DELAY).await; - let _ = mempool - .ready() - .await - .expect("mempool poll_ready() method should not return an error") - .call(mempool::Request::CheckForVerifiedTransactions) - .await; - }); - } + tokio::spawn(async move { + // Best-effort poll of the mempool to provide a timely response to + // `sendrawtransaction` RPC calls or `AwaitOutput` mempool calls. + tokio::time::sleep(POLL_MEMPOOL_DELAY).await; + let _ = mempool + .ready() + .await + .expect("mempool poll_ready() method should not return an error") + .call(mempool::Request::CheckForVerifiedTransactions) + .await; + }); } Response::Mempool { transaction, spent_mempool_outpoints } diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index 10f51cf4a30..ad3e28c7eec 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -16,12 +16,9 @@ use zebra_chain::transaction::VerifiedUnminedTx; use crate::BoxError; mod gossip; - mod transaction_dependencies; -pub use transaction_dependencies::TransactionDependencies; - -pub use self::gossip::Gossip; +pub use self::{gossip::Gossip, transaction_dependencies::TransactionDependencies}; /// A mempool service request. 
/// diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index b94ad0b09b8..6986f601e9c 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -28,7 +28,6 @@ use std::{ use futures::{future::FutureExt, stream::Stream}; use tokio::sync::{broadcast, oneshot}; -use tokio_stream::StreamExt; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service}; use zebra_chain::{ @@ -43,7 +42,7 @@ use zebra_node_services::mempool::{Gossip, Request, Response}; use zebra_state as zs; use zebra_state::{ChainTipChange, TipAction}; -use crate::components::{mempool::crawler::RATE_LIMIT_DELAY, sync::SyncStatus}; +use crate::components::sync::SyncStatus; pub mod config; mod crawler; @@ -586,11 +585,9 @@ impl Service for Mempool { let best_tip_height = self.latest_chain_tip.best_tip_height(); // Clean up completed download tasks and add to mempool if successful. - while let Poll::Ready(Some(r)) = - pin!(tx_downloads.timeout(RATE_LIMIT_DELAY)).poll_next(cx) - { - match r { - Ok(Ok((tx, spent_mempool_outpoints, expected_tip_height))) => { + while let Poll::Ready(Some(result)) = pin!(&mut *tx_downloads).poll_next(cx) { + match result { + Ok(Ok((tx, spent_mempool_outpoints, expected_tip_height, rsp_tx))) => { // # Correctness: // // It's okay to use tip height here instead of the tip hash since @@ -609,18 +606,25 @@ impl Service for Mempool { // Save transaction ids that we will send to peers send_to_peers_ids.insert(inserted_id); } + + // Send the result to responder channel if one was provided. + if let Some(rsp_tx) = rsp_tx { + let _ = rsp_tx + .send(insert_result.map(|_| ()).map_err(|err| err.into())); + } } else { tracing::trace!("chain grew during tx verification, retrying ..",); // We don't care if re-queueing the transaction request fails. 
let _result = tx_downloads - .download_if_needed_and_verify(tx.transaction.into(), None); + .download_if_needed_and_verify(tx.transaction.into(), rsp_tx); } } Ok(Err((tx_id, error))) => { tracing::debug!(?tx_id, ?error, "mempool transaction failed to verify"); metrics::counter!("mempool.failed.verify.tasks.total", "reason" => error.to_string()).increment(1); + storage.reject_if_needed(tx_id, error); } Err(_elapsed) => { diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index 45fd44a7c05..6e634717503 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -54,7 +54,10 @@ use zebra_network as zn; use zebra_node_services::mempool::Gossip; use zebra_state::{self as zs, CloneError}; -use crate::components::sync::{BLOCK_DOWNLOAD_TIMEOUT, BLOCK_VERIFY_TIMEOUT}; +use crate::components::{ + mempool::crawler::RATE_LIMIT_DELAY, + sync::{BLOCK_DOWNLOAD_TIMEOUT, BLOCK_VERIFY_TIMEOUT}, +}; use super::MempoolError; @@ -154,12 +157,16 @@ where pending: FuturesUnordered< JoinHandle< Result< - ( - VerifiedUnminedTx, - Vec, - Option, - ), - (TransactionDownloadVerifyError, UnminedTxId), + Result< + ( + VerifiedUnminedTx, + Vec, + Option, + Option>>, + ), + (TransactionDownloadVerifyError, UnminedTxId), + >, + tokio::time::error::Elapsed, >, >, >, @@ -179,12 +186,16 @@ where ZS::Future: Send, { type Item = Result< - ( - VerifiedUnminedTx, - Vec, - Option, - ), - (UnminedTxId, TransactionDownloadVerifyError), + Result< + ( + VerifiedUnminedTx, + Vec, + Option, + Option>>, + ), + (UnminedTxId, TransactionDownloadVerifyError), + >, + tokio::time::error::Elapsed, >; fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { @@ -198,20 +209,26 @@ where // task is scheduled for wakeup when the next task becomes ready. 
// // TODO: this would be cleaner with poll_map (#2693) - if let Some(join_result) = ready!(this.pending.poll_next(cx)) { - match join_result.expect("transaction download and verify tasks must not panic") { - Ok((tx, spent_mempool_outpoints, tip_height)) => { + let item = if let Some(join_result) = ready!(this.pending.poll_next(cx)) { + let result = join_result.expect("transaction download and verify tasks must not panic"); + let result = match result { + Ok(Ok((tx, spent_mempool_outpoints, tip_height, rsp_tx))) => { this.cancel_handles.remove(&tx.transaction.id); - Poll::Ready(Some(Ok((tx, spent_mempool_outpoints, tip_height)))) + Ok(Ok((tx, spent_mempool_outpoints, tip_height, rsp_tx))) } - Err((e, hash)) => { + Ok(Err((e, hash))) => { this.cancel_handles.remove(&hash); - Poll::Ready(Some(Err((hash, e)))) + Ok(Err((hash, e))) } - } + Err(elapsed) => Err(elapsed), + }; + + Some(result) } else { - Poll::Ready(None) - } + None + }; + + Poll::Ready(item) } fn size_hint(&self) -> (usize, Option) { @@ -255,7 +272,7 @@ where pub fn download_if_needed_and_verify( &mut self, gossiped_tx: Gossip, - rsp_tx: Option>>, + mut rsp_tx: Option>>, ) -> Result<(), MempoolError> { let txid = gossiped_tx.id(); @@ -381,36 +398,53 @@ where // Tack the hash onto the error so we can remove the cancel handle // on failure as well as on success. .map_err(move |e| (e, txid)) - .inspect(move |result| { - // Hide the transaction data to avoid filling the logs - let result = result.as_ref().map(|_tx| txid); - debug!("mempool transaction result: {result:?}"); - }) + .inspect(move |result| { + // Hide the transaction data to avoid filling the logs + let result = result.as_ref().map(|_tx| txid); + debug!("mempool transaction result: {result:?}"); + }) .in_current_span(); let task = tokio::spawn(async move { + let fut = tokio::time::timeout(RATE_LIMIT_DELAY, fut); + // Prefer the cancel handle if both are ready. let result = tokio::select! 
{ biased; _ = &mut cancel_rx => { trace!("task cancelled prior to completion"); metrics::counter!("mempool.cancelled.verify.tasks.total").increment(1); - Err((TransactionDownloadVerifyError::Cancelled, txid)) + if let Some(rsp_tx) = rsp_tx.take() { + let _ = rsp_tx.send(Err("verification cancelled".into())); + } + + Ok(Err((TransactionDownloadVerifyError::Cancelled, txid))) } - verification = fut => verification, + verification = fut => { + verification + .inspect_err(|_elapsed| { + if let Some(rsp_tx) = rsp_tx.take() { + let _ = rsp_tx.send(Err("timeout waiting for verification result".into())); + } + }) + .map(|inner_result| { + match inner_result { + Ok((transaction, spent_mempool_outpoints, tip_height)) => Ok((transaction, spent_mempool_outpoints, tip_height, rsp_tx)), + Err((tx_verifier_error, tx_id)) => { + if let Some(rsp_tx) = rsp_tx.take() { + let error_msg = format!( + "failed to validate tx: {tx_id}, error: {tx_verifier_error}" + ); + let _ = rsp_tx.send(Err(error_msg.into())); + }; + + Err((tx_verifier_error, tx_id)) + } + } + }) + }, }; - // Send the result to responder channel if one was provided. - // TODO: Wait until transactions are added to the verified set before sending an Ok to `rsp_tx`. 
- if let Some(rsp_tx) = rsp_tx { - let _ = rsp_tx.send( - result - .as_ref() - .map(|_| ()) - .map_err(|(err, _)| err.clone().into()), - ); - } - result }); diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 1b87097aaf1..9dd3557de9c 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -978,22 +978,22 @@ async fn mempool_responds_to_await_output() -> Result<(), Report> { let result_rx = results.remove(0).expect("should pass initial checks"); assert!(results.is_empty(), "should have 1 result for 1 queued tx"); - tokio::time::timeout(Duration::from_secs(10), result_rx) - .await - .expect("should not time out") - .expect("mempool tx verification result channel should not be closed") - .expect("mocked verification should be successful"); - - // Wait for next steps in mempool's Downloads to finish - // TODO: Move this and the `ready().await` below above waiting for the mempool verification result above after - // waiting to respond with a transaction's verification result until after it's been inserted into the mempool. + // Wait for post-verification steps in mempool's Downloads tokio::time::sleep(Duration::from_secs(1)).await; + // Note: Buffered services shouldn't be polled without being called. + // See `mempool::Request::CheckForVerifiedTransactions` for more details. 
mempool .ready() .await .expect("polling mempool should succeed"); + tokio::time::timeout(Duration::from_secs(10), result_rx) + .await + .expect("should not time out") + .expect("mempool tx verification result channel should not be closed") + .expect("mocked verification should be successful"); + assert_eq!( mempool.storage().transaction_count(), 1, From e72c0fe6f2167ead685366dfcf15c56fc0e8b992 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 4 Dec 2024 14:52:28 +0000 Subject: [PATCH 030/245] refactor(mergify): streamline queue and priority rules (#9068) - Removed deprecated actions and simplified default queue rules. - Adjusted batch sizes for urgent and batched queues. - Consolidated priority rules to enhance clarity and efficiency. - Updated conditions for moving pull requests to different queues based on labels and reviews. --- .github/mergify.yml | 78 ++++++++++++++------------------------------- 1 file changed, 24 insertions(+), 54 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index ebce6885ebf..95e464822b3 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -10,16 +10,11 @@ merge_queue: # Provides a means to set configuration values that act as fallbacks # for queue_rules and pull_request_rules defaults: - actions: - squash: - # TODO: Adapt our PR template to use title+body - commit_message: all-commits - + # Define our default queue rules queue_rule: # Allow to update/rebase the original pull request if possible to check its mergeability, # and it does not create a draft PR if not needed allow_inplace_checks: True - batch_size: 20 # Wait for about 10% of the time it takes Rust PRs to run CI (~1h) batch_max_wait_time: "10 minutes" queue_conditions: @@ -27,73 +22,48 @@ defaults: # which are the same as the GitHub main branch protection rules # https://docs.mergify.com/conditions/#about-branch-protection - base=main + # is not in draft + - -draft + # does not include the do-not-merge label + - label!=do-not-merge + # 
has at least one approving reviewer + - "#approved-reviews-by >= 1" + # Allows to define the rules that reign over our merge queues queue_rules: - name: urgent - batch_size: 8 + batch_size: 5 # Wait a short time to embark hotfixes together in a merge train batch_max_wait_time: "2 minutes" + queue_conditions: + # is labeled with Critical priority + - 'label~=^P-Critical' - name: batched + batch_size: 20 + +pull_request_rules: + - name: move to any queue if GitHub Rulesets are satisfied + conditions: [] + actions: + queue: # Rules that will determine which priority a pull request has when entering # our merge queue # # These rules are checked in order, the first one to be satisfied applies priority_rules: - - name: move to urgent queue when CI passes with multiple reviews + - name: urgent conditions: - # This queue handles a PR if it: - # has multiple approving reviewers - - "#approved-reviews-by>=2" # is labeled with Critical priority - 'label~=^P-Critical' - # and satisfies the standard merge conditions: - # targets main - - base=main - # is not in draft - - -draft - # does not include the do-not-merge label - - label!=do-not-merge allow_checks_interruption: true priority: high - - name: move to urgent queue when CI passes with 1 review + - name: low conditions: - # This queue handles a PR if it: - # has at least one approving reviewer (branch protection rule) - # does not need extra reviews - - 'label!=extra-reviews' - # is labeled with Critical priority - - 'label~=^P-Critical' - # and satisfies the standard merge conditions: - - base=main - - -draft - - label!=do-not-merge - priority: high - - - name: move to medium queue when CI passes with multiple reviews - conditions: - # This queue handles a PR if it: - # has multiple approving reviewers - - "#approved-reviews-by>=2" - # is labeled with any other priority (rules are checked in order) - # and satisfies the standard merge conditions: - - base=main - - -draft - - label!=do-not-merge - priority: medium - - - 
name: move to low queue when CI passes with 1 review - conditions: - # This queue handles a PR if it: - # has at least one approving reviewer (branch protection rule) - # does not need extra reviews - - 'label!=extra-reviews' - # is labeled with any other priority (rules are checked in order) - # and satisfies the standard merge conditions: - - base=main - - -draft - - label!=do-not-merge + # is labeled with Optional or Low priority + - 'label~=^P-(Optional|Low)' + allow_checks_interruption: true priority: low From eb9e1f150ac874186c09f7ab688344b25a5ba792 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:17:33 +0000 Subject: [PATCH 031/245] build(deps): bump the devops group across 1 directory with 5 updates (#9061) Bumps the devops group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [codecov/codecov-action](https://github.com/codecov/codecov-action) | `4.6.0` | `5.0.7` | | [tj-actions/changed-files](https://github.com/tj-actions/changed-files) | `45.0.3` | `45.0.4` | | [docker/metadata-action](https://github.com/docker/metadata-action) | `5.5.1` | `5.6.1` | | [docker/build-push-action](https://github.com/docker/build-push-action) | `6.9.0` | `6.10.0` | | [docker/scout-action](https://github.com/docker/scout-action) | `1.15.0` | `1.15.1` | Updates `codecov/codecov-action` from 4.6.0 to 5.0.7 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.6.0...v5.0.7) Updates `tj-actions/changed-files` from 45.0.3 to 45.0.4 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v45.0.3...v45.0.4) Updates `docker/metadata-action` from 5.5.1 to 5.6.1 - 
[Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v5.5.1...v5.6.1) Updates `docker/build-push-action` from 6.9.0 to 6.10.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.9.0...v6.10.0) Updates `docker/scout-action` from 1.15.0 to 1.15.1 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.15.0...v1.15.1) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major dependency-group: devops - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: docker/metadata-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/scout-action dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 4 ++-- .github/workflows/sub-build-docker-image.yml | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 3d1e3b946c5..1b87753b508 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v4.6.0 + uses: codecov/codecov-action@v5.0.7 diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 22ec5089c37..43acadbd8ec 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.3 + uses: tj-actions/changed-files@v45.0.4 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.3 + uses: tj-actions/changed-files@v45.0.4 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index ee95278b9cf..9050d223080 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -93,7 +93,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v5.5.1 + uses: docker/metadata-action@v5.6.1 with: # list of Docker images to use as base name for tags # We only publish images to DockerHub if a release is not a pre-release @@ -161,7 +161,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: 
docker/build-push-action@v6.9.0 + uses: docker/build-push-action@v6.10.0 with: target: ${{ inputs.dockerfile_target }} context: . @@ -193,7 +193,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.15.0 + uses: docker/scout-action@v1.15.1 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. From a3bb1e2e05f61904e084f61eba28c92c328dbbd7 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Wed, 4 Dec 2024 15:45:10 -0300 Subject: [PATCH 032/245] change(diagnostics): Updates error messages to include inner error types (#9066) * add: add consensus validation reason to error messages * add additional instances --- zebra-chain/src/history_tree.rs | 2 +- zebra-consensus/src/block.rs | 8 ++++---- zebra-consensus/src/checkpoint.rs | 4 ++-- zebra-consensus/src/error.rs | 24 ++++++++++++---------- zebra-network/src/peer/error.rs | 4 ++-- zebra-state/src/error.rs | 6 +++--- zebrad/src/components/mempool/downloads.rs | 6 +++--- zebrad/src/components/mempool/error.rs | 8 +++++--- zebrad/src/components/mempool/storage.rs | 2 +- 9 files changed, 34 insertions(+), 30 deletions(-) diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs index 91fa3a17628..d84f92321af 100644 --- a/zebra-chain/src/history_tree.rs +++ b/zebra-chain/src/history_tree.rs @@ -30,7 +30,7 @@ pub enum HistoryTreeError { #[non_exhaustive] InnerError { inner: zcash_history::Error }, - #[error("I/O error")] + #[error("I/O error: {0}")] IOError(#[from] io::Error), } diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 611aea2ceba..247079c401a 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -74,19 +74,19 @@ pub enum VerifyBlockError { 
#[error(transparent)] Time(zebra_chain::block::BlockTimeError), - #[error("unable to commit block after semantic verification")] + #[error("unable to commit block after semantic verification: {0}")] // TODO: make this into a concrete type, and add it to is_duplicate_request() (#2908) Commit(#[source] BoxError), #[cfg(feature = "getblocktemplate-rpcs")] - #[error("unable to validate block proposal: failed semantic verification (proof of work is not checked for proposals)")] + #[error("unable to validate block proposal: failed semantic verification (proof of work is not checked for proposals): {0}")] // TODO: make this into a concrete type (see #5732) ValidateProposal(#[source] BoxError), - #[error("invalid transaction")] + #[error("invalid transaction: {0}")] Transaction(#[from] TransactionError), - #[error("invalid block subsidy")] + #[error("invalid block subsidy: {0}")] Subsidy(#[from] SubsidyError), } diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 039ea6e33e3..36b3a76d57f 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -992,9 +992,9 @@ pub enum VerifyCheckpointError { CheckpointList(BoxError), #[error(transparent)] VerifyBlock(VerifyBlockError), - #[error("invalid block subsidy")] + #[error("invalid block subsidy: {0}")] SubsidyError(#[from] SubsidyError), - #[error("invalid amount")] + #[error("invalid amount: {0}")] AmountError(#[from] amount::Error), #[error("too many queued blocks at this height")] QueuedLimit, diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index 8fe14c62d52..b0c867fc148 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -121,7 +121,7 @@ pub enum TransactionError { transaction_hash: zebra_chain::transaction::Hash, }, - #[error("coinbase transaction failed subsidy validation")] + #[error("coinbase transaction failed subsidy validation: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), 
proptest(skip))] Subsidy(#[from] SubsidyError), @@ -140,7 +140,7 @@ pub enum TransactionError { #[error("if there are no Spends or Outputs, the value balance MUST be 0.")] BadBalance, - #[error("could not verify a transparent script")] + #[error("could not verify a transparent script: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] Script(#[from] zebra_script::Error), @@ -149,29 +149,29 @@ pub enum TransactionError { // TODO: the underlying error is bellman::VerificationError, but it does not implement // Arbitrary as required here. - #[error("spend proof MUST be valid given a primary input formed from the other fields except spendAuthSig")] + #[error("spend proof MUST be valid given a primary input formed from the other fields except spendAuthSig: {0}")] Groth16(String), // TODO: the underlying error is io::Error, but it does not implement Clone as required here. - #[error("Groth16 proof is malformed")] + #[error("Groth16 proof is malformed: {0}")] MalformedGroth16(String), #[error( - "Sprout joinSplitSig MUST represent a valid signature under joinSplitPubKey of dataToBeSigned" + "Sprout joinSplitSig MUST represent a valid signature under joinSplitPubKey of dataToBeSigned: {0}" )] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] Ed25519(#[from] zebra_chain::primitives::ed25519::Error), - #[error("Sapling bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash")] + #[error("Sapling bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] RedJubjub(zebra_chain::primitives::redjubjub::Error), - #[error("Orchard bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash")] + #[error("Orchard bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash: {0}")] 
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] RedPallas(zebra_chain::primitives::reddsa::Error), // temporary error type until #1186 is fixed - #[error("Downcast from BoxError to redjubjub::Error failed")] + #[error("Downcast from BoxError to redjubjub::Error failed: {0}")] InternalDowncastError(String), #[error("either vpub_old or vpub_new must be zero")] @@ -201,12 +201,12 @@ pub enum TransactionError { #[error("could not find a mempool transaction input UTXO in the best chain")] TransparentInputNotFound, - #[error("could not validate nullifiers and anchors on best chain")] + #[error("could not validate nullifiers and anchors on best chain: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] // This error variant is at least 128 bytes ValidateContextError(Box), - #[error("could not validate mempool transaction lock time on best chain")] + #[error("could not validate mempool transaction lock time on best chain: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] // TODO: turn this into a typed error ValidateMempoolLockTimeError(String), @@ -236,7 +236,9 @@ pub enum TransactionError { min_spend_height: block::Height, }, - #[error("failed to verify ZIP-317 transaction rules, transaction was not inserted to mempool")] + #[error( + "failed to verify ZIP-317 transaction rules, transaction was not inserted to mempool: {0}" + )] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] Zip317(#[from] zebra_chain::transaction::zip317::Error), } diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs index c40c34b1d1d..d85e0e143ba 100644 --- a/zebra-network/src/peer/error.rs +++ b/zebra-network/src/peer/error.rs @@ -251,10 +251,10 @@ pub enum HandshakeError { #[error("Peer closed connection")] ConnectionClosed, /// An error occurred while performing an IO operation. 
- #[error("Underlying IO error")] + #[error("Underlying IO error: {0}")] Io(#[from] std::io::Error), /// A serialization error occurred while reading or writing a message. - #[error("Serialization error")] + #[error("Serialization error: {0}")] Serialization(#[from] SerializationError), /// The remote peer offered a version older than our minimum version. #[error("Peer offered obsolete version: {0:?}")] diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs index cf495311efb..632591f4cb3 100644 --- a/zebra-state/src/error.rs +++ b/zebra-state/src/error.rs @@ -220,13 +220,13 @@ pub enum ValidateContextError { height: Option, }, - #[error("error updating a note commitment tree")] + #[error("error updating a note commitment tree: {0}")] NoteCommitmentTreeError(#[from] zebra_chain::parallel::tree::NoteCommitmentTreeError), - #[error("error building the history tree")] + #[error("error building the history tree: {0}")] HistoryTreeError(#[from] Arc), - #[error("block contains an invalid commitment")] + #[error("block contains an invalid commitment: {0}")] InvalidBlockCommitment(#[from] block::CommitmentError), #[error( diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index 6e634717503..68d29aadcaf 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -115,16 +115,16 @@ pub enum TransactionDownloadVerifyError { #[error("transaction is already in state")] InState, - #[error("error in state service")] + #[error("error in state service: {0}")] StateError(#[source] CloneError), - #[error("error downloading transaction")] + #[error("error downloading transaction: {0}")] DownloadFailed(#[source] CloneError), #[error("transaction download / verification was cancelled")] Cancelled, - #[error("transaction did not pass consensus validation")] + #[error("transaction did not pass consensus validation: {0}")] Invalid(#[from] zebra_consensus::error::TransactionError), } 
diff --git a/zebrad/src/components/mempool/error.rs b/zebrad/src/components/mempool/error.rs index c70ca56cbc6..d27c78b6da3 100644 --- a/zebrad/src/components/mempool/error.rs +++ b/zebrad/src/components/mempool/error.rs @@ -23,7 +23,9 @@ pub enum MempoolError { /// /// Note that the mempool caches this error. See [`super::storage::Storage`] /// for more details. - #[error("the transaction will be rejected from the mempool until the next chain tip block")] + #[error( + "the transaction will be rejected from the mempool until the next chain tip block: {0}" + )] StorageExactTip(#[from] ExactTipRejectionError), /// Transaction rejected based on its effects (spends, outputs, transaction @@ -33,7 +35,7 @@ pub enum MempoolError { /// /// Note that the mempool caches this error. See [`super::storage::Storage`] /// for more details. - #[error("any transaction with the same effects will be rejected from the mempool until the next chain tip block")] + #[error("any transaction with the same effects will be rejected from the mempool until the next chain tip block: {0}")] StorageEffectsTip(#[from] SameEffectsTipRejectionError), /// Transaction rejected based on its effects (spends, outputs, transaction @@ -44,7 +46,7 @@ pub enum MempoolError { /// /// Note that the mempool caches this error. See [`super::storage::Storage`] /// for more details. 
- #[error("any transaction with the same effects will be rejected from the mempool until a chain reset")] + #[error("any transaction with the same effects will be rejected from the mempool until a chain reset: {0}")] StorageEffectsChain(#[from] SameEffectsChainRejectionError), /// Transaction rejected because the mempool already contains another diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index ce6f09cf1d6..be7cbc9593f 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -55,7 +55,7 @@ pub(crate) const MAX_EVICTION_MEMORY_ENTRIES: usize = 40_000; #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] #[allow(dead_code)] pub enum ExactTipRejectionError { - #[error("transaction did not pass consensus validation")] + #[error("transaction did not pass consensus validation: {0}")] FailedVerification(#[from] zebra_consensus::error::TransactionError), } From bd122b6f7cfd15fe377d3b1ad402389c04568ae7 Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 5 Dec 2024 16:06:17 +0100 Subject: [PATCH 033/245] add(consensus): Check consensus branch ids in tx verifier (#9063) * Add a consensus branch id check to tx verifier * Allow updating tx network upgrades * Fix unit tests for txs * Remove `println` * Move test-only tx methods out of the default impl * Add a test for checking consensus branch ids * Simplify some tests * Docs formatting * Update zebra-consensus/src/transaction/check.rs Co-authored-by: Conrado Gouvea * Add `effectiveVersion` to txs * Refactor the consensus branch ID check * Update zebra-consensus/src/error.rs Co-authored-by: Alfredo Garcia * Refactor the consensus branch ID check * Remove `effective_version` * Refactor tests for consensus branch ID check --------- Co-authored-by: Conrado Gouvea Co-authored-by: Alfredo Garcia --- zebra-chain/src/transaction.rs | 481 ++++++++++++----------- zebra-consensus/src/error.rs | 6 + 
zebra-consensus/src/transaction.rs | 1 + zebra-consensus/src/transaction/check.rs | 41 ++ zebra-consensus/src/transaction/tests.rs | 363 +++++++++++------ 5 files changed, 554 insertions(+), 338 deletions(-) diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 96b2378e273..1c121130fcc 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -324,7 +324,17 @@ impl Transaction { } } - /// Return the version of this transaction. + /// Returns the version of this transaction. + /// + /// Note that the returned version is equal to `effectiveVersion`, described in [§ 7.1 + /// Transaction Encoding and Consensus]: + /// + /// > `effectiveVersion` [...] is equal to `min(2, version)` when `fOverwintered = 0` and to + /// > `version` otherwise. + /// + /// Zebra handles the `fOverwintered` flag via the [`Self::is_overwintered`] method. + /// + /// [§ 7.1 Transaction Encoding and Consensus]: pub fn version(&self) -> u32 { match self { Transaction::V1 { .. } => 1, @@ -429,32 +439,6 @@ impl Transaction { } } - /// Modify the expiry height of this transaction. - /// - /// # Panics - /// - /// - if called on a v1 or v2 transaction - #[cfg(any(test, feature = "proptest-impl"))] - pub fn expiry_height_mut(&mut self) -> &mut block::Height { - match self { - Transaction::V1 { .. } | Transaction::V2 { .. } => { - panic!("v1 and v2 transactions are not supported") - } - Transaction::V3 { - ref mut expiry_height, - .. - } - | Transaction::V4 { - ref mut expiry_height, - .. - } - | Transaction::V5 { - ref mut expiry_height, - .. - } => expiry_height, - } - } - /// Get this transaction's network upgrade field, if any. /// This field is serialized as `nConsensusBranchId` ([7.1]). /// @@ -484,18 +468,6 @@ impl Transaction { } } - /// Modify the transparent inputs of this transaction, regardless of version. 
- #[cfg(any(test, feature = "proptest-impl"))] - pub fn inputs_mut(&mut self) -> &mut Vec { - match self { - Transaction::V1 { ref mut inputs, .. } => inputs, - Transaction::V2 { ref mut inputs, .. } => inputs, - Transaction::V3 { ref mut inputs, .. } => inputs, - Transaction::V4 { ref mut inputs, .. } => inputs, - Transaction::V5 { ref mut inputs, .. } => inputs, - } - } - /// Access the [`transparent::OutPoint`]s spent by this transaction's [`transparent::Input`]s. pub fn spent_outpoints(&self) -> impl Iterator + '_ { self.inputs() @@ -514,28 +486,6 @@ impl Transaction { } } - /// Modify the transparent outputs of this transaction, regardless of version. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn outputs_mut(&mut self) -> &mut Vec { - match self { - Transaction::V1 { - ref mut outputs, .. - } => outputs, - Transaction::V2 { - ref mut outputs, .. - } => outputs, - Transaction::V3 { - ref mut outputs, .. - } => outputs, - Transaction::V4 { - ref mut outputs, .. - } => outputs, - Transaction::V5 { - ref mut outputs, .. - } => outputs, - } - } - /// Returns `true` if this transaction has valid inputs for a coinbase /// transaction, that is, has a single input and it is a coinbase input /// (null prevout). @@ -943,27 +893,6 @@ impl Transaction { } } - /// Modify the [`orchard::ShieldedData`] in this transaction, - /// regardless of version. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn orchard_shielded_data_mut(&mut self) -> Option<&mut orchard::ShieldedData> { - match self { - Transaction::V5 { - orchard_shielded_data: Some(orchard_shielded_data), - .. - } => Some(orchard_shielded_data), - - Transaction::V1 { .. } - | Transaction::V2 { .. } - | Transaction::V3 { .. } - | Transaction::V4 { .. } - | Transaction::V5 { - orchard_shielded_data: None, - .. - } => None, - } - } - /// Iterate over the [`orchard::Action`]s in this transaction, if there are any, /// regardless of version. 
pub fn orchard_actions(&self) -> impl Iterator { @@ -1035,14 +964,6 @@ impl Transaction { .map_err(ValueBalanceError::Transparent) } - /// Modify the transparent output values of this transaction, regardless of version. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn output_values_mut(&mut self) -> impl Iterator> { - self.outputs_mut() - .iter_mut() - .map(|output| &mut output.value) - } - /// Returns the `vpub_old` fields from `JoinSplit`s in this transaction, /// regardless of version, in the order they appear in the transaction. /// @@ -1090,55 +1011,6 @@ impl Transaction { } } - /// Modify the `vpub_old` fields from `JoinSplit`s in this transaction, - /// regardless of version, in the order they appear in the transaction. - /// - /// See `output_values_to_sprout` for details. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn output_values_to_sprout_mut( - &mut self, - ) -> Box> + '_> { - match self { - // JoinSplits with Bctv14 Proofs - Transaction::V2 { - joinsplit_data: Some(joinsplit_data), - .. - } - | Transaction::V3 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new( - joinsplit_data - .joinsplits_mut() - .map(|joinsplit| &mut joinsplit.vpub_old), - ), - // JoinSplits with Groth16 Proofs - Transaction::V4 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new( - joinsplit_data - .joinsplits_mut() - .map(|joinsplit| &mut joinsplit.vpub_old), - ), - // No JoinSplits - Transaction::V1 { .. } - | Transaction::V2 { - joinsplit_data: None, - .. - } - | Transaction::V3 { - joinsplit_data: None, - .. - } - | Transaction::V4 { - joinsplit_data: None, - .. - } - | Transaction::V5 { .. } => Box::new(std::iter::empty()), - } - } - /// Returns the `vpub_new` fields from `JoinSplit`s in this transaction, /// regardless of version, in the order they appear in the transaction. 
/// @@ -1186,55 +1058,6 @@ impl Transaction { } } - /// Modify the `vpub_new` fields from `JoinSplit`s in this transaction, - /// regardless of version, in the order they appear in the transaction. - /// - /// See `input_values_from_sprout` for details. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn input_values_from_sprout_mut( - &mut self, - ) -> Box> + '_> { - match self { - // JoinSplits with Bctv14 Proofs - Transaction::V2 { - joinsplit_data: Some(joinsplit_data), - .. - } - | Transaction::V3 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new( - joinsplit_data - .joinsplits_mut() - .map(|joinsplit| &mut joinsplit.vpub_new), - ), - // JoinSplits with Groth Proofs - Transaction::V4 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new( - joinsplit_data - .joinsplits_mut() - .map(|joinsplit| &mut joinsplit.vpub_new), - ), - // No JoinSplits - Transaction::V1 { .. } - | Transaction::V2 { - joinsplit_data: None, - .. - } - | Transaction::V3 { - joinsplit_data: None, - .. - } - | Transaction::V4 { - joinsplit_data: None, - .. - } - | Transaction::V5 { .. } => Box::new(std::iter::empty()), - } - } - /// Return a list of sprout value balances, /// the changes in the transaction value pool due to each sprout `JoinSplit`. /// @@ -1331,35 +1154,6 @@ impl Transaction { ValueBalance::from_sapling_amount(sapling_value_balance) } - /// Modify the `value_balance` field from the `sapling::ShieldedData` in this transaction, - /// regardless of version. - /// - /// See `sapling_value_balance` for details. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn sapling_value_balance_mut(&mut self) -> Option<&mut Amount> { - match self { - Transaction::V4 { - sapling_shielded_data: Some(sapling_shielded_data), - .. - } => Some(&mut sapling_shielded_data.value_balance), - Transaction::V5 { - sapling_shielded_data: Some(sapling_shielded_data), - .. - } => Some(&mut sapling_shielded_data.value_balance), - Transaction::V1 { .. } - | Transaction::V2 { .. 
} - | Transaction::V3 { .. } - | Transaction::V4 { - sapling_shielded_data: None, - .. - } - | Transaction::V5 { - sapling_shielded_data: None, - .. - } => None, - } - } - /// Return the orchard value balance, the change in the transaction value /// pool due to [`orchard::Action`]s. /// @@ -1380,16 +1174,6 @@ impl Transaction { ValueBalance::from_orchard_amount(orchard_value_balance) } - /// Modify the `value_balance` field from the `orchard::ShieldedData` in this transaction, - /// regardless of version. - /// - /// See `orchard_value_balance` for details. - #[cfg(any(test, feature = "proptest-impl"))] - pub fn orchard_value_balance_mut(&mut self) -> Option<&mut Amount> { - self.orchard_shielded_data_mut() - .map(|shielded_data| &mut shielded_data.value_balance) - } - /// Returns the value balances for this transaction using the provided transparent outputs. pub(crate) fn value_balance_from_outputs( &self, @@ -1428,3 +1212,246 @@ impl Transaction { self.value_balance_from_outputs(&outputs_from_utxos(utxos.clone())) } } + +#[cfg(any(test, feature = "proptest-impl"))] +impl Transaction { + /// Updates the [`NetworkUpgrade`] for this transaction. + /// + /// ## Notes + /// + /// - Updating the network upgrade for V1, V2, V3 and V4 transactions is not possible. + pub fn update_network_upgrade(&mut self, nu: NetworkUpgrade) -> Result<(), &str> { + match self { + Transaction::V1 { .. } + | Transaction::V2 { .. } + | Transaction::V3 { .. } + | Transaction::V4 { .. } => Err( + "Updating the network upgrade for V1, V2, V3 and V4 transactions is not possible.", + ), + Transaction::V5 { + ref mut network_upgrade, + .. + } => { + *network_upgrade = nu; + Ok(()) + } + } + } + + /// Modify the expiry height of this transaction. + /// + /// # Panics + /// + /// - if called on a v1 or v2 transaction + pub fn expiry_height_mut(&mut self) -> &mut block::Height { + match self { + Transaction::V1 { .. } | Transaction::V2 { .. 
} => { + panic!("v1 and v2 transactions are not supported") + } + Transaction::V3 { + ref mut expiry_height, + .. + } + | Transaction::V4 { + ref mut expiry_height, + .. + } + | Transaction::V5 { + ref mut expiry_height, + .. + } => expiry_height, + } + } + + /// Modify the transparent inputs of this transaction, regardless of version. + pub fn inputs_mut(&mut self) -> &mut Vec { + match self { + Transaction::V1 { ref mut inputs, .. } => inputs, + Transaction::V2 { ref mut inputs, .. } => inputs, + Transaction::V3 { ref mut inputs, .. } => inputs, + Transaction::V4 { ref mut inputs, .. } => inputs, + Transaction::V5 { ref mut inputs, .. } => inputs, + } + } + + /// Modify the `value_balance` field from the `orchard::ShieldedData` in this transaction, + /// regardless of version. + /// + /// See `orchard_value_balance` for details. + pub fn orchard_value_balance_mut(&mut self) -> Option<&mut Amount> { + self.orchard_shielded_data_mut() + .map(|shielded_data| &mut shielded_data.value_balance) + } + + /// Modify the `value_balance` field from the `sapling::ShieldedData` in this transaction, + /// regardless of version. + /// + /// See `sapling_value_balance` for details. + pub fn sapling_value_balance_mut(&mut self) -> Option<&mut Amount> { + match self { + Transaction::V4 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Some(&mut sapling_shielded_data.value_balance), + Transaction::V5 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Some(&mut sapling_shielded_data.value_balance), + Transaction::V1 { .. } + | Transaction::V2 { .. } + | Transaction::V3 { .. } + | Transaction::V4 { + sapling_shielded_data: None, + .. + } + | Transaction::V5 { + sapling_shielded_data: None, + .. + } => None, + } + } + + /// Modify the `vpub_new` fields from `JoinSplit`s in this transaction, + /// regardless of version, in the order they appear in the transaction. + /// + /// See `input_values_from_sprout` for details. 
+ pub fn input_values_from_sprout_mut( + &mut self, + ) -> Box> + '_> { + match self { + // JoinSplits with Bctv14 Proofs + Transaction::V2 { + joinsplit_data: Some(joinsplit_data), + .. + } + | Transaction::V3 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new( + joinsplit_data + .joinsplits_mut() + .map(|joinsplit| &mut joinsplit.vpub_new), + ), + // JoinSplits with Groth Proofs + Transaction::V4 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new( + joinsplit_data + .joinsplits_mut() + .map(|joinsplit| &mut joinsplit.vpub_new), + ), + // No JoinSplits + Transaction::V1 { .. } + | Transaction::V2 { + joinsplit_data: None, + .. + } + | Transaction::V3 { + joinsplit_data: None, + .. + } + | Transaction::V4 { + joinsplit_data: None, + .. + } + | Transaction::V5 { .. } => Box::new(std::iter::empty()), + } + } + + /// Modify the `vpub_old` fields from `JoinSplit`s in this transaction, + /// regardless of version, in the order they appear in the transaction. + /// + /// See `output_values_to_sprout` for details. + pub fn output_values_to_sprout_mut( + &mut self, + ) -> Box> + '_> { + match self { + // JoinSplits with Bctv14 Proofs + Transaction::V2 { + joinsplit_data: Some(joinsplit_data), + .. + } + | Transaction::V3 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new( + joinsplit_data + .joinsplits_mut() + .map(|joinsplit| &mut joinsplit.vpub_old), + ), + // JoinSplits with Groth16 Proofs + Transaction::V4 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new( + joinsplit_data + .joinsplits_mut() + .map(|joinsplit| &mut joinsplit.vpub_old), + ), + // No JoinSplits + Transaction::V1 { .. } + | Transaction::V2 { + joinsplit_data: None, + .. + } + | Transaction::V3 { + joinsplit_data: None, + .. + } + | Transaction::V4 { + joinsplit_data: None, + .. + } + | Transaction::V5 { .. } => Box::new(std::iter::empty()), + } + } + + /// Modify the transparent output values of this transaction, regardless of version. 
+ pub fn output_values_mut(&mut self) -> impl Iterator> { + self.outputs_mut() + .iter_mut() + .map(|output| &mut output.value) + } + + /// Modify the [`orchard::ShieldedData`] in this transaction, + /// regardless of version. + pub fn orchard_shielded_data_mut(&mut self) -> Option<&mut orchard::ShieldedData> { + match self { + Transaction::V5 { + orchard_shielded_data: Some(orchard_shielded_data), + .. + } => Some(orchard_shielded_data), + + Transaction::V1 { .. } + | Transaction::V2 { .. } + | Transaction::V3 { .. } + | Transaction::V4 { .. } + | Transaction::V5 { + orchard_shielded_data: None, + .. + } => None, + } + } + + /// Modify the transparent outputs of this transaction, regardless of version. + pub fn outputs_mut(&mut self) -> &mut Vec { + match self { + Transaction::V1 { + ref mut outputs, .. + } => outputs, + Transaction::V2 { + ref mut outputs, .. + } => outputs, + Transaction::V3 { + ref mut outputs, .. + } => outputs, + Transaction::V4 { + ref mut outputs, .. + } => outputs, + Transaction::V5 { + ref mut outputs, .. 
+ } => outputs, + } + } +} diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index b0c867fc148..ac7e339eb55 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -241,6 +241,12 @@ pub enum TransactionError { )] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] Zip317(#[from] zebra_chain::transaction::zip317::Error), + + #[error("transaction uses an incorrect consensus branch id")] + WrongConsensusBranchId, + + #[error("wrong tx format: tx version is ≥ 5, but `nConsensusBranchId` is missing")] + MissingConsensusBranchId, } impl From for TransactionError { diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index a3729b0a280..ef20881bbbf 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -380,6 +380,7 @@ where // Do quick checks first check::has_inputs_and_outputs(&tx)?; check::has_enough_orchard_flags(&tx)?; + check::consensus_branch_id(&tx, req.height(), &network)?; // Validate the coinbase input consensus rules if req.is_mempool() && tx.is_coinbase() { diff --git a/zebra-consensus/src/transaction/check.rs b/zebra-consensus/src/transaction/check.rs index 66e3d0be595..d3ddc460264 100644 --- a/zebra-consensus/src/transaction/check.rs +++ b/zebra-consensus/src/transaction/check.rs @@ -495,3 +495,44 @@ pub fn tx_transparent_coinbase_spends_maturity( Ok(()) } + +/// Checks the `nConsensusBranchId` field. +/// +/// # Consensus +/// +/// ## [7.1.2 Transaction Consensus Rules] +/// +/// > [**NU5** onward] If `effectiveVersion` ≥ 5, the `nConsensusBranchId` field **MUST** match the +/// > consensus branch ID used for SIGHASH transaction hashes, as specified in [ZIP-244]. +/// +/// ### Notes +/// +/// - When deserializing transactions, Zebra converts the `nConsensusBranchId` into +/// [`NetworkUpgrade`]. +/// +/// - The values returned by [`Transaction::version`] match `effectiveVersion` so we use them in +/// place of `effectiveVersion`. 
More details in [`Transaction::version`]. +/// +/// [ZIP-244]: +/// [7.1.2 Transaction Consensus Rules]: +pub fn consensus_branch_id( + tx: &Transaction, + height: Height, + network: &Network, +) -> Result<(), TransactionError> { + let current_nu = NetworkUpgrade::current(network, height); + + if current_nu < NetworkUpgrade::Nu5 || tx.version() < 5 { + return Ok(()); + } + + let Some(tx_nu) = tx.network_upgrade() else { + return Err(TransactionError::MissingConsensusBranchId); + }; + + if tx_nu != current_nu { + return Err(TransactionError::WrongConsensusBranchId); + } + + Ok(()) +} diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index d42bbb8594c..8627a578c62 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -6,6 +6,7 @@ use std::{collections::HashMap, sync::Arc}; use chrono::{DateTime, TimeZone, Utc}; use color_eyre::eyre::Report; +use futures::{FutureExt, TryFutureExt}; use halo2::pasta::{group::ff::PrimeField, pallas}; use tower::{buffer::Buffer, service_fn, ServiceExt}; @@ -1002,58 +1003,47 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { } #[test] -fn v5_transaction_is_accepted_after_nu5_activation_mainnet() { - v5_transaction_is_accepted_after_nu5_activation_for_network(Network::Mainnet) -} - -#[test] -fn v5_transaction_is_accepted_after_nu5_activation_testnet() { - v5_transaction_is_accepted_after_nu5_activation_for_network(Network::new_default_testnet()) -} - -fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) { +fn v5_transaction_is_accepted_after_nu5_activation() { let _init_guard = zebra_test::init(); - zebra_test::MULTI_THREADED_RUNTIME.block_on(async { - let nu5 = NetworkUpgrade::Nu5; - let nu5_activation_height = nu5 - .activation_height(&network) - .expect("NU5 activation height is specified"); - let blocks = network.block_iter(); - let state_service = service_fn(|_| async { unreachable!("Service should 
not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); + for network in Network::iter() { + zebra_test::MULTI_THREADED_RUNTIME.block_on(async { + let nu5_activation_height = NetworkUpgrade::Nu5 + .activation_height(&network) + .expect("NU5 activation height is specified"); - let mut transaction = fake_v5_transactions_for_network(&network, blocks) - .next_back() - .expect("At least one fake V5 transaction in the test vectors"); - if transaction - .expiry_height() - .expect("V5 must have expiry_height") - < nu5_activation_height - { - let expiry_height = transaction.expiry_height_mut(); - *expiry_height = nu5_activation_height; - } + let state = service_fn(|_| async { unreachable!("Service should not be called") }); - let expected_hash = transaction.unmined_id(); - let expiry_height = transaction - .expiry_height() - .expect("V5 must have expiry_height"); + let mut tx = fake_v5_transactions_for_network(&network, network.block_iter()) + .next_back() + .expect("At least one fake V5 transaction in the test vectors"); - let result = verifier - .oneshot(Request::Block { - transaction: Arc::new(transaction), - known_utxos: Arc::new(HashMap::new()), - height: expiry_height, - time: DateTime::::MAX_UTC, - }) - .await; + if tx.expiry_height().expect("V5 must have expiry_height") < nu5_activation_height { + *tx.expiry_height_mut() = nu5_activation_height; + tx.update_network_upgrade(NetworkUpgrade::Nu5) + .expect("updating the network upgrade for a V5 tx should succeed"); + } - assert_eq!( - result.expect("unexpected error response").tx_id(), - expected_hash - ); - }) + let expected_hash = tx.unmined_id(); + let expiry_height = tx.expiry_height().expect("V5 must have expiry_height"); + + let verification_result = Verifier::new_for_tests(&network, state) + .oneshot(Request::Block { + transaction: Arc::new(tx), + known_utxos: Arc::new(HashMap::new()), + height: expiry_height, + time: DateTime::::MAX_UTC, + }) + .await; + + assert_eq!( + 
verification_result + .expect("successful verification") + .tx_id(), + expected_hash + ); + }); + } } /// Test if V4 transaction with transparent funds is accepted. @@ -1872,7 +1862,13 @@ async fn v5_coinbase_transaction_expiry_height() { *new_transaction.expiry_height_mut() = new_expiry_height; - let result = verifier + // Setting the new expiry height as the block height will activate NU6, so we need to set NU6 + // for the tx as well. + new_transaction + .update_network_upgrade(NetworkUpgrade::Nu6) + .expect("updating the network upgrade for a V5 tx should succeed"); + + let verification_result = verifier .clone() .oneshot(Request::Block { transaction: Arc::new(new_transaction.clone()), @@ -1883,7 +1879,9 @@ async fn v5_coinbase_transaction_expiry_height() { .await; assert_eq!( - result.expect("unexpected error response").tx_id(), + verification_result + .expect("successful verification") + .tx_id(), new_transaction.unmined_id() ); } @@ -1941,22 +1939,18 @@ async fn v5_transaction_with_too_low_expiry_height() { ); } -/// Tests if a non-coinbase V5 transaction with an expiry height exceeding the -/// maximum is rejected. +/// Tests if a non-coinbase V5 transaction with an expiry height exceeding the maximum is rejected. 
#[tokio::test] async fn v5_transaction_with_exceeding_expiry_height() { - let state_service = - service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new_for_tests(&Network::Mainnet, state_service); + let state = service_fn(|_| async { unreachable!("State service should not be called") }); - let block_height = block::Height::MAX; + let height_max = block::Height::MAX; - let fund_height = (block_height - 1).expect("fake source fund block height is too small"); let (input, output, known_utxos) = mock_transparent_transfer( - fund_height, + height_max.previous().expect("valid height"), true, 0, - Amount::try_from(1).expect("invalid value"), + Amount::try_from(1).expect("valid amount"), ); // This expiry height exceeds the maximum defined by the specification. @@ -1970,25 +1964,27 @@ async fn v5_transaction_with_exceeding_expiry_height() { expiry_height, sapling_shielded_data: None, orchard_shielded_data: None, - network_upgrade: NetworkUpgrade::Nu5, + network_upgrade: NetworkUpgrade::Nu6, }; - let result = verifier + let transaction_hash = transaction.hash(); + + let verification_result = Verifier::new_for_tests(&Network::Mainnet, state) .oneshot(Request::Block { - transaction: Arc::new(transaction.clone()), + transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), - height: block_height, + height: height_max, time: DateTime::::MAX_UTC, }) .await; assert_eq!( - result, + verification_result, Err(TransactionError::MaximumExpiryHeight { expiry_height, is_coinbase: false, - block_height, - transaction_hash: transaction.hash(), + block_height: height_max, + transaction_hash, }) ); } @@ -2105,59 +2101,49 @@ async fn v5_transaction_with_transparent_transfer_is_rejected_by_the_script() { /// Test if V5 transaction with an internal double spend of transparent funds is rejected. 
#[tokio::test] async fn v5_transaction_with_conflicting_transparent_spend_is_rejected() { - let network = Network::Mainnet; - let network_upgrade = NetworkUpgrade::Nu5; - - let canopy_activation_height = NetworkUpgrade::Canopy - .activation_height(&network) - .expect("Canopy activation height is specified"); - - let transaction_block_height = - (canopy_activation_height + 10).expect("transaction block height is too large"); - - let fake_source_fund_height = - (transaction_block_height - 1).expect("fake source fund block height is too small"); + for network in Network::iter() { + let canopy_activation_height = NetworkUpgrade::Canopy + .activation_height(&network) + .expect("Canopy activation height is specified"); - // Create a fake transparent transfer that should succeed - let (input, output, known_utxos) = mock_transparent_transfer( - fake_source_fund_height, - true, - 0, - Amount::try_from(1).expect("invalid value"), - ); + let height = (canopy_activation_height + 10).expect("valid height"); - // Create a V4 transaction - let transaction = Transaction::V5 { - inputs: vec![input.clone(), input.clone()], - outputs: vec![output], - lock_time: LockTime::Height(block::Height(0)), - expiry_height: (transaction_block_height + 1).expect("expiry height is too large"), - sapling_shielded_data: None, - orchard_shielded_data: None, - network_upgrade, - }; + // Create a fake transparent transfer that should succeed + let (input, output, known_utxos) = mock_transparent_transfer( + height.previous().expect("valid height"), + true, + 0, + Amount::try_from(1).expect("valid amount"), + ); - let state_service = - service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); + let transaction = Transaction::V5 { + inputs: vec![input.clone(), input.clone()], + outputs: vec![output], + lock_time: LockTime::Height(block::Height(0)), + expiry_height: height.next().expect("valid height"), + 
sapling_shielded_data: None, + orchard_shielded_data: None, + network_upgrade: NetworkUpgrade::Canopy, + }; - let result = verifier - .oneshot(Request::Block { - transaction: Arc::new(transaction), - known_utxos: Arc::new(known_utxos), - height: transaction_block_height, - time: DateTime::<Utc>::MAX_UTC, - }) - .await; + let state = service_fn(|_| async { unreachable!("State service should not be called") }); - let expected_outpoint = input.outpoint().expect("Input should have an outpoint"); + let verification_result = Verifier::new_for_tests(&network, state) + .oneshot(Request::Block { + transaction: Arc::new(transaction), + known_utxos: Arc::new(known_utxos), + height, + time: DateTime::<Utc>::MAX_UTC, + }) + .await; - assert_eq!( - result, - Err(TransactionError::DuplicateTransparentSpend( - expected_outpoint - )) - ); + assert_eq!( + verification_result, + Err(TransactionError::DuplicateTransparentSpend( + input.outpoint().expect("Input should have an outpoint") + )) + ); + } } /// Test if signed V4 transaction with a dummy [`sprout::JoinSplit`] is accepted. @@ -2577,6 +2563,161 @@ fn v5_with_duplicate_orchard_action() { }); } +/// Checks that the tx verifier handles consensus branch ids in V5 txs correctly. +#[tokio::test] +async fn v5_consensus_branch_ids() { + let mut state = MockService::build().for_unit_tests(); + + let (input, output, known_utxos) = mock_transparent_transfer( + Height(1), + true, + 0, + Amount::try_from(10001).expect("valid amount"), + ); + + let known_utxos = Arc::new(known_utxos); + + // NU5 is the first network upgrade that supports V5 txs. + let mut network_upgrade = NetworkUpgrade::Nu5; + + let mut tx = Transaction::V5 { + inputs: vec![input], + outputs: vec![output], + lock_time: LockTime::unlocked(), + expiry_height: Height::MAX_EXPIRY_HEIGHT, + sapling_shielded_data: None, + orchard_shielded_data: None, + network_upgrade, + }; + + let outpoint = match tx.inputs()[0] { + transparent::Input::PrevOut { outpoint, .. 
} => outpoint, + transparent::Input::Coinbase { .. } => panic!("requires a non-coinbase transaction"), + }; + + for network in Network::iter() { + let verifier = Buffer::new(Verifier::new_for_tests(&network, state.clone()), 10); + + while let Some(next_nu) = network_upgrade.next_upgrade() { + // Check an outdated network upgrade. + let height = next_nu.activation_height(&network).expect("height"); + + let block_req = verifier + .clone() + .oneshot(Request::Block { + transaction: Arc::new(tx.clone()), + known_utxos: known_utxos.clone(), + // The consensus branch ID of the tx is outdated for this height. + height, + time: DateTime::<Utc>::MAX_UTC, + }) + .map_err(|err| *err.downcast().expect("`TransactionError` type")); + + let mempool_req = verifier + .clone() + .oneshot(Request::Mempool { + transaction: tx.clone().into(), + // The consensus branch ID of the tx is outdated for this height. + height, + }) + .map_err(|err| *err.downcast().expect("`TransactionError` type")); + + let (block_rsp, mempool_rsp) = futures::join!(block_req, mempool_req); + + assert_eq!(block_rsp, Err(TransactionError::WrongConsensusBranchId)); + assert_eq!(mempool_rsp, Err(TransactionError::WrongConsensusBranchId)); + + // Check the currently supported network upgrade. + let height = network_upgrade.activation_height(&network).expect("height"); + + let block_req = verifier + .clone() + .oneshot(Request::Block { + transaction: Arc::new(tx.clone()), + known_utxos: known_utxos.clone(), + // The consensus branch ID of the tx is supported by this height. + height, + time: DateTime::<Utc>::MAX_UTC, + }) + .map_ok(|rsp| rsp.tx_id()) + .map_err(|e| format!("{e}")); + + let mempool_req = verifier + .clone() + .oneshot(Request::Mempool { + transaction: tx.clone().into(), + // The consensus branch ID of the tx is supported by this height. 
+ height, + }) + .map_ok(|rsp| rsp.tx_id()) + .map_err(|e| format!("{e}")); + + let state_req = async { + state + .expect_request(zebra_state::Request::UnspentBestChainUtxo(outpoint)) + .map(|r| { + r.respond(zebra_state::Response::UnspentBestChainUtxo( + known_utxos.get(&outpoint).map(|utxo| utxo.utxo.clone()), + )) + }) + .await; + + state + .expect_request_that(|req| { + matches!( + req, + zebra_state::Request::CheckBestChainTipNullifiersAndAnchors(_) + ) + }) + .map(|r| { + r.respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors) + }) + .await; + }; + + let (block_rsp, mempool_rsp, _) = futures::join!(block_req, mempool_req, state_req); + let txid = tx.unmined_id(); + + assert_eq!(block_rsp, Ok(txid)); + assert_eq!(mempool_rsp, Ok(txid)); + + // Check a network upgrade that Zebra doesn't support yet. + tx.update_network_upgrade(next_nu) + .expect("V5 txs support updating NUs"); + + let height = network_upgrade.activation_height(&network).expect("height"); + + let block_req = verifier + .clone() + .oneshot(Request::Block { + transaction: Arc::new(tx.clone()), + known_utxos: known_utxos.clone(), + // The consensus branch ID of the tx is not supported by this height. + height, + time: DateTime::<Utc>::MAX_UTC, + }) + .map_err(|err| *err.downcast().expect("`TransactionError` type")); + + let mempool_req = verifier + .clone() + .oneshot(Request::Mempool { + transaction: tx.clone().into(), + // The consensus branch ID of the tx is not supported by this height. + height, + }) + .map_err(|err| *err.downcast().expect("`TransactionError` type")); + + let (block_rsp, mempool_rsp) = futures::join!(block_req, mempool_req); + + assert_eq!(block_rsp, Err(TransactionError::WrongConsensusBranchId)); + assert_eq!(mempool_rsp, Err(TransactionError::WrongConsensusBranchId)); + + // Shift the network upgrade for the next loop iteration. + network_upgrade = next_nu; + } + } +} + // Utility functions /// Create a mock transparent transfer to be included in a transaction. 
From 1cf249c665f7a3d270e5946a27d5e6ca23d9478b Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 5 Dec 2024 18:10:20 -0500 Subject: [PATCH 034/245] Updates checkpoints (#9071) --- .../src/checkpoint/main-checkpoints.txt | 118 +++++++++++ .../src/checkpoint/test-checkpoints.txt | 192 ++++++++++++++++++ 2 files changed, 310 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index efc721d885a..e43aede25c7 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -12344,3 +12344,121 @@ 2691206 00000000015ff7395bab470fe1bc13758db68653bbcddf26f4d967817bcb5f6c 2691606 0000000000b35f7d242f8456123e83718c33af77072ce6cf709bfb34cb59fd3f 2692006 0000000000cc6bed9807a23d703d66735a63d615a494c5131c7f7c9f1f464cdc +2692406 0000000001719294b9f2ab2da8b9053768b624df557374f07c6aaff98d4b46c8 +2692806 000000000130c43396cca8b6417f0901e18cd5a8bf1b3da623a0b61e5abcbce7 +2693206 0000000000c7e0584fe776819a5e55d3bc800f9246adc55bbc3f152260e52820 +2693606 000000000077da042b72562c73c56dcacecd051cc5f1879493124a874f1560b9 +2694006 00000000000654f35f3dfd10dbc58d0a802f51b0e115e7597ea5ac941f8763a8 +2694406 0000000001327f55a0fe270e62387634b2d2d520277819e8f754d220c81e3068 +2694806 00000000013fc817d8c8e775db362ec09f1db5b8e49ef828f777d92fc0a01374 +2695206 00000000005f46bb00499b12074e9f72068f54cd37a29b4b77f48830306e4a50 +2695606 00000000013c4e60f25e9598cb19ae88999fc37c6be9b8efabd63baf603eb705 +2696006 00000000002392197019860ce6454e1c350036deb1173221a63b41ddd697d1ff +2696406 00000000002ba32ef8303fbf0ae0c27b8efc8057326cf1c17a5bc1cb2e1c167f +2696806 000000000172dcf83e463c42e7182cb21ad88f45a05c2c2b215dc320aa64c901 +2697206 0000000000e8a861d3fc8b30cff546a9bf337f89fece6d7a2ae71bb96e9f03ba +2697606 00000000013239c916a0e7c570d74067acd9b3dc5b350a66792993ff482bf4b3 +2698006 000000000012c16a08201040d43d0e6eb48aeca4a813f282302eb373c42804cd +2698406 
0000000000d1548f3bae43987b247139e3b75de76246d14b44b21e6ab5bca052 +2698806 000000000146b5c34666c28501b95b72d253f241d7bd5a04d5c8ff01999e359d +2699206 00000000000c0578548ff76fed79afe9f84d97e590650904e62ee59b74db6f2c +2699606 00000000003e82ecd981993140693ea4a9cab188140e3d41626e8dfeccdf1ac0 +2700006 0000000000480d4924ad826fe000987e30c822f116f92d86518b489295b9df56 +2700406 0000000000a09f64f18d3e16588ccd7be4b7cc456af5830bb7645581b69b9fde +2700806 00000000011ce5fce0533f962ac3c1679ef02bf6ce74806e402560803e94a3b0 +2701206 0000000001019499edb3ec84cd89aeb63369261df6c2f5c87916b4e5a33be071 +2701606 0000000000c233d87ad99e5da004c720612056bdf95b88ab80fca69b29ed4677 +2702006 000000000028b54c7a711b408ea9bc83591827f9d8cfe6999beb1cdd3eaea073 +2702406 0000000000971036bc5760e7f18aff82be5b7745eb87f510e6395e88ad545273 +2702806 000000000027ca2a7d39436fb1b77d2ac7287343f947a273110810cea40925dc +2703206 0000000001422dc0ab6fde9670378abc2e2281c8b17d780a408f54b3d64ba535 +2703606 0000000000a5b138ef9ed9081d52b4a388b04945e97eebbd96706b39e6281cda +2704006 000000000188288e78ba15e6c7e09ef918e0fd5828304d503c751bf5a86c4a8e +2704406 0000000000da7d23dd4e750c56d1f1c7932ab07cf07c0105622ff70a422b234f +2704806 0000000000abd25ce416024d8c33fb9e4cc0815e43a74609a40f8847aaf02f86 +2705206 000000000138bd820a28a5191109b825996f52d9c30bf0ff95998205f5d68c67 +2705606 00000000016c7c5d0898e77f9f1bf4e0cdbe5fe94ee55321d2fff3be61195e6d +2706006 0000000000d28a4fe5275aa58b38730b49232d9f5db9b47f6f63f0e64177b083 +2706406 0000000000b8deedeb5f8733df0929bdd7030fb904e960845e6c5d04bafb9841 +2706806 00000000006cf18461d2d4c083b930c115c7f941856acbdbc98ea2c31f60ee2e +2707206 0000000000ae5ec2e20db9281d76047afda9670011e685691abdde3cae3e7fa5 +2707606 0000000000a44f9dc170936b47fc7b73a05e09b6505bbe4248d1f3444474f395 +2708006 00000000000c4bb2c4b279216d4669cc4a874cfa02cfcb49621277fac12d5654 +2708406 0000000000ceb61df7931a951875a5e4073480d58302aa2e4edb8b2eacf1fb45 +2708806 000000000096a4592dd90842479c001d8582f33becac44e1bdd5517490b18215 +2709206 
000000000077f8d5905c37fa58894d44fa5103627b2dd19cb4a10143c0863986 +2709606 0000000000a022191870eaad22ef715e6500236c758888f6c07529bdf113a881 +2710006 0000000000f2ea45e0ad6cf78d4ac3dd99f3ab81bf3980fcc88059060d0f268d +2710406 0000000000aeaebdfca9b678fef0d67ec844c78d33f1386f3a8d30af4ee2ea65 +2710806 00000000011730e525ea25329e3e02893cdc1d38d35a52dcc4b765717fd0221f +2711206 00000000002e7fd21932cfd2e96586b71f7cccacf725261eaad61ec786b90b8f +2711606 000000000148c6852a28a12fe446477917a43a920c8344cdabc8eec3b0764d40 +2712006 0000000000746066e4338ae28389d8375fc26c15594dd10c1f7fb12553223cbe +2712406 0000000000cf64f0adeb4bc28a5a8fbf1628e84335a85185771ca804cd96c6cd +2712806 0000000000b80c770d2aa384731b79a71e1954db52d96a3de92342a48a93c8ee +2713206 0000000000e96bc60394746637a86c9d5839e4f9a9e8786fe50313f2b171dab5 +2713606 0000000000c14f2332e959fbc97e5fe4758923ffe7c767e292b47560336f86b5 +2714006 0000000000836341146e09e9906d9dfa380f07faf9b613709795374294c7fe87 +2714406 000000000082b4a0d76e938a33622aceca82f42b3136dc040b2c9d5937695633 +2714806 0000000001043da14f2fa881a8094bbc28b93d14ac7430b58ea36d4cb6b293f8 +2715206 0000000000bc0fee420372d6689c3cfa5ae13717d1f39c22796f2535f1a73a9e +2715606 00000000005a3d84d757000163776e4618423c437f1eab6ce1b2ff6bed7078cd +2716006 00000000014409956b84dde91e429c1be89e3cad8ca0ef2dcccb780b7d6002c6 +2716406 00000000012f4ebcb2c8e3deec9c14394f5e45ecb27cdde7fa759309529a5cca +2716806 000000000167358bb721422b1a88085186970ba1971d5bfdf4740ebe032f5ae9 +2717206 000000000077558bd54cf80e3ab6f0de23bb03a1c445edb050f75f56412477c5 +2717606 00000000002037120b696cc8596e9fc72dbd18d9b408abf7c29565e9e55c54ff +2718006 000000000072f423f72b51da55941c9133539b558ed9b44ce16c2f9baf462ced +2718406 000000000057e13e5699aa6796d2ca239c246605e7613972ccad0c13fefa1581 +2718806 000000000050dd5a261fdb9c635c2a44898a2323644eb5c74f055ab7bef96eae +2719206 00000000009b16918a52fef42ebe424986e239e00f12a46f788b5011fda21a01 +2719606 00000000007e5b53a638f464b402d4b53f1792af7413ef03649c35adcce31255 +2720006 
00000000017f5646a8ea5eeec64fc9e52ce39196e98ee99ba718104fdbf3c21d +2720406 0000000000344133ec5c445b15a0ff6d1ece61b5e652445339124aec5d96f76a +2720806 0000000001158b39b245a964dfccec47d729b1c490bdc31588daa676d6ff08bc +2721206 00000000013fc4edf50ed74fc9ad68b4538eece10d39defec750ccbb084f6898 +2721606 0000000001b06cb25d6366c132893aeffb1ad79dcde6483db86e4d326ef288bb +2722006 0000000001207548c359ac039814559bf1e904e119d87216873274007c81694a +2722406 00000000010381376305bc609c15609e47a620cd6daac3479c993923a0da52c8 +2722806 0000000000547bebf1fbdbc32ab4816d9e1f1156bee60000c5e409713818afe5 +2723206 0000000001d918552a17534809df3a3c5225ff2d744c602e8f8e15f073bc0090 +2723606 00000000005c9c3cacb8a20277e75d3dfb07f4aa6e57ed0ad882432b7fac2759 +2724006 0000000000254615bba1b7562cb6a9ec71eb37942896b90cb59094e84fd1b8b5 +2724406 00000000008304748a8e8cbd37bd35925922fef221f2e89b94ffca2f8dc869ba +2724806 0000000000748a0f98d8e9a2142cd3933215bb988fda7252f9142cd841a4ef7e +2725206 000000000100c7908579b0a84e8256800606f22d7bf9360a174710032f64e9ef +2725606 0000000000368a867e4e2e1de339421317a37559fb21aba48b9115728f248680 +2726006 0000000000417a6004bdb7425ce22afae17a36ceb07ea24f3adbeb24bff65588 +2726406 000000000043e19681c7f3615a2a0a4e8b2f9dd55e122a5f7951c2f0afc97296 +2726806 00000000009e59eaac7bf02a11b2cdc43e19e69120248486b8e7e4075b2dd429 +2727206 00000000005e2adf49313bfea70c4156b1d4ae198f2eda562b3db705c32a20fc +2727606 00000000001db4277992d05a65af67a5bcbb1552657089591227a929d9966161 +2728006 0000000000b087e075e1e8dad82c1c8b67bb53e87da160e510b924e2f7988ec7 +2728406 00000000004b1a3ac42a6e78e13b3f7ba977751a859fb5f60968dbe2cf87e2fd +2728806 0000000000b79c00c6bb038b506333d8c2673f5a5d342fdafd937b20269cc8da +2729206 000000000038c2cd1b637bfa6b8b6f3c1696c4a87480d721c225dce9c6793fbb +2729606 0000000000772fe945a800db3e985ed6215abf00f3d69089153db01bb3e78033 +2730006 0000000000ad655ae30555a51f42e8d306bf85f05932419ea32a433e2e2f0033 +2730406 0000000000e623163acadd8dc93aae236c30986b968186580252b62a1f1eda42 +2730806 
0000000000c3b50862f9475e78188f26cd067270a732d2defd8447f0b772f41f +2731206 0000000000bd104088a1efce928751bfc5a30360e65fba79f5187aa54a62be8f +2731606 000000000089bbec3013f5dd80b18bcfff6867bd49e95025e4ecb53dc1a1172a +2732006 0000000000df481eece9a5c6ac62a139302a116fb51cf521caa83a0dfdb29cb3 +2732406 00000000004f232180969382def2c69441525316f8cdbd498a74125e40713f0c +2732806 000000000039b11c58d7ace91033e0ea23978923f9015306f6ea78f8ba28ecb0 +2733206 000000000008606d81858a5bab587aaa1c10372c58a49c9b628b0ee475bd9ffc +2733606 000000000069b4e86514b95def4d2dda4bca6c438b6ad6c387b6ed3d77a5e674 +2734006 00000000016ca8b02d5ab7d94541ca77aa049f97d040c97120085594a04d8c9e +2734406 00000000001ed80068b9b9a5a9390cae0ba8ce169c1a2b8bec6dde7da01165b0 +2734806 000000000151e6a3a7a81ec264a759254408177718d632f1106a1b29f14f834d +2735206 0000000001e4d3d4e374a03df338bc49e302f4787fb5c3dfbf193ccdaa5278cc +2735606 000000000050be3b222e0dc27e522eb25fe47602b6bda9b2c7c4dc6b62afb33f +2736006 00000000012c29f508369a766803b7d5cb1f320a19cddd81c740d9a13b299580 +2736406 00000000001f30ea9fabaf68e65215927c721d46cf7bcab73d821f0d0ff6f966 +2736806 000000000122add7a1ea562a2bb993caa8cf74b802c9c1162c1d5387327196d9 +2737206 0000000000041fa435da22c217c284bd09cf6d748d0b6ec2faa346405fee18b2 +2737606 0000000000a2b51c7bd9e62bf2d7bd7735c55c99a903109b535e0ba3c1bc2018 +2738006 0000000001624cac6147f134c18a698b8ed35d20e3db5be4cc7d33250fa024d1 +2738406 0000000000e0fce4549ce421eec68895d377bd63404846e1640b82a19d93164e +2738806 0000000000355369814abacc697c0ac355f938c32cf6b3cdfa47857a71685717 +2739206 000000000090219b6c7c7fcc5890c632ee9322f77c193239208bb74bac90df53 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index c1c591b9200..5108ce28405 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -7593,3 +7593,195 @@ 3036800 002aa078e4546a72e4c6290402924c0917c2080a4dd9e0f7e7e851e82bcf562c 3037200 
00613812406826a9bfa52508d602c8d14271c4dcf1cee107a9d2b8cff9ef51b5 3037600 00b758e58e0b8f8298a52e0246860ea833e6dc3ffb447d44474662c658b373c0 +3038000 002b275b2353025a6df5394427ed4674f0aef78aafdfa244e8ce6abac02f7dec +3038400 0060e905d468b737ed368651689bb7e4e2825504b25b7072da17b139cd6472b6 +3038800 00255bca8bed78bfed0acce0bc649d45c8b70cc91ee160e2571af7c17e3e5d49 +3039200 000b2b00897971a8025e2863ce37ca833ac6c65d2c9726c84ccf7f8df86bbfc3 +3039600 004dc1a6e654d9341e75cea6e2eff58195de7401cc1b133a6bc61b7561677ad0 +3040000 0025d5483bbed4f4f06cd5d4abbb90e130989d87e588835c31928c3966894534 +3040400 000ffb7239cf37ab16fe809012ab70f932a428dc4965f8faf6bb49ec1b98ced9 +3040800 0043fa33cdaf798be000cc176e960ae5f010597f6881437d0e82ba0f6c39bcb6 +3041200 0012f0dcddd47d78a9d3e19353a0850dfa06a809afe5b5cd489860e949d6b390 +3041600 000be374304e917b4d89a52d6bc8a9710981bf2cf9b2ad2a23eeb7999d4be09f +3042000 00026e6ae0f0dd1ced7cf683e87c814b8455e5ce9b01313a6bd0210114ad1f70 +3042400 00124b369ce53ebc43d2804c5eaf83ab5b15c1cb7a32d122954c8210fc3c80de +3042800 0027ee94859109c8cb2e51f6b99116219ff851db25b2aa1d04b1d57254f4ac00 +3043200 002ce8dfdebc789253fc9c252980c415ca54c1ae2bf96a56060610eadddfd10b +3043600 00233625e84611ca0ec3015365d06e334b9fef2929d3299d56497d12d97f9350 +3044000 0017b85cb414f91afcd15dc5fe636e6ae2da455f37aad631b3815f14528b1f6c +3044400 0089710ff7f5451688ba1ef4754c97d19cf1160268d25f9d5fbe0215b3a0c8d6 +3044800 000c8787710e8b62cbc1e530cff52830246169ab43ddc6a5a81a1c29606087cc +3045200 0041b972c5a63138aa77dfccb896bb1f8f47d68aa709a10f05b89432cfde24dc +3045600 001f73a8c1eda7bbea0786d9a53939d86d97315ebb4105a06d5f94ebb857266d +3046000 0000086d005def50e042df7b4b7aa87b6e0b284eeaee960180fca34494f3be63 +3046400 00338754bf6141086307cc067eacef097b4806e72ade8bcf60b91aadd000700f +3046800 0004c463aeeaf583853d4ea4afcb8e7201cd81c09a23bbe7a883ab4ea1018be9 +3047200 001068978bd221fbe0735b4192f114408a56700f05fb42a2648428e86aae0b2c +3047600 00270f4b4a5254c1d12e2b2c63b8e3e6af8d192fbf06944d6cc43d0743f0401f +3048000 
003a9234f6dc8513d269412d4fdfd0dc82f21229adcff4f6e90e9f0eb9df625a +3048400 00774298d7fea2fb11f8e59ba3f5994ea7423619635c2e7d56f59d85f46b3807 +3048800 0063497b0d4eb27db72400c7caec94fabf047e0cd2cdc7b77957af2b985d4305 +3049200 001dffb8de633e7d9d49f495c27bdb9e80944d828c046404b77d67b3b4a35c11 +3049600 00a8f0c1268389b6fd94c91f2bffbf3245ded41069230f859ee46c1649cdfec2 +3050000 00287c032a0faf6ad198621c12a1a39c4f808cd8a86ec5b202800911226e7596 +3050400 00897717c241e74614639a15eaade8a94c53834ee7bb5520d9d237db2899a711 +3050800 000aa00670b75e356d19de9f98472f79087f9203db451e912bbee10d549e334d +3051200 00089364d1fd3bada465cbf752d47e4d5762fc18d7eaff9bf2179d46c4de3a90 +3051600 0000140a83613ba9871b621198fdf1823b4efe65ea92ea302941f0854bc24e8a +3052000 00029c4a8e994635a74e47990709a530a23589def9b62a457a6e6fc9ae506b52 +3052400 003dec3cbb59091b0a7cb89439b54bb8abf98240b998ad8f936ac85ad41050c9 +3052800 002d6c9c31e6981196aab65bc22a3a8e23c591b353b217b0d23b056b6b9f3f03 +3053200 00088878490efafabda8a968bebb5cb3440e4a81b8f9aed0e991c5ce63fc8f74 +3053600 000b9da54d53ea80616f33f789b570652da5af96e7153be8be51298e987c49fe +3054000 001cfe4680b003ce5c9ead4c408584b193be42e6daf67b37949120f6b0ce9aa7 +3054400 009cd5f6a5b5c7a79c5db5b8331bd79c40685d55ee63beb703c298c5420d9261 +3054800 000b6eccbb9f31eaad59d99548e47da914d88cf07c548b8767433debf15d8521 +3055200 00268f197db8b82627bf5fc8ea6b7ac034b63346c71877b795237f62f97c4136 +3055600 00051269be6258ba6771ee4baf14ff858d629997ca4f9aca1982aed220f765d2 +3056000 003706aa5b50e2aa9b3c3cfd9f8596ec442415ff7a84c172c18c8be29e796ae2 +3056400 00231455ba8aacc08362d49a5b6ce10ea6d58e84b00a8429cd10ccd3c3f2c8ac +3056800 000d8c25fa2cc7513f4271c5a9c46553a6aa613792f7910dee342a1e9ca2fa38 +3057200 0061a06b58b986335a9dc8bbe6ca00cb2c6c10f6a353c58f918dcc4a8ce85e2c +3057600 0001451d88c0b797bed1df42013c884696d688e76a59c3cc23508d78ff63877f +3058000 00093fe57c9e88dfb745e3ac9ab599fecda530397831fb1c2cd1d7a7b73bd0e7 +3058400 002c2bbf87de52c0e952e2ed4ed251dd781557318d2ffa52579029247543dbdd +3058800 
000be4cd26e21a47bc7e8c63ed52fdf43fac30c4daca0ec81a09d74e80110bf5 +3059200 00b3b55af59ff16a236ef59155d8b3a0629a070f36a929fc375231b3c7fa1b51 +3059600 0008b5cab206c1c031f86d3d4a850c8d9bd08f4922e63b18af8abb25abd18ba2 +3060000 00454b294ded8c84957a04818d2eb80cea14dc211b500566df144e9ce1d908f4 +3060400 007a2d891769b8ed6f622c478364f46aa705dd88ef5457b6567e74e4c13bc193 +3060800 002513a66017c010a03f3506659287cd8e8ff2504ba03cbd04c5817fb49e8a74 +3061200 001aa3722bed76457b12fb1160a1d938ef4667100b86dd127d72e3adac17a35a +3061600 0003c549263084826cdc44b4960e9b1b1fed3205d4e52166766018e3b1454ccf +3062000 00100bb1d4e862fb306a3f9c814206231e3be94d6be32a2de5bb81bab7333792 +3062400 0012c5fd7aedf90cd9724cb721035cbb8f950043b30b4b427e31818d78dc1231 +3062800 00243a998fdc6addef61aa7500935b5c908961902b02212d982ef79034965672 +3063200 000fb809f7485dcb4772d8829307e0eaad61bc9e9d4db96ba4b8a28b373c3188 +3063600 00000cfc63b29fb40ba9af2eb64072c36651376f0fa1117cb90e20a6774b0759 +3064000 000e6c5afbb3e4a9e799e19c638d8b81d5a765691b7513f4a655e7a4371010a5 +3064400 0023dbbf18837786a7e2e5febf3165460150196000531053984f7b664180ca52 +3064800 0000429258a5b5feda9cbf53be3db748625888a5f49b8621b7b752b212e50aad +3065200 0001648f1bbb0dd35b0837d9a1057feb00a60f2168f2741d8ea36d78b74ff323 +3065600 0024aa77cdb10bbd8db78dd3a241960f8e38a93a90a37ef1b19fd55361bc93fa +3066000 001bb179293a5b199f06bcf6559806e81cd66fc943630d9fbbcdc58f81f172ad +3066400 000ddc0731195b15357b33d0b005e7a3e847be357918e2e7c15c9eedfa1e5358 +3066800 00035d966707d7720e2443ca3c9ed3565eb78163a97abf7d9d782be8bd3ab47d +3067200 0035d2a57985db375da1b8f20a977804943c479c0082b1e6bbd1cefa20a90b9f +3067600 0010cdc0be90498944a22f2be25eda0748799547ff49c1f2f75ddcd05bce67af +3068000 0000f5fb08ccf621eb9700c38bce3fa6390283958033dbcdb4fee008c3d523fb +3068400 0000d20805c301427ef1ad3f4bfc8d8d6034a536793af9b2f7437b6799fd3dd8 +3068800 000e1a66398682e4bc3dbe2e8ccbe31e06d4d53bade3e819069b14880a7cebfe +3069200 000f62ddb00d2fe8f51d36e36037e186d169496ac445e938993d511a7283d664 +3069600 
000302254d59cd18325e60c0e612fb7fc8d77ff42253359568c0f192df767b13 +3070000 00057e5c43992673287bd6d01430da84d16248a3fd135a711d3dd3775da24345 +3070400 005fafa064e6266676b4f4c3c18296f1a296d2fe2e2b94852ecba8d7fb4a128d +3070800 001933baa29920ee136eab83bc4125fc0d5aa0ba2f0248bf9869fb589f125ae0 +3071200 000363e118285f938c8cfc07d1c3cf87ac0d4f3c94200cf977f2c12f3863bc14 +3071600 0021628326c18a40165b081c65d1789a94afe94eb0f36e2b711b19cf81630a16 +3072000 000f04047cda3a4d9e411f461c99ae3d18ffcc742f4d4356a8cc73a4f7b193e0 +3072400 0049d642a4d10e2467ab28811f9399987d27e5ff70c895b582ea63b88145f9fe +3072800 0013403fc552ee5f5a6ba9e8badde6637284a5445e507c139f9e5ece52627cab +3073200 0014953be26e316925d1a614c6180c92d83eca3981711315778114948df4ee74 +3073600 001072ee9bf464edc8dce5570f7d88bc1d5da7ca3efc8f6b70ed9be10024ed6c +3074000 0003e41ae24f76920f60479acf1daec054d0aa05ccde77e102d517646b2945d9 +3074400 0073f49a1fdd233d611e873f13a8a3e2a49d21955e1dc874c39fbd05b9968a57 +3074800 0003252836bb64c44721ff3bcb2344e3a95070fd06b1736f8e00d46b0bc979c0 +3075200 000da4574692c6079f6c8d9778bfdb3d9903048bd2a70b3126982fd9eb3a3acb +3075600 000d222bd083f9afb8554d13304826bf3cdf18d23a1792023a595ae3a3aff3ab +3076000 0031d8197b93b4de0a00fa083af96f03da6ab8a29d6011d18f903b2d11255a65 +3076400 0001186268fe57286c5179d2d08fd84106411e6046289e3a3dc6ed62839c50d7 +3076800 003b92c5c6535057fd0b1b27bc5f40bf0b61f2ee6c819b311e83b5e23c03734a +3077200 00225ee824d6215bdae39bf287348d6e1e9a006ed3969c7b9e567e5d2688dda1 +3077600 003437d51968b90c3193857ad7d91f24a0ead3f6a2c3f5413d188bf8ca83fdfb +3078000 0017b2825c2cbbdd28d7de69b7acca6a438d0e23645c334f6ab50aedab43c7b1 +3078400 0068b1ff49ef63d7546b9fb4a2ec45636c4ee79202410d68bbe9f69e79795256 +3078800 001a5f6d5c2db65bf3ca0008d7239320cd0433145d471beb1b02c7891ed45f9b +3079200 003b043bc941e75b0b4bc562cd3e3723a89d6b246e8fb2abea51275ee6db7dae +3079600 0009c550566b460f43fecd568e9150f9f0e6c762b87a9203fb36da5f2bb62b4e +3080000 002a90e74aafa829d2aeefe4f8572edddc7f1c121000c21f6f87aa373671687c +3080400 
0000640d26b6b09c765d9417687ae4df2919745079caf50cf2d3f069b4dea9d3 +3080800 001ae1bc5d111ddc3dc341430f311f47f32ad4e49587aad4ec195e76ce188c97 +3081200 001c77876de320502e3652e38ba88051ef2c44938b4a5cf052f09c1a8cb283c8 +3081600 0032c9d59ac0906940956d6842cefa7d9406ec8b98f8494352146fa6507643df +3082000 0006bcbd93ca1ff0c90300b16859b831b645f517468fe5ea4904b6d1d3ca2d73 +3082400 0016844ea05418d3f6314ad167ded7b6afd0aa68d82094148dd087a9eb2d8f7b +3082800 0017e6a00b6c3b4505fc10370fd07437050760ff62fc7d5bb98c53d21b8b874b +3083200 0015573f154381dca3314a1357ae588f5c19e07a6ab535b27d8ac7b87ab4f28e +3083600 0014a0275af4558cecc05dbbfe708482faeed2b7eec39ffef5eeed276e8631f7 +3084000 00012ccb75dbbd6458e42c61b8d8dcdcbe33fedc0f23d2738237fbaf678dcfa2 +3084400 00111b2337ba9555a0eb19799d0ac35c7b049fc25a18e352d33cbdb55bb2e8fe +3084800 0010f64a7300b21203af4d703645fafbcd69fc5a89564bb6a77494a46772b695 +3085200 0011387cbab55c2bc978c3c361d3e4d638bc8fcde83924bcf87735ca516ba9c3 +3085600 001f5b47365ac260636f49a43cdade89533d0598a0a68ade57b7a3adf8357cc0 +3086000 003794bcc161a9828e427565b897e606ba7f192db463448a28535f98e8e870d9 +3086400 0013610bc88787c11e097004411a942773725d6cc2e8931b5da4ddedcd58b24c +3086800 0008cf7a82b631c5c3af30c3268a1bde628c46cf7259e25707a4aac99e1d9afd +3087200 001631f4a41753094e4cbdfbae797f5849cfdc5390ef036781d4fc5c86b0d857 +3087600 0020955101b0cdb440516dddf98fa55a838f64eaa727956b7dc13e6faf96eaf4 +3088000 00294284c2319afa1d20a3d49d4d177ebc701430d0665e030e82df042beee9fa +3088400 0000091c84f7cdd7f0bb4c6bbb6dad96983cc7614e595f317683e3c99d4c7886 +3088800 0000083ab66d0981f2a4e18d8140ded717c6b3aba3f3f7661ac6f6c80f4613ef +3089200 0000008e429556fbb566b6e8ad11b4f1de74e46d30d3bb1451667c4065dcd544 +3089600 00001c788f1d29a6872edf4735760339f718bb3c715f3f7143252177079b6c9b +3090000 0000005a823524433c61fd1cbb2fede136319c2a8ff583ec0a7376fb00c10584 +3090400 000024764a8bfd61b38d5a6dc81d2cb8c3882ad78c295789dc6a1f828f61ccc3 +3090800 0000003b0197f74feeae442ade58f4557d7d9b6aa1ae8f7eb03982b4d378ce41 +3091200 
00000096f8478b95c02b168809a75566821115107e03fd08c184fbd9adc53aa4 +3091600 000e99c7e07dc5fd96258ab1d390e877d37e3c25a30cd9860c303f5aac081f97 +3092000 00790fdd32786461452cd2fd740b157717ff056fcdc91fa7e19027d16f6289e3 +3092400 00002b63a2f6ce1df3c7cb7485473a6467a890034e4bd4372124f2367c75e187 +3092800 0012cee344d0f0ef65573cf811fce6be9a23c54822d23e2c07f03f973c6abdf5 +3093200 0011756eee83f0545c479e7d7308578c076be9e9b1e54137aea9e20cc59d9068 +3093600 0007c7491fc9085b3f4b9484ad8718574992bf46c2bb081944c2c4ae5472e9b8 +3094000 000317a2a0a89506913115479aab1902cc7c67fdf9bc010ba484382fc6a19e2d +3094400 000b7f776ef1e89b609a459a74c89142fa41f0ed37233876b17d7300d74d7de4 +3094800 0020dfee5ec6734d2191b2f1645e22b6861a3716bc9092e7d11ba8f4376cccb0 +3095200 0002ac288cd2c643fe599f950f400ffea117ec67d7dd0de4616607802ec59b58 +3095600 000d32e4326fe09a3c3f55d2c97d5d41138106a47ba46cf7f89e26b0289820da +3096000 0017ffde279b7d98c25ad7972dea2b729d5d218ef7f7a457bf7c6b5f8fa1cf95 +3096400 0041ce2be54fda2aaa6498af0756223d6573e4929e7f433653c8078b0fe42849 +3096800 002f33fe3c301f15ee4283388ba9bbe559ce29c55920082b767be0769fd3badf +3097200 002eb04d80df1697893fdf5803720baa7da490c21cb279396f233002c4be16e7 +3097600 004051fc27ad8477b33085de1c01ca019a0d6768287bbb5a44a4d3481d7d3c0f +3098000 0014903099d44d1f8050cbace8bca99c004fc592ca94407ef584a15359921e54 +3098400 0026b3c712ac8346f07c4e6cc50818c7c1a0bc1e2f18d148f5cd3a1e4af6d513 +3098800 001e90fa87d94e2173e4113bfe5be4b4002e719160f9541c47015d05c85e7299 +3099200 0074ba5571808059b238f3ea60b22f1e76c229e90e13b8552254e545ce4abbf4 +3099600 0025ae185be2e354433554d914a51682611315b1a9dccdb65b037934a0200e04 +3100000 00472745b17d64bd1830f36128b3c6f8c0c9c519a367f00bd7995af941938e5f +3100400 01132001bef77bd3cc5cbeec0da01a62e4d8040cc58cde90773b3fdf69c9c64b +3100800 000cbf09b04fdf85ac76e5b4bfd8cd1e8f17826825df011e9e62ef690575304e +3101200 0016010211eb89b9da9a29a03f721bba5d30a68c4f7dd59f2eecc69b20fe4643 +3101600 005bbb618224113a7e38cd900c0295dcecda34bdcb0aea8a2ccc11554e931495 +3102000 
00146bc3a7c64c732ad305b19d7447896d94a859892b53216ae6d011e643df9f +3102400 0021222c072d8bb21abacaa0b3e5faa9217742f6d14726735aced621377b7fa2 +3102800 000125806f3ee943302bd1485bcd2a1c379fa363696cdb564a77ba6b7d9aef9e +3103200 0014d439e6a76d0471c456511dd7836ee11427531ae0fbfeca8316104a6d4b6a +3103600 00090f38ac393611ee9c04f650c1f0aa90fa826418dd305ac08c9d4b326594ad +3104000 001e003e0acb4d8afc667bb7b8accfae464560f8da99043dead8202e69aba8f2 +3104400 00215614a95083aaf58e024f994f772c1a58151e7a8692d521a0e29df9701d84 +3104800 0025b9017ad682750ee252e88681ffc997955e5e4fa5c3c3e343f39f35553478 +3105200 0035d201930fb8d0e727285931ccba51b7a4c2fdeeab87bf8c4b4879502615f6 +3105600 0013d51e2ddad3d77e66a7261b9256eb78c222e16895bdde659afb216be5eb15 +3106000 003030ab6fde397cf766dc792b64c60237014f3f7915b34a9b480ad145f0f9f6 +3106400 0015945b97fa213c6be7f08f13b7947ef41938e01e0ab3970f0211a1dd420dd7 +3106800 00062e739908037556d312a62005519def252e48e1ccf04bfd55930d17609a25 +3107200 0088f5a5d6512ff4c3d21255a00fd448b52005ca39e5dc44ad185f0d3d1accdd +3107600 00245f610290c4c065920c7a87c39432d490994d3280ea615df99e4313c8d249 +3108000 001d819512ab45b4df52be18041dff056308c8a4f81bf3e30010049feb4d0e2d +3108400 00187f70dd70251e5dcf66e8559d8328e14d74a897864562f463b97bf706e063 +3108800 004ee960bf71f8c884276934caf48b2a3607e0e6f1b0a8695e484e58c1129129 +3109200 000776e0a10b43cea2bc26b8cacb90e44f35dc183f424e3cd06005e70b7fff75 +3109600 0029859b237c55c3a51ffeaf8c35a0deac0557cf56875b3169c7b67414d3eb2b +3110000 001875fb89d1618d412c27476047f4d816a71c30b1f83762bfce00294f7d5742 +3110400 0035399f4116aeee33a271d19b3b0a66c0cec52751061c35cf5d2bc4d919b4a6 +3110800 000ee2a30af50d97313eee197146d2941e4ae2116c4b9e99fe3717d30f0d65ab +3111200 001417ee81ea8400cb690bf5acd733f2a58e36717049f434f45e951e4017c820 +3111600 006ca48f7de3904774c73caf6cf239245a751dc0e4715946fb2439ee9838e2a6 +3112000 00174416297cf8b3ad5ab504bb06338815e9d814fe7bd8164c40005efafab4b1 +3112400 0011545737542de3cf288e07724ba8c8f3b43b0cdf1820bd3056f2e41be77e7d +3112800 
0005e09e8d5800fd390619bd8c02599ea5273945f8b574b81a2e299e25bd4760 +3113200 001490d3b27ad73df18c8c0988743360799e0186414ee905de9b565112c843f3 +3113600 0020e6d001c5a143b75e728a77ac192224aed587b6986bfa3ec3a118b32faa25 +3114000 0000e4217cdd650fce03725c64ec76fba633ac45b66ef651d1357927ef5bdc81 +3114400 0089707b70b58f2db28a000c346e66a7367a9b76cac122dfb469e1c6d52cf48a From 179aaa4c4f4c39ec2a82f1489a50306955e8c98a Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 6 Dec 2024 13:28:56 -0500 Subject: [PATCH 035/245] Release v2.1.0 (#9072) * Updates changelog, crate versions, end of support height, and docs. * Update CHANGELOG.md Co-authored-by: Conrado Gouvea --------- Co-authored-by: Conrado Gouvea --- .../scripts/release-crates-dry-run.sh | 6 +-- CHANGELOG.md | 39 +++++++++++++++++++ Cargo.lock | 28 ++++++------- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 6 +-- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 6 +-- zebra-consensus/Cargo.toml | 20 +++++----- zebra-grpc/Cargo.toml | 6 +-- zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 24 ++++++------ zebra-scan/Cargo.toml | 20 +++++----- zebra-script/Cargo.toml | 6 +-- zebra-state/Cargo.toml | 10 ++--- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 ++-- zebrad/Cargo.toml | 30 +++++++------- zebrad/src/components/sync/end_of_support.rs | 2 +- 20 files changed, 135 insertions(+), 96 deletions(-) diff --git a/.github/workflows/scripts/release-crates-dry-run.sh b/.github/workflows/scripts/release-crates-dry-run.sh index 32fc0e671c7..c83b068f5bd 100755 --- a/.github/workflows/scripts/release-crates-dry-run.sh +++ b/.github/workflows/scripts/release-crates-dry-run.sh @@ -20,11 +20,11 @@ fi # We use the same commands as the [release drafter](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions) # with an extra `--no-confirm` argument for non-interactive 
testing. # Update everything except for alpha crates and zebrad: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc patch +cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta # Due to a bug in cargo-release, we need to pass exact versions for alpha crates: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.11 -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.9 +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.13 +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.11 # Update zebrad: cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a345971d3c..f12c495bb75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,45 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 2.1.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.1.0) - 2024-12-06 + +This release adds a check to verify that V5 transactions in the mempool have the correct consensus branch ID; +Zebra would previously accept those and return a transaction ID (indicating success) even though they would +be eventually rejected by the block consensus checks. Similarly, Zebra also now returns an error when trying +to submit transactions that would eventually fail some consensus checks (e.g. double spends) but would also +return a transaction ID indicating success. 
The release also bumps +Zebra's initial minimum protocol version such that this release of Zebra will always reject connections with peers advertising +a network protocol version below 170,120 on Mainnet and 170,110 on Testnet instead of accepting those connections until Zebra's +chain state reaches the NU6 activation height. +The `getblock` RPC method has been updated and now returns some additional information +such as the block height (even if you provide a block hash) and other fields as supported +by the `getblockheader` RPC call. + +### Breaking Changes + +- Upgrade minimum protocol versions for all Zcash networks ([#9058](https://github.com/ZcashFoundation/zebra/pull/9058)) + +### Added + +- `getblockheader` RPC method ([#8967](https://github.com/ZcashFoundation/zebra/pull/8967)) +- `rust-toolchain.toml` file ([#8985](https://github.com/ZcashFoundation/zebra/pull/8985)) + +### Changed + +- Updated `getblock` RPC to more closely match zcashd ([#9006](https://github.com/ZcashFoundation/zebra/pull/9006)) +- Updated error messages to include inner error types (notably for the transaction verifier) ([#9066](https://github.com/ZcashFoundation/zebra/pull/9066)) + +### Fixed + +- Validate consensus branch ids of mempool transactions ([#9063](https://github.com/ZcashFoundation/zebra/pull/9063)) +- Verify mempool transactions with unmined inputs if those inputs are in the mempool to support TEX transactions ([#8857](https://github.com/ZcashFoundation/zebra/pull/8857)) +- Wait until transactions have been added to the mempool before returning success response from `sendrawtransaction` RPC ([#9067](https://github.com/ZcashFoundation/zebra/pull/9067)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @conradoplg, @cypherpepe, @gustavovalverde, @idky137, @oxarbitrage, @pinglanlu and @upbqdn + ## [Zebra 2.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v2.0.1) - 2024-10-30 - Zebra now 
supports NU6 on Mainnet. This patch release updates dependencies diff --git a/Cargo.lock b/Cargo.lock index c9e9dcbcb5e..4d61c114c30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4941,7 +4941,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.18" +version = "0.2.41-beta.19" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4964,7 +4964,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.18" +version = "0.2.41-beta.19" dependencies = [ "futures-core", "pin-project", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "bitflags 2.6.0", "bitflags-serde-legacy", @@ -6104,7 +6104,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "bellman", "blake2b_simd", @@ -6150,7 +6150,7 @@ dependencies = [ [[package]] name = "zebra-grpc" -version = "0.1.0-alpha.9" +version = "0.1.0-alpha.10" dependencies = [ "color-eyre", "futures-util", @@ -6172,7 +6172,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "color-eyre", "jsonrpc-core", @@ -6226,7 +6226,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "base64 0.22.1", "chrono", @@ -6265,7 +6265,7 @@ dependencies = [ [[package]] name = "zebra-scan" -version = "0.1.0-alpha.11" +version = "0.1.0-alpha.12" dependencies = [ "bls12_381", "chrono", @@ -6311,7 +6311,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "hex", "lazy_static", @@ -6323,7 +6323,7 @@ dependencies = [ [[package]] name = "zebra-state" 
-version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "bincode", "chrono", @@ -6368,7 +6368,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "color-eyre", "futures", @@ -6396,7 +6396,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" dependencies = [ "color-eyre", "hex", @@ -6427,7 +6427,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "2.0.1" +version = "2.1.0" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index d77c234b537..6dc852a57ac 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -37,7 +37,7 @@ docker run -d --platform linux/amd64 \ ### Build it locally ```shell -git clone --depth 1 --branch v2.0.1 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v2.1.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index 5903adf4337..c72cfbf38c2 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -76,7 +76,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v2.0.1 +git checkout v2.1.0 ``` 3. 
Build and Run `zebrad` @@ -89,7 +89,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.0.1 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.1.0 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 517c9cfca9c..68ef0ca18bc 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.18" +version = "0.2.41-beta.19" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -43,10 +43,10 @@ rand = "0.8.5" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.18" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } tower-test = "0.4.0" -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index a3d504ed691..a717ff0e4c2 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.18" +version = "0.2.41-beta.19" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
license = "MIT OR Apache-2.0" @@ -24,4 +24,4 @@ tracing = "0.1.39" [dev-dependencies] tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index b43e77f149a..2502f7405f3 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -145,7 +145,7 @@ proptest-derive = { version = "0.5.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43", optional = true } [dev-dependencies] # Benchmarks @@ -168,7 +168,7 @@ rand_chacha = "0.3.1" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } [[bench]] name = "block" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index cf8424d1606..7228f4ff490 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -63,13 +63,13 @@ orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } wagyu-zcash-parameters = "0.2.0" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.18" } -tower-batch-control = { path = "../tower-batch-control/", 
version = "0.2.41-beta.18" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.19" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.42" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.43" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } @@ -94,6 +94,6 @@ tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.18" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index 4f825686d52..d0c273cac46 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-grpc" -version = "0.1.0-alpha.9" +version = "0.1.0-alpha.10" authors = ["Zcash Foundation "] description = "Zebra gRPC interface" license = "MIT OR Apache-2.0" @@ -28,8 +28,8 @@ color-eyre = "0.6.3" zcash_primitives.workspace = true 
-zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["shielded-scan"] } -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.42" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } [build-dependencies] tonic-build = "0.12.3" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e4967cc66f2..cb9e2b14918 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["async-error"] } [dev-dependencies] proptest = "1.4.0" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index cba315f1efa..5ab39204d21 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -37,7 +37,7 @@ rpc-client = [ shielded-scan = [] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 85be248bc76..a542c0e1374 100644 --- a/zebra-rpc/Cargo.toml +++ 
b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -104,16 +104,16 @@ zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl proptest = { version = "1.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ "json-conversion", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = [ "rpc-client", ] } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.42" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.43" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } [build-dependencies] tonic-build = { version = "0.12.3", optional = true } @@ -126,17 +126,17 @@ proptest = "1.4.0" thiserror = "1.0.64" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ "proptest-impl", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43", features = [ "proptest-impl", 
] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42", features = [ +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43", features = [ "proptest-impl", ] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = [ +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = [ "proptest-impl", ] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index c939700727e..6d5a0f0a3c5 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-scan" -version = "0.1.0-alpha.11" +version = "0.1.0-alpha.12" authors = ["Zcash Foundation "] description = "Shielded transaction scanner for the Zcash blockchain" license = "MIT OR Apache-2.0" @@ -77,11 +77,11 @@ zcash_primitives.workspace = true zcash_address.workspace = true sapling-crypto.workspace = true -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["shielded-scan"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["shielded-scan"] } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["shielded-scan"] } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.9" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["shielded-scan"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43" } chrono = { version = "0.4.38", default-features = false, features = 
["clock", "std", "serde"] } @@ -96,7 +96,7 @@ jubjub = { version = "0.10.0", optional = true } rand = { version = "0.8.5", optional = true } zcash_note_encryption = { version = "0.4.0", optional = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42", optional = true } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43", optional = true } # zebra-scanner binary dependencies tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } @@ -107,7 +107,7 @@ serde_json = "1.0.132" jsonrpc = { version = "0.18.0", optional = true } hex = { version = "0.4.3", optional = true } -zebrad = { path = "../zebrad", version = "2.0.1" } +zebrad = { path = "../zebrad", version = "2.1.0" } [dev-dependencies] insta = { version = "1.40.0", features = ["ron", "redactions"] } @@ -125,6 +125,6 @@ zcash_note_encryption = "0.4.0" toml = "0.8.19" tonic = "0.12.3" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 1f3050ca53a..abe5d7d1b55 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -16,11 +16,11 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.2.0" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } thiserror = "1.0.64" [dev-dependencies] hex = "0.4.3" lazy_static = "1.4.0" -zebra-test = { path = 
"../zebra-test", version = "1.0.0-beta.42" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 55f4f2e1556..d3ce8a080b0 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -77,13 +77,13 @@ tracing = "0.1.39" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.132", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } @@ -108,5 +108,5 @@ jubjub = "0.10.0" tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 86daa264305..9ccd37d8dcd 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.42" 
+version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index e8f81cb088e..02cb7299d79 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.42" +version = "1.0.0-beta.43" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -94,11 +94,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.18" thiserror = "1.0.64" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.13.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 768f68dfcb3..6bcb08ca5b7 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "2.0.1" +version = "2.1.0" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -157,15 +157,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42" } -zebra-network = { path = 
"../zebra-network", version = "1.0.0-beta.42" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.42", features = ["rpc-client"] } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.42" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["rpc-client"] } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.42", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43", optional = true } abscissa_core = "0.7.0" clap = { version = "4.5.20", features = ["cargo"] } @@ -279,13 +279,13 @@ proptest-derive = "0.5.0" # enable span traces and track caller in tests color-eyre = { version = "0.6.3" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.42", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.42", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = 
"1.0.0-beta.43", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.42" } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.9" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). @@ -296,7 +296,7 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.9" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.42" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 36586678bdf..284c266fb6c 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_699_000; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_742_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. 
From be50f7ce83fcfbe5917360f420fc20a778c4dd02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Dec 2024 04:27:09 +0000 Subject: [PATCH 036/245] build(deps): bump the prod group across 1 directory with 22 updates (#9065) * build(deps): bump the prod group across 1 directory with 22 updates Bumps the prod group with 21 updates in the / directory: | Package | From | To | | --- | --- | --- | | [clap](https://github.com/clap-rs/clap) | `4.5.20` | `4.5.21` | | [indexmap](https://github.com/indexmap-rs/indexmap) | `2.6.0` | `2.7.0` | | [serde](https://github.com/serde-rs/serde) | `1.0.214` | `1.0.215` | | [tokio](https://github.com/tokio-rs/tokio) | `1.41.0` | `1.41.1` | | [tower](https://github.com/tower-rs/tower) | `0.4.13` | `0.5.1` | | [thiserror](https://github.com/dtolnay/thiserror) | `1.0.65` | `2.0.3` | | [tracing-error](https://github.com/tokio-rs/tracing) | `0.2.0` | `0.2.1` | | [tracing](https://github.com/tokio-rs/tracing) | `0.1.40` | `0.1.41` | | [metrics](https://github.com/metrics-rs/metrics) | `0.24.0` | `0.24.1` | | [thread-priority](https://github.com/iddm/thread-priority) | `1.1.0` | `1.2.0` | | [sentry](https://github.com/getsentry/sentry-rust) | `0.32.3` | `0.35.0` | | [inferno](https://github.com/jonhoo/inferno) | `0.11.21` | `0.12.0` | | [hyper](https://github.com/hyperium/hyper) | `1.5.0` | `1.5.1` | | [bytes](https://github.com/tokio-rs/bytes) | `1.8.0` | `1.9.0` | | [indicatif](https://github.com/console-rs/indicatif) | `0.17.8` | `0.17.9` | | [insta](https://github.com/mitsuhiko/insta) | `1.41.0` | `1.41.1` | | [serde_json](https://github.com/serde-rs/json) | `1.0.132` | `1.0.133` | | [tempfile](https://github.com/Stebalien/tempfile) | `3.13.0` | `3.14.0` | | [primitive-types](https://github.com/paritytech/parity-common) | `0.12.2` | `0.13.1` | | [elasticsearch](https://github.com/elastic/elasticsearch-rs) | `8.5.0-alpha.1` | `8.16.0-alpha.1` | | 
[reqwest](https://github.com/seanmonstar/reqwest) | `0.11.27` | `0.12.9` | Updates `clap` from 4.5.20 to 4.5.21 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.20...clap_complete-v4.5.21) Updates `indexmap` from 2.6.0 to 2.7.0 - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.6.0...2.7.0) Updates `serde` from 1.0.214 to 1.0.215 - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.214...v1.0.215) Updates `tokio` from 1.41.0 to 1.41.1 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.0...tokio-1.41.1) Updates `tower` from 0.4.13 to 0.5.1 - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/tower-0.4.13...tower-0.5.1) Updates `thiserror` from 1.0.65 to 2.0.3 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.65...2.0.3) Updates `tracing-error` from 0.2.0 to 0.2.1 - [Release notes](https://github.com/tokio-rs/tracing/releases) - [Commits](https://github.com/tokio-rs/tracing/compare/tracing-error-0.2.0...tracing-error-0.2.1) Updates `tracing` from 0.1.40 to 0.1.41 - [Release notes](https://github.com/tokio-rs/tracing/releases) - [Commits](https://github.com/tokio-rs/tracing/compare/tracing-0.1.40...tracing-0.1.41) Updates `metrics` from 0.24.0 to 0.24.1 - [Changelog](https://github.com/metrics-rs/metrics/blob/main/release.toml) - [Commits](https://github.com/metrics-rs/metrics/compare/metrics-v0.24.0...metrics-v0.24.1) Updates `thread-priority` from 1.1.0 to 1.2.0 - [Commits](https://github.com/iddm/thread-priority/commits) Updates `sentry` from 
0.32.3 to 0.35.0 - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.32.3...0.35.0) Updates `inferno` from 0.11.21 to 0.12.0 - [Changelog](https://github.com/jonhoo/inferno/blob/main/CHANGELOG.md) - [Commits](https://github.com/jonhoo/inferno/compare/v0.11.21...v0.12.0) Updates `hyper` from 1.5.0 to 1.5.1 - [Release notes](https://github.com/hyperium/hyper/releases) - [Changelog](https://github.com/hyperium/hyper/blob/master/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper/compare/v1.5.0...v1.5.1) Updates `bytes` from 1.8.0 to 1.9.0 - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.8.0...v1.9.0) Updates `indicatif` from 0.17.8 to 0.17.9 - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.8...0.17.9) Updates `insta` from 1.41.0 to 1.41.1 - [Release notes](https://github.com/mitsuhiko/insta/releases) - [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md) - [Commits](https://github.com/mitsuhiko/insta/compare/1.41.0...1.41.1) Updates `serde_json` from 1.0.132 to 1.0.133 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.132...v1.0.133) Updates `tempfile` from 3.13.0 to 3.14.0 - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.13.0...v3.14.0) Updates `primitive-types` from 0.12.2 to 0.13.1 - [Commits](https://github.com/paritytech/parity-common/commits/primitive-types-v0.13.1) Updates `elasticsearch` from 8.5.0-alpha.1 to 8.16.0-alpha.1 - [Release notes](https://github.com/elastic/elasticsearch-rs/releases) 
- [Commits](https://github.com/elastic/elasticsearch-rs/commits) Updates `reqwest` from 0.11.27 to 0.12.9 - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.27...v0.12.9) Updates `syn` from 2.0.85 to 2.0.90 - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.85...2.0.90) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tower dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-major dependency-group: prod - dependency-name: tracing-error dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tracing dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: metrics dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: thread-priority dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: inferno dependency-type: direct:production update-type: 
version-update:semver-minor dependency-group: prod - dependency-name: hyper dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: insta dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: primitive-types dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: elasticsearch dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod ... 
Signed-off-by: dependabot[bot] * downgrade tower, add denies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 681 +++++++++++---------------------- deny.toml | 6 +- tower-batch-control/Cargo.toml | 6 +- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 16 +- zebra-consensus/Cargo.toml | 14 +- zebra-grpc/Cargo.toml | 6 +- zebra-network/Cargo.toml | 20 +- zebra-node-services/Cargo.toml | 14 +- zebra-rpc/Cargo.toml | 16 +- zebra-scan/Cargo.toml | 16 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 22 +- zebra-test/Cargo.toml | 14 +- zebra-utils/Cargo.toml | 14 +- zebrad/Cargo.toml | 38 +- 16 files changed, 327 insertions(+), 562 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d61c114c30..f6af563af25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,13 +12,13 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.5.20", + "clap 4.5.21", "color-eyre", "fs-err", "once_cell", "regex", "secrecy", - "semver 1.0.23", + "semver", "serde", "termcolor", "toml 0.5.11", @@ -192,9 +192,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arc-swap" @@ -246,7 +246,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -257,7 +257,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -345,12 +345,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.13.1" @@ -428,7 +422,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -448,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -615,9 +609,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" [[package]] name = "bzip2-sys" @@ -662,10 +656,10 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.23", + "semver", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -818,15 +812,15 @@ dependencies = [ "bitflags 1.3.2", "strsim 0.8.0", "textwrap", - "unicode-width", + "unicode-width 0.1.14", "vec_map", ] [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -834,9 +828,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -853,7 +847,7 @@ dependencies = [ "heck 0.5.0", 
"proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -905,7 +899,7 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width", + "unicode-width 0.1.14", "windows-sys 0.52.0", ] @@ -960,16 +954,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1003,7 +987,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.20", + "clap 4.5.21", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1091,7 +1075,7 @@ dependencies = [ "curve25519-dalek-derive", "digest", "fiat-crypto", - "rustc_version 0.4.1", + "rustc_version", "serde", "subtle", "zeroize", @@ -1105,17 +1089,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", -] - -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", + "syn 2.0.90", ] [[package]] @@ -1124,22 +1098,8 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", 
- "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -1153,18 +1113,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.85", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", + "syn 2.0.90", ] [[package]] @@ -1173,9 +1122,9 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.20.10", + "darling_core", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -1290,20 +1239,21 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elasticsearch" -version = "8.5.0-alpha.1" +version = "8.16.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d9bd57d914cc66ce878f098f63ed7b5d5b64c30644a5adb950b008f874a6c6" +checksum = "774166217d4f9b96e9ab9d6832302e3d47196de507fddddb21de8184a39e2c6d" dependencies = [ - "base64 0.11.0", + "base64 0.22.1", "bytes", "dyn-clone", "lazy_static", "percent-encoding", - "reqwest 0.11.27", - "rustc_version 0.2.3", + "reqwest", + "rustc_version", "serde", "serde_json", - "serde_with 1.14.0", + "serde_with", + "tokio", "url", "void", ] @@ -1314,15 +1264,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "env_logger" version = "0.7.1" @@ 
-1548,7 +1489,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -1665,25 +1606,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.6.0", - "slab", - "tokio", - "tokio-util 0.7.12", - "tracing", -] - [[package]] name = "h2" version = "0.4.6" @@ -1696,7 +1618,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util 0.7.12", @@ -1982,7 +1904,6 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -1998,14 +1919,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", + "h2", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -2017,20 +1938,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.31", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.3" @@ -2039,14 +1946,14 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ 
"futures-util", "http 1.1.0", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", - "rustls 0.23.16", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", - "webpki-roots 0.26.6", + "webpki-roots", ] [[package]] @@ -2055,7 +1962,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", "pin-project-lite", "tokio", @@ -2073,7 +1980,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.5.0", + "hyper 1.5.1", "pin-project-lite", "socket2", "tokio", @@ -2168,9 +2075,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", "hashbrown 0.15.0", @@ -2179,25 +2086,24 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" dependencies = [ "console", - "instant", "number_prefix", "portable-atomic", - "unicode-width", + "unicode-width 0.2.0", + "web-time", ] [[package]] name = "inferno" -version = "0.11.21" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" +checksum = "75a5d75fee4d36809e6b021e4b96b686e763d365ffdb03af2bd00786353f84fe" dependencies = [ "ahash", - "is-terminal", "itoa", "log", "num-format", @@ -2218,9 +2124,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.41.0" 
+version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f72d3e19488cf7d8ea52d2fc0f8754fc933398b337cd3cbdb28aaeb35159ef" +checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" dependencies = [ "console", "lazy_static", @@ -2592,9 +2498,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10" +checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" dependencies = [ "ahash", "portable-atomic", @@ -2608,14 +2514,14 @@ checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "http-body-util", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", - "indexmap 2.6.0", + "indexmap 2.7.0", "ipnet", "metrics", "metrics-util", "quanta", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3030,7 +2936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -3054,7 +2960,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -3075,7 +2981,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.6.0", + "indexmap 2.7.0", ] [[package]] @@ -3095,7 +3001,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -3193,7 +3099,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ 
"proc-macro2", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -3251,9 +3157,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -3286,7 +3192,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -3316,7 +3222,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.85", + "syn 2.0.90", "tempfile", ] @@ -3330,7 +3236,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -3365,9 +3271,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.26.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +checksum = "f22f29bdff3987b4d8632ef95fd6424ec7e4e0a57e2f4fc63e489e75357f6a03" dependencies = [ "memchr", ] @@ -3406,9 +3312,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls", "socket2", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3423,9 +3329,9 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls", "slab", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tracing", ] @@ -3582,7 +3488,7 @@ dependencies = [ "pasta_curves", "rand_core 0.6.4", "serde", - "thiserror", + "thiserror 1.0.69", "zeroize", ] @@ -3595,7 +3501,7 @@ dependencies = [ "rand_core 0.6.4", "reddsa", "serde", - "thiserror", + "thiserror 1.0.69", "zeroize", ] @@ -3625,7 +3531,7 @@ checksum = 
"ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3672,55 +3578,13 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "async-compression", - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.31", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-rustls 0.24.1", - "tokio-util 0.7.12", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 0.25.4", - "winreg", -] - [[package]] name = "reqwest" version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ + "async-compression", "base64 0.22.1", "bytes", "futures-channel", @@ -3729,8 +3593,8 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", - "hyper-rustls 0.27.3", + "hyper 1.5.1", + "hyper-rustls", "hyper-util", "ipnet", "js-sys", @@ -3740,21 +3604,22 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.16", - "rustls-pemfile 2.2.0", + "rustls", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", + "tokio-util 
0.7.12", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.6", + "webpki-roots", "windows-registry", ] @@ -3845,29 +3710,20 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver", ] [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -3878,40 +3734,19 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.23.16" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.8", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -3927,16 +3762,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.8" @@ -4019,16 +3844,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "secp256k1" version = "0.27.0" @@ -4058,15 +3873,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.23" @@ -4076,35 +3882,29 @@ dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "sentry" -version = "0.32.3" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00421ed8fa0c995f07cde48ba6c89e80f2b312f74ff637326f392fbfd23abe02" 
+checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" dependencies = [ "httpdate", - "reqwest 0.12.9", - "rustls 0.21.12", + "reqwest", + "rustls", "sentry-backtrace", "sentry-contexts", "sentry-core", "sentry-tracing", "tokio", "ureq", - "webpki-roots 0.25.4", + "webpki-roots", ] [[package]] name = "sentry-backtrace" -version = "0.32.3" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a79194074f34b0cbe5dd33896e5928bbc6ab63a889bd9df2264af5acb186921e" +checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" dependencies = [ "backtrace", "once_cell", @@ -4114,23 +3914,23 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.32.3" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8870c5dba2bfd9db25c75574a11429f6b95957b0a78ac02e2970dd7a5249a" +checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" dependencies = [ "hostname", "libc", "os_info", - "rustc_version 0.4.1", + "rustc_version", "sentry-core", "uname", ] [[package]] name = "sentry-core" -version = "0.32.3" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a75011ea1c0d5c46e9e57df03ce81f5c7f0a9e199086334a1f9c0a541e0826" +checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", "rand 0.8.5", @@ -4141,9 +3941,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.32.3" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f715932bf369a61b7256687c6f0554141b7ce097287e30e3f7ed6e9de82498fe" +checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4153,16 +3953,16 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.32.3" +version = "0.35.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4519c900ce734f7a0eb7aba0869dfb225a7af8820634a7dd51449e3b093cfb7c" +checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" dependencies = [ "debugid", "hex", "rand 0.8.5", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "time", "url", "uuid", @@ -4170,9 +3970,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -4188,22 +3988,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -4231,16 +4031,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.11.0" @@ -4251,36 +4041,24 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", 
"serde_derive", "serde_json", - "serde_with_macros 3.11.0", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ - "darling 0.20.10", + "darling", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -4289,7 +4067,7 @@ version = "0.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "libyml", "memchr", @@ -4450,12 +4228,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -4505,9 +4277,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -4541,27 +4313,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "tap" version = "1.0.1" @@ -4570,9 +4321,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if 1.0.0", "fastrand", @@ -4596,34 +4347,54 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ - "unicode-width", + "unicode-width 0.1.14", ] [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] name = "thread-priority" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3b04d33c9633b8662b167b847c7ab521f83d1ae20f2321b65b5b925e532e36" +checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", @@ -4703,9 +4474,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -4728,17 +4499,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", + "syn 2.0.90", ] [[package]] @@ -4747,7 +4508,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.16", + "rustls", "rustls-pki-types", "tokio", ] @@ -4840,7 +4601,7 @@ version = "0.22.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -4858,11 +4619,11 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.6", + "h2", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -4888,7 +4649,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -5002,9 +4763,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -5019,27 +4780,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -5047,9 +4808,9 @@ dependencies = [ 
[[package]] name = "tracing-error" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" dependencies = [ "tracing", "tracing-subscriber", @@ -5145,7 +4906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -5244,6 +5005,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -5275,10 +5042,10 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.16", + "rustls", "rustls-pki-types", "url", - "webpki-roots 0.26.6", + "webpki-roots", ] [[package]] @@ -5337,7 +5104,7 @@ dependencies = [ "cfg-if 1.0.0", "git2", "regex", - "rustc_version 0.4.1", + "rustc_version", "rustversion", "time", ] @@ -5356,7 +5123,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -5477,7 +5244,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -5511,7 +5278,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5533,10 +5300,14 @@ dependencies = [ ] [[package]] -name = "webpki-roots" -version = "0.25.4" 
+name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] [[package]] name = "webpki-roots" @@ -5796,16 +5567,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if 1.0.0", - "windows-sys 0.48.0", -] - [[package]] name = "wyz" version = "0.5.1" @@ -6081,12 +5842,12 @@ dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.11.0", + "serde_with", "sha2", "spandoc", "static_assertions", "tempfile", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tokio", "tracing", @@ -6129,7 +5890,7 @@ dependencies = [ "sapling-crypto", "serde", "spandoc", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tokio", "tower 0.4.13", @@ -6183,7 +5944,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.6.0", + "indexmap 2.7.0", "itertools 0.13.0", "lazy_static", "metrics", @@ -6198,7 +5959,7 @@ dependencies = [ "serde", "static_assertions", "tempfile", - "thiserror", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util 0.7.12", @@ -6217,7 +5978,7 @@ version = "1.0.0-beta.43" dependencies = [ "color-eyre", "jsonrpc-core", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "tokio", @@ -6233,7 +5994,7 @@ dependencies = [ "color-eyre", "futures", "hex", - "indexmap 2.6.0", + "indexmap 2.7.0", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -6244,7 +6005,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "thiserror", + "thiserror 2.0.3", "tokio", "tokio-stream", "tonic", @@ -6274,7 +6035,7 @@ dependencies = [ "futures", "group", "hex", - "indexmap 2.6.0", + "indexmap 2.7.0", "insta", 
"itertools 0.13.0", "jsonrpc", @@ -6284,7 +6045,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "sapling-crypto", - "semver 1.0.23", + "semver", "serde", "serde_json", "structopt", @@ -6315,7 +6076,7 @@ version = "1.0.0-beta.43" dependencies = [ "hex", "lazy_static", - "thiserror", + "thiserror 2.0.3", "zcash_script", "zebra-chain", "zebra-test", @@ -6337,7 +6098,7 @@ dependencies = [ "howudoin", "human_bytes", "humantime-serde", - "indexmap 2.6.0", + "indexmap 2.7.0", "insta", "itertools 0.13.0", "jubjub", @@ -6352,12 +6113,12 @@ dependencies = [ "regex", "rlimit", "rocksdb", - "semver 1.0.23", + "semver", "serde", "serde_json", "spandoc", "tempfile", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tokio", "tower 0.4.13", @@ -6374,7 +6135,7 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.6.0", + "indexmap 2.7.0", "insta", "itertools 0.13.0", "lazy_static", @@ -6385,7 +6146,7 @@ dependencies = [ "regex", "spandoc", "tempfile", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tokio", "tower 0.4.13", @@ -6400,19 +6161,19 @@ version = "1.0.0-beta.43" dependencies = [ "color-eyre", "hex", - "indexmap 2.6.0", + "indexmap 2.7.0", "itertools 0.13.0", "jsonrpc", "quote", "rand 0.8.5", "regex", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "serde_yml", "structopt", - "syn 2.0.85", - "thiserror", + "syn 2.0.90", + "thiserror 2.0.3", "tinyvec", "tokio", "tracing-error", @@ -6433,7 +6194,7 @@ dependencies = [ "atty", "bytes", "chrono", - "clap 4.5.20", + "clap 4.5.21", "color-eyre", "console-subscriber", "dirs", @@ -6443,9 +6204,9 @@ dependencies = [ "howudoin", "http-body-util", "humantime-serde", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", - "indexmap 2.6.0", + "indexmap 2.7.0", "indicatif", "inferno", "insta", @@ -6463,12 +6224,12 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", - "semver 1.0.23", + "semver", "sentry", "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 2.0.3", "thread-priority", "tinyvec", "tokio", @@ 
-6515,7 +6276,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] @@ -6535,7 +6296,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.90", ] [[package]] diff --git a/deny.toml b/deny.toml index 6c809cabd12..7f804946767 100644 --- a/deny.toml +++ b/deny.toml @@ -107,7 +107,11 @@ skip-tree = [ # wait for zebra to update tower { name = "tower", version = "=0.4.13" }, - { name = "hashbrown", version = "=0.12.3" }, + { name = "hashbrown", version = "=0.14.5" }, + + # wait for zebra to update vergen + { name = "thiserror", version = "=1.0.69" }, + { name = "thiserror-impl", version = "=1.0.69" }, # Remove after release candicate period is over and the ECC crates are not patched anymore { name = "equihash", version = "=0.2.0" }, diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 68ef0ca18bc..52b63b3ec34 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -26,10 +26,10 @@ futures = "0.3.31" futures-core = "0.3.28" pin-project = "1.1.6" rayon = "1.10.0" -tokio = { version = "1.41.0", features = ["time", "sync", "tracing", "macros"] } +tokio = { version = "1.41.1", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.12" tower = { version = "0.4.13", features = ["util", "buffer"] } -tracing = "0.1.39" +tracing = "0.1.41" tracing-futures = "0.2.5" [dev-dependencies] @@ -41,7 +41,7 @@ tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } ed25519-zebra = "4.0.3" rand = "0.8.5" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } tower-test = "0.4.0" diff --git 
a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index a717ff0e4c2..97991f315f8 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -19,9 +19,9 @@ categories = ["algorithms", "asynchronous"] pin-project = "1.1.6" tower = "0.4.13" futures-core = "0.3.28" -tracing = "0.1.39" +tracing = "0.1.41" [dev-dependencies] -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 2502f7405f3..d6b0bb02f7f 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -81,7 +81,7 @@ group = "0.13.0" incrementalmerkletree.workspace = true jubjub = "0.10.0" lazy_static = "1.4.0" -tempfile = "3.13.0" +tempfile = "3.14.0" dirs = "5.0.1" num-integer = "0.1.46" primitive-types = "0.12.2" @@ -110,12 +110,12 @@ humantime = "2.1.0" # Error Handling & Formatting static_assertions = "1.1.0" -thiserror = "1.0.64" -tracing = "0.1.39" +thiserror = "2.0.3" +tracing = "0.1.41" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.211", features = ["serde_derive", "rc"] } +serde = { version = "1.0.215", features = ["serde_derive", "rc"] } serde_with = "3.11.0" serde-big-array = "0.5.1" @@ -130,10 +130,10 @@ redjubjub = "0.7.0" reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.132", optional = true } +serde_json = { version = "1.0.133", optional = true } # Production feature async-error and testing feature proptest-impl -tokio = { version = "1.41.0", optional = true } +tokio = { version = "1.41.1", optional = true } # Experimental feature shielded-scan zcash_client_backend = { workspace = true, optional = true } @@ -157,7 +157,7 @@ color-eyre = "0.6.3" # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } spandoc = "0.2.2" -tracing = "0.1.39" +tracing = "0.1.41" # Make the optional testing dependencies required proptest = "1.4.0" @@ -166,7 +166,7 @@ proptest-derive = "0.5.0" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 7228f4ff490..6c6d731d3d7 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -46,15 +46,15 @@ rayon = "1.10.0" chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } lazy_static = "1.4.0" once_cell = "1.20.2" -serde = { version = "1.0.211", features = ["serde_derive"] } +serde = { version = "1.0.215", features = ["serde_derive"] } futures = "0.3.31" futures-util = "0.3.28" -metrics = "0.24.0" -thiserror = "1.0.64" -tokio = { version = "1.41.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } +metrics = "0.24.1" +thiserror = "2.0.3" +tokio = { version = "1.41.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } -tracing = "0.1.39" +tracing = "0.1.41" tracing-futures = "0.2.5" sapling-crypto.workspace = true @@ -90,8 +90,8 @@ proptest = "1.4.0" proptest-derive = "0.5.0" spandoc = "0.2.2" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } -tracing-error = "0.2.0" +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tracing-error = "0.2.1" tracing-subscriber = "0.3.18" zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index d0c273cac46..f21051c8dae 100644 --- a/zebra-grpc/Cargo.toml +++ 
b/zebra-grpc/Cargo.toml @@ -20,8 +20,8 @@ futures-util = "0.3.28" tonic = "0.12.3" tonic-reflection = "0.12.3" prost = "0.13.3" -serde = { version = "1.0.211", features = ["serde_derive"] } -tokio = { version = "1.41.0", features = ["macros", "rt-multi-thread"] } +serde = { version = "1.0.215", features = ["serde_derive"] } +tokio = { version = "1.41.1", features = ["macros", "rt-multi-thread"] } tokio-stream = "0.1.16" tower = { version = "0.4.13", features = ["util", "buffer", "timeout"] } color-eyre = "0.6.3" @@ -35,7 +35,7 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } tonic-build = "0.12.3" [dev-dependencies] -insta = { version = "1.40.0", features = ["redactions", "json", "ron"] } +insta = { version = "1.41.1", features = ["redactions", "json", "ron"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-state = { path = "../zebra-state" } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index cb9e2b14918..ff83458ef20 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -42,12 +42,12 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] bitflags = "2.5.0" byteorder = "1.5.0" -bytes = "1.8.0" +bytes = "1.9.0" chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "2.6.0", features = ["serde"] } +indexmap = { version = "2.7.0", features = ["serde"] } itertools = "0.13.0" lazy_static = "1.4.0" num-integer = "0.1.46" @@ -56,20 +56,20 @@ pin-project = "1.1.6" rand = "0.8.5" rayon = "1.10.0" regex = "1.11.0" -serde = { version = "1.0.211", features = ["serde_derive"] } -tempfile = "3.13.0" -thiserror = "1.0.64" +serde = { version = "1.0.215", features = ["serde_derive"] } +tempfile = "3.14.0" +thiserror = "2.0.3" futures = "0.3.31" -tokio = { version = "1.41.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", 
"rt-multi-thread"] } +tokio = { version = "1.41.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.16", features = ["sync", "time"] } tokio-util = { version = "0.7.12", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } -metrics = "0.24.0" +metrics = "0.24.1" tracing-futures = "0.2.5" -tracing-error = { version = "0.2.0", features = ["traced-error"] } -tracing = "0.1.39" +tracing-error = { version = "0.2.1", features = ["traced-error"] } +tracing = "0.1.41" # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } @@ -90,7 +90,7 @@ proptest = "1.4.0" proptest-derive = "0.5.0" static_assertions = "1.1.0" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } toml = "0.8.19" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 5ab39204d21..bccc34ab91c 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -45,15 +45,15 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } color-eyre = { version = "0.6.3", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl -reqwest = { version = "0.11.26", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.211", optional = true } -serde_json = { version = "1.0.132", optional = true } -tokio = { version = "1.41.0", features = ["time", "sync"] } +reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } +serde = { version = "1.0.215", optional = true } +serde_json = { version = "1.0.133", optional = true } +tokio = { version = "1.41.1", features = ["time", 
"sync"] } [dev-dependencies] color-eyre = "0.6.3" jsonrpc-core = "18.0.0" -reqwest = { version = "0.11.26", default-features = false, features = ["rustls-tls"] } -serde = "1.0.211" -serde_json = "1.0.132" +reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"] } +serde = "1.0.215" +serde_json = "1.0.133" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index a542c0e1374..79f02c60966 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -64,8 +64,8 @@ jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.132", features = ["preserve_order"] } -indexmap = { version = "2.6.0", features = ["serde"] } +serde_json = { version = "1.0.133", features = ["preserve_order"] } +indexmap = { version = "2.7.0", features = ["serde"] } # RPC endpoint basic auth base64 = "0.22.1" @@ -74,7 +74,7 @@ rand = "0.8.5" # Error handling color-eyre = "0.6.3" -tokio = { version = "1.41.0", features = [ +tokio = { version = "1.41.1", features = [ "time", "rt-multi-thread", "macros", @@ -88,10 +88,10 @@ tonic-reflection = { version = "0.12.3", optional = true } prost = { version = "0.13.3", optional = true } tokio-stream = { version = "0.1.16", optional = true } -tracing = "0.1.39" +tracing = "0.1.41" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.211", features = ["serde_derive"] } +serde = { version = "1.0.215", features = ["serde_derive"] } # For the `stop` RPC method. 
nix = { version = "0.29.0", features = ["signal"] } @@ -119,12 +119,12 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } tonic-build = { version = "0.12.3", optional = true } [dev-dependencies] -insta = { version = "1.40.0", features = ["redactions", "json", "ron"] } +insta = { version = "1.41.1", features = ["redactions", "json", "ron"] } proptest = "1.4.0" -thiserror = "1.0.64" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +thiserror = "2.0.3" +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ "proptest-impl", diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 6d5a0f0a3c5..413dc367319 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -61,13 +61,13 @@ results-reader = [ [dependencies] color-eyre = "0.6.3" -indexmap = { version = "2.6.0", features = ["serde"] } +indexmap = { version = "2.7.0", features = ["serde"] } itertools = "0.13.0" semver = "1.0.23" -serde = { version = "1.0.211", features = ["serde_derive"] } -tokio = { version = "1.41.0", features = ["time"] } +serde = { version = "1.0.215", features = ["serde_derive"] } +tokio = { version = "1.41.1", features = ["time"] } tower = "0.4.13" -tracing = "0.1.39" +tracing = "0.1.41" futures = "0.3.31" # ECC dependencies. 
@@ -102,7 +102,7 @@ zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43", optional = tru tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } structopt = "0.3.26" lazy_static = "1.4.0" -serde_json = "1.0.132" +serde_json = "1.0.133" jsonrpc = { version = "0.18.0", optional = true } hex = { version = "0.4.3", optional = true } @@ -110,8 +110,8 @@ hex = { version = "0.4.3", optional = true } zebrad = { path = "../zebrad", version = "2.1.0" } [dev-dependencies] -insta = { version = "1.40.0", features = ["ron", "redactions"] } -tokio = { version = "1.41.0", features = ["test-util"] } +insta = { version = "1.41.1", features = ["ron", "redactions"] } +tokio = { version = "1.41.1", features = ["test-util"] } proptest = "1.4.0" proptest-derive = "0.5.0" @@ -120,7 +120,7 @@ ff = "0.13.0" group = "0.13.0" jubjub = "0.10.0" rand = "0.8.5" -tempfile = "3.13.0" +tempfile = "3.14.0" zcash_note_encryption = "0.4.0" toml = "0.8.19" tonic = "0.12.3" diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index abe5d7d1b55..62f437a5f6e 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -18,7 +18,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] zcash_script = "0.2.0" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } -thiserror = "1.0.64" +thiserror = "2.0.3" [dev-dependencies] hex = "0.4.3" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index d3ce8a080b0..f844c8c193c 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -54,28 +54,28 @@ hex = "0.4.3" hex-literal = "0.4.1" humantime-serde = "1.1.1" human_bytes = { version = "0.4.3", default-features = false } -indexmap = "2.6.0" +indexmap = "2.7.0" itertools = "0.13.0" lazy_static = "1.4.0" -metrics = "0.24.0" +metrics = "0.24.1" mset = "0.1.1" regex = "1.11.0" rlimit = "0.10.2" rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } semver = "1.0.23" -serde = { version = "1.0.211", features 
= ["serde_derive"] } -tempfile = "3.13.0" -thiserror = "1.0.64" +serde = { version = "1.0.215", features = ["serde_derive"] } +tempfile = "3.14.0" +thiserror = "2.0.3" rayon = "1.10.0" -tokio = { version = "1.41.0", features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { version = "1.41.1", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } -tracing = "0.1.39" +tracing = "0.1.41" # elasticsearch specific dependencies. # Security: avoid default dependency on openssl -elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.132", package = "serde_json", optional = true } +elasticsearch = { version = "8.16.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } +serde_json = { version = "1.0.133", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["async-error"] } @@ -97,7 +97,7 @@ once_cell = "1.20.2" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.40.0", features = ["ron", "redactions"] } +insta = { version = "1.41.1", features = ["ron", "redactions"] } proptest = "1.4.0" proptest-derive = "0.5.0" @@ -106,7 +106,7 @@ rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 9ccd37d8dcd..fc125068e69 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -16,16 +16,16 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] 
[dependencies] hex = "0.4.3" -indexmap = "2.6.0" +indexmap = "2.7.0" lazy_static = "1.4.0" -insta = "1.40.0" +insta = "1.41.1" itertools = "0.13.0" proptest = "1.4.0" once_cell = "1.20.2" rand = "0.8.5" regex = "1.11.0" -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.31" @@ -37,11 +37,11 @@ tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "4.1.0" spandoc = "0.2.2" -thiserror = "1.0.64" +thiserror = "2.0.3" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } -tracing-error = "0.2.0" -tracing = "0.1.39" +tracing-error = "0.2.1" +tracing = "0.1.41" [dev-dependencies] -tempfile = "3.13.0" +tempfile = "3.14.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 02cb7299d79..a1451c3b1c6 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -89,10 +89,10 @@ tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.132" -tracing-error = "0.2.0" +serde_json = "1.0.133" +tracing-error = "0.2.1" tracing-subscriber = "0.3.18" -thiserror = "1.0.64" +thiserror = "2.0.3" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } @@ -106,10 +106,10 @@ itertools = { version = "0.13.0", optional = true } # These crates are needed for the search-issue-refs binary regex = { version = "1.11.0", optional = true } # Avoid default openssl dependency to reduce the dependency tree and security alerts. 
-reqwest = { version = "0.11.26", default-features = false, features = ["rustls-tls"], optional = true } +reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.41.0", features = ["full"], optional = true } +tokio = { version = "1.41.1", features = ["full"], optional = true } jsonrpc = { version = "0.18.0", optional = true } @@ -122,6 +122,6 @@ rand = "0.8.5" syn = { version = "2.0.79", features = ["full"], optional = true } quote = { version = "1.0.37", optional = true } serde_yml = { version = "0.0.12", optional = true } -serde = { version = "1.0.211", features = ["serde_derive"], optional = true } -indexmap = "2.6.0" +serde = { version = "1.0.215", features = ["serde_derive"], optional = true } +indexmap = "2.7.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6bcb08ca5b7..1ed91dd72a3 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -168,18 +168,18 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.5.20", features = ["cargo"] } +clap = { version = "4.5.21", features = ["cargo"] } chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "2.6.0" +indexmap = "2.7.0" lazy_static = "1.4.0" semver = "1.0.23" -serde = { version = "1.0.211", features = ["serde_derive"] } +serde = { version = "1.0.215", features = ["serde_derive"] } toml = "0.8.19" futures = "0.3.31" rayon = "1.10.0" -tokio = { version = "1.41.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.41.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tokio-stream = { version = "0.1.16", features = ["time"] } tower = { version = "0.4.13", features 
= ["hedge", "limit"] } pin-project = "1.1.6" @@ -189,15 +189,15 @@ color-eyre = { version = "0.6.3", default-features = false, features = ["issue-u # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } -thiserror = "1.0.64" +thiserror = "2.0.3" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } tracing-appender = "0.2.3" -tracing-error = "0.2.0" +tracing-error = "0.2.1" tracing-futures = "0.2.5" -tracing = "0.1.39" +tracing = "0.1.41" -metrics = "0.24.0" +metrics = "0.24.1" dirs = "5.0.1" atty = "0.2.14" @@ -206,23 +206,23 @@ num-integer = "0.1.46" rand = "0.8.5" # prod feature internal-miner -thread-priority = { version = "1.0.0", optional = true } +thread-priority = { version = "1.2.0", optional = true } # prod feature sentry -sentry = { version = "0.32.2", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.35.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.11.21", default-features = false, optional = true } +inferno = { version = "0.12.0", default-features = false, optional = true } # prod feature journald tracing-journald = { version = "0.3.0", optional = true } # prod feature filter-reload -hyper = { version = "1.5.0", features = ["http1", "http2", "server"], optional = true } +hyper = { version = "1.5.1", features = ["http1", "http2", "server"], optional = true } http-body-util = { version = "0.1.2", optional = true } hyper-util = { version = "0.1.9", optional = true } -bytes = { version = "1.8.0", optional = true } +bytes = { version = "1.9.0", optional = true } # prod feature prometheus metrics-exporter-prometheus = { version = "0.16.0", default-features = false, features = ["http-listener"], optional = true } @@ 
-235,7 +235,7 @@ log = "0.4.22" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.8", optional = true } +indicatif = { version = "0.17.9", optional = true } # test feature proptest-impl proptest = { version = "1.4.0", optional = true } @@ -257,16 +257,16 @@ hex-literal = "0.4.1" jsonrpc-core = "18.0.0" once_cell = "1.20.2" regex = "1.11.0" -insta = { version = "1.40.0", features = ["json"] } +insta = { version = "1.41.1", features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.132", features = ["preserve_order"] } -tempfile = "3.13.0" +serde_json = { version = "1.0.133", features = ["preserve_order"] } +tempfile = "3.14.0" -hyper = { version = "1.5.0", features = ["http1", "http2", "server"]} +hyper = { version = "1.5.1", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.41.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.16" # test feature lightwalletd-grpc-tests From eb1d129feaa03d9ec801cf55eabd95366795cb53 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 11 Dec 2024 09:52:59 -0500 Subject: [PATCH 037/245] fix(test): Fixes bugs in the lightwalletd integration tests (#9052) * Fixes bug in send transaction test * fixes new bug in send_transaction_test * Removes unused `load_transactions_from_future_blocks` and factors out code for sending transactions to its own fn * corrects tx count updates to exclude coinbase txs * fixes formatting * Calls zebra's sendrawtransaction method if lwd's send_transaction() return an error for more detailed error info * removes instrument * avoids panic when a future block has only a coinbase transaction * fixes check for gossip log (only happens when 10 txs have been added * fixes a concurrency 
bug, adds more detailed errors. * removes unnecessary wait_for_stdout calls and fixes condition for early return * Fixes issue around missing stdout line * Fixes bug around expected tx ids and removes outdated TODO * Fixes issue with expected ZF funding stream address balance in post-NU6 chains * fixes the rest of wallet_grpc_test * Update zebrad/src/components/mempool/downloads.rs Co-authored-by: Conrado Gouvea --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Conrado Gouvea --- .../lightwalletd/send_transaction_test.rs | 192 +++++++++++------- zebrad/tests/common/lightwalletd/sync.rs | 2 +- .../common/lightwalletd/wallet_grpc_test.rs | 173 ++++++++-------- 3 files changed, 204 insertions(+), 163 deletions(-) diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index bee6cf78356..6ac031e491b 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -16,15 +16,18 @@ //! were obtained. This is to ensure that zebra does not reject the transactions because they have //! already been seen in a block. 
-use std::{cmp::min, sync::Arc, time::Duration}; +use std::{cmp::min, collections::HashSet, sync::Arc}; +use tower::BoxError; -use color_eyre::eyre::Result; +use color_eyre::eyre::{eyre, Result}; use zebra_chain::{ - parameters::Network::{self, *}, + block::Block, + parameters::Network::*, serialization::ZcashSerialize, transaction::{self, Transaction}, }; +use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::queue::CHANNEL_AND_QUEUE_CAPACITY; use zebrad::components::mempool::downloads::MAX_INBOUND_CONCURRENCY; @@ -34,10 +37,13 @@ use crate::common::{ lightwalletd::{ can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc, sync::wait_for_zebrad_and_lightwalletd_sync, - wallet_grpc::{self, connect_to_lightwalletd, Empty, Exclude}, + wallet_grpc::{ + self, compact_tx_streamer_client::CompactTxStreamerClient, connect_to_lightwalletd, + Empty, Exclude, + }, }, - sync::LARGE_CHECKPOINT_TIMEOUT, - test_type::TestType::{self, *}, + regtest::MiningRpcMethods, + test_type::TestType::*, }; /// The maximum number of transactions we want to send in the test. @@ -85,11 +91,19 @@ pub async fn run() -> Result<()> { "running gRPC send transaction test using lightwalletd & zebrad", ); - let transactions = - load_transactions_from_future_blocks(network.clone(), test_type, test_name).await?; + let mut count = 0; + let blocks: Vec = + get_future_blocks(&network, test_type, test_name, MAX_NUM_FUTURE_BLOCKS) + .await? 
+ .into_iter() + .take_while(|block| { + count += block.transactions.len() - 1; + count <= max_sent_transactions() + }) + .collect(); tracing::info!( - transaction_count = ?transactions.len(), + blocks_count = ?blocks.len(), partial_sync_path = ?zebrad_state_path, "got transactions to send, spawning isolated zebrad...", ); @@ -113,6 +127,8 @@ pub async fn run() -> Result<()> { let zebra_rpc_address = zebra_rpc_address.expect("lightwalletd test must have RPC port"); + let zebrad_rpc_client = RpcRequestClient::new(zebra_rpc_address); + tracing::info!( ?test_type, ?zebra_rpc_address, @@ -134,7 +150,7 @@ pub async fn run() -> Result<()> { "spawned lightwalletd connected to zebrad, waiting for them both to sync...", ); - let (_lightwalletd, mut zebrad) = wait_for_zebrad_and_lightwalletd_sync( + let (_lightwalletd, _zebrad) = wait_for_zebrad_and_lightwalletd_sync( lightwalletd, lightwalletd_rpc_port, zebrad, @@ -164,6 +180,53 @@ pub async fn run() -> Result<()> { .await? .into_inner(); + let mut transaction_hashes = HashSet::new(); + let mut has_tx_with_shielded_elements = false; + let mut counter = 0; + + for block in blocks { + let (has_shielded_elements, count) = send_transactions_from_block( + &mut rpc_client, + &zebrad_rpc_client, + block.clone(), + &mut transaction_hashes, + ) + .await?; + + has_tx_with_shielded_elements |= has_shielded_elements; + counter += count; + + tracing::info!( + height = ?block.coinbase_height(), + "submitting block at height" + ); + + let submit_block_response = zebrad_rpc_client.submit_block(block).await; + tracing::info!(?submit_block_response, "submitted block"); + } + + // GetMempoolTx: make sure at least one of the transactions were inserted into the mempool. 
+ assert!( + !has_tx_with_shielded_elements || counter >= 1, + "failed to read v4+ transactions with shielded elements \ + from future blocks in mempool via lightwalletd" + ); + + Ok(()) +} + +/// Sends non-coinbase transactions from a block to the mempool, verifies that the transactions +/// can be found in the mempool via lightwalletd, and commits the block to Zebra's chainstate. +/// +/// Returns the zebrad test child that's handling the RPC requests. + +#[tracing::instrument(skip_all)] +async fn send_transactions_from_block( + rpc_client: &mut CompactTxStreamerClient, + zebrad_rpc_client: &RpcRequestClient, + block: Block, + transaction_hashes: &mut HashSet, +) -> Result<(bool, usize)> { // Lightwalletd won't call `get_raw_mempool` again until 2 seconds after the last call: // // @@ -171,8 +234,17 @@ pub async fn run() -> Result<()> { let sleep_until_lwd_last_mempool_refresh = tokio::time::sleep(std::time::Duration::from_secs(4)); - let transaction_hashes: Vec = - transactions.iter().map(|tx| tx.hash()).collect(); + let transactions: Vec<_> = block + .transactions + .iter() + .filter(|tx| !tx.is_coinbase()) + .collect(); + + if transactions.is_empty() { + return Ok((false, 0)); + } + + transaction_hashes.extend(transactions.iter().map(|tx| tx.hash())); tracing::info!( transaction_count = ?transactions.len(), @@ -181,7 +253,7 @@ pub async fn run() -> Result<()> { ); let mut has_tx_with_shielded_elements = false; - for transaction in transactions { + for &transaction in &transactions { let transaction_hash = transaction.hash(); // See @@ -195,20 +267,24 @@ pub async fn run() -> Result<()> { tracing::info!(?transaction_hash, "sending transaction..."); - let request = prepare_send_transaction_request(transaction); + let request = prepare_send_transaction_request(transaction.clone()); - let response = rpc_client.send_transaction(request).await?.into_inner(); + match rpc_client.send_transaction(request).await { + Ok(response) => assert_eq!(response.into_inner(), 
expected_response), + Err(err) => { + tracing::warn!(?err, "failed to send transaction"); + let send_tx_rsp = zebrad_rpc_client + .send_transaction(transaction) + .await + .map_err(|e| eyre!(e)); - assert_eq!(response, expected_response); + tracing::warn!(?send_tx_rsp, "failed to send tx twice"); + } + }; } - // Check if some transaction is sent to mempool, - // Fails if there are only coinbase transactions in the first 50 future blocks - tracing::info!("waiting for mempool to verify some transactions..."); - zebrad.expect_stdout_line_matches("sending mempool transaction broadcast")?; // Wait for more transactions to verify, `GetMempoolTx` only returns txs where tx.HasShieldedElements() // - tokio::time::sleep(std::time::Duration::from_secs(2)).await; sleep_until_lwd_last_mempool_refresh.await; tracing::info!("calling GetMempoolTx gRPC to fetch transactions..."); @@ -217,25 +293,6 @@ pub async fn run() -> Result<()> { .await? .into_inner(); - // Sometimes lightwalletd doesn't check the mempool, and waits for the next block instead. - // If that happens, we skip the rest of the test. - tracing::info!("checking if lightwalletd has queried the mempool..."); - - // We need a short timeout here, because sometimes this message is not logged. - zebrad = zebrad.with_timeout(Duration::from_secs(60)); - let tx_log = - zebrad.expect_stdout_line_matches("answered mempool request .*req.*=.*TransactionIds"); - // Reset the failed timeout and give the rest of the test enough time to finish. - #[allow(unused_assignments)] - { - zebrad = zebrad.with_timeout(LARGE_CHECKPOINT_TIMEOUT); - } - - if tx_log.is_err() { - tracing::info!("lightwalletd didn't query the mempool, skipping mempool contents checks"); - return Ok(()); - } - tracing::info!("checking the mempool contains some of the sent transactions..."); let mut counter = 0; while let Some(tx) = transactions_stream.message().await? 
{ @@ -251,16 +308,6 @@ pub async fn run() -> Result<()> { counter += 1; } - // GetMempoolTx: make sure at least one of the transactions were inserted into the mempool. - // - // TODO: Update `load_transactions_from_future_blocks()` to return block height offsets and, - // only check if a transaction from the first block has shielded elements - assert!( - !has_tx_with_shielded_elements || counter >= 1, - "failed to read v4+ transactions with shielded elements from future blocks in mempool via lightwalletd" - ); - - // TODO: GetMempoolStream: make sure at least one of the transactions were inserted into the mempool. tracing::info!("calling GetMempoolStream gRPC to fetch transactions..."); let mut transaction_stream = rpc_client.get_mempool_stream(Empty {}).await?.into_inner(); @@ -270,32 +317,7 @@ pub async fn run() -> Result<()> { _counter += 1; } - Ok(()) -} - -/// Loads transactions from a few block(s) after the chain tip of the cached state. -/// -/// Returns a list of non-coinbase transactions from blocks that have not been finalized to disk -/// in the `ZEBRA_CACHED_STATE_DIR`. -/// -/// ## Panics -/// -/// If the provided `test_type` doesn't need an rpc server and cached state -#[tracing::instrument] -async fn load_transactions_from_future_blocks( - network: Network, - test_type: TestType, - test_name: &str, -) -> Result>> { - let transactions = get_future_blocks(&network, test_type, test_name, MAX_NUM_FUTURE_BLOCKS) - .await? - .into_iter() - .flat_map(|block| block.transactions) - .filter(|transaction| !transaction.is_coinbase()) - .take(max_sent_transactions()) - .collect(); - - Ok(transactions) + Ok((has_tx_with_shielded_elements, counter)) } /// Prepare a request to send to lightwalletd that contains a transaction to be sent. 
@@ -307,3 +329,21 @@ fn prepare_send_transaction_request(transaction: Arc) -> wallet_grp height: 0, } } + +trait SendTransactionMethod { + async fn send_transaction( + &self, + transaction: &Arc, + ) -> Result; +} + +impl SendTransactionMethod for RpcRequestClient { + async fn send_transaction( + &self, + transaction: &Arc, + ) -> Result { + let tx_data = hex::encode(transaction.zcash_serialize_to_vec()?); + self.json_result_from_call("sendrawtransaction", format!(r#"["{tx_data}"]"#)) + .await + } +} diff --git a/zebrad/tests/common/lightwalletd/sync.rs b/zebrad/tests/common/lightwalletd/sync.rs index 8dce05a9150..3a55aedb5c2 100644 --- a/zebrad/tests/common/lightwalletd/sync.rs +++ b/zebrad/tests/common/lightwalletd/sync.rs @@ -32,7 +32,7 @@ pub fn wait_for_zebrad_and_lightwalletd_sync< wait_for_zebrad_mempool: bool, wait_for_zebrad_tip: bool, ) -> Result<(TestChild, TestChild

)> { - let is_zebrad_finished = AtomicBool::new(false); + let is_zebrad_finished = AtomicBool::new(!wait_for_zebrad_tip); let is_lightwalletd_finished = AtomicBool::new(false); let is_zebrad_finished = &is_zebrad_finished; diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs index 702bb740142..a26ada3f9c3 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs @@ -37,11 +37,14 @@ use color_eyre::eyre::Result; use hex_literal::hex; use zebra_chain::{ - block::Block, - parameters::Network, - parameters::NetworkUpgrade::{Nu5, Sapling}, + block::{Block, Height}, + parameters::{ + Network, + NetworkUpgrade::{Nu5, Sapling}, + }, serialization::ZcashDeserializeInto, }; +use zebra_consensus::funding_stream_address; use zebra_state::state_database_format_version_in_code; use crate::common::{ @@ -291,60 +294,89 @@ pub async fn run() -> Result<()> { // For the provided address in the first 10 blocks there are 10 transactions in the mainnet assert_eq!(10, counter); - // Call `GetTaddressBalance` with the ZF funding stream address - let balance = rpc_client - .get_taddress_balance(AddressList { - addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()], - }) - .await? - .into_inner(); - - // With ZFND or Major Grants funding stream address, the balance will always be greater than zero, - // because new coins are created in each block - assert!(balance.value_zat > 0); - - // Call `GetTaddressBalanceStream` with the ZF funding stream address as a stream argument - let zf_stream_address = Address { - address: "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string(), - }; - - let balance_zf = rpc_client - .get_taddress_balance_stream(tokio_stream::iter(vec![zf_stream_address.clone()])) - .await? 
- .into_inner(); - - // With ZFND funding stream address, the balance will always be greater than zero, - // because new coins are created in each block - assert!(balance_zf.value_zat > 0); - - // Call `GetTaddressBalanceStream` with the MG funding stream address as a stream argument - let mg_stream_address = Address { - address: "t3XyYW8yBFRuMnfvm5KLGFbEVz25kckZXym".to_string(), - }; - - let balance_mg = rpc_client - .get_taddress_balance_stream(tokio_stream::iter(vec![mg_stream_address.clone()])) - .await? - .into_inner(); + let lwd_tip_height: Height = u32::try_from(block_tip.height) + .expect("should be below max block height") + .try_into() + .expect("should be below max block height"); + + let mut all_stream_addresses = Vec::new(); + let mut all_balance_streams = Vec::new(); + for &fs_receiver in network.funding_streams(lwd_tip_height).recipients().keys() { + let Some(fs_address) = funding_stream_address(lwd_tip_height, &network, fs_receiver) else { + // Skip if the lightwalletd tip height is above the funding stream end height. + continue; + }; + + tracing::info!(?fs_address, "getting balance for active fs address"); + + // Call `GetTaddressBalance` with the active funding stream address. + let balance = rpc_client + .get_taddress_balance(AddressList { + addresses: vec![fs_address.to_string()], + }) + .await? + .into_inner(); + + // Call `GetTaddressBalanceStream` with the active funding stream address as a stream argument. + let stream_address = Address { + address: fs_address.to_string(), + }; + + let balance_stream = rpc_client + .get_taddress_balance_stream(tokio_stream::iter(vec![stream_address.clone()])) + .await? + .into_inner(); + + // With any active funding stream address, the balance will always be greater than zero for blocks + // below the funding stream end height because new coins are created in each block. 
+ assert!(balance.value_zat > 0); + assert!(balance_stream.value_zat > 0); + + all_stream_addresses.push(stream_address); + all_balance_streams.push(balance_stream.value_zat); + + // Call `GetAddressUtxos` with the active funding stream address that will always have utxos + let utxos = rpc_client + .get_address_utxos(GetAddressUtxosArg { + addresses: vec![fs_address.to_string()], + start_height: 1, + max_entries: 1, + }) + .await? + .into_inner(); + + // As we requested one entry we should get a response of length 1 + assert_eq!(utxos.address_utxos.len(), 1); + + // Call `GetAddressUtxosStream` with the active funding stream address that will always have utxos + let mut utxos_zf = rpc_client + .get_address_utxos_stream(GetAddressUtxosArg { + addresses: vec![fs_address.to_string()], + start_height: 1, + max_entries: 2, + }) + .await? + .into_inner(); + + let mut counter = 0; + while let Some(_utxos) = utxos_zf.message().await? { + counter += 1; + } + // As we are in a "in sync" chain we know there are more than 2 utxos for this address (coinbase maturity rule) + // but we will receive the max of 2 from the stream response because we used a limit of 2 `max_entries`. + assert_eq!(2, counter); + } - // With Major Grants funding stream address, the balance will always be greater than zero, - // because new coins are created in each block - assert!(balance_mg.value_zat > 0); + if let Some(expected_total_balance) = all_balance_streams.into_iter().reduce(|a, b| a + b) { + // Call `GetTaddressBalanceStream` for all active funding stream addresses as a stream argument. + let total_balance = rpc_client + .get_taddress_balance_stream(tokio_stream::iter(all_stream_addresses)) + .await? + .into_inner(); - // Call `GetTaddressBalanceStream` with both, the ZFND and the MG funding stream addresses as a stream argument - let balance_both = rpc_client - .get_taddress_balance_stream(tokio_stream::iter(vec![ - zf_stream_address, - mg_stream_address, - ])) - .await? 
- .into_inner(); - - // The result is the sum of the values in both addresses - assert_eq!( - balance_both.value_zat, - balance_zf.value_zat + balance_mg.value_zat - ); + // The result should be the sum of the values in all active funding stream addresses. + assert_eq!(total_balance.value_zat, expected_total_balance); + } let sapling_treestate_init_height = sapling_activation_height + 1; @@ -374,37 +406,6 @@ pub async fn run() -> Result<()> { *zebra_test::vectors::SAPLING_TREESTATE_MAINNET_419201_STRING ); - // Call `GetAddressUtxos` with the ZF funding stream address that will always have utxos - let utxos = rpc_client - .get_address_utxos(GetAddressUtxosArg { - addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()], - start_height: 1, - max_entries: 1, - }) - .await? - .into_inner(); - - // As we requested one entry we should get a response of length 1 - assert_eq!(utxos.address_utxos.len(), 1); - - // Call `GetAddressUtxosStream` with the ZF funding stream address that will always have utxos - let mut utxos_zf = rpc_client - .get_address_utxos_stream(GetAddressUtxosArg { - addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()], - start_height: 1, - max_entries: 2, - }) - .await? - .into_inner(); - - let mut counter = 0; - while let Some(_utxos) = utxos_zf.message().await? { - counter += 1; - } - // As we are in a "in sync" chain we know there are more than 2 utxos for this address - // but we will receive the max of 2 from the stream response because we used a limit of 2 `max_entries`. 
- assert_eq!(2, counter); - // Call `GetLightdInfo` let lightd_info = rpc_client.get_lightd_info(Empty {}).await?.into_inner(); From 568b25e590ab98a6e6e46125b7ea07799c319910 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:29:44 +0000 Subject: [PATCH 038/245] build(deps): bump the prod group with 10 updates (#9077) * build(deps): bump the prod group with 10 updates Bumps the prod group with 10 updates: | Package | From | To | | --- | --- | --- | | [clap](https://github.com/clap-rs/clap) | `4.5.21` | `4.5.23` | | [chrono](https://github.com/chronotope/chrono) | `0.4.38` | `0.4.39` | | [tokio](https://github.com/tokio-rs/tokio) | `1.41.1` | `1.42.0` | | [tokio-stream](https://github.com/tokio-rs/tokio) | `0.1.16` | `0.1.17` | | [tower](https://github.com/tower-rs/tower) | `0.4.13` | `0.5.1` | | [thiserror](https://github.com/dtolnay/thiserror) | `2.0.3` | `2.0.6` | | [tracing-subscriber](https://github.com/tokio-rs/tracing) | `0.3.18` | `0.3.19` | | [prost](https://github.com/tokio-rs/prost) | `0.13.3` | `0.13.4` | | [primitive-types](https://github.com/paritytech/parity-common) | `0.12.2` | `0.13.1` | | [tokio-util](https://github.com/tokio-rs/tokio) | `0.7.12` | `0.7.13` | Updates `clap` from 4.5.21 to 4.5.23 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.21...clap_complete-v4.5.23) Updates `chrono` from 0.4.38 to 0.4.39 - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.38...v0.4.39) Updates `tokio` from 1.41.1 to 1.42.0 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.1...tokio-1.42.0) Updates `tokio-stream` 
from 0.1.16 to 0.1.17 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-stream-0.1.16...tokio-stream-0.1.17) Updates `tower` from 0.4.13 to 0.5.1 - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/tower-0.4.13...tower-0.5.1) Updates `thiserror` from 2.0.3 to 2.0.6 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/2.0.3...2.0.6) Updates `tracing-subscriber` from 0.3.18 to 0.3.19 - [Release notes](https://github.com/tokio-rs/tracing/releases) - [Commits](https://github.com/tokio-rs/tracing/compare/tracing-subscriber-0.3.18...tracing-subscriber-0.3.19) Updates `prost` from 0.13.3 to 0.13.4 - [Release notes](https://github.com/tokio-rs/prost/releases) - [Changelog](https://github.com/tokio-rs/prost/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/prost/compare/v0.13.3...v0.13.4) Updates `primitive-types` from 0.12.2 to 0.13.1 - [Commits](https://github.com/paritytech/parity-common/commits/primitive-types-v0.13.1) Updates `tokio-util` from 0.7.12 to 0.7.13 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.12...tokio-util-0.7.13) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: tokio-stream dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tower dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod 
- dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tracing-subscriber dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: prost dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: primitive-types dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: tokio-util dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod ... Signed-off-by: dependabot[bot] * downgrade tower and primitive-types --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 86 +++++++++++++++++----------------- tower-batch-control/Cargo.toml | 6 +-- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 8 ++-- zebra-consensus/Cargo.toml | 10 ++-- zebra-grpc/Cargo.toml | 6 +-- zebra-network/Cargo.toml | 12 ++--- zebra-node-services/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 12 ++--- zebra-scan/Cargo.toml | 8 ++-- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 8 ++-- zebra-test/Cargo.toml | 6 +-- zebra-utils/Cargo.toml | 6 +-- zebrad/Cargo.toml | 18 +++---- 15 files changed, 96 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6af563af25..78a19264e9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.5.21", + "clap 4.5.23", "color-eyre", "fs-err", "once_cell", @@ -741,9 +741,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -818,9 +818,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -828,9 +828,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -852,9 +852,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "color-eyre" @@ -987,7 +987,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.21", + "clap 4.5.23", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1621,7 +1621,7 @@ dependencies = [ "indexmap 2.7.0", "slab", "tokio", - "tokio-util 0.7.12", + "tokio-util 0.7.13", "tracing", ] @@ -3197,9 +3197,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" dependencies = [ "bytes", "prost-derive", @@ -3228,9 +3228,9 @@ dependencies = 
[ [[package]] name = "prost-derive" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", "itertools 0.13.0", @@ -3613,7 +3613,7 @@ dependencies = [ "sync_wrapper 1.0.1", "tokio", "tokio-rustls", - "tokio-util 0.7.12", + "tokio-util 0.7.13", "tower-service", "url", "wasm-bindgen", @@ -4361,11 +4361,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.6", ] [[package]] @@ -4381,9 +4381,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", @@ -4474,9 +4474,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -4515,14 +4515,14 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" 
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.12", + "tokio-util 0.7.13", ] [[package]] @@ -4554,9 +4554,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -4680,7 +4680,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.12", + "tokio-util 0.7.13", "tower-layer", "tower-service", "tracing", @@ -4714,7 +4714,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-test", - "tokio-util 0.7.12", + "tokio-util 0.7.13", "tower 0.4.13", "tower-fallback", "tower-test", @@ -4872,9 +4872,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -5847,7 +5847,7 @@ dependencies = [ "spandoc", "static_assertions", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tokio", "tracing", @@ -5890,7 +5890,7 @@ dependencies = [ "sapling-crypto", "serde", "spandoc", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tokio", "tower 0.4.13", @@ -5959,10 +5959,10 @@ dependencies = [ "serde", "static_assertions", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.6", "tokio", "tokio-stream", - "tokio-util 0.7.12", + "tokio-util 0.7.13", "toml 0.8.19", "tower 0.4.13", "tracing", @@ -6005,7 +6005,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.6", "tokio", "tokio-stream", "tonic", @@ 
-6076,7 +6076,7 @@ version = "1.0.0-beta.43" dependencies = [ "hex", "lazy_static", - "thiserror 2.0.3", + "thiserror 2.0.6", "zcash_script", "zebra-chain", "zebra-test", @@ -6118,7 +6118,7 @@ dependencies = [ "serde_json", "spandoc", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tokio", "tower 0.4.13", @@ -6146,7 +6146,7 @@ dependencies = [ "regex", "spandoc", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tokio", "tower 0.4.13", @@ -6173,7 +6173,7 @@ dependencies = [ "serde_yml", "structopt", "syn 2.0.90", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tokio", "tracing-error", @@ -6194,7 +6194,7 @@ dependencies = [ "atty", "bytes", "chrono", - "clap 4.5.21", + "clap 4.5.23", "color-eyre", "console-subscriber", "dirs", @@ -6229,7 +6229,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.6", "thread-priority", "tinyvec", "tokio", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 52b63b3ec34..9f9dd5661f6 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -26,8 +26,8 @@ futures = "0.3.31" futures-core = "0.3.28" pin-project = "1.1.6" rayon = "1.10.0" -tokio = { version = "1.41.1", features = ["time", "sync", "tracing", "macros"] } -tokio-util = "0.7.12" +tokio = { version = "1.42.0", features = ["time", "sync", "tracing", "macros"] } +tokio-util = "0.7.13" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.41" tracing-futures = "0.2.5" @@ -41,7 +41,7 @@ tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } ed25519-zebra = "4.0.3" rand = "0.8.5" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } tower-test = "0.4.0" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 
97991f315f8..bc20a49ef7a 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -22,6 +22,6 @@ futures-core = "0.3.28" tracing = "0.1.41" [dev-dependencies] -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index d6b0bb02f7f..b7a9b5d9d32 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -105,12 +105,12 @@ zcash_protocol.workspace = true zcash_address.workspace = true # Time -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting static_assertions = "1.1.0" -thiserror = "2.0.3" +thiserror = "2.0.6" tracing = "0.1.41" # Serialization @@ -133,7 +133,7 @@ reddsa = "0.5.1" serde_json = { version = "1.0.133", optional = true } # Production feature async-error and testing feature proptest-impl -tokio = { version = "1.41.1", optional = true } +tokio = { version = "1.42.0", optional = true } # Experimental feature shielded-scan zcash_client_backend = { workspace = true, optional = true } @@ -166,7 +166,7 @@ proptest-derive = "0.5.0" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 6c6d731d3d7..aac5cfc0c06 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -43,7 +43,7 @@ jubjub = "0.10.0" rand = "0.8.5" rayon = "1.10.0" -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } +chrono = { 
version = "0.4.39", default-features = false, features = ["clock", "std"] } lazy_static = "1.4.0" once_cell = "1.20.2" serde = { version = "1.0.215", features = ["serde_derive"] } @@ -51,8 +51,8 @@ serde = { version = "1.0.215", features = ["serde_derive"] } futures = "0.3.31" futures-util = "0.3.28" metrics = "0.24.1" -thiserror = "2.0.3" -tokio = { version = "1.41.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } +thiserror = "2.0.6" +tokio = { version = "1.42.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.41" tracing-futures = "0.2.5" @@ -90,9 +90,9 @@ proptest = "1.4.0" proptest-derive = "0.5.0" spandoc = "0.2.2" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.1" -tracing-subscriber = "0.3.18" +tracing-subscriber = "0.3.19" zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index f21051c8dae..cf01365553f 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -19,10 +19,10 @@ categories = ["cryptography::cryptocurrencies"] futures-util = "0.3.28" tonic = "0.12.3" tonic-reflection = "0.12.3" -prost = "0.13.3" +prost = "0.13.4" serde = { version = "1.0.215", features = ["serde_derive"] } -tokio = { version = "1.41.1", features = ["macros", "rt-multi-thread"] } -tokio-stream = "0.1.16" +tokio = { version = "1.42.0", features = ["macros", "rt-multi-thread"] } +tokio-stream = "0.1.17" tower = { version = "0.4.13", features = ["util", "buffer", "timeout"] } color-eyre = "0.6.3" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index ff83458ef20..86d373fa8d9 100644 --- a/zebra-network/Cargo.toml +++ 
b/zebra-network/Cargo.toml @@ -43,7 +43,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "2.5.0" byteorder = "1.5.0" bytes = "1.9.0" -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" @@ -58,12 +58,12 @@ rayon = "1.10.0" regex = "1.11.0" serde = { version = "1.0.215", features = ["serde_derive"] } tempfile = "3.14.0" -thiserror = "2.0.3" +thiserror = "2.0.6" futures = "0.3.31" -tokio = { version = "1.41.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } -tokio-stream = { version = "0.1.16", features = ["sync", "time"] } -tokio-util = { version = "0.7.12", features = ["codec"] } +tokio = { version = "1.42.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio-stream = { version = "0.1.17", features = ["sync", "time"] } +tokio-util = { version = "0.7.13", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } metrics = "0.24.1" @@ -90,7 +90,7 @@ proptest = "1.4.0" proptest-derive = "0.5.0" static_assertions = "1.1.0" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } toml = "0.8.19" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index bccc34ab91c..d9fcbba5bdd 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -48,7 +48,7 @@ jsonrpc-core = { version = "18.0.0", optional = true } reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } serde = { version = "1.0.215", optional = true } serde_json = { version = 
"1.0.133", optional = true } -tokio = { version = "1.41.1", features = ["time", "sync"] } +tokio = { version = "1.42.0", features = ["time", "sync"] } [dev-dependencies] diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 79f02c60966..56b7f3c60f0 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -53,7 +53,7 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.38", default-features = false, features = [ +chrono = { version = "0.4.39", default-features = false, features = [ "clock", "std", ] } @@ -74,7 +74,7 @@ rand = "0.8.5" # Error handling color-eyre = "0.6.3" -tokio = { version = "1.41.1", features = [ +tokio = { version = "1.42.0", features = [ "time", "rt-multi-thread", "macros", @@ -85,8 +85,8 @@ tower = "0.4.13" # indexer-rpcs dependencies tonic = { version = "0.12.3", optional = true } tonic-reflection = { version = "0.12.3", optional = true } -prost = { version = "0.13.3", optional = true } -tokio-stream = { version = "0.1.16", optional = true } +prost = { version = "0.13.4", optional = true } +tokio-stream = { version = "0.1.17", optional = true } tracing = "0.1.41" @@ -123,8 +123,8 @@ insta = { version = "1.41.1", features = ["redactions", "json", "ron"] } proptest = "1.4.0" -thiserror = "2.0.3" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +thiserror = "2.0.6" +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ "proptest-impl", diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 413dc367319..17bd29baed7 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -65,7 +65,7 @@ indexmap = { version = "2.7.0", features = ["serde"] } itertools = "0.13.0" semver = "1.0.23" serde = { version = "1.0.215", features = ["serde_derive"] } -tokio = { version = "1.41.1", features = ["time"] } +tokio = { version = "1.42.0", features = ["time"] } tower = "0.4.13" tracing = 
"0.1.41" futures = "0.3.31" @@ -83,7 +83,7 @@ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.4 zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43" } -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } # test feature proptest-impl proptest = { version = "1.4.0", optional = true } @@ -99,7 +99,7 @@ zcash_note_encryption = { version = "0.4.0", optional = true } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43", optional = true } # zebra-scanner binary dependencies -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } structopt = "0.3.26" lazy_static = "1.4.0" serde_json = "1.0.133" @@ -111,7 +111,7 @@ zebrad = { path = "../zebrad", version = "2.1.0" } [dev-dependencies] insta = { version = "1.41.1", features = ["ron", "redactions"] } -tokio = { version = "1.41.1", features = ["test-util"] } +tokio = { version = "1.42.0", features = ["test-util"] } proptest = "1.4.0" proptest-derive = "0.5.0" diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 62f437a5f6e..e0d3094ef98 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -18,7 +18,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] zcash_script = "0.2.0" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } -thiserror = "2.0.3" +thiserror = "2.0.6" [dev-dependencies] hex = "0.4.3" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index f844c8c193c..8ca769f6910 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -47,7 +47,7 @@ elasticsearch = [ [dependencies] bincode = "1.3.3" -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } +chrono = { 
version = "0.4.39", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" futures = "0.3.31" hex = "0.4.3" @@ -65,10 +65,10 @@ rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } semver = "1.0.23" serde = { version = "1.0.215", features = ["serde_derive"] } tempfile = "3.14.0" -thiserror = "2.0.3" +thiserror = "2.0.6" rayon = "1.10.0" -tokio = { version = "1.41.1", features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { version = "1.42.0", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.41" @@ -106,7 +106,7 @@ rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index fc125068e69..db111c88f35 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -25,7 +25,7 @@ once_cell = "1.20.2" rand = "0.8.5" regex = "1.11.0" -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.31" @@ -37,9 +37,9 @@ tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "4.1.0" spandoc = "0.2.2" -thiserror = "2.0.3" +thiserror = "2.0.6" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } tracing-error = "0.2.1" tracing = "0.1.41" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index a1451c3b1c6..2dc0a382a04 100644 --- a/zebra-utils/Cargo.toml 
+++ b/zebra-utils/Cargo.toml @@ -91,8 +91,8 @@ structopt = "0.3.26" hex = "0.4.3" serde_json = "1.0.133" tracing-error = "0.2.1" -tracing-subscriber = "0.3.18" -thiserror = "2.0.3" +tracing-subscriber = "0.3.19" +thiserror = "2.0.6" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } @@ -109,7 +109,7 @@ regex = { version = "1.11.0", optional = true } reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.41.1", features = ["full"], optional = true } +tokio = { version = "1.42.0", features = ["full"], optional = true } jsonrpc = { version = "0.18.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 1ed91dd72a3..cb3e417d0cf 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -168,8 +168,8 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.5.21", features = ["cargo"] } -chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] } +clap = { version = "4.5.23", features = ["cargo"] } +chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.7.0" lazy_static = "1.4.0" @@ -179,8 +179,8 @@ toml = "0.8.19" futures = "0.3.31" rayon = "1.10.0" -tokio = { version = "1.41.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } -tokio-stream = { version = "0.1.16", features = ["time"] } +tokio = { version = "1.42.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio-stream = { version = "0.1.17", features = ["time"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.6" 
@@ -189,9 +189,9 @@ color-eyre = { version = "0.6.3", default-features = false, features = ["issue-u # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } -thiserror = "2.0.3" +thiserror = "2.0.6" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } tracing-appender = "0.2.3" tracing-error = "0.2.1" tracing-futures = "0.2.5" @@ -266,11 +266,11 @@ tempfile = "3.14.0" hyper = { version = "1.5.1", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.41.1", features = ["full", "tracing", "test-util"] } -tokio-stream = "0.1.16" +tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio-stream = "0.1.17" # test feature lightwalletd-grpc-tests -prost = "0.13.3" +prost = "0.13.4" tonic = "0.12.3" proptest = "1.4.0" From b0c4d19a7ccb543a47d1c5809bfa72f1ffcf9df0 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 13 Dec 2024 15:01:53 +0100 Subject: [PATCH 039/245] fix(rpc): Refactor `getrawtransaction` & RPC error handling (#9049) * clean-up: simplify the def of `MapServerError` * Use `HexData` instead of `String` for TXIDs * Remove a redundant test We don't need such a test anymore because the deserialization is handled by Serde now. 
* Adjust tests for using `HexData` * Make `height` and `confirmations` optional * Use legacy error codes * fmt * Remove unneeded error codes * Remove `zebra-rpc/src/constants.rs` * Rename `MapServerError` to `MapError` * Rename `OkOrServerError` to `OkOrError` * Allow specifying error codes when mapping errors * Allow setting error codes when mapping options * Use the right error code for `getrawtransaction` * fmt * Add docs for the error conversion traits * Refactor the error handling for `getblock` * Refactor error handling in `sendrawtransaction` * Refactor the error handling for `getblock` * Update the error handling for `getrawtransaction` * Refactor error handling for `z_gettreestate` * Refactor the error handling for address parsing * Refactor the error handling for getrawtransaction * Update `z_gettreestate` snapshots * Cosmetics * Refactor error handling in `getblock` * Refactor error handling in `getblockheader` * Simplify `getrawtransaction` * Check errors for `getrawtransaction` * fmt * Simplify proptests * Fix unit tests for `getaddresstxids` * Fix unit tests for `getaddressutxos` * fix docs * Update snapshots for `getrawtransaction` * Update zebra-rpc/src/server/error.rs Co-authored-by: Arya * Use `transaction::Hash` instead of `HexData` * Simplify error handling * Update zebra-rpc/src/server/error.rs Co-authored-by: Alfredo Garcia * Move a note on performance * Fix a typo * Use `String` instead of `transaction::Hash` * Adjust and add proptests * Reintroduce snapshots for invalid TXIDs * Don't derive `Serialize` & `Deserialize` for txids Deriving `serde::Serialize` & `serde::Deserialize` for `transaction::Hash` was superfluous, and we didn't need it anywhere in the code. 
--------- Co-authored-by: Arya Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/transaction/hash.rs | 3 +- zebra-rpc/src/constants.rs | 44 -- zebra-rpc/src/lib.rs | 1 - zebra-rpc/src/methods.rs | 313 ++++---- zebra-rpc/src/methods/errors.rs | 37 - .../src/methods/get_block_template_rpcs.rs | 93 ++- .../get_block_template.rs | 18 +- zebra-rpc/src/methods/tests/prop.rs | 699 ++++++------------ zebra-rpc/src/methods/tests/snapshot.rs | 65 +- ...aw_transaction_verbosity_0@mainnet_10.snap | 5 - ...aw_transaction_verbosity_0@testnet_10.snap | 5 - ...aw_transaction_verbosity_1@mainnet_10.snap | 9 - ...aw_transaction_verbosity_1@testnet_10.snap | 9 - ...awtransaction_invalid_txid@mainnet_10.snap | 11 + ...awtransaction_invalid_txid@testnet_10.snap | 11 + ...awtransaction_unknown_txid@mainnet_10.snap | 11 + ...awtransaction_unknown_txid@testnet_10.snap | 11 + ...rawtransaction_verbosity=0@mainnet_10.snap | 8 + ...rawtransaction_verbosity=0@testnet_10.snap | 8 + ...rawtransaction_verbosity=1@mainnet_10.snap | 12 + ...rawtransaction_verbosity=1@testnet_10.snap | 12 + ...e_by_non_existent_hash@custom_testnet.snap | 3 +- ...excessive_block_height@custom_testnet.snap | 3 +- ...arsable_hash_or_height@custom_testnet.snap | 3 +- zebra-rpc/src/methods/tests/vectors.rs | 64 +- zebra-rpc/src/server.rs | 1 + zebra-rpc/src/server/error.rs | 118 +++ .../src/server/rpc_call_compatibility.rs | 20 +- zebra-rpc/src/sync.rs | 6 +- zebra-rpc/src/tests/vectors.rs | 27 +- zebrad/tests/common/regtest.rs | 7 +- 31 files changed, 762 insertions(+), 875 deletions(-) delete mode 100644 zebra-rpc/src/constants.rs delete mode 100644 zebra-rpc/src/methods/errors.rs delete mode 100644 zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@mainnet_10.snap delete mode 100644 zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@testnet_10.snap delete mode 100644 
zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@mainnet_10.snap delete mode 100644 zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap create mode 100644 zebra-rpc/src/server/error.rs diff --git a/zebra-chain/src/transaction/hash.rs b/zebra-chain/src/transaction/hash.rs index a7fa60066d2..98ba39692b7 100644 --- a/zebra-chain/src/transaction/hash.rs +++ b/zebra-chain/src/transaction/hash.rs @@ -34,7 +34,6 @@ use std::{fmt, sync::Arc}; use proptest_derive::Arbitrary; use hex::{FromHex, ToHex}; -use serde::{Deserialize, Serialize}; use crate::serialization::{ ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashSerialize, @@ -56,7 +55,7 @@ use super::{txid::TxIdBuilder, AuthDigest, Transaction}; /// /// [ZIP-244]: https://zips.z.cash/zip-0244 /// [Spec: Transaction Identifiers]: https://zips.z.cash/protocol/protocol.pdf#txnidentifiers -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)] +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub struct Hash(pub [u8; 32]); diff 
--git a/zebra-rpc/src/constants.rs b/zebra-rpc/src/constants.rs deleted file mode 100644 index 14f89df6618..00000000000 --- a/zebra-rpc/src/constants.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! Constants for RPC methods and server responses. - -use jsonrpc_core::{Error, ErrorCode}; - -/// The RPC error code used by `zcashd` for incorrect RPC parameters. -/// -/// [`jsonrpc_core`] uses these codes: -/// -/// -/// `node-stratum-pool` mining pool library expects error code `-1` to detect available RPC methods: -/// -pub const INVALID_PARAMETERS_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-1); - -/// The RPC error code used by `zcashd` for missing blocks, when looked up -/// by hash. -pub const INVALID_ADDRESS_OR_KEY_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-5); - -/// The RPC error code used by `zcashd` for missing blocks. -/// -/// `lightwalletd` expects error code `-8` when a block is not found: -/// -pub const MISSING_BLOCK_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-8); - -/// The RPC error code used by `zcashd` when there are no blocks in the state. -/// -/// `lightwalletd` expects error code `0` when there are no blocks in the state. -// -// TODO: find the source code that expects or generates this error -pub const NO_BLOCKS_IN_STATE_ERROR_CODE: ErrorCode = ErrorCode::ServerError(0); - -/// The RPC error used by `zcashd` when there are no blocks in the state. -// -// TODO: find the source code that expects or generates this error text, if there is any -// replace literal Error { ... } with this error -pub fn no_blocks_in_state_error() -> Error { - Error { - code: NO_BLOCKS_IN_STATE_ERROR_CODE, - message: "No blocks in state".to_string(), - data: None, - } -} - -/// When logging parameter data, only log this much data. 
-pub const MAX_PARAMS_LOG_LENGTH: usize = 100; diff --git a/zebra-rpc/src/lib.rs b/zebra-rpc/src/lib.rs index 778788c9edf..a5c2f3e5a17 100644 --- a/zebra-rpc/src/lib.rs +++ b/zebra-rpc/src/lib.rs @@ -5,7 +5,6 @@ #![doc(html_root_url = "https://docs.rs/zebra_rpc")] pub mod config; -pub mod constants; pub mod methods; pub mod queue; pub mod server; diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 52f28d606b3..0d7986f033f 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,7 +6,7 @@ //! Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. -use std::{collections::HashSet, fmt::Debug, sync::Arc}; +use std::{collections::HashSet, fmt::Debug}; use chrono::Utc; use futures::{stream::FuturesOrdered, FutureExt, StreamExt, TryFutureExt}; @@ -34,21 +34,19 @@ use zebra_chain::{ }, }; use zebra_node_services::mempool; -use zebra_state::{HashOrHeight, MinedTx, OutputIndex, OutputLocation, TransactionLocation}; +use zebra_state::{HashOrHeight, OutputIndex, OutputLocation, TransactionLocation}; use crate::{ - constants::{ - INVALID_ADDRESS_OR_KEY_ERROR_CODE, INVALID_PARAMETERS_ERROR_CODE, MISSING_BLOCK_ERROR_CODE, - }, methods::trees::{GetSubtrees, GetTreestate, SubtreeRpcData}, queue::Queue, + server::{ + self, + error::{MapError, OkOrError}, + }, }; -mod errors; pub mod hex_data; -use errors::{MapServerError, OkOrServerError}; - // We don't use a types/ module here, because it is redundant. 
pub mod trees; @@ -291,7 +289,7 @@ pub trait Rpc { #[rpc(name = "getrawtransaction")] fn get_raw_transaction( &self, - txid_hex: String, + txid: String, verbose: Option, ) -> BoxFuture>; @@ -564,7 +562,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let zebra_state::ReadResponse::TipPoolValues { tip_height, @@ -580,7 +578,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { unreachable!("unmatched response to a BlockHeader request") @@ -671,7 +669,7 @@ where let valid_addresses = address_strings.valid_addresses()?; let request = zebra_state::ReadRequest::AddressBalance(valid_addresses); - let response = state.oneshot(request).await.map_server_error()?; + let response = state.oneshot(request).await.map_misc_error()?; match response { zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { @@ -692,11 +690,12 @@ where let queue_sender = self.queue_sender.clone(); async move { - let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex).map_err(|_| { - Error::invalid_params("raw transaction is not specified as a hex string") - })?; + // Reference for the legacy error code: + // + let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex) + .map_error(server::error::LegacyCode::Deserialization)?; let raw_transaction = Transaction::zcash_deserialize(&*raw_transaction_bytes) - .map_err(|_| Error::invalid_params("raw transaction is structurally invalid"))?; + .map_error(server::error::LegacyCode::Deserialization)?; let transaction_hash = raw_transaction.hash(); @@ -707,7 +706,7 @@ where let transaction_parameter = mempool::Gossip::Tx(raw_transaction.into()); let request = mempool::Request::Queue(vec![transaction_parameter]); - let response = mempool.oneshot(request).await.map_server_error()?; + let response = 
mempool.oneshot(request).await.map_misc_error()?; let mut queue_results = match response { mempool::Response::Queued(results) => results, @@ -724,19 +723,30 @@ where .pop() .expect("there should be exactly one item in Vec") .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err)) - .map_server_error()? - .await; + .map_misc_error()? + .await + .map_misc_error()?; tracing::debug!("sent transaction to mempool: {:?}", &queue_result); queue_result - .map_server_error()? .map(|_| SentTransactionHash(transaction_hash)) - .map_server_error() + // Reference for the legacy error code: + // + // Note that this error code might not exactly match the one returned by zcashd + // since zcashd's error code selection logic is more granular. We'd need to + // propagate the error coming from the verifier to be able to return more specific + // error codes. + .map_error(server::error::LegacyCode::Verify) } .boxed() } + // # Performance + // + // `lightwalletd` calls this RPC with verosity 1 for its initial sync of 2 million blocks, the + // performance of this RPC with verbosity 1 significantly affects `lightwalletd`s sync time. 
+ // // TODO: // - use `height_from_signed_int()` to handle negative heights // (this might be better in the state request, because it needs the state height) @@ -745,11 +755,8 @@ where hash_or_height: String, verbosity: Option, ) -> BoxFuture> { - // From - const DEFAULT_GETBLOCK_VERBOSITY: u8 = 1; - let mut state = self.state.clone(); - let verbosity = verbosity.unwrap_or(DEFAULT_GETBLOCK_VERBOSITY); + let verbosity = verbosity.unwrap_or(1); let network = self.network.clone(); let original_hash_or_height = hash_or_height.clone(); @@ -761,29 +768,26 @@ where }; async move { - let hash_or_height: HashOrHeight = hash_or_height.parse().map_server_error()?; + let hash_or_height: HashOrHeight = hash_or_height + .parse() + // Reference for the legacy error code: + // + .map_error(server::error::LegacyCode::InvalidParameter)?; if verbosity == 0 { - // # Performance - // - // This RPC is used in `lightwalletd`'s initial sync of 2 million blocks, - // so it needs to load block data very efficiently. let request = zebra_state::ReadRequest::Block(hash_or_height); let response = state .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; match response { zebra_state::ReadResponse::Block(Some(block)) => { Ok(GetBlock::Raw(block.into())) } - zebra_state::ReadResponse::Block(None) => Err(Error { - code: MISSING_BLOCK_ERROR_CODE, - message: "Block not found".to_string(), - data: None, - }), + zebra_state::ReadResponse::Block(None) => Err("Block not found") + .map_error(server::error::LegacyCode::InvalidParameter), _ => unreachable!("unmatched response to a block request"), } } else if let Some(get_block_header_future) = get_block_header_future { @@ -835,9 +839,9 @@ where } let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); - let tx = match tx_ids_response.map_server_error()? { + let tx = match tx_ids_response.map_misc_error()? 
{ zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids - .ok_or_server_error("Block not found")? + .ok_or_misc_error("block not found")? .iter() .map(|tx_id| tx_id.encode_hex()) .collect(), @@ -846,7 +850,7 @@ where let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); let zebra_state::ReadResponse::OrchardTree(orchard_tree) = - orchard_tree_response.map_server_error()? + orchard_tree_response.map_misc_error()? else { unreachable!("unmatched response to a OrchardTree request"); }; @@ -854,8 +858,7 @@ where let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); // This could be `None` if there's a chain reorg between state queries. - let orchard_tree = - orchard_tree.ok_or_server_error("missing orchard tree for block")?; + let orchard_tree = orchard_tree.ok_or_misc_error("missing Orchard tree")?; let final_orchard_root = match nu5_activation { Some(activation_height) if height >= activation_height => { @@ -895,11 +898,8 @@ where next_block_hash, }) } else { - Err(Error { - code: ErrorCode::InvalidParams, - message: "Invalid verbosity value".to_string(), - data: None, - }) + Err("invalid verbosity value") + .map_error(server::error::LegacyCode::InvalidParameter) } } .boxed() @@ -915,7 +915,9 @@ where let network = self.network.clone(); async move { - let hash_or_height: HashOrHeight = hash_or_height.parse().map_server_error()?; + let hash_or_height: HashOrHeight = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; let zebra_state::ReadResponse::BlockHeader { header, hash, @@ -925,43 +927,42 @@ where .clone() .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) .await - .map_err(|_| Error { - // Compatibility with zcashd. 
Note that since this function - // is reused by getblock(), we return the errors expected - // by it (they differ whether a hash or a height was passed) - code: if hash_or_height.hash().is_some() { - INVALID_ADDRESS_OR_KEY_ERROR_CODE + .map_err(|_| "block height not in best chain") + .map_error( + // ## Compatibility with `zcashd`. + // + // Since this function is reused by getblock(), we return the errors + // expected by it (they differ whether a hash or a height was passed). + if hash_or_height.hash().is_some() { + server::error::LegacyCode::InvalidAddressOrKey } else { - MISSING_BLOCK_ERROR_CODE + server::error::LegacyCode::InvalidParameter }, - message: "block height not in best chain".to_string(), - data: None, - })? + )? else { panic!("unexpected response to BlockHeader request") }; let response = if !verbose { - GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_server_error()?)) + GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_misc_error()?)) } else { let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state .clone() .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) .await - .map_server_error()? + .map_misc_error()? else { panic!("unexpected response to SaplingTree request") }; // This could be `None` if there's a chain reorg between state queries. - let sapling_tree = - sapling_tree.ok_or_server_error("missing sapling tree for block")?; + let sapling_tree = sapling_tree.ok_or_misc_error("missing Sapling tree")?; let zebra_state::ReadResponse::Depth(depth) = state .clone() .oneshot(zebra_state::ReadRequest::Depth(hash)) .await - .map_server_error()? + .map_misc_error()? 
else { panic!("unexpected response to SaplingTree request") }; @@ -1021,14 +1022,14 @@ where self.latest_chain_tip .best_tip_hash() .map(GetBlockHash) - .ok_or_server_error("No blocks in state") + .ok_or_misc_error("No blocks in state") } fn get_best_block_height_and_hash(&self) -> Result { self.latest_chain_tip .best_tip_height_and_hash() .map(|(height, hash)| GetBlockHeightAndHash { height, hash }) - .ok_or_server_error("No blocks in state") + .ok_or_misc_error("No blocks in state") } fn get_raw_mempool(&self) -> BoxFuture>> { @@ -1057,7 +1058,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; match response { #[cfg(feature = "getblocktemplate-rpcs")] @@ -1104,73 +1105,76 @@ where .boxed() } - // TODO: use HexData or SentTransactionHash to handle the transaction ID fn get_raw_transaction( &self, - txid_hex: String, + txid: String, verbose: Option, ) -> BoxFuture> { let mut state = self.state.clone(); let mut mempool = self.mempool.clone(); - let verbose = verbose.unwrap_or(0); - let verbose = verbose != 0; + let verbose = verbose.unwrap_or(0) != 0; async move { - let txid = transaction::Hash::from_hex(txid_hex).map_err(|_| { - Error::invalid_params("transaction ID is not specified as a hex string") - })?; + // Reference for the legacy error code: + // + let txid = transaction::Hash::from_hex(txid) + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; // Check the mempool first. - // - // # Correctness - // - // Transactions are removed from the mempool after they are mined into blocks, - // so the transaction could be just in the mempool, just in the state, or in both. - // (And the mempool and state transactions could have different authorising data.) - // But it doesn't matter which transaction we choose, because the effects are the same. 
- let mut txid_set = HashSet::new(); - txid_set.insert(txid); - let request = mempool::Request::TransactionsByMinedId(txid_set); - - let response = mempool + match mempool .ready() - .and_then(|service| service.call(request)) + .and_then(|service| { + service.call(mempool::Request::TransactionsByMinedId([txid].into())) + }) .await - .map_server_error()?; - - match response { - mempool::Response::Transactions(unmined_transactions) => { - if !unmined_transactions.is_empty() { - let tx = unmined_transactions[0].transaction.clone(); - return Ok(GetRawTransaction::from_transaction(tx, None, 0, verbose)); + .map_misc_error()? + { + mempool::Response::Transactions(txns) => { + if let Some(tx) = txns.first() { + let hex = tx.transaction.clone().into(); + + return Ok(if verbose { + GetRawTransaction::Object { + hex, + height: None, + confirmations: None, + } + } else { + GetRawTransaction::Raw(hex) + }); } } - _ => unreachable!("unmatched response to a transactionids request"), + + _ => unreachable!("unmatched response to a `TransactionsByMinedId` request"), }; - // Now check the state - let request = zebra_state::ReadRequest::Transaction(txid); - let response = state + // If the tx wasn't in the mempool, check the state. + match state .ready() - .and_then(|service| service.call(request)) + .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) .await - .map_server_error()?; + .map_misc_error()? 
+ { + zebra_state::ReadResponse::Transaction(Some(tx)) => { + let hex = tx.tx.into(); + + Ok(if verbose { + GetRawTransaction::Object { + hex, + height: Some(tx.height.0), + confirmations: Some(tx.confirmations), + } + } else { + GetRawTransaction::Raw(hex) + }) + } - match response { - zebra_state::ReadResponse::Transaction(Some(MinedTx { - tx, - height, - confirmations, - })) => Ok(GetRawTransaction::from_transaction( - tx, - Some(height), - confirmations, - verbose, - )), zebra_state::ReadResponse::Transaction(None) => { - Err("Transaction not found").map_server_error() + Err("No such mempool or main chain transaction") + .map_error(server::error::LegacyCode::InvalidAddressOrKey) } - _ => unreachable!("unmatched response to a transaction request"), + + _ => unreachable!("unmatched response to a `Transaction` read request"), } } .boxed() @@ -1184,8 +1188,11 @@ where let network = self.network.clone(); async move { - // Convert the [`hash_or_height`] string into an actual hash or height. - let hash_or_height = hash_or_height.parse().map_server_error()?; + // Reference for the legacy error code: + // + let hash_or_height = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidParameter)?; // Fetch the block referenced by [`hash_or_height`] from the state. // @@ -1199,15 +1206,14 @@ where .ready() .and_then(|service| service.call(zebra_state::ReadRequest::Block(hash_or_height))) .await - .map_server_error()? + .map_misc_error()? 
{ zebra_state::ReadResponse::Block(Some(block)) => block, zebra_state::ReadResponse::Block(None) => { - return Err(Error { - code: MISSING_BLOCK_ERROR_CODE, - message: "the requested block was not found".to_string(), - data: None, - }) + // Reference for the legacy error code: + // + return Err("the requested block is not in the main chain") + .map_error(server::error::LegacyCode::InvalidParameter); } _ => unreachable!("unmatched response to a block request"), }; @@ -1231,7 +1237,7 @@ where service.call(zebra_state::ReadRequest::SaplingTree(hash.into())) }) .await - .map_server_error()? + .map_misc_error()? { zebra_state::ReadResponse::SaplingTree(tree) => tree.map(|t| t.to_rpc_bytes()), _ => unreachable!("unmatched response to a Sapling tree request"), @@ -1248,7 +1254,7 @@ where service.call(zebra_state::ReadRequest::OrchardTree(hash.into())) }) .await - .map_server_error()? + .map_misc_error()? { zebra_state::ReadResponse::OrchardTree(tree) => tree.map(|t| t.to_rpc_bytes()), _ => unreachable!("unmatched response to an Orchard tree request"), @@ -1281,7 +1287,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let subtrees = match response { zebra_state::ReadResponse::SaplingSubtrees(subtrees) => subtrees, @@ -1307,7 +1313,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let subtrees = match response { zebra_state::ReadResponse::OrchardSubtrees(subtrees) => subtrees, @@ -1329,7 +1335,7 @@ where }) } else { Err(Error { - code: INVALID_PARAMETERS_ERROR_CODE, + code: server::error::LegacyCode::Misc.into(), message: format!("invalid pool name, must be one of: {:?}", POOL_LIST), data: None, }) @@ -1367,7 +1373,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let hashes = match response { zebra_state::ReadResponse::AddressesTransactionIds(hashes) => { @@ -1414,7 +1420,7 @@ 
where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_misc_error()?; let utxos = match response { zebra_state::ReadResponse::AddressUtxos(utxos) => utxos, _ => unreachable!("unmatched response to a UtxosByAddresses request"), @@ -1492,7 +1498,7 @@ where { latest_chain_tip .best_tip_height() - .ok_or_server_error("No blocks in state") + .ok_or_misc_error("No blocks in state") } /// Response to a `getinfo` RPC request. @@ -1586,13 +1592,15 @@ impl AddressStrings { /// - check if provided list have all valid transparent addresses. /// - return valid addresses as a set of `Address`. pub fn valid_addresses(self) -> Result> { + // Reference for the legacy error code: + // let valid_addresses: HashSet

= self .addresses .into_iter() .map(|address| { - address.parse().map_err(|error| { - Error::invalid_params(format!("invalid address {address:?}: {error}")) - }) + address + .parse() + .map_error(server::error::LegacyCode::InvalidAddressOrKey) }) .collect::>()?; @@ -1991,12 +1999,14 @@ pub enum GetRawTransaction { /// The raw transaction, encoded as hex bytes. #[serde(with = "hex")] hex: SerializedTransaction, - /// The height of the block in the best chain that contains the transaction, or -1 if - /// the transaction is in the mempool. - height: i32, - /// The confirmations of the block in the best chain that contains the transaction, - /// or 0 if the transaction is in the mempool. - confirmations: u32, + /// The height of the block in the best chain that contains the tx or `None` if the tx is in + /// the mempool. + #[serde(skip_serializing_if = "Option::is_none")] + height: Option, + /// The height diff between the block containing the tx and the best chain tip + 1 or `None` + /// if the tx is in the mempool. + #[serde(skip_serializing_if = "Option::is_none")] + confirmations: Option, }, } @@ -2006,8 +2016,8 @@ impl Default for GetRawTransaction { hex: SerializedTransaction::from( [0u8; zebra_chain::transaction::MIN_TRANSPARENT_TX_SIZE as usize].to_vec(), ), - height: i32::default(), - confirmations: u32::default(), + height: Option::default(), + confirmations: Option::default(), } } } @@ -2070,33 +2080,6 @@ pub struct GetAddressTxIdsRequest { end: u32, } -impl GetRawTransaction { - /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. 
- #[allow(clippy::unwrap_in_result)] - fn from_transaction( - tx: Arc, - height: Option, - confirmations: u32, - verbose: bool, - ) -> Self { - if verbose { - GetRawTransaction::Object { - hex: tx.into(), - height: match height { - Some(height) => height - .0 - .try_into() - .expect("valid block heights are limited to i32::MAX"), - None => -1, - }, - confirmations, - } - } else { - GetRawTransaction::Raw(tx.into()) - } - } -} - /// Information about the sapling and orchard note commitment trees if any. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] pub struct GetBlockTrees { diff --git a/zebra-rpc/src/methods/errors.rs b/zebra-rpc/src/methods/errors.rs deleted file mode 100644 index be9231d058d..00000000000 --- a/zebra-rpc/src/methods/errors.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Error conversions for Zebra's RPC methods. - -use jsonrpc_core::ErrorCode; - -pub(crate) trait MapServerError { - fn map_server_error(self) -> std::result::Result; -} - -pub(crate) trait OkOrServerError { - fn ok_or_server_error( - self, - message: S, - ) -> std::result::Result; -} - -impl MapServerError for Result -where - E: ToString, -{ - fn map_server_error(self) -> Result { - self.map_err(|error| jsonrpc_core::Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - }) - } -} - -impl OkOrServerError for Option { - fn ok_or_server_error(self, message: S) -> Result { - self.ok_or(jsonrpc_core::Error { - code: ErrorCode::ServerError(0), - message: message.to_string(), - data: None, - }) - } -} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index aed926b3635..42c5d282bed 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -32,35 +32,37 @@ use zebra_network::AddressBookPeers; use zebra_node_services::mempool; use zebra_state::{ReadRequest, ReadResponse}; -use crate::methods::{ - 
best_chain_tip_height, - errors::MapServerError, - get_block_template_rpcs::{ - constants::{ - DEFAULT_SOLUTION_RATE_WINDOW_SIZE, GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - ZCASHD_FUNDING_STREAM_ORDER, - }, - get_block_template::{ - check_miner_address, check_synced_to_tip, fetch_mempool_transactions, - fetch_state_tip_and_local_time, validate_block_proposal, - }, - // TODO: move the types/* modules directly under get_block_template_rpcs, - // and combine any modules with the same names. - types::{ +use crate::{ + methods::{ + best_chain_tip_height, + get_block_template_rpcs::{ + constants::{ + DEFAULT_SOLUTION_RATE_WINDOW_SIZE, GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + ZCASHD_FUNDING_STREAM_ORDER, + }, get_block_template::{ - proposal::TimeSource, proposal_block_from_template, GetBlockTemplate, + check_miner_address, check_synced_to_tip, fetch_mempool_transactions, + fetch_state_tip_and_local_time, validate_block_proposal, + }, + // TODO: move the types/* modules directly under get_block_template_rpcs, + // and combine any modules with the same names. 
+ types::{ + get_block_template::{ + proposal::TimeSource, proposal_block_from_template, GetBlockTemplate, + }, + get_mining_info, + long_poll::LongPollInput, + peer_info::PeerInfo, + submit_block, + subsidy::{BlockSubsidy, FundingStream}, + unified_address, validate_address, z_validate_address, }, - get_mining_info, - long_poll::LongPollInput, - peer_info::PeerInfo, - submit_block, - subsidy::{BlockSubsidy, FundingStream}, - unified_address, validate_address, z_validate_address, }, + height_from_signed_int, + hex_data::HexData, + GetBlockHash, }, - height_from_signed_int, - hex_data::HexData, - GetBlockHash, MISSING_BLOCK_ERROR_CODE, + server::{self, error::MapError}, }; pub mod constants; @@ -584,12 +586,12 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_error(server::error::LegacyCode::default())?; match response { zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), zebra_state::ReadResponse::BlockHash(None) => Err(Error { - code: MISSING_BLOCK_ERROR_CODE, + code: server::error::LegacyCode::InvalidParameter.into(), message: "Block not found".to_string(), data: None, }), @@ -850,7 +852,7 @@ where Is Zebra shutting down?" 
); - return Err(recv_error).map_server_error(); + return Err(recv_error).map_error(server::error::LegacyCode::default()); } } } @@ -1042,7 +1044,7 @@ where .ready() .and_then(|service| service.call(request)) .await - .map_server_error()?; + .map_error(server::error::LegacyCode::default())?; current_block_size = match response { zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), _ => None, @@ -1231,13 +1233,14 @@ where // Always zero for post-halving blocks let founders = Amount::zero(); - let total_block_subsidy = block_subsidy(height, &network).map_server_error()?; - let miner_subsidy = - miner_subsidy(height, &network, total_block_subsidy).map_server_error()?; + let total_block_subsidy = + block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; + let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())?; let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = funding_stream_values(height, &network, total_block_subsidy) - .map_server_error()? + .map_error(server::error::LegacyCode::default())? .into_iter() // Separate the funding streams into deferred and non-deferred streams .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); @@ -1274,8 +1277,12 @@ where founders: founders.into(), funding_streams, lockbox_streams, - funding_streams_total: funding_streams_total.map_server_error()?.into(), - lockbox_total: lockbox_total.map_server_error()?.into(), + funding_streams_total: funding_streams_total + .map_error(server::error::LegacyCode::default())? + .into(), + lockbox_total: lockbox_total + .map_error(server::error::LegacyCode::default())? 
+ .into(), total_block_subsidy: total_block_subsidy.into(), }) } @@ -1436,7 +1443,10 @@ where let mut block_hashes = Vec::new(); for _ in 0..num_blocks { - let block_template = rpc.get_block_template(None).await.map_server_error()?; + let block_template = rpc + .get_block_template(None) + .await + .map_error(server::error::LegacyCode::default())?; let get_block_template::Response::TemplateMode(block_template) = block_template else { @@ -1452,14 +1462,17 @@ where TimeSource::CurTime, NetworkUpgrade::current(&network, Height(block_template.height)), ) - .map_server_error()?; - let hex_proposal_block = - HexData(proposal_block.zcash_serialize_to_vec().map_server_error()?); + .map_error(server::error::LegacyCode::default())?; + let hex_proposal_block = HexData( + proposal_block + .zcash_serialize_to_vec() + .map_error(server::error::LegacyCode::default())?, + ); let _submit = rpc .submit_block(hex_proposal_block, None) .await - .map_server_error()?; + .map_error(server::error::LegacyCode::default())?; block_hashes.push(GetBlockHash(proposal_block.hash())); } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 7ab1a48e20a..3a934d629ff 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -25,12 +25,12 @@ use zebra_consensus::{ use zebra_node_services::mempool::{self, TransactionDependencies}; use zebra_state::GetBlockTemplateChainInfo; -use crate::methods::{ - errors::OkOrServerError, - get_block_template_rpcs::{ +use crate::{ + methods::get_block_template_rpcs::{ constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE}, types::{default_roots::DefaultRoots, transaction::TransactionTemplate}, }, + server::error::OkOrError, }; pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; @@ -87,13 +87,9 @@ pub fn 
check_parameters(parameters: &Option) -> Result<()> { pub fn check_miner_address( miner_address: Option, ) -> Result { - miner_address.ok_or_else(|| Error { - code: ErrorCode::ServerError(0), - message: "configure mining.miner_address in zebrad.toml \ - with a transparent address" - .to_string(), - data: None, - }) + miner_address.ok_or_misc_error( + "set `mining.miner_address` in `zebrad.toml` to a transparent address".to_string(), + ) } /// Attempts to validate block proposal against all of the server's @@ -181,7 +177,7 @@ where // but this is ok for an estimate let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip .estimate_distance_to_network_chain_tip(network) - .ok_or_server_error("no chain tip available yet")?; + .ok_or_misc_error("no chain tip available yet")?; if !sync_status.is_close_to_tip() || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 9435a68ac7e..0af5e03b0b9 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -1,9 +1,9 @@ //! Randomised property tests for RPC methods. 
-use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; use futures::{join, FutureExt, TryFutureExt}; -use hex::ToHex; +use hex::{FromHex, ToHex}; use jsonrpc_core::{Error, ErrorCode}; use proptest::{collection::vec, prelude::*}; use thiserror::Error; @@ -13,11 +13,8 @@ use tower::buffer::Buffer; use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, Height}, - chain_tip::{mock::MockChainTip, NoChainTip}, - parameters::{ - Network::{self, *}, - NetworkUpgrade, - }, + chain_tip::{mock::MockChainTip, ChainTip, NoChainTip}, + parameters::{Network, NetworkUpgrade}, serialization::{ZcashDeserialize, ZcashSerialize}, transaction::{self, Transaction, UnminedTx, VerifiedUnminedTx}, transparent, @@ -28,6 +25,8 @@ use zebra_state::BoxError; use zebra_test::mock_service::MockService; +use crate::methods; + use super::super::{ AddressBalance, AddressStrings, NetworkUpgradeStatus, Rpc, RpcImpl, SentTransactionHash, }; @@ -35,27 +34,19 @@ use super::super::{ proptest! { /// Test that when sending a raw transaction, it is received by the mempool service. #[test] - fn mempool_receives_raw_transaction(transaction in any::()) { + fn mempool_receives_raw_tx(transaction in any::(), network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); + + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. 
+ tokio::time::pause(); runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); let hash = SentTransactionHash(transaction.hash()); - let transaction_bytes = transaction - .zcash_serialize_to_vec() - .expect("Transaction serializes successfully"); + let transaction_bytes = transaction.zcash_serialize_to_vec()?; + let transaction_hex = hex::encode(&transaction_bytes); let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); @@ -73,17 +64,14 @@ proptest! { state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); + let result = send_task.await?; prop_assert_eq!(result, Ok(hash)); // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } @@ -91,27 +79,16 @@ proptest! { /// /// Mempool service errors should become server errors. 
#[test] - fn mempool_errors_are_forwarded(transaction in any::()) { + fn mempool_errors_are_forwarded(transaction in any::(), network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); - runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); - let transaction_bytes = transaction - .zcash_serialize_to_vec() - .expect("Transaction serializes successfully"); + runtime.block_on(async move { + let transaction_bytes = transaction.zcash_serialize_to_vec()?; let transaction_hex = hex::encode(&transaction_bytes); let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex.clone())); @@ -126,20 +103,9 @@ proptest! { state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::ServerError(_), - .. - }) - ), - "Result is not a server error: {result:?}" - ); + let result = send_task.await?; + + check_err_code(result, ErrorCode::ServerError(-1))?; let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); @@ -152,87 +118,44 @@ proptest! { .await? .respond(Ok::<_, BoxError>(mempool::Response::Queued(vec![Ok(rsp_rx)]))); - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::ServerError(_), - .. 
- }) - ), - "Result is not a server error: {result:?}" - ); + let result = send_task.await?; + + check_err_code(result, ErrorCode::ServerError(-25))?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } /// Test that when the mempool rejects a transaction the caller receives an error. #[test] - fn rejected_transactions_are_reported(transaction in any::()) { + fn rejected_txs_are_reported(transaction in any::(), network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); - runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - - let transaction_bytes = transaction - .zcash_serialize_to_vec() - .expect("Transaction serializes successfully"); - let transaction_hex = hex::encode(&transaction_bytes); + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. 
+ tokio::time::pause(); - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); + runtime.block_on(async move { + let tx = hex::encode(&transaction.zcash_serialize_to_vec()?); + let req = mempool::Request::Queue(vec![UnminedTx::from(transaction).into()]); + let rsp = mempool::Response::Queued(vec![Err(DummyError.into())]); + let mempool_query = mempool.expect_request(req).map_ok(|r| r.respond(rsp)); - let unmined_transaction = UnminedTx::from(transaction); - let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]); - let response = mempool::Response::Queued(vec![Err(DummyError.into())]); + let (rpc_rsp, _) = tokio::join!(rpc.send_raw_transaction(tx), mempool_query); - mempool - .expect_request(expected_request) - .await? - .respond(response); + check_err_code(rpc_rsp, ErrorCode::ServerError(-1))?; + // Check that no state request was made. state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::ServerError(_), - .. - }) - ), - "Result is not a server error: {result:?}" - ); - // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } @@ -241,53 +164,27 @@ proptest! { /// Try to call `send_raw_transaction` using a string parameter that has at least one /// non-hexadecimal character, and check that it fails with an expected error. 
#[test] - fn non_hexadecimal_string_results_in_an_error(non_hex_string in ".*[^0-9A-Fa-f].*") { + fn non_hex_string_is_error(non_hex_string in ".*[^0-9A-Fa-f].*", network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. tokio::time::pause(); runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - let send_task = tokio::spawn(rpc.send_raw_transaction(non_hex_string)); + // Check that there are no further requests. mempool.expect_no_requests().await?; state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::InvalidParams, - .. - }) - ), - "Result is not an invalid parameters error: {result:?}" - ); + check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } @@ -296,9 +193,10 @@ proptest! { /// Try to call `send_raw_transaction` using random bytes that fail to deserialize as a /// transaction, and check that it fails with an expected error. 
#[test] - fn invalid_transaction_results_in_an_error(random_bytes in any::>()) { + fn invalid_tx_results_in_an_error(random_bytes in any::>(), network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. tokio::time::pause(); @@ -306,45 +204,17 @@ proptest! { prop_assume!(Transaction::zcash_deserialize(&*random_bytes).is_err()); runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - let send_task = tokio::spawn(rpc.send_raw_transaction(hex::encode(random_bytes))); mempool.expect_no_requests().await?; state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::InvalidParams, - .. - }) - ), - "Result is not an invalid parameters error: {result:?}" - ); + check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } @@ -353,33 +223,18 @@ proptest! { /// Make the mock mempool service return a list of transaction IDs, and check that the RPC call /// returns those IDs as hexadecimal strings. 
#[test] - fn mempool_transactions_are_sent_to_caller(transactions in any::>()) { + fn mempool_transactions_are_sent_to_caller(transactions in any::>(), + network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. tokio::time::pause(); runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - - let call_task = tokio::spawn(rpc.get_raw_mempool()); - - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let expected_response = { + let (expected_response, mempool_query) = { let transaction_ids: HashSet<_> = transactions .iter() .map(|tx| tx.transaction.id) @@ -391,18 +246,18 @@ proptest! { .collect(); expected_response.sort(); - mempool + let mempool_query = mempool .expect_request(mempool::Request::TransactionIds) - .await? - .respond(mempool::Response::TransactionIds(transaction_ids)); + .map_ok(|r|r.respond(mempool::Response::TransactionIds(transaction_ids))); - expected_response + (expected_response, mempool_query) }; // Note: this depends on `SHOULD_USE_ZCASHD_ORDER` being true. #[cfg(feature = "getblocktemplate-rpcs")] - let expected_response = { + let (expected_response, mempool_query) = { let mut expected_response = transactions.clone(); + expected_response.sort_by_cached_key(|tx| { // zcashd uses modified fee here but Zebra doesn't currently // support prioritizing transactions @@ -416,151 +271,80 @@ proptest! 
{ let expected_response = expected_response .iter() - .map(|tx| tx.transaction.id.mined_id().encode_hex()) - .collect(); + .map(|tx| tx.transaction.id.mined_id().encode_hex::()) + .collect::>(); - mempool + let mempool_query = mempool .expect_request(mempool::Request::FullTransactions) - .await? - .respond(mempool::Response::FullTransactions { + .map_ok(|r| r.respond(mempool::Response::FullTransactions { transactions, transaction_dependencies: Default::default(), last_seen_tip_hash: [0; 32].into(), - }); + })); - expected_response + (expected_response, mempool_query) }; - mempool.expect_no_requests().await?; - state.expect_no_requests().await?; + let (rpc_rsp, _) = tokio::join!(rpc.get_raw_mempool(), mempool_query); - let result = call_task - .await - .expect("Sending raw transactions should not panic"); + prop_assert_eq!(rpc_rsp?, expected_response); - prop_assert_eq!(result, Ok(expected_response)); + mempool.expect_no_requests().await?; + state.expect_no_requests().await?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } - /// Test that the method rejects non-hexadecimal characters. + /// Calls `get_raw_transaction` with: /// - /// Try to call `get_raw_transaction` using a string parameter that has at least one - /// non-hexadecimal character, and check that it fails with an expected error. + /// 1. an invalid TXID that won't deserialize; + /// 2. a valid TXID that is not in the mempool nor in the state; + /// + /// and checks that the RPC returns the right error code. 
#[test] - fn get_raw_transaction_non_hexadecimal_string_results_in_an_error( - non_hex_string in ".*[^0-9A-Fa-f].*", - ) { + fn check_err_for_get_raw_transaction(unknown_txid: transaction::Hash, + invalid_txid in invalid_txid(), + network: Network) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. tokio::time::pause(); runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); + // Check the invalid TXID first. + let rpc_rsp = rpc.get_raw_transaction(invalid_txid, Some(1)).await; - let send_task = tokio::spawn(rpc.get_raw_transaction(non_hex_string, Some(0))); + check_err_code(rpc_rsp, ErrorCode::ServerError(-5))?; + // Check that no further requests were made. mempool.expect_no_requests().await?; state.expect_no_requests().await?; - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::InvalidParams, - .. - }) - ), - "Result is not an invalid parameters error: {result:?}" - ); - - // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + // Now check the unknown TXID. 
+ let mempool_query = mempool + .expect_request(mempool::Request::TransactionsByMinedId([unknown_txid].into())) + .map_ok(|r| r.respond(mempool::Response::Transactions(vec![]))); - Ok::<_, TestCaseError>(()) - })?; - } - - /// Test that the method rejects an input that's not a transaction. - /// - /// Try to call `get_raw_transaction` using random bytes that fail to deserialize as a - /// transaction, and check that it fails with an expected error. - #[test] - fn get_raw_transaction_invalid_transaction_results_in_an_error( - random_bytes in any::>(), - ) { - let (runtime, _init_guard) = zebra_test::init_async(); - let _guard = runtime.enter(); - - // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. - tokio::time::pause(); - - prop_assume!(transaction::Hash::zcash_deserialize(&*random_bytes).is_err()); - - runtime.block_on(async move { - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); + let state_query = state + .expect_request(zebra_state::ReadRequest::Transaction(unknown_txid)) + .map_ok(|r| r.respond(zebra_state::ReadResponse::Transaction(None))); - let send_task = tokio::spawn(rpc.get_raw_transaction(hex::encode(random_bytes), Some(0))); + let rpc_query = rpc.get_raw_transaction(unknown_txid.encode_hex(), Some(1)); - mempool.expect_no_requests().await?; - state.expect_no_requests().await?; + let (rpc_rsp, _, _) = tokio::join!(rpc_query, mempool_query, state_query); - let result = send_task - .await - .expect("Sending raw transactions should not panic"); - - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::InvalidParams, - .. 
- }) - ), - "Result is not an invalid parameters error: {result:?}" - ); + check_err_code(rpc_rsp, ErrorCode::ServerError(-5))?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } @@ -569,21 +353,10 @@ proptest! { fn get_blockchain_info_response_without_a_chain_tip(network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - // look for an error with a `NoChainTip` - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - network, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); runtime.block_on(async move { let response_fut = rpc.get_blockchain_info(); @@ -605,14 +378,13 @@ proptest! { "no chain tip available yet" ); - // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); - mempool.expect_no_requests().await?; state.expect_no_requests().await?; - Ok::<_, TestCaseError>(()) + // The queue task should continue without errors or panics + prop_assert!(mempool_tx_queue.now_or_never().is_none()); + + Ok(()) })?; } @@ -624,26 +396,17 @@ proptest! 
{ ) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = + mock_services(network.clone(), NoChainTip); + + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); // get arbitrary chain tip data let block_height = block.coinbase_height().unwrap(); let block_hash = block.hash(); let block_time = block.header.time; - // Start RPC with the mocked `ChainTip` - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - network.clone(), - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - // check no requests were made during this test runtime.block_on(async move { let response_fut = rpc.get_blockchain_info(); @@ -718,14 +481,13 @@ proptest! { } }; - // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); - mempool.expect_no_requests().await?; state.expect_no_requests().await?; - Ok::<_, TestCaseError>(()) + // The queue task should continue without errors or panics + prop_assert!(mempool_tx_queue.now_or_never().is_none()); + + Ok(()) })?; } @@ -738,12 +500,11 @@ proptest! { ) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); - - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - // Create a mocked `ChainTip` let (chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, chain_tip); + + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. 
+ tokio::time::pause(); // Prepare the list of addresses. let address_strings = AddressStrings { @@ -753,21 +514,8 @@ proptest! { .collect(), }; - tokio::time::pause(); - // Start RPC with the mocked `ChainTip` runtime.block_on(async move { - let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - network, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - chain_tip, - ); - // Build the future to call the RPC let call = rpc.get_address_balance(address_strings); @@ -792,7 +540,10 @@ proptest! { mempool.expect_no_requests().await?; state.expect_no_requests().await?; - Ok::<_, TestCaseError>(()) + // The queue task should continue without errors or panics + prop_assert!(mempool_tx_queue.now_or_never().is_none()); + + Ok(()) })?; } @@ -806,31 +557,17 @@ proptest! { ) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, chain_tip); + + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); prop_assume!(at_least_one_invalid_address .iter() .any(|string| string.parse::().is_err())); - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - // Create a mocked `ChainTip` - let (chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); - - tokio::time::pause(); - - // Start RPC with the mocked `ChainTip` runtime.block_on(async move { - let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - network, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - chain_tip, - ); let address_strings = AddressStrings { addresses: at_least_one_invalid_address, @@ -839,55 +576,32 @@ proptest! 
{ // Build the future to call the RPC let result = rpc.get_address_balance(address_strings).await; - // Check that the invalid addresses lead to an error - prop_assert!( - matches!( - result, - Err(Error { - code: ErrorCode::InvalidParams, - .. - }) - ), - "Result is not a server error: {result:?}" - ); + check_err_code(result, ErrorCode::ServerError(-5))?; // Check no requests were made during this test mempool.expect_no_requests().await?; state.expect_no_requests().await?; - Ok::<_, TestCaseError>(()) + // The queue task should continue without errors or panics + prop_assert!(mempool_tx_queue.now_or_never().is_none()); + + Ok(()) })?; } /// Test the queue functionality using `send_raw_transaction` #[test] - fn rpc_queue_main_loop(tx in any::()) { + fn rpc_queue_main_loop(tx in any::(), network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); - let transaction_hash = tx.hash(); + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); runtime.block_on(async move { - tokio::time::pause(); - - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); - - // send a transaction - let tx_bytes = tx - .zcash_serialize_to_vec() - .expect("Transaction serializes successfully"); + let transaction_hash = tx.hash(); + let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); @@ -901,9 +615,7 @@ proptest! 
{ .unwrap() .respond(Err(DummyError)); - let _ = send_task - .await - .expect("Sending raw transactions should not panic"); + let _ = send_task.await?; // advance enough time to have a new runner iteration let spacing = chrono::Duration::seconds(150); @@ -946,42 +658,28 @@ proptest! { state.expect_no_requests().await?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } /// Test we receive all transactions that are sent in a channel #[test] - fn rpc_queue_receives_all_transactions_from_channel(txs in any::<[Transaction; 2]>()) { + fn rpc_queue_receives_all_txs_from_channel(txs in any::<[Transaction; 2]>(), + network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); - runtime.block_on(async move { - tokio::time::pause(); - - let mut mempool = MockService::build().for_prop_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); - - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", - "RPC test", - Mainnet, - false, - true, - mempool.clone(), - Buffer::new(state.clone(), 1), - NoChainTip, - ); + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); + runtime.block_on(async move { let mut transactions_hash_set = HashSet::new(); for tx in txs.clone() { // send a transaction - let tx_bytes = tx - .zcash_serialize_to_vec() - .expect("Transaction serializes successfully"); + let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); @@ -999,9 +697,7 @@ proptest! 
{ .unwrap() .respond(Err(DummyError)); - let _ = send_task - .await - .expect("Sending raw transactions should not panic"); + let _ = send_task.await?; } // advance enough time to have a new runner iteration @@ -1049,10 +745,9 @@ proptest! { state.expect_no_requests().await?; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(rpc_tx_queue_task_result.is_none()); + prop_assert!(mempool_tx_queue.now_or_never().is_none()); - Ok::<_, TestCaseError>(()) + Ok(()) })?; } } @@ -1060,3 +755,83 @@ proptest! { #[derive(Clone, Copy, Debug, Error)] #[error("a dummy error type")] pub struct DummyError; + +// Helper functions + +/// Creates [`String`]s that won't deserialize into [`transaction::Hash`]. +fn invalid_txid() -> BoxedStrategy { + any::() + .prop_filter("string must not deserialize into TXID", |s| { + transaction::Hash::from_hex(s).is_err() + }) + .boxed() +} + +/// Checks that the given RPC response contains the given error code. +fn check_err_code(rsp: Result, error_code: ErrorCode) -> Result<(), TestCaseError> { + prop_assert!( + matches!(&rsp, Err(Error { code, .. }) if *code == error_code), + "the RPC response must match the error code: {error_code:?}" + ); + + Ok(()) +} + +/// Creates mocked: +/// +/// 1. mempool service, +/// 2. state service, +/// 3. rpc service, +/// +/// and a handle to the mempool tx queue. 
+fn mock_services( + network: Network, + chain_tip: Tip, +) -> ( + zebra_test::mock_service::MockService< + zebra_node_services::mempool::Request, + zebra_node_services::mempool::Response, + zebra_test::mock_service::PropTestAssertion, + >, + zebra_test::mock_service::MockService< + zebra_state::ReadRequest, + zebra_state::ReadResponse, + zebra_test::mock_service::PropTestAssertion, + >, + methods::RpcImpl< + zebra_test::mock_service::MockService< + zebra_node_services::mempool::Request, + zebra_node_services::mempool::Response, + zebra_test::mock_service::PropTestAssertion, + >, + tower::buffer::Buffer< + zebra_test::mock_service::MockService< + zebra_state::ReadRequest, + zebra_state::ReadResponse, + zebra_test::mock_service::PropTestAssertion, + >, + zebra_state::ReadRequest, + >, + Tip, + >, + tokio::task::JoinHandle<()>, +) +where + Tip: ChainTip + Clone + Send + Sync + 'static, +{ + let mempool = MockService::build().for_prop_tests(); + let state = MockService::build().for_prop_tests(); + + let (rpc, mempool_tx_queue) = RpcImpl::new( + "RPC test", + "RPC test", + network, + false, + true, + mempool.clone(), + Buffer::new(state.clone(), 1), + chain_tip, + ); + + (mempool, state, rpc, mempool_tx_queue) +} diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 02c633af260..2bdec5d7497 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -5,7 +5,7 @@ //! cargo insta test --review --release -p zebra-rpc --lib -- test_rpc_response_data //! 
``` -use std::collections::BTreeMap; +use std::{collections::BTreeMap, sync::Arc}; use insta::dynamic_redaction; use tower::buffer::Buffer; @@ -229,12 +229,10 @@ async fn test_rpc_response_data_for_network(network: &Network) { snapshot_rpc_getblockchaininfo("", get_blockchain_info, &settings); // get the first transaction of the first block which is not the genesis - let first_block_first_transaction = &blocks[1].transactions[0]; + let first_block_first_tx = &blocks[1].transactions[0]; // build addresses - let address = &first_block_first_transaction.outputs()[1] - .address(network) - .unwrap(); + let address = &first_block_first_tx.outputs()[1].address(network).unwrap(); let addresses = vec![address.to_string()]; // `getaddressbalance` @@ -407,8 +405,9 @@ async fn test_rpc_response_data_for_network(network: &Network) { // `getrawtransaction` verbosity=0 // - // - similar to `getrawmempool` described above, a mempool request will be made to get the requested - // transaction from the mempool, response will be empty as we have this transaction in state + // - Similarly to `getrawmempool` described above, a mempool request will be made to get the + // requested transaction from the mempool. Response will be empty as we have this transaction + // in the state. 
let mempool_req = mempool .expect_request_that(|request| { matches!(request, mempool::Request::TransactionsByMinedId(_)) @@ -417,13 +416,12 @@ async fn test_rpc_response_data_for_network(network: &Network) { responder.respond(mempool::Response::Transactions(vec![])); }); - // make the api call - let get_raw_transaction = - rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), Some(0u8)); - let (response, _) = futures::join!(get_raw_transaction, mempool_req); - let get_raw_transaction = response.expect("We should have a GetRawTransaction struct"); + let txid = first_block_first_tx.hash().encode_hex::(); - snapshot_rpc_getrawtransaction("verbosity_0", get_raw_transaction, &settings); + let rpc_req = rpc.get_raw_transaction(txid.clone(), Some(0u8)); + let (rsp, _) = futures::join!(rpc_req, mempool_req); + settings.bind(|| insta::assert_json_snapshot!(format!("getrawtransaction_verbosity=0"), rsp)); + mempool.expect_no_requests().await; // `getrawtransaction` verbosity=1 let mempool_req = mempool @@ -434,13 +432,31 @@ async fn test_rpc_response_data_for_network(network: &Network) { responder.respond(mempool::Response::Transactions(vec![])); }); - // make the api call - let get_raw_transaction = - rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), Some(1u8)); - let (response, _) = futures::join!(get_raw_transaction, mempool_req); - let get_raw_transaction = response.expect("We should have a GetRawTransaction struct"); + let rpc_req = rpc.get_raw_transaction(txid, Some(1u8)); + let (rsp, _) = futures::join!(rpc_req, mempool_req); + settings.bind(|| insta::assert_json_snapshot!(format!("getrawtransaction_verbosity=1"), rsp)); + mempool.expect_no_requests().await; + + // `getrawtransaction` with unknown txid + let mempool_req = mempool + .expect_request_that(|request| { + matches!(request, mempool::Request::TransactionsByMinedId(_)) + }) + .map(|responder| { + responder.respond(mempool::Response::Transactions(vec![])); + }); - 
snapshot_rpc_getrawtransaction("verbosity_1", get_raw_transaction, &settings); + let rpc_req = rpc.get_raw_transaction(transaction::Hash::from([0; 32]).encode_hex(), Some(1)); + let (rsp, _) = futures::join!(rpc_req, mempool_req); + settings.bind(|| insta::assert_json_snapshot!(format!("getrawtransaction_unknown_txid"), rsp)); + mempool.expect_no_requests().await; + + // `getrawtransaction` with an invalid TXID + let rsp = rpc + .get_raw_transaction("aBadC0de".to_owned(), Some(1)) + .await; + settings.bind(|| insta::assert_json_snapshot!(format!("getrawtransaction_invalid_txid"), rsp)); + mempool.expect_no_requests().await; // `getaddresstxids` let get_address_tx_ids = rpc @@ -666,17 +682,6 @@ fn snapshot_rpc_getrawmempool(raw_mempool: Vec, settings: &insta::Settin settings.bind(|| insta::assert_json_snapshot!("get_raw_mempool", raw_mempool)); } -/// Snapshot `getrawtransaction` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getrawtransaction( - variant: &'static str, - raw_transaction: GetRawTransaction, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("get_raw_transaction_{variant}"), raw_transaction) - }); -} - /// Snapshot valid `getaddressbalance` response, using `cargo insta` and JSON serialization. 
fn snapshot_rpc_getaddresstxids_valid( variant: &'static str, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@mainnet_10.snap deleted file mode 100644 index fe57f682126..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@mainnet_10.snap +++ /dev/null @@ -1,5 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: raw_transaction ---- -"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@testnet_10.snap deleted file mode 100644 index 6f7145404de..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_0@testnet_10.snap +++ /dev/null @@ -1,5 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: raw_transaction ---- -"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@mainnet_10.snap deleted file mode 100644 index 25091fe3fb5..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@mainnet_10.snap +++ /dev/null @@ -1,9 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: raw_transaction ---- -{ - "hex": 
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", - "height": 1, - "confirmations": 10 -} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@testnet_10.snap deleted file mode 100644 index 61499b2e880..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_raw_transaction_verbosity_1@testnet_10.snap +++ /dev/null @@ -1,9 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: raw_transaction ---- -{ - "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", - "height": 1, - "confirmations": 10 -} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@mainnet_10.snap new file mode 100644 index 00000000000..e048f7ad516 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@mainnet_10.snap @@ -0,0 +1,11 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Err": { + "code": -5, + "message": "Invalid string length" + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@testnet_10.snap new file mode 100644 index 00000000000..e048f7ad516 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_invalid_txid@testnet_10.snap @@ -0,0 +1,11 @@ +--- +source: 
zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Err": { + "code": -5, + "message": "Invalid string length" + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@mainnet_10.snap new file mode 100644 index 00000000000..878c8505a19 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@mainnet_10.snap @@ -0,0 +1,11 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Err": { + "code": -5, + "message": "No such mempool or main chain transaction" + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@testnet_10.snap new file mode 100644 index 00000000000..878c8505a19 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_unknown_txid@testnet_10.snap @@ -0,0 +1,11 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Err": { + "code": -5, + "message": "No such mempool or main chain transaction" + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@mainnet_10.snap new file mode 100644 index 00000000000..90fa5021b56 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@mainnet_10.snap @@ -0,0 +1,8 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Ok": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000" +} diff --git 
a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@testnet_10.snap new file mode 100644 index 00000000000..673c9f7ce89 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=0@testnet_10.snap @@ -0,0 +1,8 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Ok": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000" +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap new file mode 100644 index 00000000000..b78a6686336 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap @@ -0,0 +1,12 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Ok": { + "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", + "height": 1, + "confirmations": 10 + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap new file mode 100644 index 00000000000..ab133db9b1a --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap @@ -0,0 +1,12 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: rsp +snapshot_kind: text +--- +{ + "Ok": { + "hex": 
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", + "height": 1, + "confirmations": 10 + } +} diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_non_existent_hash@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_non_existent_hash@custom_testnet.snap index d0013994ab0..7801c859a27 100644 --- a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_non_existent_hash@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_non_existent_hash@custom_testnet.snap @@ -1,10 +1,11 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs expression: treestate +snapshot_kind: text --- { "Err": { "code": -8, - "message": "the requested block was not found" + "message": "the requested block is not in the main chain" } } diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_excessive_block_height@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_excessive_block_height@custom_testnet.snap index d0013994ab0..7801c859a27 100644 --- a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_excessive_block_height@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_excessive_block_height@custom_testnet.snap @@ -1,10 +1,11 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs expression: treestate +snapshot_kind: text --- { "Err": { "code": -8, - "message": "the requested block was not found" + "message": "the requested block is not in the main chain" } } diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_unparsable_hash_or_height@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_unparsable_hash_or_height@custom_testnet.snap index a45d7e298dc..d7b3c2b1ff0 100644 --- 
a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_unparsable_hash_or_height@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_unparsable_hash_or_height@custom_testnet.snap @@ -1,10 +1,11 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs expression: treestate +snapshot_kind: text --- { "Err": { - "code": 0, + "code": -8, "message": "parse error: could not convert the input string to a hash or height" } } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 007a89ee893..998cdc155ee 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1,15 +1,17 @@ //! Fixed test vectors for RPC methods. use std::ops::RangeInclusive; +use std::sync::Arc; use tower::buffer::Buffer; +use zebra_chain::serialization::ZcashSerialize; use zebra_chain::{ amount::Amount, block::Block, chain_tip::{mock::MockChainTip, NoChainTip}, parameters::Network::*, - serialization::{ZcashDeserializeInto, ZcashSerialize}, + serialization::ZcashDeserializeInto, transaction::UnminedTxId, }; use zebra_node_services::BoxError; @@ -722,9 +724,12 @@ async fn rpc_getrawtransaction() { conventional_fee: Amount::zero(), }])); }); - let get_tx_req = rpc.get_raw_transaction(tx.hash().encode_hex(), Some(0u8)); - let (response, _) = futures::join!(get_tx_req, mempool_req); - let get_tx = response.expect("We should have a GetRawTransaction struct"); + + let rpc_req = rpc.get_raw_transaction(tx.hash().encode_hex(), Some(0u8)); + + let (rsp, _) = futures::join!(rpc_req, mempool_req); + let get_tx = rsp.expect("we should have a `GetRawTransaction` struct"); + if let GetRawTransaction::Raw(raw_tx) = get_tx { assert_eq!(raw_tx.as_ref(), tx.zcash_serialize_to_vec().unwrap()); } else { @@ -752,12 +757,14 @@ async fn rpc_getrawtransaction() { let run_state_test_case = |block_idx: usize, block: Arc, tx: Arc| { let read_state = read_state.clone(); - let tx_hash = tx.hash(); - let 
get_tx_verbose_0_req = rpc.get_raw_transaction(tx_hash.encode_hex(), Some(0u8)); - let get_tx_verbose_1_req = rpc.get_raw_transaction(tx_hash.encode_hex(), Some(1u8)); + let txid = tx.hash(); + let hex_txid = txid.encode_hex::(); + + let get_tx_verbose_0_req = rpc.get_raw_transaction(hex_txid.clone(), Some(0u8)); + let get_tx_verbose_1_req = rpc.get_raw_transaction(hex_txid, Some(1u8)); async move { - let (response, _) = futures::join!(get_tx_verbose_0_req, make_mempool_req(tx_hash)); + let (response, _) = futures::join!(get_tx_verbose_0_req, make_mempool_req(txid)); let get_tx = response.expect("We should have a GetRawTransaction struct"); if let GetRawTransaction::Raw(raw_tx) = get_tx { assert_eq!(raw_tx.as_ref(), tx.zcash_serialize_to_vec().unwrap()); @@ -765,7 +772,8 @@ async fn rpc_getrawtransaction() { unreachable!("Should return a Raw enum") } - let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(tx_hash)); + let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(txid)); + let GetRawTransaction::Object { hex, height, @@ -775,8 +783,11 @@ async fn rpc_getrawtransaction() { unreachable!("Should return a Raw enum") }; + let height = height.expect("state requests should have height"); + let confirmations = confirmations.expect("state requests should have confirmations"); + assert_eq!(hex.as_ref(), tx.zcash_serialize_to_vec().unwrap()); - assert_eq!(height, block_idx as i32); + assert_eq!(height, block_idx as u32); let depth_response = read_state .oneshot(zebra_state::ReadRequest::Depth(block.hash())) @@ -870,25 +881,18 @@ async fn rpc_getaddresstxids_invalid_arguments() { ); // call the method with an invalid address string - let address = "11111111".to_string(); - let addresses = vec![address.clone()]; - let start: u32 = 1; - let end: u32 = 2; - let error = rpc + let rpc_rsp = rpc .get_address_tx_ids(GetAddressTxIdsRequest { - addresses: addresses.clone(), - start, - end, + addresses: vec!["t1invalidaddress".to_owned()], 
+ start: 1, + end: 2, }) .await .unwrap_err(); - assert_eq!( - error.message, - format!( - "invalid address \"{}\": parse error: t-addr decoding error", - address.clone() - ) - ); + + assert_eq!(rpc_rsp.code, ErrorCode::ServerError(-5)); + + mempool.expect_no_requests().await; // create a valid address let address = "t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd".to_string(); @@ -1078,17 +1082,13 @@ async fn rpc_getaddressutxos_invalid_arguments() { ); // call the method with an invalid address string - let address = "11111111".to_string(); - let addresses = vec![address.clone()]; let error = rpc .0 - .get_address_utxos(AddressStrings::new(addresses)) + .get_address_utxos(AddressStrings::new(vec!["t1invalidaddress".to_owned()])) .await .unwrap_err(); - assert_eq!( - error.message, - format!("invalid address \"{address}\": parse error: t-addr decoding error") - ); + + assert_eq!(error.code, ErrorCode::ServerError(-5)); mempool.expect_no_requests().await; state.expect_no_requests().await; diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 73fcde65f6b..69ab36d8c00 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -36,6 +36,7 @@ use crate::{ use crate::methods::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl}; pub mod cookie; +pub mod error; pub mod http_request_compatibility; pub mod rpc_call_compatibility; diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs new file mode 100644 index 00000000000..4cfc7b38571 --- /dev/null +++ b/zebra-rpc/src/server/error.rs @@ -0,0 +1,118 @@ +//! RPC error codes & their handling. + +/// Bitcoin RPC error codes +/// +/// Drawn from . +/// +/// ## Notes +/// +/// - All explicit discriminants fit within `i64`. 
+#[derive(Default)] +pub enum LegacyCode { + // General application defined errors + /// `std::exception` thrown in command handling + #[default] + Misc = -1, + /// Server is in safe mode, and command is not allowed in safe mode + ForbiddenBySafeMode = -2, + /// Unexpected type was passed as parameter + Type = -3, + /// Invalid address or key + InvalidAddressOrKey = -5, + /// Ran out of memory during operation + OutOfMemory = -7, + /// Invalid, missing or duplicate parameter + InvalidParameter = -8, + /// Database error + Database = -20, + /// Error parsing or validating structure in raw format + Deserialization = -22, + /// General error during transaction or block submission + Verify = -25, + /// Transaction or block was rejected by network rules + VerifyRejected = -26, + /// Transaction already in chain + VerifyAlreadyInChain = -27, + /// Client still warming up + InWarmup = -28, + + // P2P client errors + /// Bitcoin is not connected + ClientNotConnected = -9, + /// Still downloading initial blocks + ClientInInitialDownload = -10, + /// Node is already added + ClientNodeAlreadyAdded = -23, + /// Node has not been added before + ClientNodeNotAdded = -24, + /// Node to disconnect not found in connected nodes + ClientNodeNotConnected = -29, + /// Invalid IP/Subnet + ClientInvalidIpOrSubnet = -30, +} + +impl From for jsonrpc_core::ErrorCode { + fn from(code: LegacyCode) -> Self { + Self::ServerError(code as i64) + } +} + +/// A trait for mapping errors to [`jsonrpc_core::Error`]. +pub(crate) trait MapError: Sized { + /// Maps errors to [`jsonrpc_core::Error`] with a specific error code. + fn map_error( + self, + code: impl Into, + ) -> std::result::Result; + + /// Maps errors to [`jsonrpc_core::Error`] with a [`LegacyCode::Misc`] error code. + fn map_misc_error(self) -> std::result::Result { + self.map_error(LegacyCode::Misc) + } +} + +/// A trait for conditionally converting a value into a `Result`. 
+pub(crate) trait OkOrError: Sized { + /// Converts the implementing type to `Result`, using an error code and + /// message if conversion is to `Err`. + fn ok_or_error( + self, + code: impl Into, + message: impl ToString, + ) -> std::result::Result; + + /// Converts the implementing type to `Result`, using a [`LegacyCode::Misc`] error code. + fn ok_or_misc_error( + self, + message: impl ToString, + ) -> std::result::Result { + self.ok_or_error(LegacyCode::Misc, message) + } +} + +impl MapError for Result +where + E: ToString, +{ + fn map_error(self, code: impl Into) -> Result { + self.map_err(|error| jsonrpc_core::Error { + code: code.into(), + message: error.to_string(), + data: None, + }) + } +} + +impl OkOrError for Option { + fn ok_or_error( + self, + code: impl Into, + message: impl ToString, + ) -> Result { + self.ok_or(jsonrpc_core::Error { + code: code.into(), + message: message.to_string(), + data: None, + }) + } +} diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs index c3974ac3cf8..209596180c0 100644 --- a/zebra-rpc/src/server/rpc_call_compatibility.rs +++ b/zebra-rpc/src/server/rpc_call_compatibility.rs @@ -6,13 +6,14 @@ use std::future::Future; use futures::future::{Either, FutureExt}; + use jsonrpc_core::{ middleware::Middleware, types::{Call, Failure, Output, Response}, - BoxFuture, ErrorCode, Metadata, MethodCall, Notification, + BoxFuture, Metadata, MethodCall, Notification, }; -use crate::constants::{INVALID_PARAMETERS_ERROR_CODE, MAX_PARAMS_LOG_LENGTH}; +use crate::server; /// JSON-RPC [`Middleware`] with compatibility workarounds. /// @@ -57,13 +58,20 @@ impl Middleware for FixRpcResponseMiddleware { } impl FixRpcResponseMiddleware { - /// Replace [`jsonrpc_core`] server error codes in `output` with the `zcashd` equivalents. + /// Replaces [`jsonrpc_core::ErrorCode`]s in the [`Output`] with their `zcashd` equivalents. + /// + /// ## Replaced Codes + /// + /// 1. 
[`jsonrpc_core::ErrorCode::InvalidParams`] -> [`server::error::LegacyCode::Misc`] + /// Rationale: + /// The `node-stratum-pool` mining pool library expects error code `-1` to detect available RPC methods: + /// fn fix_error_codes(output: &mut Option) { if let Some(Output::Failure(Failure { ref mut error, .. })) = output { - if matches!(error.code, ErrorCode::InvalidParams) { + if matches!(error.code, jsonrpc_core::ErrorCode::InvalidParams) { let original_code = error.code.clone(); - error.code = INVALID_PARAMETERS_ERROR_CODE; + error.code = server::error::LegacyCode::Misc.into(); tracing::debug!("Replacing RPC error: {original_code:?} with {error}"); } } @@ -73,6 +81,8 @@ impl FixRpcResponseMiddleware { /// /// Prints out only the method name and the received parameters. fn call_description(call: &Call) -> String { + const MAX_PARAMS_LOG_LENGTH: usize = 100; + match call { Call::MethodCall(MethodCall { method, params, .. }) => { let mut params = format!("{params:?}"); diff --git a/zebra-rpc/src/sync.rs b/zebra-rpc/src/sync.rs index fd323ef64bb..40373d0eaed 100644 --- a/zebra-rpc/src/sync.rs +++ b/zebra-rpc/src/sync.rs @@ -21,8 +21,8 @@ use zebra_state::{ use zebra_chain::diagnostic::task::WaitForPanics; use crate::{ - constants::MISSING_BLOCK_ERROR_CODE, methods::{hex_data::HexData, GetBlockHeightAndHash}, + server, }; /// How long to wait between calls to `getbestblockheightandhash` when it: @@ -383,7 +383,9 @@ impl SyncerRpcMethods for RpcRequestClient { Err(err) if err .downcast_ref::() - .is_some_and(|err| err.code == MISSING_BLOCK_ERROR_CODE) => + .is_some_and(|err| { + err.code == server::error::LegacyCode::InvalidParameter.into() + }) => { Ok(None) } diff --git a/zebra-rpc/src/tests/vectors.rs b/zebra-rpc/src/tests/vectors.rs index 84ed937d6ef..a93158b8b38 100644 --- a/zebra-rpc/src/tests/vectors.rs +++ b/zebra-rpc/src/tests/vectors.rs @@ -4,21 +4,28 @@ use crate::methods::{GetBlock, GetRawTransaction}; #[test] pub fn test_transaction_serialization() { - 
let expected_tx = GetRawTransaction::Raw(vec![0x42].into()); - let expected_json = r#""42""#; - let j = serde_json::to_string(&expected_tx).unwrap(); + let tx = GetRawTransaction::Raw(vec![0x42].into()); - assert_eq!(j, expected_json); + assert_eq!(serde_json::to_string(&tx).unwrap(), r#""42""#); - let expected_tx = GetRawTransaction::Object { + let tx = GetRawTransaction::Object { hex: vec![0x42].into(), - height: 1, - confirmations: 0, + height: Some(1), + confirmations: Some(0), }; - let expected_json = r#"{"hex":"42","height":1,"confirmations":0}"#; - let j = serde_json::to_string(&expected_tx).unwrap(); - assert_eq!(j, expected_json); + assert_eq!( + serde_json::to_string(&tx).unwrap(), + r#"{"hex":"42","height":1,"confirmations":0}"# + ); + + let tx = GetRawTransaction::Object { + hex: vec![0x42].into(), + height: None, + confirmations: None, + }; + + assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"42"}"#); } #[test] diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index bf1cba697de..acd89d89aba 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -17,7 +17,6 @@ use zebra_chain::{ }; use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::{ - constants::MISSING_BLOCK_ERROR_CODE, methods::{ get_block_template_rpcs::{ get_block_template::{ @@ -27,7 +26,7 @@ use zebra_rpc::{ }, hex_data::HexData, }, - server::OPENED_RPC_ENDPOINT_MSG, + server::{self, OPENED_RPC_ENDPOINT_MSG}, }; use zebra_test::args; @@ -163,7 +162,9 @@ impl MiningRpcMethods for RpcRequestClient { Err(err) if err .downcast_ref::() - .is_some_and(|err| err.code == MISSING_BLOCK_ERROR_CODE) => + .is_some_and(|err| { + err.code == server::error::LegacyCode::InvalidParameter.into() + }) => { Ok(None) } From 543f0669e15a7d7da4e43d9d009fbfe9583bcec0 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Sat, 14 Dec 2024 09:28:57 -0300 Subject: [PATCH 040/245] add(rpc): getblock: return transaction details with verbosity=2 
(#9083) * getblock: return tx objects with verbosity=2 * fix test * use FuturesOrdered * Suggestion for "add(rpc): getblock: return transaction details with verbosity=2" (#9084) * Replaces multiple service calls (per transaction) with a single call to the state service for all of a block's transactions. * adjustments to reuse code from getrawtransaction --------- Co-authored-by: Conrado Gouvea * update snapshot --------- Co-authored-by: Arya --- zebra-rpc/src/methods.rs | 141 ++++++++++++------ ...k_verbose_hash_verbosity_2@mainnet_10.snap | 6 +- ...k_verbose_hash_verbosity_2@testnet_10.snap | 6 +- ...verbose_height_verbosity_2@mainnet_10.snap | 6 +- ...verbose_height_verbosity_2@testnet_10.snap | 6 +- zebra-rpc/src/methods/tests/vectors.rs | 24 ++- zebra-rpc/src/tests/vectors.rs | 10 +- 7 files changed, 139 insertions(+), 60 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 0d7986f033f..cfd8260d2ba 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,7 +6,7 @@ //! Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. -use std::{collections::HashSet, fmt::Debug}; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; use futures::{stream::FuturesOrdered, FutureExt, StreamExt, TryFutureExt}; @@ -814,6 +814,12 @@ where next_block_hash, } = *block_header; + let transactions_request = match verbosity { + 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), + 2 => zebra_state::ReadRequest::Block(hash_or_height), + _other => panic!("get_block_header_fut should be none"), + }; + // # Concurrency // // We look up by block hash so the hash, transaction IDs, and confirmations @@ -827,7 +833,7 @@ where // A block's transaction IDs are never modified, so all possible responses are // valid. 
Clients that query block heights must be able to handle chain forks, // including getting transaction IDs from any chain fork. - zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), + transactions_request, // Orchard trees zebra_state::ReadRequest::OrchardTree(hash_or_height), ]; @@ -839,11 +845,27 @@ where } let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); - let tx = match tx_ids_response.map_misc_error()? { + let tx: Vec<_> = match tx_ids_response.map_misc_error()? { zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids .ok_or_misc_error("block not found")? .iter() - .map(|tx_id| tx_id.encode_hex()) + .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) + .collect(), + zebra_state::ReadResponse::Block(block) => block + .ok_or_misc_error("Block not found")? + .transactions + .iter() + .map(|tx| { + GetBlockTransaction::Object(TransactionObject::from_transaction( + tx.clone(), + Some(height), + Some( + confirmations + .try_into() + .expect("should be less than max block height, i32::MAX"), + ), + )) + }) .collect(), _ => unreachable!("unmatched response to a transaction_ids_for_block request"), }; @@ -1131,15 +1153,14 @@ where { mempool::Response::Transactions(txns) => { if let Some(tx) = txns.first() { - let hex = tx.transaction.clone().into(); - return Ok(if verbose { - GetRawTransaction::Object { - hex, - height: None, - confirmations: None, - } + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.transaction.clone(), + None, + None, + )) } else { + let hex = tx.transaction.clone().into(); GetRawTransaction::Raw(hex) }); } @@ -1155,19 +1176,16 @@ where .await .map_misc_error()? 
{ - zebra_state::ReadResponse::Transaction(Some(tx)) => { + zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.tx.clone(), + Some(tx.height), + Some(tx.confirmations), + )) + } else { let hex = tx.tx.into(); - - Ok(if verbose { - GetRawTransaction::Object { - hex, - height: Some(tx.height.0), - confirmations: Some(tx.confirmations), - } - } else { - GetRawTransaction::Raw(hex) - }) - } + GetRawTransaction::Raw(hex) + }), zebra_state::ReadResponse::Transaction(None) => { Err("No such mempool or main chain transaction") @@ -1779,11 +1797,9 @@ pub enum GetBlock { // `chainhistoryroot` would be here. Undocumented. TODO: decide if we want to support it // - /// List of transaction IDs in block order, hex-encoded. - // - // TODO: use a typed Vec here - // TODO: support Objects - tx: Vec, + /// List of transactions in block order, hex-encoded if verbosity=1 or + /// as objects if verbosity=2. + tx: Vec, /// The height of the requested block. #[serde(skip_serializing_if = "Option::is_none")] @@ -1811,7 +1827,7 @@ pub enum GetBlock { difficulty: Option, // `chainwork` would be here, but we don't plan on supporting it - // `anchor` would be here. Undocumented. TODO: decide if we want to support it + // `anchor` would be here. Not planned to be supported. // `chainSupply` would be here, TODO: implement // `valuePools` would be here, TODO: implement // @@ -1852,6 +1868,17 @@ impl Default for GetBlock { } } +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[serde(untagged)] +/// The transaction list in a `getblock` call. Can be a list of transaction +/// IDs or the full transaction details depending on verbosity. +pub enum GetBlockTransaction { + /// The transaction hash, hex-encoded. + Hash(#[serde(with = "hex")] transaction::Hash), + /// The block object. + Object(TransactionObject), +} + /// Response to a `getblockheader` RPC request. 
/// /// See the notes for the [`Rpc::get_block_header`] method. @@ -1995,24 +2022,36 @@ pub enum GetRawTransaction { /// The raw transaction, encoded as hex bytes. Raw(#[serde(with = "hex")] SerializedTransaction), /// The transaction object. - Object { - /// The raw transaction, encoded as hex bytes. - #[serde(with = "hex")] - hex: SerializedTransaction, - /// The height of the block in the best chain that contains the tx or `None` if the tx is in - /// the mempool. - #[serde(skip_serializing_if = "Option::is_none")] - height: Option, - /// The height diff between the block containing the tx and the best chain tip + 1 or `None` - /// if the tx is in the mempool. - #[serde(skip_serializing_if = "Option::is_none")] - confirmations: Option, - }, + Object(TransactionObject), } impl Default for GetRawTransaction { fn default() -> Self { - Self::Object { + Self::Object(TransactionObject::default()) + } +} + +/// A Transaction object as returned by `getrawtransaction` and `getblock` RPC +/// requests. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct TransactionObject { + /// The raw transaction, encoded as hex bytes. + #[serde(with = "hex")] + pub hex: SerializedTransaction, + /// The height of the block in the best chain that contains the tx or `None` if the tx is in + /// the mempool. + #[serde(skip_serializing_if = "Option::is_none")] + pub height: Option, + /// The height diff between the block containing the tx and the best chain tip + 1 or `None` + /// if the tx is in the mempool. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub confirmations: Option, + // TODO: many fields not yet supported +} + +impl Default for TransactionObject { + fn default() -> Self { + Self { hex: SerializedTransaction::from( [0u8; zebra_chain::transaction::MIN_TRANSPARENT_TX_SIZE as usize].to_vec(), ), @@ -2022,6 +2061,22 @@ impl Default for GetRawTransaction { } } +impl TransactionObject { + /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. + #[allow(clippy::unwrap_in_result)] + fn from_transaction( + tx: Arc, + height: Option, + confirmations: Option, + ) -> Self { + Self { + hex: tx.into(), + height: height.map(|height| height.0), + confirmations, + } + } +} + /// Response to a `getaddressutxos` RPC request. /// /// See the notes for the [`Rpc::get_address_utxos` method]. diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 93010ad42d4..51729b13293 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -10,7 +10,11 @@ expression: block "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ - "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" + { + "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", + "height": 1, + "confirmations": 10 + } ], "time": 1477671596, "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", diff --git 
a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 5bd22590f1b..51bbfc72f05 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -10,7 +10,11 @@ expression: block "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ - "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" + { + "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", + "height": 1, + "confirmations": 10 + } ], "time": 1477674473, "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index 93010ad42d4..51729b13293 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -10,7 +10,11 @@ expression: block "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ - "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" + { + "hex": 
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", + "height": 1, + "confirmations": 10 + } ], "time": 1477671596, "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index 5bd22590f1b..51bbfc72f05 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -10,7 +10,11 @@ expression: block "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ - "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" + { + "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", + "height": 1, + "confirmations": 10 + } ], "time": 1477674473, "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 998cdc155ee..1ecde97c634 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -178,7 +178,7 @@ async fn rpc_getblock() { tx: block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Hash(tx.hash())) .collect(), trees, size: None, @@ -221,7 +221,7 @@ async fn rpc_getblock() { tx: 
block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Hash(tx.hash())) .collect(), trees, size: None, @@ -264,7 +264,11 @@ async fn rpc_getblock() { tx: block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Object(TransactionObject { + hex: (*tx).clone().into(), + height: Some(i.try_into().expect("valid u32")), + confirmations: Some((blocks.len() - i).try_into().expect("valid i64")) + })) .collect(), trees, size: None, @@ -307,7 +311,11 @@ async fn rpc_getblock() { tx: block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Object(TransactionObject { + hex: (*tx).clone().into(), + height: Some(i.try_into().expect("valid u32")), + confirmations: Some((blocks.len() - i).try_into().expect("valid i64")) + })) .collect(), trees, size: None, @@ -350,7 +358,7 @@ async fn rpc_getblock() { tx: block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Hash(tx.hash())) .collect(), trees, size: None, @@ -393,7 +401,7 @@ async fn rpc_getblock() { tx: block .transactions .iter() - .map(|tx| tx.hash().encode_hex()) + .map(|tx| GetBlockTransaction::Hash(tx.hash())) .collect(), trees, size: None, @@ -774,11 +782,11 @@ async fn rpc_getrawtransaction() { let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(txid)); - let GetRawTransaction::Object { + let GetRawTransaction::Object(TransactionObject { hex, height, confirmations, - } = response.expect("We should have a GetRawTransaction struct") + }) = response.expect("We should have a GetRawTransaction struct") else { unreachable!("Should return a Raw enum") }; diff --git a/zebra-rpc/src/tests/vectors.rs b/zebra-rpc/src/tests/vectors.rs index a93158b8b38..0ca221c2cf5 100644 --- a/zebra-rpc/src/tests/vectors.rs +++ b/zebra-rpc/src/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed Zebra RPC serialization test vectors. 
-use crate::methods::{GetBlock, GetRawTransaction}; +use crate::methods::{GetBlock, GetRawTransaction, TransactionObject}; #[test] pub fn test_transaction_serialization() { @@ -8,22 +8,22 @@ pub fn test_transaction_serialization() { assert_eq!(serde_json::to_string(&tx).unwrap(), r#""42""#); - let tx = GetRawTransaction::Object { + let tx = GetRawTransaction::Object(TransactionObject { hex: vec![0x42].into(), height: Some(1), confirmations: Some(0), - }; + }); assert_eq!( serde_json::to_string(&tx).unwrap(), r#"{"hex":"42","height":1,"confirmations":0}"# ); - let tx = GetRawTransaction::Object { + let tx = GetRawTransaction::Object(TransactionObject { hex: vec![0x42].into(), height: None, confirmations: None, - }; + }); assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"42"}"#); } From 1974fea885e00c678aa43114a51b0edc802323b2 Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 19 Dec 2024 13:46:27 -0500 Subject: [PATCH 041/245] change(rpc): Avoid re-verifying transactions in blocks if those transactions are in the mempool (#8951) * skips re-verifying transactions in blocks that are present in the mempool. * clippy fix * adds a test * fixes clippy lint * Use NU6 & V5 tx in new test * Uses correct consensus branch id in test --- zebra-consensus/src/block.rs | 13 +- zebra-consensus/src/transaction.rs | 85 +++++- zebra-consensus/src/transaction/tests.rs | 252 +++++++++++++++++- zebra-consensus/src/transaction/tests/prop.rs | 8 +- zebra-node-services/src/mempool.rs | 16 +- .../tests/snapshot/get_block_template_rpcs.rs | 3 +- zebra-rpc/src/methods/tests/vectors.rs | 6 +- zebrad/src/components/mempool.rs | 20 +- zebrad/src/components/mempool/storage.rs | 17 ++ 9 files changed, 400 insertions(+), 20 deletions(-) diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 247079c401a..1c959dd284e 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -8,6 +8,7 @@ //! verification, where it may be accepted or rejected. 
use std::{ + collections::HashSet, future::Future, pin::Pin, sync::Arc, @@ -25,7 +26,7 @@ use zebra_chain::{ amount::Amount, block, parameters::{subsidy::FundingStreamReceiver, Network}, - transparent, + transaction, transparent, work::equihash, }; use zebra_state as zs; @@ -232,13 +233,21 @@ where &block, &transaction_hashes, )); - for transaction in &block.transactions { + + let known_outpoint_hashes: Arc> = + Arc::new(known_utxos.keys().map(|outpoint| outpoint.hash).collect()); + + for (&transaction_hash, transaction) in + transaction_hashes.iter().zip(block.transactions.iter()) + { let rsp = transaction_verifier .ready() .await .expect("transaction verifier is always ready") .call(tx::Request::Block { + transaction_hash, transaction: transaction.clone(), + known_outpoint_hashes: known_outpoint_hashes.clone(), known_utxos: known_utxos.clone(), height, time: block.header.time, diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index ef20881bbbf..c3ccb78452c 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -1,7 +1,7 @@ //! Asynchronous verification of transactions. use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, future::Future, pin::Pin, sync::Arc, @@ -146,8 +146,12 @@ where pub enum Request { /// Verify the supplied transaction as part of a block. Block { + /// The transaction hash. + transaction_hash: transaction::Hash, /// The transaction itself. transaction: Arc, + /// Set of transaction hashes that create new transparent outputs. + known_outpoint_hashes: Arc>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc>, /// The height of the block containing this transaction. @@ -259,6 +263,16 @@ impl Request { } } + /// The mined transaction ID for the transaction in this request. + pub fn tx_mined_id(&self) -> transaction::Hash { + match self { + Request::Block { + transaction_hash, .. 
+ } => *transaction_hash, + Request::Mempool { transaction, .. } => transaction.id.mined_id(), + } + } + /// The set of additional known unspent transaction outputs that's in this request. pub fn known_utxos(&self) -> Arc> { match self { @@ -267,6 +281,17 @@ impl Request { } } + /// The set of additional known [`transparent::OutPoint`]s of unspent transaction outputs that's in this request. + pub fn known_outpoint_hashes(&self) -> Arc> { + match self { + Request::Block { + known_outpoint_hashes, + .. + } => known_outpoint_hashes.clone(), + Request::Mempool { .. } => HashSet::new().into(), + } + } + /// The height used to select the consensus rules for verifying this transaction. pub fn height(&self) -> block::Height { match self { @@ -377,6 +402,16 @@ where async move { tracing::trace!(?tx_id, ?req, "got tx verify request"); + if let Some(result) = Self::try_find_verified_unmined_tx(&req, mempool.clone()).await { + let verified_tx = result?; + + return Ok(Response::Block { + tx_id, + miner_fee: Some(verified_tx.miner_fee), + legacy_sigop_count: verified_tx.legacy_sigop_count + }); + } + // Do quick checks first check::has_inputs_and_outputs(&tx)?; check::has_enough_orchard_flags(&tx)?; @@ -609,8 +644,52 @@ where } } - /// Waits for the UTXOs that are being spent by the given transaction to arrive in - /// the state for [`Block`](Request::Block) requests. + /// Attempts to find a transaction in the mempool by its transaction hash and checks + /// that all of its dependencies are available in the block. + /// + /// Returns [`Some(Ok(VerifiedUnminedTx))`](VerifiedUnminedTx) if successful, + /// None if the transaction id was not found in the mempool, + /// or `Some(Err(TransparentInputNotFound))` if the transaction was found, but some of its + /// dependencies are missing in the block. 
+ async fn try_find_verified_unmined_tx( + req: &Request, + mempool: Option>, + ) -> Option> { + if req.is_mempool() || req.transaction().is_coinbase() { + return None; + } + + let mempool = mempool?; + let known_outpoint_hashes = req.known_outpoint_hashes(); + let tx_id = req.tx_mined_id(); + + let mempool::Response::TransactionWithDeps { + transaction, + dependencies, + } = mempool + .oneshot(mempool::Request::TransactionWithDepsByMinedId(tx_id)) + .await + .ok()? + else { + panic!("unexpected response to TransactionWithDepsByMinedId request"); + }; + + // Note: This does not verify that the spends are in order, the spend order + // should be verified during contextual validation in zebra-state. + let has_all_tx_deps = dependencies + .into_iter() + .all(|dependency_id| known_outpoint_hashes.contains(&dependency_id)); + + let result = if has_all_tx_deps { + Ok(transaction) + } else { + Err(TransactionError::TransparentInputNotFound) + }; + + Some(result) + } + + /// Wait for the UTXOs that are being spent by the given transaction. /// /// Looks up UTXOs that are being spent by the given transaction in the state or waits /// for them to be added to the mempool for [`Mempool`](Request::Mempool) requests. diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 8627a578c62..417298cc7f3 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -2,7 +2,10 @@ // // TODO: split fixed test vectors into a `vectors` module? 
-use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use chrono::{DateTime, TimeZone, Utc}; use color_eyre::eyre::Report; @@ -694,13 +697,180 @@ async fn mempool_request_with_unmined_output_spends_is_accepted() { ); tokio::time::sleep(POLL_MEMPOOL_DELAY * 2).await; + // polled before AwaitOutput request and after a mempool transaction with transparent outputs + // is successfully verified assert_eq!( mempool.poll_count(), 2, - "the mempool service should have been polled twice, \ - first before being called with an AwaitOutput request, \ - then again shortly after a mempool transaction with transparent outputs \ - is successfully verified" + "the mempool service should have been polled twice" + ); +} + +#[tokio::test] +async fn skips_verification_of_block_transactions_in_mempool() { + let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); + let mempool: MockService<_, _, _, _> = MockService::build().for_prop_tests(); + let (mempool_setup_tx, mempool_setup_rx) = tokio::sync::oneshot::channel(); + let verifier = Verifier::new(&Network::Mainnet, state.clone(), mempool_setup_rx); + let verifier = Buffer::new(verifier, 1); + + mempool_setup_tx + .send(mempool.clone()) + .ok() + .expect("send should succeed"); + + let height = NetworkUpgrade::Nu6 + .activation_height(&Network::Mainnet) + .expect("NU6 activation height is specified"); + let fund_height = (height - 1).expect("fake source fund block height is too small"); + let (input, output, known_utxos) = mock_transparent_transfer( + fund_height, + true, + 0, + Amount::try_from(10001).expect("invalid value"), + ); + + // Create a non-coinbase V5 tx with the last valid expiry height.
+ let tx = Transaction::V5 { + network_upgrade: NetworkUpgrade::Nu6, + inputs: vec![input], + outputs: vec![output], + lock_time: LockTime::min_lock_time_timestamp(), + expiry_height: height, + sapling_shielded_data: None, + orchard_shielded_data: None, + }; + + let tx_hash = tx.hash(); + let input_outpoint = match tx.inputs()[0] { + transparent::Input::PrevOut { outpoint, .. } => outpoint, + transparent::Input::Coinbase { .. } => panic!("requires a non-coinbase transaction"), + }; + + tokio::spawn(async move { + state + .expect_request(zebra_state::Request::BestChainNextMedianTimePast) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::BestChainNextMedianTimePast( + DateTime32::MAX, + )); + + state + .expect_request(zebra_state::Request::UnspentBestChainUtxo(input_outpoint)) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::UnspentBestChainUtxo(None)); + + state + .expect_request_that(|req| { + matches!( + req, + zebra_state::Request::CheckBestChainTipNullifiersAndAnchors(_) + ) + }) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors); + }); + + let mut mempool_clone = mempool.clone(); + tokio::spawn(async move { + mempool_clone + .expect_request(mempool::Request::AwaitOutput(input_outpoint)) + .await + .expect("verifier should call mock state service with correct request") + .respond(mempool::Response::UnspentOutput( + known_utxos + .get(&input_outpoint) + .expect("input outpoint should exist in known_utxos") + .utxo + .output + .clone(), + )); + }); + + let verifier_response = verifier + .clone() + .oneshot(Request::Mempool { + transaction: tx.clone().into(), + height, + }) + .await; + + assert!( + verifier_response.is_ok(), + "expected successful verification, got: {verifier_response:?}" + ); + + let 
crate::transaction::Response::Mempool { + transaction, + spent_mempool_outpoints, + } = verifier_response.expect("already checked that response is ok") + else { + panic!("unexpected response variant from transaction verifier for Mempool request") + }; + + assert_eq!( + spent_mempool_outpoints, + vec![input_outpoint], + "spent_mempool_outpoints in tx verifier response should match input_outpoint" + ); + + let mut mempool_clone = mempool.clone(); + tokio::spawn(async move { + for _ in 0..2 { + mempool_clone + .expect_request(mempool::Request::TransactionWithDepsByMinedId(tx_hash)) + .await + .expect("verifier should call mock state service with correct request") + .respond(mempool::Response::TransactionWithDeps { + transaction: transaction.clone(), + dependencies: [input_outpoint.hash].into(), + }); + } + }); + + let make_request = |known_outpoint_hashes| Request::Block { + transaction_hash: tx_hash, + transaction: Arc::new(tx), + known_outpoint_hashes, + known_utxos: Arc::new(HashMap::new()), + height, + time: Utc::now(), + }; + + let crate::transaction::Response::Block { .. 
} = verifier + .clone() + .oneshot(make_request.clone()(Arc::new([input_outpoint.hash].into()))) + .await + .expect("should return Ok without calling state service") + else { + panic!("unexpected response variant from transaction verifier for Block request") + }; + + let verifier_response_err = *verifier + .clone() + .oneshot(make_request(Arc::new(HashSet::new()))) + .await + .expect_err("should return Err without calling state service") + .downcast::() + .expect("tx verifier error type should be TransactionError"); + + assert_eq!( + verifier_response_err, + TransactionError::TransparentInputNotFound, + "should be a transparent input not found error" + ); + + tokio::time::sleep(POLL_MEMPOOL_DELAY * 2).await; + // polled before AwaitOutput request, after a mempool transaction with transparent outputs, + // is successfully verified, and twice more when checking if a transaction in a block is + // already the mempool. + assert_eq!( + mempool.poll_count(), + 4, + "the mempool service should have been polled 4 times" ); } @@ -983,8 +1153,10 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: canopy .activation_height(&network) .expect("Canopy activation height is specified"), @@ -1029,8 +1201,10 @@ fn v5_transaction_is_accepted_after_nu5_activation() { let verification_result = Verifier::new_for_tests(&network, state) .oneshot(Request::Block { + transaction_hash: tx.hash(), transaction: Arc::new(tx), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: expiry_height, time: DateTime::::MAX_UTC, }) @@ -1087,8 +1261,10 @@ async fn v4_transaction_with_transparent_transfer_is_accepted() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: 
Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1131,8 +1307,10 @@ async fn v4_transaction_with_last_valid_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1176,8 +1354,10 @@ async fn v4_coinbase_transaction_with_low_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1223,8 +1403,10 @@ async fn v4_transaction_with_too_low_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1273,8 +1455,10 @@ async fn v4_transaction_with_exceeding_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1326,8 +1510,10 @@ async fn v4_coinbase_transaction_with_exceeding_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1377,8 +1563,10 @@ async fn 
v4_coinbase_transaction_is_accepted() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1432,8 +1620,10 @@ async fn v4_transaction_with_transparent_transfer_is_rejected_by_the_script() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1487,8 +1677,10 @@ async fn v4_transaction_with_conflicting_transparent_spend_is_rejected() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1558,8 +1750,10 @@ fn v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1634,8 +1828,10 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1693,8 +1889,10 @@ async fn v5_transaction_with_transparent_transfer_is_accepted() { let result = verifier .oneshot(Request::Block { + transaction_hash: 
transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -1739,8 +1937,10 @@ async fn v5_transaction_with_last_valid_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1784,8 +1984,10 @@ async fn v5_coinbase_transaction_expiry_height() { let result = verifier .clone() .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1805,8 +2007,10 @@ async fn v5_coinbase_transaction_expiry_height() { let result = verifier .clone() .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1834,8 +2038,10 @@ async fn v5_coinbase_transaction_expiry_height() { let result = verifier .clone() .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1871,8 +2077,10 @@ async fn v5_coinbase_transaction_expiry_height() { let verification_result = verifier .clone() .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height: new_expiry_height, time: 
DateTime::::MAX_UTC, }) @@ -1922,8 +2130,10 @@ async fn v5_transaction_with_too_low_expiry_height() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: block_height, time: DateTime::::MAX_UTC, }) @@ -1971,8 +2181,10 @@ async fn v5_transaction_with_exceeding_expiry_height() { let verification_result = Verifier::new_for_tests(&Network::Mainnet, state) .oneshot(Request::Block { - transaction: Arc::new(transaction), + transaction_hash: transaction.hash(), + transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: height_max, time: DateTime::::MAX_UTC, }) @@ -2025,8 +2237,10 @@ async fn v5_coinbase_transaction_is_accepted() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -2082,8 +2296,10 @@ async fn v5_transaction_with_transparent_transfer_is_rejected_by_the_script() { let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height: transaction_block_height, time: DateTime::::MAX_UTC, }) @@ -2130,8 +2346,10 @@ async fn v5_transaction_with_conflicting_transparent_spend_is_rejected() { let verification_result = Verifier::new_for_tests(&network, state) .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2173,8 +2391,10 @@ fn 
v4_with_signed_sprout_transfer_is_accepted() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction, known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2262,8 +2482,10 @@ async fn v4_with_joinsplit_is_rejected_for_modification( let result = verifier .clone() .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: transaction.clone(), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2308,8 +2530,10 @@ fn v4_with_sapling_spends() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction, known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2350,8 +2574,10 @@ fn v4_with_duplicate_sapling_spends() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction, known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2394,8 +2620,10 @@ fn v4_with_sapling_outputs_and_no_spends() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction, known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2442,8 +2670,10 @@ fn v5_with_sapling_spends() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2485,8 +2715,10 @@ fn 
v5_with_duplicate_sapling_spends() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2547,8 +2779,10 @@ fn v5_with_duplicate_orchard_action() { // Test the transaction verifier let result = verifier .oneshot(Request::Block { + transaction_hash: transaction.hash(), transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: DateTime::::MAX_UTC, }) @@ -2605,8 +2839,10 @@ async fn v5_consensus_branch_ids() { let block_req = verifier .clone() .oneshot(Request::Block { + transaction_hash: tx.hash(), transaction: Arc::new(tx.clone()), known_utxos: known_utxos.clone(), + known_outpoint_hashes: Arc::new(HashSet::new()), // The consensus branch ID of the tx is outdated for this height. height, time: DateTime::::MAX_UTC, @@ -2633,8 +2869,10 @@ async fn v5_consensus_branch_ids() { let block_req = verifier .clone() .oneshot(Request::Block { + transaction_hash: tx.hash(), transaction: Arc::new(tx.clone()), known_utxos: known_utxos.clone(), + known_outpoint_hashes: Arc::new(HashSet::new()), // The consensus branch ID of the tx is supported by this height. height, time: DateTime::::MAX_UTC, @@ -2690,8 +2928,10 @@ async fn v5_consensus_branch_ids() { let block_req = verifier .clone() .oneshot(Request::Block { + transaction_hash: tx.hash(), transaction: Arc::new(tx.clone()), known_utxos: known_utxos.clone(), + known_outpoint_hashes: Arc::new(HashSet::new()), // The consensus branch ID of the tx is not supported by this height. 
height, time: DateTime::::MAX_UTC, diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index 856742e5d74..8fea9cf3433 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -1,6 +1,9 @@ //! Randomised property tests for transaction verification. -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use chrono::{DateTime, Duration, Utc}; use proptest::{collection::vec, prelude::*}; @@ -452,13 +455,16 @@ fn validate( tower::service_fn(|_| async { unreachable!("State service should not be called") }); let verifier = transaction::Verifier::new_for_tests(&network, state_service); let verifier = Buffer::new(verifier, 10); + let transaction_hash = transaction.hash(); // Test the transaction verifier verifier .clone() .oneshot(transaction::Request::Block { + transaction_hash, transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), + known_outpoint_hashes: Arc::new(HashSet::new()), height, time: block_time, }) diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index ad3e28c7eec..6c035e6dc44 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -6,13 +6,10 @@ use std::collections::HashSet; use tokio::sync::oneshot; use zebra_chain::{ - transaction::{self, UnminedTx, UnminedTxId}, + transaction::{self, UnminedTx, UnminedTxId, VerifiedUnminedTx}, transparent, }; -#[cfg(feature = "getblocktemplate-rpcs")] -use zebra_chain::transaction::VerifiedUnminedTx; - use crate::BoxError; mod gossip; @@ -58,6 +55,9 @@ pub enum Request { /// Outdated requests are pruned on a regular basis. AwaitOutput(transparent::OutPoint), + /// Request a [`VerifiedUnminedTx`] and its dependencies by its mined id. + TransactionWithDepsByMinedId(transaction::Hash), + /// Get all the [`VerifiedUnminedTx`] in the mempool. 
/// /// Equivalent to `TransactionsById(TransactionIds)`, @@ -121,6 +121,14 @@ pub enum Response { /// Response to [`Request::AwaitOutput`] with the transparent output UnspentOutput(transparent::Output), + /// Response to [`Request::TransactionWithDepsByMinedId`]. + TransactionWithDeps { + /// The queried transaction + transaction: VerifiedUnminedTx, + /// A list of dependencies of the queried transaction. + dependencies: HashSet, + }, + /// Returns all [`VerifiedUnminedTx`] in the mempool. // // TODO: make the Transactions response return VerifiedUnminedTx, diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index b2e012c7bcd..4949b419c43 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -105,7 +105,8 @@ pub async fn test_responses( extra_coinbase_data: None, debug_like_zcashd: true, // TODO: Use default field values when optional features are enabled in tests #8183 - ..Default::default() + #[cfg(feature = "internal-miner")] + internal_miner: true, }; // nu5 block height diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 1ecde97c634..01ddb4c3d31 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1560,7 +1560,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { extra_coinbase_data: None, debug_like_zcashd: true, // TODO: Use default field values when optional features are enabled in tests #8183 - ..Default::default() + #[cfg(feature = "internal-miner")] + internal_miner: true, }; // nu5 block height @@ -2014,7 +2015,8 @@ async fn rpc_getdifficulty() { extra_coinbase_data: None, debug_like_zcashd: true, // TODO: Use default field values when optional features are enabled in tests #8183 - ..Default::default() + #[cfg(feature = "internal-miner")] + internal_miner: 
true, }; // nu5 block height diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 6986f601e9c..0d76b778d87 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -737,6 +737,24 @@ impl Service for Mempool { async move { Ok(Response::Transactions(res)) }.boxed() } + Request::TransactionWithDepsByMinedId(tx_id) => { + trace!(?req, "got mempool request"); + + let res = if let Some((transaction, dependencies)) = + storage.transaction_with_deps(tx_id) + { + Ok(Response::TransactionWithDeps { + transaction, + dependencies, + }) + } else { + Err("transaction not found in mempool".into()) + }; + + trace!(?req, ?res, "answered mempool request"); + + async move { res }.boxed() + } Request::AwaitOutput(outpoint) => { trace!(?req, "got mempool request"); @@ -832,7 +850,7 @@ impl Service for Mempool { Request::TransactionsById(_) => Response::Transactions(Default::default()), Request::TransactionsByMinedId(_) => Response::Transactions(Default::default()), - Request::AwaitOutput(_) => { + Request::TransactionWithDepsByMinedId(_) | Request::AwaitOutput(_) => { return async move { Err("mempool is not active: wait for Zebra to sync to the tip".into()) } diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index be7cbc9593f..cee0845ba2b 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -513,6 +513,23 @@ impl Storage { .map(|(_, tx)| &tx.transaction) } + /// Returns a transaction and the transaction ids of its dependencies, if it is in the verified set. 
+ pub fn transaction_with_deps( + &self, + tx_id: transaction::Hash, + ) -> Option<(VerifiedUnminedTx, HashSet)> { + let tx = self.verified.transactions().get(&tx_id).cloned()?; + let deps = self + .verified + .transaction_dependencies() + .dependencies() + .get(&tx_id) + .cloned() + .unwrap_or_default(); + + Some((tx, deps)) + } + /// Returns `true` if a transaction exactly matching an [`UnminedTxId`] is in /// the mempool. /// From 7597cf1caee5763f6d08837d2a84dbea69051008 Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 19 Dec 2024 20:29:42 -0500 Subject: [PATCH 042/245] change(consensus): Allow transactions spending coinbase outputs to have transparent outputs on Regtest (#9085) * Updates `coinbase_spend_restriction()` method to always return `OnlyShieldedOutputs` on Regtest. * Adds a `should_allow_unshielded_coinbase_spends` field to testnet::Parameters * Adds a test * Apply suggestions from code review Co-authored-by: Marek * Renames CoinbaseSpendRestriction variants and updates their documentation. Updates a comment. 
--------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Marek --- zebra-chain/src/block/arbitrary.rs | 5 +- zebra-chain/src/parameters/network/testnet.rs | 41 ++++++++ zebra-chain/src/transaction.rs | 13 +-- zebra-chain/src/transparent/utxo.rs | 10 +- zebra-consensus/src/transaction.rs | 4 +- zebra-consensus/src/transaction/check.rs | 3 +- zebra-consensus/src/transaction/tests.rs | 98 ++++++++++++++++++- zebra-state/src/service/check/tests/utxo.rs | 6 +- zebra-state/src/service/check/utxo.rs | 10 +- 9 files changed, 168 insertions(+), 22 deletions(-) diff --git a/zebra-chain/src/block/arbitrary.rs b/zebra-chain/src/block/arbitrary.rs index 5a39afa2ee4..4961c873b3e 100644 --- a/zebra-chain/src/block/arbitrary.rs +++ b/zebra-chain/src/block/arbitrary.rs @@ -568,7 +568,7 @@ where + Copy + 'static, { - let mut spend_restriction = transaction.coinbase_spend_restriction(height); + let mut spend_restriction = transaction.coinbase_spend_restriction(&Network::Mainnet, height); let mut new_inputs = Vec::new(); let mut spent_outputs = HashMap::new(); @@ -650,7 +650,8 @@ where + 'static, { let has_shielded_outputs = transaction.has_shielded_outputs(); - let delete_transparent_outputs = CoinbaseSpendRestriction::OnlyShieldedOutputs { spend_height }; + let delete_transparent_outputs = + CoinbaseSpendRestriction::CheckCoinbaseMaturity { spend_height }; let mut attempts: usize = 0; // choose an arbitrary spendable UTXO, in hash set order diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 78f7a69a302..1f77e95e750 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -232,6 +232,9 @@ pub struct ParametersBuilder { target_difficulty_limit: ExpandedDifficulty, /// A flag for disabling proof-of-work checks when Zebra is validating blocks disable_pow: bool, + /// Whether to allow transactions with transparent outputs to 
spend coinbase outputs, + /// similar to `fCoinbaseMustBeShielded` in zcashd. + should_allow_unshielded_coinbase_spends: bool, /// The pre-Blossom halving interval for this network pre_blossom_halving_interval: HeightDiff, /// The post-Blossom halving interval for this network @@ -271,6 +274,7 @@ impl Default for ParametersBuilder { should_lock_funding_stream_address_period: false, pre_blossom_halving_interval: PRE_BLOSSOM_HALVING_INTERVAL, post_blossom_halving_interval: POST_BLOSSOM_HALVING_INTERVAL, + should_allow_unshielded_coinbase_spends: false, } } } @@ -439,6 +443,15 @@ impl ParametersBuilder { self } + /// Sets the `disable_pow` flag to be used in the [`Parameters`] being built. + pub fn with_unshielded_coinbase_spends( + mut self, + should_allow_unshielded_coinbase_spends: bool, + ) -> Self { + self.should_allow_unshielded_coinbase_spends = should_allow_unshielded_coinbase_spends; + self + } + /// Sets the pre and post Blosssom halving intervals to be used in the [`Parameters`] being built. 
pub fn with_halving_interval(mut self, pre_blossom_halving_interval: HeightDiff) -> Self { if self.should_lock_funding_stream_address_period { @@ -464,6 +477,7 @@ impl ParametersBuilder { should_lock_funding_stream_address_period: _, target_difficulty_limit, disable_pow, + should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, } = self; @@ -478,6 +492,7 @@ impl ParametersBuilder { post_nu6_funding_streams, target_difficulty_limit, disable_pow, + should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, } @@ -516,6 +531,7 @@ impl ParametersBuilder { should_lock_funding_stream_address_period: _, target_difficulty_limit, disable_pow, + should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, } = Self::default(); @@ -528,6 +544,8 @@ impl ParametersBuilder { && self.post_nu6_funding_streams == post_nu6_funding_streams && self.target_difficulty_limit == target_difficulty_limit && self.disable_pow == disable_pow + && self.should_allow_unshielded_coinbase_spends + == should_allow_unshielded_coinbase_spends && self.pre_blossom_halving_interval == pre_blossom_halving_interval && self.post_blossom_halving_interval == post_blossom_halving_interval } @@ -560,6 +578,9 @@ pub struct Parameters { target_difficulty_limit: ExpandedDifficulty, /// A flag for disabling proof-of-work checks when Zebra is validating blocks disable_pow: bool, + /// Whether to allow transactions with transparent outputs to spend coinbase outputs, + /// similar to `fCoinbaseMustBeShielded` in zcashd. 
+ should_allow_unshielded_coinbase_spends: bool, /// Pre-Blossom halving interval for this network pre_blossom_halving_interval: HeightDiff, /// Post-Blossom halving interval for this network @@ -597,6 +618,7 @@ impl Parameters { // This value is chosen to match zcashd, see: .with_target_difficulty_limit(U256::from_big_endian(&[0x0f; 32])) .with_disable_pow(true) + .with_unshielded_coinbase_spends(true) .with_slow_start_interval(Height::MIN) // Removes default Testnet activation heights if not configured, // most network upgrades are disabled by default for Regtest in zcashd @@ -645,6 +667,7 @@ impl Parameters { post_nu6_funding_streams, target_difficulty_limit, disable_pow, + should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, } = Self::new_regtest(None, None); @@ -657,6 +680,8 @@ impl Parameters { && self.post_nu6_funding_streams == post_nu6_funding_streams && self.target_difficulty_limit == target_difficulty_limit && self.disable_pow == disable_pow + && self.should_allow_unshielded_coinbase_spends + == should_allow_unshielded_coinbase_spends && self.pre_blossom_halving_interval == pre_blossom_halving_interval && self.post_blossom_halving_interval == post_blossom_halving_interval } @@ -711,6 +736,12 @@ impl Parameters { self.disable_pow } + /// Returns true if this network should allow transactions with transparent outputs + /// that spend coinbase outputs. + pub fn should_allow_unshielded_coinbase_spends(&self) -> bool { + self.should_allow_unshielded_coinbase_spends + } + /// Returns the pre-Blossom halving interval for this network pub fn pre_blossom_halving_interval(&self) -> HeightDiff { self.pre_blossom_halving_interval @@ -786,4 +817,14 @@ impl Network { self.post_nu6_funding_streams() } } + + /// Returns true if this network should allow transactions with transparent outputs + /// that spend coinbase outputs. 
+ pub fn should_allow_unshielded_coinbase_spends(&self) -> bool { + if let Self::Testnet(params) = self { + params.should_allow_unshielded_coinbase_spends() + } else { + false + } + } } diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 1c121130fcc..d29eadff8cf 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -41,7 +41,7 @@ pub use unmined::{ use crate::{ amount::{Amount, Error as AmountError, NegativeAllowed, NonNegative}, block, orchard, - parameters::{ConsensusBranchId, NetworkUpgrade}, + parameters::{ConsensusBranchId, Network, NetworkUpgrade}, primitives::{ed25519, Bctv14Proof, Groth16Proof}, sapling, serialization::ZcashSerialize, @@ -303,14 +303,15 @@ impl Transaction { /// assuming it is mined at `spend_height`. pub fn coinbase_spend_restriction( &self, + network: &Network, spend_height: block::Height, ) -> CoinbaseSpendRestriction { - if self.outputs().is_empty() { - // we know this transaction must have shielded outputs, - // because of other consensus rules - OnlyShieldedOutputs { spend_height } + if self.outputs().is_empty() || network.should_allow_unshielded_coinbase_spends() { + // we know this transaction must have shielded outputs if it has no + // transparent outputs, because of other consensus rules. + CheckCoinbaseMaturity { spend_height } } else { - SomeTransparentOutputs + DisallowCoinbaseSpend } } diff --git a/zebra-chain/src/transparent/utxo.rs b/zebra-chain/src/transparent/utxo.rs index 0158165193a..1b5f49bcd89 100644 --- a/zebra-chain/src/transparent/utxo.rs +++ b/zebra-chain/src/transparent/utxo.rs @@ -126,10 +126,14 @@ impl OrderedUtxo { )] pub enum CoinbaseSpendRestriction { /// The UTXO is spent in a transaction with one or more transparent outputs - SomeTransparentOutputs, + /// on a network where coinbase outputs must not be spent by transactions + /// with transparent outputs. 
+ DisallowCoinbaseSpend, - /// The UTXO is spent in a transaction which only has shielded outputs - OnlyShieldedOutputs { + /// The UTXO is spent in a transaction which only has shielded outputs, or + /// transactions spending coinbase outputs may have transparent outputs on + /// this network. + CheckCoinbaseMaturity { /// The height at which the UTXO is spent spend_height: block::Height, }, diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index c3ccb78452c..044a9569f9a 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -486,7 +486,7 @@ where // WONTFIX: Return an error for Request::Block as well to replace this check in // the state once #2336 has been implemented? if req.is_mempool() { - Self::check_maturity_height(&req, &spent_utxos)?; + Self::check_maturity_height(&network, &req, &spent_utxos)?; } let cached_ffi_transaction = @@ -807,10 +807,12 @@ where /// mature and valid for the request height, or a [`TransactionError`] if the transaction /// spends transparent coinbase outputs that are immature and invalid for the request height. 
pub fn check_maturity_height( + network: &Network, request: &Request, spent_utxos: &HashMap, ) -> Result<(), TransactionError> { check::tx_transparent_coinbase_spends_maturity( + network, request.transaction(), request.height(), request.known_utxos(), diff --git a/zebra-consensus/src/transaction/check.rs b/zebra-consensus/src/transaction/check.rs index d3ddc460264..b7338bbdadd 100644 --- a/zebra-consensus/src/transaction/check.rs +++ b/zebra-consensus/src/transaction/check.rs @@ -476,6 +476,7 @@ fn validate_expiry_height_mined( /// Returns `Ok(())` if spent transparent coinbase outputs are /// valid for the block height, or a [`Err(TransactionError)`](TransactionError) pub fn tx_transparent_coinbase_spends_maturity( + network: &Network, tx: Arc, height: Height, block_new_outputs: Arc>, @@ -488,7 +489,7 @@ pub fn tx_transparent_coinbase_spends_maturity( .or_else(|| spent_utxos.get(&spend).cloned()) .expect("load_spent_utxos_fut.await should return an error if a utxo is missing"); - let spend_restriction = tx.coinbase_spend_restriction(height); + let spend_restriction = tx.coinbase_spend_restriction(network, height); zebra_state::check::transparent_coinbase_spend(spend, spend_restriction, &utxo)?; } diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 417298cc7f3..ac1a42fea5c 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -29,7 +29,7 @@ use zebra_chain::{ }, zip317, Hash, HashType, JoinSplitData, LockTime, Transaction, }, - transparent::{self, CoinbaseData}, + transparent::{self, CoinbaseData, CoinbaseSpendRestriction}, }; use zebra_node_services::mempool; @@ -909,7 +909,7 @@ async fn mempool_request_with_immature_spend_is_rejected() { transparent::Input::Coinbase { .. 
} => panic!("requires a non-coinbase transaction"), }; - let spend_restriction = tx.coinbase_spend_restriction(height); + let spend_restriction = tx.coinbase_spend_restriction(&Network::Mainnet, height); let coinbase_spend_height = Height(5); @@ -977,6 +977,100 @@ async fn mempool_request_with_immature_spend_is_rejected() { ); } +/// Tests that calls to the transaction verifier with a mempool request that spends +/// mature coinbase outputs to transparent outputs will return Ok() on Regtest. +#[tokio::test] +async fn mempool_request_with_transparent_coinbase_spend_is_accepted_on_regtest() { + let _init_guard = zebra_test::init(); + + let network = Network::new_regtest(None, Some(1_000)); + let mut state: MockService<_, _, _, _> = MockService::build().for_unit_tests(); + let verifier = Verifier::new_for_tests(&network, state.clone()); + + let height = NetworkUpgrade::Nu6 + .activation_height(&network) + .expect("NU6 activation height is specified"); + let fund_height = (height - 1).expect("fake source fund block height is too small"); + let (input, output, known_utxos) = mock_transparent_transfer( + fund_height, + true, + 0, + Amount::try_from(10001).expect("invalid value"), + ); + + // Create a non-coinbase V5 tx with the last valid expiry height. + let tx = Transaction::V5 { + network_upgrade: NetworkUpgrade::Nu6, + inputs: vec![input], + outputs: vec![output], + lock_time: LockTime::min_lock_time_timestamp(), + expiry_height: height, + sapling_shielded_data: None, + orchard_shielded_data: None, + }; + + let input_outpoint = match tx.inputs()[0] { + transparent::Input::PrevOut { outpoint, .. } => outpoint, + transparent::Input::Coinbase { .. 
} => panic!("requires a non-coinbase transaction"), + }; + + let spend_restriction = tx.coinbase_spend_restriction(&network, height); + + assert_eq!( + spend_restriction, + CoinbaseSpendRestriction::CheckCoinbaseMaturity { + spend_height: height + } + ); + + let coinbase_spend_height = Height(5); + + let utxo = known_utxos + .get(&input_outpoint) + .map(|utxo| { + let mut utxo = utxo.utxo.clone(); + utxo.height = coinbase_spend_height; + utxo.from_coinbase = true; + utxo + }) + .expect("known_utxos should contain the outpoint"); + + zebra_state::check::transparent_coinbase_spend(input_outpoint, spend_restriction, &utxo) + .expect("check should pass"); + + tokio::spawn(async move { + state + .expect_request(zebra_state::Request::BestChainNextMedianTimePast) + .await + .respond(zebra_state::Response::BestChainNextMedianTimePast( + DateTime32::MAX, + )); + + state + .expect_request(zebra_state::Request::UnspentBestChainUtxo(input_outpoint)) + .await + .respond(zebra_state::Response::UnspentBestChainUtxo(Some(utxo))); + + state + .expect_request_that(|req| { + matches!( + req, + zebra_state::Request::CheckBestChainTipNullifiersAndAnchors(_) + ) + }) + .await + .respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors); + }); + + verifier + .oneshot(Request::Mempool { + transaction: tx.into(), + height, + }) + .await + .expect("verification of transaction with mature spend to transparent outputs should pass"); +} + /// Tests that errors from the read state service are correctly converted into /// transaction verifier errors. 
#[tokio::test] diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index acdc2d399a7..57d087c552d 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -48,7 +48,7 @@ fn accept_shielded_mature_coinbase_utxo_spend() { let ordered_utxo = transparent::OrderedUtxo::new(output, created_height, 0); let min_spend_height = Height(created_height.0 + MIN_TRANSPARENT_COINBASE_MATURITY); - let spend_restriction = transparent::CoinbaseSpendRestriction::OnlyShieldedOutputs { + let spend_restriction = transparent::CoinbaseSpendRestriction::CheckCoinbaseMaturity { spend_height: min_spend_height, }; @@ -78,7 +78,7 @@ fn reject_unshielded_coinbase_utxo_spend() { }; let ordered_utxo = transparent::OrderedUtxo::new(output, created_height, 0); - let spend_restriction = transparent::CoinbaseSpendRestriction::SomeTransparentOutputs; + let spend_restriction = transparent::CoinbaseSpendRestriction::DisallowCoinbaseSpend; let result = check::utxo::transparent_coinbase_spend(outpoint, spend_restriction, ordered_utxo.as_ref()); @@ -104,7 +104,7 @@ fn reject_immature_coinbase_utxo_spend() { let min_spend_height = Height(created_height.0 + MIN_TRANSPARENT_COINBASE_MATURITY); let spend_height = Height(min_spend_height.0 - 1); let spend_restriction = - transparent::CoinbaseSpendRestriction::OnlyShieldedOutputs { spend_height }; + transparent::CoinbaseSpendRestriction::CheckCoinbaseMaturity { spend_height }; let result = check::utxo::transparent_coinbase_spend(outpoint, spend_restriction, ordered_utxo.as_ref()); diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index 324efa3c035..df3981ec0b8 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -72,8 +72,10 @@ pub fn transparent_spend( // We don't want to use UTXOs from invalid pending blocks, // so we check transparent coinbase maturity and shielding // 
using known valid UTXOs during non-finalized chain validation. - let spend_restriction = - transaction.coinbase_spend_restriction(semantically_verified.height); + let spend_restriction = transaction.coinbase_spend_restriction( + &finalized_state.network(), + semantically_verified.height, + ); transparent_coinbase_spend(spend, spend_restriction, utxo.as_ref())?; // We don't delete the UTXOs until the block is committed, @@ -195,7 +197,7 @@ pub fn transparent_coinbase_spend( } match spend_restriction { - OnlyShieldedOutputs { spend_height } => { + CheckCoinbaseMaturity { spend_height } => { let min_spend_height = utxo.height + MIN_TRANSPARENT_COINBASE_MATURITY.into(); let min_spend_height = min_spend_height.expect("valid UTXOs have coinbase heights far below Height::MAX"); @@ -210,7 +212,7 @@ pub fn transparent_coinbase_spend( }) } } - SomeTransparentOutputs => Err(UnshieldedTransparentCoinbaseSpend { outpoint }), + DisallowCoinbaseSpend => Err(UnshieldedTransparentCoinbaseSpend { outpoint }), } } From 1ecf6551bc35d3720ed03663c0b38aa5a4e35d3f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 03:55:11 +0000 Subject: [PATCH 043/245] build(deps): bump the devops group across 1 directory with 4 updates (#9091) Bumps the devops group with 4 updates in the / directory: [codecov/codecov-action](https://github.com/codecov/codecov-action), [tj-actions/changed-files](https://github.com/tj-actions/changed-files), [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) and [docker/scout-action](https://github.com/docker/scout-action). 
Updates `codecov/codecov-action` from 5.0.7 to 5.1.1 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.0.7...v5.1.1) Updates `tj-actions/changed-files` from 45.0.4 to 45.0.5 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v45.0.4...v45.0.5) Updates `docker/setup-buildx-action` from 3.7.1 to 3.8.0 - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v3.7.1...v3.8.0) Updates `docker/scout-action` from 1.15.1 to 1.16.1 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.15.1...v1.16.1) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/scout-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 4 ++-- .github/workflows/sub-build-docker-image.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 1b87753b508..4a97563143b 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 43acadbd8ec..26ec61089b3 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.4 + uses: tj-actions/changed-files@v45.0.5 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.4 + uses: tj-actions/changed-files@v45.0.5 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 9050d223080..ac6d5bbbecc 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -152,7 +152,7 @@ jobs: # Setup Docker Buildx to use Docker Build Cloud - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.7.1 + uses: docker/setup-buildx-action@v3.8.0 with: version: "lab:latest" driver: cloud @@ -193,7 +193,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.15.1 + uses: docker/scout-action@v1.16.1 # We only run Docker Scout on the `runtime` target, as the other targets are not 
meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. From 0fe47bbbbbb9e7139bdf79c4fecac0a6d421339c Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Fri, 20 Dec 2024 23:29:16 -0300 Subject: [PATCH 044/245] feature(rpc): Migrate from deprecated `jsonrpc_*` crates to `jsonrpsee` (#9059) * update methods * update get block template rpc methods * update other getblocktemplate files * upgrade server and middlewares * upgrade zebrad start command * remove unused imports * add a todo for unauthenticated rpc error * upgrade tests, temporally comment out some of them * fix the rpc tx queue * update denies * fix links * clippy * fir more doc links * fix queue tests Co-authored-by: Arya * add suggestions from code review * fix snapshots * try `block_on` instead of `now_or_never` in the http middleware * move import * Apply suggestions from code review Co-authored-by: Arya * fix bounds --------- Co-authored-by: Arya --- Cargo.lock | 414 ++--- deny.toml | 15 +- zebra-rpc/Cargo.toml | 8 +- zebra-rpc/src/config.rs | 24 +- zebra-rpc/src/methods.rs | 1577 ++++++++--------- .../src/methods/get_block_template_rpcs.rs | 1465 ++++++++------- .../get_block_template_rpcs/constants.rs | 2 +- .../get_block_template.rs | 61 +- .../types/get_mining_info.rs | 2 +- .../types/submit_block.rs | 8 +- zebra-rpc/src/methods/tests/prop.rs | 60 +- zebra-rpc/src/methods/tests/snapshot.rs | 2 + .../tests/snapshot/get_block_template_rpcs.rs | 22 +- zebra-rpc/src/methods/tests/vectors.rs | 53 +- zebra-rpc/src/server.rs | 228 +-- zebra-rpc/src/server/error.rs | 55 +- .../src/server/http_request_compatibility.rs | 251 +-- .../src/server/rpc_call_compatibility.rs | 136 +- zebra-rpc/src/server/tests/vectors.rs | 485 ++--- zebra-rpc/src/sync.rs | 5 +- zebrad/Cargo.toml | 2 +- zebrad/src/commands/start.rs | 69 +- zebrad/src/components/miner.rs | 2 +- 
zebrad/tests/acceptance.rs | 2 +- zebrad/tests/common/regtest.rs | 5 +- ...etsubtreesbyindex_mainnet_orchard_0_1.snap | 4 +- ...subtreesbyindex_mainnet_orchard_338_1.snap | 4 +- ...subtreesbyindex_mainnet_orchard_585_1.snap | 4 +- ...etsubtreesbyindex_mainnet_sapling_0_1.snap | 4 +- ...tsubtreesbyindex_mainnet_sapling_0_11.snap | 4 +- ...ubtreesbyindex_mainnet_sapling_1090_6.snap | 4 +- ...tsubtreesbyindex_mainnet_sapling_17_1.snap | 4 +- 32 files changed, 2238 insertions(+), 2743 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78a19264e9c..30273b8d3f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,7 +78,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -89,7 +89,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "getrandom 0.2.15", "once_cell", "version_check", @@ -293,8 +293,8 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "itoa", "matchit", @@ -319,8 +319,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -338,7 +338,7 @@ checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide 0.7.4", "object", @@ -573,16 +573,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "bstr" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" -dependencies = [ - "memchr", - "serde", -] - 
[[package]] name = "bumpalo" version = "3.16.0" @@ -697,12 +687,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -721,7 +705,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -975,7 +959,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1070,7 +1054,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", @@ -1538,7 +1522,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -1549,7 +1533,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -1581,19 +1565,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "globset" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" -dependencies = [ - "aho-corasick", - "bstr", - "log", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", -] - [[package]] name = "group" version = "0.13.0" @@ -1617,11 +1588,11 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.1.0", + "http", "indexmap 2.7.0", "slab", "tokio", - "tokio-util 0.7.13", + "tokio-util", "tracing", ] @@ -1631,7 +1602,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crunchy", ] @@ -1788,7 +1759,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "windows", ] @@ -1801,18 +1772,7 @@ checksum = "f34059280f617a59ee59a0455e93460d67e5c76dec42dd262d38f0f390f437b2" dependencies = [ "flume", "indicatif", - "parking_lot 0.12.3", -] - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", + "parking_lot", ] [[package]] @@ -1826,17 +1786,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1844,7 +1793,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http", ] [[package]] @@ -1855,8 +1804,8 @@ checksum = 
"793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "pin-project-lite", ] @@ -1894,29 +1843,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hyper" -version = "0.14.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.5.1" @@ -1927,8 +1853,8 @@ dependencies = [ "futures-channel", "futures-util", "h2", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "httparse", "httpdate", "itoa", @@ -1945,8 +1871,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.5.1", + "http", + "hyper", "hyper-util", "rustls", "rustls-pki-types", @@ -1962,7 +1888,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.5.1", + "hyper", "hyper-util", "pin-project-lite", "tokio", @@ -1978,9 +1904,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.5.1", + "http", + "http-body", + "hyper", "pin-project-lite", "socket2", "tokio", @@ -2138,15 +2064,6 @@ dependencies = [ "similar", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name 
= "ipnet" version = "2.10.1" @@ -2248,49 +2165,90 @@ dependencies = [ ] [[package]] -name = "jsonrpc-derive" -version = "18.0.0" +name = "jsonrpsee" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn 1.0.109", + "jsonrpsee-core", + "jsonrpsee-server", + "jsonrpsee-types", + "tokio", ] [[package]] -name = "jsonrpc-http-server" -version = "18.0.0" +name = "jsonrpsee-core" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ - "futures", - "hyper 0.14.31", - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.11.2", - "unicase", + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "jsonrpsee-types", + "parking_lot", + "rand 0.8.5", + "rustc-hash 2.0.0", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", ] [[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" +name = "jsonrpsee-proc-macros" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ - "bytes", - "futures", - "globset", - "jsonrpc-core", - "lazy_static", - "log", + "heck 0.5.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.24.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" +dependencies = [ + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project", + "route-recognizer", + "serde", + "serde_json", + "soketto", + "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.6.10", - "unicase", + "tokio-util", + "tower 0.4.13", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.24.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" +dependencies = [ + "http", + "serde", + "serde_json", + "thiserror 1.0.69", ] [[package]] @@ -2355,7 +2313,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-targets 0.52.6", ] @@ -2477,7 +2435,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "rayon", ] @@ -2514,7 +2472,7 @@ checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-util", "indexmap 2.7.0", "ipnet", @@ -2603,17 +2561,6 @@ dependencies = [ "getrandom 0.2.15", ] -[[package]] -name = "net2" -version = "0.2.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi", -] - [[package]] name = "nix" version = "0.29.0" @@ -2621,7 +2568,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 
2.6.0", - "cfg-if 1.0.0", + "cfg-if", "cfg_aliases", "libc", ] @@ -2854,23 +2801,12 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.2.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -2878,21 +2814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -2901,9 +2823,9 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -3113,15 +3035,6 @@ dependencies = [ "uint 0.9.5", ] -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml 0.5.11", -] - [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -3505,15 +3418,6 @@ dependencies = [ "zeroize", ] 
-[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.7" @@ -3590,10 +3494,10 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-rustls", "hyper-util", "ipnet", @@ -3613,7 +3517,7 @@ dependencies = [ "sync_wrapper 1.0.1", "tokio", "tokio-rustls", - "tokio-util 0.7.13", + "tokio-util", "tower-service", "url", "wasm-bindgen", @@ -3639,7 +3543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if", "getrandom 0.2.15", "libc", "spin", @@ -3686,6 +3590,12 @@ dependencies = [ "serde", ] +[[package]] +name = "route-recognizer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -4076,13 +3986,24 @@ dependencies = [ "version_check", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -4169,6 +4090,22 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "soketto" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha1", +] + [[package]] name = "spandoc" version = "0.2.2" @@ -4325,7 +4262,7 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "once_cell", "rustix", @@ -4397,7 +4334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "rustversion", @@ -4410,7 +4347,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -4482,7 +4419,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", @@ -4522,7 +4459,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.13", + "tokio-util", ] [[package]] @@ -4538,20 +4475,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.13" @@ -4560,6 +4483,7 @@ checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ 
-4620,10 +4544,10 @@ dependencies = [ "base64 0.22.1", "bytes", "h2", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-timeout", "hyper-util", "percent-encoding", @@ -4680,7 +4604,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.13", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4714,7 +4638,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-test", - "tokio-util 0.7.13", + "tokio-util", "tower 0.4.13", "tower-fallback", "tower-test", @@ -4966,12 +4890,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicase" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" - [[package]] name = "unicode-bidi" version = "0.3.17" @@ -5101,7 +5019,7 @@ checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", "cargo_metadata", - "cfg-if 1.0.0", + "cfg-if", "git2", "regex", "rustc_version", @@ -5228,7 +5146,7 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "wasm-bindgen-macro", ] @@ -5254,7 +5172,7 @@ version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -5962,7 +5880,7 @@ dependencies = [ "thiserror 2.0.6", "tokio", "tokio-stream", - "tokio-util 0.7.13", + "tokio-util", "toml 0.8.19", "tower 0.4.13", "tracing", @@ -5994,11 +5912,13 @@ dependencies = [ "color-eyre", "futures", "hex", + "http-body-util", + "hyper", "indexmap 
2.7.0", "insta", - "jsonrpc-core", - "jsonrpc-derive", - "jsonrpc-http-server", + "jsonrpsee", + "jsonrpsee-proc-macros", + "jsonrpsee-types", "nix", "proptest", "prost", @@ -6204,13 +6124,13 @@ dependencies = [ "howudoin", "http-body-util", "humantime-serde", - "hyper 1.5.1", + "hyper", "hyper-util", "indexmap 2.7.0", "indicatif", "inferno", "insta", - "jsonrpc-core", + "jsonrpsee-types", "lazy_static", "log", "metrics", diff --git a/deny.toml b/deny.toml index 7f804946767..3ae46206943 100644 --- a/deny.toml +++ b/deny.toml @@ -78,19 +78,8 @@ skip-tree = [ { name = "base64", version = "=0.21.7" }, { name = "sync_wrapper", version = "0.1.2" }, - # wait for jsonrpc-http-server to update hyper or for Zebra to replace jsonrpc (#8682) - { name = "h2", version = "=0.3.26" }, - { name = "http", version = "=0.2.12" }, - { name = "http-body", version = "=0.4.6" }, - { name = "hyper", version = "=0.14.31" }, - { name = "hyper-rustls", version = "=0.24.2" }, - - { name = "reqwest", version = "=0.11.27" }, - { name = "rustls", version = "=0.21.12" }, - { name = "rustls-pemfile", version = "=1.0.4" }, - { name = "rustls-webpki", version = "=0.101.7" }, - { name = "tokio-rustls", version = "=0.24.1" }, - { name = "webpki-roots", version = "=0.25.4" }, + # wait for abscissa_core to update toml + { name = "toml", version = "=0.5.11" }, # wait for structopt-derive to update heck { name = "heck", version = "=0.3.3" }, diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 56b7f3c60f0..d180f049dc5 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -59,9 +59,11 @@ chrono = { version = "0.4.39", default-features = false, features = [ ] } futures = "0.3.31" -jsonrpc-core = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-http-server = "18.0.0" +jsonrpsee = { version = "0.24.7", features = ["server"] } +jsonrpsee-types = "0.24.7" +jsonrpsee-proc-macros = "0.24.7" +hyper = "1.5.0" +http-body-util = "0.1.2" # zebra-rpc needs the preserve_order feature in serde_json, which 
is a dependency of jsonrpsee serde_json = { version = "1.0.133", features = ["preserve_order"] } diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index 57187163e55..2a91d14334b 100644 --- a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -50,24 +50,12 @@ pub struct Config { /// The number of threads used to process RPC requests and responses. /// - /// Zebra's RPC server has a separate thread pool and a `tokio` executor for each thread. - /// State queries are run concurrently using the shared thread pool controlled by - /// the [`SyncSection.parallel_cpu_threads`](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.parallel_cpu_threads) config. - /// - /// If the number of threads is not configured or zero, Zebra uses the number of logical cores. - /// If the number of logical cores can't be detected, Zebra uses one thread. - /// - /// Set to `1` to run all RPC queries on a single thread, and detect RPC port conflicts from - /// multiple Zebra or `zcashd` instances. - /// - /// For details, see [the `jsonrpc_http_server` documentation](https://docs.rs/jsonrpc-http-server/latest/jsonrpc_http_server/struct.ServerBuilder.html#method.threads). - /// - /// ## Warning - /// - /// The default config uses multiple threads, which disables RPC port conflict detection. - /// This can allow multiple Zebra instances to share the same RPC port. - /// - /// If some of those instances are outdated or failed, RPC queries can be slow or inconsistent. + /// This field is deprecated and could be removed in a future release. + /// We keep it just for backward compatibility, but it actually does nothing. + /// It was configurable when the RPC server was based on the jsonrpc-core crate, + /// but it is no longer used since we migrated to jsonrpsee. + // TODO: Prefix this field name with an underscore so it's clear that it's now unused, and + // use serde(rename) to continue successfully deserializing old configs. 
pub parallel_cpu_threads: usize, /// Test-only option that makes Zebra say it is at the chain tip, diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index cfd8260d2ba..8634ec43ef5 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -9,12 +9,13 @@ use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; -use futures::{stream::FuturesOrdered, FutureExt, StreamExt, TryFutureExt}; +use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt}; use hex::{FromHex, ToHex}; use hex_data::HexData; use indexmap::IndexMap; -use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use jsonrpsee::core::{async_trait, RpcResult as Result}; +use jsonrpsee_proc_macros::rpc; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use tokio::{sync::broadcast, task::JoinHandle}; use tower::{Service, ServiceExt}; use tracing::Instrument; @@ -56,7 +57,7 @@ pub mod types; pub mod get_block_template_rpcs; #[cfg(feature = "getblocktemplate-rpcs")] -pub use get_block_template_rpcs::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl}; +pub use get_block_template_rpcs::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; #[cfg(test)] mod tests; @@ -64,7 +65,6 @@ mod tests; #[rpc(server)] /// RPC method signatures. pub trait Rpc { - #[rpc(name = "getinfo")] /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct. /// /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) @@ -79,6 +79,7 @@ pub trait Rpc { /// /// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95) + #[method(name = "getinfo")] fn get_info(&self) -> Result; /// Returns blockchain state information, as a [`GetBlockChainInfo`] JSON struct. 
@@ -91,8 +92,8 @@ pub trait Rpc { /// /// Some fields from the zcashd reference are missing from Zebra's [`GetBlockChainInfo`]. It only contains the fields /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L72-L89) - #[rpc(name = "getblockchaininfo")] - fn get_blockchain_info(&self) -> BoxFuture>; + #[method(name = "getblockchaininfo")] + async fn get_blockchain_info(&self) -> Result; /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. /// @@ -116,11 +117,8 @@ pub trait Rpc { /// The RPC documentation says that the returned object has a string `balance` field, but /// zcashd actually [returns an /// integer](https://github.com/zcash/lightwalletd/blob/bdaac63f3ee0dbef62bde04f6817a9f90d483b00/common/common.go#L128-L130). - #[rpc(name = "getaddressbalance")] - fn get_address_balance( - &self, - address_strings: AddressStrings, - ) -> BoxFuture>; + #[method(name = "getaddressbalance")] + async fn get_address_balance(&self, address_strings: AddressStrings) -> Result; /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. @@ -137,11 +135,11 @@ pub trait Rpc { /// /// zcashd accepts an optional `allowhighfees` parameter. Zebra doesn't support this parameter, /// because lightwalletd doesn't use it. - #[rpc(name = "sendrawtransaction")] - fn send_raw_transaction( + #[method(name = "sendrawtransaction")] + async fn send_raw_transaction( &self, raw_transaction_hex: String, - ) -> BoxFuture>; + ) -> Result; /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. /// If the block is not in Zebra's state, returns @@ -167,12 +165,8 @@ pub trait Rpc { /// use verbosity=3. /// /// The undocumented `chainwork` field is not returned. 
- #[rpc(name = "getblock")] - fn get_block( - &self, - hash_or_height: String, - verbosity: Option, - ) -> BoxFuture>; + #[method(name = "getblock")] + async fn get_block(&self, hash_or_height: String, verbosity: Option) -> Result; /// Returns the requested block header by hash or height, as a [`GetBlockHeader`] JSON string. /// If the block is not in Zebra's state, @@ -191,19 +185,19 @@ pub trait Rpc { /// # Notes /// /// The undocumented `chainwork` field is not returned. - #[rpc(name = "getblockheader")] - fn get_block_header( + #[method(name = "getblockheader")] + async fn get_block_header( &self, hash_or_height: String, verbose: Option, - ) -> BoxFuture>; + ) -> Result; /// Returns the hash of the current best blockchain tip block, as a [`GetBlockHash`] JSON string. /// /// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) /// method: post /// tags: blockchain - #[rpc(name = "getbestblockhash")] + #[method(name = "getbestblockhash")] fn get_best_block_hash(&self) -> Result; /// Returns the height and hash of the current best blockchain tip block, as a [`GetBlockHeightAndHash`] JSON struct. @@ -211,7 +205,7 @@ pub trait Rpc { /// zcashd reference: none /// method: post /// tags: blockchain - #[rpc(name = "getbestblockheightandhash")] + #[method(name = "getbestblockheightandhash")] fn get_best_block_height_and_hash(&self) -> Result; /// Returns all transaction ids in the memory pool, as a JSON array. @@ -219,8 +213,8 @@ pub trait Rpc { /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) /// method: post /// tags: blockchain - #[rpc(name = "getrawmempool")] - fn get_raw_mempool(&self) -> BoxFuture>>; + #[method(name = "getrawmempool")] + async fn get_raw_mempool(&self) -> Result>; /// Returns information about the given block's Sapling & Orchard tree state. /// @@ -238,8 +232,8 @@ pub trait Rpc { /// negative where -1 is the last known valid block". 
On the other hand, /// `lightwalletd` only uses positive heights, so Zebra does not support /// negative heights. - #[rpc(name = "z_gettreestate")] - fn z_get_treestate(&self, hash_or_height: String) -> BoxFuture>; + #[method(name = "z_gettreestate")] + async fn z_get_treestate(&self, hash_or_height: String) -> Result; /// Returns information about a range of Sapling or Orchard subtrees. /// @@ -259,13 +253,13 @@ pub trait Rpc { /// starting at the chain tip. This RPC will return an empty list if the `start_index` subtree /// exists, but has not been rebuilt yet. This matches `zcashd`'s behaviour when subtrees aren't /// available yet. (But `zcashd` does its rebuild before syncing any blocks.) - #[rpc(name = "z_getsubtreesbyindex")] - fn z_get_subtrees_by_index( + #[method(name = "z_getsubtreesbyindex")] + async fn z_get_subtrees_by_index( &self, pool: String, start_index: NoteCommitmentSubtreeIndex, limit: Option, - ) -> BoxFuture>; + ) -> Result; /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. /// @@ -286,12 +280,12 @@ pub trait Rpc { /// In verbose mode, we only expose the `hex` and `height` fields since /// lightwalletd uses only those: /// - #[rpc(name = "getrawtransaction")] - fn get_raw_transaction( + #[method(name = "getrawtransaction")] + async fn get_raw_transaction( &self, txid: String, verbose: Option, - ) -> BoxFuture>; + ) -> Result; /// Returns the transaction ids made by the provided transparent addresses. /// @@ -310,9 +304,8 @@ pub trait Rpc { /// /// Only the multi-argument format is used by lightwalletd and this is what we currently support: /// - #[rpc(name = "getaddresstxids")] - fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) - -> BoxFuture>>; + #[method(name = "getaddresstxids")] + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result>; /// Returns all unspent outputs for a list of addresses. 
/// @@ -328,11 +321,11 @@ pub trait Rpc { /// /// lightwalletd always uses the multi-address request, without chaininfo: /// - #[rpc(name = "getaddressutxos")] - fn get_address_utxos( + #[method(name = "getaddressutxos")] + async fn get_address_utxos( &self, address_strings: AddressStrings, - ) -> BoxFuture>>; + ) -> Result>; /// Stop the running zebrad process. /// @@ -344,7 +337,7 @@ pub trait Rpc { /// zcashd reference: [`stop`](https://zcash.github.io/rpc/stop.html) /// method: post /// tags: control - #[rpc(name = "stop")] + #[method(name = "stop")] fn stop(&self) -> Result; } @@ -516,7 +509,8 @@ where } } -impl Rpc for RpcImpl +#[async_trait] +impl RpcServer for RpcImpl where Mempool: Service< mempool::Request, @@ -548,198 +542,186 @@ where } #[allow(clippy::unwrap_in_result)] - fn get_blockchain_info(&self) -> BoxFuture> { + async fn get_blockchain_info(&self) -> Result { let network = self.network.clone(); let debug_force_finished_sync = self.debug_force_finished_sync; let mut state = self.state.clone(); - async move { - // `chain` field - let chain = network.bip70_network_name(); - - let request = zebra_state::ReadRequest::TipPoolValues; - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let zebra_state::ReadResponse::TipPoolValues { - tip_height, - tip_hash, - value_balance, - } = response - else { - unreachable!("unmatched response to a TipPoolValues request") - }; - - let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let zebra_state::ReadResponse::BlockHeader { header, .. 
} = response else { - unreachable!("unmatched response to a BlockHeader request") - }; + // `chain` field + let chain = network.bip70_network_name(); + + let request = zebra_state::ReadRequest::TipPoolValues; + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + let zebra_state::ReadResponse::TipPoolValues { + tip_height, + tip_hash, + value_balance, + } = response + else { + unreachable!("unmatched response to a TipPoolValues request") + }; - let tip_block_time = header.time; + let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let now = Utc::now(); - let zebra_estimated_height = - NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) - .estimate_height_at(now); + let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { + unreachable!("unmatched response to a BlockHeader request") + }; - // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, - // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is - // later than the current time on the local clock. - let estimated_height = if tip_block_time > now - || zebra_estimated_height < tip_height - || debug_force_finished_sync - { - tip_height - } else { - zebra_estimated_height - }; + let tip_block_time = header.time; + + let now = Utc::now(); + let zebra_estimated_height = + NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) + .estimate_height_at(now); + + // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, + // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is + // later than the current time on the local clock. 
+ let estimated_height = if tip_block_time > now + || zebra_estimated_height < tip_height + || debug_force_finished_sync + { + tip_height + } else { + zebra_estimated_height + }; - // `upgrades` object + // `upgrades` object + // + // Get the network upgrades in height order, like `zcashd`. + let mut upgrades = IndexMap::new(); + for (activation_height, network_upgrade) in network.full_activation_list() { + // Zebra defines network upgrades based on incompatible consensus rule changes, + // but zcashd defines them based on ZIPs. // - // Get the network upgrades in height order, like `zcashd`. - let mut upgrades = IndexMap::new(); - for (activation_height, network_upgrade) in network.full_activation_list() { - // Zebra defines network upgrades based on incompatible consensus rule changes, - // but zcashd defines them based on ZIPs. - // - // All the network upgrades with a consensus branch ID are the same in Zebra and zcashd. - if let Some(branch_id) = network_upgrade.branch_id() { - // zcashd's RPC seems to ignore Disabled network upgrades, so Zebra does too. - let status = if tip_height >= activation_height { - NetworkUpgradeStatus::Active - } else { - NetworkUpgradeStatus::Pending - }; - - let upgrade = NetworkUpgradeInfo { - name: network_upgrade, - activation_height, - status, - }; - upgrades.insert(ConsensusBranchIdHex(branch_id), upgrade); - } + // All the network upgrades with a consensus branch ID are the same in Zebra and zcashd. + if let Some(branch_id) = network_upgrade.branch_id() { + // zcashd's RPC seems to ignore Disabled network upgrades, so Zebra does too. 
+ let status = if tip_height >= activation_height { + NetworkUpgradeStatus::Active + } else { + NetworkUpgradeStatus::Pending + }; + + let upgrade = NetworkUpgradeInfo { + name: network_upgrade, + activation_height, + status, + }; + upgrades.insert(ConsensusBranchIdHex(branch_id), upgrade); } + } - // `consensus` object - let next_block_height = - (tip_height + 1).expect("valid chain tips are a lot less than Height::MAX"); - let consensus = TipConsensusBranch { - chain_tip: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, tip_height) - .branch_id() - .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), - ), - next_block: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, next_block_height) - .branch_id() - .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), - ), - }; + // `consensus` object + let next_block_height = + (tip_height + 1).expect("valid chain tips are a lot less than Height::MAX"); + let consensus = TipConsensusBranch { + chain_tip: ConsensusBranchIdHex( + NetworkUpgrade::current(&network, tip_height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), + ), + next_block: ConsensusBranchIdHex( + NetworkUpgrade::current(&network, next_block_height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), + ), + }; - let response = GetBlockChainInfo { - chain, - blocks: tip_height, - best_block_hash: tip_hash, - estimated_height, - value_pools: types::ValuePoolBalance::from_value_balance(value_balance), - upgrades, - consensus, - }; + let response = GetBlockChainInfo { + chain, + blocks: tip_height, + best_block_hash: tip_hash, + estimated_height, + value_pools: types::ValuePoolBalance::from_value_balance(value_balance), + upgrades, + consensus, + }; - Ok(response) - } - .boxed() + Ok(response) } - fn get_address_balance( - &self, - address_strings: AddressStrings, - ) -> BoxFuture> { + async fn get_address_balance(&self, address_strings: AddressStrings) -> Result { let state = self.state.clone(); - async move { - let 
valid_addresses = address_strings.valid_addresses()?; + let valid_addresses = address_strings.valid_addresses()?; - let request = zebra_state::ReadRequest::AddressBalance(valid_addresses); - let response = state.oneshot(request).await.map_misc_error()?; + let request = zebra_state::ReadRequest::AddressBalance(valid_addresses); + let response = state.oneshot(request).await.map_misc_error()?; - match response { - zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { - balance: u64::from(balance), - }), - _ => unreachable!("Unexpected response from state service: {response:?}"), - } + match response { + zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { + balance: u64::from(balance), + }), + _ => unreachable!("Unexpected response from state service: {response:?}"), } - .boxed() } // TODO: use HexData or GetRawTransaction::Bytes to handle the transaction data argument - fn send_raw_transaction( + async fn send_raw_transaction( &self, raw_transaction_hex: String, - ) -> BoxFuture> { + ) -> Result { let mempool = self.mempool.clone(); let queue_sender = self.queue_sender.clone(); - async move { - // Reference for the legacy error code: - // - let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex) - .map_error(server::error::LegacyCode::Deserialization)?; - let raw_transaction = Transaction::zcash_deserialize(&*raw_transaction_bytes) - .map_error(server::error::LegacyCode::Deserialization)?; + // Reference for the legacy error code: + // + let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex) + .map_error(server::error::LegacyCode::Deserialization)?; + let raw_transaction = Transaction::zcash_deserialize(&*raw_transaction_bytes) + .map_error(server::error::LegacyCode::Deserialization)?; - let transaction_hash = raw_transaction.hash(); + let transaction_hash = raw_transaction.hash(); - // send transaction to the rpc queue, ignore any error. 
- let unmined_transaction = UnminedTx::from(raw_transaction.clone()); - let _ = queue_sender.send(unmined_transaction); + // send transaction to the rpc queue, ignore any error. + let unmined_transaction = UnminedTx::from(raw_transaction.clone()); + let _ = queue_sender.send(unmined_transaction); - let transaction_parameter = mempool::Gossip::Tx(raw_transaction.into()); - let request = mempool::Request::Queue(vec![transaction_parameter]); + let transaction_parameter = mempool::Gossip::Tx(raw_transaction.into()); + let request = mempool::Request::Queue(vec![transaction_parameter]); - let response = mempool.oneshot(request).await.map_misc_error()?; + let response = mempool.oneshot(request).await.map_misc_error()?; - let mut queue_results = match response { - mempool::Response::Queued(results) => results, - _ => unreachable!("incorrect response variant from mempool service"), - }; + let mut queue_results = match response { + mempool::Response::Queued(results) => results, + _ => unreachable!("incorrect response variant from mempool service"), + }; - assert_eq!( - queue_results.len(), - 1, - "mempool service returned more results than expected" - ); + assert_eq!( + queue_results.len(), + 1, + "mempool service returned more results than expected" + ); - let queue_result = queue_results - .pop() - .expect("there should be exactly one item in Vec") - .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err)) - .map_misc_error()? - .await - .map_misc_error()?; + let queue_result = queue_results + .pop() + .expect("there should be exactly one item in Vec") + .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err)) + .map_misc_error()? 
+ .await + .map_misc_error()?; - tracing::debug!("sent transaction to mempool: {:?}", &queue_result); + tracing::debug!("sent transaction to mempool: {:?}", &queue_result); - queue_result - .map(|_| SentTransactionHash(transaction_hash)) - // Reference for the legacy error code: - // - // Note that this error code might not exactly match the one returned by zcashd - // since zcashd's error code selection logic is more granular. We'd need to - // propagate the error coming from the verifier to be able to return more specific - // error codes. - .map_error(server::error::LegacyCode::Verify) - } - .boxed() + queue_result + .map(|_| SentTransactionHash(transaction_hash)) + // Reference for the legacy error code: + // + // Note that this error code might not exactly match the one returned by zcashd + // since zcashd's error code selection logic is more granular. We'd need to + // propagate the error coming from the verifier to be able to return more specific + // error codes. + .map_error(server::error::LegacyCode::Verify) } // # Performance @@ -750,11 +732,7 @@ where // TODO: // - use `height_from_signed_int()` to handle negative heights // (this might be better in the state request, because it needs the state height) - fn get_block( - &self, - hash_or_height: String, - verbosity: Option, - ) -> BoxFuture> { + async fn get_block(&self, hash_or_height: String, verbosity: Option) -> Result { let mut state = self.state.clone(); let verbosity = verbosity.unwrap_or(1); let network = self.network.clone(); @@ -767,277 +745,269 @@ where None }; - async move { - let hash_or_height: HashOrHeight = hash_or_height - .parse() - // Reference for the legacy error code: - // - .map_error(server::error::LegacyCode::InvalidParameter)?; - - if verbosity == 0 { - let request = zebra_state::ReadRequest::Block(hash_or_height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - match response { - 
zebra_state::ReadResponse::Block(Some(block)) => { - Ok(GetBlock::Raw(block.into())) - } - zebra_state::ReadResponse::Block(None) => Err("Block not found") - .map_error(server::error::LegacyCode::InvalidParameter), - _ => unreachable!("unmatched response to a block request"), + let hash_or_height: HashOrHeight = hash_or_height + .parse() + // Reference for the legacy error code: + // + .map_error(server::error::LegacyCode::InvalidParameter)?; + + if verbosity == 0 { + let request = zebra_state::ReadRequest::Block(hash_or_height); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + match response { + zebra_state::ReadResponse::Block(Some(block)) => Ok(GetBlock::Raw(block.into())), + zebra_state::ReadResponse::Block(None) => { + Err("Block not found").map_error(server::error::LegacyCode::InvalidParameter) } - } else if let Some(get_block_header_future) = get_block_header_future { - let get_block_header_result: Result = get_block_header_future.await; + _ => unreachable!("unmatched response to a block request"), + } + } else if let Some(get_block_header_future) = get_block_header_future { + let get_block_header_result: Result = get_block_header_future.await; - let GetBlockHeader::Object(block_header) = get_block_header_result? else { - panic!("must return Object") - }; + let GetBlockHeader::Object(block_header) = get_block_header_result? 
else { + panic!("must return Object") + }; - let GetBlockHeaderObject { - hash, - confirmations, - height, - version, - merkle_root, - final_sapling_root, - sapling_tree_size, - time, - nonce, - solution, - bits, - difficulty, - previous_block_hash, - next_block_hash, - } = *block_header; - - let transactions_request = match verbosity { - 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), - 2 => zebra_state::ReadRequest::Block(hash_or_height), - _other => panic!("get_block_header_fut should be none"), - }; + let GetBlockHeaderObject { + hash, + confirmations, + height, + version, + merkle_root, + final_sapling_root, + sapling_tree_size, + time, + nonce, + solution, + bits, + difficulty, + previous_block_hash, + next_block_hash, + } = *block_header; + + let transactions_request = match verbosity { + 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), + 2 => zebra_state::ReadRequest::Block(hash_or_height), + _other => panic!("get_block_header_fut should be none"), + }; + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. + let hash_or_height = hash.0.into(); + let requests = vec![ + // Get transaction IDs from the transaction index by block hash + // // # Concurrency // - // We look up by block hash so the hash, transaction IDs, and confirmations - // are consistent. - let hash_or_height = hash.0.into(); - let requests = vec![ - // Get transaction IDs from the transaction index by block hash - // - // # Concurrency - // - // A block's transaction IDs are never modified, so all possible responses are - // valid. Clients that query block heights must be able to handle chain forks, - // including getting transaction IDs from any chain fork. 
- transactions_request, - // Orchard trees - zebra_state::ReadRequest::OrchardTree(hash_or_height), - ]; - - let mut futs = FuturesOrdered::new(); - - for request in requests { - futs.push_back(state.clone().oneshot(request)); - } + // A block's transaction IDs are never modified, so all possible responses are + // valid. Clients that query block heights must be able to handle chain forks, + // including getting transaction IDs from any chain fork. + transactions_request, + // Orchard trees + zebra_state::ReadRequest::OrchardTree(hash_or_height), + ]; + + let mut futs = FuturesOrdered::new(); + + for request in requests { + futs.push_back(state.clone().oneshot(request)); + } - let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); - let tx: Vec<_> = match tx_ids_response.map_misc_error()? { - zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids - .ok_or_misc_error("block not found")? - .iter() - .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) - .collect(), - zebra_state::ReadResponse::Block(block) => block - .ok_or_misc_error("Block not found")? - .transactions - .iter() - .map(|tx| { - GetBlockTransaction::Object(TransactionObject::from_transaction( - tx.clone(), - Some(height), - Some( - confirmations - .try_into() - .expect("should be less than max block height, i32::MAX"), - ), - )) - }) - .collect(), - _ => unreachable!("unmatched response to a transaction_ids_for_block request"), - }; + let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); + let tx: Vec<_> = match tx_ids_response.map_misc_error()? { + zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids + .ok_or_misc_error("block not found")? + .iter() + .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) + .collect(), + zebra_state::ReadResponse::Block(block) => block + .ok_or_misc_error("Block not found")? 
+ .transactions + .iter() + .map(|tx| { + GetBlockTransaction::Object(TransactionObject::from_transaction( + tx.clone(), + Some(height), + Some( + confirmations + .try_into() + .expect("should be less than max block height, i32::MAX"), + ), + )) + }) + .collect(), + _ => unreachable!("unmatched response to a transaction_ids_for_block request"), + }; - let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); - let zebra_state::ReadResponse::OrchardTree(orchard_tree) = - orchard_tree_response.map_misc_error()? - else { - unreachable!("unmatched response to a OrchardTree request"); - }; + let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); + let zebra_state::ReadResponse::OrchardTree(orchard_tree) = + orchard_tree_response.map_misc_error()? + else { + unreachable!("unmatched response to a OrchardTree request"); + }; - let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); + let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); - // This could be `None` if there's a chain reorg between state queries. - let orchard_tree = orchard_tree.ok_or_misc_error("missing Orchard tree")?; + // This could be `None` if there's a chain reorg between state queries. 
+ let orchard_tree = orchard_tree.ok_or_misc_error("missing Orchard tree")?; - let final_orchard_root = match nu5_activation { - Some(activation_height) if height >= activation_height => { - Some(orchard_tree.root().into()) - } - _other => None, - }; + let final_orchard_root = match nu5_activation { + Some(activation_height) if height >= activation_height => { + Some(orchard_tree.root().into()) + } + _other => None, + }; - let sapling = SaplingTrees { - size: sapling_tree_size, - }; + let sapling = SaplingTrees { + size: sapling_tree_size, + }; - let orchard_tree_size = orchard_tree.count(); - let orchard = OrchardTrees { - size: orchard_tree_size, - }; + let orchard_tree_size = orchard_tree.count(); + let orchard = OrchardTrees { + size: orchard_tree_size, + }; - let trees = GetBlockTrees { sapling, orchard }; - - Ok(GetBlock::Object { - hash, - confirmations, - height: Some(height), - version: Some(version), - merkle_root: Some(merkle_root), - time: Some(time), - nonce: Some(nonce), - solution: Some(solution), - bits: Some(bits), - difficulty: Some(difficulty), - tx, - trees, - size: None, - final_sapling_root: Some(final_sapling_root), - final_orchard_root, - previous_block_hash: Some(previous_block_hash), - next_block_hash, - }) - } else { - Err("invalid verbosity value") - .map_error(server::error::LegacyCode::InvalidParameter) - } + let trees = GetBlockTrees { sapling, orchard }; + + Ok(GetBlock::Object { + hash, + confirmations, + height: Some(height), + version: Some(version), + merkle_root: Some(merkle_root), + time: Some(time), + nonce: Some(nonce), + solution: Some(solution), + bits: Some(bits), + difficulty: Some(difficulty), + tx, + trees, + size: None, + final_sapling_root: Some(final_sapling_root), + final_orchard_root, + previous_block_hash: Some(previous_block_hash), + next_block_hash, + }) + } else { + Err("invalid verbosity value").map_error(server::error::LegacyCode::InvalidParameter) } - .boxed() } - fn get_block_header( + async fn 
get_block_header( &self, hash_or_height: String, verbose: Option, - ) -> BoxFuture> { + ) -> Result { let state = self.state.clone(); let verbose = verbose.unwrap_or(true); let network = self.network.clone(); - async move { - let hash_or_height: HashOrHeight = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; - let zebra_state::ReadResponse::BlockHeader { - header, - hash, - height, - next_block_hash, - } = state + let hash_or_height: HashOrHeight = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; + let zebra_state::ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash, + } = state + .clone() + .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) + .await + .map_err(|_| "block height not in best chain") + .map_error( + // ## Compatibility with `zcashd`. + // + // Since this function is reused by getblock(), we return the errors + // expected by it (they differ whether a hash or a height was passed). + if hash_or_height.hash().is_some() { + server::error::LegacyCode::InvalidAddressOrKey + } else { + server::error::LegacyCode::InvalidParameter + }, + )? + else { + panic!("unexpected response to BlockHeader request") + }; + + let response = if !verbose { + GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_misc_error()?)) + } else { + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state .clone() - .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) + .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) .await - .map_err(|_| "block height not in best chain") - .map_error( - // ## Compatibility with `zcashd`. - // - // Since this function is reused by getblock(), we return the errors - // expected by it (they differ whether a hash or a height was passed). - if hash_or_height.hash().is_some() { - server::error::LegacyCode::InvalidAddressOrKey - } else { - server::error::LegacyCode::InvalidParameter - }, - )? 
+ .map_misc_error()? else { - panic!("unexpected response to BlockHeader request") + panic!("unexpected response to SaplingTree request") }; - let response = if !verbose { - GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_misc_error()?)) - } else { - let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state - .clone() - .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) - .await - .map_misc_error()? - else { - panic!("unexpected response to SaplingTree request") - }; + // This could be `None` if there's a chain reorg between state queries. + let sapling_tree = sapling_tree.ok_or_misc_error("missing Sapling tree")?; - // This could be `None` if there's a chain reorg between state queries. - let sapling_tree = sapling_tree.ok_or_misc_error("missing Sapling tree")?; + let zebra_state::ReadResponse::Depth(depth) = state + .clone() + .oneshot(zebra_state::ReadRequest::Depth(hash)) + .await + .map_misc_error()? + else { + panic!("unexpected response to SaplingTree request") + }; - let zebra_state::ReadResponse::Depth(depth) = state - .clone() - .oneshot(zebra_state::ReadRequest::Depth(hash)) - .await - .map_misc_error()? - else { - panic!("unexpected response to SaplingTree request") + // From + // TODO: Deduplicate const definition, consider refactoring this to avoid duplicate logic + const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; + + // Confirmations are one more than the depth. + // Depth is limited by height, so it will never overflow an i64. 
+ let confirmations = depth + .map(|depth| i64::from(depth) + 1) + .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); + + let mut nonce = *header.nonce; + nonce.reverse(); + + let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); + let sapling_tree_size = sapling_tree.count(); + let final_sapling_root: [u8; 32] = + if sapling_activation.is_some() && height >= sapling_activation.unwrap() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] }; - // From - // TODO: Deduplicate const definition, consider refactoring this to avoid duplicate logic - const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; - - // Confirmations are one more than the depth. - // Depth is limited by height, so it will never overflow an i64. - let confirmations = depth - .map(|depth| i64::from(depth) + 1) - .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); - - let mut nonce = *header.nonce; - nonce.reverse(); - - let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); - let sapling_tree_size = sapling_tree.count(); - let final_sapling_root: [u8; 32] = - if sapling_activation.is_some() && height >= sapling_activation.unwrap() { - let mut root: [u8; 32] = sapling_tree.root().into(); - root.reverse(); - root - } else { - [0; 32] - }; - - let difficulty = header.difficulty_threshold.relative_to_network(&network); - - let block_header = GetBlockHeaderObject { - hash: GetBlockHash(hash), - confirmations, - height, - version: header.version, - merkle_root: header.merkle_root, - final_sapling_root, - sapling_tree_size, - time: header.time.timestamp(), - nonce, - solution: header.solution, - bits: header.difficulty_threshold, - difficulty, - previous_block_hash: GetBlockHash(header.previous_block_hash), - next_block_hash: next_block_hash.map(GetBlockHash), - }; + let difficulty = header.difficulty_threshold.relative_to_network(&network); - GetBlockHeader::Object(Box::new(block_header)) + let block_header = 
GetBlockHeaderObject { + hash: GetBlockHash(hash), + confirmations, + height, + version: header.version, + merkle_root: header.merkle_root, + final_sapling_root, + sapling_tree_size, + time: header.time.timestamp(), + nonce, + solution: header.solution, + bits: header.difficulty_threshold, + difficulty, + previous_block_hash: GetBlockHash(header.previous_block_hash), + next_block_hash: next_block_hash.map(GetBlockHash), }; - Ok(response) - } - .boxed() + GetBlockHeader::Object(Box::new(block_header)) + }; + + Ok(response) } fn get_best_block_hash(&self) -> Result { @@ -1054,7 +1024,7 @@ where .ok_or_misc_error("No blocks in state") } - fn get_raw_mempool(&self) -> BoxFuture>> { + async fn get_raw_mempool(&self) -> Result> { #[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::block::MAX_BLOCK_BYTES; @@ -1064,421 +1034,400 @@ where let mut mempool = self.mempool.clone(); - async move { - #[cfg(feature = "getblocktemplate-rpcs")] - let request = if should_use_zcashd_order { - mempool::Request::FullTransactions - } else { - mempool::Request::TransactionIds - }; - - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let request = mempool::Request::TransactionIds; + #[cfg(feature = "getblocktemplate-rpcs")] + let request = if should_use_zcashd_order { + mempool::Request::FullTransactions + } else { + mempool::Request::TransactionIds + }; - // `zcashd` doesn't check if it is synced to the tip here, so we don't either. 
- let response = mempool - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; + #[cfg(not(feature = "getblocktemplate-rpcs"))] + let request = mempool::Request::TransactionIds; - match response { - #[cfg(feature = "getblocktemplate-rpcs")] - mempool::Response::FullTransactions { - mut transactions, - transaction_dependencies: _, - last_seen_tip_hash: _, - } => { - // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker - transactions.sort_by_cached_key(|tx| { - // zcashd uses modified fee here but Zebra doesn't currently - // support prioritizing transactions - std::cmp::Reverse(( - i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 - / tx.transaction.size as u128, - // transaction hashes are compared in their serialized byte-order. - tx.transaction.id.mined_id(), - )) - }); + // `zcashd` doesn't check if it is synced to the tip here, so we don't either. + let response = mempool + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let tx_ids: Vec = transactions - .iter() - .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) - .collect(); + match response { + #[cfg(feature = "getblocktemplate-rpcs")] + mempool::Response::FullTransactions { + mut transactions, + transaction_dependencies: _, + last_seen_tip_hash: _, + } => { + // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker + transactions.sort_by_cached_key(|tx| { + // zcashd uses modified fee here but Zebra doesn't currently + // support prioritizing transactions + std::cmp::Reverse(( + i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 + / tx.transaction.size as u128, + // transaction hashes are compared in their serialized byte-order. 
+ tx.transaction.id.mined_id(), + )) + }); - Ok(tx_ids) - } + let tx_ids: Vec = transactions + .iter() + .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) + .collect(); - mempool::Response::TransactionIds(unmined_transaction_ids) => { - let mut tx_ids: Vec = unmined_transaction_ids - .iter() - .map(|id| id.mined_id().encode_hex()) - .collect(); + Ok(tx_ids) + } - // Sort returned transaction IDs in numeric/string order. - tx_ids.sort(); + mempool::Response::TransactionIds(unmined_transaction_ids) => { + let mut tx_ids: Vec = unmined_transaction_ids + .iter() + .map(|id| id.mined_id().encode_hex()) + .collect(); - Ok(tx_ids) - } + // Sort returned transaction IDs in numeric/string order. + tx_ids.sort(); - _ => unreachable!("unmatched response to a transactionids request"), + Ok(tx_ids) } + + _ => unreachable!("unmatched response to a transactionids request"), } - .boxed() } - fn get_raw_transaction( + async fn get_raw_transaction( &self, txid: String, verbose: Option, - ) -> BoxFuture> { + ) -> Result { let mut state = self.state.clone(); let mut mempool = self.mempool.clone(); let verbose = verbose.unwrap_or(0) != 0; - async move { - // Reference for the legacy error code: - // - let txid = transaction::Hash::from_hex(txid) - .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; - - // Check the mempool first. - match mempool - .ready() - .and_then(|service| { - service.call(mempool::Request::TransactionsByMinedId([txid].into())) - }) - .await - .map_misc_error()? 
- { - mempool::Response::Transactions(txns) => { - if let Some(tx) = txns.first() { - return Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( - tx.transaction.clone(), - None, - None, - )) - } else { - let hex = tx.transaction.clone().into(); - GetRawTransaction::Raw(hex) - }); - } + // Reference for the legacy error code: + // + let txid = transaction::Hash::from_hex(txid) + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; + + // Check the mempool first. + match mempool + .ready() + .and_then(|service| { + service.call(mempool::Request::TransactionsByMinedId([txid].into())) + }) + .await + .map_misc_error()? + { + mempool::Response::Transactions(txns) => { + if let Some(tx) = txns.first() { + return Ok(if verbose { + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.transaction.clone(), + None, + None, + )) + } else { + let hex = tx.transaction.clone().into(); + GetRawTransaction::Raw(hex) + }); } + } - _ => unreachable!("unmatched response to a `TransactionsByMinedId` request"), - }; - - // If the tx wasn't in the mempool, check the state. - match state - .ready() - .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( - tx.tx.clone(), - Some(tx.height), - Some(tx.confirmations), - )) - } else { - let hex = tx.tx.into(); - GetRawTransaction::Raw(hex) - }), + _ => unreachable!("unmatched response to a `TransactionsByMinedId` request"), + }; - zebra_state::ReadResponse::Transaction(None) => { - Err("No such mempool or main chain transaction") - .map_error(server::error::LegacyCode::InvalidAddressOrKey) - } + // If the tx wasn't in the mempool, check the state. + match state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) + .await + .map_misc_error()? 
+ { + zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.tx.clone(), + Some(tx.height), + Some(tx.confirmations), + )) + } else { + let hex = tx.tx.into(); + GetRawTransaction::Raw(hex) + }), - _ => unreachable!("unmatched response to a `Transaction` read request"), + zebra_state::ReadResponse::Transaction(None) => { + Err("No such mempool or main chain transaction") + .map_error(server::error::LegacyCode::InvalidAddressOrKey) } + + _ => unreachable!("unmatched response to a `Transaction` read request"), } - .boxed() } // TODO: // - use `height_from_signed_int()` to handle negative heights // (this might be better in the state request, because it needs the state height) - fn z_get_treestate(&self, hash_or_height: String) -> BoxFuture> { + async fn z_get_treestate(&self, hash_or_height: String) -> Result { let mut state = self.state.clone(); let network = self.network.clone(); - async move { - // Reference for the legacy error code: - // - let hash_or_height = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidParameter)?; + // Reference for the legacy error code: + // + let hash_or_height = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidParameter)?; - // Fetch the block referenced by [`hash_or_height`] from the state. - // - // # Concurrency - // - // For consistency, this lookup must be performed first, then all the other lookups must - // be based on the hash. - // - // TODO: If this RPC is called a lot, just get the block header, rather than the whole block. - let block = match state - .ready() - .and_then(|service| service.call(zebra_state::ReadRequest::Block(hash_or_height))) - .await - .map_misc_error()? 
- { - zebra_state::ReadResponse::Block(Some(block)) => block, - zebra_state::ReadResponse::Block(None) => { - // Reference for the legacy error code: - // - return Err("the requested block is not in the main chain") - .map_error(server::error::LegacyCode::InvalidParameter); - } - _ => unreachable!("unmatched response to a block request"), - }; + // Fetch the block referenced by [`hash_or_height`] from the state. + // + // # Concurrency + // + // For consistency, this lookup must be performed first, then all the other lookups must + // be based on the hash. + // + // TODO: If this RPC is called a lot, just get the block header, rather than the whole block. + let block = match state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Block(hash_or_height))) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::Block(Some(block)) => block, + zebra_state::ReadResponse::Block(None) => { + // Reference for the legacy error code: + // + return Err("the requested block is not in the main chain") + .map_error(server::error::LegacyCode::InvalidParameter); + } + _ => unreachable!("unmatched response to a block request"), + }; - let hash = hash_or_height - .hash_or_else(|_| Some(block.hash())) - .expect("block hash"); + let hash = hash_or_height + .hash_or_else(|_| Some(block.hash())) + .expect("block hash"); - let height = hash_or_height - .height_or_else(|_| block.coinbase_height()) - .expect("verified blocks have a coinbase height"); + let height = hash_or_height + .height_or_else(|_| block.coinbase_height()) + .expect("verified blocks have a coinbase height"); - let time = u32::try_from(block.header.time.timestamp()) - .expect("Timestamps of valid blocks always fit into u32."); + let time = u32::try_from(block.header.time.timestamp()) + .expect("Timestamps of valid blocks always fit into u32."); - let sapling_nu = zcash_primitives::consensus::NetworkUpgrade::Sapling; - let sapling = if network.is_nu_active(sapling_nu, height.into()) { - match 
state - .ready() - .and_then(|service| { - service.call(zebra_state::ReadRequest::SaplingTree(hash.into())) - }) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::SaplingTree(tree) => tree.map(|t| t.to_rpc_bytes()), - _ => unreachable!("unmatched response to a Sapling tree request"), - } - } else { - None - }; + let sapling_nu = zcash_primitives::consensus::NetworkUpgrade::Sapling; + let sapling = if network.is_nu_active(sapling_nu, height.into()) { + match state + .ready() + .and_then(|service| { + service.call(zebra_state::ReadRequest::SaplingTree(hash.into())) + }) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::SaplingTree(tree) => tree.map(|t| t.to_rpc_bytes()), + _ => unreachable!("unmatched response to a Sapling tree request"), + } + } else { + None + }; - let orchard_nu = zcash_primitives::consensus::NetworkUpgrade::Nu5; - let orchard = if network.is_nu_active(orchard_nu, height.into()) { - match state - .ready() - .and_then(|service| { - service.call(zebra_state::ReadRequest::OrchardTree(hash.into())) - }) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::OrchardTree(tree) => tree.map(|t| t.to_rpc_bytes()), - _ => unreachable!("unmatched response to an Orchard tree request"), - } - } else { - None - }; + let orchard_nu = zcash_primitives::consensus::NetworkUpgrade::Nu5; + let orchard = if network.is_nu_active(orchard_nu, height.into()) { + match state + .ready() + .and_then(|service| { + service.call(zebra_state::ReadRequest::OrchardTree(hash.into())) + }) + .await + .map_misc_error()? 
+ { + zebra_state::ReadResponse::OrchardTree(tree) => tree.map(|t| t.to_rpc_bytes()), + _ => unreachable!("unmatched response to an Orchard tree request"), + } + } else { + None + }; - Ok(GetTreestate::from_parts( - hash, height, time, sapling, orchard, - )) - } - .boxed() + Ok(GetTreestate::from_parts( + hash, height, time, sapling, orchard, + )) } - fn z_get_subtrees_by_index( + async fn z_get_subtrees_by_index( &self, pool: String, start_index: NoteCommitmentSubtreeIndex, limit: Option, - ) -> BoxFuture> { + ) -> Result { let mut state = self.state.clone(); - async move { - const POOL_LIST: &[&str] = &["sapling", "orchard"]; + const POOL_LIST: &[&str] = &["sapling", "orchard"]; - if pool == "sapling" { - let request = zebra_state::ReadRequest::SaplingSubtrees { start_index, limit }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let subtrees = match response { - zebra_state::ReadResponse::SaplingSubtrees(subtrees) => subtrees, - _ => unreachable!("unmatched response to a subtrees request"), - }; + if pool == "sapling" { + let request = zebra_state::ReadRequest::SaplingSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let subtrees = subtrees - .values() - .map(|subtree| SubtreeRpcData { - root: subtree.root.encode_hex(), - end_height: subtree.end_height, - }) - .collect(); + let subtrees = match response { + zebra_state::ReadResponse::SaplingSubtrees(subtrees) => subtrees, + _ => unreachable!("unmatched response to a subtrees request"), + }; - Ok(GetSubtrees { - pool, - start_index, - subtrees, + let subtrees = subtrees + .values() + .map(|subtree| SubtreeRpcData { + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) - } else if pool == "orchard" { - let request = zebra_state::ReadRequest::OrchardSubtrees { start_index, limit }; - let response = state - .ready() - .and_then(|service| 
service.call(request)) - .await - .map_misc_error()?; - - let subtrees = match response { - zebra_state::ReadResponse::OrchardSubtrees(subtrees) => subtrees, - _ => unreachable!("unmatched response to a subtrees request"), - }; + .collect(); - let subtrees = subtrees - .values() - .map(|subtree| SubtreeRpcData { - root: subtree.root.encode_hex(), - end_height: subtree.end_height, - }) - .collect(); + Ok(GetSubtrees { + pool, + start_index, + subtrees, + }) + } else if pool == "orchard" { + let request = zebra_state::ReadRequest::OrchardSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - Ok(GetSubtrees { - pool, - start_index, - subtrees, - }) - } else { - Err(Error { - code: server::error::LegacyCode::Misc.into(), - message: format!("invalid pool name, must be one of: {:?}", POOL_LIST), - data: None, + let subtrees = match response { + zebra_state::ReadResponse::OrchardSubtrees(subtrees) => subtrees, + _ => unreachable!("unmatched response to a subtrees request"), + }; + + let subtrees = subtrees + .values() + .map(|subtree| SubtreeRpcData { + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) - } + .collect(); + + Ok(GetSubtrees { + pool, + start_index, + subtrees, + }) + } else { + Err(ErrorObject::owned( + server::error::LegacyCode::Misc.into(), + format!("invalid pool name, must be one of: {:?}", POOL_LIST).as_str(), + None::<()>, + )) } - .boxed() } - fn get_address_tx_ids( - &self, - request: GetAddressTxIdsRequest, - ) -> BoxFuture>> { + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result> { let mut state = self.state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); let start = Height(request.start); let end = Height(request.end); - async move { - let chain_height = best_chain_tip_height(&latest_chain_tip)?; - - // height range checks - check_height_range(start, end, chain_height)?; + let chain_height = 
best_chain_tip_height(&latest_chain_tip)?; - let valid_addresses = AddressStrings { - addresses: request.addresses, - } - .valid_addresses()?; + // height range checks + check_height_range(start, end, chain_height)?; - let request = zebra_state::ReadRequest::TransactionIdsByAddresses { - addresses: valid_addresses, - height_range: start..=end, - }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; + let valid_addresses = AddressStrings { + addresses: request.addresses, + } + .valid_addresses()?; - let hashes = match response { - zebra_state::ReadResponse::AddressesTransactionIds(hashes) => { - let mut last_tx_location = TransactionLocation::from_usize(Height(0), 0); - - hashes - .iter() - .map(|(tx_loc, tx_id)| { - // Check that the returned transactions are in chain order. - assert!( - *tx_loc > last_tx_location, - "Transactions were not in chain order:\n\ + let request = zebra_state::ReadRequest::TransactionIdsByAddresses { + addresses: valid_addresses, + height_range: start..=end, + }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + let hashes = match response { + zebra_state::ReadResponse::AddressesTransactionIds(hashes) => { + let mut last_tx_location = TransactionLocation::from_usize(Height(0), 0); + + hashes + .iter() + .map(|(tx_loc, tx_id)| { + // Check that the returned transactions are in chain order. 
+ assert!( + *tx_loc > last_tx_location, + "Transactions were not in chain order:\n\ {tx_loc:?} {tx_id:?} was after:\n\ {last_tx_location:?}", - ); + ); - last_tx_location = *tx_loc; + last_tx_location = *tx_loc; - tx_id.to_string() - }) - .collect() - } - _ => unreachable!("unmatched response to a TransactionsByAddresses request"), - }; + tx_id.to_string() + }) + .collect() + } + _ => unreachable!("unmatched response to a TransactionsByAddresses request"), + }; - Ok(hashes) - } - .boxed() + Ok(hashes) } - fn get_address_utxos( + async fn get_address_utxos( &self, address_strings: AddressStrings, - ) -> BoxFuture>> { + ) -> Result> { let mut state = self.state.clone(); let mut response_utxos = vec![]; - async move { - let valid_addresses = address_strings.valid_addresses()?; - - // get utxos data for addresses - let request = zebra_state::ReadRequest::UtxosByAddresses(valid_addresses); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - let utxos = match response { - zebra_state::ReadResponse::AddressUtxos(utxos) => utxos, - _ => unreachable!("unmatched response to a UtxosByAddresses request"), - }; + let valid_addresses = address_strings.valid_addresses()?; + + // get utxos data for addresses + let request = zebra_state::ReadRequest::UtxosByAddresses(valid_addresses); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + let utxos = match response { + zebra_state::ReadResponse::AddressUtxos(utxos) => utxos, + _ => unreachable!("unmatched response to a UtxosByAddresses request"), + }; - let mut last_output_location = OutputLocation::from_usize(Height(0), 0, 0); - - for utxo_data in utxos.utxos() { - let address = utxo_data.0; - let txid = *utxo_data.1; - let height = utxo_data.2.height(); - let output_index = utxo_data.2.output_index(); - let script = utxo_data.3.lock_script.clone(); - let satoshis = u64::from(utxo_data.3.value); - - let 
output_location = *utxo_data.2; - // Check that the returned UTXOs are in chain order. - assert!( - output_location > last_output_location, - "UTXOs were not in chain order:\n\ + let mut last_output_location = OutputLocation::from_usize(Height(0), 0, 0); + + for utxo_data in utxos.utxos() { + let address = utxo_data.0; + let txid = *utxo_data.1; + let height = utxo_data.2.height(); + let output_index = utxo_data.2.output_index(); + let script = utxo_data.3.lock_script.clone(); + let satoshis = u64::from(utxo_data.3.value); + + let output_location = *utxo_data.2; + // Check that the returned UTXOs are in chain order. + assert!( + output_location > last_output_location, + "UTXOs were not in chain order:\n\ {output_location:?} {address:?} {txid:?} was after:\n\ {last_output_location:?}", - ); - - let entry = GetAddressUtxos { - address, - txid, - output_index, - script, - satoshis, - height, - }; - response_utxos.push(entry); + ); - last_output_location = output_location; - } + let entry = GetAddressUtxos { + address, + txid, + output_index, + script, + satoshis, + height, + }; + response_utxos.push(entry); - Ok(response_utxos) + last_output_location = output_location; } - .boxed() + + Ok(response_utxos) } fn stop(&self) -> Result { @@ -1486,25 +1435,25 @@ where if self.network.is_regtest() { match nix::sys::signal::raise(nix::sys::signal::SIGINT) { Ok(_) => Ok("Zebra server stopping".to_string()), - Err(error) => Err(Error { - code: ErrorCode::InternalError, - message: format!("Failed to shut down: {}", error), - data: None, - }), + Err(error) => Err(ErrorObject::owned( + ErrorCode::InternalError.code(), + format!("Failed to shut down: {}", error).as_str(), + None::<()>, + )), } } else { - Err(Error { - code: ErrorCode::MethodNotFound, - message: "stop is only available on regtest networks".to_string(), - data: None, - }) + Err(ErrorObject::borrowed( + ErrorCode::MethodNotFound.code(), + "stop is only available on regtest networks", + None, + )) } #[cfg(target_os = 
"windows")] - Err(Error { - code: ErrorCode::MethodNotFound, - message: "stop is not available in windows targets".to_string(), - data: None, - }) + Err(ErrorObject::borrowed( + ErrorCode::MethodNotFound.code(), + "stop is not available in windows targets", + None, + )) } } @@ -1591,8 +1540,8 @@ impl Default for GetBlockChainInfo { /// A wrapper type with a list of transparent address strings. /// -/// This is used for the input parameter of [`Rpc::get_address_balance`], -/// [`Rpc::get_address_tx_ids`] and [`Rpc::get_address_utxos`]. +/// This is used for the input parameter of [`RpcServer::get_address_balance`], +/// [`RpcServer::get_address_tx_ids`] and [`RpcServer::get_address_utxos`]. #[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize)] pub struct AddressStrings { /// A list of transparent address strings. @@ -1749,7 +1698,7 @@ impl Default for SentTransactionHash { /// Response to a `getblock` RPC request. /// -/// See the notes for the [`Rpc::get_block`] method. +/// See the notes for the [`RpcServer::get_block`] method. #[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] #[allow(clippy::large_enum_variant)] //TODO: create a struct for the Object and Box it @@ -1881,7 +1830,7 @@ pub enum GetBlockTransaction { /// Response to a `getblockheader` RPC request. /// -/// See the notes for the [`Rpc::get_block_header`] method. +/// See the notes for the [`RpcServer::get_block_header`] method. #[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] pub enum GetBlockHeader { @@ -1895,7 +1844,7 @@ pub enum GetBlockHeader { #[derive(Clone, Debug, PartialEq, serde::Serialize)] /// Verbose response to a `getblockheader` RPC request. /// -/// See the notes for the [`Rpc::get_block_header`] method. +/// See the notes for the [`RpcServer::get_block_header`] method. pub struct GetBlockHeaderObject { /// The hash of the requested block. 
pub hash: GetBlockHash, @@ -1984,7 +1933,7 @@ impl Default for GetBlockHeaderObject { /// /// Contains the hex-encoded hash of the requested block. /// -/// Also see the notes for the [`Rpc::get_best_block_hash`] and `get_block_hash` methods. +/// Also see the notes for the [`RpcServer::get_best_block_hash`] and `get_block_hash` methods. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] #[serde(transparent)] pub struct GetBlockHash(#[serde(with = "hex")] pub block::Hash); @@ -2200,19 +2149,25 @@ impl OrchardTrees { /// Check if provided height range is valid for address indexes. fn check_height_range(start: Height, end: Height, chain_height: Height) -> Result<()> { if start == Height(0) || end == Height(0) { - return Err(Error::invalid_params(format!( - "start {start:?} and end {end:?} must both be greater than zero" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} and end {end:?} must both be greater than zero"), + None::<()>, + )); } if start > end { - return Err(Error::invalid_params(format!( - "start {start:?} must be less than or equal to end {end:?}" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} must be less than or equal to end {end:?}"), + None::<()>, + )); } if start > chain_height || end > chain_height { - return Err(Error::invalid_params(format!( - "start {start:?} and end {end:?} must both be less than or equal to the chain tip {chain_height:?}" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} and end {end:?} must both be less than or equal to the chain tip {chain_height:?}"), + None::<()>, + )); } Ok(()) @@ -2230,8 +2185,10 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result if index >= 0 { let height = index.try_into().expect("Positive i32 always fits in u32"); if height > tip_height.0 { - return Err(Error::invalid_params( + return 
Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided index is greater than the current tip", + None, )); } Ok(Height(height)) @@ -2242,17 +2199,27 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result .checked_add(index + 1); let sanitized_height = match height { - None => return Err(Error::invalid_params("Provided index is not valid")), + None => { + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "Provided index is not valid", + None, + )) + } Some(h) => { if h < 0 { - return Err(Error::invalid_params( + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided negative index ends up with a negative height", + None, )); } let h: u32 = h.try_into().expect("Positive i32 always fits in u32"); if h > tip_height.0 { - return Err(Error::invalid_params( + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided index is greater than the current tip", + None, )); } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 42c5d282bed..2bb9a0ca393 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -2,9 +2,10 @@ use std::{fmt::Debug, sync::Arc, time::Duration}; -use futures::{future::OptionFuture, FutureExt, TryFutureExt}; -use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use futures::{future::OptionFuture, TryFutureExt}; +use jsonrpsee::core::{async_trait, RpcResult as Result}; +use jsonrpsee_proc_macros::rpc; +use jsonrpsee_types::ErrorObject; use tower::{Service, ServiceExt}; use zcash_address::{unified::Encoding, TryFromAddress}; @@ -83,7 +84,7 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. 
- #[rpc(name = "getblockcount")] + #[method(name = "getblockcount")] fn get_block_count(&self) -> Result; /// Returns the hash of the block of a given height iff the index argument correspond @@ -102,8 +103,8 @@ pub trait GetBlockTemplateRpc { /// - If `index` is positive then index = block height. /// - If `index` is negative then -1 is the last known valid block. /// - This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. - #[rpc(name = "getblockhash")] - fn get_block_hash(&self, index: i32) -> BoxFuture>; + #[method(name = "getblockhash")] + async fn get_block_hash(&self, index: i32) -> Result; /// Returns a block template for mining new Zcash blocks. /// @@ -128,11 +129,11 @@ pub trait GetBlockTemplateRpc { /// so moving between chains and forking chains is very cheap. /// /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. - #[rpc(name = "getblocktemplate")] - fn get_block_template( + #[method(name = "getblocktemplate")] + async fn get_block_template( &self, parameters: Option, - ) -> BoxFuture>; + ) -> Result; /// Submits block to the node to be validated and committed. /// Returns the [`submit_block::Response`] for the operation, as a JSON string. @@ -149,20 +150,20 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - `jsonparametersobject` holds a single field, workid, that must be included in submissions if provided by the server. - #[rpc(name = "submitblock")] - fn submit_block( + #[method(name = "submitblock")] + async fn submit_block( &self, hex_data: HexData, _parameters: Option, - ) -> BoxFuture>; + ) -> Result; /// Returns mining-related information. 
/// /// zcashd reference: [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) /// method: post /// tags: mining - #[rpc(name = "getmininginfo")] - fn get_mining_info(&self) -> BoxFuture>; + #[method(name = "getmininginfo")] + async fn get_mining_info(&self) -> Result; /// Returns the estimated network solutions per second based on the last `num_blocks` before /// `height`. @@ -174,12 +175,9 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) /// method: post /// tags: mining - #[rpc(name = "getnetworksolps")] - fn get_network_sol_ps( - &self, - num_blocks: Option, - height: Option, - ) -> BoxFuture>; + #[method(name = "getnetworksolps")] + async fn get_network_sol_ps(&self, num_blocks: Option, height: Option) + -> Result; /// Returns the estimated network solutions per second based on the last `num_blocks` before /// `height`. @@ -190,13 +188,13 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getnetworkhashps`](https://zcash.github.io/rpc/getnetworkhashps.html) /// method: post /// tags: mining - #[rpc(name = "getnetworkhashps")] - fn get_network_hash_ps( + #[method(name = "getnetworkhashps")] + async fn get_network_hash_ps( &self, num_blocks: Option, height: Option, - ) -> BoxFuture> { - self.get_network_sol_ps(num_blocks, height) + ) -> Result { + self.get_network_sol_ps(num_blocks, height).await } /// Returns data about each connected network node. @@ -204,8 +202,8 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) /// method: post /// tags: network - #[rpc(name = "getpeerinfo")] - fn get_peer_info(&self) -> BoxFuture>>; + #[method(name = "getpeerinfo")] + async fn get_peer_info(&self) -> Result>; /// Checks if a zcash address is valid. /// Returns information about the given address if valid. 
@@ -221,8 +219,8 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "validateaddress")] - fn validate_address(&self, address: String) -> BoxFuture>; + #[method(name = "validateaddress")] + async fn validate_address(&self, address: String) -> Result; /// Checks if a zcash address is valid. /// Returns information about the given address if valid. @@ -238,11 +236,11 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "z_validateaddress")] - fn z_validate_address( + #[method(name = "z_validateaddress")] + async fn z_validate_address( &self, address: String, - ) -> BoxFuture>; + ) -> Result; /// Returns the block subsidy reward of the block at `height`, taking into account the mining slow start. /// Returns an error if `height` is less than the height of the first halving for the current network. @@ -258,16 +256,16 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// If `height` is not supplied, uses the tip height. - #[rpc(name = "getblocksubsidy")] - fn get_block_subsidy(&self, height: Option) -> BoxFuture>; + #[method(name = "getblocksubsidy")] + async fn get_block_subsidy(&self, height: Option) -> Result; /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. /// /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) /// method: post /// tags: blockchain - #[rpc(name = "getdifficulty")] - fn get_difficulty(&self) -> BoxFuture>; + #[method(name = "getdifficulty")] + async fn get_difficulty(&self) -> Result; /// Returns the list of individual payment addresses given a unified address. 
/// @@ -282,13 +280,10 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "z_listunifiedreceivers")] - fn z_list_unified_receivers( - &self, - address: String, - ) -> BoxFuture>; + #[method(name = "z_listunifiedreceivers")] + async fn z_list_unified_receivers(&self, address: String) -> Result; - #[rpc(name = "generate")] + #[method(name = "generate")] /// Mine blocks immediately. Returns the block hashes of the generated blocks. /// /// # Parameters @@ -302,7 +297,7 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html) /// method: post /// tags: generating - fn generate(&self, num_blocks: u32) -> BoxFuture>>; + async fn generate(&self, num_blocks: u32) -> Result>; } /// RPC method implementations. @@ -536,7 +531,8 @@ where } } -impl GetBlockTemplateRpc +#[async_trait] +impl GetBlockTemplateRpcServer for GetBlockTemplateRpcImpl where Mempool: Service< @@ -571,40 +567,37 @@ where best_chain_tip_height(&self.latest_chain_tip).map(|height| height.0) } - fn get_block_hash(&self, index: i32) -> BoxFuture> { + async fn get_block_hash(&self, index: i32) -> Result { let mut state = self.state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); - async move { - // TODO: look up this height as part of the state request? 
- let tip_height = best_chain_tip_height(&latest_chain_tip)?; - - let height = height_from_signed_int(index, tip_height)?; - - let request = zebra_state::ReadRequest::BestChainBlockHash(height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - - match response { - zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), - zebra_state::ReadResponse::BlockHash(None) => Err(Error { - code: server::error::LegacyCode::InvalidParameter.into(), - message: "Block not found".to_string(), - data: None, - }), - _ => unreachable!("unmatched response to a block request"), - } + // TODO: look up this height as part of the state request? + let tip_height = best_chain_tip_height(&latest_chain_tip)?; + + let height = height_from_signed_int(index, tip_height)?; + + let request = zebra_state::ReadRequest::BestChainBlockHash(height); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + + match response { + zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), + zebra_state::ReadResponse::BlockHash(None) => Err(ErrorObject::borrowed( + server::error::LegacyCode::InvalidParameter.into(), + "Block not found", + None, + )), + _ => unreachable!("unmatched response to a block request"), } - .boxed() } - fn get_block_template( + async fn get_block_template( &self, parameters: Option, - ) -> BoxFuture> { + ) -> Result { // Clone Configs let network = self.network.clone(); let miner_address = self.miner_address.clone(); @@ -628,399 +621,392 @@ where latest_chain_tip, sync_status, ) - .boxed(); + .await; } // To implement long polling correctly, we split this RPC into multiple phases. 
- async move { - get_block_template::check_parameters(¶meters)?; - - let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); - - // - One-off checks - - // Check config and parameters. - // These checks always have the same result during long polling. - let miner_address = check_miner_address(miner_address)?; + get_block_template::check_parameters(¶meters)?; + + let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); + + // - One-off checks + + // Check config and parameters. + // These checks always have the same result during long polling. + let miner_address = check_miner_address(miner_address)?; + + // - Checks and fetches that can change during long polling + // + // Set up the loop. + let mut max_time_reached = false; + + // The loop returns the server long poll ID, + // which should be different to the client long poll ID. + let ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ) = loop { + // Check if we are synced to the tip. + // The result of this check can change during long polling. + // + // Optional TODO: + // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) + check_synced_to_tip(&network, latest_chain_tip.clone(), sync_status.clone())?; + // TODO: return an error if we have no peers, like `zcashd` does, + // and add a developer config that mines regardless of how many peers we have. + // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 + + // We're just about to fetch state data, then maybe wait for any changes. + // Mark all the changes before the fetch as seen. + // Changes are also ignored in any clones made after the mark. 
+ latest_chain_tip.mark_best_tip_seen(); + + // Fetch the state data and local time for the block template: + // - if the tip block hash changes, we must return from long polling, + // - if the local clock changes on testnet, we might return from long polling + // + // We always return after 90 minutes on mainnet, even if we have the same response, + // because the max time has been reached. + let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { + tip_hash, + tip_height, + max_time, + cur_time, + .. + } = fetch_state_tip_and_local_time(state.clone()).await?; - // - Checks and fetches that can change during long polling + // Fetch the mempool data for the block template: + // - if the mempool transactions change, we might return from long polling. // - // Set up the loop. - let mut max_time_reached = false; - - // The loop returns the server long poll ID, - // which should be different to the client long poll ID. - let ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ) = loop { - // Check if we are synced to the tip. - // The result of this check can change during long polling. - // - // Optional TODO: - // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) - check_synced_to_tip(&network, latest_chain_tip.clone(), sync_status.clone())?; - // TODO: return an error if we have no peers, like `zcashd` does, - // and add a developer config that mines regardless of how many peers we have. - // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 - - // We're just about to fetch state data, then maybe wait for any changes. - // Mark all the changes before the fetch as seen. - // Changes are also ignored in any clones made after the mark. 
- latest_chain_tip.mark_best_tip_seen(); - - // Fetch the state data and local time for the block template: - // - if the tip block hash changes, we must return from long polling, - // - if the local clock changes on testnet, we might return from long polling - // - // We always return after 90 minutes on mainnet, even if we have the same response, - // because the max time has been reached. - let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { - tip_hash, - tip_height, - max_time, - cur_time, - .. - } = fetch_state_tip_and_local_time(state.clone()).await?; - - // Fetch the mempool data for the block template: - // - if the mempool transactions change, we might return from long polling. - // - // If the chain fork has just changed, miners want to get the new block as fast - // as possible, rather than wait for transactions to re-verify. This increases - // miner profits (and any delays can cause chain forks). So we don't wait between - // the chain tip changing and getting mempool transactions. - // - // Optional TODO: - // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some((mempool_txs, mempool_tx_deps)) = - fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. 
- .or_else(|| client_long_poll_id.is_none().then(Default::default)) - else { - continue; - }; - - // - Long poll ID calculation - let server_long_poll_id = LongPollInput::new( - tip_height, - tip_hash, - max_time, - mempool_txs.iter().map(|tx| tx.transaction.id), - ) - .generate_id(); - - // The loop finishes if: - // - the client didn't pass a long poll ID, - // - the server long poll ID is different to the client long poll ID, or - // - the previous loop iteration waited until the max time. - if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { - let mut submit_old = client_long_poll_id - .as_ref() - .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); - - // On testnet, the max time changes the block difficulty, so old shares are - // invalid. On mainnet, this means there has been 90 minutes without a new - // block or mempool transaction, which is very unlikely. So the miner should - // probably reset anyway. - if max_time_reached { - submit_old = Some(false); - } + // If the chain fork has just changed, miners want to get the new block as fast + // as possible, rather than wait for transactions to re-verify. This increases + // miner profits (and any delays can cause chain forks). So we don't wait between + // the chain tip changing and getting mempool transactions. + // + // Optional TODO: + // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) + let Some((mempool_txs, mempool_tx_deps)) = + fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. 
+ .or_else(|| client_long_poll_id.is_none().then(Default::default)) + else { + continue; + }; - break ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ); + // - Long poll ID calculation + let server_long_poll_id = LongPollInput::new( + tip_height, + tip_hash, + max_time, + mempool_txs.iter().map(|tx| tx.transaction.id), + ) + .generate_id(); + + // The loop finishes if: + // - the client didn't pass a long poll ID, + // - the server long poll ID is different to the client long poll ID, or + // - the previous loop iteration waited until the max time. + if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { + let mut submit_old = client_long_poll_id + .as_ref() + .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); + + // On testnet, the max time changes the block difficulty, so old shares are + // invalid. On mainnet, this means there has been 90 minutes without a new + // block or mempool transaction, which is very unlikely. So the miner should + // probably reset anyway. + if max_time_reached { + submit_old = Some(false); } - // - Polling wait conditions - // - // TODO: when we're happy with this code, split it into a function. - // - // Periodically check the mempool for changes. - // - // Optional TODO: - // Remove this polling wait if we switch to using futures to detect sync status - // and mempool changes. - let wait_for_mempool_request = tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )); + break ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ); + } - // Return immediately if the chain tip has changed. - // The clone preserves the seen status of the chain tip. - let mut wait_for_best_tip_change = latest_chain_tip.clone(); - let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); - - // Wait for the maximum block time to elapse. 
This can change the block header - // on testnet. (On mainnet it can happen due to a network disconnection, or a - // rapid drop in hash rate.) - // - // This duration might be slightly lower than the actual maximum, - // if cur_time was clamped to min_time. In that case the wait is very long, - // and it's ok to return early. - // - // It can also be zero if cur_time was clamped to max_time. In that case, - // we want to wait for another change, and ignore this timeout. So we use an - // `OptionFuture::None`. - let duration_until_max_time = max_time.saturating_duration_since(cur_time); - let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { - Some(tokio::time::sleep(duration_until_max_time.to_std())) - } else { - None + // - Polling wait conditions + // + // TODO: when we're happy with this code, split it into a function. + // + // Periodically check the mempool for changes. + // + // Optional TODO: + // Remove this polling wait if we switch to using futures to detect sync status + // and mempool changes. + let wait_for_mempool_request = tokio::time::sleep(Duration::from_secs( + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + )); + + // Return immediately if the chain tip has changed. + // The clone preserves the seen status of the chain tip. + let mut wait_for_best_tip_change = latest_chain_tip.clone(); + let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); + + // Wait for the maximum block time to elapse. This can change the block header + // on testnet. (On mainnet it can happen due to a network disconnection, or a + // rapid drop in hash rate.) + // + // This duration might be slightly lower than the actual maximum, + // if cur_time was clamped to min_time. In that case the wait is very long, + // and it's ok to return early. + // + // It can also be zero if cur_time was clamped to max_time. In that case, + // we want to wait for another change, and ignore this timeout. 
So we use an + // `OptionFuture::None`. + let duration_until_max_time = max_time.saturating_duration_since(cur_time); + let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { + Some(tokio::time::sleep(duration_until_max_time.to_std())) + } else { + None + } + .into(); + + // Optional TODO: + // `zcashd` generates the next coinbase transaction while waiting for changes. + // When Zebra supports shielded coinbase, we might want to do this in parallel. + // But the coinbase value depends on the selected transactions, so this needs + // further analysis to check if it actually saves us any time. + + tokio::select! { + // Poll the futures in the listed order, for efficiency. + // We put the most frequent conditions first. + biased; + + // This timer elapses every few seconds + _elapsed = wait_for_mempool_request => { + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + "checking for a new mempool change after waiting a few seconds" + ); } - .into(); - - // Optional TODO: - // `zcashd` generates the next coinbase transaction while waiting for changes. - // When Zebra supports shielded coinbase, we might want to do this in parallel. - // But the coinbase value depends on the selected transactions, so this needs - // further analysis to check if it actually saves us any time. - - tokio::select! { - // Poll the futures in the listed order, for efficiency. - // We put the most frequent conditions first. 
- biased; - - // This timer elapses every few seconds - _elapsed = wait_for_mempool_request => { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - "checking for a new mempool change after waiting a few seconds" - ); - } - - // The state changes after around a target block interval (75s) - tip_changed_result = wait_for_best_tip_change => { - match tip_changed_result { - Ok(()) => { - // Spurious updates shouldn't happen in the state, because the - // difficulty and hash ordering is a stable total order. But - // since they could cause a busy-loop, guard against them here. - latest_chain_tip.mark_best_tip_seen(); - - let new_tip_hash = latest_chain_tip.best_tip_hash(); - if new_tip_hash == Some(tip_hash) { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - ?tip_hash, - ?tip_height, - "ignoring spurious state change notification" - ); - - // Wait for the mempool interval, then check for any changes. - tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )).await; - - continue; - } + // The state changes after around a target block interval (75s) + tip_changed_result = wait_for_best_tip_change => { + match tip_changed_result { + Ok(()) => { + // Spurious updates shouldn't happen in the state, because the + // difficulty and hash ordering is a stable total order. But + // since they could cause a busy-loop, guard against them here. + latest_chain_tip.mark_best_tip_seen(); + + let new_tip_hash = latest_chain_tip.best_tip_hash(); + if new_tip_hash == Some(tip_hash) { tracing::debug!( ?max_time, ?cur_time, ?server_long_poll_id, ?client_long_poll_id, - "returning from long poll because state has changed" + ?tip_hash, + ?tip_height, + "ignoring spurious state change notification" ); - } - Err(recv_error) => { - // This log is rare and helps with debugging, so it's ok to be info. 
- tracing::info!( - ?recv_error, - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll due to a state error.\ - Is Zebra shutting down?" - ); + // Wait for the mempool interval, then check for any changes. + tokio::time::sleep(Duration::from_secs( + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + )).await; - return Err(recv_error).map_error(server::error::LegacyCode::default()); + continue; } + + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because state has changed" + ); } - } - // The max time does not elapse during normal operation on mainnet, - // and it rarely elapses on testnet. - Some(_elapsed) = wait_for_max_time => { - // This log is very rare so it's ok to be info. - tracing::info!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll because max time was reached" - ); - - max_time_reached = true; + Err(recv_error) => { + // This log is rare and helps with debugging, so it's ok to be info. + tracing::info!( + ?recv_error, + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll due to a state error.\ + Is Zebra shutting down?" + ); + + return Err(recv_error).map_error(server::error::LegacyCode::default()); + } } } - }; - // - Processing fetched data to create a transaction template - // - // Apart from random weighted transaction selection, - // the template only depends on the previously fetched data. - // This processing never fails. - - // Calculate the next block height. 
- let next_block_height = - (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); - - tracing::debug!( - mempool_tx_hashes = ?mempool_txs - .iter() - .map(|tx| tx.transaction.id.mined_id()) - .collect::>(), - "selecting transactions for the template from the mempool" - ); + // The max time does not elapse during normal operation on mainnet, + // and it rarely elapses on testnet. + Some(_elapsed) = wait_for_max_time => { + // This log is very rare so it's ok to be info. + tracing::info!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because max time was reached" + ); - // Randomly select some mempool transactions. - let mempool_txs = zip317::select_mempool_transactions( - &network, - next_block_height, - &miner_address, - mempool_txs, - mempool_tx_deps, - debug_like_zcashd, - extra_coinbase_data.clone(), - ); + max_time_reached = true; + } + } + }; + + // - Processing fetched data to create a transaction template + // + // Apart from random weighted transaction selection, + // the template only depends on the previously fetched data. + // This processing never fails. + + // Calculate the next block height. + let next_block_height = + (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); + + tracing::debug!( + mempool_tx_hashes = ?mempool_txs + .iter() + .map(|tx| tx.transaction.id.mined_id()) + .collect::>(), + "selecting transactions for the template from the mempool" + ); - tracing::debug!( - selected_mempool_tx_hashes = ?mempool_txs - .iter() - .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) - .collect::>(), - "selected transactions for the template from the mempool" - ); + // Randomly select some mempool transactions. 
+ let mempool_txs = zip317::select_mempool_transactions( + &network, + next_block_height, + &miner_address, + mempool_txs, + mempool_tx_deps, + debug_like_zcashd, + extra_coinbase_data.clone(), + ); - // - After this point, the template only depends on the previously fetched data. - - let response = GetBlockTemplate::new( - &network, - &miner_address, - &chain_tip_and_local_time, - server_long_poll_id, - mempool_txs, - submit_old, - debug_like_zcashd, - extra_coinbase_data, - ); + tracing::debug!( + selected_mempool_tx_hashes = ?mempool_txs + .iter() + .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) + .collect::>(), + "selected transactions for the template from the mempool" + ); - Ok(response.into()) - } - .boxed() + // - After this point, the template only depends on the previously fetched data. + + let response = GetBlockTemplate::new( + &network, + &miner_address, + &chain_tip_and_local_time, + server_long_poll_id, + mempool_txs, + submit_old, + debug_like_zcashd, + extra_coinbase_data, + ); + + Ok(response.into()) } - fn submit_block( + async fn submit_block( &self, HexData(block_bytes): HexData, _parameters: Option, - ) -> BoxFuture> { + ) -> Result { let mut block_verifier_router = self.block_verifier_router.clone(); - async move { - let block: Block = match block_bytes.zcash_deserialize_into() { - Ok(block_bytes) => block_bytes, - Err(error) => { - tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); - - return Ok(submit_block::ErrorResponse::Rejected.into()); - } - }; + let block: Block = match block_bytes.zcash_deserialize_into() { + Ok(block_bytes) => block_bytes, + Err(error) => { + tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); - let block_height = block - .coinbase_height() - .map(|height| height.0.to_string()) - .unwrap_or_else(|| "invalid coinbase height".to_string()); - let 
block_hash = block.hash(); + return Ok(submit_block::ErrorResponse::Rejected.into()); + } + }; + + let block_height = block + .coinbase_height() + .map(|height| height.0.to_string()) + .unwrap_or_else(|| "invalid coinbase height".to_string()); + let block_hash = block.hash(); + + let block_verifier_router_response = block_verifier_router + .ready() + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? + .call(zebra_consensus::Request::Commit(Arc::new(block))) + .await; + + let chain_error = match block_verifier_router_response { + // Currently, this match arm returns `null` (Accepted) for blocks committed + // to any chain, but Accepted is only for blocks in the best chain. + // + // TODO (#5487): + // - Inconclusive: check if the block is on a side-chain + // The difference is important to miners, because they want to mine on the best chain. + Ok(block_hash) => { + tracing::info!(?block_hash, ?block_height, "submit block accepted"); + return Ok(submit_block::Response::Accepted); + } - let block_verifier_router_response = block_verifier_router - .ready() - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })? - .call(zebra_consensus::Request::Commit(Arc::new(block))) - .await; - - let chain_error = match block_verifier_router_response { - // Currently, this match arm returns `null` (Accepted) for blocks committed - // to any chain, but Accepted is only for blocks in the best chain. - // - // TODO (#5487): - // - Inconclusive: check if the block is on a side-chain - // The difference is important to miners, because they want to mine on the best chain. - Ok(block_hash) => { - tracing::info!(?block_hash, ?block_height, "submit block accepted"); - return Ok(submit_block::Response::Accepted); - } + // Turns BoxError into Result, + // by downcasting from Any to VerifyChainError. 
+ Err(box_error) => { + let error = box_error + .downcast::() + .map(|boxed_chain_error| *boxed_chain_error); - // Turns BoxError into Result, - // by downcasting from Any to VerifyChainError. - Err(box_error) => { - let error = box_error - .downcast::() - .map(|boxed_chain_error| *boxed_chain_error); + tracing::info!( + ?error, + ?block_hash, + ?block_height, + "submit block failed verification" + ); - tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification"); + error + } + }; - error - } - }; + let response = match chain_error { + Ok(source) if source.is_duplicate_request() => submit_block::ErrorResponse::Duplicate, - let response = match chain_error { - Ok(source) if source.is_duplicate_request() => { - submit_block::ErrorResponse::Duplicate - } + // Currently, these match arms return Reject for the older duplicate in a queue, + // but queued duplicates should be DuplicateInconclusive. + // + // Optional TODO (#5487): + // - DuplicateInconclusive: turn these non-finalized state duplicate block errors + // into BlockError enum variants, and handle them as DuplicateInconclusive: + // - "block already sent to be committed to the state" + // - "replaced by newer request" + // - keep the older request in the queue, + // and return a duplicate error for the newer request immediately. + // This improves the speed of the RPC response. + // + // Checking the download queues and BlockVerifierRouter buffer for duplicates + // might require architectural changes to Zebra, so we should only do it + // if mining pools really need it. + Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, - // Currently, these match arms return Reject for the older duplicate in a queue, - // but queued duplicates should be DuplicateInconclusive. 
- // - // Optional TODO (#5487): - // - DuplicateInconclusive: turn these non-finalized state duplicate block errors - // into BlockError enum variants, and handle them as DuplicateInconclusive: - // - "block already sent to be committed to the state" - // - "replaced by newer request" - // - keep the older request in the queue, - // and return a duplicate error for the newer request immediately. - // This improves the speed of the RPC response. - // - // Checking the download queues and BlockVerifierRouter buffer for duplicates - // might require architectural changes to Zebra, so we should only do it - // if mining pools really need it. - Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, - - // This match arm is currently unreachable, but if future changes add extra error types, - // we want to turn them into `Rejected`. - Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, - }; + // This match arm is currently unreachable, but if future changes add extra error types, + // we want to turn them into `Rejected`. + Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, + }; - Ok(response.into()) - } - .boxed() + Ok(response.into()) } - fn get_mining_info(&self) -> BoxFuture> { + async fn get_mining_info(&self) -> Result { let network = self.network.clone(); let mut state = self.state.clone(); @@ -1035,38 +1021,35 @@ where } let solution_rate_fut = self.get_network_sol_ps(None, None); - async move { - // Get the current block size. 
- let mut current_block_size = None; - if tip_height > 0 { - let request = zebra_state::ReadRequest::TipBlockSize; - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - current_block_size = match response { - zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), - _ => None, - }; - } - - Ok(get_mining_info::Response::new( - tip_height, - current_block_size, - current_block_tx, - network, - solution_rate_fut.await?, - )) + // Get the current block size. + let mut current_block_size = None; + if tip_height > 0 { + let request = zebra_state::ReadRequest::TipBlockSize; + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + current_block_size = match response { + zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), + _ => None, + }; } - .boxed() + + Ok(get_mining_info::Response::new( + tip_height, + current_block_size, + current_block_tx, + network, + solution_rate_fut.await?, + )) } - fn get_network_sol_ps( + async fn get_network_sol_ps( &self, num_blocks: Option, height: Option, - ) -> BoxFuture> { + ) -> Result { // Default number of blocks is 120 if not supplied. let mut num_blocks = num_blocks.unwrap_or(DEFAULT_SOLUTION_RATE_WINDOW_SIZE); // But if it is 0 or negative, it uses the proof of work averaging window. 
@@ -1082,346 +1065,296 @@ where let mut state = self.state.clone(); - async move { - let request = ReadRequest::SolutionRate { num_blocks, height }; + let request = ReadRequest::SolutionRate { num_blocks, height }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - let solution_rate = match response { - // zcashd returns a 0 rate when the calculation is invalid - ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), + let solution_rate = match response { + // zcashd returns a 0 rate when the calculation is invalid + ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), - _ => unreachable!("unmatched response to a solution rate request"), - }; + _ => unreachable!("unmatched response to a solution rate request"), + }; - Ok(solution_rate - .try_into() - .expect("per-second solution rate always fits in u64")) - } - .boxed() + Ok(solution_rate + .try_into() + .expect("per-second solution rate always fits in u64")) } - fn get_peer_info(&self) -> BoxFuture>> { + async fn get_peer_info(&self) -> Result> { let address_book = self.address_book.clone(); - async move { - Ok(address_book - .recently_live_peers(chrono::Utc::now()) - .into_iter() - .map(PeerInfo::from) - .collect()) - } - .boxed() + Ok(address_book + .recently_live_peers(chrono::Utc::now()) + .into_iter() + .map(PeerInfo::from) + .collect()) } - fn validate_address( - &self, - raw_address: String, - ) -> BoxFuture> { + async fn validate_address(&self, raw_address: String) -> Result { let network = self.network.clone(); - async move { - let Ok(address) = raw_address - .parse::() else { - return Ok(validate_address::Response::invalid()); - }; - - let address = match 
address - .convert::() { - Ok(address) => address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(validate_address::Response::invalid()); - } - }; + let Ok(address) = raw_address.parse::() else { + return Ok(validate_address::Response::invalid()); + }; - // we want to match zcashd's behaviour - if !address.is_transparent() { + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); return Ok(validate_address::Response::invalid()); } + }; - if address.network() == network.kind() { - Ok(validate_address::Response { - address: Some(raw_address), - is_valid: true, - is_script: Some(address.is_script_hash()), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address in validateaddress RPC: Zebra's configured network must match address network" - ); + // we want to match zcashd's behaviour + if !address.is_transparent() { + return Ok(validate_address::Response::invalid()); + } - Ok(validate_address::Response::invalid()) - } + if address.network() == network.kind() { + Ok(validate_address::Response { + address: Some(raw_address), + is_valid: true, + is_script: Some(address.is_script_hash()), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address in validateaddress RPC: Zebra's configured network must match address network" + ); + + Ok(validate_address::Response::invalid()) } - .boxed() } - fn z_validate_address( + async fn z_validate_address( &self, raw_address: String, - ) -> BoxFuture> { + ) -> Result { let network = self.network.clone(); - async move { - let Ok(address) = raw_address - .parse::() else { - return Ok(z_validate_address::Response::invalid()); - }; - - let address = match address - .convert::() { - Ok(address) => address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(z_validate_address::Response::invalid()); - } - }; - - if 
address.network() == network.kind() { - Ok(z_validate_address::Response { - is_valid: true, - address: Some(raw_address), - address_type: Some(z_validate_address::AddressType::from(&address)), - is_mine: Some(false), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", - address.network(), - network - ); + let Ok(address) = raw_address.parse::() else { + return Ok(z_validate_address::Response::invalid()); + }; - Ok(z_validate_address::Response::invalid()) + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); + return Ok(z_validate_address::Response::invalid()); } + }; + + if address.network() == network.kind() { + Ok(z_validate_address::Response { + is_valid: true, + address: Some(raw_address), + address_type: Some(z_validate_address::AddressType::from(&address)), + is_mine: Some(false), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", + address.network(), + network + ); + + Ok(z_validate_address::Response::invalid()) } - .boxed() } - fn get_block_subsidy(&self, height: Option) -> BoxFuture> { + async fn get_block_subsidy(&self, height: Option) -> Result { let latest_chain_tip = self.latest_chain_tip.clone(); let network = self.network.clone(); - async move { - let height = if let Some(height) = height { - Height(height) - } else { - best_chain_tip_height(&latest_chain_tip)? - }; + let height = if let Some(height) = height { + Height(height) + } else { + best_chain_tip_height(&latest_chain_tip)? 
+ }; + + if height < network.height_for_first_halving() { + return Err(ErrorObject::borrowed( + 0, + "Zebra does not support founders' reward subsidies, \ + use a block height that is after the first halving", + None, + )); + } - if height < network.height_for_first_halving() { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "Zebra does not support founders' reward subsidies, \ - use a block height that is after the first halving" - .into(), - data: None, - }); - } + // Always zero for post-halving blocks + let founders = Amount::zero(); - // Always zero for post-halving blocks - let founders = Amount::zero(); + let total_block_subsidy = + block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; + let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())?; - let total_block_subsidy = - block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; - let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())?; + let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = + funding_stream_values(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())? 
+ .into_iter() + // Separate the funding streams into deferred and non-deferred streams + .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); + + let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; + + let [lockbox_total, funding_streams_total]: [std::result::Result< + Amount, + amount::Error, + >; 2] = [&lockbox_streams, &funding_streams] + .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); + + // Use the same funding stream order as zcashd + funding_streams.sort_by_key(|(receiver, _funding_stream)| { + ZCASHD_FUNDING_STREAM_ORDER + .iter() + .position(|zcashd_receiver| zcashd_receiver == receiver) + }); - let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = - funding_stream_values(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())? + // Format the funding streams and lockbox streams + let [funding_streams, lockbox_streams]: [Vec<_>; 2] = [funding_streams, lockbox_streams] + .map(|streams| { + streams .into_iter() - // Separate the funding streams into deferred and non-deferred streams - .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); - - let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; - - let [lockbox_total, funding_streams_total]: [std::result::Result< - Amount, - amount::Error, - >; 2] = [&lockbox_streams, &funding_streams] - .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); - - // Use the same funding stream order as zcashd - funding_streams.sort_by_key(|(receiver, _funding_stream)| { - ZCASHD_FUNDING_STREAM_ORDER - .iter() - .position(|zcashd_receiver| zcashd_receiver == receiver) + .map(|(receiver, value)| { + let address = funding_stream_address(height, &network, receiver); + FundingStream::new(is_nu6, receiver, value, address) + }) + .collect() }); - // Format the funding streams and lockbox streams - let [funding_streams, lockbox_streams]: [Vec<_>; 2] = - 
[funding_streams, lockbox_streams].map(|streams| { - streams - .into_iter() - .map(|(receiver, value)| { - let address = funding_stream_address(height, &network, receiver); - FundingStream::new(is_nu6, receiver, value, address) - }) - .collect() - }); - - Ok(BlockSubsidy { - miner: miner_subsidy.into(), - founders: founders.into(), - funding_streams, - lockbox_streams, - funding_streams_total: funding_streams_total - .map_error(server::error::LegacyCode::default())? - .into(), - lockbox_total: lockbox_total - .map_error(server::error::LegacyCode::default())? - .into(), - total_block_subsidy: total_block_subsidy.into(), - }) - } - .boxed() + Ok(BlockSubsidy { + miner: miner_subsidy.into(), + founders: founders.into(), + funding_streams, + lockbox_streams, + funding_streams_total: funding_streams_total + .map_error(server::error::LegacyCode::default())? + .into(), + lockbox_total: lockbox_total + .map_error(server::error::LegacyCode::default())? + .into(), + total_block_subsidy: total_block_subsidy.into(), + }) } - fn get_difficulty(&self) -> BoxFuture> { + async fn get_difficulty(&self) -> Result { let network = self.network.clone(); let mut state = self.state.clone(); - async move { - let request = ReadRequest::ChainInfo; - - // # TODO - // - add a separate request like BestChainNextMedianTimePast, but skipping the - // consistency check, because any block's difficulty is ok for display - // - return 1.0 for a "not enough blocks in the state" error, like `zcashd`: - // - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; - - let chain_info = match response { - ReadResponse::ChainInfo(info) => info, - _ => unreachable!("unmatched response to a chain info request"), - }; - - // This RPC is typically used for display purposes, so it is not consensus-critical. 
- // But it uses the difficulty consensus rules for its calculations. - // - // Consensus: - // https://zips.z.cash/protocol/protocol.pdf#nbits - // - // The zcashd implementation performs to_expanded() on f64, - // and then does an inverse division: - // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 - // - // But in Zebra we divide the high 128 bits of each expanded difficulty. This gives - // a similar result, because the lower 128 bits are insignificant after conversion - // to `f64` with a 53-bit mantissa. - // - // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation - // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. - // - // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's - // difficulty currently uses 68 bits, so even it would still have full precision - // using this calculation.) - - // Get expanded difficulties (256 bits), these are the inverse of the work - let pow_limit: U256 = network.target_difficulty_limit().into(); - let difficulty: U256 = chain_info - .expected_difficulty - .to_expanded() - .expect("valid blocks have valid difficulties") - .into(); - - // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) - let pow_limit = pow_limit >> 128; - let difficulty = difficulty >> 128; - - // Convert to u128 then f64. - // We could also convert U256 to String, then parse as f64, but that's slower. 
- let pow_limit = pow_limit.as_u128() as f64; - let difficulty = difficulty.as_u128() as f64; - - // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` - Ok(pow_limit / difficulty) - } - .boxed() + let request = ReadRequest::ChainInfo; + + // # TODO + // - add a separate request like BestChainNextMedianTimePast, but skipping the + // consistency check, because any block's difficulty is ok for display + // - return 1.0 for a "not enough blocks in the state" error, like `zcashd`: + // + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let chain_info = match response { + ReadResponse::ChainInfo(info) => info, + _ => unreachable!("unmatched response to a chain info request"), + }; + + // This RPC is typically used for display purposes, so it is not consensus-critical. + // But it uses the difficulty consensus rules for its calculations. + // + // Consensus: + // https://zips.z.cash/protocol/protocol.pdf#nbits + // + // The zcashd implementation performs to_expanded() on f64, + // and then does an inverse division: + // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 + // + // But in Zebra we divide the high 128 bits of each expanded difficulty. This gives + // a similar result, because the lower 128 bits are insignificant after conversion + // to `f64` with a 53-bit mantissa. + // + // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation + // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. + // + // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's + // difficulty currently uses 68 bits, so even it would still have full precision + // using this calculation.) 
+ + // Get expanded difficulties (256 bits), these are the inverse of the work + let pow_limit: U256 = network.target_difficulty_limit().into(); + let difficulty: U256 = chain_info + .expected_difficulty + .to_expanded() + .expect("valid blocks have valid difficulties") + .into(); + + // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) + let pow_limit = pow_limit >> 128; + let difficulty = difficulty >> 128; + + // Convert to u128 then f64. + // We could also convert U256 to String, then parse as f64, but that's slower. + let pow_limit = pow_limit.as_u128() as f64; + let difficulty = difficulty.as_u128() as f64; + + // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` + Ok(pow_limit / difficulty) } - fn z_list_unified_receivers( - &self, - address: String, - ) -> BoxFuture> { + async fn z_list_unified_receivers(&self, address: String) -> Result { use zcash_address::unified::Container; - async move { - let (network, unified_address): ( - zcash_address::Network, - zcash_address::unified::Address, - ) = zcash_address::unified::Encoding::decode(address.clone().as_str()).map_err( - |error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - }, - )?; + let (network, unified_address): (zcash_address::Network, zcash_address::unified::Address) = + zcash_address::unified::Encoding::decode(address.clone().as_str()) + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - let mut p2pkh = String::new(); - let mut p2sh = String::new(); - let mut orchard = String::new(); - let mut sapling = String::new(); + let mut p2pkh = String::new(); + let mut p2sh = String::new(); + let mut orchard = String::new(); + let mut sapling = String::new(); - for item in unified_address.items() { - match item { - zcash_address::unified::Receiver::Orchard(_data) => { - let addr = zcash_address::unified::Address::try_from_items(vec![item]) - .expect("using data already decoded as valid"); - 
orchard = addr.encode(&network); - } - zcash_address::unified::Receiver::Sapling(data) => { - let addr = - zebra_chain::primitives::Address::try_from_sapling(network, data) - .expect("using data already decoded as valid"); - sapling = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2pkh(data) => { - let addr = zebra_chain::primitives::Address::try_from_transparent_p2pkh( - network, data, - ) + for item in unified_address.items() { + match item { + zcash_address::unified::Receiver::Orchard(_data) => { + let addr = zcash_address::unified::Address::try_from_items(vec![item]) .expect("using data already decoded as valid"); - p2pkh = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2sh(data) => { - let addr = zebra_chain::primitives::Address::try_from_transparent_p2sh( - network, data, - ) + orchard = addr.encode(&network); + } + zcash_address::unified::Receiver::Sapling(data) => { + let addr = zebra_chain::primitives::Address::try_from_sapling(network, data) .expect("using data already decoded as valid"); - p2sh = addr.payment_address().unwrap_or_default(); - } - _ => (), + sapling = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2pkh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2pkh(network, data) + .expect("using data already decoded as valid"); + p2pkh = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2sh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2sh(network, data) + .expect("using data already decoded as valid"); + p2sh = addr.payment_address().unwrap_or_default(); } + _ => (), } - - Ok(unified_address::Response::new( - orchard, sapling, p2pkh, p2sh, - )) } - .boxed() + + Ok(unified_address::Response::new( + orchard, sapling, p2pkh, p2sh, + )) } - fn generate(&self, num_blocks: u32) -> BoxFuture>> { + async fn generate(&self, num_blocks: u32) 
-> Result> { let rpc: GetBlockTemplateRpcImpl< Mempool, State, @@ -1432,54 +1365,50 @@ where > = self.clone(); let network = self.network.clone(); - async move { - if !network.is_regtest() { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "generate is only supported on regtest".to_string(), - data: None, - }); - } + if !network.is_regtest() { + return Err(ErrorObject::borrowed( + 0, + "generate is only supported on regtest", + None, + )); + } - let mut block_hashes = Vec::new(); - for _ in 0..num_blocks { - let block_template = rpc - .get_block_template(None) - .await - .map_error(server::error::LegacyCode::default())?; - - let get_block_template::Response::TemplateMode(block_template) = block_template - else { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "error generating block template".to_string(), - data: None, - }); - }; - - let proposal_block = proposal_block_from_template( - &block_template, - TimeSource::CurTime, - NetworkUpgrade::current(&network, Height(block_template.height)), - ) + let mut block_hashes = Vec::new(); + for _ in 0..num_blocks { + let block_template = rpc + .get_block_template(None) + .await .map_error(server::error::LegacyCode::default())?; - let hex_proposal_block = HexData( - proposal_block - .zcash_serialize_to_vec() - .map_error(server::error::LegacyCode::default())?, - ); - let _submit = rpc - .submit_block(hex_proposal_block, None) - .await - .map_error(server::error::LegacyCode::default())?; + let get_block_template::Response::TemplateMode(block_template) = block_template else { + return Err(ErrorObject::borrowed( + 0, + "error generating block template", + None, + )); + }; - block_hashes.push(GetBlockHash(proposal_block.hash())); - } + let proposal_block = proposal_block_from_template( + &block_template, + TimeSource::CurTime, + NetworkUpgrade::current(&network, Height(block_template.height)), + ) + .map_error(server::error::LegacyCode::default())?; + let hex_proposal_block = HexData( + 
proposal_block + .zcash_serialize_to_vec() + .map_error(server::error::LegacyCode::default())?, + ); - Ok(block_hashes) + let _submit = rpc + .submit_block(hex_proposal_block, None) + .await + .map_error(server::error::LegacyCode::default())?; + + block_hashes.push(GetBlockHash(proposal_block.hash())); } - .boxed() + + Ok(block_hashes) } } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs b/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs index 3fd4696980d..950dff5db5d 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs @@ -1,6 +1,6 @@ //! Constant values used in mining rpcs methods. -use jsonrpc_core::ErrorCode; +use jsonrpsee_types::ErrorCode; use zebra_chain::{ block, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 3a934d629ff..baa0200db1f 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -2,7 +2,8 @@ use std::{collections::HashMap, iter, sync::Arc}; -use jsonrpc_core::{Error, ErrorCode, Result}; +use jsonrpsee::core::RpcResult as Result; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use tower::{Service, ServiceExt}; use zebra_chain::{ @@ -61,25 +62,23 @@ pub fn check_parameters(parameters: &Option) -> Result<()> { mode: GetBlockTemplateRequestMode::Proposal, data: None, .. - } => Err(Error { - code: ErrorCode::InvalidParams, - message: "\"data\" parameter must be \ - provided in \"proposal\" mode" - .to_string(), - data: None, - }), + } => Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "\"data\" parameter must be \ + provided in \"proposal\" mode", + None, + )), JsonParameters { mode: GetBlockTemplateRequestMode::Template, data: Some(_), .. 
- } => Err(Error { - code: ErrorCode::InvalidParams, - message: "\"data\" parameter must be \ - omitted in \"template\" mode" - .to_string(), - data: None, - }), + } => Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "\"data\" parameter must be \ + omitted in \"template\" mode", + None, + )), } } @@ -131,11 +130,7 @@ where let block_verifier_router_response = block_verifier_router .ready() .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })? + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; @@ -189,16 +184,14 @@ where Hint: check your network connection, clock, and time zone settings." ); - return Err(Error { - code: NOT_SYNCED_ERROR_CODE, - message: format!( - "Zebra has not synced to the chain tip, \ + return Err(ErrorObject::borrowed( + NOT_SYNCED_ERROR_CODE.code(), + "Zebra has not synced to the chain tip, \ estimated distance: {estimated_distance_to_chain_tip:?}, \ local tip: {local_tip_height:?}. \ - Hint: check your network connection, clock, and time zone settings." 
- ), - data: None, - }); + Hint: check your network connection, clock, and time zone settings.", + None, + )); } Ok(()) @@ -227,11 +220,7 @@ where let response = state .oneshot(request.clone()) .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; let chain_info = match response { zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info, @@ -261,11 +250,7 @@ where let response = mempool .oneshot(mempool::Request::FullTransactions) .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; // TODO: Order transactions in block templates based on their dependencies diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs index 21627d509db..1caa1593c27 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs @@ -3,7 +3,7 @@ use zebra_chain::parameters::Network; /// Response to a `getmininginfo` RPC request. -#[derive(Debug, Default, PartialEq, Eq, serde::Serialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize)] pub struct Response { /// The current tip height. #[serde(rename = "blocks")] diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs index 2513af85aa6..cec806901bb 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs @@ -2,11 +2,11 @@ // Allow doc links to these imports. 
#[allow(unused_imports)] -use crate::methods::get_block_template_rpcs::GetBlockTemplateRpc; +use crate::methods::get_block_template_rpcs::GetBlockTemplate; /// Optional argument `jsonparametersobject` for `submitblock` RPC request /// -/// See notes for [`GetBlockTemplateRpc::submit_block`] method +/// See notes for [`crate::methods::GetBlockTemplateRpcServer::submit_block`] method #[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)] pub struct JsonParameters { /// The workid for the block template. Currently unused. @@ -28,7 +28,7 @@ pub struct JsonParameters { /// Response to a `submitblock` RPC request. /// /// Zebra never returns "duplicate-invalid", because it does not store invalid blocks. -#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "kebab-case")] pub enum ErrorResponse { /// Block was already committed to the non-finalized or finalized state @@ -44,7 +44,7 @@ pub enum ErrorResponse { /// Response to a `submitblock` RPC request. /// /// Zebra never returns "duplicate-invalid", because it does not store invalid blocks. 
-#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum Response { /// Block was not successfully submitted, return error diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 0af5e03b0b9..8753d514c23 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -4,7 +4,7 @@ use std::{collections::HashSet, fmt::Debug, sync::Arc}; use futures::{join, FutureExt, TryFutureExt}; use hex::{FromHex, ToHex}; -use jsonrpc_core::{Error, ErrorCode}; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use proptest::{collection::vec, prelude::*}; use thiserror::Error; use tokio::sync::oneshot; @@ -28,7 +28,7 @@ use zebra_test::mock_service::MockService; use crate::methods; use super::super::{ - AddressBalance, AddressStrings, NetworkUpgradeStatus, Rpc, RpcImpl, SentTransactionHash, + AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, }; proptest! { @@ -49,7 +49,7 @@ proptest! { let transaction_hex = hex::encode(&transaction_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]); @@ -64,7 +64,7 @@ proptest! { state.expect_no_requests().await?; - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); prop_assert_eq!(result, Ok(hash)); @@ -91,7 +91,9 @@ proptest! 
{ let transaction_bytes = transaction.zcash_serialize_to_vec()?; let transaction_hex = hex::encode(&transaction_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex.clone())); + let _rpc = rpc.clone(); + let _transaction_hex = transaction_hex.clone(); + let send_task = tokio::spawn(async move { _rpc.send_raw_transaction(_transaction_hex).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -103,11 +105,11 @@ proptest! { state.expect_no_requests().await?; - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); check_err_code(result, ErrorCode::ServerError(-1))?; - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex.clone()).await }); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -118,7 +120,7 @@ proptest! { .await? .respond(Ok::<_, BoxError>(mempool::Response::Queued(vec![Ok(rsp_rx)]))); - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); check_err_code(result, ErrorCode::ServerError(-25))?; @@ -173,13 +175,13 @@ proptest! { tokio::time::pause(); runtime.block_on(async move { - let send_task = tokio::spawn(rpc.send_raw_transaction(non_hex_string)); + let send_task = rpc.send_raw_transaction(non_hex_string); // Check that there are no further requests. mempool.expect_no_requests().await?; state.expect_no_requests().await?; - check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; + check_err_code(send_task.await, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics prop_assert!(mempool_tx_queue.now_or_never().is_none()); @@ -204,12 +206,12 @@ proptest! 
{ prop_assume!(Transaction::zcash_deserialize(&*random_bytes).is_err()); runtime.block_on(async move { - let send_task = tokio::spawn(rpc.send_raw_transaction(hex::encode(random_bytes))); + let send_task = rpc.send_raw_transaction(hex::encode(random_bytes)); mempool.expect_no_requests().await?; state.expect_no_requests().await?; - check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; + check_err_code(send_task.await, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics prop_assert!(mempool_tx_queue.now_or_never().is_none()); @@ -374,8 +376,8 @@ proptest! { let (response, _) = tokio::join!(response_fut, mock_state_handler); prop_assert_eq!( - &response.err().unwrap().message, - "no chain tip available yet" + response.err().unwrap().message().to_string(), + "no chain tip available yet".to_string() ); mempool.expect_no_requests().await?; @@ -603,8 +605,10 @@ proptest! { let transaction_hash = tx.hash(); let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); - + let send_task = { + let rpc = rpc.clone(); + tokio::task::spawn(async move { rpc.send_raw_transaction(tx_hex).await }) + }; let tx_unmined = UnminedTx::from(tx); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); @@ -678,10 +682,11 @@ proptest! 
{ runtime.block_on(async move { let mut transactions_hash_set = HashSet::new(); for tx in txs.clone() { + let rpc_clone = rpc.clone(); // send a transaction let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); + let send_task = tokio::task::spawn(async move { rpc_clone.send_raw_transaction(tx_hex).await }); let tx_unmined = UnminedTx::from(tx.clone()); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); @@ -768,11 +773,22 @@ fn invalid_txid() -> BoxedStrategy { } /// Checks that the given RPC response contains the given error code. -fn check_err_code(rsp: Result, error_code: ErrorCode) -> Result<(), TestCaseError> { - prop_assert!( - matches!(&rsp, Err(Error { code, .. }) if *code == error_code), - "the RPC response must match the error code: {error_code:?}" - ); +fn check_err_code( + rsp: Result, + error_code: ErrorCode, +) -> Result<(), TestCaseError> { + match rsp { + Err(e) => { + prop_assert!( + e.code() == error_code.code(), + "the RPC response must match the error code: {:?}", + error_code.code() + ); + } + Ok(_) => { + prop_assert!(false, "expected an error response, but got Ok"); + } + } Ok(()) } diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 2bdec5d7497..89ee464c70a 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -7,7 +7,9 @@ use std::{collections::BTreeMap, sync::Arc}; +use futures::FutureExt; use insta::dynamic_redaction; +use jsonrpsee::core::RpcResult as Result; use tower::buffer::Buffer; use zebra_chain::{ diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 4949b419c43..a512faf7cfc 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs 
@@ -12,7 +12,7 @@ use std::{ use hex::FromHex; use insta::Settings; -use jsonrpc_core::Result; +use jsonrpsee::core::RpcResult as Result; use tower::{buffer::Buffer, Service}; use zebra_chain::{ @@ -47,7 +47,7 @@ use crate::methods::{ }, hex_data::HexData, tests::{snapshot::EXCESSIVE_BLOCK_HEIGHT, utils::fake_history_tree}, - GetBlockHash, GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockHash, GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }; pub async fn test_responses( @@ -488,20 +488,18 @@ pub async fn test_responses( // `z_listunifiedreceivers` let ua1 = String::from("u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf"); - let z_list_unified_receivers = - tokio::spawn(get_block_template_rpc.z_list_unified_receivers(ua1)) - .await - .expect("unexpected panic in z_list_unified_receivers RPC task") - .expect("unexpected error in z_list_unified_receivers RPC call"); + let z_list_unified_receivers = get_block_template_rpc + .z_list_unified_receivers(ua1) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); snapshot_rpc_z_listunifiedreceivers("ua1", z_list_unified_receivers, &settings); let ua2 = String::from("u1uf4qsmh037x2jp6k042h9d2w22wfp39y9cqdf8kcg0gqnkma2gf4g80nucnfeyde8ev7a6kf0029gnwqsgadvaye9740gzzpmr67nfkjjvzef7rkwqunqga4u4jges4tgptcju5ysd0"); - let z_list_unified_receivers = - tokio::spawn(get_block_template_rpc.z_list_unified_receivers(ua2)) - .await - .expect("unexpected panic in z_list_unified_receivers RPC task") - .expect("unexpected error in z_list_unified_receivers RPC call"); + let z_list_unified_receivers = get_block_template_rpc + .z_list_unified_receivers(ua2) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); snapshot_rpc_z_listunifiedreceivers("ua2", z_list_unified_receivers, &settings); } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 
01ddb4c3d31..e1f559b8e4f 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -3,6 +3,7 @@ use std::ops::RangeInclusive; use std::sync::Arc; +use futures::FutureExt; use tower::buffer::Buffer; use zebra_chain::serialization::ZcashSerialize; @@ -495,7 +496,7 @@ async fn rpc_getblock_missing_error() { // Make sure Zebra returns the correct error code `-8` for missing blocks // https://github.com/zcash/lightwalletd/blob/v0.4.16/common/common.go#L287-L290 - let block_future = tokio::spawn(rpc.get_block("0".to_string(), Some(0u8))); + let block_future = tokio::spawn(async move { rpc.get_block("0".to_string(), Some(0u8)).await }); // Make the mock service respond with no block let response_handler = state @@ -503,11 +504,10 @@ async fn rpc_getblock_missing_error() { .await; response_handler.respond(zebra_state::ReadResponse::Block(None)); - let block_response = block_future.await; - let block_response = block_response - .expect("unexpected panic in spawned request future") - .expect_err("unexpected success from missing block state response"); - assert_eq!(block_response.code, ErrorCode::ServerError(-8),); + let block_response = block_future.await.expect("block future should not panic"); + let block_response = + block_response.expect_err("unexpected success from missing block state response"); + assert_eq!(block_response.code(), ErrorCode::ServerError(-8).code()); // Now check the error string the way `lightwalletd` checks it assert_eq!( @@ -898,7 +898,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); - assert_eq!(rpc_rsp.code, ErrorCode::ServerError(-5)); + assert_eq!(rpc_rsp.code(), ErrorCode::ServerError(-5).code()); mempool.expect_no_requests().await; @@ -918,7 +918,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(2) must be less than or equal to end Height(1)".to_string() ); @@ -934,7 +934,7 @@ async fn 
rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(0) and end Height(1) must both be greater than zero".to_string() ); @@ -950,7 +950,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(1) and end Height(11) must both be less than or equal to the chain tip Height(10)".to_string() ); @@ -1096,7 +1096,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { .await .unwrap_err(); - assert_eq!(error.code, ErrorCode::ServerError(-5)); + assert_eq!(error.code(), ErrorCode::ServerError(-5).code()); mempool.expect_no_requests().await; state.expect_no_requests().await; @@ -1253,7 +1253,10 @@ async fn rpc_getblockcount_empty_state() { assert!(get_block_count.is_err()); // Check the error we got is the correct one - assert_eq!(get_block_count.err().unwrap().message, "No blocks in state"); + assert_eq!( + get_block_count.err().unwrap().message(), + "No blocks in state" + ); mempool.expect_no_requests().await; } @@ -1697,8 +1700,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when estimated distance to network chain tip is far"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); mock_sync_status.set_is_close_to_tip(false); @@ -1710,8 +1713,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when syncer is not close to tip"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200)); @@ -1721,8 +1724,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when syncer is not close to tip or estimated 
distance to network chain tip is far"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); let get_block_template_sync_error = get_block_template_rpc @@ -1733,7 +1736,10 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .await .expect_err("needs an error when called in proposal mode without data"); - assert_eq!(get_block_template_sync_error.code, ErrorCode::InvalidParams); + assert_eq!( + get_block_template_sync_error.code(), + ErrorCode::InvalidParams.code() + ); let get_block_template_sync_error = get_block_template_rpc .get_block_template(Some(get_block_template::JsonParameters { @@ -1743,7 +1749,10 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .await .expect_err("needs an error when passing in block data in template mode"); - assert_eq!(get_block_template_sync_error.code, ErrorCode::InvalidParams); + assert_eq!( + get_block_template_sync_error.code(), + ErrorCode::InvalidParams.code() + ); // The long poll id is valid, so it returns a state error instead let get_block_template_sync_error = get_block_template_rpc @@ -1761,8 +1770,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when the state is empty"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); // Try getting mempool transactions with a different tip hash diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 69ab36d8c00..c787071d74d 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -7,12 +7,11 @@ //! See the full list of //! 
[Differences between JSON-RPC 1.0 and 2.0.](https://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0)

-use std::{fmt, panic, thread::available_parallelism};
+use std::{fmt, panic};

 use cookie::Cookie;
-use http_request_compatibility::With;
-use jsonrpc_core::{Compatibility, MetaIoHandler};
-use jsonrpc_http_server::{CloseHandle, ServerBuilder};
+use jsonrpsee::server::middleware::rpc::RpcServiceBuilder;
+use jsonrpsee::server::{Server, ServerHandle};
 use tokio::task::JoinHandle;
 use tower::Service;
 use tracing::*;
@@ -25,15 +24,15 @@ use zebra_node_services::mempool;

 use crate::{
     config::Config,
-    methods::{Rpc, RpcImpl},
+    methods::{RpcImpl, RpcServer as _},
     server::{
-        http_request_compatibility::HttpRequestMiddleware,
+        http_request_compatibility::HttpRequestMiddlewareLayer,
         rpc_call_compatibility::FixRpcResponseMiddleware,
     },
 };

 #[cfg(feature = "getblocktemplate-rpcs")]
-use crate::methods::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl};
+use crate::methods::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer};

 pub mod cookie;
 pub mod error;
@@ -55,8 +54,8 @@ pub struct RpcServer {
     /// Zebra's application version, with build metadata.
     build_version: String,

-    /// A handle that shuts down the RPC server.
-    close_handle: CloseHandle,
+    /// A server handle used to shut down the RPC server.
+    close_handle: ServerHandle,
 }

 impl fmt::Debug for RpcServer {
@@ -68,7 +67,7 @@ impl fmt::Debug for RpcServer {
         .field(
             "close_handle",
             // TODO: when it stabilises, use std::any::type_name_of_val(&self.close_handle)
-            &"CloseHandle",
+            &"ServerHandle",
         )
         .finish()
     }
@@ -77,6 +76,8 @@ impl fmt::Debug for RpcServer {
 /// The message to log when logging the RPC server's listen address
 pub const OPENED_RPC_ENDPOINT_MSG: &str = "Opened RPC endpoint at ";

+type ServerTask = JoinHandle<Result<(), tower::BoxError>>;
+
 impl RpcServer {
     /// Start a new RPC server endpoint using the supplied configs and services.
/// @@ -90,7 +91,7 @@ impl RpcServer { // - put some of the configs or services in their own struct? // - replace VersionString with semver::Version, and update the tests to provide valid versions #[allow(clippy::too_many_arguments)] - pub fn spawn< + pub async fn spawn< VersionString, UserAgentString, Mempool, @@ -115,7 +116,7 @@ impl RpcServer { address_book: AddressBook, latest_chain_tip: Tip, network: Network, - ) -> (JoinHandle<()>, JoinHandle<()>, Option) + ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> where VersionString: ToString + Clone + Send + 'static, UserAgentString: ToString + Clone + Send + 'static, @@ -150,136 +151,79 @@ impl RpcServer { SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { - if let Some(listen_addr) = config.listen_addr { - info!("Trying to open RPC endpoint at {}...", listen_addr,); - - // Create handler compatible with V1 and V2 RPC protocols - let mut io: MetaIoHandler<(), _> = - MetaIoHandler::new(Compatibility::Both, FixRpcResponseMiddleware); - + let listen_addr = config + .listen_addr + .expect("caller should make sure listen_addr is set"); + + #[cfg(feature = "getblocktemplate-rpcs")] + // Initialize the getblocktemplate rpc method handler + let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( + &network, + mining_config.clone(), + mempool.clone(), + state.clone(), + latest_chain_tip.clone(), + block_verifier_router, + sync_status, + address_book, + ); + + // Initialize the rpc methods with the zebra version + let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( + build_version.clone(), + user_agent, + network.clone(), + config.debug_force_finished_sync, #[cfg(feature = "getblocktemplate-rpcs")] - { - // Initialize the getblocktemplate rpc method handler - let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( - &network, - mining_config.clone(), - mempool.clone(), - state.clone(), - latest_chain_tip.clone(), - 
block_verifier_router, - sync_status, - address_book, - ); - - io.extend_with(get_block_template_rpc_impl.to_delegate()); - } - - // Initialize the rpc methods with the zebra version - let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( - build_version.clone(), - user_agent, - network.clone(), - config.debug_force_finished_sync, - #[cfg(feature = "getblocktemplate-rpcs")] - mining_config.debug_like_zcashd, - #[cfg(not(feature = "getblocktemplate-rpcs"))] - true, - mempool, - state, - latest_chain_tip, - ); - - io.extend_with(rpc_impl.to_delegate()); - - // If zero, automatically scale threads to the number of CPU cores - let mut parallel_cpu_threads = config.parallel_cpu_threads; - if parallel_cpu_threads == 0 { - parallel_cpu_threads = available_parallelism().map(usize::from).unwrap_or(1); - } - - // The server is a blocking task, which blocks on executor shutdown. - // So we need to start it in a std::thread. - // (Otherwise tokio panics on RPC port conflict, which shuts down the RPC server.) - let span = Span::current(); - let start_server = move || { - span.in_scope(|| { - let middleware = if config.enable_cookie_auth { - let cookie = Cookie::default(); - cookie::write_to_disk(&cookie, &config.cookie_dir) - .expect("Zebra must be able to write the auth cookie to the disk"); - HttpRequestMiddleware::default().with(cookie) - } else { - HttpRequestMiddleware::default() - }; - - // Use a different tokio executor from the rest of Zebra, - // so that large RPCs and any task handling bugs don't impact Zebra. 
- let server_instance = ServerBuilder::new(io) - .threads(parallel_cpu_threads) - // TODO: disable this security check if we see errors from lightwalletd - //.allowed_hosts(DomainsValidation::Disabled) - .request_middleware(middleware) - .start_http(&listen_addr) - .expect("Unable to start RPC server"); - - info!("{OPENED_RPC_ENDPOINT_MSG}{}", server_instance.address()); - - let close_handle = server_instance.close_handle(); - - let rpc_server_handle = RpcServer { - config, - network, - build_version: build_version.to_string(), - close_handle, - }; - - (server_instance, rpc_server_handle) - }) - }; - - // Propagate panics from the std::thread - let (server_instance, rpc_server_handle) = match std::thread::spawn(start_server).join() - { - Ok(rpc_server) => rpc_server, - Err(panic_object) => panic::resume_unwind(panic_object), - }; - - // The server is a blocking task, which blocks on executor shutdown. - // So we need to wait on it on a std::thread, inside a tokio blocking task. - // (Otherwise tokio panics when we shut down the RPC server.) 
- let span = Span::current(); - let wait_on_server = move || { - span.in_scope(|| { - server_instance.wait(); - - info!("Stopped RPC endpoint"); - }) - }; - - let span = Span::current(); - let rpc_server_task_handle = tokio::task::spawn_blocking(move || { - let thread_handle = std::thread::spawn(wait_on_server); - - // Propagate panics from the inner std::thread to the outer tokio blocking task - span.in_scope(|| match thread_handle.join() { - Ok(()) => (), - Err(panic_object) => panic::resume_unwind(panic_object), - }) - }); - - ( - rpc_server_task_handle, - rpc_tx_queue_task_handle, - Some(rpc_server_handle), - ) + mining_config.debug_like_zcashd, + #[cfg(not(feature = "getblocktemplate-rpcs"))] + true, + mempool, + state, + latest_chain_tip, + ); + + let http_middleware_layer = if config.enable_cookie_auth { + let cookie = Cookie::default(); + cookie::write_to_disk(&cookie, &config.cookie_dir) + .expect("Zebra must be able to write the auth cookie to the disk"); + HttpRequestMiddlewareLayer::new(Some(cookie)) } else { - // There is no RPC port, so the RPC tasks do nothing. 
-            (
-                tokio::task::spawn(futures::future::pending().in_current_span()),
-                tokio::task::spawn(futures::future::pending().in_current_span()),
-                None,
-            )
-        }
+            HttpRequestMiddlewareLayer::new(None)
+        };
+
+        let http_middleware = tower::ServiceBuilder::new().layer(http_middleware_layer);
+
+        let rpc_middleware = RpcServiceBuilder::new()
+            .rpc_logger(1024)
+            .layer_fn(FixRpcResponseMiddleware::new);
+
+        let server_instance = Server::builder()
+            .http_only()
+            .set_http_middleware(http_middleware)
+            .set_rpc_middleware(rpc_middleware)
+            .build(listen_addr)
+            .await
+            .expect("Unable to start RPC server");
+        let addr = server_instance
+            .local_addr()
+            .expect("Unable to get local address");
+        info!("{OPENED_RPC_ENDPOINT_MSG}{}", addr);
+
+        #[cfg(feature = "getblocktemplate-rpcs")]
+        let mut rpc_module = rpc_impl.into_rpc();
+        #[cfg(not(feature = "getblocktemplate-rpcs"))]
+        let rpc_module = rpc_impl.into_rpc();
+        #[cfg(feature = "getblocktemplate-rpcs")]
+        rpc_module
+            .merge(get_block_template_rpc_impl.into_rpc())
+            .unwrap();
+
+        let server_task: JoinHandle<Result<(), tower::BoxError>> = tokio::spawn(async move {
+            server_instance.start(rpc_module).stopped().await;
+            Ok(())
+        });
+        Ok((server_task, rpc_tx_queue_task_handle))
     }

     /// Shut down this RPC server, blocking the current thread.
@@ -305,7 +249,7 @@ impl RpcServer {
     /// Shuts down this RPC server using its `close_handle`.
     ///
     /// See `shutdown_blocking()` for details.
-    fn shutdown_blocking_inner(close_handle: ServerHandle, config: Config) {
         // The server is a blocking task, so it can't run inside a tokio thread.
         // See the note at wait_on_server.
let span = Span::current(); @@ -321,7 +265,7 @@ impl RpcServer { } info!("Stopping RPC server"); - close_handle.clone().close(); + let _ = close_handle.stop(); debug!("Stopped RPC server"); }) }; diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs index 4cfc7b38571..5130a16d533 100644 --- a/zebra-rpc/src/server/error.rs +++ b/zebra-rpc/src/server/error.rs @@ -1,4 +1,5 @@ //! RPC error codes & their handling. +use jsonrpsee_types::{ErrorCode, ErrorObject, ErrorObjectOwned}; /// Bitcoin RPC error codes /// @@ -51,22 +52,25 @@ pub enum LegacyCode { ClientInvalidIpOrSubnet = -30, } -impl From for jsonrpc_core::ErrorCode { +impl From for ErrorCode { fn from(code: LegacyCode) -> Self { - Self::ServerError(code as i64) + Self::ServerError(code as i32) } } -/// A trait for mapping errors to [`jsonrpc_core::Error`]. +impl From for i32 { + fn from(code: LegacyCode) -> Self { + code as i32 + } +} + +/// A trait for mapping errors to [`jsonrpsee_types::ErrorObjectOwned`]. pub(crate) trait MapError: Sized { - /// Maps errors to [`jsonrpc_core::Error`] with a specific error code. - fn map_error( - self, - code: impl Into, - ) -> std::result::Result; + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a specific error code. + fn map_error(self, code: impl Into) -> std::result::Result; - /// Maps errors to [`jsonrpc_core::Error`] with a [`LegacyCode::Misc`] error code. - fn map_misc_error(self) -> std::result::Result { + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a [`LegacyCode::Misc`] error code. + fn map_misc_error(self) -> std::result::Result { self.map_error(LegacyCode::Misc) } } @@ -77,15 +81,12 @@ pub(crate) trait OkOrError: Sized { /// message if conversion is to `Err`. fn ok_or_error( self, - code: impl Into, + code: impl Into, message: impl ToString, - ) -> std::result::Result; + ) -> std::result::Result; /// Converts the implementing type to `Result`, using a [`LegacyCode::Misc`] error code. 
- fn ok_or_misc_error( - self, - message: impl ToString, - ) -> std::result::Result { + fn ok_or_misc_error(self, message: impl ToString) -> std::result::Result { self.ok_or_error(LegacyCode::Misc, message) } } @@ -94,25 +95,21 @@ impl MapError for Result where E: ToString, { - fn map_error(self, code: impl Into) -> Result { - self.map_err(|error| jsonrpc_core::Error { - code: code.into(), - message: error.to_string(), - data: None, - }) + fn map_error(self, code: impl Into) -> Result { + self.map_err(|error| ErrorObject::owned(code.into().code(), error.to_string(), None::<()>)) } } impl OkOrError for Option { fn ok_or_error( self, - code: impl Into, + code: impl Into, message: impl ToString, - ) -> Result { - self.ok_or(jsonrpc_core::Error { - code: code.into(), - message: message.to_string(), - data: None, - }) + ) -> Result { + self.ok_or(ErrorObject::owned( + code.into().code(), + message.to_string(), + None::<()>, + )) } } diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 89925c229b8..ebbf49c05d3 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -2,16 +2,25 @@ //! //! These fixes are applied at the HTTP level, before the RPC request is parsed. -use base64::{engine::general_purpose::URL_SAFE, Engine as _}; -use futures::TryStreamExt; -use jsonrpc_http_server::{ - hyper::{body::Bytes, header, Body, Request}, - RequestMiddleware, RequestMiddlewareAction, +use std::future::Future; + +use std::pin::Pin; + +use futures::{future, FutureExt}; +use http_body_util::BodyExt; +use hyper::{body::Bytes, header}; +use jsonrpsee::{ + core::BoxError, + server::{HttpBody, HttpRequest, HttpResponse}, }; +use jsonrpsee_types::ErrorObject; +use tower::Service; use super::cookie::Cookie; -/// HTTP [`RequestMiddleware`] with compatibility workarounds. 
+use base64::{engine::general_purpose::URL_SAFE, Engine as _}; + +/// HTTP [`HttpRequestMiddleware`] with compatibility workarounds. /// /// This middleware makes the following changes to HTTP requests: /// @@ -25,7 +34,7 @@ use super::cookie::Cookie; /// ### Add missing `content-type` HTTP header /// /// Some RPC clients don't include a `content-type` HTTP header. -/// But unlike web browsers, [`jsonrpc_http_server`] does not do content sniffing. +/// But unlike web browsers, [`jsonrpsee`] does not do content sniffing. /// /// If there is no `content-type` header, we assume the content is JSON, /// and let the parser error if we are incorrect. @@ -42,103 +51,30 @@ use super::cookie::Cookie; /// Any user-specified data in RPC requests is hex or base58check encoded. /// We assume lightwalletd validates data encodings before sending it on to Zebra. /// So any fixes Zebra performs won't change user-specified data. -#[derive(Clone, Debug, Default)] -pub struct HttpRequestMiddleware { +#[derive(Clone, Debug)] +pub struct HttpRequestMiddleware { + service: S, cookie: Option, } -/// A trait for updating an object, consuming it and returning the updated version. -pub trait With { - /// Updates `self` with an instance of type `T` and returns the updated version of `self`. 
- fn with(self, _: T) -> Self; -} - -impl With for HttpRequestMiddleware { - fn with(mut self, cookie: Cookie) -> Self { - self.cookie = Some(cookie); - self - } -} - -impl RequestMiddleware for HttpRequestMiddleware { - fn on_request(&self, mut request: Request) -> RequestMiddlewareAction { - tracing::trace!(?request, "original HTTP request"); - - // Check if the request is authenticated - if !self.check_credentials(request.headers_mut()) { - let error = jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(401), - message: "unauthenticated method".to_string(), - data: None, - }; - return jsonrpc_http_server::Response { - code: jsonrpc_http_server::hyper::StatusCode::from_u16(401) - .expect("hard-coded status code should be valid"), - content_type: header::HeaderValue::from_static("application/json; charset=utf-8"), - content: serde_json::to_string(&jsonrpc_core::Response::from(error, None)) - .expect("hard-coded result should serialize"), - } - .into(); - } - - // Fix the request headers if needed and we can do so. - HttpRequestMiddleware::insert_or_replace_content_type_header(request.headers_mut()); - - // Fix the request body - let request = request.map(|body| { - let body = body.map_ok(|data| { - // To simplify data handling, we assume that any search strings won't be split - // across multiple `Bytes` data buffers. - // - // To simplify error handling, Zebra only supports valid UTF-8 requests, - // and uses lossy UTF-8 conversion. - // - // JSON-RPC requires all requests to be valid UTF-8. - // The lower layers should reject invalid requests with lossy changes. - // But if they accept some lossy changes, that's ok, - // because the request was non-standard anyway. - // - // We're not concerned about performance here, so we just clone the Cow - let data = String::from_utf8_lossy(data.as_ref()).to_string(); - - // Fix up the request. 
- let data = Self::remove_json_1_fields(data); - - Bytes::from(data) - }); - - Body::wrap_stream(body) - }); - - tracing::trace!(?request, "modified HTTP request"); - - RequestMiddlewareAction::Proceed { - // TODO: disable this security check if we see errors from lightwalletd. - should_continue_on_invalid_cors: false, - request, - } +impl HttpRequestMiddleware { + /// Create a new `HttpRequestMiddleware` with the given service and cookie. + pub fn new(service: S, cookie: Option) -> Self { + Self { service, cookie } } -} -impl HttpRequestMiddleware { - /// Remove any "jsonrpc: 1.0" fields in `data`, and return the resulting string. - pub fn remove_json_1_fields(data: String) -> String { - // Replace "jsonrpc = 1.0": - // - at the start or middle of a list, and - // - at the end of a list; - // with no spaces (lightwalletd format), and spaces after separators (example format). - // - // TODO: if we see errors from lightwalletd, make this replacement more accurate: - // - use a partial JSON fragment parser - // - combine the whole request into a single buffer, and use a JSON parser - // - use a regular expression - // - // We could also just handle the exact lightwalletd format, - // by replacing `{"jsonrpc":"1.0",` with `{`. - data.replace("\"jsonrpc\":\"1.0\",", "") - .replace("\"jsonrpc\": \"1.0\",", "") - .replace(",\"jsonrpc\":\"1.0\"", "") - .replace(", \"jsonrpc\": \"1.0\"", "") + /// Check if the request is authenticated. 
+ pub fn check_credentials(&self, headers: &header::HeaderMap) -> bool { + self.cookie.as_ref().map_or(true, |internal_cookie| { + headers + .get(header::AUTHORIZATION) + .and_then(|auth_header| auth_header.to_str().ok()) + .and_then(|auth_header| auth_header.split_whitespace().nth(1)) + .and_then(|encoded| URL_SAFE.decode(encoded).ok()) + .and_then(|decoded| String::from_utf8(decoded).ok()) + .and_then(|request_cookie| request_cookie.split(':').nth(1).map(String::from)) + .map_or(false, |passwd| internal_cookie.authenticate(passwd)) + }) } /// Insert or replace client supplied `content-type` HTTP header to `application/json` in the following cases: @@ -182,17 +118,110 @@ impl HttpRequestMiddleware { } } - /// Check if the request is authenticated. - pub fn check_credentials(&self, headers: &header::HeaderMap) -> bool { - self.cookie.as_ref().map_or(true, |internal_cookie| { - headers - .get(header::AUTHORIZATION) - .and_then(|auth_header| auth_header.to_str().ok()) - .and_then(|auth_header| auth_header.split_whitespace().nth(1)) - .and_then(|encoded| URL_SAFE.decode(encoded).ok()) - .and_then(|decoded| String::from_utf8(decoded).ok()) - .and_then(|request_cookie| request_cookie.split(':').nth(1).map(String::from)) - .map_or(false, |passwd| internal_cookie.authenticate(passwd)) - }) + /// Remove any "jsonrpc: 1.0" fields in `data`, and return the resulting string. + pub fn remove_json_1_fields(data: String) -> String { + // Replace "jsonrpc = 1.0": + // - at the start or middle of a list, and + // - at the end of a list; + // with no spaces (lightwalletd format), and spaces after separators (example format). 
+ // + // TODO: if we see errors from lightwalletd, make this replacement more accurate: + // - use a partial JSON fragment parser + // - combine the whole request into a single buffer, and use a JSON parser + // - use a regular expression + // + // We could also just handle the exact lightwalletd format, + // by replacing `{"jsonrpc":"1.0",` with `{"jsonrpc":"2.0`. + data.replace("\"jsonrpc\":\"1.0\",", "\"jsonrpc\":\"2.0\",") + .replace("\"jsonrpc\": \"1.0\",", "\"jsonrpc\": \"2.0\",") + .replace(",\"jsonrpc\":\"1.0\"", ",\"jsonrpc\":\"2.0\"") + .replace(", \"jsonrpc\": \"1.0\"", ", \"jsonrpc\": \"2.0\"") + } +} + +/// Implement the Layer for HttpRequestMiddleware to allow injecting the cookie +#[derive(Clone)] +pub struct HttpRequestMiddlewareLayer { + cookie: Option, +} + +impl HttpRequestMiddlewareLayer { + /// Create a new `HttpRequestMiddlewareLayer` with the given cookie. + pub fn new(cookie: Option) -> Self { + Self { cookie } + } +} + +impl tower::Layer for HttpRequestMiddlewareLayer { + type Service = HttpRequestMiddleware; + + fn layer(&self, service: S) -> Self::Service { + HttpRequestMiddleware::new(service, self.cookie.clone()) + } +} + +/// A trait for updating an object, consuming it and returning the updated version. +pub trait With { + /// Updates `self` with an instance of type `T` and returns the updated version of `self`. 
+ fn with(self, _: T) -> Self; +} + +impl With for HttpRequestMiddleware { + fn with(mut self, cookie: Cookie) -> Self { + self.cookie = Some(cookie); + self + } +} + +impl Service> for HttpRequestMiddleware +where + S: Service + std::clone::Clone + Send + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = BoxError; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.service.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, mut request: HttpRequest) -> Self::Future { + // Check if the request is authenticated + if !self.check_credentials(request.headers_mut()) { + let error = ErrorObject::borrowed(401, "unauthenticated method", None); + // TODO: Error object is not being returned to the user but an empty response. + return future::err(BoxError::from(error)).boxed(); + } + + // Fix the request headers. + Self::insert_or_replace_content_type_header(request.headers_mut()); + + let mut service = self.service.clone(); + let (parts, body) = request.into_parts(); + + async move { + let bytes = body + .collect() + .await + .expect("Failed to collect body data") + .to_bytes(); + + let data = String::from_utf8_lossy(bytes.as_ref()).to_string(); + + // Fix JSON-RPC 1.0 requests. + let data = Self::remove_json_1_fields(data); + let body = HttpBody::from(Bytes::from(data).as_ref().to_vec()); + + let request = HttpRequest::from_parts(parts, body); + + service.call(request).await.map_err(Into::into) + } + .boxed() } } diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs index 209596180c0..2bd22b72924 100644 --- a/zebra-rpc/src/server/rpc_call_compatibility.rs +++ b/zebra-rpc/src/server/rpc_call_compatibility.rs @@ -3,116 +3,66 @@ //! These fixes are applied at the JSON-RPC call level, //! after the RPC request is parsed and split into calls. 
-use std::future::Future; - -use futures::future::{Either, FutureExt}; - -use jsonrpc_core::{ - middleware::Middleware, - types::{Call, Failure, Output, Response}, - BoxFuture, Metadata, MethodCall, Notification, +use jsonrpsee::{ + server::middleware::rpc::{layer::ResponseFuture, RpcService, RpcServiceT}, + MethodResponse, }; +use jsonrpsee_types::ErrorObject; -use crate::server; - -/// JSON-RPC [`Middleware`] with compatibility workarounds. +/// JSON-RPC [`FixRpcResponseMiddleware`] with compatibility workarounds. /// /// This middleware makes the following changes to JSON-RPC calls: /// /// ## Make RPC framework response codes match `zcashd` /// -/// [`jsonrpc_core`] returns specific error codes while parsing requests: -/// +/// [`jsonrpsee_types`] returns specific error codes while parsing requests: +/// /// /// But these codes are different from `zcashd`, and some RPC clients rely on the exact code. -/// -/// ## Read-Only Functionality -/// -/// This middleware also logs unrecognized RPC requests. -pub struct FixRpcResponseMiddleware; - -impl Middleware for FixRpcResponseMiddleware { - type Future = BoxFuture>; - type CallFuture = BoxFuture>; - - fn on_call( - &self, - call: Call, - meta: M, - next: Next, - ) -> Either - where - Next: Fn(Call, M) -> NextFuture + Send + Sync, - NextFuture: Future> + Send + 'static, - { - Either::Left( - next(call.clone(), meta) - .map(|mut output| { - Self::fix_error_codes(&mut output); - output - }) - .inspect(|output| Self::log_if_error(output, call)) - .boxed(), - ) - } +/// Specifically, the [`jsonrpsee_types::error::INVALID_PARAMS_CODE`] is different: +/// +pub struct FixRpcResponseMiddleware { + service: RpcService, } impl FixRpcResponseMiddleware { - /// Replaces [`jsonrpc_core::ErrorCode`]s in the [`Output`] with their `zcashd` equivalents. - /// - /// ## Replaced Codes - /// - /// 1. 
[`jsonrpc_core::ErrorCode::InvalidParams`] -> [`server::error::LegacyCode::Misc`] - /// Rationale: - /// The `node-stratum-pool` mining pool library expects error code `-1` to detect available RPC methods: - /// - fn fix_error_codes(output: &mut Option) { - if let Some(Output::Failure(Failure { ref mut error, .. })) = output { - if matches!(error.code, jsonrpc_core::ErrorCode::InvalidParams) { - let original_code = error.code.clone(); - - error.code = server::error::LegacyCode::Misc.into(); - tracing::debug!("Replacing RPC error: {original_code:?} with {error}"); - } - } + /// Create a new `FixRpcResponseMiddleware` with the given `service`. + pub fn new(service: RpcService) -> Self { + Self { service } } +} - /// Obtain a description string for a received request. - /// - /// Prints out only the method name and the received parameters. - fn call_description(call: &Call) -> String { - const MAX_PARAMS_LOG_LENGTH: usize = 100; +impl<'a> RpcServiceT<'a> for FixRpcResponseMiddleware { + type Future = ResponseFuture>; - match call { - Call::MethodCall(MethodCall { method, params, .. 
}) => { - let mut params = format!("{params:?}"); - if params.len() >= MAX_PARAMS_LOG_LENGTH { - params.truncate(MAX_PARAMS_LOG_LENGTH); - params.push_str("..."); - } + fn call(&self, request: jsonrpsee::types::Request<'a>) -> Self::Future { + let service = self.service.clone(); + ResponseFuture::future(Box::pin(async move { + let response = service.call(request).await; + if response.is_error() { + let original_error_code = response + .as_error_code() + .expect("response should have an error code"); + if original_error_code == jsonrpsee_types::ErrorCode::InvalidParams.code() { + let new_error_code = crate::server::error::LegacyCode::Misc.into(); + tracing::debug!( + "Replacing RPC error: {original_error_code} with {new_error_code}" + ); + let json: serde_json::Value = + serde_json::from_str(response.into_parts().0.as_str()) + .expect("response string should be valid json"); + let id = json["id"] + .as_str() + .expect("response json should have an id") + .to_string(); - format!(r#"method = {method:?}, params = {params}"#) - } - Call::Notification(Notification { method, params, .. }) => { - let mut params = format!("{params:?}"); - if params.len() >= MAX_PARAMS_LOG_LENGTH { - params.truncate(MAX_PARAMS_LOG_LENGTH); - params.push_str("..."); + return MethodResponse::error( + jsonrpsee_types::Id::Str(id.into()), + ErrorObject::borrowed(new_error_code, "Invalid params", None), + ); } - - format!(r#"notification = {method:?}, params = {params}"#) } - Call::Invalid { .. } => "invalid request".to_owned(), - } - } - - /// Check RPC output and log any errors. - // - // TODO: do we want to ignore ErrorCode::ServerError(_), or log it at debug? - fn log_if_error(output: &Option, call: Call) { - if let Some(Output::Failure(Failure { error, .. 
})) = output { - let call_description = Self::call_description(&call); - tracing::info!("RPC error: {error} in call: {call_description}"); - } + response + })) } } diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 8ffc3386a0d..bf850661a09 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -3,12 +3,8 @@ // These tests call functions which can take unit arguments if some features aren't enabled. #![allow(clippy::unit_arg)] -use std::{ - net::{Ipv4Addr, SocketAddrV4}, - time::Duration, -}; +use std::net::{Ipv4Addr, SocketAddrV4}; -use futures::FutureExt; use tower::buffer::Buffer; use zebra_chain::{ @@ -21,111 +17,71 @@ use zebra_test::mock_service::MockService; use super::super::*; -/// Test that the JSON-RPC server spawns when configured with a single thread. -#[test] -fn rpc_server_spawn_single_thread() { - rpc_server_spawn(false) -} - -/// Test that the JSON-RPC server spawns when configured with multiple threads. -#[test] -#[cfg(not(target_os = "windows"))] -fn rpc_server_spawn_parallel_threads() { - rpc_server_spawn(true) +/// Test that the JSON-RPC server spawns. +#[tokio::test] +async fn rpc_server_spawn_test() { + rpc_server_spawn().await } /// Test if the RPC server will spawn on a randomly generated port. -/// -/// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores. 
#[tracing::instrument] -fn rpc_server_spawn(parallel_cpu_threads: bool) { +async fn rpc_server_spawn() { let _init_guard = zebra_test::init(); let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0).into()), indexer_listen_addr: None, - parallel_cpu_threads: if parallel_cpu_threads { 2 } else { 1 }, + parallel_cpu_threads: 0, debug_force_finished_sync: false, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - rt.block_on(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server..."); - - let (rpc_server_task_handle, rpc_tx_queue_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server test", - "RPC server test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC server, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // The server and queue tasks should continue without errors or panics - let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(rpc_server_task_result.is_none()); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(rpc_tx_queue_task_result.is_none()); - }); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(1)); -} - -/// Test that the JSON-RPC server spawns when configured with a single thread, -/// on an OS-assigned unallocated port. 
-#[test] -fn rpc_server_spawn_unallocated_port_single_thread() { - rpc_server_spawn_unallocated_port(false, false) -} - -/// Test that the JSON-RPC server spawns and shuts down when configured with a single thread, -/// on an OS-assigned unallocated port. -#[test] -fn rpc_server_spawn_unallocated_port_single_thread_shutdown() { - rpc_server_spawn_unallocated_port(false, true) + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server..."); + + let _rpc_server_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server test", + "RPC server test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ); + + info!("spawned RPC server, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; } -/// Test that the JSON-RPC server spawns when configured with multiple threads, -/// on an OS-assigned unallocated port. -#[test] -fn rpc_sever_spawn_unallocated_port_parallel_threads() { - rpc_server_spawn_unallocated_port(true, false) +/// Test that the JSON-RPC server spawns on an OS-assigned unallocated port. +#[tokio::test] +async fn rpc_server_spawn_unallocated_port() { + rpc_spawn_unallocated_port(false).await } -/// Test that the JSON-RPC server spawns and shuts down when configured with multiple threads, -/// on an OS-assigned unallocated port. 
-#[test] -fn rpc_sever_spawn_unallocated_port_parallel_threads_shutdown() { - rpc_server_spawn_unallocated_port(true, true) +/// Test that the JSON-RPC server spawns and shuts down on an OS-assigned unallocated port. +#[tokio::test] +async fn rpc_server_spawn_unallocated_port_shutdown() { + rpc_spawn_unallocated_port(true).await } /// Test if the RPC server will spawn on an OS-assigned unallocated port. /// -/// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores, -/// and `do_shutdown` to true to close the server using the close handle. +/// Set `do_shutdown` to true to close the server using the close handle. #[tracing::instrument] -fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bool) { +async fn rpc_spawn_unallocated_port(do_shutdown: bool) { let _init_guard = zebra_test::init(); let port = zebra_test::net::random_unallocated_port(); @@ -134,300 +90,111 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, - parallel_cpu_threads: if parallel_cpu_threads { 0 } else { 1 }, + parallel_cpu_threads: 0, debug_force_finished_sync: false, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - rt.block_on(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server..."); - - let (rpc_server_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server test", - "RPC server test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - 
Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC server, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - if do_shutdown { - rpc_server - .expect("unexpected missing RpcServer for configured RPC port") - .shutdown() - .await - .expect("unexpected panic during RpcServer shutdown"); - - // The server and queue tasks should shut down without errors or panics - let rpc_server_task_result = rpc_server_task_handle.await; - assert!( - matches!(rpc_server_task_result, Ok(())), - "unexpected server task panic during shutdown: {rpc_server_task_result:?}" - ); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.await; - assert!( - matches!(rpc_tx_queue_task_result, Ok(())), - "unexpected queue task panic during shutdown: {rpc_tx_queue_task_result:?}" - ); - } else { - // The server and queue tasks should continue without errors or panics - let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(rpc_server_task_result.is_none()); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(rpc_tx_queue_task_result.is_none()); - } - }); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(1)); + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server..."); + + let rpc_server_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server test", + "RPC server test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + 
MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await + .expect(""); + + info!("spawned RPC server, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; + + if do_shutdown { + rpc_server_task_handle.0.abort(); + } } /// Test if the RPC server will panic correctly when there is a port conflict. /// /// This test is sometimes unreliable on Windows, and hangs on macOS. /// We believe this is a CI infrastructure issue, not a platform-specific issue. -#[test] +#[tokio::test] #[should_panic(expected = "Unable to start RPC server")] #[cfg(not(any(target_os = "windows", target_os = "macos")))] -fn rpc_server_spawn_port_conflict() { +async fn rpc_server_spawn_port_conflict() { + use std::time::Duration; let _init_guard = zebra_test::init(); let port = zebra_test::net::random_known_port(); let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, - parallel_cpu_threads: 1, debug_force_finished_sync: false, + parallel_cpu_threads: 0, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - let test_task_handle = rt.spawn(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server 1..."); - - let (_rpc_server_1_task_handle, _rpc_tx_queue_1_task_handle, _rpc_server) = - RpcServer::spawn( - config.clone(), - Default::default(), - "RPC server 1 test", - "RPC server 1 test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - 
NoChainTip, - Mainnet, - ); - - tokio::time::sleep(Duration::from_secs(3)).await; - - info!("spawning conflicted RPC server 2..."); - - let (rpc_server_2_task_handle, _rpc_tx_queue_2_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server 2 conflict test", - "RPC server 2 conflict test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC servers, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // Because there is a panic inside a multi-threaded executor, - // we can't depend on the exact behaviour of the other tasks, - // particularly across different machines and OSes. - - // The second server should panic, so its task handle should return the panic - let rpc_server_2_task_result = rpc_server_2_task_handle.await; - match rpc_server_2_task_result { - Ok(()) => panic!( - "RPC server with conflicting port should exit with an error: \ - unexpected Ok result" - ), - Err(join_error) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => panic!( - "RPC server with conflicting port should exit with an error: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } - - // Ignore the queue task result - }); - - // Wait until the spawned task finishes - std::thread::sleep(Duration::from_secs(10)); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(3)); - - match test_task_handle.now_or_never() { - Some(Ok(_never)) => unreachable!("test task always panics"), - None => panic!("unexpected test task hang"), - Some(Err(join_error)) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - 
Err(cancelled_error) => panic!( - "test task should exit with a RPC server panic: \ - unexpected non-panic JoinError: {cancelled_error:?}" - ), - }, - } -} - -/// Check if the RPC server detects a port conflict when running parallel threads. -/// -/// If this test fails, that's great! -/// We can make parallel the default, and remove the warnings in the config docs. -/// -/// This test is sometimes unreliable on Windows, and hangs on macOS. -/// We believe this is a CI infrastructure issue, not a platform-specific issue. -#[test] -#[cfg(not(any(target_os = "windows", target_os = "macos")))] -fn rpc_server_spawn_port_conflict_parallel_auto() { - let _init_guard = zebra_test::init(); - - let port = zebra_test::net::random_known_port(); - let config = Config { - listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), - indexer_listen_addr: None, - parallel_cpu_threads: 2, - debug_force_finished_sync: false, - cookie_dir: Default::default(), - enable_cookie_auth: false, - }; - - let rt = tokio::runtime::Runtime::new().unwrap(); - - let test_task_handle = rt.spawn(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning parallel RPC server 1..."); - - let (_rpc_server_1_task_handle, _rpc_tx_queue_1_task_handle, _rpc_server) = - RpcServer::spawn( - config.clone(), - Default::default(), - "RPC server 1 test", - "RPC server 1 test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - tokio::time::sleep(Duration::from_secs(3)).await; - - info!("spawning parallel conflicted RPC server 2..."); - - let (rpc_server_2_task_handle, 
_rpc_tx_queue_2_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server 2 conflict test", - "RPC server 2 conflict test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC servers, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // Because there might be a panic inside a multi-threaded executor, - // we can't depend on the exact behaviour of the other tasks, - // particularly across different machines and OSes. - - // The second server doesn't panic, but we'd like it to. - // (See the function docs for details.) - let rpc_server_2_task_result = rpc_server_2_task_handle.await; - match rpc_server_2_task_result { - Ok(()) => info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now" - ), - Err(join_error) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } - - // Ignore the queue task result - }); - - // Wait until the spawned task finishes - std::thread::sleep(Duration::from_secs(10)); - - info!("waiting for parallel RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(3)); - - match test_task_handle.now_or_never() { - Some(Ok(())) => { - info!("parallel RPC server task successfully exited"); - } - None => panic!("unexpected test task hang"), - Some(Err(join_error)) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => 
info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server 1..."); + + let _rpc_server_1_task_handle = RpcServer::spawn( + config.clone(), + Default::default(), + "RPC server 1 test", + "RPC server 1 test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await; + + tokio::time::sleep(Duration::from_secs(3)).await; + + info!("spawning conflicted RPC server 2..."); + + let _rpc_server_2_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server 2 conflict test", + "RPC server 2 conflict test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await; + + info!("spawned RPC servers, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; } diff --git a/zebra-rpc/src/sync.rs b/zebra-rpc/src/sync.rs index 40373d0eaed..c678f580b4a 100644 --- a/zebra-rpc/src/sync.rs +++ b/zebra-rpc/src/sync.rs @@ -382,9 +382,10 @@ impl SyncerRpcMethods for RpcRequestClient { } Err(err) if err - .downcast_ref::() + .downcast_ref::() .is_some_and(|err| { - err.code == server::error::LegacyCode::InvalidParameter.into() + let code: i32 = server::error::LegacyCode::InvalidParameter.into(); + err.code() == code }) 
=> { Ok(None) diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index cb3e417d0cf..9cf2d1e4095 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -254,7 +254,7 @@ tonic-build = { version = "0.12.3", optional = true } abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" hex-literal = "0.4.1" -jsonrpc-core = "18.0.0" +jsonrpsee-types = "0.24.7" once_cell = "1.20.2" regex = "1.11.0" insta = { version = "1.41.1", features = ["json"] } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 2f8a1563b8a..ab06e546fc8 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -243,20 +243,31 @@ impl StartCmd { } // Launch RPC server - info!("spawning RPC server"); - let (rpc_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config.rpc.clone(), - config.mining.clone(), - build_version(), - user_agent(), - mempool.clone(), - read_only_state_service.clone(), - block_verifier_router.clone(), - sync_status.clone(), - address_book.clone(), - latest_chain_tip.clone(), - config.network.network.clone(), - ); + let (rpc_task_handle, mut rpc_tx_queue_task_handle) = + if let Some(listen_addr) = config.rpc.listen_addr { + info!("spawning RPC server"); + info!("Trying to open RPC endpoint at {}...", listen_addr,); + let rpc_task_handle = RpcServer::spawn( + config.rpc.clone(), + config.mining.clone(), + build_version(), + user_agent(), + mempool.clone(), + read_only_state_service.clone(), + block_verifier_router.clone(), + sync_status.clone(), + address_book.clone(), + latest_chain_tip.clone(), + config.network.network.clone(), + ); + rpc_task_handle.await.unwrap() + } else { + warn!("configure an listen_addr to start the RPC server"); + ( + tokio::spawn(std::future::pending().in_current_span()), + tokio::spawn(std::future::pending().in_current_span()), + ) + }; // TODO: Add a shutdown signal and start the server with `serve_with_incoming_shutdown()` if // any related unit tests sometimes 
crash with memory errors @@ -399,7 +410,6 @@ impl StartCmd { // ongoing tasks pin!(rpc_task_handle); pin!(indexer_rpc_task_handle); - pin!(rpc_tx_queue_task_handle); pin!(syncer_task_handle); pin!(block_gossip_task_handle); pin!(mempool_crawler_task_handle); @@ -425,17 +435,10 @@ impl StartCmd { let mut exit_when_task_finishes = true; let result = select! { - rpc_result = &mut rpc_task_handle => { - rpc_result + rpc_join_result = &mut rpc_task_handle => { + let rpc_server_result = rpc_join_result .expect("unexpected panic in the rpc task"); - info!("rpc task exited"); - Ok(()) - } - - indexer_rpc_join_result = &mut indexer_rpc_task_handle => { - let indexer_rpc_server_result = indexer_rpc_join_result - .expect("unexpected panic in the rpc task"); - info!(?indexer_rpc_server_result, "indexer rpc task exited"); + info!(?rpc_server_result, "rpc task exited"); Ok(()) } @@ -446,6 +449,13 @@ impl StartCmd { Ok(()) } + indexer_rpc_join_result = &mut indexer_rpc_task_handle => { + let indexer_rpc_server_result = indexer_rpc_join_result + .expect("unexpected panic in the indexer task"); + info!(?indexer_rpc_server_result, "indexer rpc task exited"); + Ok(()) + } + sync_result = &mut syncer_task_handle => sync_result .expect("unexpected panic in the syncer task") .map(|_| info!("syncer task exited")), @@ -536,15 +546,6 @@ impl StartCmd { state_checkpoint_verify_handle.abort(); old_databases_task_handle.abort(); - // Wait until the RPC server shuts down. - // This can take around 150 seconds. - // - // Without this shutdown, Zebra's RPC unit tests sometimes crashed with memory errors. 
- if let Some(rpc_server) = rpc_server { - info!("waiting for RPC server to shut down"); - rpc_server.shutdown_blocking(); - } - info!("exiting Zebra: all tasks have been asked to stop, waiting for remaining tasks to finish"); exit_status diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs index cb32cc91981..ee4960a5d03 100644 --- a/zebrad/src/components/miner.rs +++ b/zebrad/src/components/miner.rs @@ -35,7 +35,7 @@ use zebra_rpc::{ GetBlockTemplateCapability::*, GetBlockTemplateRequestMode::*, }, hex_data::HexData, - GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }, }; use zebra_state::WatchReceiver; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 3dfc959eb58..ef2de55dc83 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3270,7 +3270,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { types::submit_block, }, hex_data::HexData, - GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }; use zebra_test::mock_service::MockService; let _init_guard = zebra_test::init(); diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index acd89d89aba..efd3c08875b 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -161,9 +161,10 @@ impl MiningRpcMethods for RpcRequestClient { } Err(err) if err - .downcast_ref::() + .downcast_ref::() .is_some_and(|err| { - err.code == server::error::LegacyCode::InvalidParameter.into() + let error: i32 = server::error::LegacyCode::InvalidParameter.into(); + err.code() == error }) => { Ok(None) diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap index 9e830f19e61..d277043f701 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap +++ 
b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 0, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1707429 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap index bcaa36d61fd..c683839781e 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 338, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1888929 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap index 945af42ca5f..ec880d7df6f 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 585, @@ -13,6 +14,5 @@ expression: parsed "end_height": 2000126 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap index 2cf43dd6098..08f8744fadd 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 0, @@ -13,6 +14,5 @@ expression: parsed "end_height": 558822 } ] - }, - "id": 123 + } } diff --git 
a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap index d709a53f0c6..f76f202706c 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 0, @@ -53,6 +54,5 @@ expression: parsed "end_height": 1363036 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap index ad9e68b1620..0274c501497 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 1090, @@ -33,6 +34,5 @@ expression: parsed "end_height": 2056616 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap index 8e0ddc1fb67..d2c983d78a8 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 17, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1703171 } ] - }, - "id": 123 + } } From 79911641b9cf30b38b1d2cbe87c6b27669d267ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 07:29:37 +0000 Subject: [PATCH 045/245] build(deps): bump the devops group across 1 directory with 5 updates (#9109) Bumps the devops group with 5 updates in the / 
directory: | Package | From | To | | --- | --- | --- | | [codecov/codecov-action](https://github.com/codecov/codecov-action) | `5.1.1` | `5.1.2` | | [tj-actions/changed-files](https://github.com/tj-actions/changed-files) | `45.0.5` | `45.0.6` | | [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) | `2.7.5` | `2.7.7` | | [baptiste0928/cargo-install](https://github.com/baptiste0928/cargo-install) | `3.1.1` | `3.3.0` | | [docker/build-push-action](https://github.com/docker/build-push-action) | `6.10.0` | `6.11.0` | Updates `codecov/codecov-action` from 5.1.1 to 5.1.2 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.1.1...v5.1.2) Updates `tj-actions/changed-files` from 45.0.5 to 45.0.6 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v45.0.5...v45.0.6) Updates `Swatinem/rust-cache` from 2.7.5 to 2.7.7 - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/v2.7.5...v2.7.7) Updates `baptiste0928/cargo-install` from 3.1.1 to 3.3.0 - [Release notes](https://github.com/baptiste0928/cargo-install/releases) - [Changelog](https://github.com/baptiste0928/cargo-install/blob/main/CHANGELOG.md) - [Commits](https://github.com/baptiste0928/cargo-install/compare/v3.1.1...v3.3.0) Updates `docker/build-push-action` from 6.10.0 to 6.11.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.10.0...v6.11.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production 
update-type: version-update:semver-patch dependency-group: devops - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: baptiste0928/cargo-install dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 8 ++++---- .github/workflows/ci-unit-tests-os.yml | 6 +++--- .github/workflows/docs-deploy-firebase.yml | 2 +- .github/workflows/release-crates-io.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 4a97563143b..d2dfdd9ff1a 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.1.1 + uses: codecov/codecov-action@v5.1.2 diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 26ec61089b3..0e40daa7c7e 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.5 + uses: tj-actions/changed-files@v45.0.6 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.5 + uses: 
tj-actions/changed-files@v45.0.6 with: files: | .github/workflows/*.yml @@ -93,7 +93,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.7.5 + - uses: Swatinem/rust-cache@v2.7.7 with: shared-key: "clippy-cargo-lock" @@ -138,7 +138,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. - #- uses: Swatinem/rust-cache@v2.7.5 + #- uses: Swatinem/rust-cache@v2.7.7 - run: | cargo fmt --all -- --check diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index ec1b52fd5d8..6e9dc77d91d 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -115,7 +115,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.7.5 + - uses: Swatinem/rust-cache@v2.7.7 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -224,7 +224,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.7.5 + - uses: Swatinem/rust-cache@v2.7.7 with: shared-key: "clippy-cargo-lock" @@ -283,7 +283,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-machete - uses: baptiste0928/cargo-install@v3.1.1 + uses: baptiste0928/cargo-install@v3.3.0 with: crate: cargo-machete diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 38542be45e9..0154ffe1bd7 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -155,7 +155,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.7.5 + - uses: Swatinem/rust-cache@v2.7.7 - name: Build internal docs run: | diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 27b902d1729..32586ce5fbe 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -85,7 +85,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-release - uses: baptiste0928/cargo-install@v3.1.1 + uses: baptiste0928/cargo-install@v3.3.0 with: crate: cargo-release diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index ac6d5bbbecc..31e965976a8 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -161,7 +161,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.10.0 + uses: 
docker/build-push-action@v6.11.0 with: target: ${{ inputs.dockerfile_target }} context: . From dad75437e34a95807a9fd3ae997afbe056a3f898 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 14 Jan 2025 14:27:52 +0000 Subject: [PATCH 046/245] fix(workflows): update patch jobs conditions and naming (#9117) - Modified job conditions in multiple workflow files to ensure they only run for pull requests from external repositories. - Updated job names for clarity and consistency across `cd-deploy-nodes-gcp`, `ci-tests`, and `docs-deploy-firebase` workflows, and to sync them with our rulesets - Enhanced comments to emphasize the importance of synchronizing job names across related workflow files. Fixes #9108 --- .../cd-deploy-nodes-gcp.patch-external.yml | 19 +++--- .../workflows/cd-deploy-nodes-gcp.patch.yml | 28 +++++---- .github/workflows/ci-tests.patch-external.yml | 58 ++++++++++++------- .github/workflows/ci-tests.patch.yml | 56 ++++++++++++------ .../docs-deploy-firebase.patch-external.yml | 12 ++-- .../workflows/docs-deploy-firebase.patch.yml | 10 ++-- 6 files changed, 114 insertions(+), 69 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 0b0cdfa5018..4cb146d4029 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -7,15 +7,15 @@ name: Deploy Nodes to GCP on: pull_request: -# IMPORTANT -# -# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and -# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. +#! IMPORTANT +#! +#! The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and +#! `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. 
jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) get-disk-name: - name: Get disk name - if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} + name: Get disk name / Get Mainnet cached disk + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} runs-on: ubuntu-latest steps: - run: 'echo "Skipping job on fork"' @@ -24,6 +24,7 @@ jobs: name: Build CD Docker / Build images # Only run on PRs from external repositories, skipping ZF branches and tags. runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' @@ -33,13 +34,15 @@ jobs: # change. needs: build runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-configuration-file-testnet: - name: Test CD testnet Docker config file / Test default-conf in Docker + name: Test CD testnet Docker config file / Test testnet-conf in Docker needs: build runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' @@ -47,6 +50,6 @@ jobs: name: Test CD custom Docker config file / Test custom-conf in Docker needs: build runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' - diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml index 6f88cf2cfe2..b5ea40a0e2e 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -22,39 +22,43 @@ on: - '.github/workflows/cd-deploy-nodes-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' -# IMPORTANT -# -# The job names in 
`cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and -# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. +#! IMPORTANT +#! +#! The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and +#! `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) + get-disk-name: + name: Get disk name / Get Mainnet cached disk + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} + steps: + - run: 'echo "No build required"' + build: name: Build CD Docker / Build images runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-configuration-file: name: Test CD default Docker config file / Test default-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-configuration-file-testnet: - name: Test CD testnet Docker config file / Test default-conf in Docker + name: Test CD testnet Docker config file / Test testnet-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' - test-zebra-conf-path: name: Test CD custom Docker config file / Test custom-conf in Docker runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - get-disk-name: - name: Get disk name - runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' diff --git a/.github/workflows/ci-tests.patch-external.yml b/.github/workflows/ci-tests.patch-external.yml index 8fef3e75889..f257315788b 100644 --- a/.github/workflows/ci-tests.patch-external.yml +++ 
b/.github/workflows/ci-tests.patch-external.yml @@ -15,7 +15,7 @@ jobs: build: name: Build CI Docker / Build images # Only run on PRs from external repositories. - if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} runs-on: ubuntu-latest steps: - run: 'echo "Skipping job on fork"' @@ -24,116 +24,134 @@ jobs: ## The following jobs are related to sub-ci-unit-tests-docker.yml ### test-all: - name: Test all + name: Unit tests / Test all runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-fake-activation-heights: - name: Test with fake activation heights + name: Unit tests / Test with fake activation heights runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-empty-sync: - name: Test checkpoint sync from empty state + name: Unit tests / Test checkpoint sync from empty state runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-lightwalletd-integration: - name: Test integration with lightwalletd + name: Unit tests / Test integration with lightwalletd runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-configuration-file: - name: Test CI default Docker config file / Test default-conf in Docker + name: Unit tests / Test CI default Docker config file / Test default-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-configuration-file-testnet: - name: Test CI testnet Docker 
config file / Test default-conf in Docker + name: Unit tests / Test CI testnet Docker config file / Test testnet-conf in Docker needs: build runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-zebra-conf-path: - name: Test CI custom Docker config file / Test custom-conf in Docker + name: Unit tests / Test CI custom Docker config file / Test custom-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' - + #### #### ## The following jobs are related to sub-ci-integration-tests-gcp.yml ### # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) get-available-disks: - name: Check if cached state disks exist for Mainnet / Check if cached state disks exist + name: Integration tests / Check if cached state disks exist for Mainnet / Get Mainnet cached disk runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-stateful-sync: - name: Zebra checkpoint update / Run sync-past-checkpoint test + name: Integration tests / Zebra checkpoint update / Run sync-past-checkpoint test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' test-update-sync: - name: Zebra tip update / Run update-to-tip test + name: Integration tests / Zebra tip update / Run update-to-tip test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' checkpoints-mainnet: - name: Generate checkpoints mainnet / Run checkpoints-mainnet test + name: Integration tests / Generate checkpoints mainnet / Run checkpoints-mainnet test 
runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' lightwalletd-rpc-test: - name: Zebra tip JSON-RPC / Run fully-synced-rpc test + name: Integration tests / Zebra tip JSON-RPC / Run fully-synced-rpc test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' lightwalletd-transactions-test: - name: lightwalletd tip send / Run lwd-send-transactions test + name: Integration tests / lightwalletd tip send / Run lwd-send-transactions test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' get-block-template-test: - name: get block template / Run get-block-template test + name: Integration tests / get block template / Run get-block-template test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' submit-block-test: - name: submit block / Run submit-block test + name: Integration tests / submit block / Run submit-block test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' lightwalletd-full-sync: - name: lightwalletd tip / Run lwd-full-sync test + name: Integration tests / lightwalletd tip / Run lwd-full-sync test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' lightwalletd-update-sync: - name: lightwalletd tip update / Run lwd-update-sync test + name: Integration tests / lightwalletd tip update / Run lwd-update-sync test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork 
}} steps: - run: 'echo "Skipping job on fork"' lightwalletd-grpc-test: - name: lightwalletd GRPC tests / Run lwd-grpc-wallet test + name: Integration tests / lightwalletd GRPC tests / Run lwd-grpc-wallet test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/ci-tests.patch.yml b/.github/workflows/ci-tests.patch.yml index 5320b149bbb..e0a7ff605fd 100644 --- a/.github/workflows/ci-tests.patch.yml +++ b/.github/workflows/ci-tests.patch.yml @@ -43,45 +43,52 @@ jobs: ## The following jobs are related to sub-ci-unit-tests-docker.yml ### test-all: - name: Test all + name: Unit tests / Test all runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-fake-activation-heights: - name: Test with fake activation heights + name: Unit tests / Test with fake activation heights runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-empty-sync: - name: Test checkpoint sync from empty state + name: Unit tests / Test checkpoint sync from empty state runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-lightwalletd-integration: - name: Test integration with lightwalletd + name: Unit tests / Test integration with lightwalletd runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-configuration-file: - name: Test CI default Docker config file / Test default-conf in Docker + name: Unit tests / Test CI default Docker config file / Test default-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' 
&& !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' - test-configuration-file-testnet: - name: Test CI testnet Docker config file / Test default-conf in Docker + test-configuration-file-testnet: + name: Unit tests / Test CI testnet Docker config file / Test testnet-conf in Docker needs: build runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-zebra-conf-path: - name: Test CI custom Docker config file / Test custom-conf in Docker + name: Unit tests / Test CI custom Docker config file / Test custom-conf in Docker runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' @@ -92,67 +99,78 @@ jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) get-available-disks: - name: Check if cached state disks exist for Mainnet / Check if cached state disks exist + name: Integration tests / Check if cached state disks exist for Mainnet / Get Mainnet cached disk runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-stateful-sync: - name: Zebra checkpoint update / Run sync-past-checkpoint test + name: Integration tests / Zebra checkpoint update / Run sync-past-checkpoint test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' test-update-sync: - name: Zebra tip update / Run update-to-tip test + name: Integration tests / Zebra tip update / Run update-to-tip test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' checkpoints-mainnet: - name: Generate checkpoints mainnet / Run 
checkpoints-mainnet test + name: Integration tests / Generate checkpoints mainnet / Run checkpoints-mainnet test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' lightwalletd-rpc-test: - name: Zebra tip JSON-RPC / Run fully-synced-rpc test + name: Integration tests / Zebra tip JSON-RPC / Run fully-synced-rpc test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' lightwalletd-transactions-test: - name: lightwalletd tip send / Run lwd-send-transactions test + name: Integration tests / lightwalletd tip send / Run lwd-send-transactions test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' get-block-template-test: - name: get block template / Run get-block-template test + name: Integration tests / get block template / Run get-block-template test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' submit-block-test: - name: submit block / Run submit-block test + name: Integration tests / submit block / Run submit-block test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' lightwalletd-full-sync: - name: lightwalletd tip / Run lwd-full-sync test + name: Integration tests / lightwalletd tip / Run lwd-full-sync test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' lightwalletd-update-sync: - name: lightwalletd tip update / Run lwd-update-sync test + name: Integration tests / lightwalletd tip update / Run lwd-update-sync test runs-on: 
ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' lightwalletd-grpc-test: - name: lightwalletd GRPC tests / Run lwd-grpc-wallet test + name: Integration tests / lightwalletd GRPC tests / Run lwd-grpc-wallet test runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' diff --git a/.github/workflows/docs-deploy-firebase.patch-external.yml b/.github/workflows/docs-deploy-firebase.patch-external.yml index 3c6d9c16942..9a725ba21b6 100644 --- a/.github/workflows/docs-deploy-firebase.patch-external.yml +++ b/.github/workflows/docs-deploy-firebase.patch-external.yml @@ -7,16 +7,15 @@ name: Docs on: pull_request: -# IMPORTANT -# -# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and -# `docs-deploy-firebase.patch-external.yml` must be kept in sync. +#! IMPORTANT +#! +#! The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and +#! `docs-deploy-firebase.patch-external.yml` must be kept in sync. jobs: build-docs-book: name: Build and Deploy Zebra Book Docs - # Only run on PRs from external repositories. - if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' @@ -26,5 +25,6 @@ jobs: # change. 
needs: build-docs-book runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/docs-deploy-firebase.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml index 7d84ff75961..30c0793d852 100644 --- a/.github/workflows/docs-deploy-firebase.patch.yml +++ b/.github/workflows/docs-deploy-firebase.patch.yml @@ -21,19 +21,21 @@ on: # workflow definitions - '.github/workflows/docs-deploy-firebase.yml' -# IMPORTANT -# -# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and -# `docs-deploy-firebase.patch-external.yml` must be kept in sync. +#! IMPORTANT +#! +#! The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and +#! `docs-deploy-firebase.patch-external.yml` must be kept in sync. jobs: build-docs-book: name: Build and Deploy Zebra Book Docs runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' build-docs-internal: name: Build and Deploy Zebra Internal Docs runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' From 410cac0b243c28bfefbf4b2b1f4dadeb8b7d19ef Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 14 Jan 2025 21:50:56 +0100 Subject: [PATCH 047/245] chore: Fix new clippy lints (#9114) * Fix lints * chore: Release * Remove the "release crates" job (#9119) --- .../release-checklist.md | 3 - .github/workflows/release-crates-io.patch.yml | 28 ---- .github/workflows/release-crates-io.yml | 126 ------------------ .../scripts/release-crates-dry-run.sh | 40 ------ Cargo.lock | 28 ++-- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 6 +- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 6 +- zebra-consensus/Cargo.toml | 20 +-- 
zebra-grpc/Cargo.toml | 6 +- zebra-network/Cargo.toml | 4 +- zebra-network/src/protocol/external/codec.rs | 6 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 24 ++-- .../src/server/http_request_compatibility.rs | 2 +- zebra-scan/Cargo.toml | 20 +-- zebra-script/Cargo.toml | 6 +- zebra-state/Cargo.toml | 10 +- .../src/service/non_finalized_state/chain.rs | 42 +++--- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +- zebrad/Cargo.toml | 30 ++--- 24 files changed, 114 insertions(+), 317 deletions(-) delete mode 100644 .github/workflows/release-crates-io.patch.yml delete mode 100644 .github/workflows/release-crates-io.yml delete mode 100755 .github/workflows/scripts/release-crates-dry-run.sh diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 8679a37154f..d71683b04ba 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -101,9 +101,6 @@ cargo release replace --verbose --execute --allow-branch '*' --package zebrad cargo release commit --verbose --execute --allow-branch '*' ``` -Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode, however due to a bug in `cargo-release` we need to pass exact versions to the alpha crates: - -- [ ] Update `zebra-scan` and `zebra-grpc` alpha crates in the [release-crates-dry-run workflow script](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/scripts/release-crates-dry-run.sh) - [ ] Push the above version changes to the release branch. 
## Update End of Support diff --git a/.github/workflows/release-crates-io.patch.yml b/.github/workflows/release-crates-io.patch.yml deleted file mode 100644 index e8f18d6c755..00000000000 --- a/.github/workflows/release-crates-io.patch.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Release crates - -on: - # Only patch the Release PR test job - pull_request: - paths-ignore: - # code and tests - - '**/*.rs' - # hard-coded checkpoints (and proptest regressions, which are not actually needed) - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # READMEs, which are shown on the crate page - - '**/README.md' - # workflow definitions - - '.github/workflows/release-crates.io.yml' - - -jobs: - check-release: - name: Check crate release dry run - runs-on: ubuntu-latest - steps: - - run: 'echo "No check required"' diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml deleted file mode 100644 index 32586ce5fbe..00000000000 --- a/.github/workflows/release-crates-io.yml +++ /dev/null @@ -1,126 +0,0 @@ -# This workflow checks that Zebra's crates.io release script works. -# -# We use a separate action, because the changed files are different to a Continuous Deployment -# or Docker release. -# -# This workflow is triggered when: -# - A PR that changes Rust files, a README, or this workflow is opened or updated -# - A change is pushed to the main branch -# -# TODO: -# If we decide to automate crates.io releases, we can also publish crates using this workflow, when: -# - A release is published -# - A pre-release is changed to a release - -name: Release crates - -# Ensures that only one workflow task will run at a time. Previous releases, if -# already in process, won't get cancelled. Instead, we let the first release complete, -# then queue the latest pending workflow, cancelling any workflows in between. 
-# -# Since the different event types do very different things (test vs release), -# we can run different event types concurrently. -# -# For pull requests, we only run the tests from this workflow, and don't do any releases. -# So an in-progress pull request gets cancelled, just like other tests. -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - - -on: -# disabled for now -# release: -# types: -# - released - - # Only runs the release tests, doesn't release any crates. - # - # We test all changes on the main branch, just in case the PR paths are too strict. - push: - branches: - - main - - pull_request: - paths: - # code and tests - - '**/*.rs' - # hard-coded checkpoints (and proptest regressions, which are not actually needed) - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # READMEs, which are shown on the crate page - - '**/README.md' - # workflow definitions - - '.github/workflows/release-crates.io.yml' - - -jobs: - # Test that Zebra can be released to crates.io using `cargo`. - # This checks that Zebra's dependencies and release configs are correct. - check-release: - name: Check crate release dry run - timeout-minutes: 15 - runs-on: ubuntu-latest - steps: - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Checkout git repository - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 - with: - short-length: 7 - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - - name: Install cargo-release - uses: baptiste0928/cargo-install@v3.3.0 - with: - crate: cargo-release - - # Make sure Zebra can be released! 
- # - # These steps should be kept up to date with the release checklist. - # - - name: Crate release dry run - run: | - ./.github/workflows/scripts/release-crates-dry-run.sh - - # TODO: actually do the release here - #release-crates: - # name: Release Zebra Crates - # needs: [ check-release ] - # runs-on: ubuntu-latest - # timeout-minutes: 30 - # if: ${{ !cancelled() && !failure() && github.event_name == 'release' }} - # steps: - # ... - - failure-issue: - name: Open or update issues for release crates failures - # When a new job is added to this workflow, add it to this list. - needs: [ check-release ] - # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) - if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) - runs-on: ubuntu-latest - steps: - - uses: jayqi/failed-build-issue-action@v1 - with: - title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" - # New failures open an issue with this label. - label-name: S-ci-fail-release-crates-auto-issue - # If there is already an open issue with this label, any failures become comments on that issue. - always-create-new-issue: false - github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/scripts/release-crates-dry-run.sh b/.github/workflows/scripts/release-crates-dry-run.sh deleted file mode 100755 index c83b068f5bd..00000000000 --- a/.github/workflows/scripts/release-crates-dry-run.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -set -ex - -# Check if necessary tools are installed -if ! command -v git &>/dev/null || ! command -v cargo &>/dev/null; then - echo "ERROR: Required tools (git, cargo) are not installed." - exit 1 -fi - -git config --global user.email "release-tests-no-reply@zfnd.org" -git config --global user.name "Automated Release Test" - -# Ensure cargo-release is installed -if ! 
cargo release --version &>/dev/null; then - echo "ERROR: cargo release must be installed." - exit 1 -fi - -# Release process -# We use the same commands as the [release drafter](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions) -# with an extra `--no-confirm` argument for non-interactive testing. -# Update everything except for alpha crates and zebrad: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta - -# Due to a bug in cargo-release, we need to pass exact versions for alpha crates: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.13 -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.11 - -# Update zebrad: -cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch -# Continue with the release process: -cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad -cargo release commit --verbose --execute --no-confirm --allow-branch '*' - -# Dry run to check the release -# Workaround for unpublished dependency version errors: https://github.com/crate-ci/cargo-release/issues/691 -# TODO: check all crates after fixing these errors -cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad --exclude zebra-scan - -echo "Release process completed." 
diff --git a/Cargo.lock b/Cargo.lock index 30273b8d3f2..53b5a13ba46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4626,7 +4626,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.19" +version = "0.2.41-beta.20" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4649,7 +4649,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.19" +version = "0.2.41-beta.20" dependencies = [ "futures-core", "pin-project", @@ -5718,7 +5718,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "bitflags 2.6.0", "bitflags-serde-legacy", @@ -5783,7 +5783,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "bellman", "blake2b_simd", @@ -5829,7 +5829,7 @@ dependencies = [ [[package]] name = "zebra-grpc" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "color-eyre", "futures-util", @@ -5851,7 +5851,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -5892,7 +5892,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5905,7 +5905,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "base64 0.22.1", "chrono", @@ -5946,7 +5946,7 @@ dependencies = [ [[package]] name = "zebra-scan" -version = "0.1.0-alpha.12" +version = "0.1.0-alpha.13" dependencies = [ "bls12_381", "chrono", @@ -5992,7 +5992,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "hex", "lazy_static", @@ -6004,7 +6004,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" 
dependencies = [ "bincode", "chrono", @@ -6049,7 +6049,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "color-eyre", "futures", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" dependencies = [ "color-eyre", "hex", @@ -6108,7 +6108,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "2.1.0" +version = "2.1.1" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 6dc852a57ac..bee81533175 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -37,7 +37,7 @@ docker run -d --platform linux/amd64 \ ### Build it locally ```shell -git clone --depth 1 --branch v2.1.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v2.1.1 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index c72cfbf38c2..f9e0b03c5c9 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -76,7 +76,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v2.1.0 +git checkout v2.1.1 ``` 3. 
Build and Run `zebrad` @@ -89,7 +89,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.1.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.1.1 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 9f9dd5661f6..fb76c19f499 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.19" +version = "0.2.41-beta.20" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -43,10 +43,10 @@ rand = "0.8.5" tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } tower-test = "0.4.0" -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index bc20a49ef7a..61a0258d4d4 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.19" +version = "0.2.41-beta.20" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
license = "MIT OR Apache-2.0" @@ -24,4 +24,4 @@ tracing = "0.1.41" [dev-dependencies] tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index b7a9b5d9d32..eb8f098dcf0 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -145,7 +145,7 @@ proptest-derive = { version = "0.5.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } [dev-dependencies] # Benchmarks @@ -168,7 +168,7 @@ rand_chacha = "0.3.1" tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } [[bench]] name = "block" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index aac5cfc0c06..a6d86e30a60 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -63,13 +63,13 @@ orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } wagyu-zcash-parameters = "0.2.0" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.19" } -tower-batch-control = { path = "../tower-batch-control/", 
version = "0.2.41-beta.19" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.20" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.43" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.44" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } @@ -94,6 +94,6 @@ tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.1" tracing-subscriber = "0.3.19" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index cf01365553f..cef52a4c584 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-grpc" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" authors = ["Zcash Foundation "] description = "Zebra gRPC interface" license = "MIT OR Apache-2.0" @@ -28,8 +28,8 @@ color-eyre = "0.6.3" zcash_primitives.workspace = true 
-zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["shielded-scan"] } -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } [build-dependencies] tonic-build = "0.12.3" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 86d373fa8d9..7ccf4f1f259 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } [dev-dependencies] proptest = "1.4.0" diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index 1df72aecaef..2ed3673107e 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -224,7 +224,7 @@ impl Codec { writer.write_u64::(nonce.0)?; - if user_agent.as_bytes().len() > MAX_USER_AGENT_LENGTH { + if user_agent.len() > MAX_USER_AGENT_LENGTH { // zcashd won't accept this version message return Err(Error::Parse( "user agent too long: must be 256 bytes or less", @@ -248,7 +248,7 @@ impl Codec { reason, data, } => { - if message.as_bytes().len() > MAX_REJECT_MESSAGE_LENGTH { + if message.len() > MAX_REJECT_MESSAGE_LENGTH { // zcashd won't accept this reject message return Err(Error::Parse( "reject message too long: must be 
12 bytes or less", @@ -259,7 +259,7 @@ impl Codec { writer.write_u8(*ccode as u8)?; - if reason.as_bytes().len() > MAX_REJECT_REASON_LENGTH { + if reason.len() > MAX_REJECT_REASON_LENGTH { return Err(Error::Parse( "reject reason too long: must be 111 bytes or less", )); diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index d9fcbba5bdd..6aa11ad45e9 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -37,7 +37,7 @@ rpc-client = [ shielded-scan = [] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index d180f049dc5..270d7a63ecf 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -106,16 +106,16 @@ zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl proptest = { version = "1.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ "json-conversion", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44" } +zebra-network = { 
path = "../zebra-network", version = "1.0.0-beta.44" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = [ "rpc-client", ] } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.43" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.44" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } [build-dependencies] tonic-build = { version = "0.12.3", optional = true } @@ -128,17 +128,17 @@ proptest = "1.4.0" thiserror = "2.0.6" tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ "proptest-impl", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44", features = [ "proptest-impl", ] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43", features = [ +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44", features = [ "proptest-impl", ] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = [ +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = [ "proptest-impl", ] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index ebbf49c05d3..5eb03b1c4fc 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -73,7 +73,7 @@ impl HttpRequestMiddleware { .and_then(|encoded| URL_SAFE.decode(encoded).ok()) .and_then(|decoded| String::from_utf8(decoded).ok()) 
.and_then(|request_cookie| request_cookie.split(':').nth(1).map(String::from)) - .map_or(false, |passwd| internal_cookie.authenticate(passwd)) + .is_some_and(|passwd| internal_cookie.authenticate(passwd)) }) } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 17bd29baed7..42cf878a8e6 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-scan" -version = "0.1.0-alpha.12" +version = "0.1.0-alpha.13" authors = ["Zcash Foundation "] description = "Shielded transaction scanner for the Zcash blockchain" license = "MIT OR Apache-2.0" @@ -77,11 +77,11 @@ zcash_primitives.workspace = true zcash_address.workspace = true sapling-crypto.workspace = true -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["shielded-scan"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["shielded-scan"] } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["shielded-scan"] } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["shielded-scan"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } @@ -96,7 +96,7 @@ jubjub = { version = "0.10.0", optional = true } rand = { version = "0.8.5", optional = true } zcash_note_encryption = { version = "0.4.0", optional = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43", optional = true } +zebra-test = { path = 
"../zebra-test", version = "1.0.0-beta.44", optional = true } # zebra-scanner binary dependencies tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } @@ -107,7 +107,7 @@ serde_json = "1.0.133" jsonrpc = { version = "0.18.0", optional = true } hex = { version = "0.4.3", optional = true } -zebrad = { path = "../zebrad", version = "2.1.0" } +zebrad = { path = "../zebrad", version = "2.1.1" } [dev-dependencies] insta = { version = "1.41.1", features = ["ron", "redactions"] } @@ -125,6 +125,6 @@ zcash_note_encryption = "0.4.0" toml = "0.8.19" tonic = "0.12.3" -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index e0d3094ef98..0a187676798 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -16,11 +16,11 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.2.0" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } thiserror = "2.0.6" [dev-dependencies] hex = "0.4.3" lazy_static = "1.4.0" -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 8ca769f6910..fc2a6505791 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" 
-version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -77,13 +77,13 @@ tracing = "0.1.41" elasticsearch = { version = "8.16.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.133", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.5.0", optional = true } @@ -108,5 +108,5 @@ jubjub = "0.10.0" tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index eb00fbda3a5..0dfcd585c12 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -551,16 +551,14 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sprout tree"); - // Don't add a new tree unless it differs from the previous one or there's no previous tree. 
+ // Add the new tree only if: + // + // - it differs from the previous one, or + // - there's no previous tree. if height.is_min() || self - .sprout_tree( - height - .previous() - .expect("Already checked for underflow.") - .into(), - ) - .map_or(true, |prev_tree| prev_tree != tree) + .sprout_tree(height.previous().expect("prev height").into()) + .is_none_or(|prev_tree| prev_tree != tree) { assert_eq!( self.sprout_trees_by_height.insert(height, tree.clone()), @@ -756,16 +754,14 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sapling tree"); - // Don't add a new tree unless it differs from the previous one or there's no previous tree. + // Add the new tree only if: + // + // - it differs from the previous one, or + // - there's no previous tree. if height.is_min() || self - .sapling_tree( - height - .previous() - .expect("Already checked for underflow.") - .into(), - ) - .map_or(true, |prev_tree| prev_tree != tree) + .sapling_tree(height.previous().expect("prev height").into()) + .is_none_or(|prev_tree| prev_tree != tree) { assert_eq!( self.sapling_trees_by_height.insert(height, tree), @@ -963,16 +959,14 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding orchard tree"); - // Don't add a new tree unless it differs from the previous one or there's no previous tree. + // Add the new tree only if: + // + // - it differs from the previous one, or + // - there's no previous tree. 
if height.is_min() || self - .orchard_tree( - height - .previous() - .expect("Already checked for underflow.") - .into(), - ) - .map_or(true, |prev_tree| prev_tree != tree) + .orchard_tree(height.previous().expect("prev height").into()) + .is_none_or(|prev_tree| prev_tree != tree) { assert_eq!( self.orchard_trees_by_height.insert(height, tree), diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index db111c88f35..d7e22448a7f 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 2dc0a382a04..51929caff71 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.43" +version = "1.0.0-beta.44" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -94,11 +94,11 @@ tracing-error = "0.2.1" tracing-subscriber = "0.3.19" thiserror = "2.0.6" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.13.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9cf2d1e4095..ce5fbde2a21 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ 
-1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "2.1.0" +version = "2.1.1" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -157,15 +157,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.43", features = ["rpc-client"] } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.43" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["rpc-client"] } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44", optional = true } abscissa_core = "0.7.0" clap = { version = "4.5.23", features = ["cargo"] } @@ -279,13 +279,13 @@ proptest-derive = "0.5.0" # enable span traces and track caller in tests color-eyre = { version = "0.6.3" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.43", features = ["proptest-impl"] } 
-zebra-network = { path = "../zebra-network", version = "1.0.0-beta.43", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.43", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.43" } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). 
@@ -296,7 +296,7 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.10" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.43" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } From 82c23f377c5f76b65ed89ca519bd159f6331ae18 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 15 Jan 2025 12:31:45 +0100 Subject: [PATCH 048/245] add(test): Add serialized NU5 blocks to test vectors (#9098) * Add serialized Mainnet blocks for tests * Add Sapling anchors * Add Mainnet Orchard anchors * Remove wrong Testnet NU5 blocks * Add Testnet blocks with V5 txs to test vectors * Move the Sapling treestate * Add Sapling & Orchard anchors * Remove unneeded test for fake V5 txs We don't need this test anymore since we have real V5 txs now. 
* Add `has_transparent_inputs` to `Transaction` * Fix `v5_with_sapling_spends` test * Fix `binding_signatures` test * Refactor block test vectors * Use real V5 txs instead of fake ones * Fix `v5_transaction_is_rejected_before_nu5` test * Fix `v5_tx_is_accepted_after_nu5_activation` test * Fix `v5_tx_with_no_outputs_fails_validation` test * Move `v5_tx_with_no_outputs_fails_validation` test * Fix `v5_tx_with_no_inputs_fails_verification` test * Fix `v5_tx_with_orchard_actions_has_inputs..` test * Fix `v5_coinbase_tx_without_spends_flag_passes` * Simplify `orchard` imports * Fix `v5_tx_with_orchard_actions_has_flags` test * Fix `v5_coinbase_tx_with_enable_spends_fails` * Fix `v5_tx_with_duplicate_orchard_action` test * Fix `coinbase_outputs_are_decryptable_for_v5` * Fix `shielded_outputs_are_not_decryptable_for_v5` * Use `Network::iter` instead of Mainnet * Rename basic V5 tx test * Apply suggestions from code review Co-authored-by: Arya * Return an `Ok` in tx is not coinbase * formatting * Update zebra-consensus/src/transaction/check.rs Co-authored-by: Arya --------- Co-authored-by: Arya Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/block/tests/vectors.rs | 132 ++- zebra-chain/src/tests/vectors.rs | 27 +- zebra-chain/src/transaction.rs | 7 +- zebra-chain/src/transaction/arbitrary.rs | 17 +- zebra-chain/src/transaction/tests/vectors.rs | 246 ++---- zebra-consensus/src/transaction/tests.rs | 773 ++++++++---------- .../src/vectors/block-main-1-687-106.txt | 1 + .../src/vectors/block-main-1-687-107.txt | 1 + .../src/vectors/block-main-1-687-108.txt | 1 + .../src/vectors/block-main-1-687-113.txt | 1 + .../src/vectors/block-main-1-687-118.txt | 1 + .../src/vectors/block-main-1-687-121.txt | 1 + .../src/vectors/block-test-1-842-421.txt | 1 + .../src/vectors/block-test-1-842-432.txt | 1 + .../src/vectors/block-test-1-842-462.txt | 1 + .../src/vectors/block-test-1-842-467.txt | 1 + .../src/vectors/block-test-1-842-468.txt | 
1 + zebra-test/src/vectors/block.rs | 206 ++++- 18 files changed, 705 insertions(+), 714 deletions(-) create mode 100644 zebra-test/src/vectors/block-main-1-687-106.txt create mode 100644 zebra-test/src/vectors/block-main-1-687-107.txt create mode 100644 zebra-test/src/vectors/block-main-1-687-108.txt create mode 100644 zebra-test/src/vectors/block-main-1-687-113.txt create mode 100644 zebra-test/src/vectors/block-main-1-687-118.txt create mode 100644 zebra-test/src/vectors/block-main-1-687-121.txt create mode 100644 zebra-test/src/vectors/block-test-1-842-421.txt create mode 100644 zebra-test/src/vectors/block-test-1-842-432.txt create mode 100644 zebra-test/src/vectors/block-test-1-842-462.txt create mode 100644 zebra-test/src/vectors/block-test-1-842-467.txt create mode 100644 zebra-test/src/vectors/block-test-1-842-468.txt diff --git a/zebra-chain/src/block/tests/vectors.rs b/zebra-chain/src/block/tests/vectors.rs index 5ff19ca1092..02764c19ef7 100644 --- a/zebra-chain/src/block/tests/vectors.rs +++ b/zebra-chain/src/block/tests/vectors.rs @@ -9,10 +9,8 @@ use crate::{ block::{ serialize::MAX_BLOCK_BYTES, Block, BlockTimeError, Commitment::*, Hash, Header, Height, }, - parameters::{ - Network::{self, *}, - NetworkUpgrade::*, - }, + parameters::{Network, NetworkUpgrade::*}, + sapling, serialization::{ sha256d, SerializationError, ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize, }, @@ -191,88 +189,80 @@ fn block_test_vectors_unique() { ); } +/// Checks that: +/// +/// - the block test vector indexes match the heights in the block data; +/// - each post-Sapling block has a corresponding final Sapling root; +/// - each post-Orchard block has a corresponding final Orchard root. 
#[test] -fn block_test_vectors_height_mainnet() { - let _init_guard = zebra_test::init(); - - block_test_vectors_height(Mainnet); -} - -#[test] -fn block_test_vectors_height_testnet() { +fn block_test_vectors() { let _init_guard = zebra_test::init(); - block_test_vectors_height(Network::new_default_testnet()); -} + for net in Network::iter() { + let sapling_anchors = net.sapling_anchors(); + let orchard_anchors = net.orchard_anchors(); -/// Test that the block test vector indexes match the heights in the block data, -/// and that each post-sapling block has a corresponding final sapling root. -fn block_test_vectors_height(network: Network) { - let (block_iter, sapling_roots) = network.block_sapling_roots_iter(); - - for (&height, block) in block_iter { - let block = block - .zcash_deserialize_into::() - .expect("block is structurally valid"); - assert_eq!( - block.coinbase_height().expect("block height is valid").0, - height, - "deserialized height must match BTreeMap key height" - ); + for (&height, block) in net.block_iter() { + let block = block + .zcash_deserialize_into::() + .expect("block is structurally valid"); + assert_eq!( + block.coinbase_height().expect("block height is valid").0, + height, + "deserialized height must match BTreeMap key height" + ); - if height - >= Sapling - .activation_height(&network) - .expect("sapling activation height is set") - .0 - { - assert!( - sapling_roots.contains_key(&height), - "post-sapling block test vectors must have matching sapling root test vectors: missing {network} {height}" + if height + >= Sapling + .activation_height(&net) + .expect("activation height") + .0 + { + assert!( + sapling_anchors.contains_key(&height), + "post-sapling block test vectors must have matching sapling root test vectors: \ + missing {net} {height}" ); + } + + if height >= Nu5.activation_height(&net).expect("activation height").0 { + assert!( + orchard_anchors.contains_key(&height), + "post-nu5 block test vectors must have matching orchard 
root test vectors: \ + missing {net} {height}" + ); + } } } } -#[test] -fn block_commitment_mainnet() { - let _init_guard = zebra_test::init(); - - block_commitment(Mainnet); -} - -#[test] -fn block_commitment_testnet() { - let _init_guard = zebra_test::init(); - - block_commitment(Network::new_default_testnet()); -} - -/// Check that the block commitment field parses without errors. +/// Checks that the block commitment field parses without errors. /// For sapling and blossom blocks, also check the final sapling root value. /// /// TODO: add chain history test vectors? -fn block_commitment(network: Network) { - let (block_iter, sapling_roots) = network.block_sapling_roots_iter(); - - for (height, block) in block_iter { - let block = block - .zcash_deserialize_into::() - .expect("block is structurally valid"); +#[test] +fn block_commitment() { + let _init_guard = zebra_test::init(); - let commitment = block.commitment(&network).unwrap_or_else(|_| { - panic!("unexpected structurally invalid block commitment at {network} {height}") - }); + for net in Network::iter() { + let sapling_anchors = net.sapling_anchors(); - if let FinalSaplingRoot(final_sapling_root) = commitment { - let expected_final_sapling_root = *sapling_roots - .get(height) - .expect("unexpected missing final sapling root test vector"); - assert_eq!( - final_sapling_root, - crate::sapling::tree::Root::try_from(*expected_final_sapling_root).unwrap(), - "unexpected invalid final sapling root commitment at {network} {height}" - ); + for (height, block) in net.block_iter() { + if let FinalSaplingRoot(anchor) = block + .zcash_deserialize_into::() + .expect("block is structurally valid") + .commitment(&net) + .expect("unexpected structurally invalid block commitment at {net} {height}") + { + let expected_anchor = *sapling_anchors + .get(height) + .expect("unexpected missing final sapling root test vector"); + assert_eq!( + anchor, + sapling::tree::Root::try_from(*expected_anchor).unwrap(), + "unexpected 
invalid final sapling root commitment at {net} {height}" + ); + } } } } diff --git a/zebra-chain/src/tests/vectors.rs b/zebra-chain/src/tests/vectors.rs index ef9d00f6d99..69e4955f6a0 100644 --- a/zebra-chain/src/tests/vectors.rs +++ b/zebra-chain/src/tests/vectors.rs @@ -15,9 +15,10 @@ use zebra_test::vectors::{ BLOCK_MAINNET_1046400_BYTES, BLOCK_MAINNET_653599_BYTES, BLOCK_MAINNET_982681_BYTES, BLOCK_TESTNET_1116000_BYTES, BLOCK_TESTNET_583999_BYTES, BLOCK_TESTNET_925483_BYTES, CONTINUOUS_MAINNET_BLOCKS, CONTINUOUS_TESTNET_BLOCKS, MAINNET_BLOCKS, - MAINNET_FINAL_SAPLING_ROOTS, MAINNET_FINAL_SPROUT_ROOTS, + MAINNET_FINAL_ORCHARD_ROOTS, MAINNET_FINAL_SAPLING_ROOTS, MAINNET_FINAL_SPROUT_ROOTS, SAPLING_FINAL_ROOT_MAINNET_1046400_BYTES, SAPLING_FINAL_ROOT_TESTNET_1116000_BYTES, - TESTNET_BLOCKS, TESTNET_FINAL_SAPLING_ROOTS, TESTNET_FINAL_SPROUT_ROOTS, + TESTNET_BLOCKS, TESTNET_FINAL_ORCHARD_ROOTS, TESTNET_FINAL_SAPLING_ROOTS, + TESTNET_FINAL_SPROUT_ROOTS, }; /// Network methods for fetching blockchain vectors. @@ -118,17 +119,21 @@ impl Network { } } - /// Returns iterator over blocks and sapling roots. - pub fn block_sapling_roots_iter( - &self, - ) -> ( - std::collections::btree_map::Iter<'_, u32, &[u8]>, - std::collections::BTreeMap, - ) { + /// Returns a [`BTreeMap`] of heights and Sapling anchors for this network. + pub fn sapling_anchors(&self) -> std::collections::BTreeMap { + if self.is_mainnet() { + MAINNET_FINAL_SAPLING_ROOTS.clone() + } else { + TESTNET_FINAL_SAPLING_ROOTS.clone() + } + } + + /// Returns a [`BTreeMap`] of heights and Orchard anchors for this network. 
+ pub fn orchard_anchors(&self) -> std::collections::BTreeMap { if self.is_mainnet() { - (MAINNET_BLOCKS.iter(), MAINNET_FINAL_SAPLING_ROOTS.clone()) + MAINNET_FINAL_ORCHARD_ROOTS.clone() } else { - (TESTNET_BLOCKS.iter(), TESTNET_FINAL_SAPLING_ROOTS.clone()) + TESTNET_FINAL_ORCHARD_ROOTS.clone() } } diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index d29eadff8cf..c04f4155b4f 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -253,9 +253,14 @@ impl Transaction { // other properties + /// Does this transaction have transparent inputs? + pub fn has_transparent_inputs(&self) -> bool { + !self.inputs().is_empty() + } + /// Does this transaction have transparent or shielded inputs? pub fn has_transparent_or_shielded_inputs(&self) -> bool { - !self.inputs().is_empty() || self.has_shielded_inputs() + self.has_transparent_inputs() || self.has_shielded_inputs() } /// Does this transaction have shielded inputs? diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index cf4aa7a9552..150801cf305 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -992,16 +992,17 @@ pub fn test_transactions( transactions_from_blocks(blocks) } -/// Generate an iterator over fake V5 transactions. -/// -/// These transactions are converted from non-V5 transactions that exist in the provided network -/// blocks. -pub fn fake_v5_transactions_for_network<'b>( - network: &'b Network, +/// Returns an iterator over V5 transactions extracted from the given blocks. +pub fn v5_transactions<'b>( blocks: impl DoubleEndedIterator + 'b, ) -> impl DoubleEndedIterator + 'b { - transactions_from_blocks(blocks) - .map(move |(height, transaction)| transaction_to_fake_v5(&transaction, network, height)) + transactions_from_blocks(blocks).filter_map(|(_, tx)| match *tx { + Transaction::V1 { .. } + | Transaction::V2 { .. } + | Transaction::V3 { .. 
} + | Transaction::V4 { .. } => None, + ref tx @ Transaction::V5 { .. } => Some(tx.clone()), + }) } /// Generate an iterator over ([`block::Height`], [`Arc`]). diff --git a/zebra-chain/src/transaction/tests/vectors.rs b/zebra-chain/src/transaction/tests/vectors.rs index 66d5009ed05..7daff649f3a 100644 --- a/zebra-chain/src/transaction/tests/vectors.rs +++ b/zebra-chain/src/transaction/tests/vectors.rs @@ -326,130 +326,6 @@ fn empty_v5_librustzcash_round_trip() { ); } -/// Do a round-trip test on fake v5 transactions created from v4 transactions -/// in the block test vectors. -/// -/// Covers Sapling only, Transparent only, and Sapling/Transparent v5 -/// transactions. -#[test] -fn fake_v5_round_trip() { - let _init_guard = zebra_test::init(); - for network in Network::iter() { - fake_v5_round_trip_for_network(network); - } -} - -fn fake_v5_round_trip_for_network(network: Network) { - let block_iter = network.block_iter(); - - let overwinter_activation_height = NetworkUpgrade::Overwinter - .activation_height(&network) - .expect("a valid height") - .0; - - // skip blocks that are before overwinter as they will not have a valid consensus branch id - let blocks_after_overwinter = - block_iter.skip_while(|(height, _)| **height < overwinter_activation_height); - - for (height, original_bytes) in blocks_after_overwinter { - let original_block = original_bytes - .zcash_deserialize_into::() - .expect("block is structurally valid"); - - // skip this block if it only contains v5 transactions, - // the block round-trip test covers it already - if original_block - .transactions - .iter() - .all(|trans| matches!(trans.as_ref(), &Transaction::V5 { .. 
})) - { - continue; - } - - let mut fake_block = original_block.clone(); - fake_block.transactions = fake_block - .transactions - .iter() - .map(AsRef::as_ref) - .map(|t| arbitrary::transaction_to_fake_v5(t, &network, Height(*height))) - .map(Into::into) - .collect(); - - // test each transaction - for (original_tx, fake_tx) in original_block - .transactions - .iter() - .zip(fake_block.transactions.iter()) - { - assert_ne!( - &original_tx, &fake_tx, - "v1-v4 transactions must change when converted to fake v5" - ); - - let fake_bytes = fake_tx - .zcash_serialize_to_vec() - .expect("vec serialization is infallible"); - - assert_ne!( - &original_bytes[..], - fake_bytes, - "v1-v4 transaction data must change when converted to fake v5" - ); - - let fake_tx2 = fake_bytes - .zcash_deserialize_into::() - .expect("tx is structurally valid"); - - assert_eq!(fake_tx.as_ref(), &fake_tx2); - - let fake_bytes2 = fake_tx2 - .zcash_serialize_to_vec() - .expect("vec serialization is infallible"); - - assert_eq!( - fake_bytes, fake_bytes2, - "data must be equal if structs are equal" - ); - } - - // test full blocks - assert_ne!( - &original_block, &fake_block, - "v1-v4 transactions must change when converted to fake v5" - ); - - let fake_bytes = fake_block - .zcash_serialize_to_vec() - .expect("vec serialization is infallible"); - - assert_ne!( - &original_bytes[..], - fake_bytes, - "v1-v4 transaction data must change when converted to fake v5" - ); - - // skip fake blocks which exceed the block size limit - if fake_bytes.len() > MAX_BLOCK_BYTES.try_into().unwrap() { - continue; - } - - let fake_block2 = fake_bytes - .zcash_deserialize_into::() - .expect("block is structurally valid"); - - assert_eq!(fake_block, fake_block2); - - let fake_bytes2 = fake_block2 - .zcash_serialize_to_vec() - .expect("vec serialization is infallible"); - - assert_eq!( - fake_bytes, fake_bytes2, - "data must be equal if structs are equal" - ); - } -} - #[test] fn invalid_orchard_nullifier() { let 
_init_guard = zebra_test::init(); @@ -950,65 +826,83 @@ fn zip244_sighash() -> Result<()> { #[test] fn binding_signatures() { let _init_guard = zebra_test::init(); - for network in Network::iter() { - binding_signatures_for_network(network); - } -} -fn binding_signatures_for_network(network: Network) { - let block_iter = network.block_iter(); + for net in Network::iter() { + let sapling_activation_height = NetworkUpgrade::Sapling + .activation_height(&net) + .expect("a valid height") + .0; - for (height, bytes) in block_iter { - let upgrade = NetworkUpgrade::current(&network, Height(*height)); + let mut at_least_one_v4_checked = false; + let mut at_least_one_v5_checked = false; - let block = bytes - .zcash_deserialize_into::() - .expect("a valid block"); - - for tx in block.transactions { - match &*tx { - Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => (), - Transaction::V4 { - sapling_shielded_data, - .. - } => { - if let Some(sapling_shielded_data) = sapling_shielded_data { - let shielded_sighash = - tx.sighash(upgrade.branch_id().unwrap(), HashType::ALL, &[], None); - - let bvk = redjubjub::VerificationKey::try_from( - sapling_shielded_data.binding_verification_key(), - ) - .expect("a valid redjubjub::VerificationKey"); - - bvk.verify( - shielded_sighash.as_ref(), - &sapling_shielded_data.binding_sig, - ) - .expect("must pass verification"); + for (height, block) in net + .block_iter() + .skip_while(|(height, _)| **height < sapling_activation_height) + { + let branch_id = NetworkUpgrade::current(&net, Height(*height)) + .branch_id() + .expect("consensus branch ID"); + + for tx in block + .zcash_deserialize_into::() + .expect("a valid block") + .transactions + { + match &*tx { + Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => (), + Transaction::V4 { + sapling_shielded_data, + .. 
+ } => { + if let Some(sapling_shielded_data) = sapling_shielded_data { + let sighash = tx.sighash(branch_id, HashType::ALL, &[], None); + + let bvk = redjubjub::VerificationKey::try_from( + sapling_shielded_data.binding_verification_key(), + ) + .expect("a valid redjubjub::VerificationKey"); + + bvk.verify(sighash.as_ref(), &sapling_shielded_data.binding_sig) + .expect("must pass verification"); + + at_least_one_v4_checked = true; + } } - } - Transaction::V5 { - sapling_shielded_data, - .. - } => { - if let Some(sapling_shielded_data) = sapling_shielded_data { - let shielded_sighash = - tx.sighash(upgrade.branch_id().unwrap(), HashType::ALL, &[], None); - - let bvk = redjubjub::VerificationKey::try_from( - sapling_shielded_data.binding_verification_key(), - ) - .expect("a valid redjubjub::VerificationKey"); - - bvk.verify( - shielded_sighash.as_ref(), - &sapling_shielded_data.binding_sig, - ) - .expect("must pass verification"); + Transaction::V5 { + sapling_shielded_data, + .. + } => { + if let Some(sapling_shielded_data) = sapling_shielded_data { + // V5 txs have the outputs spent by their transparent inputs hashed into + // their SIGHASH, so we need to exclude txs with transparent inputs. 
+ // + // References: + // + // + // + if tx.has_transparent_inputs() { + continue; + } + + let sighash = tx.sighash(branch_id, HashType::ALL, &[], None); + + let bvk = redjubjub::VerificationKey::try_from( + sapling_shielded_data.binding_verification_key(), + ) + .expect("a valid redjubjub::VerificationKey"); + + bvk.verify(sighash.as_ref(), &sapling_shielded_data.binding_sig) + .expect("verification passes"); + + at_least_one_v5_checked = true; + } } } } } + + assert!(at_least_one_v4_checked); + assert!(at_least_one_v5_checked); } } diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index ac1a42fea5c..044c8b01842 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -16,7 +16,7 @@ use tower::{buffer::Buffer, service_fn, ServiceExt}; use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, Height}, - orchard::AuthorizedAction, + orchard::{Action, AuthorizedAction, Flags}, parameters::{Network, NetworkUpgrade}, primitives::{ed25519, x25519, Groth16Proof}, sapling, @@ -24,8 +24,8 @@ use zebra_chain::{ sprout, transaction::{ arbitrary::{ - fake_v5_transactions_for_network, insert_fake_orchard_shielded_data, test_transactions, - transactions_from_blocks, + insert_fake_orchard_shielded_data, test_transactions, transactions_from_blocks, + v5_transactions, }, zip317, Hash, HashType, JoinSplitData, LockTime, Transaction, }, @@ -44,11 +44,11 @@ use super::{check, Request, Verifier}; mod prop; #[test] -fn v5_fake_transactions() -> Result<(), Report> { +fn v5_transactions_basic_check() -> Result<(), Report> { let _init_guard = zebra_test::init(); for network in Network::iter() { - for transaction in fake_v5_transactions_for_network(&network, network.block_iter()) { + for transaction in v5_transactions(network.block_iter()) { match check::has_inputs_and_outputs(&transaction) { Ok(()) => (), Err(TransactionError::NoInputs) | Err(TransactionError::NoOutputs) => (), @@ 
-64,172 +64,178 @@ fn v5_fake_transactions() -> Result<(), Report> { } #[test] -fn fake_v5_transaction_with_orchard_actions_has_inputs_and_outputs() { - // Find a transaction with no inputs or outputs to use as base - let mut transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), - ) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.outputs().is_empty() - && transaction.sapling_spends_per_anchor().next().is_none() - && transaction.sapling_outputs().next().is_none() - && transaction.joinsplit_count() == 0 - }) - .expect("At least one fake V5 transaction with no inputs and no outputs"); +fn v5_transaction_with_orchard_actions_has_inputs_and_outputs() { + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .find(|transaction| { + transaction.inputs().is_empty() + && transaction.outputs().is_empty() + && transaction.sapling_spends_per_anchor().next().is_none() + && transaction.sapling_outputs().next().is_none() + && transaction.joinsplit_count() == 0 + }) + .expect("V5 tx with only Orchard shielded data"); - // Insert fake Orchard shielded data to the transaction, which has at least one action (this is - // guaranteed structurally by `orchard::ShieldedData`) - insert_fake_orchard_shielded_data(&mut transaction); + tx.orchard_shielded_data_mut().unwrap().flags = Flags::empty(); - // The check will fail if the transaction has no flags - assert_eq!( - check::has_inputs_and_outputs(&transaction), - Err(TransactionError::NoInputs) - ); + // The check will fail if the transaction has no flags + assert_eq!( + check::has_inputs_and_outputs(&tx), + Err(TransactionError::NoInputs) + ); - // If we add ENABLE_SPENDS flag it will pass the inputs check but fails with the outputs - // TODO: Avoid new calls to `insert_fake_orchard_shielded_data` for each check #2409. 
- let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS; + // If we add ENABLE_SPENDS flag it will pass the inputs check but fails with the outputs + tx.orchard_shielded_data_mut().unwrap().flags = Flags::ENABLE_SPENDS; - assert_eq!( - check::has_inputs_and_outputs(&transaction), - Err(TransactionError::NoOutputs) - ); + assert_eq!( + check::has_inputs_and_outputs(&tx), + Err(TransactionError::NoOutputs) + ); - // If we add ENABLE_OUTPUTS flag it will pass the outputs check but fails with the inputs - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_OUTPUTS; + // If we add ENABLE_OUTPUTS flag it will pass the outputs check but fails with the inputs + tx.orchard_shielded_data_mut().unwrap().flags = Flags::ENABLE_OUTPUTS; - assert_eq!( - check::has_inputs_and_outputs(&transaction), - Err(TransactionError::NoInputs) - ); + assert_eq!( + check::has_inputs_and_outputs(&tx), + Err(TransactionError::NoInputs) + ); - // Finally make it valid by adding both required flags - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = - zebra_chain::orchard::Flags::ENABLE_SPENDS | zebra_chain::orchard::Flags::ENABLE_OUTPUTS; + // Finally make it valid by adding both required flags + tx.orchard_shielded_data_mut().unwrap().flags = + Flags::ENABLE_SPENDS | Flags::ENABLE_OUTPUTS; - assert!(check::has_inputs_and_outputs(&transaction).is_ok()); + assert!(check::has_inputs_and_outputs(&tx).is_ok()); + } } #[test] -fn fake_v5_transaction_with_orchard_actions_has_flags() { - // Find a transaction with no inputs or outputs to use as base - let mut transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), - ) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.outputs().is_empty() - && 
transaction.sapling_spends_per_anchor().next().is_none() - && transaction.sapling_outputs().next().is_none() - && transaction.joinsplit_count() == 0 - }) - .expect("At least one fake V5 transaction with no inputs and no outputs"); +fn v5_transaction_with_orchard_actions_has_flags() { + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .find(|transaction| { + transaction.inputs().is_empty() + && transaction.outputs().is_empty() + && transaction.sapling_spends_per_anchor().next().is_none() + && transaction.sapling_outputs().next().is_none() + && transaction.joinsplit_count() == 0 + }) + .expect("V5 tx with only Orchard actions"); - // Insert fake Orchard shielded data to the transaction, which has at least one action (this is - // guaranteed structurally by `orchard::ShieldedData`) - insert_fake_orchard_shielded_data(&mut transaction); + tx.orchard_shielded_data_mut().unwrap().flags = Flags::empty(); - // The check will fail if the transaction has no flags - assert_eq!( - check::has_enough_orchard_flags(&transaction), - Err(TransactionError::NotEnoughFlags) - ); + // The check will fail if the transaction has no flags + assert_eq!( + check::has_enough_orchard_flags(&tx), + Err(TransactionError::NotEnoughFlags) + ); + + // If we add ENABLE_SPENDS flag it will pass. + tx.orchard_shielded_data_mut().unwrap().flags = Flags::ENABLE_SPENDS; + assert!(check::has_enough_orchard_flags(&tx).is_ok()); - // If we add ENABLE_SPENDS flag it will pass. - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS; - assert!(check::has_enough_orchard_flags(&transaction).is_ok()); + tx.orchard_shielded_data_mut().unwrap().flags = Flags::empty(); - // If we add ENABLE_OUTPUTS flag instead, it will pass. 
- let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_OUTPUTS; - assert!(check::has_enough_orchard_flags(&transaction).is_ok()); + // If we add ENABLE_OUTPUTS flag instead, it will pass. + tx.orchard_shielded_data_mut().unwrap().flags = Flags::ENABLE_OUTPUTS; + assert!(check::has_enough_orchard_flags(&tx).is_ok()); - // If we add BOTH ENABLE_SPENDS and ENABLE_OUTPUTS flags it will pass. - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = - zebra_chain::orchard::Flags::ENABLE_SPENDS | zebra_chain::orchard::Flags::ENABLE_OUTPUTS; - assert!(check::has_enough_orchard_flags(&transaction).is_ok()); + tx.orchard_shielded_data_mut().unwrap().flags = Flags::empty(); + + // If we add BOTH ENABLE_SPENDS and ENABLE_OUTPUTS flags it will pass. + tx.orchard_shielded_data_mut().unwrap().flags = + Flags::ENABLE_SPENDS | Flags::ENABLE_OUTPUTS; + assert!(check::has_enough_orchard_flags(&tx).is_ok()); + } } #[test] -fn v5_transaction_with_no_inputs_fails_validation() { - let transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), - ) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.sapling_spends_per_anchor().next().is_none() - && transaction.orchard_actions().next().is_none() - && transaction.joinsplit_count() == 0 - && (!transaction.outputs().is_empty() || transaction.sapling_outputs().next().is_some()) - }) - .expect("At least one fake v5 transaction with no inputs in the test vectors"); +fn v5_transaction_with_no_inputs_fails_verification() { + let (_, output, _) = mock_transparent_transfer( + Height(1), + true, + 0, + Amount::try_from(1).expect("valid value"), + ); - assert_eq!( - check::has_inputs_and_outputs(&transaction), - Err(TransactionError::NoInputs) + for net in Network::iter() { + let transaction = Transaction::V5 { + inputs: vec![], + outputs: 
vec![output.clone()], + lock_time: LockTime::Height(block::Height(0)), + expiry_height: NetworkUpgrade::Nu5.activation_height(&net).expect("height"), + sapling_shielded_data: None, + orchard_shielded_data: None, + network_upgrade: NetworkUpgrade::Nu5, + }; + + assert_eq!( + check::has_inputs_and_outputs(&transaction), + Err(TransactionError::NoInputs) + ); + } +} + +#[test] +fn v5_transaction_with_no_outputs_fails_verification() { + let (input, _, _) = mock_transparent_transfer( + Height(1), + true, + 0, + Amount::try_from(1).expect("valid value"), ); + + for net in Network::iter() { + let transaction = Transaction::V5 { + inputs: vec![input.clone()], + outputs: vec![], + lock_time: LockTime::Height(block::Height(0)), + expiry_height: NetworkUpgrade::Nu5.activation_height(&net).expect("height"), + sapling_shielded_data: None, + orchard_shielded_data: None, + network_upgrade: NetworkUpgrade::Nu5, + }; + + assert_eq!( + check::has_inputs_and_outputs(&transaction), + Err(TransactionError::NoOutputs) + ); + } } #[tokio::test] async fn mempool_request_with_missing_input_is_rejected() { - let mut state: MockService<_, _, _, _> = MockService::build().for_prop_tests(); - let verifier = Verifier::new_for_tests(&Network::Mainnet, state.clone()); + let mut state: MockService<_, _, _, _> = MockService::build().for_unit_tests(); - let (height, tx) = transactions_from_blocks(zebra_test::vectors::MAINNET_BLOCKS.iter()) - .find(|(_, tx)| !(tx.is_coinbase() || tx.inputs().is_empty())) - .expect("At least one non-coinbase transaction with transparent inputs in test vectors"); + for net in Network::iter() { + let verifier = Verifier::new_for_tests(&net, state.clone()); - let input_outpoint = match tx.inputs()[0] { - transparent::Input::PrevOut { outpoint, .. } => outpoint, - transparent::Input::Coinbase { .. 
} => panic!("requires a non-coinbase transaction"), - }; + let (height, tx) = transactions_from_blocks(net.block_iter()) + .find(|(_, tx)| !(tx.is_coinbase() || tx.inputs().is_empty())) + .expect( + "At least one non-coinbase transaction with transparent inputs in test vectors", + ); + + let input_outpoint = match tx.inputs()[0] { + transparent::Input::PrevOut { outpoint, .. } => outpoint, + transparent::Input::Coinbase { .. } => panic!("requires a non-coinbase transaction"), + }; - tokio::spawn(async move { // The first non-coinbase transaction with transparent inputs in our test vectors // does not use a lock time, so we don't see Request::BestChainNextMedianTimePast here - state + let state_req = state .expect_request(zebra_state::Request::UnspentBestChainUtxo(input_outpoint)) - .await - .expect("verifier should call mock state service with correct request") - .respond(zebra_state::Response::UnspentBestChainUtxo(None)); + .map(|responder| responder.respond(zebra_state::Response::UnspentBestChainUtxo(None))); - state - .expect_request_that(|req| { - matches!( - req, - zebra_state::Request::CheckBestChainTipNullifiersAndAnchors(_) - ) - }) - .await - .expect("verifier should call mock state service with correct request") - .respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors); - }); - - let verifier_response = verifier - .oneshot(Request::Mempool { + let verifier_req = verifier.oneshot(Request::Mempool { transaction: tx.into(), height, - }) - .await; + }); - assert_eq!( - verifier_response, - Err(TransactionError::TransparentInputNotFound) - ); + let (rsp, _) = futures::join!(verifier_req, state_req); + + assert_eq!(rsp, Err(TransactionError::TransparentInputNotFound)); + } } #[tokio::test] @@ -1173,144 +1179,93 @@ async fn state_error_converted_correctly() { ); } -#[test] -fn v5_transaction_with_no_outputs_fails_validation() { - let transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), 
- ) - .rev() - .find(|transaction| { - transaction.outputs().is_empty() - && transaction.sapling_outputs().next().is_none() - && transaction.orchard_actions().next().is_none() - && transaction.joinsplit_count() == 0 - && (!transaction.inputs().is_empty() - || transaction.sapling_spends_per_anchor().next().is_some()) - }) - .expect("At least one fake v5 transaction with no outputs in the test vectors"); - - assert_eq!( - check::has_inputs_and_outputs(&transaction), - Err(TransactionError::NoOutputs) - ); -} - #[test] fn v5_coinbase_transaction_without_enable_spends_flag_passes_validation() { - let mut transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), - ) - .rev() - .find(|transaction| transaction.is_coinbase()) - .expect("At least one fake V5 coinbase transaction in the test vectors"); + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .find(|transaction| transaction.is_coinbase()) + .expect("V5 coinbase tx"); + + let shielded_data = insert_fake_orchard_shielded_data(&mut tx); - insert_fake_orchard_shielded_data(&mut transaction); + assert!(!shielded_data.flags.contains(Flags::ENABLE_SPENDS)); - assert!(check::coinbase_tx_no_prevout_joinsplit_spend(&transaction).is_ok()); + assert!(check::coinbase_tx_no_prevout_joinsplit_spend(&tx).is_ok()); + } } #[test] fn v5_coinbase_transaction_with_enable_spends_flag_fails_validation() { - let mut transaction = fake_v5_transactions_for_network( - &Network::Mainnet, - zebra_test::vectors::MAINNET_BLOCKS.iter(), - ) - .rev() - .find(|transaction| transaction.is_coinbase()) - .expect("At least one fake V5 coinbase transaction in the test vectors"); + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .find(|transaction| transaction.is_coinbase()) + .expect("V5 coinbase tx"); - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); + let shielded_data = insert_fake_orchard_shielded_data(&mut 
tx); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS; + assert!(!shielded_data.flags.contains(Flags::ENABLE_SPENDS)); - assert_eq!( - check::coinbase_tx_no_prevout_joinsplit_spend(&transaction), - Err(TransactionError::CoinbaseHasEnableSpendsOrchard) - ); -} - -#[tokio::test] -async fn v5_transaction_is_rejected_before_nu5_activation() { - const V5_TRANSACTION_VERSION: u32 = 5; - - let canopy = NetworkUpgrade::Canopy; - - for network in Network::iter() { - let state_service = service_fn(|_| async { unreachable!("Service should not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); - - let transaction = fake_v5_transactions_for_network(&network, network.block_iter()) - .next_back() - .expect("At least one fake V5 transaction in the test vectors"); - - let result = verifier - .oneshot(Request::Block { - transaction_hash: transaction.hash(), - transaction: Arc::new(transaction), - known_utxos: Arc::new(HashMap::new()), - known_outpoint_hashes: Arc::new(HashSet::new()), - height: canopy - .activation_height(&network) - .expect("Canopy activation height is specified"), - time: DateTime::::MAX_UTC, - }) - .await; + shielded_data.flags = Flags::ENABLE_SPENDS; assert_eq!( - result, - Err(TransactionError::UnsupportedByNetworkUpgrade( - V5_TRANSACTION_VERSION, - canopy - )) + check::coinbase_tx_no_prevout_joinsplit_spend(&tx), + Err(TransactionError::CoinbaseHasEnableSpendsOrchard) ); } } -#[test] -fn v5_transaction_is_accepted_after_nu5_activation() { - let _init_guard = zebra_test::init(); - - for network in Network::iter() { - zebra_test::MULTI_THREADED_RUNTIME.block_on(async { - let nu5_activation_height = NetworkUpgrade::Nu5 - .activation_height(&network) - .expect("NU5 activation height is specified"); - - let state = service_fn(|_| async { unreachable!("Service should not be called") }); - - let mut tx = fake_v5_transactions_for_network(&network, network.block_iter()) - .next_back() - .expect("At least one fake V5 
transaction in the test vectors"); +#[tokio::test] +async fn v5_transaction_is_rejected_before_nu5_activation() { + let sapling = NetworkUpgrade::Sapling; - if tx.expiry_height().expect("V5 must have expiry_height") < nu5_activation_height { - *tx.expiry_height_mut() = nu5_activation_height; - tx.update_network_upgrade(NetworkUpgrade::Nu5) - .expect("updating the network upgrade for a V5 tx should succeed"); - } + for net in Network::iter() { + let verifier = Verifier::new_for_tests( + &net, + service_fn(|_| async { unreachable!("Service should not be called") }), + ); - let expected_hash = tx.unmined_id(); - let expiry_height = tx.expiry_height().expect("V5 must have expiry_height"); + let tx = v5_transactions(net.block_iter()).next().expect("V5 tx"); - let verification_result = Verifier::new_for_tests(&network, state) + assert_eq!( + verifier .oneshot(Request::Block { transaction_hash: tx.hash(), transaction: Arc::new(tx), known_utxos: Arc::new(HashMap::new()), known_outpoint_hashes: Arc::new(HashSet::new()), - height: expiry_height, + height: sapling.activation_height(&net).expect("height"), time: DateTime::::MAX_UTC, }) - .await; + .await, + Err(TransactionError::UnsupportedByNetworkUpgrade(5, sapling)) + ); + } +} - assert_eq!( - verification_result - .expect("successful verification") - .tx_id(), - expected_hash - ); - }); +#[tokio::test] +async fn v5_transaction_is_accepted_after_nu5_activation() { + let _init_guard = zebra_test::init(); + + for net in Network::iter() { + let state = service_fn(|_| async { unreachable!("Service should not be called") }); + let tx = v5_transactions(net.block_iter()).next().expect("V5 tx"); + let tx_height = tx.expiry_height().expect("V5 must have expiry_height"); + let expected = tx.unmined_id(); + + assert!(tx_height >= NetworkUpgrade::Nu5.activation_height(&net).expect("height")); + + let verif_res = Verifier::new_for_tests(&net, state) + .oneshot(Request::Block { + transaction_hash: tx.hash(), + transaction: Arc::new(tx), 
+ known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), + height: tx_height, + time: DateTime::::MAX_UTC, + }) + .await; + + assert_eq!(verif_res.expect("success").tx_id(), expected); } } @@ -2731,164 +2686,138 @@ fn v4_with_sapling_outputs_and_no_spends() { } /// Test if a V5 transaction with Sapling spends is accepted by the verifier. -#[test] -// TODO: add NU5 mainnet test vectors with Sapling spends, then remove should_panic -#[should_panic] -fn v5_with_sapling_spends() { +#[tokio::test] +async fn v5_with_sapling_spends() { let _init_guard = zebra_test::init(); - zebra_test::MULTI_THREADED_RUNTIME.block_on(async { - let network = Network::Mainnet; - let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); - - let transaction = - fake_v5_transactions_for_network(&network, zebra_test::vectors::MAINNET_BLOCKS.iter()) - .rev() - .filter(|transaction| { - !transaction.is_coinbase() - && transaction.inputs().is_empty() - && transaction.expiry_height() >= nu5_activation - }) - .find(|transaction| transaction.sapling_spends_per_anchor().next().is_some()) - .expect("No transaction found with Sapling spends"); - let expected_hash = transaction.unmined_id(); - let height = transaction - .expiry_height() - .expect("Transaction is missing expiry height"); + for net in Network::iter() { + let nu5_activation = NetworkUpgrade::Nu5.activation_height(&net); - // Initialize the verifier - let state_service = - service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); - - // Test the transaction verifier - let result = verifier - .oneshot(Request::Block { - transaction_hash: transaction.hash(), - transaction: Arc::new(transaction), - known_utxos: Arc::new(HashMap::new()), - known_outpoint_hashes: Arc::new(HashSet::new()), - height, - time: DateTime::::MAX_UTC, + let tx = v5_transactions(net.block_iter()) + .filter(|tx| { + !tx.is_coinbase() && 
tx.inputs().is_empty() && tx.expiry_height() >= nu5_activation }) - .await; + .find(|tx| tx.sapling_spends_per_anchor().next().is_some()) + .expect("V5 tx with Sapling spends"); + + let expected_hash = tx.unmined_id(); + let height = tx.expiry_height().expect("expiry height"); + + let verifier = Verifier::new_for_tests( + &net, + service_fn(|_| async { unreachable!("State service should not be called") }), + ); assert_eq!( - result.expect("unexpected error response").tx_id(), + verifier + .oneshot(Request::Block { + transaction_hash: tx.hash(), + transaction: Arc::new(tx), + known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), + height, + time: DateTime::::MAX_UTC, + }) + .await + .expect("unexpected error response") + .tx_id(), expected_hash ); - }); + } } /// Test if a V5 transaction with a duplicate Sapling spend is rejected by the verifier. -#[test] -fn v5_with_duplicate_sapling_spends() { +#[tokio::test] +async fn v5_with_duplicate_sapling_spends() { let _init_guard = zebra_test::init(); - zebra_test::MULTI_THREADED_RUNTIME.block_on(async { - let network = Network::Mainnet; - let mut transaction = - fake_v5_transactions_for_network(&network, zebra_test::vectors::MAINNET_BLOCKS.iter()) - .rev() - .filter(|transaction| !transaction.is_coinbase() && transaction.inputs().is_empty()) - .find(|transaction| transaction.sapling_spends_per_anchor().next().is_some()) - .expect("No transaction found with Sapling spends"); + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .filter(|tx| !tx.is_coinbase() && tx.inputs().is_empty()) + .find(|tx| tx.sapling_spends_per_anchor().next().is_some()) + .expect("V5 tx with Sapling spends"); - let height = transaction - .expiry_height() - .expect("Transaction is missing expiry height"); + let height = tx.expiry_height().expect("expiry height"); // Duplicate one of the spends - let duplicate_nullifier = duplicate_sapling_spend(&mut transaction); + let 
duplicate_nullifier = duplicate_sapling_spend(&mut tx); - // Initialize the verifier - let state_service = - service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); - - // Test the transaction verifier - let result = verifier - .oneshot(Request::Block { - transaction_hash: transaction.hash(), - transaction: Arc::new(transaction), - known_utxos: Arc::new(HashMap::new()), - known_outpoint_hashes: Arc::new(HashSet::new()), - height, - time: DateTime::::MAX_UTC, - }) - .await; + let verifier = Verifier::new_for_tests( + &net, + service_fn(|_| async { unreachable!("State service should not be called") }), + ); assert_eq!( - result, + verifier + .oneshot(Request::Block { + transaction_hash: tx.hash(), + transaction: Arc::new(tx), + known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), + height, + time: DateTime::::MAX_UTC, + }) + .await, Err(TransactionError::DuplicateSaplingNullifier( duplicate_nullifier )) ); - }); + } } /// Test if a V5 transaction with a duplicate Orchard action is rejected by the verifier. 
-#[test] -fn v5_with_duplicate_orchard_action() { +#[tokio::test] +async fn v5_with_duplicate_orchard_action() { let _init_guard = zebra_test::init(); - zebra_test::MULTI_THREADED_RUNTIME.block_on(async { - let network = Network::Mainnet; - // Find a transaction with no inputs or outputs to use as base - let mut transaction = - fake_v5_transactions_for_network(&network, zebra_test::vectors::MAINNET_BLOCKS.iter()) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.outputs().is_empty() - && transaction.sapling_spends_per_anchor().next().is_none() - && transaction.sapling_outputs().next().is_none() - && transaction.joinsplit_count() == 0 - }) - .expect("At least one fake V5 transaction with no inputs and no outputs"); + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .rev() + .find(|transaction| { + transaction.inputs().is_empty() + && transaction.outputs().is_empty() + && transaction.sapling_spends_per_anchor().next().is_none() + && transaction.sapling_outputs().next().is_none() + && transaction.joinsplit_count() == 0 + }) + .expect("V5 tx with only Orchard actions"); - let height = transaction - .expiry_height() - .expect("Transaction is missing expiry height"); + let height = tx.expiry_height().expect("expiry height"); - // Insert fake Orchard shielded data to the transaction, which has at least one action (this is - // guaranteed structurally by `orchard::ShieldedData`) - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); + let orchard_shielded_data = tx + .orchard_shielded_data_mut() + .expect("tx without transparent, Sprout, or Sapling outputs must have Orchard actions"); // Enable spends - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS - | zebra_chain::orchard::Flags::ENABLE_OUTPUTS; + orchard_shielded_data.flags = Flags::ENABLE_SPENDS | Flags::ENABLE_OUTPUTS; - // Duplicate the first action - let duplicate_action = shielded_data.actions.first().clone(); + 
let duplicate_action = orchard_shielded_data.actions.first().clone(); let duplicate_nullifier = duplicate_action.action.nullifier; - shielded_data.actions.push(duplicate_action); - - // Initialize the verifier - let state_service = - service_fn(|_| async { unreachable!("State service should not be called") }); - let verifier = Verifier::new_for_tests(&network, state_service); + // Duplicate the first action + orchard_shielded_data.actions.push(duplicate_action); - // Test the transaction verifier - let result = verifier - .oneshot(Request::Block { - transaction_hash: transaction.hash(), - transaction: Arc::new(transaction), - known_utxos: Arc::new(HashMap::new()), - known_outpoint_hashes: Arc::new(HashSet::new()), - height, - time: DateTime::::MAX_UTC, - }) - .await; + let verifier = Verifier::new_for_tests( + &net, + service_fn(|_| async { unreachable!("State service should not be called") }), + ); assert_eq!( - result, + verifier + .oneshot(Request::Block { + transaction_hash: tx.hash(), + transaction: Arc::new(tx), + known_utxos: Arc::new(HashMap::new()), + known_outpoint_hashes: Arc::new(HashSet::new()), + height, + time: DateTime::::MAX_UTC, + }) + .await, Err(TransactionError::DuplicateOrchardNullifier( duplicate_nullifier )) ); - }); + } } /// Checks that the tx verifier handles consensus branch ids in V5 txs correctly. @@ -3430,9 +3359,9 @@ fn coinbase_outputs_are_decryptable_for_historical_blocks_for_network( /// Given an Orchard action as a base, fill fields related to note encryption /// from the given test vector and returned the modified action. 
fn fill_action_with_note_encryption_test_vector( - action: &zebra_chain::orchard::Action, + action: &Action, v: &zebra_test::vectors::TestVector, -) -> zebra_chain::orchard::Action { +) -> Action { let mut action = action.clone(); action.cv = v.cv_net.try_into().expect("test vector must be valid"); action.cm_x = pallas::Base::from_repr(v.cmx).unwrap(); @@ -3446,87 +3375,65 @@ fn fill_action_with_note_encryption_test_vector( action } -/// Test if shielded coinbase outputs are decryptable with an all-zero outgoing -/// viewing key. +/// Test if shielded coinbase outputs are decryptable with an all-zero outgoing viewing key. #[test] fn coinbase_outputs_are_decryptable_for_fake_v5_blocks() { - let network = Network::new_default_testnet(); - for v in zebra_test::vectors::ORCHARD_NOTE_ENCRYPTION_ZERO_VECTOR.iter() { - // Find a transaction with no inputs or outputs to use as base - let mut transaction = - fake_v5_transactions_for_network(&network, zebra_test::vectors::TESTNET_BLOCKS.iter()) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.outputs().is_empty() - && transaction.sapling_spends_per_anchor().next().is_none() - && transaction.sapling_outputs().next().is_none() - && transaction.joinsplit_count() == 0 - }) - .expect("At least one fake V5 transaction with no inputs and no outputs"); - - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS - | zebra_chain::orchard::Flags::ENABLE_OUTPUTS; - - let action = - fill_action_with_note_encryption_test_vector(&shielded_data.actions[0].action, v); - let sig = shielded_data.actions[0].spend_auth_sig; - shielded_data.actions = vec![AuthorizedAction::from_parts(action, sig)] - .try_into() - .unwrap(); + for net in Network::iter() { + let mut transaction = v5_transactions(net.block_iter()) + .find(|tx| tx.is_coinbase()) + .expect("coinbase V5 tx"); + + let shielded_data = 
insert_fake_orchard_shielded_data(&mut transaction); + shielded_data.flags = Flags::ENABLE_OUTPUTS; + + let action = + fill_action_with_note_encryption_test_vector(&shielded_data.actions[0].action, v); + let sig = shielded_data.actions[0].spend_auth_sig; + shielded_data.actions = vec![AuthorizedAction::from_parts(action, sig)] + .try_into() + .unwrap(); - assert_eq!( - check::coinbase_outputs_are_decryptable( - &transaction, - &network, - NetworkUpgrade::Nu5.activation_height(&network).unwrap(), - ), - Ok(()) - ); + assert_eq!( + check::coinbase_outputs_are_decryptable( + &transaction, + &net, + NetworkUpgrade::Nu5.activation_height(&net).unwrap(), + ), + Ok(()) + ); + } } } -/// Test if random shielded outputs are NOT decryptable with an all-zero outgoing -/// viewing key. +/// Test if random shielded outputs are NOT decryptable with an all-zero outgoing viewing key. #[test] fn shielded_outputs_are_not_decryptable_for_fake_v5_blocks() { - let network = Network::new_default_testnet(); - for v in zebra_test::vectors::ORCHARD_NOTE_ENCRYPTION_VECTOR.iter() { - // Find a transaction with no inputs or outputs to use as base - let mut transaction = - fake_v5_transactions_for_network(&network, zebra_test::vectors::TESTNET_BLOCKS.iter()) - .rev() - .find(|transaction| { - transaction.inputs().is_empty() - && transaction.outputs().is_empty() - && transaction.sapling_spends_per_anchor().next().is_none() - && transaction.sapling_outputs().next().is_none() - && transaction.joinsplit_count() == 0 - }) - .expect("At least one fake V5 transaction with no inputs and no outputs"); - - let shielded_data = insert_fake_orchard_shielded_data(&mut transaction); - shielded_data.flags = zebra_chain::orchard::Flags::ENABLE_SPENDS - | zebra_chain::orchard::Flags::ENABLE_OUTPUTS; - - let action = - fill_action_with_note_encryption_test_vector(&shielded_data.actions[0].action, v); - let sig = shielded_data.actions[0].spend_auth_sig; - shielded_data.actions = 
vec![AuthorizedAction::from_parts(action, sig)] - .try_into() - .unwrap(); + for net in Network::iter() { + let mut tx = v5_transactions(net.block_iter()) + .find(|tx| tx.is_coinbase()) + .expect("V5 coinbase tx"); + + let shielded_data = insert_fake_orchard_shielded_data(&mut tx); + shielded_data.flags = Flags::ENABLE_OUTPUTS; + + let action = + fill_action_with_note_encryption_test_vector(&shielded_data.actions[0].action, v); + let sig = shielded_data.actions[0].spend_auth_sig; + shielded_data.actions = vec![AuthorizedAction::from_parts(action, sig)] + .try_into() + .unwrap(); - assert_eq!( - check::coinbase_outputs_are_decryptable( - &transaction, - &network, - NetworkUpgrade::Nu5.activation_height(&network).unwrap(), - ), - Err(TransactionError::CoinbaseOutputsNotDecryptable) - ); + assert_eq!( + check::coinbase_outputs_are_decryptable( + &tx, + &net, + NetworkUpgrade::Nu5.activation_height(&net).unwrap(), + ), + Err(TransactionError::CoinbaseOutputsNotDecryptable) + ); + } } } diff --git a/zebra-test/src/vectors/block-main-1-687-106.txt b/zebra-test/src/vectors/block-main-1-687-106.txt new file mode 100644 index 00000000000..c8c3f5443aa --- /dev/null +++ b/zebra-test/src/vectors/block-main-1-687-106.txt @@ -0,0 +1 @@ 
+04000000f1a919374d4ea8c27be01de057fda89b504a680b1212a44a02a5b60000000000260fb56800153f20113cdb8fc3becbe547712adaf5910e4a1199ea3f3216eb6c92d9dfc92a527212e5bbec9f472697beae80b084cf6077b21a913cb8a3e3341bdd559662e4ae011c5360d63c000000000000000000000000000028000000000000000000000390c5fd40050024db4ac7920ff1c55b1017b3e6bd9c328e5bf9792efd52ab3410d546bcf948d958f04602a2097ba4d20a2a64da9d186f574a53b3de4b63ca2d556aceadbb1442b8f5b6a8cbef6549583ef46bf46288b71633f505df14cfe69b058554e6c075a555ff55656a7f1fbb4c19f2a968d6ca25f16d6537b45ebe6e003a788ec10af9341a68df4575d18f896d0c53edff47435e821e1a0b3442686a92bdbbf203282149da7d0f78edad5c00f7a2f9f072b4e7f47433101f7e04f4e4812c7add312b4fd59de93a777fe48829166d798b4a341d42963ae16353c3212a5d1a63c433aca4683e5ae93a91be46f09e4d2a63fd634e87a64373e91d75ccb85cd4a2076aa8bb905853bac539992ec67535467571db281b16528be572de14bf77d3f17f1194275637beb76e7c0cadbd1069931c1aaf07d243dc1dc0fe83e938047a10be72b416c9eb51c605720f11f21c5983385174bf01295c3cfe9b23db0c47b126ab585d9321131bf2c60fccc6542744dca73f38e3ca495dc0f16f9998168a128ebfcdff8b2c753b91e2eed3644ea6c5fd3e49b93aa7c2567c103add2645168ce94851e28c0af7971d09e60b1f7b5967a4dec8948090adca0d5ac913191012c721d1410e900882f9e81470e31dc7227939feff18dd4713a94bcc958f9553e074a730bd50d833a55475975542c9e1e21bf7f289439871c8fa83d298e332083dde879118d607998ce6ca03f163fdb9e11141f61860f326d406557e6ab4c938c9d680b6a3b4793b741504d580648c9582fc9d325a85e072f686721888e629af99e27fd5a65b4e02b5a9795564722eefd67f5a1acb39a56a549f73feb9856016c0c5a6ad46fd0c4128c9d560d4113821795fd6788fc23b2ead0b5792f82b86ae72425154159ee2b49b61e98ef611c537bc7e61a9f3727e6e061b8c1af85bb6755d6e4599f817e5023668da6570e953c84c12c0aeb36d68cf0e14d34409b72a7f32a8ec6db73d527a72725e6e2f11d81f7e0fb8656c6e57117b539de2f8b75fea7dca719df98a20b36ca759d1c7a793735239f23105b88f48d0951d032bf89ed224851de6868386e0a60ef213895ab62a2f558354ed349233ab573376fac9494646325598bf0e620d57fdcd7afa7d051384be4f2092229797dfcd320194d5712a2c3973ff086b96699cb2c53a5c7b7604d70f7dcccd7817921454ecefa9d315e
026f6826e30fc8732b562e1139c712a047af7fcbed6d1b817a60ca4e3efd6294fff8b27628ed55d2f20e25cebc57023ca74099f2a00f5b82db5ffb1b3ada24ef6bc19a50ddbf14b3a47f141dbf9d5ae98e174d9fc53be5952376af5f5d31454db81bb846f22ce57f5464ef6e53435ec923553b86ee9f422f3aae3f792a734135abd3038d536357f64c2b9882755183d6dcbc96a35b174fc0536f6bead9c0d53a7ef4656226f40ea776b3cbf330a7dfca77f03ce0fcfa221befa35ebb56ef77f81f009ed9a744b63ac7da6437111ee2578e2d8e73fee451ec0b4d761eff721f76c57e3f76623573f4f1abaaa14340b0c0193df43b42e044dbdb044eaaece9a42854069950e625706d9eaa9d5bf29e439e9e2c1b733421eb2a0fa8d28b751a73f1536716cad1181b81d9a7728b62510ddb540a5bc83229de6bad700f965c87b720944394cce9998b17621e472d365c4c5c32f585141185ed993d0e6676dcd7471f282e4f345e0655413681d537d7a91933941e56848a54271c80b9c967cfb3f3370c70c47e7156df4d143d172245c7d5f3a7745e40c1f43c182924b39f7bed1505220d0624673fb86164bef0ba85a1c3ebb3fa29708837cbc31e6b0147fa61143ca496a06883bf62db22d1bc4a79d7dc98c9bd81bea111d1b90e4db784f22b47fbaeaedd54ef85b6605050000800a27a726b4d0d6c20000000042be1900010000000000000000000000000000000000000000000000000000000000000000ffffffff050342be1900ffffffff0420c2e60e000000001976a91447c0a56cdaa37498b91cbfa3b6bc6cd86159e6ac88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c9087000000050000800a27a726b4d0d6c20000000068be1900000001a05c5685b75252c400119ebc80878f8003e900b6a6ca9febdcd74d97a129e58ff6c66e590b881722930ff701bcd5381b7c82ea343da022617b6aea98fe9cb8c97eced1a714d6e8a649160b15ec659fc001e17ec405224389505926a0b4785a3401814cc194f3fc5eed11f6b11a3b5eee6675273537413c17a9da5411b832177e91121bc9572146c399626617403b55e502a941ab7fbff6708f8a417f24b98ddc582526d9e36bddef4234a4a70c24874cab827faf76d8b628065ee299dbdf728b56d152357b271efe7633420eb52d3dc381f04ff0924a52a83bdd0cb21ae393fac38b44aa95483da277f78054b58f7739f3fd4d52b7a2b980702415438d6b926d73e4a5205fcffb1d9c94e49638d796ee4fc164f49656fbb03735266dc9396ad
2296a40850f91a0d0e2ef331ee7b73ceded08d7a183997f3c9de5d6fd0e7d2bcb5f0286128955048886b22e48e45e7d1a36e521d10eeb0b2e3620c99508213e2e69458ad1b59c71b399022092fe074ccbbb44601cc3ed3b029828f7c387e96dbfe02fe55a91db9d4ba22004e7921dba6dc202f559882ee8e0796e9fd28415e6301840fba7afda13ed92cd533258ef70170b3911f0bb67f03ddceca9d1481d28ddd3e249835052408499a6e84fb72ca33c95fcfd7bfb184ba4479bcba65c0f606885c546df02f373d748d2c3da3f7b8467674ddfa1ab6cae1c70c968983511669ad23e2c79d63bae301719831032e14333f2e50de3548c5a97227af4463fe89c8d26cf5b075fbc20677a67b629ce40466452071800a553e35a035e9f33f0a35736af85b617a95b828df0cc700cc9ada04cbe2c835f7838c17b87c7704600ac9e4469520395e64d0c3a1b3adae419c43671aef6e87b45da4e1993d620507f1ed7df0acfae3a0dae723ab8121ea71fbe90cf19f85685732538f89b039e9bf70684486b3d5c75777a2bf03d5f8c5afc0cbae11253a119e894bd7712e68c6489c9479163d3edabce29fba0d64350e7f95a688e4d138365731baf8abbe12fddd29157ecff161a43fd54237055bef0a734c5a05e95ec8e16aa2a47d485add9f1652f9b23aa11417184d10b5430e0a212cfbc86e76ec537f361a361fbd80286d6dc529528a6b29f52798cdf0397eaf892f6b7e2280c520521e3c92d3c7cd89d26965839dd7dc3ee0bcbbbd8e61afcad546e18537091ae98fca9e8030000000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044c92b7b64d4d49babad06049c4ea3f80fab97a0dabd0efbbddbf0f0da587f1a283f08751b05e84f9d94072052e3ef5daa38be31854f23a7addb15dbcee78c726b633d11fcf282df523fa60109e9b331488017222a74f594c0da57308a3d111776e0aacb93d56966cdc9ea880dde72f8b79044aa39effc78e4d2c096ffa32731c9e38d2f1b0674e653d6a4a3cf5e5a2b1cb9621ac41231cadb9896658b0e4c4f3e1806bd6cae2925406c59e15b62aa6eb3472114eecf6603134864d28d0c2c0759e30ec5be5f7cf2b7926e86a7d4e8a613caf063119e9be48064f64356a8a36a1ca6a1577b0120c536a68f70f56a32935e8868418eedc046fcf8581ed3c182e2c04a982d1cf6d14f5f3c23ad71be8d39a9c424171405cd96c9ca5a85d04b7db74ac552ec1e2f902cc0d272d05d47aaa203f966723939d77eb8600db51c1c8373e70825bd88184bf16f2cf6a9ac98790aed138882f0eb69d3d5d4e6bf4bf9b865bfc0fc5e7b54d184fe88c843884cc813773f00efd39d79067dbf8d67859c3afe42bdc041cf567a25d35527db
d7cfd4253fb90e4e90c570202824eb19faa1a1bc65b45ec99518d1ea5515e8e1f9139443ec4df871038000279342e980e4b354270c70e3b78749696f50aea162c0e52df94f6eb7b616a46131f9300dd00c251ee9b8adbf98f540c32f5c19a3c929453db7c5117eeb2fda9291cdd5314b2d6ae9f880000050000800a27a726b4d0d6c20000000068be19000000011e547c4fb447fa288beea9f9380e98699b1e5762ae9a7dde092eb2db67df01ea646049eb21b596db882e7758452953b0200473f0dedba353ff2f8e7105dddd839eab06db43d6348f0f48900a391bddea17f3e7664e7c532c6a6600ea21e34032019531dcbebccfd818c7c4273f38f09a9b25cbb6b0b56781c0d9bd7044cdad2f4febef39a9df875e1f7ee6fb12c4e5d186a4a4a642f76fd7497cc7cd9c4691872373853d405cbcdc1fa3364b9653c578cc2ad6c305351b1dba8949b9af16f68ef122fc27195386479c247f6673d4fabd87c943ea0f9d4f878d287af44a36758bf174e4572e11643f2b6afb64f918ea98d303e0d81b35cdc19327d476a6e4b0ddfa392df0edb83dd97d25eb2e0521d0ef62370fece4ab49ad796c95437ce830644272a8c59a18fc9a1a2f9dd7c878ac38aeff427fb5d8ee0053fd78ec5ec52c102caebf1b4c99785e2885d4c1fbef927a5bf6d4fef2eda6379a9d8757014e70583f6bfd94f82fcba77c4d36946fe58d8d011dd4e37f71d36fd4da9accced08abc61147f9cfa7d837c0889df43bae9151f346bd9120e59b6fcf3d2ea3940a5774abdb340a0dad4bcbe6e8f625e1b0b2f210e9f29994bbcaf57318bd03c9992cc7aca9307113c1eac75416d4415ddf01287c4536615f706ebb0058fd0c8a1164381ae14970379b4dca5ab1ce981a8c05d140926e45eb7da02324743c91a9b96754745db42e0864edfbeb02e3a91c7d208d6747e930428b216429cfd2578c61e08ecf8eb7603d486575a237960d016f87acee0aeb25dddf7873cfc465305980df797fe192da22ec371dde3c3e6bdbaebf1193afd294232b308d2c3602aa9b19b14e17afddbf9ba60427f43270fd57044a88a8807266f2f01c86df79761b5bc66790d4e7ebbfbdb882433b1ff4a85d0e9a1cdef6f8448f187f835b87b343b53c405479c444931a13a7c291dd5f12e818a5d36558d8970707dda26a23e09aad21e38a6a973464f201657bf8f4018549f2e9de73ba1159a4691d3409ac59c16bf083fcd506a85b913ade6f2ee24ad8c4c0de1c0ab175b4ec554562b3c11af90392142ec8d501c9eda386753bf2ebd3c7384e1355aade8507bea0b3c42366e073416dce0c24b6574ff30d0f05bf60403953cd2ab5f7894fab23bd0b0ca325a3fb7043f4a2ce82f0486e67cc4087669ff818baabc906d3d4786e8030000000
000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044c891f575953d48bd334567cb6019457d6ceb326030261d17cb961b070c385bf282e6893b33c19499888a7831356ce6d319424d36ea409cb6fe53fac5961779d4142cdd466fc94f6a7ae16859ca99575692de0490ae6b48fdd1cc0d5ca7f17f5fc02abcf9fa1f56e45eea0a2b721e79eb721142220cf7f634ba7d56844b2670dcb225eb8cf0848bce60c86fd3466db3044903ea20904d37c55e56de073b9c30413c6dde674cd102e6b765b5f511e462312b81414ab39a73f0469277212345691a30a3cd0dacd002152ff966a270b0b8833c1ecbc4fb8d5b8c295c020bab5abe594b1ba263ba86984bab5841fa96016c01938cef0cdf87b329fd51d8b31f753090aa882402813cf5ffbe6633fbb0d750e6bc73086cd11c71fadde8630940517d9126b645415c6df4f9d4049b829de95eff7abce9af7796a64564594ab8d5cd617c04fd98cb82fdd718a45e764a206c29cb485f3baf91f2ca1338923ff81f9054aea066bbbb5320dbd6a0e28d9818c00da61346826ba324cfa9f23b9fcf722705b7befb0a1c796fa3e91b56043ac9eaf0bdfa103fe143aa36fcf38a56ddb9822582bcd95789314fd560504fc8e984619319d02e8a0cacb0b13a949bb892955737be35bfb9792175ce3c82fd07890d90a3db0c591cfb11517f45133e173a2a181a3da851799e7d1523512938940f9e9e608c09fcca7c959ea96bbc1478b53b326730e00050000800a27a726b4d0d6c20000000068be1900000001cf21ab54b733d4b33992b08c667cc5bf54e90d71d67e60d966d4b28d82935961faf0cccdd426290469d0ae8a7162b7c5b36ea7401e8939d6ab83b2116766f90032f9afd9611ff0a7578b961f7c6015c14617e444c546f5e099164e7d7836171001e2036e93fb22c0f06089e3ea6874a95444026e22f5216913f9d3dc708b50ff2ce4cd07cc1d80eff3076e71b21f64506899d0c2908f5b1529f6377dfeb0285d64856b87ab4503a4ba4f5c0033b87e92e08b53d5970deaa8097411c2fb880d65a1885c58f4f8983c49302219404c1b29c302e091256328c496cf9fe418059ec0d9149ed9f8dafc80631bbee6af90511c0a76d4728a43a213bd7ec411f4d8df0ac135df46423966881d5aff8ba8cb3e81cfb5c9b50203195910c7ba60ed6e220cdbec6a659c37cbaab638e87bb87c9efaeaadc366c5b52d8642ef74d2d28dae34819b1d2412dc955b067c713a22a88871c524cb2797c9524cd515f520b2554d791649ea426d69b85009a7951b9c5c946c59b8cb8d56aabbe17ae2aa7bbd5689a730f8c9c50b8579c04adf17a0815a2e5085e309a9418491a0e36aba8f2b90692d8d7780e664953def242f367ca42
fa188d524791ad2dd4d83f41753717fe953e964500a2c43294fd91f2c4a2a127d9684cf4a287afadde7cd69d4914c3e6d89d7bf71a70953b96b79b5e0b5e62b636479f0ccc65007df7ae5ddf2063ca357b9cadba8dd3539357699591e6cdca76b29472ae3acd21f417b48cefd2e3dd92ece26b09f9d8c2755b4767ed36b0c5d72238b64db6cb7d4b8623810f34f1377059bdebaa4ed5a62e27ee2bcc3270b60f46abf8078d4918638d09b5fcc40adb46f8a82301db35c3d032f6251f7c125fe662f8ee8166b072e15458fef7b3adffac20a5fa5f448e943f0259c59c083bb6945077ea85d471bf47526bb6ab6f57cf7670627a10daed111611380eea2f369de009aabcccd37af5af5d191873090873a86d04a57892687f946da765ff9ef89ffbe7b483e49114684126866692448b352606d7be23f293d2d3dc329930e201ec51bf0e870f02dfd49d238ceb0095ffe60dc8831647a9aca6f79b8822aa62b4f57364cbd3a00a8f2b277624aad55849434f286242c510d13fc1b384a14823422ca45e093d5cd67f3e7e3f3d079d3eb5570f08f4998ed6d0649f565cf93e842a2550c7be69f391df1f5e8030000000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044ca8f134350eba5e33e9faff89d5717c21b7e97a6680b45f9ee0fa093f4adafa3ad957c5b0f9e71c7ee5c20147dc70d6a99580527109f44a0e429f49083002d228a7fe343e9d43dceb592afce2e684d1aec34b007fa19001b48d57a502dbe139dd1126a65325b878841b6fdb741945ea3c966d534f622553453f63f6ea7123efcd8ae7dcb6a2d64f4333b61435fbb7ecaf864f280dc8ca2e1df6daada638653d20da8a88bc8a89c566cba74a714d05232114d9ba357fbcb630b6e49de2f9ae63d002679a593a1f4704d9ba3b7e019ce720877ac7b53cb01a63f17f35f2eca3cacf79115465a9a71439aba805927fde2c8dbd2c43d142ad4582b3b7d8fd6eafa50d8c86daacd720f5dda68c0b39ede9b6ee61c72cd6d5f3bba12c560d0af9ed01b777dbde30ca76879edd117a2332cf152b93c4aafb6707a1f43cabc5463b10e66d0f83438c77a204b478d960839bb02ad9718129c3be621bb59e703b755c3aa3b40bf151641e1ef672ae0ee5476f2249d49e95dcf1875cf1d5e1cba25239421aef645322433c40158df6aaaa7fb392484f803f87d0825102e5cf410488182143442ca2df7d3fb566ffb353cb12facd07e9fc16c3093a960d540cd99f18bae1e678a45c2b3eaf0acc305557051c868bed170298d196b9c436a261e5d550ec18ea4f0aa87e9b526924c1f2f3d200edd65d4544a549285f94916981f2aede2adbca0b000400008085202f8900000000000054be1900e8030000000
0000003d39bfc6d4889111a8aeba6f013f3be13f02e78751d0e820a3121078c60de06a3199c225ceec155f241c4d7927647ecf78dea3ac697227b0913ee41c8f2616533ede97ca8519733ba7264dec73852971035dcbf3f19d480793a3ead3cdbddf19160041aa9ec72aec4fea4911cc642a9f10295fc53cbe5e371683a038524716ee8ab45319b07274b682df263a854a9653c4ab8e1e4f49ff99f492b2187756f31dd62bfc0dcb02df90c41eb0de0165275f391061f5e0d64b95ff0496a9d9a7a1428556e7f468a355c0b7fa31b5fc3711a12a4cfc798c9bda782249f3b524b8bf22b0be11fad715bdbff2769fccd7b091e5fcad2e153f6b424fdb97fafcd96b8fdca9dcbce5f97271802eabd9e71f7d67daf987c16319b48d4acc9fb44e19e6d64c0606c88dfb640d2e53c574b5c9fdd1d88c0ebb2b7c4c4e8c75f562140aae8b6908aa03bc08aa2380736137c5eed3c86ab2eec2b9fda38755d3c49094b8e4207d544ce779305a6be4c741334dea0018612abe3b40c062f4e78d475daa09eea81058ed9370c7bb3a71416d1f295a66a7362bce9db5f98476a117ac338ffd75df5ba199c225ceec155f241c4d7927647ecf78dea3ac697227b0913ee41c8f2616533ef41b213c86ebd2373e1f40ca2c4a0dd1c831ba2888cda14eb26dcc074989e3a08eb84270125ecad990072292a0e3fd23022dc8e9086e326b4641b6ac638d7719847cf28f5bfc135f45ba1a5a23ef347ba62e8d7754b9645f25c6161cf816d69c95a1179f2931fc0883c90fb66f46361b1f354b273d91986fc6a0174333b82ea702ab650684ce0425c809ab7ef170e8313db8be8269e6d24a674c52717c94047121ef3318b6c17ca4e63d8a30d1db7ce56079012eaa414b5f275e3c7e65acf5333ee9791d0fea151395faf104fc27fd7938c7f53e3b7c937dfde01bf82ad28c7ca3560d42f1f0c8185b790cc690dce552ddbc1e4fc2fa4993537891794cde811ddd5104d3e4d51b94d0db20df715769baeab11c1fc70aaf5920edba23a5e4f94a62fbc4d96ea80a78b6bb225b66371ebb01bfba077db2fda5f05eab8ed1e1b019d6a7ebaed51aedf2f5905cabd724040aa33f66dec4a47560a2d5a5a74969729199c225ceec155f241c4d7927647ecf78dea3ac697227b0913ee41c8f2616533b88fbba68a91e8137f39b32fe7c339bd7aa232bb0edf31b34cb14f7e959d66da34f881047a22849adf921a47dbe0037f018e555908c9b5cb49c33153ddee8431b4157b604f3ffdefb9b44196ae410e0313039f09db38209f340af0c76c84b33d140984b2ca471752e54ff235622eb6368265593228f2a5573bf3d572cc56cda5b7fd295ecd31d1500295b480ea460905cd713a7bdeaebbc7db27b303d08ced4810e306990
77a7d8526d4025fcca7dab53724a40851bd950232f219fd0022b1ea92b8a446b773ca78d84984784727c0d093bbdd5934bd544c53b390121fe5cbb8d1bd37b9dbbdcf70e4e4a64aeef71926812fb9af8e39acf28f26f9d50eae5de60abad45dcfb3e3c2664ea0aa30271dc2ee6cebba2aee0caad56c78d4dff523b10fe66e6d63b86e7ad49b02f8084f512088c613e84fb1f112d29eb9b2656ca60a02141f49aa39d025d56f517d72f8fef84f77aca218947013860cb93977059975c8e6b896e35b0d33fecf96578642ad937e51c4c6e838a0a4fcaa54500c9ed94448b1e14d2c62a9547c0049bc80dce751c7e5b2c6279c41092b3474ba29fb7683223e8e5b765dfe2f75392e542a54b79a30d8a9817771441d638e22c85fa719df94deb20670f0827297c1969f56d57c4c3fc5905b87bc22c90fcdc91a18cd3053bc859d0e625b43315b2f03cd7e5d292a649fc76c07e772b1f79910966048e068d386f8d441729bf37ba8748d9c9a76c2ed94306233afd81de9535873d67c04218ce9d7b7bcde9d2673ddecbdd29e55f9ac48569fe85423703a3412d7901588b89616c5596dd589baa10d2beef0c2bc54699a1989d4d21b45d61db8212b4277cb8f756283b5e79d36866f83c771dc3d797f43f6a495789fed6da50bad73aa045023812207df9b02e9f5dbaf84d9ceb97dbce8ff141a3d5ea4f8aad5e9733e85a58562adec8ac01c13a2e4bbed262b2d672b94a7b3f3ce40031bca84276a9d303f7ac7f7f0c391684d49b323fd7a8f9f26f54baf7ea110b5227d67740bd75e0f5018af522616b49395de9cba46f0ff52e29ad11c457fbe53626983a503aa7c97c5f1ea4edb6e189874257718924e91fb793fc25b2db25823b8dc767bd08f2b43b4c0df036992a75cdb352701d9c28950fc33032bf70f20b16ba55b7e95625fbf367f4b0d2224de54770314f5252238e47449bd9891644c60ab33bd702fb0eeffacb38d730fd1ab05b49e8d1a8c4652953326ced719a9e2da0c15f232f3894914405ed87f9f9e37e1316bdd52fc88c5f7a9e9f05cfd79f3cb37e74aa40e4976b6b171f61cdc402b9015e75b475153ac58c5a9430fa04482efd475d338a5710bdc3bf19caa9a2647de045701b12b41b36620d7f9c0b0bbf501ee38d453d76daf64aa52fb4f9fb1d9682e063e174d53bcc744b6dc9366e68e97d85f6b8792261c3b20fd3fe5df3cad9b0835e25aa2b30407a40d93b1beb1def3c85fa01a6538383047913b5ec9e1b574bfa1c001d7ae9d83c58833b9ccd3aecfc3cf4e129caf88bc356f9783702d29fb9d6d961b00eba18ad1fd1d6ca65af91e3353a4d9999254567d5110283658b426fe565807c5cd096245eeba829e11a10d2b5b97d6c0ddedc172512d65b86ef8ff01aa81956ec
80fadfefea6bd4de00cb3939fe1690d50479595c378063238eff95d347b7ba14b2e59620b6c781593059190e68c87a767991dcd4ea9e7a832af9e72dfff64fabd74606f5f98e9ad48df83e65559d0cae987fa5d9a97779eedb86c60cbe0053743192ddd468cc893f7bce67bef0435969a52adaec8ec3ae7f15f8078ec446087a8eadfdedd9f1e1b0b0aa118583932f7dde9c24cab3618700a0b3608e1e14a7988fd38d10101d5ed0f493be0f136c7c566fd9e1274d0e5895bcf68185af7c5fae67c966961c4b6b0d05760b0f551d1c76621add43b8501aa9c7210bf2b420fc57c5e4a88cbca46cb3784beab8f61cf929d5f209058abe777ca5359c4171e58a958aeb9ce26878eeeffce06a2dcf8e1e85b5213c38f2794d77e0319d60953f09ea132709a5cc5a717842c169bc838a463a532ffce61173dc1c398bde8514d7c3a64528f65ca0277a874de4d0af7db3406fbd4e30b133e01c1ece29a689408c1739679d0a0258d1b005a5f95a51a630f5e33493fb3623dde48b0a9d415e5333d140f9b366afcbb64972de7b6f39dce521e9543bf523f711b2643c08102a7a926cecbf7cc2ea26b8b51a74baebf90331c79c42e09a38b1dec349ef9819c0da1a99a36d83039337a2329acc5c193b8eac784f7b74eb6f8e054e98a852f8ba177f642495b582f11f7410ba0f02f8930d7c2a62978d3a9fec1f25f04396aa97f39dcb3b66c92241393c42e078faad1c857e87020287b884caeabf4bde20361a7036616170c01537d06f41907c83d4aff07f8295f4389d36d4e3f94a5c77825ee7513e883d1d1be0909d58d170687d40f45ae8ef030c4fec5b4d8d5fa7f538ca975ce67b85cd79b291a0c66c7e71360f1409d94ec1ead4676a5a749d6d6e1c31823b968c78abb87716ea4c27a3de6a8c0001abf4af419ccac51a43dc21c88f869279124771135af4f2c36ff6bae3f82a7d1cc01a3234131aa875d0bded31e7511fc044fada6b2c7c657526bb44727bfdf4f8fea13e4ed48d4a2b6f5796dd248daf1576e42f76a7866e89e60e8dd5534018fe4d21da28a1a3e57c4a3cd23daf436536259a7f55a83c0c1611b4bdfc30d734876a66bba9f01c56adfbb92a2e6520030f997d1e2a5a8fd88ee54e1336f4f9ea40375f850107212d8711f8aa0341e0eaf562f8a87ba475e0a25188a78ff65c786509b396f5ed73d04b3da8141459ff1881df3705c2fccb98977ebf8d7fbe4c1f7a3682303a71d7c8bd02fd517621f9256cc35aa12c0ddadcd9e782374245e488e9b95b69e4d876d59a7539c41f437e6b30c559e83d5b635171c896cd8342553756962c43eec62cbf5b4867f39726e390606ce1af4c6505db6201bccd4f07d1ccffeff63d12929b1bcd1e97952435c7050e99bbfa733fea65e6dd4a
10b7b49ac9769c410b3193bdaee1221e94a11bccbe7e84b91eaab4fdb93c4c4ebf0cd4f79d00bd1d0ca7f9fcd82dc2e1d75af751700465becca16e7f963aa4e9e47dea10336aaae32425d85a9d8451bc1a65aa6b067051461908c2c049cb376d79047b1fc3a3b68f3a75a70a06f7c2508f8cbf04809 diff --git a/zebra-test/src/vectors/block-main-1-687-107.txt b/zebra-test/src/vectors/block-main-1-687-107.txt new file mode 100644 index 00000000000..611a89d51e2 --- /dev/null +++ b/zebra-test/src/vectors/block-main-1-687-107.txt @@ -0,0 +1 @@ +040000006b51ddb8529693b985fae432a309105ded97e9d27bf2f70ec5407d0100000000b636d1feb31d83b928bbc9a6e817be1de6264f0f33e85656b3ad810ac07bc8b66ec5d20b3be1dfc82409f0665f79d2bb426406ef4a9922890a891d73aa74b660d25696628aa9011cd82df37800000000000000000000000000002500000000000000000040061274fd4005000c0b400606ac767b2833924db7d681d717194b684d9d3cee11e8c8c7a16446f2c851883a75f2d678970fef6fb089b5a945cff9937ba772d092fcccbd0ad849ccc38de2dd9fbdc25ed9cc2b58fa1f44c19d2502032023337e82727d0a6b82b735dc49b5621ad8ed323d727673bf6b167fcf7b06a4c2742fa9da013725db0e0de353ae9d7d91a86334a22af9e8f9e0a91b42f017e54e21a31ff0c9f7bdc95088d8e78b7b9bff046b02b1d25a12308da7b72b720a9f3efedba226bdb219457d92f16f6dd73f910ab594824fb28259a91dca771782b2a4c9b41ed3df81f500bd450ec671d937d67a30341fbc061e2b111f56f4f116638319d5c294586c06cfa8424dc60b155a1a42f976d80361c4dbf94c3125b2f6de92e53e31cc06d5fb09de3a937fcb9c367216bbeb8396f93d0fcb2ea365cfbf0b39679fab4bc11bcbb7fd42b0f249e8c57675fd4bb771c2a71e8fa901969108354788c676697644deec880ef3ffd9c2600f23ba600846bd0cbb87e14dc3a80d9c720cc713ce19cc862dcd935f6bcd453ae9ed5cefdeddc61dc2392d8ba320b71557e915e0d6da82e388e5e92ffcd7690d2310e9f9a747b5eb6d719abad6e3a2aaba5f2dbd12af4a9f750f0f3ccede45d4293f0a26dc281e112f28e6aacbb8ebf837de2d63822c43e692d653f8d29735c82629759dce65af33139b863469555df6791e500495c8e76aa51111b175a13c68ea5b1c7bedf72a35138537b9c48c3989c72862d83974c3b1e4321cc36c06da5a1f69c713cff8d24325714dab38ec9d0b24e1122048de8d6738fd956e7127089ea47789dcbcfe210a4e618e774cca4a6830e501437044c687b2d61184154e9c24598872
4f5ff5f34058bbf8a0f09e93069f10d94c1399da5a7efcec88bbf66e70ab065cdc11dd3a1236e6dcdc23df02a964c066cc7629e8841e4f16008fd0102bc62be47911700dbddcb9325249d32f9137c47d1b660facbf505ad7daf5753ac328137ccbf70597650480889636aedcb5ab0cfec23697afbda35b0b5f959c7f4f121a9a610215f4d4f07da5fb747ce117a750e2b94d6439c425e542a6c0207969dece948032788f83285ab93911d573b69eb8754d98c41d94d81b1314d2b52dccbdc2022217faced9b676f4fee86c524dad0f759de2d1485ee65d777545533458dd3e4905f2ca3ff45abbe5c91a616e2ba6d0fcd6eed5ddbe33147445fd637631948c43ca592bcdde8ff2b90c292cbb94b1b38b95aeee50dacc1474ef7f8b869e2f536f841630d0aaf599cef0e92bc3d9e7df00ea1a50d708b3df5c930435d074be909e1d160714aafc3a970719816423a3ec6d7fb2e11782e06491936d5efbe80e21c3754b2849e997480b12462bb619052e4cb97857299dbc6ca6d33eedb02aa8993adfac535d0b5cf6e903bbcb997a52007af9fa40fe0668063c6739ff61ae06b87a3dae0ecc84c5db81056250f6d4a0ac7ff2c808d8225fcc6677e985974096867adf41bd17d0c5ff11f4df2faa0ab746d68052313895c93151745d0b7e06c7dad1d60af91ceacb5712567b5bc6892b3ae5701f92ee048c9a7bd17237a47ef13b720d488ffd026d1b1cca62aa665be957efb55bc63b94c9a3a45bc5112560cbef42ea02e16efe63cf9eaa580da7da50b56207389054570cb7636d9641c232c5a33119c5308f990c3053e7a7e9e04d66a724c3baafda9a2e311c3de027ac34e4ac0ea2e4cbb2d365d423d19d723d1ddb7244994ced61520e9779c547b4a24d451a0750b0b7542530e1a5e7cd73d4e7c71735a74cbe8a6a793836c84e86bc14d5238cb5b269768ef3cb984e751edb4eaf3377756d541b295fe79a4850a859bfee5b4d7e2c863b0e476bebaa61d99bb655944eadde7e298ef9bf8606050000800a27a726b4d0d6c20000000043be1900010000000000000000000000000000000000000000000000000000000000000000ffffffff050343be1900ffffffff04589ce80e000000001976a914be62da59de8993dd79965ddd27629b1ffe55e46e88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000400008085202f8904dc0c0771d136e2eac48fed3e3dec85118ce36300fb405a05267de99d051a55f6010000006a47304402201c0316fa1d5f27f0a3bafc4fdf85bc76b1e767
f787d8b8858a8c83c8782e87820220598dbbf190d031150762fc3bcd5d7c46f25fda10fb9dca04cce679061e7a66f6012102b469816330cc955e79bcd9842a4dc6098989510c4cbbd08688ff61c7de53098a000000004d0a4f9953e9cf00c2e28c1d791fab40b645e85e6f7f4aaf6b7e038504bbb825000000006a4730440220459c10cb81e5cb18422955c3abd464af15e005cd67f9670b8e7c193d87787716022002a3c61b1ba5c89bc4ed56805ffd67bf9c09abbdb421f2c00659887b26e1bd0f012102b469816330cc955e79bcd9842a4dc6098989510c4cbbd08688ff61c7de53098a000000007077fe331f4e1517711bfce3bb48f72bf4d929b8c83aa785c5db0252d806c1d3010000006a473044022075c52fd7a1a6a50506ac320db8811dfc0944b94c17706d9f508b17f5a05ab12b02207dd34f25e451f4354320a20ccbc4adfc9aa23f4072a2e59f048257a85a8d2ef6012102b469816330cc955e79bcd9842a4dc6098989510c4cbbd08688ff61c7de53098a00000000501172a526ad69e3f37d4a1631c7404c5fd8227a3f7af1a159d9aaee0ea7dbff000000006b483045022100e54d74beff92fec8922bef7f038686303b0fb64d08933bc999562b663fb9fa0f022063ce5d8023955bf2da48ce67241f80b89562f10652453e3e7089faf5622ee2a6012102b469816330cc955e79bcd9842a4dc6098989510c4cbbd08688ff61c7de53098a000000000200199222000000001976a9140995393a8f8da4d2320dcd13fbd32fbaa437270b88aca4a53100000000001976a9148ae59adcb3dc033c6962b48a4c108d5847d672f388ac000000000000000000000000000000000000000400008085202f89013cad013ddd2a0dc565059ade33b4d5c9e6677fe274f535121993811bc2453208010000006b483045022100f2184f6fbf99dfae56dbfa24ad4a2d9cb3f3424a0b2bea4ac5993c1a5632c0fe02201a38163532969a74abc02f6f6fb5892b7481bd3f96051fbef2fd57f891ddd5e50121028e10fef2d8834f196b40f417616514e50035b92b93eab7620f1eb348dd82d336ffffffff02a0860100000000001976a914ce3171a425e119f3430b3766715979e885041ceb88ac1ce10200000000001976a91476adcc9ed36417fe87b9ab59302281694d5023ac88ac00000000000000000000000000000000000000050000800a27a726b4d0d6c2000000006bbe1900020404679c0535c84751871d4eea5ab2aa4fcd403b64e1907f2fd4214887f98d17000000006a473044022050bbe141dd051afbe16eb6ae78ced380edb29df5d29a0f3f225d90aa33ee78d802207993449211f006bb85c7cea7a567a85f4f050931dcdd2888fd6383d8012b9408012103dc5bdbb3321d6f
1db1d4683b259e66df141a4c76ce3af96492ea1e44331d3132fffffffff298ec8d59fb36109a9d25d3d3b80e97e07e66774b94b275cc1ef27a728ea4ea000000006a47304402205c09f935703b1a216875aef97696095ca8b1ddbd614e1dec9c8458f2f3d7d52402207df1dd8890985b394f250542858a53a5f8e3d922ce903e7789351a4be49fdc39012103dc5bdbb3321d6f1db1d4683b259e66df141a4c76ce3af96492ea1e44331d3132ffffffff00000145296811f8949a77ef5d5dbd5be53bccd0daadbcaa761019760f725d0b39f3698ecaecb21b6656babd50626ea16a897e9197d4702f3c9f297367729c3f269668e14c10af3d214c0ee42043688ab5d6ab0581b1a505f8ea68ac00348135feb596b6b06476ffeb0b9e162aef1801c80b47c59d1e1fd1a8b6744c4695745604da7473c6d525409ad7f285be06b54ee87a993c357331f29f7fd6c45a0035fbb8966e6a85a0c122c48b6523c812bdabcc2b89876ea19efa61de5a6840c0e629e8af5a06e8c8309677edc9f94af01933234a4393960587e1764840e07fbe64214e88b593f86248ba1c9812814cf2343ce4d0b1215d0c63ed523ded939f2dbccfcaa5030ebe931e8a5789069560f3a987489deb13d7da657f216375abd360fbdb5fa8543f269f89df5649a0c609ec847e7f8a3e369f936b06f72716cb4fd5d62e2921b81adcb7a7621a3210f18c1b577b11f8be9141d942e0d9775c92c10198652b94c9772cb961aec29cc49d02cc9e5aad24a5d66ea65adaed96dfa99135481e4c47db8e1b8cea574c163325447e24341cec69e187c910d76f6655e5f2a86930af9de249df619bd825b85ad2c80b80d62f2aa1bd3dc335379cfe714dd0f9d3c68f8af76303588a56619fbbfa5299089bbf2b93021ee646a8a2aa35f01ddf02827dd44b4fdd79660d38d6b8a3ee8d768434e4de17914e4a2b7c7c5d46e1509f3fed26b9c24e2c5c28012425bb3191ad81ace5a2d4a5990ffb435a8764c5e4409e8dea71299bc6ccb03e003323f5357a125ec9dedbb5f928414296b9642ceda17004211079cfce4cc2e2cbfaf0f39b1581d7985133a3588d87a82dfb50385ae9e5f1ac143066742c4fbc8c9f1f2ea23aa02b0ba40ed427eb544e8bd91cf632823e811bf31d37f380f0c7bdcbf88fa537bb90f582ef4d2652609c2c66667eea4bb41cd8682377580829d6e5beeb9099a1eb35e7560deea70181446a3ecbd8c9fbcc80079c3ed58cf9735170dad3861b1005fe3f14423bdf017eb6ccfe90ad5e3fc3c9335d393aa2decb0de5340c4465de83193fa3bb45a68dc98a16e2ffffffffad7943b7ee97a25bfdce859829f16a6f306ce1e62024bb3b9264147e09d92822c716dc5bc849c7992abd560743707508ae52e319b43e87fc87
f55eddf48fdf04e0c9acacd0a0364514faa9b27da84656328a84b73ee89fb8a49bb39e4d0dc8c31037c9ea167955e908a8d0cb9b4e4fde6ef8a506c64a9bd89363d713f6ea79cbf9b4ea860150b116e1292a8081dad9f6878f122fccd746def6616f31eea10d27f14cd553aba961f0d0349a4d77b355a80912643be83c9b7e50a3a72f1039105cd0d2fdf482930bb175fb6b8aefde0d0599d0905b151e9fc35ab0949ddb19dfc0b9e037a835da6dda437f2cd8bed33396a9c69c2dd430f52c7e069dad9aef120d00050000800a27a726b4d0d6c2000000006abe1900000001ada61578ff8fea6f5da1c76c2edfef74c1fe4f6810077ecb2c6273cb2fdd318d4ce6ffed04d6114b52ca43c680bc8d53c75d3b7889892c44e61a8a47638aa563b88bfb7952460394aa19555c1f0006d997959e8fa649b7d485bda3a327cba8ab00e0c81000000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044caa199518bcb2f7f7e258e6a99f750546dff515891280bffcfba699afb26417a10f12c7dbe0d3dc595ee0c5a51e8e69c892894c99413eafefd1e6cbcecee11db9ad0cfa772cadf8cfa7cc1bf096b8a6526663ab96ccfb1964ff2c07d3def34eed19787fbc5a4294622d9421c9187a96e9d4fba0042b06b951d3127c77319fe6f439a46d3285d5391dc39320a892c07533a35a8372ea45566ee8106ab87688f481915b8c5f501bb72bbc30a07bd0adb3afcaec1dba69d168f2c41beded040dfcd7e6b5bd94691d50ae8e3d8e8fec2556b3c4be9e17465a0eaaec47e5dc0c1718e51833e833313f781885cc957eb420b961fb46a41fb3bc6e22a24a72ee31d4d409ffc7b3f0e92d61c2ec1fdc279e0012adaa419bcb7d104abed2b24bc696aaf99bc70ec83d1c4f46b6f2e2ac67404698fe89e66d64cbb2f69af3403ad66133190d029d9f727356e46b722268e2bc47d6eaa676222680104eeaed7c7d30dce805598fb3cdb97715d5e3dd624fc87906b9d13b4e4ec6a63989d989936f2504f0a1f70660fb2efc730797bf9d1a2435c8b03c7d0a023a22a39b56844f319b9d54bf1794e542b41a8a44e417521228218da39f865283ae50431c2292c36f379f6da04d2de95567ed9e38738b4befaa472757d8672f3aee83a283f88f2e6012ad7434c9b08e88d967cfc9137e87bafaaa32f1a25cd080d0bf7572d99bb8fa40c23c1a46722409433260fe2e4caa5b66485a8cef96a44f01df68d59eae23de8d022ce23e6175bc0206431ed4e9b9604871d2983227ed4c79b4c608f5544eac6203ae3ee0e66a0f3440a12a046a982b524b1f0770344ff4b651b579c605e83419507ff8df1d95c25e18940babaa6d7a2687c10fef2a6a0ebc17a3fe219ceea54ea3e1181d
4d9e9ef00378c91ed8b8eabc304cd61e5c4f862ab2e76ecd8bff284580fe8161616489c7cef83dcd57dc1555408facbc6462386a50fe88250f4ba673ab3b30b27b5f3b90d65231f260b246bb4841ceb89158dd434fa406a62a2a327a3ea910f4d1fa3b761493e0dbd2978dafacc50d1834f3f562610cdbd3f6aaf3d7a4058fb37902223e99269a0b804aacf10d34426525cae663aef7be059d01076af7356594597f6073f981c0f5b6785b049c8d13f29fcbc88262bd7237b5a67df14194349efc769916970f5dcd904f5208e21fe5c2841a5d6acc313315aaeea953716b72b12fb2a2673140753badae2840e855308205aed3f601a3fde0ebdf5dc0a5935cedabbad4dc2324001759dcafdb9f54ea843429ae8b20db283f378cfc0227f3fe19d41e09f93019d2dcd031d2463b0631757badab516c97136fb289fc3d7017d104c6da14c19cf9260c5ce55206c44c24ad042de9d034aa45949d130e6dcaf76f14f7c316fcba75949ce2caa924edfc4686bb5f3e4f2f63fa8bf61a999b142b41e51f243e0040906f5b8651dc282c1ed47b2d291040c07bcbb5e4f8747f55445c9bad73f0b40cf4ddd506387c02b37423a621023018bb4c836e25c93b9395bb3b4f0c5ff33ad5beb1d2df8253a8ad66ba76beb82b72c95dd9211a543c26ae772129a90fa55bb185509782338248abd982a46890fae06264f5ca9d8608362af3e7a6811ca3bc2c0b95f23444c4dc30979f2254b54e343294fcc592d15219ef5534aa11ba79bb8ae6db89e01c55b877bc89e97f062b56395d15bdfc4fc80813c272269bedfcb5749b0ebedadfb32c30125f9232f8db919d38fb218b939d9d6b7e906f1e68e49b4a5b9c1941fbf21c543529b19eadbcb01fcaa4c85c2824b838e70df23658e495dcfa2ce38b95df925fa8988c8e1ea76eab4c15a0bfd1ffcecbde8e2d9bfcced1a01f243fb8707d03c620e34701fd7a9440d946791fba17a512cc5f64f99b29c675a2c61e6cc5f660ac9ac62af1a839cda0593e0ad46342683496afae7e937c0e2d40055f3c556e432e2b00092628d39ca0df198f2b532e29a873857e1b2d86f57feec85b9112e26b9518710bd6b25476555c94e49ab853ef6bce643554f4cd5d6954f3aca9451d70df8965b19527971e67889a64235dc531be41e1caa6b9cc664791cdddc850a9ce501d2e9e6f368d0f917c72cbc4b0f4bb14e5ed70af06f0743c47aef9c2a74eb9006d9b7916d5bdcc2bb0ae5946c43b4137b1bc18e34445fd49169029cab47f0c89903a0ef1d3689491830be5eb78cefa094e448fdbfdf5de6d5cfb291c8b03665051aa6e74389b85dd12064baa1ff43b13cb6e86587fae162c7b0180f0285d495ed53b30cdc5ec08702c946aa71f6391c96a618c8b5f15e915a07edc
dac19cda64e7fbacf6cbaa14e50280b7ad20f10bfb54decf55175864b7060f9e278f4ec548b101933599e581c018b46c8c64c1cdccb49ad4453da57bd10ab56f6e98c0c5dc6f3f79ba9659c07760460157b5e72abb47d9bf20dd3efaa918cbd538183f6c0f228ca756b27f70395c767d3b377c86fa16350bc317577d063497c489867e164760415bdae6ecf14e7964a13e109a20eb80f07d2a723ee6666db003f4813705bdd72bbbc885c11e065a19dd416cfad59cd526ba464a213e553c6b4a593f621f8730ff1aa3403bec8a6f406efbafb73c13eba6d6ea895cb6365206fe0a7aebb3aa6682a85c333afad02da5dbb1e4d0a860287f5b1cbe657358e9c84bf05ff5c9a2a50a5356ab24c03af284ee1cb0671256d85167e257013b369bd39fa1fa3e158bfe990bd7b0961b4dcde8052b520d3ac8cfc6331f36a2ba1322ab84de03c0bdf0ffffffffffae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82ffd601c483638c5daf818f044248046448e8e7105b0d7ce6a4fba973889c73c45a2de92426f34ecb4cee0072d26cd05b337041feda51d0b15ebe56923b7fac6a026182f50a13ce8e3225836a1d54c0718f4d4ee7a989fff747ad155bd240b40e4434cb71b0ad3528dac6c1fffa79f1037778e05808076169c46e2536dd2b195c40dd5354b065aedc3d46c1476226e8feaa0bc5122ea3267b721a8ae4dd38752f8bcfa390d1aeaad47ac1e9d62546917f3b681bfc6b9dca15a67db5aa7084c874aa66693b35e5e04a12443b71d94ffa4c7109a102ab6cf1d0c6c6fb7996e0c9a94416b95062b7019c9a07cf7f891f7db08e7addfdc7a54429b373a1e34da306ac286e9a780dbf2c3c9444e96fc86a8fe792a41e7ac38f68518c076e018d3ed61a790968089de8412c30dff30c1c386e705852bb7098475bf19c9e48156750a752b3cb8a0bf34a7944400d373670deb0754c6f175528a7b3d8f2084d2fa0572a6bd6f8706badfcfdbd482bf7b1bbd6fe94e2d849e0002d1f4c562131f2683452b7fc2e9b4845b88c041550e9a9cd4425748e2cbe8cdee93abaa24724121c4b5d2e43a043d597234f702a91d7ab25ebebe5d4dc1cc81a1061ff9f00349d6e6b337e12e05075e3c1c294fbed25f3f9c9312e2a0018a9dcd523ac6aa38487cdcb3b18fba40095c24a4b37f882e4ee14ec1466621ca7482bc734307ed565bfbd050024546e4ae648e9a56758b7e369efdf4ee637e8b5d2ccbd69748ca2b35a5e01968f7363a1ff04b6284ad01d27011a0ce7d7c792126a8f99cb2942afd2c12f06d0643b164157c594052f38890994f3c8e476412736cab0a47662417a4706588eedca6c807bd9be1328d46bda76b62e93afb27140c6b91bf332d68608e1a9705d2
4225a1bb2c1749010457c563b7b80eed430c1fad20e78969d9fd93d9a660f55850cdcde608d6bf03d594a69ba1a0c19a9217927ae7913a7846788c61498cdf4972682a7b8bbdd4193dec8c79a4999eccf157a34e6fcadaec69415be3a1bbcdae9ff9e8208a20a334324bad16f81796143395de874a57c7441bbbbd1e7b1dd6c3af8ce029a829dcb95b42a027ca7e78e8ba9b5689eb7a0e80dc9c2ab243de54ce1b897538bfff7fda76a144ac3129a822f9c80f5122d126e93c0216930e547748a158fecf2cad80087b8865c86105bceb51a2c437b9e27e9f46ffe25ce1fb40cd6051d0059872aff7680a15ce48e80e80b9c92372b8bfc1652ce276714e524d4bd89628290b1ea2791bde1aeb867ea6df878fc39f60d86e17cb8d0f4f4e843f173de74e043c339a0e99ea530cbbe11c660b38504044f7dac7743403a12670054dfef976cf1a308010886d8ef0a09978bb691bbb43a3d15ed9ac65cda65826bd7f1ea1eefb0307caa6eda1e8a437ce88dddabbab2414bf53b1ad5ccc29972f8c92671e80f824013c033983604217bca5798b14b9052f43c840a65c34cd5ca6f6e24a4675e7b1141bbc53b234c926c406f10c12f96ea7708be5c1bdf4ad464ac64f998325de25fdab1c5d44b0dacfc14e35a22a6ff0c5cde6d147d8eabc79f5f744c17814e380c05300c959f24c79ed73f45960564cc64bedcb8030fd139c01153d5ccec16ea103eaa244437f07cde913f9132b5f6b8ea63f7deeaeba4f0fdb53922acf890ca92d1734bb49e8a756d8b626ea570dcf8eb8a3728d05ed809e5117046a8b106a9f659421d856030632abff5dd3da975216ad69d3bea9c124cb45814e9abe13e490639bd33b03b523815dade480e6d93215b2527e46ece921a93949cb36dce3eab0c88548f00ce4853b7a8f7155c31a5967651fec7ee8e1fa0eacf525a1c0bf313190196678347cda65e6d31218b91685a728aa29b86e3713d0e36d361641c834ac9227f5a27cc57eb48e9179809d66ddb8e64946cea94c5328054faf1deae30f273a3583079882ba5e74265322737f27621db254695c40483f93d4eeb8e14a602437cda990a00868b6507a612ec3147c0add8944ee0b4eeae1e0f4a1c6c8f3273fcdef51e3a3c346b746736986572f689b2766307f28f3327fd82c9542834f6dbe6572cc347a3cdce164ba46b566954d4f57d2499f77a9656c9fd133f71a21d1b791c4a0c79451d6de65592670c46d1eb96e84c5b60328b6909e4ae245f0b068b71389cbd359db691b77b5c92b3bf88cd7182f47c1beaf12d247992ff3cbde68b9647708b8bc04374ed770740d43304d870fa360612080efb1da04866ee72828b5fd7fbbdc5c5ad06a1f350bf78e099f7dd240bbb034199b47b6d08709dc59bcbe36c50c
c2ca08a75a1917d624151767acd1289f12f8f98b5df140f9530c644427e9f4b90c76c7ff8a3e73288803e4641cc8548699445ae6bcf69241095102e117cd48e32a5a873b07c6245b44fa66543831b1a333621ca2a7f119f6b4bf9d0f0f03e8d9aa2070681934e3e541ef24e2831746cc6a3968edaf61e132d6181f152a53781f0642029c9c571903b84633e6be89c5ad452c20b2fa07306366fa4cbb239eff9fcd9116dfac9c03e5985f7167350ca278a35ba953a1aed7ef644b112b1de9c57f70efc2ec58e275269b28d1e2cd68f4f0e5523a004f5ec9482cfa32f42eb13001b99cee908594fd5077405060ceb313cde8649d20a891bd53361e3ce53e64ea421b01efa446a6e527fd0906a472959cf659f184ce45d5a68488f7f8af13cbd1b9773f8c8382e6f565be2893532366c86a650b5849f854e6797274741138c02aeba9f79f2150033a4f2dddd2e93ab02749da30d88be5de80ba80434ac614da6b1398bf164a6cfa3c70ee8bad3d6d25437a58de1fcf1e37d55e1ee4ed912455b6bde51aea965c4e9f4216a96372e8191ee3d3d0c15647c3957fab036ef23f06762c1589337b43c45bb62722fb691cbbe9b774221f479a9114f4aafa3f932aea16d0a59cf031575697c78ce5674785bded12d52d7be8d9c3e83b57d148f33dabf66939f139fea47abba7e1ceb446cc06ef3ac7e841743118b786f6ce45bf1bcf479a13d015ad920390936a3a120533278715b365760ae5adba83b2bdbef721bc7fdc5db60cfba403511feaf5f76b1d68f83ae69715a4463156da881f2a8310c2140ec7b531b91111f3de4ab7b0b5a4d7de64ad3769294cc4598a923ff36c12d3ef1bf9b4b4695a2e6cac59410cc7f6de00d91bac1b8dc92a201b26ed96d43ad83501510dacdd0fb4f84c138d6d45de54cc969a4d21d5d90fb8dac4ec2ad83ee483fbadad856aef640098340816a277aeb2050e7689aee3978e28781b55cd260b79e0f3375112d3e1c042f2eac12473a934ee20face31ebad6e0507360d260ca0322164a035ca1a42885446e46c8a7d7adb58c7ce997275130e6753e1616d1b9c76a0c883a54a9e906553956f8308c76aa4806f80feda049ab6694cba52f52f535bfbdaaf81c526899276156ae2d22c658fa5a3a0a8b7d832eb622d175fc13930536846fb677efdfec564131481f4977e78410fdc3f35c04e67d257871dfb02f0d23287ab44c7e7ea7180ddb8f8e8e9b96e0b78323806bfb69bc9ef95566505705897870d43cb08dfd3c4482dd88c8ab9791ffac7c29e6ed5acfd89bbd8143beff2ca3c5f185687e788e5a961532eabbd68067c4ac7b92825f0725bd408e9138e90059fc5a15227e6695a13373f87caab87e412a11aec3b22edbf7f02f91239036d1a2778503e1e27f07a
9c028f268d66fb5b6041958437b4548fe4ccd3b42e9d193a1548fd32d4e1dce9cefdfca79987971210e57ba1c79472cda68211be04aeeb4bb1cbfbf278262291dcfa9f9f75ec141a80d7589e8bd85d44d6f517c32eff912d580800e2b61c9be95f93fea1b09733cbf1dca13eda07d299ebe72e682248854c99df59c0f315d4228ce387a218eca993fc9750a250e4a9363a1fe5482cc63b52a8486a10571654ed0c583f9bef0df3f23a42bc8391dbfbe23d488c393a386911ac00d2b45f60db922c8c46a1d9f44bae96badc05404cb8e7069f6b3f1c1ef6fb18a4b2f83eb497f1b31f6c74259d3dc17e4045f16a0b61954b960cf000938f3cab92a0ec9844d6de7fb46f1275760b4fd1414b50785f9ad272eacd8231c27c7ef2cf02bb89e5def3885f623266bdc996c6aa0879094e83d5728e1f4808150b45fb2aa960b80fe5005b2c3bc524d85fd833d64f2607ba5e7eba7f91da3d94758b84039848999a66a5b960a7996dd8e3e1d83f3404ed0bf8dcc994e8e22a7578b3f86a1e459057e4c88b029aba78d42d69bbf39dc47889a4d0cececfaf24872b434706e83c2fff84d2655146774a3c80f82217340e4417f0d733efe4af2c7c01338fb6735c2f7c658598b8cfa71a4f0e1481656a4832625030ed9bb91a3f505bbd2deef7f5197dc179f8ea834f642cd81bab08def63d18e4b28e9ee39f080a2f9043fe8879bbb47b10f83886138bf6e29d4e54d24a2ce9705f12ab488e30b3c197d364a087753b2904b8808aab0c1c0e0bb768a508bfd8716546f541fd368bda2283b0f1814cc2666767dbf2aa1147ea507693b819c22a7f7e7392f08c12d8765fb5d23660d7d6006a0e5cc198da81af27d2fc7fff6cf556d04b4df6dc18c9ebddb97c61f043e4bf66ffa00f0d4262e85f67731e8c44c05a1bbcb7dc411d689058bce938d935500dee7800dd60a5589cd3fe19cc5e33a9433f9f74620f0bf0f19021b122bfe996d9292a98e34e83b51be9680f39bfb4a26decda67210c17f11757b16a9ae2445c26e8bbcb0517674510870cce7721ee6d6dbc0867809e329dcb00dd26c5bbd3507e646e86d2684194c9c54969dd56fb33f67e63b4b07c0260091ead11c0a3c8372c442b301990c5a206cc3618d35bed17c40078a5d8b128f3a05caa014da219220fe199499535cd04f3ba6a928216db8b76ee439e5b9400aeae716ea2db598b9b29cda7c7c9cdae46169d2a0974a01ec5f3fe4061dabe3913f7b9272d6a34518c44c62cf51c858726d3f42eab68506dcf35a7133e45a51ef3229ffc83238d2b450748260b14b4a2c67ebefc966ad95a3bd0b16457505b0c4c434632a0b9c5c66b2bc4c56d9074c0b806c4de81415b0e055f2cfa4ba42b03c2df4496a66d55493650cddaeead2846e9cc08
56c994bbb0678ef2fd84134c1d0bba8c2c442f829b969f5e5cd1cf21c972aa3ae74694b20166d8277bdec0b4308692386d7cf4cae2712db10cf73abbe3fe4af4b41af8681c4188e1d8346cf43a6e2c44d0146cae7e5772f9e6cb9d41c8b0f11792e15818631a2270f75f373a39e6a5d67623669e3bb02d09c905280ba388d1e01615eeb9b4e4118f5c66bd9f3ae3a95187cffdd046c8b99d4b4e3d19de7c50c801bef201250355a00e1ee1eb3e4f4f923226f8ad25362fddf25717680ad5cfd9fc68830ffd47cd08c35ab758390995dbf137f0cafd5a398342b8dd5c1aedd4f66e527eeb5f90d0caf585b6793dc3348af4ac7f5d9f674f19515bdc34de3a1378605b2dce5a99031a69daafc40dcaafa6e9d81f9ecb7f986a9a6ff994e221af6aee47a51c0dad3ccd8ebd2076063595761b2ba6f42077384811d1634ed3a7e4e2db43641cd704c1ff61571b2625dc6f9a1bd6eb303ca64183975a1b9576a631551f61f9c4a1db3274267dfba1298712e70631d5f426279c57d5d4d0a2f45d52d512505227656c59089b4da85f2f44d4db685dcd21d5678475824077262656eedbbe7b40d3b112346d740e0e143d5f893cca73580eaa1bbb1bfd6217c761da7e60563d98fd8763e4bb18bba70525e8ec542e8f6fbec29d89ed58575883e388cd74f25148cca119e3ba5d40ffcc106f0d4a45268cb8b9c9f4c3483c42067d942aeda75117708fe1de33a1f7bce92170fe922c75b8d17e6846d102165a388dc7ca43d9043850791454003aa4eb69125030d535f8f119a11e5f26b6bce1bf70668a8033e8690b8a4a3cf47b32798135c4a7fbc160d940231bf29c88c9ec232ddbba1d7cdaff61e5a4f4d9da4c3bdd15ac89f9c29b7c512a35193cc136d75ab1a71cf23d9f37c04fe6c3c06cedb97c0820fd21aaf4d81efb4a650ce5d8186288cff65d144b062126bcebd1e8b56e8d3dff30c2976571c61edebbf996e1ffb9b0d086c06e5c881c1d91ed9e39abea2b32164ccdb0ba1597ab0dccd78896bc71c4384cc49b6e5bcd911c0dabe0cd2daf32ec98675b81beba9d50a43dd8e131bb01ef9d5a63abec0d9e990067ee4abcee0c5a3888972f6dc2a225509ad018e1634a0ed7c33b793f4331b1b5a57e198f17312e65f659fc3b565318b1d8735bb4cb8e660c234970563be510c7c993707de21f41714e5926af805e9e2bd709886a118d04a8206a75d6b396d689cccdc15cd90d4bb83c4e46875f8c62f3a1ceb78a4fb73215b1bfd0c9204e3e9a2600e3bf1f1259cc274a281ba768b4b525b11aeb73ae0037269d726502ac4e967d1fed34412c2c1e85c28d4f155cf0beb25d1c727ec864e3cd6d564f8e1a731c4bccc34a5935ecb0ce52a9627d42ef55b44eb92fc38a0a272443c0f970e760022e
042647f8014a4c5ca08f10e11ef592fa3ee8b752d49070846c78b2a093bf7d9d67816200196dc4b6e182074d17f492c1b3a6c68f15eb74de40630385bea8c08d75606fb209da76da16bd9aa0f986a815c43334d7612694fb1dbaca93c557213ec6490727266a90c3b3aaf5a65043d6b6b87c835ee093bb3b0b3205a4cab1b933575678e91530109bc2494400c6141df7b5cf241b36c86d1e9093fe2fed75c74b2ea6da53173a847ba77e4e6a6dc21ab8e41b4d67ee34449a4de55f352305521896d27c601b6abff20aec74821ae3d1bf058171784312acce591cb6acce6dd37fe7dc00e92c207a54cf55fae634168f5101a763111812015a44a3b09efd7bd064b2fd448e375d6277b44334bb8ee36bd84532703cf864e026544620eece8c3a18826a924a2d5de2e25b96df67c6f2d5efeec806b08dba83f3561f796ede197f283559a49d1dc3c84d7efb531f1e3b137a201c387a6e37127d3b37a7e5df3ee1a2846dfcfa3eef2521eb9aaa0f86143cef3554fd931be0b46d2f3098f8147ef033011a900615e866dc43ddd0d4d560cd53a4dec32c870dc59323b75ab061434a3abdee093f051ce749bb645199cca7b95893bc5cbdaa1f78226568ff8ba8d46fec1ae84d4430bf3c9c71d2271e7f27cbaf9dddef43a8af4c32ccb1b275858c211257b1901020c6ea689027f37f90c7f55854af5285d74107c675380f37016ead8ba7e200812414f1208ba803a15c39a09eab126ff723eab9789b8453c582ad9fa3e00a68c60ca8c950970dc13b8f2dab9691db828cf0c4840fd938633262861dfa7810ee7c1efed44108f46e8d4ebf939e59635bd8e236b60d9bc02374cf7927b097dfe7e93b1bbf6e96f677566407a32e163e59a0aadd23304aef1d1c653ae65f5616207c06e7954a9a5d41dde96d2cfe70b6c23277471c12f6c7342a882a2c3d63f786d819a6e3e978c6ea5545822c74336c75ab868b6d6942435f82dba90a9ec9a2883833acd8d5360899f271fff077e63a65356bcdf7ad6fa8214413055731a3dcfd343232e32d81bd7a3c2147fe7d795a1e620133f349b7e81cbaf77652bdc5db2fdf3d9e541f83fe505bcaf6ec22eb49c6712f8f0307914257f0dcf665cc7efb009920c6a85bb41a884cbffb782ed9d64c999dbcb383c774a16f7a8fadf5363213511698d031cf6ba850ed4628ae27de02cc6f0a5818267816068053645bd6d72ed82c701ad96489a2cecbbd210b079ba2bb73e2749cd5554bd80a7448c6c852036234ee2cba8445eca4d3a47aef85078ce56d5f2b871cfbe38379acefed00a5accf2cb6c2c459b55e20aeced3554328b290fbc9bcb2530ca2fa016735db83f3aed21862b13baee2839f830c39770db37ca2dcb3904ba1a87fe823c99575ed660e4432c2db55
2279a07dadf5dbfcad85d48b61f9ba2e54bc79702c0eac0336df5ca222526966e0db5b13f7a24446c2dec575345298070833a58881a5592be79fbf5624d031b67a1a479ae770a6b6d31b33ed5a0267e4e017e19606e777ce6bf0047326b140810477711f1ea6ae191f2f2fd0bfdec24057f1f1a59ee5d42bd024fe8e0e4f49a9a885438dbf8431132c610609dea48cdb897142a6350200c71886303a1e4713bfe40b9d317e48363792979e0df0bb0880d956ae89fc978e4e68d65e643aa7bab0fb32f6f02f0dbb43032322cc7a1e0790ffc62decc18d912c6dd5d4ac0867ba9809a12d115259c968c71757455647f3ec0b108adb3959fc2e531575de1348e90c72bfc719d226fab85579e7fa10a1892951071a6c94b2bf5722773e3834bb9a17bf886877c211104487afbfc442c1dfdae41630826384021582158af82f0e7f7125ea8e77b174e3a02e0dc5d8ac7786b3326a9b52991914e0e577269c24206b3d63b7ab77e71cc675efe8771c68b6c83f84a207c5561878842cae5d3507c3b9519d57c8eaf11098876c5dd84ac0b04167b79f83d32b21073ca834449501fc92f0fa688d8e466a2494decfe5e17a573c8870ecdc22ad6cf4a2db1bcf49117854ab365865dd686bdc53968962f70b1ef76519a6d8f4f22d70f77363b4da1b52c797c7ef0f12ee982b3c898be45de86ba317dc479ba82710f7bf25d3dd242416544b71a6965239531de664b384ac4bf27d327c12295df1cdaf62e99299242c33c326fddf09edc476228eaf375991d8ae19e56ab2acc96d358a06a405472b2dacef8eb807c3cf2d3f064566f8bce6e93ea30e7f903545327bc3251a3e9c6a1e122794baa6bb47759baba032bfe23cfb98c992d321c68c5b7ca90f4c87ba892176b67d94f7d2c740f0301f8c41997d8ef70341bc5bb1073fa24104f1451bb10d12dbf20341a3272e8a0933fe131569751d0714272871214e5e7d9ffd578ee91a1834be710fb6f5bd83c241b73f87b81fc1abef5072128979e650cd362292b923af17f2275cf441737de29715d96f5df57ef93d3b03f1bcccc8150acbb56ed238dc852f2d4aaa0f530dda2b047a62c2f0b9efd19f4ff8b62da4195086237eaf0be6370a9af9636c9c331394f37854bac2d31d7217e541a873594f9178649859231e8a7db1d94601f9a2f4c05c4541fd4d3d9762fb10d881e4bde21bdd3c818db725aa0ff9faa02378d131725808b087c4f2bdbee5c7173c6fbe1a2102c9c8ae99e8d9e5c47925459d8ed7cfa3223cc520c2be6ccb1d9165b55731bec09e0a6d112aa050f7172ae7f55b2192904ecf9f0abf582199f3579313daea313eccf38a3b7e6bc2bdc8ad08a879757679c5a6378a75035f62bd72512f0244c57f23f2a90ed9f3778a69b2161507aea4
d955ba3d057a743fa733ea4f9cf096bf0f00ec9f968c2159f8be70f93cdf178baa6481aa7f50a94534be3255e1b26ecb34ab7fe9a534932d9d680b9906de03ef1c7cf4b0a0f25d83860c9934943a03801d9954d7818683d87f2b5dc93e8536e030917a005ce27f22f04c38b6f0805e7833d2bac79365f6f1fbcfbf985cf4f91a81ff4fb0dcd4408c0a81a96b4a7dcaa423cac2a5035928f00eea14c9b57a24152e648cd7e6cd9b08ad25f9a8323250be1101d3b4835e91bdba7137c9b59813c18fe0df8194636b0e5740c73604224f8df384514607bdd8714b0128a80f327b689562a381b4ae04ad6c7a4b542532a65f2519d7aa0c1cd1f39061f68a73adb92a2ef175e1b65532ab57b941127e6479152de312a598ea1cb75fbc0dc3e15f4880bdcb8c26f8692a0795a13cbbe48840d53fcaebd92ce1a640b0ab0285b4fe0e3e5d6931ca6f673290839dd5db28731d11f5556977b941e560775cf7aaf0a56cc37f4faff05a1304dc7fa0623a417c5d427010af211c6bb36ac1acee3d5b63629431004adac7a863945872b4eb3afaa809c727c51b2a50d1fb4dec11c9f382041f58af3a4f9b47afff4cab365215e83fe1bb165f6434dfca66d9fcfc30cf9fc5aa4885b9ad263b3aa8fb65761ef8f85a21eabe2536ab83fa7a95af7b105c441eba494a67185f302c1034298f532f1d4b0656dba9a108ec1b63dd61b017ba5d884d00f7a33c010ebb1a83be1e3b32d23013c37afcdfbb43c55e62cc219e3cc781cb3883535ca2a7acddcceefe2ecd886d8d362fbd1ba6154c5627bd65bcd97d0835c6b5e64211db44a3a544ef6160192bb9f68843b026e9d5f2d5d63d586598250184898af1d46af8a371a64650658b7af2844c6ad004af8d97e2e0d983075bb19fa84f30a59df03e6cc2f886ccc0ed3e8b605d961524bf04a16ee6635fd5e54ca3f4a3d10219848792898eb10690a289311f7be01b2ca4567e54fa3eba6887169000b07e12a9d379fa6d9774cdfd523cce2e2a3beba1f3690e15a5793b4577eb80202ebba9ff100be233125ce7478c3c5053e0fd482687f6e8b90535c5b9406638367a46ef9b2431bf740804dac9a69e4af64644a88a968eb8e19148a6b27d8c4fc00f9007acbec7876353da2bf8b07e42873b07430f050000800a27a726b4d0d6c2000000006bbe1900000001520a7855ea2883b61960e74bca23b78eabb483f306344799b0dfe00660aa8d3541a122a45221c33f56c0e3b8dbd30540854753c7443b303b43399338d13fa6b7570aa4182f7a1e511db2f188ce31b06e656deb6686731ef490de9a2ec9dac2c502dac6ee6865d582caf2e3a28c007915b75f920b60a79c5d95fb0534c9f79f4e6b2d2e0c60e4cbeb75c363a9cdf06cf808ba90a06159ceb450ac9be0
9bace2635f32d6181655d39a2b9279b753492b0723a1625922bce5722ea51527dea355f9d481b6ab51fdd8775638d7b8d188eb45f386ad5345574bbe76504ed549a2511bd924aafc67797df64c926da5f50621288e14d1d4365fcbab4e8d2d410703b060ead04e4302c8203c29f993b3869eb611598c6956bd79eacb08693e3934cb179ad36d27b66a7d25f2755c10b2d066c45f332dd331938a6aaf20bd4ff6c58c135143a04712780fbe166d3cc63fc8fe148f7aadd5d9c2f16a3b1787a90be68ab2e83b3d78b2cb89f6dd09cb262a52ede7785e253e4d38ce84c528762b2961833ab2f39d0fb42f874e1dd3fa5a6881fe0a7cfd4f247d38fe741dbe635329d786dee26ddd43a259edfa517a0e2b7810e44e9c2779c356607f5a95867a67761e25295581a183452af502be2f16ea5b0bd3e72fda361879b3021199c7cc1c6ffddbdf6f595f671f4d69b97db5e0443be93fd14c5f2ed0b85d12401cc38add9e55e66cc2de6f05e14ab549dda66bc22b01d44c25186a4da9172d43c73d936a53d85cc6b75539b4ffcbddba887be0d8ef0f1f565f3bc697dba1c030bfb5e72e2371be9d8d3c69cfcb2286dffb004486bfa19cb41e1a958806d5a0cd0722e7dee455c3548b01bb5ad7a18198c17d3df4bcf1de180f7307046e638798af33870e448614857495201b6e5ce0c5c787d8cae967c40a10381e7a1f00ae91ad10b86b08896c49f5935f3f6d5bd657b7373e0d6dca10f4632f2da46197b32d49c707d6fe964c9fadd4e186ee397e4ab492d04e15bb37bebd7574ac9d89813b5db2fa24dc94b4be2343bdf69b0e08be587727ff2a5578517962d2c21f08f67da7edc0475216d5a9c5e602d8b35a5cccdb8acefa8f68997e9094f27f0cbbc39c2d6cc6e0daf8da67bff202c622bee215f38db172413dfff0d397ad208128ee4943169c6c2f6b66978f5f85da38d227bffeeeb9ec7c987fffdc5e993af381e37d43c9c4d913ee983df576d2dc056135c686d0782e6e61c31122daf1611d1c548d76c9708e1ea19cbfca3a0da6e368210dbe36ed022f32641416070760e7506f7330102dbd9199c096208a48eeff08733c1d67879b56bbda84f51688e32c8b77e0e0d0a9771e8dba73eb961736388f825470c0da97f932478c0ad9d50abe06f375f79bbddce953355c40f71f0da2f69b6e6f399ea14a986968f12ed11166ef91f6113f950e90d49de3fe15b73a63893b7f032bf501fc33641fe06f0c409097d1a0e55f7234e12e6a1b3bdd6c58b8e00173d87a80069ed2d86597efff4903f62d329187603af917a53f9c9702e747ef489e914b79aee161282779e28dd30b592834614d4a868d2c67f2ae741ec18c583578e7b9b4eb4bb1521db31dce5068266f891b7374bb1eaf11a494bda946dc
f86fe7f134e858c847f82b39851c4a9fc4985dd06795d78bed5c820d36ef1f6708c4e22ef23cc8e4eb5d5c74640b1ac3f51c385afd66222919764ee2a31f7d0b367f9eac2450d0551ad0ac9f25ad0754ed7225bdaaa3161d4efe7d15d7356c163bc6b5264dddaa1382aa8949c4b35c9e184915ea1234249c6e20b26d9e2b70b4f47c1b9aa8ddd89d982fac67dfb54076c50e4c5adc9adc17281b38f0a5e45fde5ab5af422c844fe3164da9412e2aa1216ad088394227c2cdd7ae98d1de07dc6a64efd90bacbd153f983df9ea47dfbf76f6100c96dfd58c618868b9adeea1c2c0a446e97d1e58ae742f2fdf4e0849b18e08a9d2e31b8c9023fd3c0382ef092c88672bcec3607e27198a9d38830158c7b5bba31ef6f6d08f18e4f0548b60f227990e4237ab4519b497bd4a33de99378594c1314c3185ec04cac283b2fcab08ec8a8cd37c8328272cfad1df69904e2a404e49fc72400ae99a67df7657fca8eb0182abc62132ffe09ae9aa524804c718e255bf33614ecae5dfe3d00d6661f8da69562c74988e7202da28f188602d97ba9f8c7dfe419d646e1188bf2b6e93ebf2f2d22c4ee632268d6b31d08d7e4dc8678d1eb5ce265656d73193cd2deab5cfe835c9dc35531fc8e8030000000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044ca9f016f324f52280e66fa715588170eb2365d52f2a64d90f473bd838ad2362e5a4ed3843276f7f0f68c9415269f12739af68a7666696f03433c8098ec169645473aa1113f69daa635671c658b4465d563748f86d6bb22142840f6475d683cb490052284a9ccbfc7b5290ba3191f54df22a1a1c629bd9e3c257159519d03f2add6b4a7de8fe4ca99485fd9ca354169280922eb8daabaa2a70e314c47eb5f47d871098103b22054a49578ad077b58694b351299ef5d83fdb1ea2ec5100e336f1ecb32bd1ce9518ee759ade8903dea7e5c2601759c1bf7329cd16263cf1c25d6856710580cb2e205b5d17c1b5f88c370294279339933b2478f529bbd6993e8ab20da4a1531fe479a775cab236e62ffb6e39fd33566fa63bce788de0bd45cd9dd28376fa3213677e978b268940548bf86bdda5464821686dbbaeb3755c37bfd9deccb5d5a6de3f5773e0586dc81845d2c92faeafca1eb352e3d91983ec559a22e38a161b1cdf74b279e8bbdcb93c1c1090eaecce3f8a4c34e3f4fed7319bed716bc29049206fe388a241311a5a7598b9a01fa344481d73bea81c0e7662c7dab7197be60ec577b6c4a6f9d6066645b438bdba5eb45c876871e47a9d1b392b57d4a20792bd56cff90015a4697d72cf94c6b457cad61f135626ec56a1754944a33880f608e8ec5151c5e6943f9dd94ec5091362a50ec2cbf50bc1d8653380
a8c459b4011cd1daad3f0d27e76c8670968589546a5cd670e7551be03f0dd828ee45be1b2d0c6929585ee534406b9f4662fca8fe5d2791fd28212194d032b36a3fab975158613d0ab92839794f7942b25684273a82a9ee7f89219f7baff672866eb84c1b9eb998b8beb87442bc7ad01f000ff8c0d2963211437b6fae867f666c9fd9aa24de40e0e1a328d55ab0ac1e968dc0b0a041121bb81779762d2b574e933eca2c2e6a8029e409843ab0f89a27a00c6a1b4e5de6cb427bfb5d4760aae2cdb86d60f80500 diff --git a/zebra-test/src/vectors/block-main-1-687-108.txt b/zebra-test/src/vectors/block-main-1-687-108.txt new file mode 100644 index 00000000000..b2148c3085b --- /dev/null +++ b/zebra-test/src/vectors/block-main-1-687-108.txt @@ -0,0 +1 @@ +0400000098b4e4acb3c235cb2de68465719ae80c803e7f31b694c4546f5e5a00000000006e2dd8abc90559b89c6e6b85feda4e995966014462a8b95ad2636ba1c3cccef392df86ad684322de8561efa1b31ccbe5a3c5abdd17d6930e53dd35bf492350b8ee5796623ca6011ca72cd5f90000000000000000000000000000ad0000000000000000000007542efd4005017899bff2a922f3a7e8901fcf241f1cb85d8609d930c569b724981ae3a9e99313d8d52fedb2021842b00c83d30755123fa7944da3dd0f2d1862c99cfec5503669f3927153ac38f5e073fc43ba72f661ba746a220bebffe35ee5f063f2b622084ce53a39dcf3179b5626d137d84575538ff95e283952d1bc4624c416910642fb8400d8d9c9335c4e3573c7ee8f6dcb8e51d3cb4b6325da0f96314d56d708a5d6d8266e508ff36d02083358ef60dde4152082747b22c2917a25a33a455f0d506e012c4863a9d250b3f13f33a54ba6093d72430ed58dea951ed56f6c2d15f89477695191019655231854fbc1f125c3fb986a664fe8686a5700aab8a1c30b30adab98192178cbdbf43d14aeeaed117809b6f30fac70f87a5cc2d3496a6130bd133c1f621b9f9df232fe05cfd590417f7ffa54ac87b7164d88fa9fd4da47d2854f5757259594aa763268c3cd09cf46d40f8004114fc40405c50cc5f39170e6ec90b4e301d388c52aa74a591299c9d5e32e861be77b3735fc45ba3ea223b50251529c3cdb63815801d9e133aa0ec37ba6b63d671460813469ffc54868a953cbe4c3bcb5fe01c529900d63591c2c8f9072d58f92eb58e2093754bf1b29b3138c4990ffa3a9bba599b8c6202b1c5df9c4dd354f6c5201634f95b3a3c479a14d584e052b7459e567dfd49285b5d6b1d51b1ab15b7dfc2fa4257f6a1506b105c900df4f87582060fbc7c85624f26af685e91fde86352fdcf2f177873
31c01295c0d0d200e2ba9110e5463d1cc04572acb01439a3ddd986df4e6216e19cf27c79933e523f81272afd0a4375e47fd55358a0de1f47c66d3954388ec62d928f65ae21343dd575f2af3df31117757f7f59ee4bee072b8d1517f4d48db1086bfdc0a4c3dc0b5d8e1cecf2476da110e5be39b2fc2767070b4856bfd4b886663d45eab8c533f357a03c9837553e4f18b9736e31e8cf58ab9d95fbd8cc70772841d38e7f07f72d4a65900b8cdaaf37ffaa9e5098551df205c42cf53e231084eb57762294f9fe81e260ebccea9b01be39a8fb36c3176c155cbe65490170ef188782d634263aa14439a1755ceced595799cd819bb76b49a1dafdd1b0e44444676322722f9fa5f7412a97c0ae460acb94865739350d512c9f9827ff2943a6b5daf3229c3996ce3644cabfadf16ae5e9d376f06b5ffd9d412608fd67820ef17d106c9705cdc12702f002b1b2f986267e7f9d601214b66c5b6c0120fd22061a1df756ffe3f921e223e43c745158d993b9cc336aecb85ce4ee11895ac84b5c0cfcb25545e5d355d0fc773f115e03f470bc1f5f5bd5d64a1c08cb184a41acb6eda604c674de54526f7f3e0c85a9166fc838a38a532d57e9000b68211253367695e93c7debe8eb367b62f5c10e15e833f934a71dff59a2ed77efb9d610743e2f82192de43cfb8533bafeb18b9518e983ae3135339d3ff85be106d5764b427fae3d22cd2d92f7c1e890dffdbe803bd7e1d63a1a8b6bfd9b0427e1b331ee953244b547cb9a270c469f63ec419aa49ee5e790c7668a83de0a84fd64f24a3a3a785057b3c98ccf52b932df062d69f0f9087846fb85908d5f1ef92555a10c2a469fccdab5d936e016e706b56db1b3d9d52e115a41ec5496163011f6cf3fed3eab27d92735c289209517b446a51c78fada6f0e1a3ec8974a444662c3e1b230a1e756d5c27eb5991b65226300e16f572afe912098509a215f4fcb2abc26c53d755b1a4554e731717b2a50344cda59994a1d44e5ff023ce33711bda51156b09924e548864c725b3d0d5f401b344445a9fcd1fc37b28ced737bdc11c947fd026e491e5cd1d2aed171654bfda8dd63d748ab7e6c8722796ace63cd18a41556e452097689bd234e797e75e07464e6b98de0206050000800a27a726b4d0d6c20000000044be1900010000000000000000000000000000000000000000000000000000000000000000ffffffff050344be1900ffffffff04f407e70e000000001976a914be62da59de8993dd79965ddd27629b1ffe55e46e88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a07
6f02c7c778c9087000000050000800a27a726b4d0d6c2000000006bbe190001bb4efbfdf3418188e48cda6bf3a27c438d56f1eea919c2c8be19a896f501ddb5000000006a47304402201fd1dd841b419adecec88a917f2eb267ddf83420804f8c69ae1473c628823cae022022fa16412ab96f348462de55a0a1ac29e97d3047f57b9ba6ca87f0e6adefd1fb0121035556ffd87eda7df56cf4a1f180022e22572920d70994cb9139ba973f2a105d71ffffffff01d9f4f02c000000001976a91418ae4e28f414a29a171a922cae6fe4cad6370b3388ac0000000400008085202f8901f5834ea2b8e61b1a7c3d4452adc27424de46ebbbf76f35c3418d7a0a3eb7b570000000006a47304402200f852ca4657f4fae04e88a21d0260d07319cde79d8c2c8b4ee12375f0b5af9f702201f44df17dd9606404ade78b762d8b1c61b6514748c94961337c360cc7898afba012102fb51787a8b082f22c24ae5147769a3c5c114cf45cfdcf2af2137a0e4e0b82345feffffff02c0aee820000000001976a914fa5e366d5e983134ad61d0fef34fb6f22bb7a83088ac84030000000000001976a914c4caffc572964eeee8b27f5046fdb1e97b21386888ac00000000000000000000000000000000000000050000800a27a726b4d0d6c2000000006cbe1900017f8c6e56b86c8fcf3d84e6f3cbb094a12ff18800f157ba22403e823a4998cb7f340000006a47304402204d3f9d54cab6fb703c0cb6851cdbca1faa4d85fd0eb2ce471fb855054a04223a0220429d0983f55e260ab9c51193e118762aceb92c84fbb65b57312fe165330965160121039956d867869b5c4a38d5e75352bee899424d88c358ff5463d1e3d1380b03eae1ffffffff0168100000000000001976a91446e35b3cff7b7340bc772fd59fee62719f781d5088ac000000050000800a27a726b4d0d6c2000000006bbe1900000140420f00000000001976a9146afe1e399c083c59160fd7de99863989d8d5d39288ac01ebca1144fa95ee7ab14fdfd1d393e661764c4ece11b601d72b463c4554a926392598fc9d4253c1c361d3563bee56a362f987063bb445caa9e11d693c9ee71a2ef508a03e0763a2e10bda4c500292469fc40286fce43a68b94a7ac7adb2b3421601db33de1290511b76101de731f8542a20b427002242999028c51ba213be640783376c2f7c3b5337091aebfb4729c6e81dacd883047226ff81a919793202fc663b52ad69b48174512739edd2fad9ed26e4bb7b030f73de80dba7d8f48b0ed1f79ef0c4b9f0eb07dfac450ae4aeaa9b918210329145633de03d6ea50ffcd5c9535261de78f8f09c102b24021c21a966e00d6eaf62b83eec136b8c45109310fd7d370c15b30c3f8d5c8cb453ef72cff09e89b58ff3c
8db3b36779401af170cb1f17e8e3cb50ff8beecde87c54cd5d586dfe5361ead3b722307a8722137fa30665230945d3bd55c9d0521603369cef2a6c849cd8b18a64746012279718a542564b3c798190daa4e8aeb83ac350202cd12586648d03e91aa098733294db175f041d75ba2af87824af23fb2e0556060376eb9d50bb51a66b70f0bba96a772ecffc304c544d06c736e8f27cb3f53d4f1810b8c22ab6ab5c58325b12980972c2108cf077715130e2f97d00ecc27bc24ac93e5955da9107bb3526db8c10f02ff6f073d72e876d0d604308ec8b5a4a5b8f87562da2aef5f05def5ac0f538f2907154192589cb29e8785a71f0a20272d0aede4fe7d517afa5ec9df30f429588e93d96b9fde20a336e50c324f3b766569f0cfb49cc7827e37d144a4dcb489203b008617e07da4e749295bb08c86765ab598cf58125c92fe6cddba18c5e40e67bf2b6d335d6be2c8bc28f4e29184a68722de290e66bdfcca4aa36c9d5a80e847f3bece065985103e54a71ede59e337dc996814722865b4bd25eeb6425e4747c6146f981208c0ab7e922127b89e2482ae8f861e93b7caf9b8501a5c21db0c700d774618857a7d1e542d0bddb4fdf9205995fc1738280282f5905674adb59f2d9eccdd0010e998bfb3af1b4d28bd23cf9cf9e8f1fe714494d53462764de121a5c775f19ace6d0b943ba7e4f10b58ad85de57c559adca4a8898985bd07b6ac9e72cdd0267c397c10afce133e35050333513cea216d5c936093f7d0ac94960decf1f5fb7ec7a8ec1d71fb43ad8843933017c5b33caed26a17b8d16573328460f00000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0adcfeda49684655044ca7ff0e1ec83926ba6911169ea6af1f00a12b5ab85a487f5804a71e43b47b22a145718f0dbd6c1bfa811112cae4123282ad86b85ccf58f1d618c90a3beaf640d51e45d2c09c128a10e65c35219ab2e21731b89eb2443b8b60cebfdea91a9d385e0cb30f58cc7e05e3b80c53ef8cedcf48274894928c45b52627b9f40f593b29943cae4ceaa7054ba49645526a5d544723ada8287bd56119914a105a90196aeeecc74632bcb922340273e08a6e7ac93bb914f3d41c3f37e21b0f58db5fe3ac14bb760a746f4836c5110eb61b35f4a57f8cba28f08b646ebff1c0684f2f6d97563988ac3a7b98bca2a41f0d2aea55906f21833176743e0fa6d71d747011c194970880e43115d5f0f45bc1cab035610c7c383deea88fbc2d2b6bfe80ccec2541673f408b06756a46a55729633570d86db4c6b7d059cf452e709c470b329735d59770f0cade588d245e9001423e00b045354d5098bcfab11860bc6a230f57e720ccde19607631f18696b894519bdff06fcd1c0613043c70b5df4e44ce9db7686a1be
cb927c9eb10292c3f12c492e7531e743e97bfccf968903084d7b4cacb1330d09003bc65f6b853c1df70ff6e6c5f7972b8108bf84a9877fbe1b1ca4c130f2b76e3972c99cfac14e69b039a1ad431f5ac3f289e3e45056687534bf8392c0d1b7e9ee3860d43e2df7e62f407c2f1ca42165f8fcae6ed38ccb55e3168dcad49c22a0c00050000800a27a726b4d0d6c2000000006bbe190000fd2001c78c3300000000001976a9149498dff8a999306841148891637c7a7e846489ff88ac3fce1000000000001976a914e2c6e40f2c81d76273c606951227575484bc9f8d88ac06041400000000001976a914b35ae9b8331be18b1cac643f6f3de2be6b293a4588ac35034000000000001976a91498d678deeb742579826ca64bc0420a815c13e9f588ac5c341400000000001976a914d7ffee74e1d9750a804ed512a5a18808730bb92388ac85bc1800000000001976a914ab6951756067106f7b3a2e7ea803b9ac71894eab88acd33d2b00000000001976a914b921d264368717f021c4e98170f1cce6af5feaa288acc8a81600000000001976a914f8d4c6cb5e7070b30bea5afcf56d5276805f43e788acaa421300000000001976a914fdd05a6fec40bce35ae9f9b989a83a0ce0871ea088ac521213000000000017a914c792ada1c9dbc70cb1bb909ecead9df37a17f7c08765c71e00000000001976a914de42417101859a1f97fe83c8f184f628a0a01abe88aced2e1100000000001976a9141a8735ba195919e1f8135b92f2d563efab72469f88acb4641400000000001976a914e747e12615abf78c7aff9dabc4c5e496a6f4e1f088ace30c1000000000001976a9142afe55bd3c48073838602f49000e613a11e0ad4d88aca9421300000000001976a9143f04656d56a8186a262661d639f8c242f2f367af88acc8a81600000000001976a914ae21ccafa0f7194bab8d0f9ac7b85588a2ea230988acc8a81600000000001976a91426fbf6221d3165b18979333055918d5f5ea45b5d88ac99001b00000000001976a9143bd467c4b3b7fdeb44d17eed3881e686418dba9188ac51831c00000000001976a91447a915bcf854124815d17ab0b335e84dc6f2627788aca1ea5500000000001976a914eab54f8062ef3b17407773c5b360bb6d31d83ff788ac35ac0f00000000001976a914b8757b3a56ca4dccfec1e35184c738eb560efc1988ace89d1000000000001976a914bd56769b224a38d96aed6801c2ad8181242ade7388ac41b43700000000001976a91456b3b86a4b4a9fa31ab320dd420fc3cdf27720ad88acb3483100000000001976a91456fc590ef963374de190e7cb207d525168de5a3a88ac874b0f00000000001976a914fe5f81599d421e2d86854ed36377a999a92377d28
8ace89d1000000000001976a914fa642f025fbcc4c8e322c1eccf6a97d2ceeaf79988acb3483100000000001976a914568d9f52a6677022412fdaaa99deaf66d0b088c788ac95e22d00000000001976a91444abf10eceb9b509adb8bd7a1290247fac3b665188ac84bc1800000000001976a91403345e99ec235ef0a1be2343de4bdec26c13ee2f88ac96fe1000000000001976a91497ada3bc1a5c68f2982aed94df4101d93ec1784d88ac55f83900000000001976a914b1232af817f908423f8df97a17e722cfa0f855f188acdbd03500000000001976a9147df3535c88ab3b3c0e81e4b6ffff8963175419d188ac5d341400000000001976a914202bdcc00be59c06a624b8bdf9419dd5d50e92dd88ac42d01a00000000001976a9146e6ab517233a99b81a4a9b597cb99ba8e2aa048988ac35ac0f00000000001976a914235f446b6ea3f6b3026f9a7523afdf058d863c1c88acb9f51400000000001976a9144e23e70a45a0a4df88936d0a7e6c152144bacba788acc8a81600000000001976a914686d0413b72f94fd03e2c2cca54105ed4aae6f2c88ac9e5b5f00000000001976a914d82d2eae1401086b5944442867739e35d921343888acf7501200000000001976a914fd4353d682df7c14c6b36fac7429132639d791ac88ac75091700000000001976a91499b711fd899912326fa4d3c79381443a5d18b71b88ace89d1000000000001976a9144a652e609acec2ea300c90a30bda2b6557d9b61988ac308e2200000000001976a9146c5e6feb7b439a2754fd797be88ba9d2369b880488ac10261500000000001976a914369c3b4d5e51cf64cd2b1c3410fa4f0f0a19b0ec88ac01731300000000001976a914b64b9a3e66afaac929fac3cd52720bed4135e0b188ac150e4600000000001976a9147dadcd77d0612d7aa075dec042b2e2996814db5b88acba682800000000001976a9142708dcfbbb4ea31cd297feac4bf5e258b811126988ac65c71e00000000001976a91446315c1ac2a5c7181e095a2287bb4e369c68224f88ac19da9300000000001976a91463ff59fa5e384314df1ef86f044117acb9b9c42588ac0b951400000000001976a9148b712da6abb288264969aecc393a569c19d1766488acc8a81600000000001976a914150b84ceff76d9970d09ec27558fa34b6d7ae8b288ac58a31300000000001976a914cb4bb6bd1d8ded6b883aa76ab7e3d33e6973506988acf7501200000000001976a914d3dee17d914dba3710bf47f52596ed503e9db9f988ac37e96600000000001976a914b9074b11c39cfb7d4c3e10ac6eedfc86a2bf705588ac76091700000000001976a91415778e4c7c26d96fadc757195ab8fb82af44ed7d88aced2e1100000000001976a914f
ebc2dc1ff33aa2dc15bd40e7a1f1fd70842c29088ac06041400000000001976a9148cdc734f70c81611890dc155d24364e43cf2be2e88ac76091700000000001976a914f0cbefe3f53a552acfb66769747958250656ee4c88acb9f51400000000001976a914b5ae96a69ad039e674595b9b56f33697e24660dc88acafd31300000000001976a9143e5c214fbd03b6bb8ce76457eca21e51ef7ccb2988acde7b0f00000000001976a914e23496382b153dea1e592f5f25c1f755df34743d88acfce11200000000001976a9147bf3dd1d161a559a673c64669aa9e22cb25aa9d788acf2bf1100000000001976a914a05c69c331c84f6d737ba15543b39d2544d12cef88ac88be2200000000001976a914c2b4fd151d5819c180095b643a61c826a65e8a1f88ac30723f00000000001976a91405e181e427cde3a97c49fbe60153c97e1ffb695288ac06041400000000001976a9142244e2d883c193e534c023089fd66c18d31f56b388ac95395e00000000001976a914cd3dcc2bb25082d39591bcc2cafefc3f704f59c188acaa421300000000001976a914aa811e757a67e33436bb5bde6eda34c8ca3255e088ac03593a00000000001976a914b981ca6a2d5b25b82df244edf51ff7b8cb46467c88ac874b0f00000000001976a914c61558c15c0ac08e9275a2d0601a0f9b7fc22a8688ac41d01a00000000001976a914423a452a462f4a2e1ae3dc33ecd711594bfaff6988ac35ac0f00000000001976a914152328ad667723d8ae72275197b938906e8adc2688ac566b4d00000000001976a91407790af432712bf7699a5ea7f341335a3b9ed4de88aca5b11200000000001976a9143b5a5311b0ff83e9231812d41a456fab978078d188ac3fce1000000000001976a9140a166282ee5994cab3f665dfa444c748b6b0ddad88ac4cf21b00000000001976a91462bdbe57b8fc9663829b799f636d31d246f56c9488ac01731300000000001976a9141057a1ac9ec36abe64c33b3cd8bab569aef8483788ac9b8f1100000000001976a914b1fd203179fce53dcf57788bb4fc5587cfbac32b88ac68c92800000000001976a914b278477ce091ffd3d4c03a7cce07c7072764b55988ac58a31300000000001976a9145c0f063d198feb0f20bd27af5ee7c0894a237c9c88ac445f1100000000001976a914f43494e50ad6400c8830e600a9ecff269e2bb0d488ac49f01100000000001976a914221ed99c76a1b3d9c51cff87d2d2026d65c433f988acf1a32e00000000001976a914af515568bd9cc600267050c87bdf16248b0a9f3988ac85bc1800000000001976a914e62c0556cd89bdf654df85ce9fcc00c54fef13f088ac01731300000000001976a9140ed2e04d3e14edfae7b1f14103e86
5f7e14f2ac088aca0201200000000001976a9144427e3bf11c196dd849d57d661fd406747d8123188acfce11200000000001976a9144a185b5a8cf52f98789420aa309e4eacc119666b88ac7c645b00000000001976a914454be93e056217cdf62e4a69631b43df21245d7488acd5cc2100000000001976a9145d4d8bdbc4dce907530474983d4ed23f73b6f15188acd0ae3400000000001976a914ef5caab3dc646bc235df5d82e5fcd397afd6c05388ac8030a900000000001976a9146c7e8b6a2d660855b2ee317b09dda22f4db543e588ac874b0f00000000001976a914980430b7d581045461b9e455d41ef84385a04db588ac5d341400000000001976a914c018833ca8e06695ce4b548612719222def3d0ff88acac283a00000000001976a914b1d7e6a710723efcb6a5366138567175f7836c6788ac1fd91600000000001976a914c5cb2f0d16da83b368753504cefb1f752949054988acdc434900000000001976a914c205f9a68c44c66ee4e248c1940cd93987341b5f88acc78c3300000000001976a914e630d1a5a92a446a75904d96e30d7b1797528d5088ac693c3c00000000001976a914df410378c57a85f9aa4362f7bdd33c9f0b935f5488ac60361e00000000001976a9149a42fdbc93c2461633a07763bcc0399bbaa3e21a88ac3a3d1000000000001976a914ed45271c12daa6a7b9783fac6acd3292286b05d088ac34902c00000000001976a914670e14b466528eaf02341fb204f1b54bfa050a7488ac9b8f1100000000001976a9140ac0f2abb2c641dd7da74a76aa91396e81da0df388acd03b2100000000001976a91440ddc2fa2800f26db32c25d63f6671925381dd0d88ac5a893a00000000001976a9148eba92e697ef86d7620489216486d21f0beb850888ac2d703500000000001976a9149a701c5b5445e79a2104ba988dac42b19b27fc6788accc746400000000001976a914f3250dccc0a9445c76b13d19f213e3f5b292ae8b88acaa421300000000001976a91457769fddeaf1e2a20d28387a60b1c1b23c7575ec88acafd31300000000001976a91447b4538867e8cc60599095dc84b6cb210c1e63a788ac9060e001000000001976a914da20e7835c5f5e624a4afa7315945a30f5f1f22188ac61006200000000001976a9144c6d4c629c373944cfe12b8fb2f3daf6348259cd88acf7501200000000001976a91476a14f0edf09bad176b52337791bd069ae4aec7588ac8c334000000000001976a91496ddd2832e15ec1b8dd79c6476d06c44b30c271d88ac5ba51d00000000001976a91418a984c63d5750123a2b01ea05e1ae54f662d96188ac3a3d1000000000001976a914a33fb2cba483c6a2fff25ab271dc454e78e3117f88ac49f0110000000
0001976a9140b1590da9feafa673568242375f5621e82a067b988ac15b715000000000017a914eaeb0fa81210523a6d4b43ef68b8340e4444ed5587f7501200000000001976a91445b05403eb33050021d8443b7eb3725a8df0861688aca5b11200000000001976a914259b316b2482cda4428752cc9745b3dbde190d8888ac9b8f1100000000001976a914a358cfe0e1d5f97dcfa5b5908c7dbe7755b943b688ac0a951400000000001976a914a3ae0e6376c32f18fe038f521123d10c91d6e2c588acaa421300000000001976a91487e8559be06bc7c65f9cc1bd948fc517a5d83ceb88ac3a3d1000000000001976a9141515969d88427e77fd41ed6a692d99928d4d340288ac15b71500000000001976a9145d61346ff3384ab606f21989bcd5923f83b048d788ace30c1000000000001976a91496c69132f7b58f9602a961568a5ce67bb5a7feb088ac651e4f00000000001976a9146f84d2eee8683deffc85829c3131df48550bce9f88ac07ce57000000000017a914574e1e79facfe7d2e3aafc141537aeb61700964987f2bf1100000000001976a914f3c267c33295e57bae6fb8ec19ed0b0f2c38bd6088ac14f26200000000001976a9140f883d36530c9f5bd89b182e0c1541abb9a402a288acc3171600000000001976a914971974e057d375aa879bab12f06fc7dcaf2d7c8b88ac0f261500000000001976a914d0c9231220cef0a09629433b0eda8a460c4231b088aca0201200000000001976a9140b101da47c8098e6e80a2b008072224fc798c16688acafd31300000000001976a91411bc71663b71d83823d5173e7538126c53ae72f388ac96fe1000000000001976a914e99a1af5d740caefab1daabd328fc1381891286288ac1a481600000000001976a9143096a19ed75911144dd89612fdd8332c2b40e0dc88acaf4627000000000017a9141294d6c60bbd47871228a3b8b9256075f01f9cb287e30c1000000000001976a9143a6e4451b05d3213b7beaff49accf4b26957c54688acde7b0f00000000001976a914140c7159449d22a8c82964a84780ff65270bc81288ac8a4d1900000000001976a914552053642f3e9bdc17ac9b198fcfc94dfdc4e81788ac0b9514000000000017a9146500fabeeffb177ba84fc4e12ab1af1dd1e62ae4871fd91600000000001976a914b595b60b9a3a112d9a0a090d49936a45c5db673188acc7e36300000000001976a91454ed3552ef13cea04b61545dfaa351ac7fa1afea88ac1fd91600000000001976a9140893b1fa8b5ebda8e4360996dfc08e0bacc3a8db88ac35ac0f00000000001976a91419017329d586359d3293dffb1cd0a811e205d12388ac3fce1000000000001976a91465429447591635ae47d3b2cc857af4ac6
e84de7c88acbd6a3200000000001976a914c0cd651071c926ab80a2f4ce67fb901b8e296a6e88ac38ae1900000000001976a91427f9157cefb5f6036eec804f50937446c7610c0488ac3fce1000000000001976a91429ccd5d4a4449f11e8a28b4aa5b5ca5b127e852588ac8cdc0f00000000001976a914ea23f560f9f3ad1f15f3635a2a460fc6679d586488ac06041400000000001976a914b6d763fab13daf9326752aca816a866625a3927f88ac29fb1700000000001976a91491ea886287c466b3ad5775a28bedfd40b899d3fc88acd040b200000000001976a914310b9e8d5b2a5b110d59fc0f0622ed0d8402a74a88ace02dfd03000000001976a91496b6232b5ab5be57b291848c108b9342bbb9adbf88ac62c51400000000001976a914f5daa94b144c590f074b61cf871b421b2d6828f788acafd31300000000001976a914ca27a5371fec322984faf6df51a5622f92387e5f88ace97b9802000000001976a914e81fef68bce492d7e99dc7926beb0cb69a522ae488ac894d1900000000001976a9141540fa9420180d9bee4158f6079b9159a6b9ada288ac60006200000000001976a91468f44935b5bcc9147802f4cd233a5138c3bebee488ac5c341400000000001976a9140969a633efd4bd4267d29953cba474e713a1609c88ac864b0f00000000001976a914fd6f435f4df41c9a1d3eb70bd3926d595841fa3a88ac06041400000000001976a914b52bc6fc4b5375c4e7139e1baef21588cc1b107c88ac48d42e000000000017a914b73519cb04cfb6a2446a8269f5743382154a36218768c92800000000001976a914a2c5261d37e900d784d034f8d31169c100f46dfb88acd3b03e00000000001976a914321f2aaa1046cca83dc3009ba66a9cad436f9eb588acaa421300000000001976a91410f9fd8d136c389eca3ad1f7d1e0b64e62a0cdff88ac707da700000000001976a914e8c73f44c2db3274ca566356940b48f0cbf6f86e88acf7501200000000001976a91431324bc0875ab26e4fb79efe4b63415916129db788ac06041400000000001976a9144e4c1d99ad0f4df79941d76fba9738cd899fb69d88acfce11200000000001976a9149e3b1656f1e84d444048b715678332ee1b5cf7fb88ac8cdc0f00000000001976a91458a8269c35dbb56ad6dec93c64ce8852bd928c5188acad441d00000000001976a914dfd74c559658d53b60bcd34aa604de26a85213e988ac36765300000000001976a9148bfbc5bf4d404f84e470bc78de49f40f60dacc6888ac162a2900000000001976a91432d8d5de162fb4a2465fe439855226a714288f1b88acaed31300000000001976a9144fefeda9d8f3d87218239710be41757f103bf02c88ac05edc100000000001976a
914b1bf31d55d809470d3d1d54e8f8c5fa8458bf1f988ac6bcb3200000000001976a9146000b6f8adf9c2984bfa5e9f1a014089e549bc1e88ac96fe1000000000001976a91489673b4704ea943a2f5d0d0e8d8f48f1bbc4c14b88ac5d341400000000001976a91428429656c35e352c2acea39a2eb4c4fabd92cc7188ac21bf3d00000000001976a9145a1990cefe87971484982f32fb78e103a67c5d9488acde7b0f000000000017a9144c49456da29bf002e2bec36dbb10aa1d8e38aff387fce11200000000001976a914547fd146003bb3be6fad811d97a78ef599897b0f88ace89d1000000000001976a914f0079e43f3d371dd303336e5e5b02ecc9666a76688ac802b1800000000001976a91446431d72eff136cbead309dd2c42f00fd6515e2a88ac5ba51d000000000017a9149137aa91637ecc8336d20996c05f206eb5a0c4ad87b4641400000000001976a914787f01965810119e15216ff8eb8d7dcf76eb39c888acf7501200000000001976a9149ef13578f3c082d5352a8216210658710d4e638c88ac3a3d1000000000001976a9140f9581d134a25248006c820189561c9a79e1db5b88acba682800000000001976a914efd13fdc9e6821f894f73fb980a5ad8caf44cd3688acf8c32500000000001976a91415367222767c23486f79b1a51127312fa8d72fca88ac96fe1000000000001976a914dd0b0782e3d99ba115d7477ec4397b3e275e42ed88acbcf71e000000000017a91497d68a1aaf2f65729153d503c9a39b608dc8366187be861500000000001976a91466aba4bd83574021008b787ae3c521740bf7877388ac4e811200000000001976a914afab2045b559adb6bd3811a9984c6bd9408c875988ace40e1a00000000001976a914d46a180d51b5eb82ceb984a40646064fdf3fc69088ac96fe1000000000001976a914b492c512e036bed9b30a45219588377798bdab0388aca20639000000000017a91428a15d04373d73d08e7dd7ba0a84b03283b4f54987e89d1000000000001976a91421d2628b70e429234b91ffb4216648b17c0b465f88ac874b0f00000000001976a91487cabe292e0ffbf7e741373bcb8082e73305e80888ace89d1000000000001976a9140387fcd749753a50b5b6cc761e6363929fc0454988acb8f51400000000001976a914fb397bb5759cb724ee03eb0e1572999c2ad5ea3888ac88be2200000000001976a91432b8df84ba6299828d82fbbae8118ec2c237778d88ac361f2300000000001976a9148fd8a961d48ba5ca1a231825868ae6a9661442e888ac6fe91f00000000001976a9141955f23d429f0597ee2e350d729a60b682b28cbe88ac149b3200000000001976a914488d3b7dbac25d9e84749e81f8d4348e9f485da08
8ac21bf3d00000000001976a9147d1347d2b3ef4354ae99d3a6032c8feae038437c88ac89313600000000001976a91494215043d11f80f895cb1d6dba3c8337858684ef88acf7501200000000001976a9144aae6d17e616a2836d3636f9b22acf3c20b3a89288ac1bbb2900000000001976a914710feafcecd47ab06f489640592dfc43222198b988ac37923600000000001976a914a4c71c144cac707f6e7ee9d3fe53ad8e81f022d188acf1bf1100000000001976a91491d8ec967c114c4fb146b9f8fc46208f7e8ab03588ac62737500000000001976a91406786a879f55c77b9085b9a1e9333e469c1b896d88acf7501200000000001976a914b2b9d6b0f3dacd7555343eec942bf304db9c4ffd88acb6661e00000000001976a9145b705bd64b29cee9423bc868a88ae7a0b76eaf6a88acf38ee600000000001976a914baeec0b2f64c1701774fd2ef7891ca6612a1209188ac51831c00000000001976a91438bc18a6f08570be33d87abc90149707fa86530b88acdfee2200000000001976a914e8f575d43a916290ce7dbe48d243c9798c2e53ee88acef143800000000001976a914b4e5ddf906a98aebb55165141e2dad27d07a7fb688aca0201200000000001976a914ce7723a4d45623e937a4be8838688a12f478fbee88ac1abb2900000000001976a914f22f299f400a287f0b46a78fddc03d2315e414b088ac25dd2a00000000001976a91454141457da0df6bcdff7b7b7a1906bce4d255cb688ac638f5800000000001976a91408e875e92bb77d4d1b56778decece027ad7e874f88aced2e1100000000001976a9149ccb2f7b749041c52ee84d62bafd122db4f3c1a488ace30c1000000000001976a914935031f62446145c845a22a7554e457cb9dd2ccf88ace89d1000000000001976a914aae9ad50a2a6c40036856840c2d1a01cde561b5488ac71cf4600000000001976a9141f26bfc08f7a020c2139630e6a4fd8a3f58cf3a088ac06041400000000001976a9143a780dadb2176c7f9f963dc726b29faa815dcc8988ac9b8f1100000000001976a91460ca72d0b14c340cfd856bcb8e11186b39fdffe988ac1fd91600000000001976a914a3c38ab718686dcd3a79dbd52681ab8a9323b51988ac9b8f1100000000001976a9141405c98ed4cfe338718ae4d6816a7f639f4e907c88aca0201200000000001976a9146cc5790cda1333b465d1791f68de0e0c7aaf6b8288ac6c3e4600000000001976a91490dfe98218f9f106cbcdeb4d304367dec18d14e788ac2cfd2100000000001976a914f6665f192030dcef13207005a995a5ad2378e0d188ac4e811200000000001976a914036a546bb687270b49f06cc73b1beca23e74f0d188ac8cdc0f00000000001976a9140
00dfa96cbcfa1123b2e47cccc02aa9ced268f1088ac445f1100000000001976a9140283ece05072aa7347b13067bd031ac86744bfb988ac42d01a00000000001976a914c51f6f172d074b9374a622f0566df6eeb8a0d2e488ac0d7b3b00000000001976a91480c64ac24b908efac5ce8b293ed48527ab3a8f7888ac331d1900000000001976a914e7e7d2478be0aa6ec0cab3d61286104d250902db88ac797e3400000000001976a914e6daf2ef2ec359ae53cf8b91d939a757e566666088ac29524800000000001976a914a8a61d9a26f187057d8352055e4f0526908e667388acafd31300000000001976a914a1f89a1b95eee70e503efcdbfc1c2cffdfc6628a88ac0c082800000000001976a91468168b61041488079ae7f2a28928be2e4970dfd188ac8a4d1900000000001976a914c91a3636acf9cb9f6d22040fb4c198666e949f8c88ac802b1800000000001976a9146745e62ee3bea2d18484ab6db681bc67298090fe88ac192c3300000000001976a9144ec0ef56cc750a78b8bc51fde6dc9424b6e4bb3e88acc8a81600000000001976a914a1772a061c90ba4de6004e3fce9c1bac87972b1f88acb9f51400000000001976a914ed320259144dc4fbf238a5bbf718e87c0037dff988accd391700000000001976a9149ff0084f96f08be53142f627a3284196c80203bc88acf7501200000000001976a91424b1cc8cd620f15f405f668d9af2c2a620f8ca5f88ac946f1a00000000001976a9143ff2c3a93152031afe84f7c1fda10ae68ed740e788acebf64a00000000001976a9148e4dc41ff273c5197c5e156690d1052b10a0aa6988aca7973900000000001976a91457b4f8277cc400d64293c0a1b755e1a3b22fd39088ac90a85d00000000001976a9141a5a629f870674b06a4d2d8861fc6b7e929a7f2188ac5d341400000000001976a914b577789a9cd381afef75ebe40c93d1588d0142c588aca5084300000000001976a914796f3dfaed1f667b219789429660020d8302af7188ac71781600000000001976a91496076d6ad853c7c63917694b862dcee9fc2a6ccd88acc91b2a00000000001976a914975d769547163536d2a13f75e45f09f8dc2beead88acbd861500000000001976a91402a5d05b537ebe3c261f5aedbd99b7beb311e80d88acc3171600000000001976a9144b3d92cfbf29e253fe9efee4b4f9f7d4a5824e6c88ac4e811200000000001976a91491576ccec06905f138721f4bc31afec3cab7329a88acb8d93100000000001976a914de82497f43b3a79cb7af5c3750c64cb6bb7590f788accc1d3400000000001976a9140bcadb7ee5dc0f211dbf32c74e1ce90f0010d50d88ac874b0f00000000001976a9143b8419d59c3d686496aa3ab5b63ad
a8ef0803aae88ac4d652f00000000001976a91431fd33d7e699f35ff69b52779c3432a105490e8888aca0201200000000001976a914ea6939c47d169c479c377376a3bdf9827f4a836a88ac01731300000000001976a9142c79abf19fbe67a290a2209e7294a2e6d0007cac88ac916d1000000000001976a9140ab4c7058b94f9878d5c1b883450e5b44f0f1b4188acde7b0f00000000001976a914699eb35d3f2dfb6bc4e978cf5f97379f305c3cae88acf2bf1100000000001976a9147f40876342c65c38659788d05b2fab9822911ea988acafd31300000000001976a914f12f3364d32ba8ebc7ecc2d74b32588262cf2a9988aca9421300000000001976a914c3a64db70abf98802442cb5219108ae037a1faf388ac9b8f1100000000001976a9141a529a3bccf749faecb90b17698e9a8b3a631af088ace89d1000000000001976a91450a1248b25483671ba67c6ffdc0bba3969b833df88ac06041400000000001976a9146d5e82f3f19ca4c1c595035f88dea55ad055d3f988acfd542600000000001976a914b54fa93b47b991b4a5364c6f87513a578f92c7c988ac49f01100000000001976a914327cb6e2cc9b870e94f68d84f737157bee7cf60e88ac3a3d1000000000001976a9147bfaebd011e8f7c7cd627d24fddbf2aa244f5a5c88aced2e1100000000001976a914a59df10bf8d3a8a1a3b7a3ac99efcf9ad6d0672888ac9b022500000000001976a9146f9397a1131599d8c6e99c3f9cb6913227309f4388ac3fce1000000000001976a9144468860de68406410417f5ad312252827e4557d888ac99001b00000000001976a9144af58cf1526f390e52a470bd21457cbd5e6846c988acfbe11200000000001976a91426dbd3aa5ce67233b0438ec73095ef1ac393dbde88ac21db2000000000001976a914d0d129cb65aa6f912ade3af319021f904d55139288acc8a816000000000017a914ab7b46b0f22dfe32f64b9eded5f2a90c6b3f383f87e89d1000000000001976a9144972be56f824fc603d2303f3a7c18835144fb5dc88acbd6a3200000000001976a914533fce7419a41ca3ad7a62fbe4ab2b6bb8e67e4d88ac7f0f3500000000001976a9149e2cbb4c62dff5a3f507bb0d3a2af0dc2219b14088acf5c11b00000000001976a914e5eaadca3ff3a16fe36ba4905945a40e76a4048c88ac3fce1000000000001976a914cc85155d7fcbde0f4ceeb6b19e75f56adc8451b588ac35ac0f00000000001976a914bcb1a95fc52366670c52a3e43d3330411ca78d1b88ac03866833110abfa970fc784cfb801dbd22ef38e087319ce195b3f4bc02bceba25bd5f0017e134dd1dc75dfc38cdcf7496bef7e25bbde04685919c897757d95c2bfa4b7bd88bef71964e974f1a
fdec41ddea62f4cf64ef3a79f67ef4b5b675d6ca74c8e78640f1bff5b3148a5ca94ac0a5913ee47e1aa57ec816454d3e950da9fd06aa4a3b8124bc3f6e5205e75ac33fe1955db14780d323fe14e2b386e9b5bfd6a1c2d41b250ff2624cbcbe587cd0487d522959058bd4f85243e0e2f02c6d04ed19788572d026ec2e559ec531b0f321978ddfbb5ddfc5f751d30e6793d9ec299366dfa6eae9bbee0980cee87ee5fd56166930a62a08b44cd0055a64e68feb57a7693bda6f2cb81d0c3d4a228d9fa67c4f8ac8e8081b1ed8e5c07f9b63d7e3749c301e63bcc690d9ef91f4a1e281cc92104690eb88d9803fc459bc0a57aba06f539ecd212cddc9f9a0f8cc074587e76378d1205bee7efbe97e4aa73629c1bb10dbc53124418a34997c642dd63294acded0b3bc1ba8cbc43b9d8564fb601b0332017aed04dc5d160a9e11b0f3b243591a74d81518fd5c52768625808debf3f00c63d6b04c2ca355d061982065d85e9555c9e6518d232f51a474b6b77878b8b27c35c93fcd1cb2532489df5d783572511af0e015ee9977a093170feedeac0487e54d7b65d4818de0c2cd82fcac933ae402d306c523493dc4f68a990ee733df2a136a6b97c6c691fa490c12767c4abc1bf9a1d70627f841a55828d9cd28ce67ab45ca748cf51dfdac4adf2b87aa33f506533bbb912b350a9cc90a06e31b5a0f20ed3d3940f284a933bb0c8e57e04f74d0c97a1cba5234949f18113322cac6a6c2d4dba651367c6936bab018b7c69afc4c1562b253aaafee3a5c3d636ed45d5fb11e86c4d2cc6b43705690dfc9dacba24c5953da85ca380b59a93b81c13f99aee5157f6c9b132775344d8c69de434a517b419d8b3d2fbafcc6c938c0db8b8cc280e91668a9347d7a1780262cd8de56dd401894cfff7b9a4c75da856cadddaab0fa508e62cf920079c1dd81e32dbde50932fa7fad47f05994edb94f63302401b8477b1e190a7bd5888b16727dbfa2e9881c9c8d8e89a561ea78001ae86011b0a6c1e18776e2e4748858aab6b6bc4781131471e67e0981822d5bd865635ec956b999da293878c488d2d7b72131db603fcbc58dbfc9c81daafc5188e75d5ed6a3c523db29c37d5f02ddc25c963fdecd8f2f744708070ec1674b0cc008a9d8dc338db87b1b9067683dc2134a8389a7cf44f8127eb5023f15d3fcfd2d0afc7f8dd729e8b923345b04baab6e5f7c9af0fc33275d2164e0d8ea401e53bfffab56c45d5f21bbac9590661455048a6d68dfc9fad1e8648c592bcca0493f081c5cdee4a29eb7f6b18be26a4e2c55f468929bc7a953078906b0a07e09cc228bc35dc983c21bcf01e8c61e12bcdb7c6668e61b9319c75a456c9d35818d74c0fedcd2f000000009f600fe22c07fd60d49559791edcf74ecef542f800ab0
adcfeda49684655044caf7aa78f475d354d14c5fe34c5b4afcf58d8b4fa2bd36d3e058d37fba0870a69bc22db11f4cec38e006f39c7187231718f46b48f7dc2ae556c6ed32b80c08e392499e0f491a812f818fba3de4d4ea6d279d9e97460124f492bc81a4d56bc6a3005c82b108386f10902f94d245e0df76f8b58d1bfb8fbb870bfa4592f6065b9d1d02e3ec65ed5b89e7b18379adb33bfe3981c1b9d96c3e8111f791975fc678cddd01c6c1d04763f33c1349d989356eb21a5625793d76c620619354418a7bab07eace29d319420643374dcc96d334555738a8c753ef690097923b19f6686c3a98021e9d3e2e56eac50a0aad0756c57af06a7f9cc6a28d648711be3eff91bed4bdc94b06b42174b18068795f350b5b9f8a29d3dc38a3816fe887f80dfbfb255885f0cdd33ee02e72a1baff294d5464c31e56aae698c6a652c84689c0e76e1d803a371c390b85ff6bcebd687a28097967495972183d4be90f5155ffe2be9c3264195c081759b7ae154bd65e436f7afca010156eb88d85cd7ec66910a58a24509698f8494f3d294584aba588904bbbd7f939b21ec5c08bb0a8a2e94b701d49d817a496f0b52beef2cedd8a6a1f4d119f3c3d8b03fc71abddec4474b358a4fd747326887dd39119ca87d5e208f980ebfaf51a2003f15bb1070b246d71382b119377cb018ea132f79e548666aeff872066901914336378caa830c53c370c4b44e9cefc72116c61322718a700c094be44463715cb24a140a7442e3f29697b598ea96e1facc0a0ff086b8a1a2c99a3745591447ae52858708997e28503f4c3177af600ea42ef5f84a40fe7f19592f5a48f6f644514ac4bf3e3678e89160cca9405185e7ccea9c7aada93fc0d16a586e1059ece86e432e8477058c28dcfccae9f655ec3d0b50c6814f369aa34f76f7956e41c6f24ed02479fb1eaeae652516919dabec83cace4c8977485d3f245d37b51638ec44b82d89a435d5869ca4e314ffaa3638c30498ed5dabaa9ffd5f351c9608067465a59e3aa1cded617d53e6c36d66a8eab96c6ab052dd99e40c1760e55c94102bd7b4c11d104656a4974b4b5476e711c6bc08b93c4a4398deee0c37f1cb1c4bb8f4d321d0c58f07224e25c28208a34c29226699df7b048501b2dd4bb2f36cf535a77daf49f0c3474d8cf51c9b8fc02a9091a4d1f8fed04ee6ca26663707c478c4032164948fa3b40996a7530e2334870b114e18c3dea917fb57b52b704af0085b958bec9b5b9df6d227489c5ed8ecd8b2250ac2c667fdf2608f6c25b079fcf55d268ab6b2cd149e65a4480028174d7160007e2e86ebb77d0fe04fd450d808e3cbeb22e6390cc2af327b8e043e13392541ee9b1cc37f6b83f6704ed24687ed9cb4e3e33c9604c2ef647257371a0530e5dad
1e19b40740094df42ab32c3c6d22e1795233a5660eaca43139b5bac090b6b57590700 diff --git a/zebra-test/src/vectors/block-main-1-687-113.txt b/zebra-test/src/vectors/block-main-1-687-113.txt new file mode 100644 index 00000000000..b324b5c7b9d --- /dev/null +++ b/zebra-test/src/vectors/block-main-1-687-113.txt @@ -0,0 +1 @@ +040000001050bde9a9d19bc62ff3658f913d45f0f4281df3b48046866725c70000000000ec67050cb9403acb2711cfe6979696620d82704d3d77e72cb0b406052c0b086fd9edad081c143ea88bc168bae99bd5312d75115dfec94fbb2642236154da07bc455b966252c2011ce6b4d8d30000000000000000000000000000bd000000000000000000400072aefd4005005c22f4c112be1bd420f2e7c4a4de1d80a85207ea03e711e4c9d4c4d5a9ab27729d4993921be89b03b41655a7303dcc06b7b643a22da4bc1770c177c9b33829d1155bc6a7f34bc92bc81fe17ae632ee6df87903021de11fc688aafb7d23d9fbf9df3242950e5932cc182e1a64be1a7b63cc78245b59ac9e8fa1e6bd1cdc12f2362cb745723b4878a19b214bab3dd2a8afd0d917d912632109ef13d70548ba81e48782b8ab77da2c032c3098bd37666dc69e968f95531733bb193fae0c11e3c8f9c4ce34027d48e22b767c3be20c257f1c4c08d0ebf8f5b8019dedae51df37d23fd1dee437e13f138b0e64e1742a6fbb1344a3d6759b69af2cb20fd606f26e07e09fecb5d44c712d7b5fcc12c5f3b68b6f3152cebb5db88d0de6c0072d4355f74df9fa1aa4201010068af124834dc390f394b946a90d255eb7270c4f2bacd26d1a1b97a3a1d6c252bb2ceaabccd6c71f00d5b388f70a50fb5d0731211f70b6dc7605f3853c0f581b54d71653776671c2ae86244299bf236e80661e4b5a6fe58ebfe3ef198365e4304ab19da8f7d96e5aea87dfb0d991a5d48656ad0f5c6e1f43f59e6fe3066b475f822f0363f3db51b3cf660530fd9c50425e0e600e0f65cadfd3526ad39c39cc3c49471b9f3ce922864ce061c917b706b31462ed623b6a74a2fcb06772ce2ffa07a07dffe48fd837afc3202a8b5f348a1102da377e3b69920bf35db08498d6c27ee0015c49ce4ed512abb9ad094deee37c31906ac23382921c334b16c7a63c6ed6982f386c367d33f5948a7e55dffb5b4a43bfc66514557563d80669d8e40a75c1c95ddd7409c299babd52bff356bad22e09fa4ec2495af893640e6b41d899095addc880f0f47ba3778c4474295b1d1b8b5370bf63c905304a1385cf71343713b97afe351c9571ca6830187bd128720e42f9df1232e21c47bb010b21cc629cc4afe14e81bb7b125bd99f0fd806952f139f72a06f
f521cde9b60e70e80629f712d0f5660efe8271bf84e1a6ec3433cb8cf4674fcc37bfe97815becd38398b7adf5a8e649e1033b28247fff9f3480bf0371a628c4811fa1de44988da17a1a5ed57fd31143adee54d4b596cf5f3464efa4f07b2ec62fb55a92afec1f8f312c8309d2c333022531a9a997697da3b56496ec120976d1f1d36657245b166aa3075f84da206f6e297ed0eff45b2ab00b88cd0cb79412e2b26a80f157e29be0b5339b25181606a19ee090020af412331d025d8ea5cc897a3d49633cab6c5edb696f58e1e413c0c7d9e60c3052a93d8819bcfe2efc24c9f742607608c136ca2b915494b431335f414657034124a3b1caf8e0dc7a55085b0e3f309ffea5b4a17731d32d029059771e4a592a3aad7f2ef6f3c1e01986c1289ac705c6504dc605b439d1868fe227a547b2cf97c6b2501b93bd4f39c5ff3d1dfb58f02da4012a44d562d752825db009d9b3388fb68e6f32b4bc4926e8f15debc027186688ea3c34d6f85b1505513f52229481fec014e484fe1ff3102fde455f4f2d456871e1df57d4384035063b6d05a4d3f258d733d03cb63c69e6d76a1680c18ae6543171dcb50e2c185b08fb3411fe74edc511b6b524e5ea58b9b508f2686847e029a72bcd9dfb62249bbf47e6e3debe63184b2d3f01b4561ccac8f630215d2f18fc414d025e4117a5c8cfc11cf46df918730d9fa0736d6ad9dc5c34324dd4bc22df4e87572870cfd1df826859cb6a4ffa0dbd0c31d2692cd5f68483ca3ef85c16dc091b46096538259356efc6a7a8cdd02dcba247181d2985f33313be252b9ba66697613d2219f274174707f45ae1ca7be63f543d2e2a8bcc09116532ea6580f0d750381b4cdbe29a3d5ba78afdc692dcb0b5be6eda427ebb255086ddf401d70dfffd84d0a050000800a27a726b4d0d6c20000000049be1900010000000000000000000000000000000000000000000000000000000000000000ffffffff050349be1900ffffffff04ea9de70e000000001976a914fb8a6a4c11cb216ce21f9f371dfc9271a469bd6d88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c9087000000050000800a27a726b4d0d6c20000000070be190001888a7d0474f328c5073836dbffb0f2a74cfc1c8c8ed33525142c3693a0bfa55a000000006b483045022100955ccb6e88cd14327b8d34a4ec2cb957f217859e44073a664a1ed9e2af43c39102204cd30d92670620955a99a0fd2dd0ef32474d1999170a1aa2f20b7904f13fab23012102d1fa4e08b6e9622b06ddc204ae4de7b280cb8ca8
05f6b6eca453b368f7310b6effffffff0238462219000000001976a9145cb734a2e6aeb4a12b515fd05cca9886fe9b835288acca8e01c0000000001976a914ab44c888b255afc402a3155278dc5fcc3f73d33788ac000000050000800a27a726b4d0d6c20000000071be1900013d9df03181fbd9b85bf86d0837838e8e70bc5abd42a6326e491b99a3fa879433010000006a47304402204193899272d6895fb500fa99718df2d120edc94d9b1bf703408c26ce628f9301022010208b02a019a5082bf1e7b001660296f8033f35cca083438a0ae82c00a58326012102d94ad3dea436a4b1978cc8056dfba00f0339b053d9f29db5ef2252b911dadc4d0000000002ec4ff205000000001976a914abf774f7bac868b1c1adaea7423ac06f2e55d0a088ac3470f905000000001976a9147715466809c5f3aa32acbbd8caa402df0defdd9388ac000000050000800a27a726b4d0d6c200000000000000000ab5fb59ed0335ab00ce9666f562e1df8360206035c1dfe628696c8a16229c0288580000006a473044022043acb8060d0fff6ca04314f9caa8b61b4054a65e0fa245746f0a6513a4bcd96c022052c3b1a877ffe468e623aab46bf0f8b6d84cfaa3f3d6f40c007f4134c74beff00121027e8997981a6bba0b6eaa7bee18aced9dae9274f4683a09e7ddcb8e734a8f0b67ffffffff995af943083ad30ec061d5796898b39d5e483deb8978e575249bce0f165b843f140000006a473044022052771d333cd355299bc397ce54b4e49da541f4639ccea7e50b0050ea3c03f99a0220337003ab3ec4a526f6a93d2495057554c6e2bc339713645bbcc896e0ab49520a01210385551d1e1ec5578eebf68f6d11c54a65b572e422dcba996453d80613a02b6511ffffffff808762b37690b5d8ea2c1de05448c14a92ee463c2bc245cbd4ece956a8918d89130000006a47304402207c0160eb2eed5e07bfe6e6b63ef9433f87b38f005bda517af6dac38703ce1e8f02202febe3df1840425592d2ec74e66a90f553128bed797a3c288ceba2ffaba4754c012103075c15170f9fb229e296dd1b4064b87ac72603bff22d00f7d458acabc7e0f271ffffffffe1d23420cf7ab3ad812d4268700f8c350298a2d74656f390e097c21c3f7384c77d0000006a47304402201d549817b739277a187a81aabce1e465c42528a531acd2a147e3258f18ba846602203610c5e62447a69ccc031745b10113ac22d0ca0aca27cc18d7bc2aefab440a420121027980718b0451e87494a93c3b32461ecd4ab3c4245ef6136f5d1c94a403855914ffffffff28dcd00dce9b52901994c4e50f805e721cf281619b54ae5b3cd78c3d1fadfcdf010000006b483045022100cbdcbbd85c151ae18d9d381a7415ebf4ba9dac9b
33b3956a37f1df4ff87fec8d02204bb6536dbead9537963186e895ed91c77a00159f2b0148190ec37d8251763c3c012103e99d5d2ed04e15ac6bfbbaea0e08963ae783087144d74a9f5c2588d109ae7984ffffffff1e1570b8177b4ff1db9560af54f76bac836cc133b01da6b7b96a151bc40d2c06fb0000006a47304402201cb445fe94362788df6a92c43f2f76da0681f3f62df85ddb93041eba4ae075f602207a53c2834dcc9333c1bab78b1f25801a604ba17a87237410adb4913c7bb0447d012103ecce3efc1c9e1f6d942b4132a817fa4ab1c63ec4af6a862f1cd4cfff8317b3acffffffff9030a91745a48b206d1bcfabaf36609923faad745bc468672fa15c52d5042cf03e0000006a473044022003962f5b5de77d93ee90922943889b8cd1fb0bfe09e916303e9df681e69ae2a702200e365104a73edcce1c5edc89e1bf5f00f02d3454465fdba483a4f3bdfbb5cfc9012103f1ccd8a305edb0c43bb512f8bcadf52a77d3bb48773f55eb4268a9ac6a50504dffffffff70a55f11c089269e2d2e992c99b89c1edfb7e2bc1add6f60fa4665dbdb38d9e8000000006a473044022055364ee6bd71962631e072d3c654faa5c6c5e4bba0464fbbbde529f7ee8e4fd002204ac802fe5370b94dbd877e842c7cb1f5cb2706acee6d861527b2820f79aa47be01210324b0003c0e48113f72451c266bb574e17e9cc2999750dce679ef88980c407f4fffffffffe1d23420cf7ab3ad812d4268700f8c350298a2d74656f390e097c21c3f7384c7390000006a47304402203c11c47bd75c2555a0594bbbd6af1fe722527175b1d2f1e373c126873f4b19b202201c5ec23058f9c82ebe7406ee0813c4a9bf175641218bd62c301aa4ae9817e3fe0121033685ed67a38cae5ba4be48471b014b1939d53f4e9e090b96fe3dacf601ca72acffffffffe1d23420cf7ab3ad812d4268700f8c350298a2d74656f390e097c21c3f7384c7580000006b483045022100fa0c14fa2b6f7c379379759bad14b38a404e2b52a82b46b10e9a7814a506f3f502206640785ab6e41fe5dc797d8f7c34f2fdb880db0ccee407005b46da700d1c6c47012103d4ebd2cc011d473cafd9af884b488c57c1647c9339a2ad3a0de615773ec8d0a5ffffffff02002d3101000000001976a914f70b7705062c530c1cb54f7383efd9cd4dc41c7488ac17d54405000000001976a914dd3afab4f5b964a6f9c817e4afc5c4526aa5b3df88ac000000050000800a27a726b4d0d6c20000000070be19000179b5c3cfe568d6feca8e04e364bb87e3b391361808e69fd1177aba3c3355640f000000006b483045022100ec0791928e08d083c53520cb95a4c49f114741e7c018530c346ce0161ae2ac6302200da2921df7c01bc65f
0f3360c547f6957fdff028df8962b2f803d546c7edd587012103fc3ab693ff3944b869807ac49adb3b5494c4d5411a324c65592a7613d31924c7ffffffff02405ec28f000000001976a9149090d542ab9c9c478431bf48589f26a2f55944af88ac608b7800000000001976a91422543267268af703dcf5163a96de8790e991c76c88ac000000050000800a27a726b4d0d6c200000000000000000a2f2b629c5a7901ad080d8ccd7afa6581de31affb813d2fdc36b52cb0a75816645b0000006a473044022078a3c636f72b241194b8bc96e433acd2c6d7e9f8e2d3e6e0fb3e6806d5d9d4d602207e014310b007e442569ced397df45d847137ac51ed62f3302b8fd9dfc3b286bd0121027e8997981a6bba0b6eaa7bee18aced9dae9274f4683a09e7ddcb8e734a8f0b67ffffffffe8be89c3c08262adfd4e5cf8cc2a8a320ae6b632133006b855b040ee33f96b1f190000006b483045022100e846f191039adf5632eabe508a8ec6ae5a57479121692ec78aa3cce0eeef05c3022028a86d2942c0a44447ad889a2bc67de1e58f22c1b2ac988b61554e40089e002501210240c16478809f4fff37c343678a10480474a4822cc30b5ce3f7d3279c803abfcbffffffff8cb21e523c49df69e798d3a216e43a5392241544f6679546546d351c24b01a51030000006b483045022100bb4139a8f801aa5a931462dd5372c9ee3edbb6a06a9d5866823cd5168e3d2aeb02207043dbee770b46c10a38abf0fca747fa7eb8aa40de627deb3fd9c810c55b291d012102f878d6fa41ec39aeae84c6567dc9b1b4a571f34e47596db1a79fcde564b5225cfffffffffa83bf29d0ac4395d026c83b4160484f50daf0915984a1bf3a97207e605b09d4100000006a47304402206f8b718787ac69c5d4caf097b93b4e2646c9bf7572ea13b0a4fbf5d2f2d1809802205271b88e9f571585e8043af06e4e31c8552da699a954f739fff0c95ee9016b0c0121023daa5c1237e0c699aa1d7406898845b8c731c41122206e898718f54984bb1552ffffffff5bdf544d0fd1c66e6840553d01abe292db4f450153dac117b78ac28dc7e1a6b3190000006a47304402200fcf80baabc92ea465c09757bda613ee9c233f94cd7ebd69969752c403d394760220495a16ea2ca0a8b1d804ab68a8c9c3baa6acc588acc59feba45d5c2e41422ea30121030282cbf4a4c0dd219384040fa7d2e22580a72828af95b815ddb59976d40424b6fffffffffa83bf29d0ac4395d026c83b4160484f50daf0915984a1bf3a97207e605b09d40f0000006b4830450221009cc954c32ff75ec0db365b4c90bf9cede262d5023a3dea1eb0cceb871d9917d5022010502197d9275824c638fafa2111a0a1d9ae36d74b34fc558b93e1d262c510
27012103d7f7b859e6a1939feecf1449d7db7018bd2ced07d2509e50f2218e19f2b36e01ffffffff91b7b575488d5eee39a6e384d107caba4981db421ad7bcd4ca30aa96c79373262f0200006b483045022100a46652257d634c1f5a677b649e24ed65e78ccaaea952e1fe5f1f640f56f53788022006c4095ca0898804a568ed1a35fbfe22cd05321b0ef300e2f0ae237b4942b13f0121038f84877466f68b4a090250a3423c07d37bf7de1b483e03bd9322ce5642cdf48bfffffffff2a54e87d786a432f1ff5442753b3ba2d5e7dfb1ea68ac6a8dd7ef5094b10a55010000006a473044022036ece05aea445f23a03d8994730ddfbbc209e870b5d160d008809357dd714ee702201dcd01597310df7b271ea42c2b97eb7bb09c5d1db87ae76aed013943b0dbfa65012103e99d5d2ed04e15ac6bfbbaea0e08963ae783087144d74a9f5c2588d109ae7984ffffffff232aa4c6b3039bd7dd2d445547656124eac040c403d23bd504edb10922363b553d0000006a47304402205d28a40bd85a7790122d5176279300e97e402d0435aa22bc15e8f5e5cd51e69f02204ad718042b7e9fb84dadf6fae4c3cc213c8ab61478343ddb235e1dceb3ac4d85012103a2e8b2e49d4109159ef82639d9d801fa893d105b7f61c1f3f866195afb8091f7ffffffffa320f2c751a3827cedbef6edd5a2bfce678e366066a6ecf0961f2cf2da8edee1130000006b48304502210095f8a8d0e6aa33ae0e8f66dd16173c7cc7ebac5cb9722ddaeca5fa69a76a21e80220531203e252524fedd661efde70bd96063525a115659903cfa0b3f2ee074374e10121028cecb2f549c3b45dcd99c2fb11207699f87e7901df6c92ebb4ca341a95eb45b8ffffffff01af4ced08000000001976a914dd3afab4f5b964a6f9c817e4afc5c4526aa5b3df88ac00018e372d87209b934ec7bcc4f77511cdc0746a66652cc43cd02c93a5550091e9de8917787084cc9ded469977124f988a1de8624016866b32d3822859263cf56a1635d7772632a34cb55f45fbee5d2c21da233219d92a7a4c6cea456b95bcda42d51883576d92aeeb5df75a199e2f52d0ff9856169327be41f4c95fd4891e74c11eaaffa272ad14330e15da0e2aaae3feaff0e2b5d45443c8d9168557a77fb70956f3205002851c56d58f8ae6741783580717c31c9605210b596b34aee7a42f59caadf90074ae8ac74b5d19cf62cbc0090576531796a9b01a6f2513b302770dc70d7b4306b7e6033a2edc25c44f0696f19073549ad08f93c75abacbfa4adad14809908002a5d9c8a69387ab4e7ae5503c39803d483fb23f1b927d2f37297a1a81890dd7f5b742b05de7145a8aa1313b45e74497799ef2a7ed4c0beca781541a20c9270bb78048c172214a0398209d
6e5baa1a84aa06867c40a37c6a965c869b4edc49b76b41cd1715cca70d8dfe2103b7bc21880b38c89cf3494a9740a36037400b320d79e9d4fe50bc60a2a5798dcee3e0592d1b9adee1ed102dc047d39afbba0f44b65dc889ef9cd856017781142f9bb78cd88a1c3137078a00b3db40ba17a9e4196b517454058f69228380d4e4d91c2d1d22e3dcb6780193f129c2c83c4fcd1b2be309d39a1fc8274ceccd478d424e6de736936ab20672bba2bb49f2883913dd871496323805589d48b964ed94b467e69b6f0eca6c7cb460de35275eb432afe28fbd9b371b6c31828d77566754aa63b47a30016be8d654fd818a42bd747f188d34d8ebcc13557eb7b011a349579020d46812c1e8a6e3c94606ddd0f863423a6cfde61d62b192208be1b8e9875d23bafb5ef87f9da8109073db57b0669a94d906269cdf67da49ca99547096ac4ebb0e918e331434d62d58d1eb5da75cc5a08ca741f280966b72495e630e322bfef3f7d285699429524d0d4104a5afb4e5e84d2dd6e5b3c8cbd053c57b9fe5662fd395650bd56ea999dbc229cd622560b4d45c201a02efe5c05e6419e81d61d05ac26581645c28fc00d3cefeffffffff876b4874502a1894e87bf567a80a303497ae06269a2ae02652c6e537038ec85e2af2b7e734dcd25bbee197d4e4f88049938c67bb2f5b1d01798e37ff1eff8a4353e61a6c3c508621df39b495eeb74b0517f1d1c68e2b4fe0f82e0c7fc9ca670e050da8263bf2c36878ee9332a685b36b140fb06103b48d633d7f170de5236c40fbbd4560fc09a3a538e63b193e7b6443b3f1a5268bd7f6fd2e6ae34213c9444482854f2f656e45f641f2edfda76c89b279e0a409bfb69aa403dd95c1a96f6aad1a1599248f4631da8a05d66c8d0843d49dd7e5c3ebe109fa58c3597b845cb13c45736107821361ed44fa2a5941bb013a578ebfd7642b2dfd9cb92d0e1ffb880300050000800a27a726b4d0d6c2ddbd190071be1900017f9d63110bd91dfa70949ad5f1adbe892e4063128678485fdd673aa854d0f425030000006a473044022002d464070913148b7cf765333ca05e23ac88e77692c8d68fb98b86b57329133102206f622d7fa8124a4de61aade388e0098f7dbac1d701ac2032ce632691e75cdcba0121038c705bb34a9cf95c5ccf54b9ee34e79a2a51226e862f8b5cedbf5cc69ddd2419feffffff02656494a8000000001976a914c3831db1ebeaf65110464e03e78eecf21e67643d88ac3789c80c000000001976a914c6d4036106a6018cab574de8cc0e8329b7dcf0fb88ac000000050000800a27a726b4d0d6c2000000006fbe1900019c4f7221e7b8a35b9f52f6dda574b124c9e6f571bdc6bff19b907009d14d69ac000000006a47304402205b0787f10e7575e11a
50276a9a7f57a2670abbfb1e7d6546e83d5fcfae0ce03f0220660a2852a6151bc0ab6c1d2106e5c6c6f70ce6be03475a204b119836ce3030eb0121035556ffd87eda7df56cf4a1f180022e22572920d70994cb9139ba973f2a105d71ffffffff0141b9f02c000000001976a91418ae4e28f414a29a171a922cae6fe4cad6370b3388ac000000050000800a27a726b4d0d6c20000000071be1900000001fe0bba7368bd4dda33ac35620b7197e2d34b53b5d90a1618f4f665bce04e1208ca4c6ace6b84cc41bade7ade973a5eddb5fb92c3e8a014f885687c8743e641321585baf3f9566388b4e6350a50ad4dafa543ac213fa64d888d54aae4a3f97839022553424b2b81824c700ade5fa016f00f7ba6d35f7c2f76530c18e4eeaa8df76b2487e416558e0d7726bd89ed3c8c3c48b2d77bcee489bdd22cd6ec00da320f29ba969733c0e1aadbb60e2ed6ef0acf8553699766b33f5ba51eaf771f6ad556e2a298a8c421073d067f94b24428bc67c3a12fb08f2d3af834edbe6a0a352b1b00aec36020b15cb637cca95d3830c773d98d9f9ef4745ae68e50b8a2db63bba2abd3bfa50c544302a53b84fc7c3e339c016947f5f59f94cf9188507c60bc405205f24c730fec48c31f048b3becc247425d4ab775c1ccf17f72cd21b80ac74a91408294fade496c2c8127bef72577c2c367e3a6591850e1ea105f29e4132657416293d6949e23dcd3cbc54292c95167c8044aab2da065fc48844031595c185a142972255694124e1980494c5efad56f34ef08042099adf620b723b0b6fd4c3e2c08a9d92013a97c164c5e65f5800d1573d7bae422d9cdf387ec1a494cdd5d22dcd89ed752ed6cc7e0449c8a2f5c965ae81ec824984913704b8bc892986abaabc8f90e0974145ac94761487addfd0103d4e230875b5fefc228b821ae358d0f8233e68df3ef8f24f25e13d2ff5a92ae77aec9986ed1ccca769832b2f3fe05e52ae61264dbda263375c044daaadeb01eca638a480bc0a5965b28258a48b4e1e1f98430847c7441c241a093ca28f951eb4f1b6941777507e1698aee18ac1230d8e4e7db67e31ebdc59cd9aa7bf6b58494a3577cace332253225dc8a1a57bb5e6710c955fa6bd6781c9a4fbfb336c118697ac7b1d243c29ae4f017c9885bda70494178c21a4c82b322349b55a16b88617130a9a6224a7c70e55df82db599a8b046d43d9d0b5b0ed91b06d9e62696b1e5f43526e8f7c3aec0d60e7165f5a9a17bd02677f24a1ee03358cb7dd7c77ac85cdc6394cdb867026fb70d1108279fd089927b29b77a4e13e09e534e774efbbdab1ffabdfe5446a8894854005c6e79dbfa2fb27b3ae0013b04ca202b9ea34bc3bfafe81a0c0dc7c38dc78882df0c0782d9e308dd3b2c92a7b2f49a26c11505
15bc14186816ccc151d2f416dd9fc5892bfab127837636b6201cfe12cc5f9b59233552e0df0e772492049cd182188a7c9a4b96943003606a5aeef5b34436408dc2cad15cce3a70747450c7387779c5651db55f582d878dff3ff108387519621b531f9429369ffdd379ca37fa51783939ab6d32cbfd442a0af06bfc7abf4045c061a733f5fab7451862bbf7e4b525c5a918ac6ccbe82c71b244442a574a63e09eb896a69ec308fdd00d5db19b0ed13bc2a29d3d8bec4040e5b9361a8d36f118b50cf6e0eb913ffc77a8c35b803fadcbf8c4ddca826420a3f9e1df889412abba5d9093b81479f600d4449a463c840693c8d9f8787cc44ad7f7ab4d78d9dcfcfaa023f43f84fed9bb224099461cee0d8e6b9cbf475a06f7f267f0aa2f25b8d7d55d445802e0f343e3f165884aaa7c3bb79107a836bb80370aa3e9d14134361c6e4449746d7378a27d116c21f29316c1652750d167c8f5840ed22b6bc06f5190f599f05963f9599538b92115caf4b118f2d87eeb6e0304cb5eabf63658abcde27bb408115f103e9cf9b24e9db500f934fa763a515ce55d34e1ab09022754a6058f7d2832fc374de6ee1a244d3f0bc105cfdff00994d3150e3828b618ffe750368c0a83b5aedf0934078a13c28803deab11da46815e3bfbed7d7c1c231766a39a483d24be9248343b0e8cd841e3299591c0a135c5ca20ebef4caae51474840ceb4d925ab524e8ecda879350870ca4519422b7f2e02327d5a9c48927ab4ad470c586ccb047876218bbf25a81462bd66adddb0d7eeb62ce71a913ad3de807722f994e32eb591946844ff0b901834fbbfb63460ca79e67d3b6a746aa26115bfd9fb9333696e0e85afacac21c87abc157f48198df493d7808891bf350473218805e1d3c792d2e7ecc597e051620a218b7e62d3182bd18b5ff5a87170366a8afc1fc303ce01c07149eed04b2f20990438fb21e9ebcced54fe924247e0b928de9b720addb160c127958e9dbd6af52da0ae3a3886212bd3e7c4f6bb5c9130bd3bb24a1402c0d2b8204ef24bcb3a5a4cb24f947896dbb5519fcaca240cf0a84e24015969ce803000000000000222cc1ad097e2d5e1a2ae16d4f046bfa932ada7607130724711910dfe5ce4a398ee25686cc8ef938633b8070e43ddcee79afd8f5abf3b5bdaf22b14754b701d15738cf76d40c0ecf17dee25d458f935badb3508368168ecaa60ea220b20f2a9fafc310d2b72d790e5da65983d5cad987f6799f057aedafb4726910fbbf5c1fbb11dcea4048737e456476690cca7aca43e57a1589441e0706f83933ff79b0fda73f1abf5178fd9269778737c18588e76bb7fbacffcec7ff844e3911cab5193983ce6ff7f02a7f64fe594f11c129d919893376d11ae6872175752127ed690ea6728e1f
dad42d3fe21e00495b6f607f87c47111bbc9a4ebaedb968e08053386942d1eae138508374039d27a1fe30451432aa69bb0732b0c678dd93d7e60713ee008af7d7fdfd722e0867568bd9666b18b173c0a4913769016cddee1fc9efce860c2e101c6e3748d161c15590560f1b85944a361ae59e8120c9820b58f3e9635deb307e3f8b0a5b94225507b442b78a6046f688231a3cc7f68e42f0d194ba144b70e0fa63ccf7083c9d16b39f90aec39f3e533c7332f8085aea1425055845d3d3f68d06d48debb3af22eef1fa2bb4a78e68e83e66bba2d84b594d3b4a83063dfc596c9cf6c28ac0eb5410517efd1b8ac75d8b4ff6db2ff792a3c1e733722079d65bea3a1601fe228f854ba18a51d49fd2d721ada25a7428652e2722a2c5f5dfafe6eec75b7b37efef4bb3201f649115f8219b890c231d03ed58ec18d5ba51e60a1a72a93214cae04a5f5f6820cc51b8713703c7f2d65d64d23076f5011a4f294971304c81e3e0f88bec49a4fa6726dccd22a4d5759f7f5c015518fed2cacabd79753ce0e6a6530d143ee5b48118dd5c86018abfcdddf0efebfd0d262dbb7c42a6b4cb5959877b9501771dd575bab70626bd5ea7d232671680f3cd269c8e5b3d769cc654da5115209c80c79f7f12646a2ed22ae876220ae323fb4b8d1ecfd24d0a9e0fbbff14c10e0b94391d475edfb0c531495e57101850ff679f3ba8bab7debef0900050000800a27a726b4d0d6c20000000071be1900000001160da523c3e51c6072ff34801fc0c2f62995f40ad09e2f8064ed4cbaadda3900558bd6c32b8deb1912c777ea38f973e439f281987aed08f5784734b2249748d55a8c3869ce586216e0f357cc7d2292d6e1938a0217b4fb98781b5380b7ba9c1b02212b7c58d8c62f1a56f9fed6acbe6295ee3552ceb936dd244c82aabdd4193ade8dfac9d004ca4a6aa5eea8ceaaeeef967edd638c61dd22b00d00677b4ef6d66e64f6229e36d75ca28ee0e08de83a119203451f5cc3a15bbe99abe02dad926667b360d1289e8ac6c68ae59526e6a20969960ee128254720f0611cb1b70ab2edc6245e9580cb3ca982eab1e39d8544f752678477e0ac783480466adf17a2d1e84cae30c972774d40882556a8f81cdbabd3342628c351920aef41061deab9b6a8953a8389b7e64a10c0e63b5082e08c8e18edf3a49ba5ce755b031de56c062f27004fabbbba47871b2d068728b0b4a5fba3e968e3233553a066d6868e9de1280c0a36a96628e6dc4ff772af6dfe39369277fbc6348aba00476e07d1e300dd94de97d88760cdecf055b57f42ccbf2af94aac375f97be8371dc166748b7f41251cda3f7b07ec32ca3cd3c2a4508d2e9a82ae2e34a64866d8c6d35f69092c033f6e1b4c3f5c9f8910d8315947ef74a7bf59c6da0
25de7c0c4d6f11c13768af62e7383f0306b3f6baa210ba199dcb3195ab25b5db29718c7ffe86bce04b9b74bfe374fd464d5d3e84bc2cd2da8bcc1e01cd1f58e22a81a7448183a4c28eecbcc2678cabe16ad43d34597789bb7e97eecc2007843254542f1d839a5a2dfb646bbe5952b112810a7156b642109929ec10f5e1fd0f4a2771b50480d154c7da82ce5ef6e5e88e9764a5f3dc101feffe3eec14c95f37d379d5876c84150c67ea31ef75c230df098c6c683cebc45ac5ff6f65279e04dd70f3d34e1eca39ba5c8f6492482ad0fbad304c669c017e2d916ab67cf6b38a0bf7ff4eb71843853105f1a7fae28f3169910ac0bebdc67d4b2ff12cf80e4f844f10cf98aef86ab69184f682f3eb7d0f1c8964772924dd144e2d4887adebc3c07224870934806cce46ce5c2df3a7346a5c22b16df70c02dbdaee5c7bc93c8b4209d8ba43fbc4bbe6e79ed99384fb87afe531e810a4038afce51c5c9eb717635283d9d98fbdb27381849f9278658f272a1e3f3dfcb01ec4f5a3b86f8b811f3aad055c056b0aa0f7d5cca78ab0e17d4b0c1946236874b61937802a654c65053015860958739382574f94b87bc8cf1c89a45a4edd2ad307305b7dc891cb8aa3f628ddc8a0df64a4131fef14a8a6fcc0c8bacbdb896c72443b45880d90c4ce965ebb232f7825abcd833fa9fceddaf2d00b8bbde4b8fda530fdad10be6db724510fe149f0e1e522201920220e803c5a12d54dd3d69c08d66efbf3216991cf20b0ab7dd94159c552aac3102f009577fcc6a9ff1e3288b3847a2d07094f1df2e62e4ca2d4b633d9dd1bbfdcb36fa9b6693f0933f70dcd6689d3deb8fdc89463e3cb5c28fc95a8de79a26c643dfe2812fae8480a77fada34e471db12711253cb73f2bff1f3a633ac7f8fdfea4cffe73f233209455749530bfcb40682f772f44a21bae2551144e88f6f1688aa6ede149f0365e8efae70e54b588b5ffd6451b17e725fa9c219969b1a311ae7c8351fa4ad5bb155fc15c5772d19e8b24eaf485dd83567ca65b43df4ef0fd72658e0930f1678b7c0d150ab6332f32b2509081fec6650864a1738f81e09bb81c664d61c8137ab13280c9cd51d80a806028184b521f8fb8ce490f844f35cc94690f41fb3a02fe2de928cb11cc64b7d81930bf642ff8e539ea204a35463d7c08c9d30385845284d238e6092d2c824d2b7af35796f2bbae16f4d9febe5698453753cb751f3d8f9d550b663a623d15124b6f950dfcc57a9d4e1c4ad6898e1e6c3e157c04a0a622280f57848f8a2a1d98123b52acf3c5b18a28c749a5f58a72a23ddfb5e1dc2193fe19f4e50edf22bb2d6282204fe16c5b3ffb53091e0f469cfedb4f2917b10219715bb535d0285dcbd72824d1296fd6ad2f89deec22216a02c65c65debfe49
7b20c9444f4167342837d5f766da0251776da0aaf69b6c8b89276fe537099c67ee17afb4237e291a376d7f0d0f59b8b304825771456e56427ec29a62b266381fcb2c80a5deece686064d92e82f3df425a00e0a9ae9cd3a538527318d76bb5cd7f3e30616b7ffab22bb5fb3f225020810bfbab34ae71fcc3321e56d3d3e916d8ef905a0f0181748e4fb5911ce00464ee803000000000000222cc1ad097e2d5e1a2ae16d4f046bfa932ada7607130724711910dfe5ce4a39a11b201b41e199ddd5775223e1d1b3241d3d6924f2a8b9b8df7373dc7298c0be601aa14724449918d10ecc179ff0e048ae3db6980fa529e5d053fdd950ce4a9d9bdd04b52c76a68191c60876fd60ef5e5ef8491b69bc0a75c9b192feea90d49a06f7e5d9a0492f5406de2600674d078c02cc89f27151a1be1e36421672801fed615701e11c96fc94e69e3747de340f26b62b231e70dbed617570bdc87291348f2f39d76fde767b4ead7a015aba6f3b1c676b4424a7add9c0119d3f79abd935277aa6831db6187bb1a6c5455e3267708abfa3ee629b4d545d2eec3c695327d5e2151d6d37a20197af6bfd1bdd407a5f2cdefbf5dcb2df621cad331181fe102205ab60f1cf0fe92d793af5d27781bcd399c4d3b87b670fd3a2bbb16680d6ffc148dac511e08e48e6ed274011107c177a8aae500c1fec386018a7ddccbeebc20a8fdfed6b78390f6c831a37dddcc4e8494a5c2b3848ed7b3f0ad0fc04be5ec1c48c0b008e6101158d9b2eb8e69eb6c7e81cebf40c4d01ffd45b8604a823f6e834f9780a9b087fc66696bd70e2c20ff646e080b40b3d03e00d5f53a36dc44f85984623b636ef0122b6d4d50d002eb17b50a2650aef21a2e5297cc809a3bb31f8c963a9a6c98e59cbf65daf9c7e467d054955c0120e0d7b2b1a4ff172fbc15a73b186a700619c8a118d4a9d42e4c4cfd8e7868dc0cbc089fc8dcf9ed35a8180376eb9b750a5537a3ef790bfbd5e0828be4b845ffbf4d369dc8aad6d0410bb1db3ccf7179d71a9ce7afda2d23fe033cf9f2ff2f582f324d049edf7088b61232723750154121e60d0bb4a5f5223c8fbf49f888882cd93ce1a7e05552b7d22438da5fb64d292ff23fa7ec2093c3b4708b4817bc26983dbc4d1e8125000e31c498a1c7bd2f19aeb109985e3802a3503b98309cacaecc90e1a1d88fd74cf59fb0e169ff8a213ada5c7a717edb518b410e7619b1b4c002aabad55fc3079c406feabf760eb0000 diff --git a/zebra-test/src/vectors/block-main-1-687-118.txt b/zebra-test/src/vectors/block-main-1-687-118.txt new file mode 100644 index 00000000000..7bfc6623a00 --- /dev/null +++ 
b/zebra-test/src/vectors/block-main-1-687-118.txt @@ -0,0 +1 @@ +040000005376f61e8b5453e3bdc4a63bb5d0d647e430a58290304b6e7e220a0100000000bd923ec19bebae32c6d061ca7561fbbd54e3daa9b696b81f5726551549b791b82053ed35b6b50554f2aff1ee3693d76db006d13e69bcb237b046663f584ae3b9b95c9662f0da011c3000eda300000000000000000000000000005000000000000000000000001d69fd4005002e06a2f8d847ecd8a460f0374b4b3ec7ed39a32d49a7cd7da8333e51df88c518436d7b9954c99912a002b256eefb9a3cf52188d1df3fa7c6bdbbd97229e409f7646eeaa5f77da54a128f895abcbe618754bbde060ad1f1c9150bed6855631350af5e55bc35b7a4b81b6f2e5d28d45ca93401545f45ecadaa3cc31d6c4b1f2fb68621e5a5c967c695dfb7bef826093db32c5a6979acf50de05d493bf7586234dcc44a25b518ecb301eed414755b325754d39c303e6215036ad25ddfac0f07c36e3ae9ee9986b3b670f3b730f305a3d88d07179a359199f08025a335d2489c573b078f3b9f7dab203067d5f4621373f71ef4e099f1fb22331d7d6cf70715d4ecb0193ed7b088c16b3958ed4d39bccab59124a88ca9538decec7f21b5c6dcd1aa820dd214d01f12091d6999883df3d035d35fd6e3439df601dd82f91236052018475725106f73cddba1f7a1548d90a34b0175cc926c4b1bc9c645b1479f75f29070ef0fceb7223faa3ef2d0132bca21551db65568dfaf8d5fd69912367fba5655bf3bb128c38179acb79a319ad9e4c64531aacba36fa4fdc4f4b4cebfcee91ae2497f07430b988d9fdaa6d6b1bdd05139cba94660b05cbf33f73ea4fa513db33d9bbb6c44d518eba5fdc644f0fc894c352e643b5d7aad4ada1887e4e6f33abfe15caa9589746c88b7f1f335f97188d3f6fc4b935ebfdf282c02ef3122285f46ff39f325e9bee82abe689ddcc0e45d0033854ca612ad49b7a622f6738545e72d3d1d30174e041cdbe976015803c2059a76f1f1e83ab51a0e257adb06eb607143cfdb75d25db12c25efbf94b65204ffc43126d02272f7a051cb5b74dc98de7339f4e77d297e2128a2e2a5d6de687fc96a4ed351853f454313bf16cead88c757c2accc6edf6db907399eba5997173959c8b7666c4fb8f983fb727d3d5201329e7ca6011ef17d2117b7f73948b1bbc621e90a279fdfe7e512fcadb8204c370b2fae44f80761d701d38374bc0803aa76ee050aed8d598995808269800a05513954ca25dfabdeaedcd5838dedf391253bcdf566579c84350ea7fd1c0dda6017b89da66c55491a4da52bba3b97294624f0aba7174b5b6764117d42dd223c26bf264e102d91c2134ad98d1d67f27300fb394159be3484621826bdd5cc2
30f0f51aaf1c12cecb5ceb26c9d517e01286c39cd60dd2755c140e0d3b37492073f92da4f131a0f1ac3d96acae85e55b3807601daca1edad5511b57af5a47d14a4d28bd148966de945b85215fd3d74b33e29086a0138b078498befe61458789265fa4f709a9d3422c235905215cf34b7be8cee6c62cdfe55f0e4a2daf644c2d3d842fd6e6fbe4cffef44bb8521d126f34501996bbf4dcff23eab879575e976036cd5a232c52b39a1f28c9d0f255ead0f1dbda1fa91bb30f081a26f6f3ac5313c145e515145466ba3d27f386a51980b988a21e5574f5fdb620aa6f8729dc50ffad5208e597c5a60db75c82b170b285ea0e11cc02b4a7d40d06b3176fc7e7e36630f2349e5da11649e939e0ba091323040d0260f99cc077e54944986234a0db6bc81b6dfa06224fb9ab8b67e1b72f57c0514d972d5a66179b556b171513e9ea570190b2fef2bf0d78b9205e46c71ff27e279faf5f0d16119cd9e83a57e8791b950b529cf94f04972389b7e4be4aae9b15c86ffc3bf614ac68af4228d563c34eb179a4286741d7b48f2a8348b3a6a85d1845d10eaa1560392b4ceac92dfd9aff4f83353b8e605c6d9ab4678714693f9237b393845b15a588b20924b2a7d8afd36ad0596966462672f340346feec61c634cb7f1a0e41be75f683988891104e2178cceceabd88de99258618a0a7b01fcbf281f191a4451ab0d7e64d1bf29161cbc2a7b2cb7e5777f445e08050000800a27a726b4d0d6c2000000004ebe1900010000000000000000000000000000000000000000000000000000000000000000ffffffff05034ebe1900ffffffff04df7ee80e000000001976a9145c38e5e20b62bbb5683dd68677ac3715047341ea88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c9087000000050000800a27a726b4d0d6c243be190076be1900014633befb183f8bfa772e58e7cd751c3de08cec2c890df2e8a183ff4fe2178a34010000006a47304402207ead07bfd21eae3793aa93b59bafce98b8ceffecb8fca9df435030e43b1e89d502205343b36e04a7838bd42c3d63cc7db51e912036b27cada7906270de69ed7d691001210269dc30578ad9e95a2507fa76c6e79d8fb7579c73690aed7cd54fe94d3405a246feffffff03f4470300000000001976a914dcd42062f0ad04dc1f4b4b1e0800311cefc4aa9d88accf410300000000001976a914011fd5b3ebeb3405ea1d9c7fdf2b50d93f05458d88ac08898402000000001976a914056ca47461dcc0e7a9a77e754c5efb047c609ddc88ac000000050000800
a27a726b4d0d6c20000000076be190001d7c1d36bbf1ec167a96de8294d00f9a1c4648eb92c3f38b7a37226656daec71c000000006b483045022100b8686d336e3a2a7d859485c7dd6b627dcdc4cee90fe7b342bbed6ecffd922d82022004903ad43771bce0e57f8a10386a77e83bf43160b4780b677e6b4071801eb4890121035556ffd87eda7df56cf4a1f180022e22572920d70994cb9139ba973f2a105d71ffffffff0241f8ef0c000000001976a9147063948e537dca1f9410ad33e792a2dbe990755d88ac603aff1f000000001976a91418ae4e28f414a29a171a922cae6fe4cad6370b3388ac0000000400008085202f8901a1e72b1c875ac2c3ed981957ac5786b918dda980d28dcf5d7b8d572b1995f079000000006a473044022062048e047574f1204aa3ecd56bf80ead631c38fb20d18b4f71ef19f5217b371602204fa9a1a8b3ca071a800611bd5af9c2fb0fba1b3166e038e8335e948ed78f95ad0121039e88036afd89b82bf78a4b277dfc1d5713cee411096a65a7d49c172a35093718feffffff01bf81d7030000000017a91443474abdff119f6a0315d8abe47f9aa671018c498700000000000000000000000000000000000000050000800a27a726b4d0d6c243be190076be1900017f8c6e56b86c8fcf3d84e6f3cbb094a12ff18800f157ba22403e823a4998cb7f1a0000006a4730440220109920ec91d5ca28f9e0db961ca7c1515993e46df94e960e11501a838b9eddb502206da76a7eec81355d8c96985736654715cd9e1daa50e4e0f63d12d5a8c824b049012103989c75fb77bba163b0b0d93389b9751aae16e91226dbd02448f834fe19220bc5feffffffd35e0b0000000000001976a914665bc06b2fee5acc0eb0c5f21e70a235d0e0a7f788acc0080000000000001976a914d053fc7119c8140b5c76bf067ccb191d82effb8a88ac6a180000000000001976a914d052cdda5464c446238f445b18d64ee8750fc79188ac0a0f0000000000001976a9140dbf37f3161e3cc95769f58065d2e9b26725253a88ac6e140000000000001976a9142e6503896cd81448caff2984db9147cee0a256c188ac680b0000000000001976a9146d006216df836ae063e94e4186f91040726cf86888ac880e0000000000001976a9145b4716be3637feac6605d0c8091b293ab5708cc988ac180b0000000000001976a9141b4842efa72b0ce51f61f6088b2b5b9d719ba84e88ac0a0f0000000000001976a914ed1be71d635d7d72b1999694de2b5ac5b539ce2388ac66080000000000001976a914be1496e0dda62ce6ceeacec4178d54e4a8da75bd88acda380000000000001976a9149f54bf1c69f66b2a7945450a282b4857336dbdfe88ac420e0000000000001976a9149
2c39cfcc4653a0d68b3e2d1d561ddc9dec09b2388acae310000000000001976a9147b583104d342d90d60ad874d180d8d32c7d777e388ac7a170000000000001976a9141c8a8d4aabc1936b3f455384553b810e6b57dcd188acfc0d0000000000001976a9145f57da07958ba018501f31af31d0699d28d2542288ac9a100000000000001976a914d4ea9383392ff0b1fe66dd27940c477f7c69d08588aca6090000000000001976a914d137062f3bb63b035e56761980a41bc63c4eab7688acfa000000000000001976a91401146851843fdbaec3c1daf49e70cacda6b1c9d288acd8090000000000001976a9144fdf4a96b393f1cf16d628c18132a6f355d110cb88ac24090000000000001976a91442765d96387ac910b2f5a73a1aaf8b5dfcb0de2688acfa000000000000001976a914ddf415b3d7bb0c7cc98ee0412093fa192ea43c8c88ac04100000000000001976a9146bfcbfb7cf8f8b09fad77aad55cf5bf2abd78caf88ac900b0000000000001976a914751630440deb39394451e2f55238999f75c329cf88ac0e0b0000000000001976a9140ba5a02222e082f4f5c3f5251de7c6e2712fdc0988acb0090000000000001976a914430b18303d6f66b53a7e093b94b8b234bf1d1def88ac120c0000000000001976a914549cf3eac51f6026af85b006c96602ff3d7724ce88acf0000000000000001976a9140f5cbd0a20d3dabad664488171b396102d9bad0888ac8c0a0000000000001976a914501a6672bd4c3308a61f52bdf0f47fbc02dadb9888ac70080000000000001976a9146b23b2537250fe5cbf2e42dd872d01e528e434d988acfa000000000000001976a914c04a6895790aaa4d62d6ab161ffaa1c5eb74cf8588ac1e0a0000000000001976a91410b7dccde70977a61fb66fa822c26fe1467ea42d88acdc000000000000001976a91472e6256345700c56115515738fee3d605c3658af88ac34080000000000001976a9149d8892b355500d125e14f8da7e97980c826ee58588ac3e080000000000001976a9147c1bc9304fdcd89c060b42e920b126ad1517f53a88ac7a0d0000000000001976a91428377978a793dd91e7c5bad253afe74a882a278588ac34390000000000001976a9149b539a4cabb57efe891714c0ec4ca35560d9588f88ac64300000000000001976a9148d3171d08361c18e24344a9edd8b327c16317e2d88ac080c0000000000001976a91496d9fd422567c478011055fc2b3cd7a65483cb7988acdc0f0000000000001976a9140139d95b52f0f708e71bf1cbd8c99455599d067688ac2c1a0000000000001976a914c288db31151a2618573235695bf90a9efd66f16d88ac8e390000000000001976a914502f1c6481d47ed28832cb8bb554f
0188723154d88aca8070000000000001976a914aebe8c2b05e1bb7038c5ed97c40ddb83e63c029f88ac12160000000000001976a914f7c731938f4a28a51bd10304f8f7e51d841a615b88aca8070000000000001976a914857b2e67d92b8c48492e80548bd4be9319d0093b88ac0c080000000000001976a9145580f5f3fd4c71c7285440f3293a01344c242fcb88ac9e070000000000001976a914d40989866cbdd40265524391309230014b26806f88ac820f0000000000001976a9143e4443c129987de8abc4fbbc0497b26b676ea34688ac20390000000000001976a9146c9d999ecc7c96274882c3905cdf97f62f8b785f88acee110000000000001976a9144099b5c7005aaaf47d905a833db4a2a2edbb536788ac6e0a0000000000001976a9141c9e242ff1e8fc2ee6353956f15518bf9a90f4f188acd2000000000000001976a914b22c1c2fdf2cd5d1325ba4cfb5b2e2418f3112cc88ac20120000000000001976a914285edcf4f1c9933c3af655b3f9f73fd8f975a6d988acfa000000000000001976a914ff1d04c8cb1489815ed607e7a2c85c4274d7959888ac9e070000000000001976a9148f4fc719766bf667ac66b951c3fe83839ad950dc88acc1040000000000001976a9142a3bda68d7c2f875e707e2723bf8c0532ff4360188ac40100000000000001976a914e7afaaa99387bee5b83b29fd060395a42de038b588ace35c0000000000001976a9143b8c90759743afee1b814c57a414c46fc075992288ac4a1000000000000017a914b8048422dc286534fd257bf17648cfde2997f9e38720170000000000001976a914c562eee981c2219b4637701f140dc81ee23f959388ac80380000000000001976a914afc318322b41ee532d9cb4af3bfaee274aad3db788acf0000000000000001976a91427b402155e23b2fef56e808b4c1bdec5583cb0e488aca6090000000000001976a914814a415b3e71ab2a1b5b7d5c42d3f42a3176d87988ac0a3b0000000000001976a914c7458da14c9db8140056b4192f7bd02f49a131c888ac120c0000000000001976a9148c0acfbe24c769e8b5472efa9d1be2a221b9d6a188ac06090000000000001976a9141b61be3dfc2977e0b6bde56c67b404397695f17588acf8160000000000001976a9140ae69623692468f6f2a839fb8abfaed8f3c16b1b88ac80110000000000001976a91489e1c65b5e1cf7d5008453bd80cd03b1b8a7b89288ac5e150000000000001976a914d3ccd876d2d73561e4799107167da0625e7dcacc88aca4010000000000001976a914557594eb5ce917000542d04caa2e5c3322d8a7c088acd2000000000000001976a9147793bedc9db922d70c58c6f41f2bd97c05d157b988ac3a0c0000000000001
976a9146c95a8ff72070a10741c3f86c5d99b7efef14d7c88ac0c080000000000001976a9147f3250098d18312d98cf0043c1a28c7f2ab5f32b88acf6d81439000000001976a914578c1aa05fbc01a6fa54ab6ae85897843ce86ab088ac72100000000000001976a91438e2c5db7b908c100a60482eee04c600ba20e45988ac7e3a0000000000001976a914af31f840fda761a9cfb80d6e06137bbe356801c488ac24090000000000001976a9145f5a36f12db55a09658a2cb4dd0c5734f491251a88ac84120000000000001976a914a8651caec1c15bf481a6b6798ecc04af126fcf7488acd2000000000000001976a914bdd21b5b6fc94461b028353b013d4418c92a63dd88aca0140000000000001976a914f4d48d6287d56f2243e5f7b8f8dac1ce327f0d6d88ac18010000000000001976a9148481be2c2338de4f5a0f4d5e5a16bdb49e4c9ccb88ac2c010000000000001976a91483cf5d1d541d869296bee9accbad25a16b7e238b88ac72100000000000001976a9144f5aec527e5725390eff041108dcee7513fa6df988acfa000000000000001976a914694a799ea6924d8f1d91b3edf8886090b5debe9e88aca00f0000000000001976a914756ed47c6348e2105015310b635b5ae5556e9d2d88ac04010000000000001976a9145165e1a9a8436584b942399008ae401e7e01dac388acb2070000000000001976a9146d9c01d7a2648cf140a93de6dfcacae3615c66da88ac0c120000000000001976a9143cf48613c3601bed975e6a49f9e723c6e0a50da488acb41e0000000000001976a9144b6ac5a72cc882349c048d97380fb441e858e1d988ac18100000000000001976a914f8ccdce71ae8ec427502f9db8e6fa559585d751d88ac655d0000000000001976a91457238e5c566820c90718c6884445a09d9206040888acfa000000000000001976a9146a6dbbb2b7631eece68f23b431d043fccc6a0be588ac4c0e0000000000001976a914009099bceed0dfd00eb4df571779b59bc5c64b6988ac2a0d0000000000001976a914d50f08040f864df9a2395626a85f8645cb13da7788acf4150000000000001976a91410a2d4b23851383b45344babd0bfd46403be593788ac80160000000000001976a914fc88e1d072e39dce4ca647f115828f4fb4bb535a88acf63a0000000000001976a914b8562cc385a4daea565d549ca22e5181fea866dd88ac020d0000000000001976a91426382c263e99684082e1475f6ae8af06c708cd6688ac1e140000000000001976a9146670278d5f07b50cc54ae974ef1ba62ca7f5899088acda070000000000001976a91462ae229312fa63fb81e077d2720889f83f804de788ac4e0c0000000000001976a91446933c3f6315005fbb232
0bc4410f129ffb8cfb988acb80b0000000000001976a914c048c84b918c086261e36b5c26d1ee63e4c210bb88ac84120000000000001976a914d1aff76360aae3c1600392f4a3ea5999f6eeec8a88ac9c090000000000001976a914ebb6f17c58cbff22cd295b47406ece389647f51788acac0d0000000000001976a9146bb822f9b392e32cc0fe0874d28f5e0dd416246888acf5590000000000001976a9145e1b00521047954974af87212272258b789a68ad88acf00f0000000000001976a9142f589c49a6b90dee756876533a89a176357513fc88ace9040000000000001976a9147a557b673a45a255ff21f3746846c28c1b1e53b988ac2c010000000000001976a914679ed58615cb040c1863d97f91c6221b3969b7eb88ac080c0000000000001976a914fefab0b01dd038123afc81af571fb001c317af3e88acce0e0000000000001976a9140788d51ab8671d9f59680c94e7bd80734d8e7dce88ac04010000000000001976a91419465565c88d7df9c739daa8472bf78299e58a2388ac320a0000000000001976a914dbaa634ded0a8043ab2c55a8673857d339ca525588ac9a0b0000000000001976a91476ee9548ad922c448535bc7d6bdb8cd8accc534388ac0c120000000000001976a9146b6d64b8b01b832542f3ef3f3e61f0c46a2a744888ac5c080000000000001976a914477c494da7056e5d0531329d002a3e3b76499c2588ac8e080000000000001976a9146c2ddfb2bfb3465a8d4d27e1a3835c5cf13b834f88acdc000000000000001976a914f0e2a14e0c0da6122566414f2521b53fe90a301388ac460f0000000000001976a914fdf43cf9945f83e05514e4b3053d4b510cf9d0b488accc0b0000000000001976a9142debe7b67e761f4ea4e5005fc71c33d85e5157cf88ac8a070000000000001976a9142f2c0295e179c2d19eef3cc7b8baafbbbaf3c1fb88ac2a080000000000001976a914e0c46521d8a6f90a433083ef9e97f47e22471dfe88ac741d0000000000001976a914e320f896e5800f03153f1c0df25fac0057e1665488ac3e170000000000001976a914c12cc5ec348c066e863bb635ded8cb86703ca49c88ac8a070000000000001976a91497101440042b2082859020e40470ef4f107dd9f088ac4c180000000000001976a914a03f785a80019e77ef9de0e74338358010a023b488ac9c0e0000000000001976a91408ee4550fe59b08879ddddb4dce9a1a1f883dd4988ac60090000000000001976a914d216a89c5a31da54f7b4c6a18ed8020a9dec36b288ac1e0f0000000000001976a91444563afc3fb0969ba1ffa6a58fff0bd1d188b49d88ac320f0000000000001976a9146c728b1d1b35008e2895e36a2a3ccf962402091988acd2000
000000000001976a9147fa095110d1c6a96d8aadf0cb18d4a5dabfe6a6188acb2070000000000001976a9148d2175ceeb0a83469054ee941eeb5503fac7827888acd2000000000000001976a9149a05a14fcab454679b279f146b1015b37294389488ac48080000000000001976a914d404c587297b02a23348b1ea0014e2e684aaaf4788ac59030000000000001976a914618db5d0d7c22ea01fdb22403db34d5163578aa688ac5c170000000000001976a914c932c6b8d0150632459a3432d6359765f581b84788ac180b0000000000001976a91438f6ac15880f4f65722b395eecb393b519fb993188ac181000000000000017a914ce0578845348993edef37bf2883c8a1ab41b155187320a0000000000001976a9140b4925dc81fc58d94a522e451b328a0a8d5304c988acb00e0000000000001976a91414ac0aeaf3784d02ca716031a3a481953c16cd5e88ac640a0000000000001976a9149b6ebcf0f456ce12b90fe6f0fbf96efb8bf7446c88ac40100000000000001976a91470d7ed5fca0b5bcf48d414991fddb81a299b441788ac3a0c0000000000001976a9140734b890813232b5d6456e62dd8c2b95ace32ed688acdc000000000000001976a9141730f5f6302fca6efcde94d89a19699c1684190e88acc8000000000000001976a9141071e338b20d185badc00356c56f394dbafd6f8188ac34080000000000001976a914a6de4a3669519534352d02b7b5d32307ddd43c6688ac420e0000000000001976a914d9803759075abe33970e9bf854a04167c11e453e88ac1a130000000000001976a914d69edc74353c8fb9a4e50bbaaa5184b99a9de4da88acdc000000000000001976a91476386b69a5480a3c808babd0142bc1fccb3eb68d88aca8070000000000001976a9149602f246361dd51d9f69c559d4572ee7d3a8578d88acf0000000000000001976a9142acebc11514b0178e464e4f4d4ed6dd14b9fbd2588ac18010000000000001976a9147cfd527a191ab209972375ad9e0c64bd6157c49388ac40100000000000001976a9146f2e79cc8a118c7c44ac7ce3cff08f7d1da50a4388ac22010000000000001976a91454d87f0a6094a5ec16dce56d0967ab0526b08cc588ac24090000000000001976a914195c2f78357e05121d28ec52fca0c900f23a30d788ac22010000000000001976a9148cd62825e1719d1cc835a8b893cbe155ed4d05c188aca40b0000000000001976a914ce20e303700c3e6e568424f79b00a9310d78a29e88ac640a0000000000001976a9145331289fccfa21d0d6a02e8dff2d31102153313688ac04010000000000001976a91425bff353f3cd9f820e8c6c0f0fae4c467c4fc65788ac881d0000000000001976a9142f04e09647e2f
369daf210c7cac702129d75373388ac800c0000000000001976a91431948241e6a85c6532b8bce101b74ce7917f25aa88ac04010000000000001976a914f31100f980dc33f57240e730f4e90f8aa819a72088ac24090000000000001976a914cea7fb69deddd529cc55bc7da46c3650848e570b88acfc0d0000000000001976a914c42015137353c37cc74040d4eae580c027ca7ed188ac38090000000000001976a914a1ef40de464752a1de6c3bf15adebeca6e541c9588aca2390000000000001976a9144f5a6c703c56eba9d47b18952233b8d7dc4114fd88ac42090000000000001976a914fac405c1cd8173ad8469cec43050112cf2aa385388acd0070000000000001976a914bea30d6e92dd43ff91b14b565105edfc58085ad888acd20a0000000000001976a9146e825667823b0f1799806d973c880e5a9778486288ac2a080000000000001976a9141f0f09ced14b6f079a622c1f75a70a2c2de59df688ac5e100000000000001976a914889793fe2912735c314ac7577e297460da948aa388ac66080000000000001976a9144f953b517aa792e05e5a561e51725dd2086f769788ac860b0000000000001976a9143e6b5bb51847570ca4404bd508d3dd812576f22688ac880e0000000000001976a9147b1988fa0c28b4dd634b1d15a58d7296ebb0522288acda070000000000001976a9148028dfe0cb2cf75da2530b2ac4c63745c18bc01e88ace23a0000000000001976a914bcd5241b39ceecb75f44c43b48b402d2281cdb5d88ac78300000000000001976a91440f014bb93a834c42439f6359f9624211a26d07a88acdc000000000000001976a914ddfcd8e31735e6ca711e759dc9cc488678b7099888ac72100000000000001976a9145d6af230d48d7031aaf9680e0b42cc4596e7788f88ac98080000000000001976a9147d86fc6c92ba0cbe05674e23ff9072240da9195788acc80f0000000000001976a9141767e6744be2963abee290b463d7bf2be15257ec88ac22010000000000001976a914e0799d7d726617777bcb91953b2a83c2f24fa38388ace6300000000000001976a9140806e275eba8578dbec6753dba12deb99d5d9cb188ac8e080000000000001976a9141e74b76f5f9111a4aac8b4f4e86ba24d1b3b513988ac3e080000000000001976a914fbdf2c19cd23c6cedb2f3aeec45e91b7574c8a5688ac7e090000000000001976a91493b89e8c8d974af240c6291b03c41549593effb688aca8380000000000001976a9149908721b234fbcf87e7cc012cf06d9f4721c82a688acd2000000000000001976a9141fa24ab6a84dfa6553f69ac774bddc01fe7d9cc088acec130000000000001976a914270a1a6ef87216bdc205f7ca0f1e5db1718582898
8ac9c130000000000001976a91402d30164a6768a4d620d251af7090905f1cb8b1888ac640a0000000000001976a914cc43e66051dc53e3e22050deff1d8f0ad29ffa1888acbe000000000000001976a9140202fd5a3b952710403bd299068feed9c283889f88acea3c0000000000001976a91402b70915db2194be41883f65c4908fc2e368a97c88ac323b0000000000001976a914051c3d2a8f452554de10bda48b5482ddbf383da188ac72100000000000001976a914921ae0dc8716f54f9cee80458be8e2559c8f379f88acfa000000000000001976a91418b95ee488e3d971f640f8fa0f4027d725933e6088ac6c0c0000000000001976a914648857ced1c119789a6161a352a6c9f50c56fd9e88acca080000000000001976a914af171d28a8824f5234d93c802808abba695b88aa88ac563a0000000000001976a91442e58043be43a10f20c6510c67f26dbb3476d8b688acc80f0000000000001976a9145692800a0bae42a3a2486897bfd109812189bc0a88acae150000000000001976a91459a193f51d28ee25312906b50056a905a79e4ab788acbe000000000000001976a9141b9b65711827e40ce9d39b67aa6e410ec6d0a18088ac18010000000000001976a914d78ac33650c1f8c99ee277e7d6e44c77befdad6988aca40b0000000000001976a9140abb48dae84d7aa393e5aa1eda3ea21df533648f88ac84120000000000001976a914349f06e228226db243cb81eb7df59db5e2ca57e988acda160000000000001976a914c8f88abfe9fff68c7883306ee3c45f3409bc2af988ac640a0000000000001976a91482ff41e000eb11da51c27396e950575cd26cd0ce88acd8090000000000001976a914af901d858acc0112a20e5fdd54edf3742a5ff24188ac9e070000000000001976a914c4d5e29afebff4d56671f59b20128a66cd85382b88accc0b0000000000001976a914661d74c9f091807be3faab2bec397fa349fd531388acf6090000000000001976a9149391cc1f82987576f04c00006e0664396828ee2c88ac38090000000000001976a914bfc765634b18168c15517f8f8f1ccc5743171c8488ac000000050000800a27a726b4d0d6c20000000075be19000000012affabf8c095955e80128da33203c9f3fd0b8c3b370efa3079c54e35b2fdaba2dc47947469b51879325d242b9eceee90b35f3fd0c43574a66b29f1cd3492478f182f5a84a0d7d28ad303e81eee9bb89cc26f9eef270a2e76780bcc8c4d462e0f02b5f9a5fb1bf30569d0a178d2f6d2467b47e1478607bb958805647b33d2f3021394335aedfa6dd590ad4d474053ab96a1c75ef3ca7bf74d92108f57f7d36a7a05210116d3c4776b4a1454d7835c5ea4e5047bce0ff0be5b084b9395eb2b5
93ed142b45571106d026d20ab662cbcd474b0d6c3341d7edb827f0209f715c81547a8d0e84db088fd2a58b39d9f83122573ea0946f03d8cc6cd579cbdfccc98cadf04a176d825314fd09283a243176b47fa9287cbb3709826cb2037d3762b36f06ce56f9b791e1da4b8de8cffbb70fe2735234cf6dacf8c07085b8bf7c80fc5d36d29cdc9c05919458f97a17e275f2b99230212b3987b2f5cb66c80e027581367e496e7ea1fff72365b3123dc48deae22dd09a4d0dddec7b6d11d5c98d550dcca3160a993e8e1ddcee50d7ea4b31ac289c7f9bfba88d7ba8060b27433963bb9a056d5f485cc03969921416c8680cc4310ba90337c3623681c99dfeeb70d9a56b01a9cc64f04a11299f3b47cf4aa8c00c54f581dd6ea16f3ea943e768dd63f5bb93d317542709964d441af750ff39e5cafe2cdec510c7b38a2fa4dd80790b61a4fc03263a1308df946b5a3ea4d7e02a37e5ee1d7c352ac93c4e2e5e269a64a3a05fe71be3fc3d02440fa0b370dff3c18ef284f33f5a0699920b06d3d3ce3377f3da0218764f299f8a5cb54024756406c38b8ce8938c9a6cb919de4f84300249cf17aa4f9fcda6b6be648dd1bbc93e9d376b6a5f5d637a48868a1b49aa9d727352568f6b5341d3cba09bb8088b586b8066c073c153acc5085f594c5457e07c16eb64ddc6c1a9020aaa64d7c2e9521d14f5843888f4cf826e083dae697a15cfb142bbfc29255b2b330ef71a089a94e5caf1ea462748b9a33c36ebf343f3b986d4e6bc0f56a4e09a167e6c98ed8cea25c0c4ebc5a215595891c6eec69644df52eca8daba68fb73ca9ac2f4f2b25c93f3389019db52a5d8cee029ba0ad5f2336fa5d5be4425cfb9dce7553cc330e7b255214e65b271adacb9d57fc77f753e0ff23a45bc31957fcbb721ef2e290af2501738bf9c82a727c4a2594acdee582b2632f338f3bec1b94450f78656162bf75237e08ba915ac353e20bc666a0f6aa7b0745f175506cc331eade6293febc32bc02ad234f34c3e82bd265b7f2e426052e9f2751dd4dcda06599f50e00a63f633e12dd30d128c1e95e0a16e59a8add0de87b42cce1ac062660df327978892ddb8f7a25e12f1f779c80f0e68735240987b230e6900622191de567e1e87e5615a4ddf744fc1d1f1f837c65285fbc5f5f5a9eb3053a9a2d391ec77009cc59c36d62e3e5d82e833eaeb75bdbf52bd42d01167bc99b892cfb2620a10ef0fe3f02c679fd2bff0d77f20742aa0297dc8854a211330e4a491cf8c05f8e82924d113540648e1d8b5592e6402f1670464834a564cf87ce6edb4b0a345b672b51f6f0c41b180547966b6108690b2112431cbdfc4865931d746d8a44b6ac9f850342e803c327ccea336d59036f6aa395628ba4b29b2b7ea4314bfb4c2e1e6cdd9327b
245ef33ab69a79c3568c9c7bffbfaba1b5cfdcd763e3711777aa9a7ebcf08f92a22ef7e78c01edfd8291e87b5dd781b0507f48d1a85c9749bf57aec2d1067c311eb8005aa047b2ea55966bda816ebde1a8da6e319fccb41bc63040b64c27f29aeabeb14b2c50c585d14195aea85ecf32c40aabeffebb31ab4c6469f8457c87f1bc5b28741f02ff6078595a7ba28af731885aaa66f8076459a32e191c3d8ad24a15bf48a23cdd790867ff71c915e8e1b97e2ba2b74a23811ed371233a66253531a5e82e5e5087c90b686cab5ed36e537e6030966808c443f98ea9642242fb02e16f084d6d9bac97fdcac255586875bc573d39477869e958b67e54fb508411ab61c4a588279141573f4b3743e1da8710a1512988dc1d65fb0554b198def608a07966957e216f2007d1b4d6547ed3838adb28aa97af73f3eac630d608d56037a4bf915a6073286449d5a62801a18ae28c41aada84aa68da647b9b996dda2167f6b222537121e467854d096457f354403510f6e717691a7d6003c261e63759e39802b07ddf0c45b6c995c73b801e57d96837bcb9bc04c1d9757ff6ff80b265822f63c46fee803000000000000aaaf743951ce4aacc9c2bd27384d39f95be20f27b3f0651d427a11780565d80b911ceec904ad7ef335a8fd88f78fc48eb5f1df4e0ffe9031aaece42fcd64371391f5144bc737c4fc00dae05693635c4292f797375bd90cbfd1034391deb5d7d7ed847bc78b8a62d44cff9ba0be17d96b233fe16866578f9a7219bffdaa56e3f11500e65371f8aed30dfa0e8d8e5000aac01c397ffa4d70e34792965e9091fc01545aaed662a92bd97eac30eef1db265da678649b049d7aa16f17210201c76e6575d8c40ecf0b03eeb714ffc8c092e1b4cc69099d475f6714fd270531816ab56dbda75492b6130b8b16dbcfccce16109869ac4b362bd6246f4efd743d4e919005cebce4afc0aba706225ea3487cbb8c5d351f64d3bd508755a93c5e12273c7e098b9b1a547c09c2f6fb4cea21d8f7183b103a5cdf1a9f3c2d10a9485be21824306be6b2ac5a854b2a7de0212b7683848ab4251915e787bbac62a5855be8a38ef6e416e3a0a754b88339f9fc033119f511e14ab5661e2999c66c9a696fab826d10132a9612c4cf7af156d23a4d7be51328b1406dd1fe4be3f655a1e97f17f2cd54f3c54a0bc706c7b91c5832d9096e00f6815c9e93ce56b73a63d66a6e9f0d260182bfa7a3363a45cca4a3eaccc0fb585822f0f35c393d9446593b87f21e9e6a478d9f6a078c864a2809d7c3bca93f7fa490251a1599ed30ba4df2272b4dccf55f8cf60b47933cfeec7af9248cadd9b1488a8b8957553a964691630640ae5d1696fee7c9f99ac9dbd8110023e1a858cbee57a51395071c97fbe0daee3f0e2
6d0600ca3dc9f4db13cba31863d62925844ad90c91bd12130e438403a8fad837c1970055d3798726b1fed84fe00d53d293d27a93db2f3b4eb2b61c2bf80b0842f0fd85b5eb07e8f5f8326e6a65448467cb315a53a1c783ef81e89fbcf0258f96bb542b60e017999c44eb01e6049360a1d9f539707c829c5f35a487336681afe429c9c3e01486b51b31cfcd4791595c5fd678f01cade02ba18c9326fc462274b3b320b00050000800a27a726b4d0d6c20000000076be1900000001d631df616954b7070608cf89eb24eba4f408de53cb06719d1d6dc833357d23b9877d5e12c974fd03f1254bcf3957b1dfdaca74cde9cf98c3383635b02bcb4c2a85cb74fb13af269d5aa691ac6a4d78863be03c34d763943876e42f455c5decd20271c29a96fc4f263416389116e1227604eca91bc57b3d3a735e59802bf7d63c0516d8a76462ec82b31f9dda183f91f182d29a2b73e63bdacb8bf7c094303b805b87ee1d177379ad9d5df42b81266136a0719176f90d1ac3370a5c15f2d370db9ef431db7c7148754d1f635421b13788abc40e6e07c99b0ad070628c0c74307d537cd4fe72d969b163abffe86cde11ac6142ee6bdf97011a66b762bb2dd0234c6a4c98ae05e4e7d33cadb63896344b3ebd0de47b787b5777631c9b45a73bc0032039cb649c05199c5a179f13c02c09a3a7148b7fac646e7da916982b9786b5d6bbe1d55f8bade6b1ed367cf92283e122f072e09d81689e9f473d5ce5f5f47383104abf97212e87a34942b4492a8dd4f110e32a004684794a59287b238d8f770858aa29ea60cc691410b4c864aee65672582cececa1e6bb7ced81b5fd3be3cd60448634a7b2722740859c96291b8043cd676f068b4a1ec74e3363155e5b09a4b1b9bd09c61b925ca7ab47c4eb4742d4bf13365400ef5664bab011e89ff268a32abb0e8d84e68ad82aae7b4e7bd144e9d3be500d9a99155020730edfc0114e48efacd55c18603077f523bdb7ffcdc523630f801eb08709a51cdb2ba0d063370943098bedffd19a56868636db5445fb62c5e976b6de5bd14577223de65a3e4b1a77586fedbef55c4ed11a4ed729fe8fbe227a3573f08462ae78c7bb51e8bd92eeeca225ca9dd374cb5430c86b44976386dd81a31107299927b7af679acd77c925b8a24fb4369359c2afb20043e16b619723e5d3cd0d8d299e1d8a9f0b3cebc6fdce34f2c9ce6ff5e4ca92937974728d99bd28ff7267dff99ba9fb912932e6426405f05834f386a9a459b09d123da6002c8032890aecaeed933144dbaaf5807c016dd455abb98ee40d83d5e79bf0c7c57d3b058ba3382fd91b187d2785128093225dbb73881e73f57c561ed5bfa19049faa66b92432430e0f3c8af5ed6adc3b2453e391d16b8bd2d0f0802cfa0dccf2
75e593adf830863d15278830459783a78bd20042c080d4de07d67e5388764c2280c14bc65ce665893bbc36436d54cd6d6f3e4bb84f11faca7c13ec071d08b7eca9e903be1cf4235cc8110aac06a6246a952a82133684ce739ae26b482b2bd42fb10a889ae18686a6d43c60d44e16a0d401916990be958d80c72cef3095e058b1d832c51b6ae94f2acffd76e04788deefbb4d815db329dea6431768ffaddb5903c7767e040f337041ae08081b04443ec202e143324076e2fdca45726586815f061c644df0b2a5c98b8e64b7611621852764b5d92a61f45b38b648dd8e2f79a0de4395f10bf62e4e80e474ddfd3708be9b507415c4e8012e3dc2f14cef185f3d456a09a388eb6e81debb376602c47bdf6ffcb54cc59e2ef99f2bbdbadebb8cb3035ab8ff6a2a57e13efd76e1a73d3bee58eb42dd6aa5a8712ac4a3e08f1f79c5a18c9eaeb36434cbd562fbf942029cc94f13b8c3cfc9e26bf03c405322b76a1cbd3d3a897c0b382fdebf7850aa373e05a576dcaecc4681ca06896d5af4abef00f3efb772c1297373f7dddb9ed2ff4f316eec9866ba4a31e8388cddb2736bafe24f7c13d1c0acb8c6bfbdf08a8df9652e061815543d9e41ad99567c45b0f19f2a09dcc037f9c42e591c7412280805c68514b7454ced95aba9ea67ff23223cb5872535b17ae1e75c77785ea9b8a4ada4874c493135bc8c6561448ddc52a1416c915b1a1dc4d09fa667a1cb566d14d6e0d533b66c2e0d34297b1d92c6a2f393aa1a44a895d487b5ff627a0e2f399a423fef4226510ac28f8c1f82fb0fd098094c5082ab62bf14d31db4043d7d2c4da898e17cfd811faf52cba128c0d1ece7b2d6fd25b6f970ec715c568ed7e69945aa00a1f6835f07975a553a08a8eb1a27361792919df831655134dbf79d6b9e01965df8f17794a01c644808e7f8676f0d496a71dff8405e0f3d0c2c2648d5ef9f9ba557f765ae14194c0710fd3e660847aa7e9c416916ff93bf532594858fc1a006df993f8f40773a532af085cdd664e36ac33fabf69cbc4bfd215369bd0980f8ffe3687f98e99aabbffaaac2b3924251510151008210e09b9e8b8ae9f0f31fe352d2eea5e25f369e61b47cdb30a210ae803000000000000aaaf743951ce4aacc9c2bd27384d39f95be20f27b3f0651d427a11780565d80b931d78b6ac2dc3ee4a2917ded41f4d1e320f96c2102b137961ccfa6471be614cf53a337b5c848d4867fd35d3c17f26a1a709972b830102a3b8115efcbc0693d49c48dc58d520885cb5d34e22172f3b101a1be0aa203f3e17c6bef42cc63677d8053bb0870aa3f051e47f7333cc381e394d5c01abfd30bfb4d484075a1117c057257daabeef85bbac3dc0ac3e7b6648588a444ed3ecb8c2d0eb46ea52f8cf68b35d4aeaafb
e9370f273d293dc24f90c4ae1984cf2f9aaf04a6d89c4b61ddec5408688e349d16aeb1b870b40c1a8bc657b671e60e6b561c20cb4c9fc864ea860352d34019e27983372e89e9e190d3311c982f6f83c314e4aa20852acdf413b8e01b74bb7c840df37212b3bfe79fcd5e69419846c511bb727fbd7fa52db57166b0747a576e4d2faefea90e33ff989a996ff8fb39829b5c656f2aeb9a96f7e347718662b7db2caa4e710dec8963058a7162b984d22912fdb44221ef0ea7e4deb6317016e63c3faf1912a20aab5ad1d898a93fab9bb592d2718b04cdc25033efcac07adb7222ca7d6bee26e1ed562d0e34bdfa659cc02734923bb4aecc2507f974671f0cfa5a37b4283c04f4d6e253ebae8fa1a614701dab8b59a8bd5165c7e3f19c38702e57513ee2eb504a8204c38599c70c179d0e185264fdae198354cf3c355e1c5a0cf0ed305c7dba2ef9323cbaa02de8ca85bb5cf39760c988676bc1a105a77b399917329d35c28ee050420f6a022aa3de3bc18ecfcbf8f2e5d76ebcebfc4ad057eb8c73432189e399c88f31b9aa2cd7b538b5abcc8e81149d291d559dae6408392dfb79917ffe8ae6c47358921c138b703c6e35e5a2bd237e3a43a9356622e4982d868e404718ecd992e3e322762e6cdf04b7b8d6459a9fcb9273edfc5650b36b9a94ba78eb7846277c30cd82f691201d40cfe4380c5a7279fa2ccaf96574f6748f3538928c8eff21c15703ee208dd93ea31b2c79bfdee94334722dc65450300050000800a27a726b4d0d6c20000000076be1900000001c7623f6338fe45a7b3d1ca1543f07e2731749ce1d68ec465925a71ce26e106d699063d772788e0d755fd53e355d8b705bc8090c8f418150ce115a566ee6b4e8cd9f037f3ea0a3e67c7691ca7cc42ea635807e4effd284657f2e752d07c689a1901e48d71fb8b2fad8085bce82d1b1af197ae7ed8f4e65f7520664ddcdd5ef74b07b79cc646794b1f09a56388986b996e8aeb8c2e20a2c5daa7ee555bab5f8c7e5537e4b890893c25a7296cfd4dec22be5c07bc7e12ec1832407e96c35127cd048d74f60398f924119058cd69bd27851da7add42af4c1d4ce0e02e1d978979d8d68a66d34971abbc9bb12924102f2469c4b24b3780fbd52dade931203036fe5f21230ac8c0ccf15bfab74e2144e69729d9db522af9b97053426fd33c181a733dce4d227769324e2acd4bb8b9b33459b22b86f333e90d66cd0c53c7e4306879051047a19b80852fecb514c7cea23d23bfe0d71a75aa1fef6a0fddcc7c16dd0157338cabcac8fa73495cca3603ab86689aa0c4b8ee74366b40883cd4f3d3bd8d06f41fbef853dc0b43accefe1b93577e1ecbb434b3bf56758ecd899792d68579fa4480f74b29fe39b2649a7738299fbdae921b26dcb5
4fd03bcaa266a203b96e4ae72c7b851c0f51df66cdf781b87a28cd8add379b9d89f5f207a3f52c032d6784ec0607d0ba6bf36f78b72605f1aa45c1a939de49ca3d338f416fa26264720a2e736c6ff1434eb3e67b39554bc1c488e61ff8b54aa96e3998bec08459371918956028b4e7ec1ba947632b4144790cb07087ffb52e1f325b0b0e943f866e5137ab4c9c4515ceca6d8509f023f4150ce44b6eec1f502cf6acc779563e59a3651929325dc41bce473d600c057c9e62cace696b19c6ad6ea8f2b60a451c1c5c9a635c540263af8d93933c86b304993286979d3a33b8275f421cb79ec128aac0a85d5b5e5bf4576fcc44572d4a5ad797dfd6694bf4d5541b06d45fad91b71b1c2f50de515e7ab5afae57afaedd8a70c88d927ac5613c6dcca01f5aca87e65a38e0ceb3ca640b75737b6f8bb9426f986c39e48a7da4970e3552d9d15ab397814dd35dc586cce03b4119eb41238c20a0790b9aeb607a0f6457d388506838c593364b1d555696f7231c39a1fb0e64c570f0e5fc1f9e6dd5568bc23ddc298e17e16fdd2fbebe7c7feb52e8dd44f5e74b9d9feec4faba288f8c2c9d007000000000000efde3adb327ade0b12f9e3ac2eb6a4eb042521e90512cabd80b20fbd0bd5300db9a23d773faa0a998de491df6187e86dddd072b67e6c134a86846455fdb98f3d93c7ad25cec3786e99de886acb122ef9ac553c025648a1d8cb3df8ced1ab635b74b388ed0b459c917d88b176220d7bb03aa4bde99d16bb32b9c4fd629b06ae3705040e912f9d525ca370f913b5ad39a9b99b4ec26d9541a9c26d6067f31e0ecb6326f807f1699cc896e2aeada0ab218a934c4546af9758fc4c98bcd1bcc530d786fddf5a5833ecd4f1a072bc3d251851c04735916ed25d1025df37b44c93f91e9858bdf4563a47ced242cd25232492a68a1e83f2860ed66b72a73cbec8396cb6e91db895e8accbf7ab7d029b77341b82112456655d3f026c604a90460944fd0d8278ef89248e55c8babd1631154a65d415bc712eb177d6d36fd9d2a517e2b7482e53f4bff1ac00486d798224492d3b03ad7f084947f56e382603463a817a0c56cc388d9b15d9f6a81bb3a81d393bca0b6449030e0a30bb56d676a8bd9c2197bd119c52ddfe51920bac81af1d5a7af5d33a5b1e8536f3331059368c9a690cebe5ea65e8d2d937a958368c53b5a6eaa7f6b47fb37099c84725471136f3cd11543ec11525f762513fce80e427075dd5db463c5ce7156ae5558d52fb9d6ffc59977bb095fb1a25ad15ad3741cf23bfb881335089268cb80cb168ef29361a800ae58d4446abae47ca34010f1d162f00075a7a6172feb77ac25da49e470c068399dd0602b1cf85cb16c7a9f177a96a310a040130e42d5ceb68ea69eb9b18ab2f991c4
aa594405859509e9ab889beb919330923ee4603aee16426201b4c2e258b4adce605ecaacd33c7daa29f14665a8c6e50631601d047deb7cc31bddce3616c006a6abda1bf42eb42cef55879f0c064b3c0c445a5183d84052f9dc34c6bae2ee334cd1f4224131a11881e97eb45fd9a7c4255260ac2207889fc293248cb40b63a90682ba55d372156141d70b714bfaf9802556295c95ed688cad2f963df862ea690d1a733ac9ac92d82f4d3f457f8a98940c8153bf5b9508dd42d0aec4cfaaf8ec122bc07759049bc679bfaa0151df297cb44c8d667781c7bc6c3f93fc3f49ccd5ed96ed96b1ee6b1a3f1d6d4c74839eb2753922680851f5f6be1d523cca47c27a0f720ac839f92306726432cd4e19d88919530dcd115453f5b203ee890e6b075bf830c4668a706f3b2444fded7048c2b529c8a2552e0688dce37c11acba59d11835f6da3ca6af94189fb71b6eeece9e895198bc0af71db4922ef489dcb3688ab3d7907c8a07ad3805ee7a4c66d7c902b7475795d22bd890603ee2e0239079e9f42dfdda12da5795ed272f52c3cec87be31c2d8e9afed58a9a73876e334f554ca84b63908de6ace53da7d0f7018350441c341d8c013e084e9d528784224f4ed17f9192a36c0760606c1b8396c7d7dbd2e17a5fe35b32ecddb8521214006b1398dce9d565876d164d2dcd5aea8eec2f0b0089bc65ff8d376946765cfc930a4ba7ec3876bac4e5becaac1a3ffe5bc1e2ec6b73557392d6b2da2834ad258716bca943076f138152550643ad50035262a5bc2068aaa7037e3db7959ff731d29de79f0f871cb22fecebb8c5b37ea4669a45d798fab7cd02c70c1da0baa0c6e1dd91a3503b90198abf7d430f9cd8a9c93f7e917daec3213519bd269e89ed41868ab9db7a9065ef33dc01ecb31fd23578e1f34aa4320873ad4eb817484aff4bc78b540c493262c8a87508a0d7b288f49841f2ed8e91ccd386b852a32d65512caffae051b98a9d7a87873c34db17e1cc2f7f357c3eb10e231a4212fae42ddd7e1456724f7a602e26f266cb435cde5e70dacb8e7fad748dc8208bfdff8d8cee35696794eb4e953b6e80729589c50f897cb3fd8679b88623d6ff9c31436562bf93aabbd8d6edfabef091e98c4cb0398f55748362ac76983a3421e4495a382393d5ee11768caea54d4d726e42fb72460906deeb3178924e62bfc0e5f0ddd88500dcd7bba02da0160a7446f149fabeddee216f04e115fda9ce3f0076e1ec5a77eb2ed20bba0767f4a89a877dd3093da56618f04c898e01ca57fbcd9072a0cc6200287ef79325fd4690762c7cabd16e09ef917c0ec041f04178361f147a3a630086021bd3ebdc3d11a44e48cc7fcb95f7e9b16317e50eb8f525c33adf6ffbb3aed9c3f87728788e0b384049fc7ba6c279
cd7dffc0f1bd50d2b6f87124c49d863b2b3be2cb38bf91ae8bb698eebc5dd0e26543afe94c47e4744a5e700da051391aed89d5a3b4e5a652bb6106c97c7472374f71661452886f3cf0d300bcb208f12ebc67c3f6ac575e46f2d3388db4cdce1949c91bd896b037c76f5259aec6462dc6caeb4c274555b121df666e709b2edf635b08e20c25c99c22461a7001aa34c1ba8d676ea65a3d5e7c6577475ee586c5afe24235b3fddbb4d4a33c3176baff667eb1aeb2ed9e8f4c0351b8194d138536b18aa20287545502e31aa3572eafd461ac0ac6e8086f09d1cab2008504b98ffd85ea4ee2512cfe0706a2a4bf5e6972c7874c7ef174d1ee8b5b8dc22f606122ac04614c64981fb2af3da299800247793767c4eec14ce47700fb76d5f1ece7d3c873f3dca6166d4cb9b8a9013c8594a0d10de929b2117662aa7321d30cd56b7bc318708dd4cf9574bbfafa0b861f09fd2329502cb8487902e6c3f65b794f1b5fdc27246d139810bfe71bc339f9b91aae1722e1525f2b212330672b1c4fc7d2ea8dfb430df27a3ac9bc4fb103baf38b15dcc232f004d278c828ef71d8ef34c675895251e36addbb6340a932ea2d5d7fc88b9edcf5856d9b0a16eebc902a205b20fc4315ff2365a80348b72c22f36be8db102e2e859a4ae6b62677f529c21025911b516a5b0a54b512bcdf8cd7dce7bf2736bfa077321d72300cf0bbf36f49f76f2efdcf4d7f12b6513a760ce9494a0ab7d18cf29e2e4d3589892e52f631e1607e8c94eb8c482ea2d612900a49a87275160fc6ddaf0c798aaf112f0d12f92a04a400c98f8e087d7e1ed8abf6fac0786ce7ddada0292f045c9924fcc857dd44ace92ec9acd0318fcffffffffffffed17182c783c649f53c0ecfe900a58cd818e89d2784f750df6c906999337dc10fd601cbe12c77908a1fa9176f725d2e9946bbfd36e57ca142291715f9601625cce6c063d0b2348ee79511fd4da7b9e7b467659c799db3d6946ba414d1dc4e63cc0b19da60d09c4c6e8c89c55bccaad30cc30a1286aa02103b360df6286f640fceaf2238a7420b20f07bed9de036cea83fcca7c30f78af9fcd6f83cdf2fbbe233ef1cadfa531ae7be352979e7e35761507f2d89df2e2b869d9b559e11783e40991a7792cba9d85b67d12228f847bf77da4644ba9f25b2d5c00840a2f913476ad8fe9ea03968735c8f03be73a988c121cd93f2aa1e8eae2da53d8c034448d13cd8d0b79089daf5b3cae99984c90c6c0567163c6158f420f22cb8c575127aaf4aa87a6301998574429095b43ec8f58efc0b3a478c5892b2fb22bbb6184da85f0622b01a287aaa7e8b0500fac7acbf00f906584fef29c6cfc3fc79340e28eac1fc43ef863c1649d71d09b0a3e8af071ea9e251d66ca0dc4ddadeab90e3cbde1
b63b6f017ac5d674789298fee86cf0ed94e43f84aeb9acee607b42588e3d5c2599b172f7a21a59a5266dd9b0b8e439783144c1e27198c95d54f77923f6bebb9facb94f3ba8d36c28422feebbea33465bed6eb970ba845c9ecd17018084cd3dcb0d9660b688d95bab50c40aa6d45a4e41b103fd2184b42823ad01eab30c312f048e0b3768514720b65efbdd07602d8b98b28a617e321a4932cefea65131061b66abe06d32fafde9b1daefad21b31dc7aaa9171af0a22f2eab4297269ce491ea4dfc9f329451a4b245ce5b8cfb1ce676dbcdc70417b726c9fc891eba5f784b6c8eb98fa73dd826398cdce405bb5ab37133770b7c056976eb25d2c6a63971b2beaf4fdde029485dd642351884d08dfe4793a1a58bce450a874e22883758f174b22bc0f446e691e6e297088294268a3679d9160bdab5bbe52a8e3128bb695f587d9c76f7cd59d14ab0ffe23ec71359a67fab6e48a3eec76c7d25b104c9dd72daa31d053d4c83c105fd5f643a4d1350c36d131a2888043b3b4b00f2964aceb5ea83f8b59eeae003c9270c4cb235c881ed70270ed71e58e307540c05817a45ffa620edfbebfa4501f55299159c16b09b4f038315a9247140169b266ae311a2b9425ce82c6f1790c2821992e2b8971a8d8c2b93c5b406cc46afbaf0ef0dead007f5c4a539c26b96007bee86f256170349df627ce0a3f9f56cbdbcd14d169f2f0bb581e4f4bbca1238a1232c21c2f5c0d9e7dc440c545e15dbd198d1aac7a86f14c7ce676966f2c280f46e05f28379fdb0c9283e044d638b96c006f24bcb1630f4f6973b0d3d458022e99ca66ce5e96dff300a4cd8552976f8d707c62c25046e27a13536b06b5e8a889395f1a2a64ae549e9325fc1dd1e315d63f13d3b27c1315e75400145cd1f598b98b2979c420a262043a6b3ff77644c8ca7f502faeaa6e0c6e958a0a0c41713c18736024af99eb728ec85ced9f5498af53cdeffe6da0019ed8d678b608a7f42b9c5167ee359d01d582ff1337f02ac68c8b7c0e62f0b23d87d85cda65051a38e4267f61ffae2a2568258b11ae715145651573fab4441f410a53f93d2c04e95b402a26627899b58c326616518b976b47c58dc780d37a5629aa04ece8cd60403d451e98b7d7a8e450cbefef7a6ffc5a9ce758c1ee44eed3d77154806e062bd6bd0109bbd4b131a1dd65a6220b7a03e055546ba0e2802d2e1f974da4492fa302b0dc336a66fbbff22cf117b14e494670d0df70cb5533006a4cc8fc09e23d1d2f39eb94bd34c91572fe28c5bc2bf5c3c721c12cda22ff8ddae2ebc135483be812f2b89470f8c49db998401af494ee9c4e1ec1d8cb6dc13945346e3939df37f17c26913ab74053f9d1c0ebff87ea7492cf78605a4c58f6b3f411e9313c0dd42150ec151543bea
dfb92d805307d58e5cd092920dbbc9b1ac2d282a66484ce4d187689d1068956758b8faf0e58ac45a0acb390186e93cc813c60d95ccc92ae49084014bdb9d17f9a8792cd259ae8d26f413b6478e6f85da659078d4e6bf0ec6b01d7a4cd068fc66a366d6aebe3da3ae382323a297c3fb6feda07750532a502b627e357ca94bec355248c2af55a89dae9b0ebab240e3780741ae07405d2e5f0650909678c0f5839fe94ea4599b6667e0ba7a1a8a330af5deffe74bacea1cb68568582cfcc227473dbb8f8517a7529229e753d8dd2b221a957325117fdb58a6109ce669518059e9481d871c673338fb13a10afdb259376986da520530e163420aa5f2a2df19467508356c156002bee6466e584ea3580eb89124a824cfca548c642da889360b94cac2100d28b1993f560ba7fc8498b7799c661f946f94de409f3725b94872fa2b92f70b8d37b3f87dbfe49885f4ee03f945d874bffd578da6c6a8517749e5aa75a97402c6ac6ba8b641d1dbcb09edfa468a160ffe2936ef434d21619d2a7d611c44649514c693d4c4908fef9478ad52f01f3086e6f6e645b2fdc71fab0f151359810478efcbc1bdf1f78641870cc85ea6279bd9b582ab87e04631e62cf04700f683140a2893abff40a8385a30835404782344764a1d8c299506c325788279c37ef0843e9795ab862081a7ab48ec675716cf6244b9b1baec2f02c92bfc52c2435a19c83ce522500b280b35c58b74b22ff5e8d5f8b32ee25570ca2228c13e93d192861112a7a6bb560889f42ae353e5b2077d3481cbbe80da739ca2a033732a13ee6d0f2347823ef89839201a70e5c5361c4a77e0742bc7e76035f464248ba4a0e6d33b3658f3d1c602d42857bb9177ebfb6f625117de66db487d853c33b45382c0e2ae35b418dc2cdb8df208c8bb6fa70763bcb1589c8f7399e3707a7a1eba22989f8bf3eb10f9bffc654ad09da6399d84a52770b4b41c894efe1535c5343f4190996ddfa9958a4c85ae2c7999d6b9821c76a923833629760710cff881d98d50ad4fd24f971d119610852fc00cc92925b67fc1884914e0f801f5aab42a050c13cadd2a06b2e914bd322d7d0d06de8ef0abe89337b250b085c5e9edefe1ab5eb31b2554027786dcd31f7f843977fdd170ee2558f1cb047e0f41c33e359aaf3b819de22072d029d933a61a41e17a81d23a493411526678f6fa48cc3eb519dbdc81bf5f638824a694f7cdf67f3fdd967940f20f46f53f9b4d00c4ea5575cd413ae361fcf08cf9c07be710d78ad2d3adb44274b80305d2b49b0bedfb500dfc139401974a449cbef99e4199353cb3c31de94daf1c3664e4be33336103a4ea9664ae6044f1a4be9a1c20fcbab112afaed7bea8fea51f649e3e7328fb557e0ea29eb88077141a2ef9dc883c1321fa
fc4f2409ba4e4442885b1621cf9357b7c88dc187b30775e0cbd3d11820dcb2b1ee8b4187827019c6424aec36cd795ae3e30929fa32acd3f60246ac98554d701188cdfe248c0fb6d021f675f0b992aeca2ba48a5833c27e93bc4574c476d4fab5964a8ca840e975c5a1ed841897d285af79d9ed37f042d76078ad05e8d9aafc9b99adf373c5f494e48bfa615116c2aadbe840ad8b71f2e78157cacab06fd47af10b8ab3db4b8b5deca2278c63f42cb97425a8e24251389c2469c8af12d5b716073999df5d2ff2d98444c36d172d65e12bc243d305a35dc1960d4a900abff69ca2bab362c1113898a5c77a6f0bf7754c6a9b245e35a3c886ce4d199f0062cd81684c33883e2ab78bcabcdbab830f9b8f18dae38073f1c2a16d49cf68860a770cc8b83465ec7e6e0adb7d4dfde2baca3cf07185f73143e1e0f3d8d48c2612f81e573180b782b2cc0fd4f19ac53c90df0525f8637a24b14c8470e4d772fcaaf547d93fe3b8cfb576d5c95122faad4c2408a09a44d71c905523318b2cfbff35f54212cfc800fd9c40be627750e90c0f2e2d29674e9bcce10bcc4b463d8dde7e1fb2770c497382a3c3c9a2a9da7c55754e31813d393d567148f42579b5beb242c03de27698c8a10bd803852510358b6d9de23bde974bbe43fcaba461ce295e460f00e0d4877c6ba3b35d8f1fa2e5b2066b17a041bbe8ded12a498ecc9760e37092a227841baabeb82918c1a77067fb520cb4f6a69f1bf16166772a4f4b9046936d3100feb5a8357dd759237d2d48ea431bc9eb14808f3b632d749eaf097e3c337dfcffb3421fe1799b000abb8ed1654f3f7cbdd6eb8df9e110d6c14903e9c7b53df0728332d9c691497c056dce17323f08fb655d478bc9b158d5139ad8d72c02d36efcbd66a2fcf545c202cad1df2fbd9981f154f7b783710c422fc3f4895ae9903a2d52184cf2855aac768c1642d53c12e13f8291ec49b163b0c866279e011b2594cd4d799c6afbf58179a945e68eda2cbc491650ca9593de9ca4723e9955972e2c411d24260026301b610548346b5126d1f2cecc98a3b1346b225e3084b97b15ed2ad6d1fa742481d678b7ac384bdb31d1235e91f79c83f462d47df9d9601fa91629f28a625bf89db32fd7e30d5c914cfa0df305a428f08c238e42c591b51f6cb3c895f76fc12d3fad226ea7d96df01cf3d737611c803371e0d313adae368afb5021c6b8b9b32ccdceccc288dcb068d8e13bcbc9e017b14e7b6376c7f7dee26011b1ca85105466d3f8222040ccd60c48bed93afe069ff25b27470d4f6f74541b420a9a8da7d5a65bc4ea2a497c2fb682ab2d357c37db43911869604ecc28cf103be1465dc29caf4832998a3440ddec22a289fe54a48873d743c981ec45fd560bc06f97444d2764045303
746bf9f85def52d6680e31421322dbb45d9069a7832d89e8b9859f8d335074c6452bd4446e5e94b8b26a51b9d29921d63ddae9d818a6feffc4637f2fda2929543a283c597cc06ed6c37467d70024fa38483173d9f1b8bde7786d0733aa40ad9ac7df7224d5e779a057c347d5e3e81392f9872db1e4c1258e95a3c61febae4dd40b42e1a5d87c197550ef0edb33ea47346f829538f37ca6f300022a77de33aa822bd295ed2a67b05bdd1f8ffe6360db6b2e905dd767cd03cc062030053ebe359aa6024f18e4f05f2628ad01aca0e9bc9b6314e3fef5cd52545ade0c9329242229469aa787945f154e52fe9431e0f8694404fd156cfb7c2580ad449b873e6257af62df4f817de8b9b30c237879e1a7114d15a8f1e963367c88212524fd678069f6ebd9605c5e96fb7acf068363d3f9d995c73c09c15815e718a774cdfcf489be716db23da14fbc80fbc42a63a7f1baa445cb7ab0216120e0d1b54caa5e6e17fb9665289f04ba0f9111f73607a0e26282621cd333bcdb6ec14139c0d874730473976682afb7960a3574b38f6396b3055dce559568cfffc8b762ec232b30a5a577d48fbab4eae1e3dae0474ae84b9139798200cc0adf16879a0022bef68760c3b2b61fd19932260e8aff0ffa2d6a81f87a18d605c31832de607e36335a364c75ecd93f85a703d3af64c8a754e15ff0d999c8829068af9e044aa80e78a7adf97f9500693bb27ecde2db1c86bd4e9a231160e92c2cc5a96a887c720f10a9a4004992d1f038cc470bb781ae79783ae7a21f4d06f885a3e3ad9f3fb9bd355fa32e011f436215e9e8dd00772a6776b3d581598c9760efae1d503cb1ba4a81e8f5a52621319987e4890df8b6bae5072c26d17e3d43736ec2250e2a7ce0f0818053f8e5439262afde37c797bbe8bd21b1c3e04116638df057cfeb31a043f723566c37b10d7dce1daa869884364396c1b130c1e504aeeb061bcd4e24270d9439760c0f4f6b8faa6f10c02addc6eb29f661c0605147be1a0fbc00672bb56cdc7a95314844dc7657fe6b7c2eb8aad6d051d3d5a32e8d4815e4f3ae28aa92d8c75579c8395749320c2e6dba4d1c945458c7aa7cb21a1dd4787e864bbbdac43fa33edd12dde978e53c76e39194c64ae2a40284e4f280f89cde30d18833727505ee36099d3b9ee8e940ad137d2ba55a8a70c3a29b107dce63075c484d7a05393f238cc1d00ea8afadead82bc9430fe387fd119d6540454a7adea3edfe42ad9e189b6b906a549a57604bdd19997ba4631313c8df48419d212b69c9be9de592e1898bace19ad8d6c8780c1b0ad068fb6d08d5de9c5ff0b323ef87401f8d7f7ae8421baa022ceaafc6b48527cb12cc8931071bc648d6c2ae9a8c5a6255726f45868dd64e03518957d9eace44e90a9adeb380
d597ed7100d61f0b6a9cb0bfc3ff05de01aa557d6f477d5f532cb29e735255795dea5ce1e01a372f53c2c29716c7d0856cd82821bfd36fc9716b26cd0f5a5ab20acedacf715a0ce093216ba1856fb474c7947755a5862d5e1900e6b43e951859ee9d9ea560fc41b2df520dda9fffe2bc767e4c1d83fc34be8eeb57d9c41c36cce1a856e591bcf2904bd319e09d2c29e9ab4cfa8ae9ee9b98336586a4ef62b8707addc73122dfabb8f29dc0e9e2bbe86b6750ed45a1f0173bf9a837e91d4fb26eeb537b72136760e5ad4a2f1395ed77b80ebaedacf88e7457f6bb1053cdfd383807fd2fd222808f61f1218ca27658bf68d4bd684de5d98c672b9b13c4bbee84bb129232b7c1193e334b792adab0221511d92ee5a4b8e68ad344dbb428da5c0624cf5d3f1e400a256340a76505214ba95a8142c1aeb4a6bb34250579a20cf6c5b33d90219ba180e5c571a023376e69871ea39b73e3b5476c0a208e1cea666eccd18fee895662b8f6ef2ba478333bf9dcdd349c769a894f437e42cfcf0f93b9316b2736dbe08321963cdee7f426a87da22b1b5f63b4c2372a3fa8c2daf82bd132fe83b48c1cc3338bb601d04fdd51f294f44a9bcfb941210780c25706416abdd9a9d476951e630369d932e5720d4a607f95191d70368464cb1aa3db6e90c8a18f347ab5d7dab119fab4d2d7dca11f9c43ba9a3202418cd87912a044f9daf0566486c1a7697ce0ee963838d041dee7e73a4fa1e29761e2677cbbad6854c0111b739925c72c61a1199f4318e9f7202c6641c79243bf7100fff2dd8377957b1d135695136afeeec1a7295ac6586797b986a74c15b0d469eb56f2a6139eac3c3f6ade6e05296e17d380080a91c4c22f961271348ecf2dd291e747ddf4d25bbb16f3e7f3673f254530cbbfea050cb9123385b83d12fabee9bb1fa0c0be15e388c145d8391978cafb7031ac9e216c9cf05383b94f966edc66db7bc7d05cc70cd3e9c85810d7d43d7fb0004577fb9abdf6656069fc73ae9ccce1c4d5a914b95508354e0fd51f9cdf1891a3d91693fc3fa18724ea7460e6bbb734cc9694b19fe9849de9f38fd5dbb38a7253fa3793675880690e143212b8763b9419f5cee9b1406258d7356c6f5cda9e11399d7b97223e8d1bd66121f52b73dfd8a6eb223af576f23f35e49a1cfef262522da5024d42b7b057e837d17d4055fdbd6d03854fd193906dad4739125eaa5cb3f8bc954fb23d9bc83aa2e19ce2971c83044a7248a8837fea27c2b5f86c6d7c80590239c005027dbdaed9c2766cf28d2651f0b5600b39dac4b582a4b3a655da432dbdd024974654ea40c64135f1d2d6e4a6c4ff043c7f442e8d8d3d622be589d130a5c257dbf03d14d9765cee61d37be444baef4787d550f36b311424fcb58f021563b7
75cc9cef240beded8e6f9400ec7930f359e9afa0b97588b05fa922bd10429799d7580fbf68fc8e01891e35768396168eaa08e434b0dd6990c5ce03e8b0a623ce32de7169f25fdae5377a9e8aa848e039f6db8e9944a1961aa0d6b398310ab94ffb275684e747cf15890b83f8984b47d39147f9af4e8fdf02ab96b7d8a0ec6a496aa7910fba9ede0a0c7baa80aca55169628d664eb7b2b87b54d7d4cc91046f75591d81c84ffd22627cd2366dbab703847f923062c5ce0cacbff0ac9622af83fc9b804ff72238ab28a985aecc9cc87551ee59a95ae250eb3c53e04a0ae202136467e395d74f69745b025019cf7bb98b39042fe76d579f2e67f0c62718f3b8adfb8ebfb661ab258e133ef45e9a74f195e35f2c9b8fe4aec01dd939e4a47293a9887a5f8e385f0f2e7058e5a8c025fa8046734cf2535c98d4407c11a7c6516f597524133ebe0338ed46ca8e6fabede12e7303b54b8e61d38cfa075e21af11b08f83864b5c83a593f2a80bc410b5596801a4441b0491e24846f0e8e98b8bf3d68b5f9762decbcc2597afec42e20566b1d97d435c7b43f3c7851e3eccd04641b80232b3e8c4577bbb72974f4f2095b88486d86b1466260c7720974a3235d593b934d59d264b3e974da1220ff8136ac9f24ecba5966f1877634f53e7a3b27f3145b450f7aae7d1eda09056fe914c38253ff07ef3a8e63cbb2e00561d5341c00340cc053a4193479740efe1f1b82fbd52441cc8023a006bca0c6a3cb99e5c3531bdd6ae65bdee68dd75a4934b5f41f5ee7da088697f17cb80ca3f956c28f6ca3358ae4b3c8a799bd596292450c0c5a2640ea073d7c4b6b0ec9770d3db36266ad3bf77aea832d14ac58aaeb9f7446740c592a898aaa8fc9a63915f4f9353484542ba1e303274a577079a70b3192b5f4d393016104c557f51bf15908c0c930427632e9b1603715916116ab8dffa5468221c14b99af35ef6839def90521797f48763dc53857538e7080378123bd229cec9dd61b7db32ea7774c998811f12360574418e9ffd803b47ba85e0c59cdfff5dacf7e4f17d41cb51aa52bf9c1ea4f4e2b8f3a003252ff62e7c47db8a4cdc41f9dfa67de49a345c8a4a4c9fa40375f78ed5b209c0c57102e1186c88c317c6bb437b9a51c54c845120dd564b76f14dd47f0262f613126dcd5042456bcc4738612e058b38ea6ee7e2ee4a409c735c48a8d98b41d6007b644fae075eca4849d6cdd346897d8923b4c5f2d5033441a7652b42ba630de11051ba8ceefb0719d1fdfd20c189ea132f2d6362ceb10a6173547ce061a32a348a774869d05ccc7d18c0ee84f5419697260073ca6e186d4297248673498131a13f0b3e5e3bf7ae56eee0dae7fd45b955ef0229fe171c4390e41a486c8ca0177f965532463fe1426f90
a6d5280698624bd967565ea8c25a71dda95b22e3400caa58ca342e7c655d8cffabac0b165d12616c1fb049fab754b31357f49c46039ce5b717ce803c3ff9de756e8b3c18de7f5dd220faad5508cf52ab244be45091790698d241728151ed8500027829a8cecdfdcd1c5607e62e0bb4f0301de1c09397b12f33a70d4cfb2bb75c46410123a12641c5342a082814e5bf131cc57ffe9bc3558a87e35a4e562fe4c4a20cd280c1ef2f3fb896973e271347d67664304e82adbd7b2eb73721951cb60e645de0d14f7afdde400007ed904b057f62540f3fe24b442f1a27eee3d244ad6212709178362cf6e50438b822dad8b98e5e297fd42108df4338f052c078f0996adb07e607bc834256c31d865e2802140ed4410a54206514329bf3fa6dcafffdf73fdca16ab8d6a66b94acde0d5b400c1a72048783620c25d854243827f2021607ee4b004373621758869b45ee954b9c82524ba39aea2085fd3d338145a809f85a35525ae875923ba5b7fb00af433f3a51e03d728fd8b9d04e8515afeb1d2d9b979f92183c7ec342af09bb27c235d9b23bd29dbf7c799a1a9860802f5032ebac2efd6f576ee0f70f93671cb89ba99e33ced59c4eeb5a20aeb4e2345c4cfaa0893c23f2cb09ba4cf0cb1c08433cc8136adc259372a36be8eba7a1bd622ed6cf7933339efec86345f510766cd9d96786f567573a110973a4d00baa24d875d16c9327788a814dbeec67049b6989788f1f43e3e9735701aa0fc890548b28f09115ee469e910778c0e9f0c44cafe1e6b8261f2864e08b49fa97dd070a55762c1915727aa2c8c9ab357898550526275daa1770296c5558821b595dd6ccbe795527acea6692ce2d85934cf8a6b9afb7a8bf0fd9f79e6a6f473bca89e8f62a31c12b38ef594ed024ce6534c36d671e1596e90ce855712a0ec51bf30be5851fff0026f7b02ca9ff3207f5a7342029abae89d8f2de7961bd01496a97be19fcc8f4be756103f3ab79cdd77df4dc893a6c5cc42ff18d89d775a5035adfea6b277acd3a4d060aab264231cd897b5af17ab2e0d416b91cbe528d8b829b113664905f54f6131b6129ea6dbe4fcda8abd1aa29b42af8f982b285cec7d1f2aac9e45a3183bb9e2c400289547d8a667a1c773607b4780792bdcc8b92471972d65f085e09ec49fa0458487c37bca53290d1b7ee603ede6130e2ff682f4d4849c4555bfef27ba1742853d2cdd794dbef7fcaa2070fc4dd37ce4e609d917393d2ae741e1b3a13f6086b9e56f398a27cfbe2dc0d3a38b8a6f11762d3d703714213350c0767a9deabe4441ed8069fdec29a8adf2fa5f598c38520ca16faebf109daea3749a0e0f96f47c66fbfe6981dba7b370caa8b038445ad50916aa7ef94c832c8432dcfd3dcd708e7eefd1a3ca8fefcdcb320
61ea06efafbf6f0ef43f6dd2b3f37e92a7a495947b873d15b2a4d72a97c8dba8aac7d3afdcafa2c627651f7b333fa4bcdfe45d07e2353290c7e284aee26ed0de36bde45b311ad4b9030eb61972cfc0b8f88622fd2d6f9be85c2f959fe035ecb82cac355716018a9e6a5947cb517 diff --git a/zebra-test/src/vectors/block-main-1-687-121.txt b/zebra-test/src/vectors/block-main-1-687-121.txt new file mode 100644 index 00000000000..fcb86df62ad --- /dev/null +++ b/zebra-test/src/vectors/block-main-1-687-121.txt @@ -0,0 +1 @@ +040000007605df9ee66f6cfb78e2ab05017f060dfa3892955a450fe4f0e1cf0000000000c683414a5817ef3da22b88245e98f4fa0517556b857ed85e8052db3208b7f1109cfef90b13396ee098034296de1ce2b71afb5e86a566eb0c7843a655dc397e38b85d9662400e021cd82e16d5000000000000000000000000000081000000000000000000800475defd400500bfcfaef00c3783a9f27222a0c0e3bb3ab1bc7e740a40be4e881ced0b6f843714775ced53c8e19fa15a2c1cc1853d1d02eb1336447bc2b44c3e653bdfec315f9a536bf4e537f3dcdc08f7b3c8a5ab150f5c11eb02515f6bd2c5f947d20dd174e828d3667a3555ef0714a521814c0ab97728bae4a83e27b9158ebb9dcc690cd3df6a02a5463d7952c113f6ac41fd1d7dadf5701a9a3fb10707f16c4bf9153892d5a85e960757c8130e5c458e9bd6e91108adb1349dd630d174489dfa131dc383c67267d1b38092931b9a4102d11809f7f8f5173e60d20c5000f74eb433da63e9ff41b1739a8dd42c6b5eaf281c6fbdb097f64000f282a5f636bedbe913c4fef508f5b8b1d636b881d5f5c662a1097b1a3f40f24b03d05b7361edcbd7b3fde5bb9663b0f98c2546fc2624bd2f1705a352d552d6797c41e3a8d9e36a4841b7946d55a61fc96fb5b96db2839a6669dc4be6015036fb970dd569ec67807c7396d8919d961258871ae8792f2c8b525ab84892e9d79b65abcfef5fceaf0273a3ef46ddae7b80af43fe77c55c7e939377628504350cbcca6c08cf6e74c368502af855454f1e0a7e072d89c3ee8c222ad5b270d13e2873e8afc21c27fb26709e4b18ce535b08eaa45dfad415f25c75b9839c0736767d2ec88a5692c3728e1cabe13111ecaca831282c8faa8a35c645fb63044e033a660973ba1395da0175e0a4575e02b7360a63ecf83deb053e1cb4a54b6adbef521f1f8fa30b63180de3761a4e4b2cb82d4225ad61adcf0a3576d62e836b54fa437950173979fb4f568b1ca9ebdd658ada0cfc7274478f5afd5c757f063975559e41aaa73c1200659de497a553522ed5886d4147a7f172f19b98cad9d15c571e
772baf19ebe2091d7b27c965a4aff349b5b84ccaed8dec828faa200f37cd3c6c1ce55b9f82835904a35e6ab65ff7007802e4a17821837ac7fe07e1dd44d91368f294d4e548132ff98333cf27171206d58dd2e1f62f79c9bf9732056547c559dfa96571a6427336ecde8f6aeddea9570577ac309543fdaab02260f1317834d5df3cd069260c2177a234597e71bf8a61602c0e7c6256b7b91cd6138406e6ce54a27193b2d3d0deedf7ca178a59a7c5255d6daf59976714dbee84ac5647c9cd69430c81f12e8f8511d5d1e8a50407a4858f719865a663f1740c07d162d68086300f636de780fcf75b9ecd6c1854ef2be3afe70d9ace05dab623b72c3608057e2337c7af087fea3bc9cdd3db2e1391a7355aae847ad8e9f879158e8f342b06c296d4d5d6365dc5971a792fdab8f90ae7362323665023c01c31189d4f1b0dece15afb2340aa8bcc3758d769e3fac7100dcef7a1fa9017da8b13049a9b710dfd3f9acd5258387148aae7e5398b3f48a1766ffd63d1b35b54c4ddb4e189759dcd5b4b3409e7b89c2d91e0a8fef3317bfb3226ece121ee8bcf17d3718721885c0e7e7a66c92f5df942325f1b149c0bfeb668dda11b03f150f536b0d07d816d9f1f26ed14f9793bd836c213ca40215a6d0ec0e9bf34fe32290dd744df9f2770fdab46b4c3a7e74489e5313e853e37fcf38d78221c43abe044c3047892968b0bdfcc421f127c05693dc637fd8334829dd5fe83b2d97e2b862cedadd0610cae69ac157664825ec9a6fa3ad928cd1604e1546d48eed3aacbd2364bfb6bb4c3668b154047f854bcbcdf847b82bf15a20ef7f7b9bb5a5577f53b1eebad29dd67cba705b77e4cd4b422447a9d810d52e58c3b82592053333805512c2bd622db5a3b1fdc1d816d71ac5026ed4a99940b3e20a031119a99c3df2afa5eaefe1010eb65ea14b3bbf31c3306a17ed2182c93cfa959cb8fa3f727c31d6dea9391f81cd38fd23fea060c8058c61581ae5465e84356413bc94cf10f04050000800a27a726b4d0d6c20000000051be1900010000000000000000000000000000000000000000000000000000000000000000ffffffff050351be1900ffffffff043813f60e000000001976a914be62da59de8993dd79965ddd27629b1ffe55e46e88ac38c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000400008085202f89026bf5fd27d6465ebaccc763bcad7467694de9e9ec7eb5797a8e0cfa1ecac6c81c350000006a4730440220159b9f59e64ca403709c0e48b0672e127cb5dae5d128c091c79238
2e47cbf18d02204a0ad5533e5ddd4ccf64a17db7f2ecfc6d42b5cc20494bda894779823cdc0f57012103103448d6d4d8adf400144bff11684883774ccb0cb1a69bfb01b885fe10486090fdffffffa79d034d79b4eb170a9d0af0f40c17a30ea237abd24bc38d6fbea03c13f96897240000006b483045022100f45b150c20bee81cfb809723a8f2d9416f087652d989a1190683f85a52b546ff02206f67f6fb60aaee40c6d6834e95668dcafcafd74a947d6cb9db4874b27b8230c5012103103448d6d4d8adf400144bff11684883774ccb0cb1a69bfb01b885fe10486090feffffff01930d0300000000001976a914124bdf5da18edc9126f5966e276da0202cd713f488ac00000000000000000000000000000000000000050000800a27a726b4d0d6c20000000079be1900017b571ed7ae58b0ce378ed70dd81c97a728beda348e90460112f0a28edd9fc6a4010000006a47304402203a6af468cc6b9331477f7990d7e2780ad946e3fbf225e66c445f2b62d4ef3a4602205dd34b459559e3fe2105939fdfeac48a347d4be4868fb7d05fef09e7c6aafc540121027cfc0fb5082fe7a09d31399966721a7386eb580c9713f04356c68341ccc6af23ffffffff00000133172a80f42d14a7aa803d50aa607bd645193b6e9d4fee50a2aae460acd04757c72b1a972427756162cd86dec963a044b16aa9ec866173006e4f2570e7e69501d517c370eedca1aea4b7dd107721786decb9984ff07bbb3f46b436e8fd8d06439285d65d421a101d9d78bac4afbb741a9f86c7411dc6845bba4b96706cbf44b47d245c99ad783ff725f06522da68830d3656b2faa0a3a395f1d15e349645100a530d04df5fde1e7d0cb35b1da6e29b8d26991ad78333ad4de00e080734e1e24ad64a2277c42a2f63383e63d92188c961b05f68ddc429b3bc21da179afefed3742a3019ab773d7e0b46bc5cf0089f94c5445cf72be11538bbadaa2d05320b74eccaa2415ca32d39e2a3ae19c7815a274859b440e55e66fbc1bf16b01337dc2976b2259fef345692733d067698c1f2197091de4f48fe981c3c2f504540e536ac89ca9330b232c3844a3c8db27d1a2a3e982adb4071da8e8d083f002db6de924b8af6ef807a43e5b17b3e5f0a36cb3def7c191d97835dd900e2396daaa5395d17831da0924c5d5d80db236bc396778262a3422f2bedcba0be904b2c45e49f593d2ce299fd97e3e98877a5cadf0e3a7f87088a5abd34825c68cc5d4f488b018d00a185c565567096e1456352bc8230607ddda59fde7bb656bb6e455fe6cc68e23fe6210e8d535327f27ffc1888b827377aa8a16be0eb0556c4c779243e348eff537281f16c7f008e600a563604c48c0b199aecf3692bd9e759541344781211ff2ba150e5c03097
e3bc1e1dc907a628553cab0946ec29b663307faff2a0e2b0c5cb3ccd3fbf9752fd89630297b8c6fdfcf756a9a5b62cd1128aa8b6ac703f90870646d6ab66c372ecd785e607e4335f51710ad26d131d06e09dc18fa9608c0509de08dee22eb6f20b47c78271794723297dec4f666420975769b9d86035fc87ff64920d49548eae61f65f4aebf5e603bcf1d6d74c29a7aba86d1d2fa0fbd4cc6b915aa7b6dea803c221312a3fa8c910042db68e9229d724f53b6a4a6e4f60e9713c3ef3907b63bbe2ca79b1875c27f2583cd61728c9b3f7e336a8fdffffff862a5b7d6b89c915621a6c4ff2c419268dd68860333ba6e4ccebae936f15ba2ebb84f272390826f138201b4cc40898bca105756609f93d08c581341343dedc5d7065fd3c04cb23bff3ce2f847995620cdc57eff052981906859fa4a58aab76df12db8837cbf89c82c5c6daa8c5abb1d2163ee8bccd79b91b5939ecde8dc09e2f9675221c76f972b456531c82c385e2dfb3fa1397faa91bec1ce90edf62724e796f362feb5c7731cb8c8f35c0dad9d4a2ada0493bc09ce3f14cf723dd9195805049da5da53a15e2e7358ff3c09232d792b4629322c8ade8a2858994dc0fbbecdd5f54131fa1419d20fe265168931399a672804f7686f0e3a27604e0f6f56cdc0000050000800a27a726b4d0d6c20000000078be190000000000021e47aa4eadbd33ccd480e432c39f7510c78e49e31f9d13cc09fff7428c09b60091b8a6236c23877cb1c12def624d5080f991723b192669a345471ba719d79e136e7a3d7099777adc909db50d11f87615df309407d3460d2b2924385d39ff338b297d1dc28724a7dd6ac107bcbabdf76fab687867faed2b10f6c2fed37ad0d83b564943e27b01c54be637a8fe1e568c23b595edc57a3aa57662741fd51d078f95b08039a720112db1bb3a4e87de8af702fabdfc3cb627b23b1882eab54c3683c66e520c7f1efa1d439ca330a241188fcf0028d514aa707b4e7dfccec7dcbe1146f9e23eb7a706aa0547a048480e020b32c99ec886c99d01fe7c94e35fa7950c284bc25a7024f7a79255ea5fe8ecb5cbda2036dbd29f838146b5c7b3c856d6086d9b1d16b0e75cf978b79e4d85e55cf177d7bad41a2e9b6b73ab7b99458b9b57053758565f8cf27e6f7c461a20666ad149f9354b2241b38ed098dfc70001a478a30f7acdbdbe755c275821c7f98ac12eb083a99a79d01f0516859b0927d649e35db35f9898f2475a58236e8711d1d5f90a7e6b6d8a0cf32a202ed7a885c476f9e11451d0839e1833231c508535e0d591a75e411612376631d613b8f971b7a3c277d1edab900b689a33a0d475e9556f419b08bf0e28bc73824fb669172fa9b31d516475b1a312d8ecba5796c6b30089b61e829a0964bcc04c
8e5e0edc2a6cd15791eeab6497606fa5d3560e98223e5d69273a9be8dcfa93d5449d51b317184256d1a90f158aefdcbc750293c8cbaf0cd0f0b27dd5e731e13c48a56d5a460166d9c2ba3e6c6488b3f4add63470e42bf9d7d467b2e254b37509793552f19c9a055f4834002f5685a21f49d0429cf9d7c99eed0996d7cac9cdbd5aef54808caeac1c6c2abbe3a84b7f9f0f39e43e157f1b606f0d963047c9fde62761c577d63d260be082989f8e76f85cbf16565685f8d14aa27dd8fbb613a6b8c1df38178ee3e636deb51e1c166cf533a5b9328d179701810db6f2cdad91f01f8adca669ade5f2ddc2a4fb458bd6f8d7c81184a592cce284269bc8d4f625cc0401179c3f3152fb3e29de145b2624e0cd528cfc77559bb0bb79e904c3098bd7b8966fb0c0556e0e5f66af35df3d676c744bf7a6fca50dd960fe1e838441adbfd4f0e9d2582fbea9c4045abbe6439e48427f3f8cc5d277125c63d73cfb3ad25ead823d1a9f4505909fb92898846055383500545d8a205d8aca12ca55fa328322fdd3fea960c5183d0710c4e9a4222a17e798dc7a0f16e81997899ccbb5ae7405bf582b8bcfb25fd8459d776471b289a22510ed7f2d66c10177c15e4211012d05bc3b477c69cdf9f44b60144a7943c5259e8a54af82fb1b8ac91eef89e186d46fa1c5e0855d3369ed9f722b46e803a3fd3049ab20cc4214ab8955e5b6d499b51a8f5d047c6087e3a97ffcb7e362ab5102bcab3c4ce2b01e464ffac693c68e55350e07e0869bd76f2cf6f4ff3973141d9b661c225708c0e18a7d1ec2989fde061217e660d73bf6299093f9444b87e6ef06abe0d1d88011398418349d6bdec68b7e38c6fb281cb3ed1bdfa21405c417668a6e992d05630ddf24e63452c1bae9b4d9ef61d5462d865376043a1245521c27e788d8cfe8dd33638e9c595dd53e2e64214b39a7b0eddd1cbfe21f92c9b5e499a3f5370a1a980f8578d751f0ccfbcd1a92b247c665791fb7664a7fa0fe8077ecfa1b4738875b6d01a15d3b8bc6c1850a5276c418a0f6b947bbb772680494e4c7d7f77e44be847f42497a8a6fe8ad1a54eb1a0c8866dd563658f9ace5572d3ca854b862580d29a8b29ac165ef1f2c88794035f5112719723f05d4c20ad220da8bdd209e920d9dcef3a5a5e2916c22a77de92c2937b31f8492cd666c4fd61fe03fbb9e39f6600c3d4ecfed1ae153f4db2df4281401fd585c596dbc43861afbc103544f2ef48e070fe7d8b11825d39224b7a3a7e22088060cff387cd9a8412773d5b9d85a720c12b5a936b09cf5798a5f2028165fa0c2490d7292faa80f76507412139a4d5254b0b0f002f6a94ff51734496e78d41c888e608421107178254ebe5cf3969624e407807bd238229ef747aff600dec87eb29834a35990
751f53298087743c1aa77f05f066d0eafa53c411ced345d1612b66e1f7a2ec6ce97468538787ea594d1cc76201c09b02caf6669b41f22d0fc3b52407765cb1b696a1e972c1c54f2f6f05660852af5a847a9caecb0e9d32395fa9efdb90d5e98718f4a5ebd65c82fe72e2676664878e9e7b90fed5af7cacd06a3a130e0ce238865df9daf6b1a919fac80340420f0000000000ed17182c783c649f53c0ecfe900a58cd818e89d2784f750df6c906999337dc10fd601c93b417a4df52f6529356fd9276d37d672fd9ffcdd589582f8876b4d752bc47278171d269b58ee46c7a90446ebe66dce90d546f03e2a1495f7a4c8231d38178898c058475b65b5bbc4db4f39f317cb51eb9178f5df5062e21fc28d8262fc949b036a6804c51baaa13240a47b8633999e3ab7bf2efe71cb4415c8d49f5ebbd48298b6af2eaedda0229e4740bbdbd72369a32a9db6da8beb5a7fb0f5e9d8f8bf31014d393b53584f01ad42bdbcfaaad1d19c6ed662d11d7b9151b745cca010c7d0148415a6b5855e2d7dc47de1f868cdc0f8eda94b131a252bb5d9f7681868b6628143497a44ae7667992bf17a360e59ebac5cfb06d93f9ce1afe86d2b089ea56186488329a2b21e33877ae36cc06c2e3df25aa0c6f7703570b457f40cc9292c9bdc6b69c13d67ae20b4ad208dc5b31f57f1a336175f7cfb5bf124eba524c2d6a8752fd2336d724963ea6a72ad22f05680b2c15cde751c035d8195741fbd314770ca5a103d91b8a41fbf959bd6be52c0e8f3a8561052b61ad59c3788c238b287186bedfedca5638b6cfc2cb88fec9e942b5a9d34609ce05410cdf2a253760bd102b52b706aacb40a2d341b1220416e44205dc346bcbd3c7d72d4966914f7a36488a19565b46e01b147c49d468218698b1991f9bf2acc549c1d083306ae4e87b5c9ffa603f60a1f4a817e98976f07d201f015e5a8ca77a05ab0a9fa500896405213ec5095b636ae99160e7f6929750130a6b9d1d151c512fda12b332cb98da1cd79e987f48c1a2e5e323c8fe859f9b833fd2a0f287f2c0da5a10271b5918d2f0cc9713d8573e12d13c1a7f4ddc5cedbc9dbadbd26cac882437ff987e97c42a99260fe04f56c40b128f43dce0483e78fc691c1f373989ccf09eb3a2e9aa5b3f771cb8bd217cda0a9a49271c2aa9d6761046ce29ed5e541a67bcdf489e6fda04fc098e91772825e383216350e4d1d2a6d4e654b3cf543af2cf99afe04b9458829316bb6746d658b5ff1a48b0fea8f73fa5b96d2b14b28dcf2781fffeebb4a95b081b31521e784f2e5d79be90875dfa2ce13fc6d5d5ae80105aabddbd134874ab8f1707f1071bf47778f9b2eb77424e78fd9dacf60643b98e835a81cb1d1d0d5f75ddb186d01ee93b45a2c49f5cca77c4a2176e379afc
788942584d1d3fbd10476193a54d4b5cc637e294f32c5e2c31a871b9d387780183cb895a9f1eb4dc911b3b511af3e65a3b18379467ec6d6cca7f784cabdc50c2be028265aa7d588a19206c14b3e492cbb3931d52d92bf2e3496ea2996aa88ede7f3381ccf341180be1449e6794edbf884217bc4af7255ffb2a86a8f009f2e8d88a0c6c108252029126ffa5ca873f509f2d4408ca0b44e77e720b5071da9d7f7c2e8f427be589bc8ba76a51393cd456ad30b8d5d0202eb2475a9d2e423b42bd9a09851439d18e11b36ee8d99639678009606b3a4abda4c877317ec23cadd89990470ecb756c2b43ca9de6686ba1446bae910473172efd58cefaf87536eb0aabab0e40fd46c99e7e215c1706e52bd9e59c5f55a2a40868a092ac583d02b579083568cef0fc086a067857bcf35dbe76b34d17976d448882b9a9021b20180f64a672b10386626aa501e10f4c2a490375f176b995275a7b2b69a045d413ae1b9fd29f005c275e906d57fae5771159848859a3b4e355beafc914d684188da6046c8979d92182b5e4773613ecfcb56411b8aaaef67cfbfa0ce6be65f27bb01a243d343e5d1bdc402aca5e19614ce50c3a5ae6bd2d966038f6f677b5bf9dfb5cf7c1081b229eaffb6f11c3f7018e30d28cdbf5a729b4a0e545f424539bee531e5249dec1f0bc40bf4db68a3a3c7b014a0cec192788ad7eb80b0bde6b21585b6719e678e07a0f3628e6fdc2a932770447171a898cbdad8732a9307f7f22230c356af49fd412faa85b827c9d827e73f2f38c72b238228377f8afbf4e2c738f5f9bebc1caab0797dbc3faae8a2b90588724340b0da20e6fbc21f11d36e8b92f40e54be902f1817a61aa59d94bc14f8d34e1b91c5550b7c6f0baf7f598a62b32c80a51c2ad31c9f83828b456c2d7cc09241f84ff25586b62ca3f256310fb46c3fd17cf2ae7ae91343dfcdd67d9bf8e2d92572a23aa290509e85b36867efb64e0fe9f58061c2a27760d818d3795c5e9cae8f7bb3dd52a0b3985d04942e1d384486d72473d1bc48771167b95ee2f6f2ffbcbe70b639d1e63a383235b26d1baf16b8dd46bf7482dcb00adbcb7c7b87fdb1f2e91944a277f5f9fe371dd90157545141049d70ed45766e06d16f0b6b3d86e8da76b234ffc480aa1f0dcabc1f8c6178cf5de43e813a882b3a183b1827bc6418b96babededa3106bdf58d5e14927289fcd161fb0b916e884292f01481651ede45d1d7bd0fe47f6e0332bce447168c7944cf05fc06489a9a1334fea0396e9d2e9d1d0f2aa5430fde45db027b41bc4c877d99123e517b695b16fe8147f61867a93414bf17ef22882c11cd292ad1cd1ff51a67c3cf1c819c0c693a725eaf100f64b7cabd1d61cb9f3ed82cdb9fcf7dff7910f2af53b4d96f29ed53f7d0a2008e
8d234a7017518c43d97115d2366debef4412afee8c4cad4afc40615b3a4e985122541d6a160f235793148ef4a69002f4408a7491c979248e8970eb889172bd7ce3e0ef94295dd306fd2d37e4e3e592e5b77ea2fa57735decaa6740d87b5defcfbe4ee47c388d86b793aba51268c0a1ca037ea6081bfdaa7ede4e596b34a7ba6e98cddc350def2bbf0977736ead1bcf460a9001c3f3d674c8ec63921488345808db89ec980f5f84b0b1b7686cc321c372421439146c2d58c94935394fb4685e0502f9683f177a488145bb33b63ac04708a5dca934af1c69fefae69d9a7d0036b6a42324f7019a5ce27172c8e3b5926d5436fd29ebdf7707267d9c1fa2358902e4118679830ee93a46ce4c1e4dbc739647bc5a1585c0e2a38c389ac25f8f10a954c0978b112f2d5e60b25b1da33d3a320ae1d1d46015ab6ec38eaf0631d403720ab9b21732217b36a957283dcbc7d8fccda2ed3831f950a1bd0a9fbfc439195e9d2c5d189f2676d3491743a0c6d36e0623db9ce94e10f602c16b09e003d0e2e65b06d02eb11f7571df819f0697c55a7f277dcb6e2c343a1c16741baf89ed3b9bdc7fd1c92b0c30020fa97bb26461121b35a060546aeb60e1a371bf83a6c59ca9da95da46ea3f765c4f4abd85377e1aa9c57da4a1ef5c41208a1d408ff8d29739a6f57fbe9801120c4660fd2e7199e04251a0ecb055852800950417b0fbba4a8df78e227c460c077ebb4dae5d7f146b1bb89f7a4caaeb75d71c7d22209de48f44769c29f7bb0ff58ffa831a1cf2a9d5fe2f8b7a890805de124d41952471f91c602e5ef0dcd62d72bcd734684a8ce47054618d29720f462cbba8d80e865e8da6ef2825f93f8d213927640c33663473f06bf3217b0afd9a8d1a34e3fdcf18a8e5729706cb664b2ef39f402e02a69a70e8e5b968b1c5e8d885a53990431f97eaa3593a2659c681270c24fdc27d989ff3d626059b18478a7a6328d253d1d37a9ef79a547196d73606c0053eaeb9723a4a647d9f6e4b14575c69e291f526c14555a0547416cf4c0d0d3ef8be9dcf09703768c4313531362887da1239f2225635fcc1e9c95d36900e1ede18059619eff10f678c3933da64f874c9f645b3ef916ae971e4918e5e0bc03d4af21c9ef828359ff8255cfe978ed1b47812c61c6f3fedcac07a030ea14b272190b127ddf2b9c1b2ad895374fd162f410a4924a48e40e57662038e95806b572c271432ffc40b6b0fc03ff4244b0752c29e7eb58bd2e628e8afc5e857e54ecf0591520f24fc6839ac77d0c9ebee0cab36dc70e55285cdc3d65bb28a8ad3ccb4210a35b4120f797b86096271d35e906e6d15b23ca19d2b0d4ce49c3126db6e000ff18aa24cf27933028c16b14afae3ec6acb5ed75f8e1f2c9388f580baf8c8e93020c1cd
cc23fbd41c2b319fcc81117a437408ccda109006191b613b5320ac8f0baacd758c46964ac18de04991d88eee3757b1762d9e654ceac57d81d914d9c23f922795923ab2a888b4ee9ae4bd8ad0600a716d3a051b3e01a3bb3dff7a8c3f21df46ae440ebbdf8fef37321a46633a648298c28ceff01b95f1539beed09431363ffb40748fa36c873c1de3cff9cf01e4394e0d1c759f9ad9699f7497996c911f0eab40fdc15db374a727e4e01650ace51841c046c311d74646bd4928fce2f31ffef524f033560b92e8e5902249c740742533bde154e4975c0378fbc27f57f00595cc790af29f482342e23ef2d3d088a9d641e65db1f0f7469b98774a03bbec1ebc7a5916a4ce3fe7ca4387ae59e8a1380c93de51a32c4241db3061a64fb4be266a7945e75a702275925024eda7cbabadf7d4ae552e7a6bcc05b5f38047a36d34ccf780fdb7c9ab2b8a4b02182c376cbcb53137354843922a2bcee5fb700ac4198de63de1632c345c92836bda1e2ec8274c8a29de97b8a2bef47676ee3efb2d3d6f07c3fcd1429bdeb8a64ab9d8a9940e677186588666fd47ad375df649ecbe25a2750e32b416740525819293561b9a4279c289027774cb7b165230ed3bd53704a92107faeaa08a71c6f8fa7c716522ec39801b4d9a89c385746b7e8f964f6b35934d5246de8097051b98d913a7e704cbf8d133ac172e7c528d9406237eb59320c7e3a17091beb43e66218e1187b2332845484ffd2453a7b431a2657f9288503b54754c91a3439c636a742e30dda6c75acbbdfbde64124d882507db29f5106e04fb2beb72aa37f201d33cd556a88ab2e924e64d88a4fefd5a2bd5d0b175895d2db52641b3769a89ae8296ea248b353b675d165697299e34335a59152846be2627cb9a2492e4c3ebfd9a66ca487f5d01407c23ae191eb48b6a6c1a40735c1c6a263817e658c2cd18fd26ae80eba88aa78d9b0ed01e5793b82618475047d9e8820fc3118d6343a6d277c39e545bf9a6590c0a3da99a3ee922c7006abaed3e8761003c274b540d4d8301d4939c2b0a5878675852eda7ba1775e04c40982dfaf8952330b5a2c00d3f6c4674aed1881ae6fc4992be60b410ce344c5b5733bcd90b5223876d10ef1ae7c96cb14d1497035a4ce255391037b7b57f014640e3ee9f1fa50f63edda22a5ef2915a0687699d9821e974e95d56207e3033b89a969e85cf9d90ea374d98a3b63f808af7c57b6fc3f514e4e0c3f50e0232ece703bc6ced5a60f302f6e6d61a3245cc1a94de948831284e99e7907e664352f4783109ee37ce530305fef34ce01f985cf16cf236d68df4518abf28abc808a7cd775d1416205f2b90804f10d4b614f965576ab5ba04f5a13ef56a671658eb89eedb8fb125c9ca81525d8aa85b624b9a505ac2055
fe2c23e962f5128930653b139223d114182c02f019fddb73c51eb540274cd6feb0cd9eb276afddfab356c73fa9354a47af05aa3531524f6ecae6dfcf4d83bb68a73573aa1fd3baf0fcc475d43a551139b3d81a460180a385ea52de72ea93d3c5f35ee9e38e202733d7c8ac9708940a130da51076093996e5fe829ecb1155d2ba64c9aa4b766d2b5707874e2010ec2bd87656066e3791fe236abc7a506cc0a973aac1fc0d3eb39849bef4d92e85d0f2f3858ff6da2d0a35dc2cafe1fd530a833d30cf99a27536ecf4d5a21c56550e4794769fae49024f0f3ecc6b7c56dc6d809858a9352b1da20f25deedb597681e168abe89e5b717c998e0514f65585501eb4421120f400abc0717cd0012ce8f2e016719c89cd93e0d62068bb2eb7bb0163e2bc64db1c4970f20a1062bbfbea2f6a1a9944f402c135f62d84f75827ed179b1db778849a624ff2a65b21a6f984273a2d7513ff851131fe035e889d62226a169e767f33726f6b393669c3853be9c28bb9c867ddb771943430086d3c6eb2fade4d383e4cd563a74d6c652a21d9d1e15b9428d8b6a522ce51a22b10962ea2bb446af1578e7af16c55fd16e5a11d1023a8f34d22f1bcd203e440890b20b8a38dedf8d90309d0874cfa35bf57d641c2c2fc39c1915fd4c194e30ccfbd8cf25137689c0e5786135c1f74577df3ab84670ba304886ccac610cb3a7c2fc4924d3b51cf37f24bf07bb4ea9aceaf95582766ccd9559664b8edf1df40f49d5f5205642221cc48f4e8aa74cf852debe2bd83f9dc18a2fbb8dbe3b3b220f7f38218daa3761bd614f6db85c626f8b47f94ea8dca426d26d278c07ba191aba391529bae8e5cddbc23598b65cb31ac5faceb0764c64cae9ff0ce98081293e6e136c9393cbed89d57ec0ac53b5c733003fa81082f3cbe0bf4498b210410c767db0114c10bb921a55cb2895e313af9e8b0ce2e0b0e927bd2ae35fc3a7910355fbe6bd71f188f04f4085dbe0ebb2623af6a79ff0f4f576bf3c8f90f5118901056333482bc571cf0f52ed75ac6ce7638fe40e672ded60bd775082330ae1ce0c53988b46d2d4972c4cef673202f4fdb235c22a3ce706918f113a4806a1199335c581fec82d2f66c36fb1497686fdb70f12e702dbabc13062209dda18f07a870c9e4978fe2a07191ab1e12e25fead737a06e178d9778e953758066a2d63ecb902234d33c4f2d6899fbe6219009fb0824bd02dde79d2ca1f442fbfeafa4a2a310b9b61f50aff3f1121f3bec1724a0104d70507f9520927c93fe4a40016dd10c51a2270a5eaca9d68a5a80dc515330a1f6e0978b71bf962526b0c91d2e6c34bdc265658438c72d92ef78070e989a94835e277c6e60648adbfe22275380c60aeec385e0bae23ab661c9489aecb8dcb0d22736c5bb4
98eabc9fc44a2b057e909c0a240662dc7b7a2473e05a9359697a86e3eb52ca18962caf4691532624908684912bd580e098e3fe48c8bc6a917024809953626f957499b012f37d0e378bd7a89e3fb7fa055273d2d81cbdecffaadfcffc0cb70acf39e2303611f6e799a852d37a35abb3fc3651e9fc72119f6afa2ae901ca0f3d8fdd61e613fe923b3e3f47befe160f8cf910bdb2250b52ccba4698aac5932bfb14765c908ab4da8e07212844733669969ea70eec1e7dedac5a6f531728b9ec0f8b39227e889eecba25dfe086af27cb0702fac1defcc09aa263bd5ad2a5bafc1f33cac0a5c02e0c924652c589d6097d0f58152a10e081ed36f46025e55573ce567cee06d3f3730446c95cd2284f3ede2fe99d5231847301ef6bc7120ba5d188380ccd159a9ad4f8414fca3b037a30742e91617794293a7e51bcb6063a008bc119cd59be5c95312c375f695ba8a30e58f2617379457bf8bfceb67afa7fc25b210bb0e5976b07f182e4a578008e4c2b8ca00241a4907e210d0cd8d71784817b518d83e14ae88e7f57ad9597bc08da3c3a755aa7925719c4cd6d5e84bdf3c140a0d8e261c0cda041d0e12ea6a53ccc1d3df9b5ad7e864f60baec265f43af23d42648b9f6fcfcd1eb0873c5bcec6d192391f56fce435c7d4a3a7624f62337b90a3784e5907e84b600f768195e679e24289bef256781abef2a0aaa58e322d0fc1acc0bdc2034df516694ab7112fa09c507ce1d6c91268336189b827012355dd5891a21bec1bfac38aa0a3a4d8c4c3ab5368b9f5a79ab8f303ce4adf602010d0cb5711ae1adb080fa4b0c8157169a6d950b93798dae16d9e654f4b614d7cb9da727d7a7ac12c99b994eebf3c5a4ef99a819bcf5c4628fd4add6238690ede3ca8bb82425cf3c458ca726297b5855fa5f22328c7d2a909f545147d028a2b58e95d86781daa78daa37c58022434444dd9c6b053e4b5ad0fff23ccd64913f43d1d4e08e0451090840ad2f053eeb7234854347032de0165c321950f7bb3b0baaec3e98c05a98ee5f44f67e2504fccda3cd322013539c06a8ca11c118d7ad8fc918831b245d9c401fa6d9ff3c78ae95da8bb4e120021582df40e6e0f5ae582f0a5ee7f8ac9a701f0837639407781a3d6fb458161b4243df670923f640391faf34489c5f58f660114c248d7f01af7f353478630c32b64955b5977e5ed2ba438465da59e4ef182ddb7db034c02b384e3bb277090c17d6db97644c6f4e2542b8bb26cc2cb2a8ee880b97d990aca6448b69185552f23a58b147922f2fe65b8672473e21ca245218e56291f1f3e4b321a44cc9554cca0057448f8a21377d02dd61df6e8ad1054c29dca50f4c4c4715b18b9c414a5d1603eba30bff0785c1d4a7dde549d67e1180b906ca061122460232145e
4723056c27c279844da40814340b71f474b7b6ede0816e40b54c79e9426031ae1271a3520488eef6c1d367890e4bbf2c3c38a2052e6ae8bd731c57ccc682fc03a2451bbf3704774dcff2f56e3afb1ce11030fb004fd8c774b779e74ba702eb3cdcef6a76121f384b6580e41ee6bdd3ff089e41d69a38f2ebe7725b28cb299eaf3ae878213f209ab9def57adb5823ef267768140c9f8391caee6e966650ba1dc298f11be12a4ec594275f3c25087dff496eed59611ec383494f16cc07bb0b4db5b7bf712c3d89306944882061a60e4b13048891fe6ccca909a5ced128d683655d15f1dd3b2880447897864b2f54373fa7136aab55dbe679ee8dba455dfcdadde21c20994e015012971d4bec6be270aa837bcc9d61bdf8b3aea44edad682104253b82b362b1757c437e4f52fbdfd6ae3f6fce33bda924379d66809c012e5f66487e9b380ec0ff4ff7cdf399f94a02a6a1f01ac02cbab420f1c57474b799c7583498accbddd2342e1ede40322a59fb9feec625f78805e0e0686957881afa3f99ac14140b77714de79a8718e1ee7640f3c4c155bf4dd6e7edd9668b2b22282f89eb3751cbcb70f0532661ca12f0a0c71e4b9075b62e76ef8e835257001854a5a93baa931f3670065cb002245f92d1bc4f25e38e5c8da78e9dbe1a08e9fd25fee52cc594327f503fe23b769e4ca922671fa10bcba1d73f2a3507b48311405c0c60f24da903b6434050a1413610f8ac7620c19c1d676570051e1e06b3ee2219cf544e46d052d733dd86135d0734c696d2c2a7ccc10d92b81f93ce7a94403bf842d99cccbbff5211e86df989730f85926d973602534906ef60791391192d8b94b97ad6e77d5f4d30e571fd1b7be7347157dc4fdf9e113b11281755807ddec16d6e4bf8be354e94b277f7e452b69174628aae0ce9b885e0f747a61356cee1bd19658402a8325277e3e3ec04639bd301a5de4d60523dd940d917cf1f056890daa6135ac29d263dce437dbcdb96209435222415cbea279ba107fee351d628fd0889f02a6b0e22099d98e058a3eac964ea989ad465566938a899948e43f2006a2212556640a3807a1101e66191c6a17c58685c0d90ae3c9efd36772c371dcded9fa258765e6cdf6caed1ce2b8a420b096e3acd994b0ec861926dfa6757422d7dca28d5717f71f2232502af8b02e9bac5209163e22bab643a0b038bd56c2ed7b6f002962af8caceee01e1aaddda87623d741754b0f746d5daa5da23d950aed8c3467b9f59b3f21d9b3751ac1dba0f838fc64e0912589bc894fe0759d3b22257cea7c580d998796cf80c9a5441bde899e1e218bfb4f1c4dd92f8bec41fbb15918c0fabbb571089a746e528f7459fe03c2c754fa089599ebf7c4b5c18438a085a53fc22d29b7460e0bb6db04cf8276
a2030e4906c3e5692434f0a47f4921a09621c9628787af357695468f3a03724c75e3e781d6c40001f113041fb57f8b80f0a0a6a3b5a5e5415643ffce956bef0b3eb47bbf9fcc34261fd18aa1664e6e46afa0e2e4d168efaef6c3d0180c19b9cdf42ee05478a580ce1cab03d7fdc28fcb9dd485917d6a5605193db3429906a8a779c7aaa714448079464f12b4082b5b155bec9443eb135d87ea7d7da59bfe89d42b98e857cb2b4f246c30c3d3c29e91c66b828f79c16bf34beef0b026bb1fb69e0de9d97f8d12383ad151818b51e3aa94173b1620d4a232b18bd8ece6b462ed3bdd4b90acd240e589e930907853890b53ae606367b130e3a064c49a1a31deceb02e467fc02965b5411fd5e6659cfcf20faf9dd53ea586e3797e1c9db518c7c5bd3d201befe3d1be6993562a386f27692bba6d8cc9f4304ee8b7be9a5e99469b52fa7f348639ce37c93d6d82bdc949deb657247d4fd35bb90ad84e7be6bddeb4ef1ff49e3eafce3369a9bec0d8bc090f91fd5b9af50822b99babbdd6d92d44d4f41f2cfe9e746aa4aca96e91274d18afc96ce9255528d584920b6e6f9e2163427a8f84227cf8da91f45ffb720ed0f1d6ddd2cdfd9e83eecd2afdc695f5366843d349ee4a421331eabfd364fdca35404c17783250b0561f64fa9bbd9b6ca2cbcc1657bcc81f1b5fc00f888a419aaafcb41af6386294e7e9038221136dd22ae08375b16c1d5d598a8c3ed08ec162af5ba6c493cefb2861438b9e62e5b88e0eb2a6ef5b2613fcb401b7cef7cf4498f9290fc1539987edd34ac41c7f520ab80f126edc593023060958ce621e7994abbf203128b0dd9329a69ad4defffd7042a81163cda85827bc88097b3f88c13d0c8617461997e7acc8d9604b40bc3305621b diff --git a/zebra-test/src/vectors/block-test-1-842-421.txt b/zebra-test/src/vectors/block-test-1-842-421.txt new file mode 100644 index 00000000000..1e77de6e117 --- /dev/null +++ b/zebra-test/src/vectors/block-test-1-842-421.txt @@ -0,0 +1 @@ 
+040000008173eb058689ebd96e2ec02799df35a59ec911daf71f671b3d09b3605cd70600e7e2cfb70af4c0019e883b26bdf228ffbfb68d9ede0cc02ec51b06747b903e76b1537c927050539d31e2aef5e0fb035c3baf6b55a09942a34133d8a3c9f32d4aac825e62ffff07209301cd628a61720b554a75b7b3d9d88461addc1ead3da222b89c4dde009a0000fd400500a44797ae856b32dc41632cfc726a69291c33bd3a1727f3d4465b7c61a1273456963cc27a6e3fd3f5e2232bf57e62cb5704a24182b1954045ff2c7a3e7edf4a93bc386ee6205dbde46523042b486dd9311a758e0420564238217779fc71920f92e442d49510ca13d20dd074de90c493fc34c3339702d01a82a541b5f06e26a8de022ede570fe544e3f05be650522484513bc22bd30fbfabac10d1c5fe667cef7420e2a22d99138800c7db050ba815139b87b2501fda50b545059e867f17e00fac30e81ba587c2883407d16e8efc387ec06a0578dd095590c21ba7a033ea83f560c583c2ed9c1d190ac9033e093e36ea54779adb4c5af33fa57d101c040e61d384d3c664f9c2b1968ca11fdd34cbd150bb115f0a18ac89d443dcc2c1f7c9f8e1f5bf7793a88a0c68e2e7c651be9a8f9255752ee0d38a162139070b245d3da3cf146771a3c863177bb486f632d4f4636e0119aa135c1e20e1a7dc203b78b80739098b9670e326b58f7af04a89f1dd8383c0c44f952e326c1a354a1179a7e4621499477e6fc19003b83c06926ed4a8c323c066101d74ae17ba0a15178cada08e6c06d5b7430fd53b8a2daf1c1dd0d2d61cd4ea4929a4437a479d1dd08ef16166071fb13cf3a9bdca6c990f4ddbfc981082ec33e7cba04ee84331a1c2269c1ccb3995031341b53e3a0a6766954551b5d95df28d959c217a4edd033bd93346139191b70561de5997650d3989ed787a1a637cca689cd347af15356ae7e510999f539320c73b263fd615329dd9e67f48853fedaec78a649d152b503274df88183573e8d51a2ff17a4bd327a6bdede3037e4bea64a4e4d19cd1560556cbe44f85c79eb30f10f7f6de4ba0d9fd4a02647425dfe991a76e92dece085c251fdf192a918a50f51cbac0120576827a381a10160f86a9956201178d8183be2231362f7298fdfe00fb13b3d4097737ad1643d2543b83262bb4b5b55f1aeca3a5680a06e1a750f1f6ea7fbc3da63831fd4f0e3e8f3034893b24f4b43348fd252e31f5c4efaf0939abf2880a230353357758e721c87872e76f5f1305021453c06411d7936e4f61acbdb68131168e2e51f657efefe4faedda9b9ce7f7a9b86665de0e321ee9bc09207a7e4fe36937937d11d9c8d7da3d7092eea2fc4c9a0aa7f595f06eb89bf50a2dc3e1c66835fc922b068b679c7111b998d5bc92d109c464e2b
554bc15ae0aa90659e3b7e3d7cc3bf1872a37a9c283db17fe4028672184a626eb194f3273470ef56b9ce2820d51002a9b2c7e84dacc5cfc90079cb35ffe32f687183f9d076038b837cb968bcbed21105609515ed8c31d0da01d81634573683149736214be9574414ebb26dce4be0e110f969667816d755c54b3307b70b1d0e3bef65040792e586592c41542aca58e5ece0f92bafcddc56103d3ca91704e9dd0c390137446720499c4651d6d0f07237909390edf7bb0ce712f1b44671fae5afe7c430d2613c19f46beb5f4df813dc374a07e781f162335105a037e35d12dd5dc7fd4af1c2fb3195f6bb1cd940c5cac2704856030d93c218c0038575dcbaeaf053573141d6c321cf3f7ea6d085a0cd074da387c3a13d8151d0e5f619c4d27b47c836e9658288d1d96905950450b822c4854b887c5ad045ec3488d559c7d70ac6e0a79964be11cb16b59b3935c5dc0e4b53b884e694223600f352e0eb9db5fc9d371a25368aa47133c27700ef65d50579f0c5d010374ceda2ee8131726d93b8c1616ccb56c8ecf868a1d122ccbca677ca8dbd88e6b11b4dad74eca4ff6e00772fb3abe8c82dc837cb2854274849162e4793b5e7e76ecd1dda54a5a211f21f0161bbce8d71618d75fc8944db75803bb7e51dcf5533f183e5f82e4df2999d6e4dbe93a4299b8a690cd9e02050000800a27a726b4d0d6c200000000f51c1c00010000000000000000000000000000000000000000000000000000000000000000ffffffff0603f51c1c0102ffffffff0480b2e60e000000001976a9144fc15b96560cacade80c1fc87da50f57176247e888ac286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c8787000000050000800a27a726b4d0d6c2000000001d1d1c000000048d0635287cdf8852d9e69d702b6952082b668ba48ef54210156ce85c4ceb1639959b8d298c7842149d0116e78f74ed74206ac28e1dae402198d5bf9480d5a98dd6b043be5d293c29d78acbaf884af1aed12e2e2aa36c06cc22241f0f3f415c0f6b5c4e69ef6d78a52bfb38a03c9b310d9925d8c6ee0ec2b88efc5b53c9df3718347d33b836d8377ecd1b6fc344d1c8c34926f3cb2bc95fc0cb496409dd8d0db096c37cff6482bec05a7a7c439a495dd032e46d3798fe8c33819bbf1cb5983f6232d4bec8295f5e848d77c15b059bd1115868f5a117a06276dcfe0994776c90c641e19cf83733f6426a195dd8080a9b9541c72b380ab5d378e2a0abe9fd90ea5871a6969acdeaf3d59eb0dc274d6397d02022bf33bedf0d0697f6b12308eda
8edaa48adad0c9032a22c1b2f4572144678d4df283bf31b18ba650303fce362ad30bba4b30f9fee9cf2029d99186f1dd9b252422b02c8fa44c1583674f2bf03f3b20228326652710692eaf2f9f1ce657b73313d9fc938944853d7a99a14df62efc70000ca9a3b00000000a6099392e011346f809cdc36d2fba03eac5ce98f93015e16cc06ae175f69ad5cb7aad15640a403ce56394612cb70882cf73cf82873d506a6ff1cee26c1d44cc5dd3a41cd1a0a87dec0bb3b4da35804a4a31bb5717c8cfd0803974fc70c3eaf389f54a1110323a916ec083574fac187a284b042b16ba13c3faa3b58ed046b9f830a27c50ddd4f0fc94e72199d32e17075f0a9fb435667b2a4e2cd967159474f2b6c114c086fec0a25353a7c3956898b15a1554ba0679b1abb2fbb184d956221f793628788039325ac36d142f8f7f4920ea1a920a27d8d795b77e5cec4d279797795c47a8e55143bcb1488e015735f130eaec183fb7ed5933a1cf076bb95691dd03f5458a932fa583e86782ccf699e3407b7cd79a597cd24b8a2f3a3f862db72ff1241ed0c29ac585209341d9877cda0240c55bed3678c923dfc80b3d988c81a020a9f068d19c689a1473b451659092ec11fc64a3eb6690fe37ee282686420cb0d0d0e807cf8ff6e252667ba6603bee0adadd6b33c02840cfe672e7bcc38edc1e929727dd985b22ebdd113661da4c14c388c32a7bad414badfa1272074148db97093782bec411d12ec15b72f67bab0d9d1f8ac8fc628e86d7f87e91d07aaca7f3b0dc7749032f766bd2358aa5a15ec86dc8759c1f973329e6f31d1cb5262a5b370f24c385326ca96f70cabe99c18bd4fa6488853ca8502af1a9382ec7e3ce3b21309d09ffe63c3481a66a8966d4d634758c8ca4ea437ee2fc1b2ac7a22b25eb1f2557d5b1ac104de60c9247ed89347726ea8d839feeca7aba992cdd9f5570ee51b946a45aec6373579c49462e0c63cd64fd92e860ef61728b848ccac9695f6044da037451e4c0fd114b865450755c3cc49ea155a14e02bc914b910e3231ac16845931b870acfface06b83700c9c031085a996827ca082b9c30fd92d8a6eb12fe8e8989124d7f8d6337ffb25c49b8bc2e3ec5d69c8a541573cc6d0b9b452d94711e132562c9919007a328c8e7e2e24f44a68226eebd0a7638dfba53a04f62f215b5cbaf54f39a55a657c69111c7fea090e28544390f9e37ceefdae959418013641176a2cf8ff577a376bcd03f1fc0aa9d9517c48280fd89f0bd02c874de50b61219a1099fb6eaadac68fc8188d8869e0a4c58a9f085970f6f60952681f0e990c7596c467afcfdc9e9451a8968bbd774cce42d623e9ee7f0eb90b0046ac41aa7f1056011a1450177b66ceefa343876bd1517838f5c2d763f463d4db8f209fea
232b452a164d1f32fcc93842eb60eb9cde11a3e1097f5bf4e30e53aafe0b87968cb08f9ddcc03e76e8aeb73fd131c8e1c1cd76f52656b111988e34d92334a6ea834e40dd1712fb48d9c6d46e3585895942196c68714225f3d5f1aaa3d40f6b71e710623412167b58e4951f9f165384eb23f3bee4c84a51465f3d441d16bdbeafd745e555937c26c70492371c0777d505c52d9174696a4be0b288b3a6515eea20cc50d6b464caa4d465380146d7d4eb0cc518ee6e38582271b71c2df65322c1d26c68eb6d164276e0175b7c908dc848fb4b6cfd14c3cb1591d4c6d8006c55f585a4d0702de8ba28d77fa63c298e07240335006f9f6555501311927607a6a5fe668a6e32583c69b7d5f0a0d2f1e8044b9e8b93e4f15487671d7875f7ee8a7946666d3172bbdb6fd5fdb9eb156e64639136b93a112c74a9b2705e4f260c56918a7ef175f1757cffaaa3482cbcbadf135e68841c3a4d7f75ad4daa75fd8e217493895c5e40332cbc66e21a43977712328b617a9e040a3969b72efa0ff2d0dc531c4fe5a6a082308e8bbd36451b7abe1d0f901e12b27e0502df3d7aa26ef1461a837f4c5d5b74551f2e93a3aa306a2bbabe86794f6ef0830f0e7627b81c50a455c71696152641e016906f490b697372a548bdad898f0f485347baafa8ee1a398f88101d310569c595e16b391746e8300581e319b04c68b1e427b975a87f8ad4aeb2ff5edd002748f3ea9f5ffe6c5bc67ac067edc4ab89ef8452cbffd314b76256fa3048d1fc55fd23bc8fc35293957fdaf359a33063b6dc9d4efa64093389026ff82ad4b8eec2972590bbf5ed5a7c84debcb37ce7e9c362b2ae857b5cd4c39088c13bd9c527478441005a6f9220442cbbebf4c6a92c58fd483e50ec6ab5f9db29897c10e7ef04a31ff1ab4c7eca9959ba34d0042af88ea2c4aeaae40004bd0a168acd0dcd351d1dd2c31f8e1f96a27caf8cd844a7fd7832ecf83d7c07b9ced6750456703a78af700222e90d6e474ac1d34166974c457b02afdf365bbf0ab4a00154527d72691907d53993de59c4ac0db201414460701325b6acc679391c7b8ddf11a7b8266281f4560feeec68cb7d564be2d9ce3b02c1816b2c681a58599b471c2ae3d0c97d0281c2a8fc69d96c907f292c9c988f734bfa1fb21e018a85b3b35bbeae9c77c5f8b367eab344e28bd53ee2043a54eacb73655ae2e925276f6c68512561cf937888623458ea5f344702002d58347e148ddd7e2e75d638e8f823ac6a40ddaed21d35d1f7bbd896010f49cdd8668aaeeffa773d49adb33d3a9307096cc70ede6fd5e7c3dadbe31f3f0fe6f62f3f5b05d10d1290e76e6ee4201e8357f402527580903a1ab18c40c98fe41f78e3efd7b9de515250da16b9e8f73cbe7dce2b905ce0f8d8d5d4c39a
987d356a211480c6de046f4494460ce6fd773f5acca5b8b1da576b63c313abba35a98a5909902ead90ac43a9688272213e2e6649f318997d777c62d7ced62de1e91d256fa8151fd10be68e35f611027680aefdd5a89bb68ccf371d86ca0ff07f054a9c17dc2ea231b9a56b26be6f509bc3db480f95eac649c2d8205183428d0023fda3d2f97a45fa7388bf04eb7e484073f3bf87608d0396f4fe849570e7d11af82cdd02bee3308b7d73f757633221cfd92d276c1b6060f3e39c01942eeaf0f14622399988dd0c5cef8ca635e7f152bbff804cc19c4df7402c47026efd58daed1766aff860b7c09808f8c933716e0034ffda78d7ac13044054be3d3254e61cd833deabee4bff2ac1061f756aabfad7df33bac142e20a2e1698a0884efeed2a630fe67c4df8749a5f933889f4205f173dde95036c9f00b9ac283e1918a8f77304dc57dbaf0e1c632bf2aca5936c8db57f9ea3a1b265c7e5aaa20a1c47cfe5675b1e905d861ef92bdba5dd7689a14288b516c49d99adc821e3d0002104de7a09a8f54730bd442b2c017682c8de1e51b2fa81db34a08397a27adae34e31df2f71a8ccd5c5aad64b3c390fd35d2d620f6f528cdd7df5599da3492d49e52fdc82d38169fe244e4fea7b447dd067fc706bd997b3c90b01e1573557665e53ffff5d7ed0c8fc7fd383fa9aa7ee4c6eedab56a51cba8937eaed4987463a5137cb8e286e763e349b0fc12e5357c50b78a38ee15af724016d71cb41daf0218c0f88aece1bf2430cd74f0d25aa2030f635d49918375f9ff05b952013e914a73e382dd5489d7620e003e96da27c32e27b54974c31aa3c3571b5f6004c6f210fb711ce23f6636b7c29496f76d698cd2e88c0f226aa9e04a8add7c1937d4bce5a8ad6ef303d5d9df98e6d8552005ec1490fb5006e5cb69b795fb2262b3563cc53b0b2752ed37cebd741dfd83962f7c7021b2919ec47652a4de5ecfe9a0c921dfe6d3f92f5bdc87f10660c1238d4830439eb0b2ab1af3e2835891dbdcd58881adc6fae23095b8e0edae0413e6394ff3f874dd03164aea4348553fab01eebb209738f0ad4b8a3caf3d16e3bd4bcd83cc1cad822b707908804b9d15317162be454cceaa543668644132ae78e6387c49121b2661bacb99004ce67a0f75a1782ec4c13aac4a62d4246511f68ba2f444c05fee7287a027f2657f6dc20e2c3a8ebaddd6f7a9720871ec3285f2500403003665c4ffffffffae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82ffd601c915b849cc6648ac2cabfba726644a96118b6670c3313fe9d7dcd325e396b7bb559e95f6c9c0efbc6126ccf48bb1061382fee19c8fd3acef8286b6e013f3d5c9d5715cde36a6a94812ab99b60a9554f1dc88e5c6049a6c4ef8
5021eccd7a76009a46be3aafcbfa08ec54d2ec1b6d2f4a0a061a775f3c2c51e6a19ae078c32d6869742d1eb785d65bb245fbbf6fded72c1e9a220f78ed111046c87af194018e5ba7a6ad9055e72e1277bc03fa0323432d172b2efd16f874f910989074983a0139d38b4d5e39ba1013bb59379c3dd9f8705500c17c77e03844768a416e1def9f332558e7a6be402dc8e3297511d4f68024ffe188412437399fadbf128362cdd0187a1189f5a7a2df00ccf3669886fd6a191fc5191abd6999ed912952e600846de09caf4cf172a4d814553fad19e0d0b6454b4992d0d253186bf9ad8681a430c64258e1fc03b0bbbb8e4f0f6fc540e043673b3281f5f1d0e1eff0dfc19cdca78bbbc0ad7f884004809a7fd23dcb468a15002e581ef27ece8b3b358a1a1aeb4edac93813818670f91a70ac3f7e59cda3e49444f53f16986945b814a9bec972d3586bc923fb6511c4da763a0ec6c1d7842ffc38d3ede51d06b116591bc51e366fb8d0ead72c2415976005c300fee8d5222047b738399e5f2b97a3fc8c322f376b6ae0d551b79407035e0b429f75b21272a52cd7f923e723aeda803e7e7a5067625edbbfd35828800839ce7a9f1e6d10cbbc3ac379b735a159b6a0afa397de6e945b12712e35ea276f25baefbdefebbda3c4fb2194f21e82eebef899fc62a33b78e70326c5391f1c1d57bc630e9d727477ebdd05803691a2c99d6f268fb40a0bfea6e3d0417cb9336680b108944b3844a50effbd8983baf7f9919035cf7c11a9da728bce136804592a483ac0a8dc88310221d8f735e9cb3afa5925cbfd188932394d420566145ac51c725d07cb5d1af92ec7187264dd63954e4d69b04f2b40f1913968249e3ba1942497ba7a7536a189f3af6911c316445a07fd2fcef8a09a3dcd7fe81b8aaa9f1912855a70db3d278ac8e5aa8f224818bc46e6f70a5b973dab96c0f1858139f0ef51ef95e273c666eb4df4ea7d4ffc096586a5442234c184c8a69f98d580a190bc237d82bf9110e3987e1ca23175e24f8a3595de99a1cd43ce4b8f30813eb6ee904f31b1816e6e24de1740cefae50956ddd097ddd2e58a5c7400f220cd181b24326da1e887102365a226a3a144deac6b363042e206a953eded10299a4e99e3addbc62602de858c94e19edde3aa13517479f59b097079227b3dc48cf0cbe07a2f1f883c2ffbdff515771b5f4a92dce6e53016449cf14e93edbf320d38eb941e5ac70a647e0ad32341b991b25483102365096556d8907183d743b62ef8e744d64eac08e868015bb17e8c32b1032c5c90b506eb1e44d19355eba2723dc8b99b4d35b4a2f32a6af9eaa8a322a243cc618150a1c006aca311965a9f98bbf9bdf27194728f3a449c53facf27332af4b7b9b6bfffc94db219c7856ce137b87941
336340505883bb63368d707b3caf7c65f5a396c3eb49a495f4896f8dcb40823dfe51bd2202428c537e599a2d1e209269c831a59af34ad3cc98f2ace622eb69b4b1444d0067c82a685862f4018a7b962ba81338f5046682b86b1ecfd78a12a356adb38d2b50b3df63908a51a46472d59b8dfe3b03416ebf57cc577aec105258302c76c69e432a97039bee3c43075ead40e0b2809aa4b2033807b7250f149f3a08427d4ee57dcbace3e3a4f2b309eb031679dfa7dde89c8e51395411a7a17238d6febc576d935a9b317fa2d1b1289f3689d2854da351c2d3c08ee81db7f38d0bc0b50ff072739cb8262630c0260cf673f649fc388f7b0adced32b063cec1ee7877a4c2aa19e700aaa9beaf785e8b9d9880416a0dbaf781b12e3dcb67b0ebca52ee231244d0e46b764a9bffe4fdccc995ce808113d8cdb45d77b412e7752177621adbdec9d2ce13f68a325a8888bc4969571d70b486d8c49ba158ae1bad2046eb071585b2a0da61f33e584791d5bef4dbb9b8f435b433f32b96b7ab69b901b281be0cb06b9f09a1443aed4ca0675f3004a102a5eb16b9945d01a906afa18e7580d72e7f5dc56a45b6df7f9948cf53e8189054fb514833bc5a9f88182f2f7154b93655172c828449862517a53228ef2a3335f58d7af17682dff95bfc899d8449b1615ec0d99bc7973abd39d1b58f609b5c2a1e2e39b25b8219429b881c9caf57ab9ee14e2b14d39328bc36c7dac87eaba88544d10b0cc148e0b15ea46607e14ea8a704acd5b603a037c42fbd995ac1a785b8814d38900550e90d4e8198a252a25b332630277943c7552dfc99efd0c031f9ba643618828f03d26241594533905613cd71154e22ca69f6cd36cdd67510f501ae2a5e38c2d0abc4b459b3bd146ab043c9105c94cbf78621ec8e517409d8544db616b66fe68087801b638835b75ef3f355f88bbd09936a120d5c78bde86a8efa39395be366400cdff71646c1fb772261d2d2c0a0224b0a0c941d9ee9540d8a1c4c2e85e789dd7a27f3e8355cc4651a024a3d74387faf3a3fc3450f0666efbc017ba6c71cf83e29f0844255de61b72e2240953e74f2cb0c4d0db0f8acd90c16793bb567185263fbe2593edc10803b9d6130e8a102f6676d104382497b648cc8042e24fbc55d4e034f9daec5134716de6002f7598514d10e8f51cf0c2c08ad646ec286be8e91a5c99cc99f0ac5c8708dc1df66e923409404a79cc7e90a37c6b583be42d856f743a139482f6192ea3cdc606614066cfe4bacd91cefd13187e01abcbe062537b752d3d71fbb8ceb9112eef0dbbc92cd4352f635390baa6ed6fd51bbe78b908fe5cb0695a2be9dae8405d8c34f14de3b931f0b2f666cdda100e984b5593221971f5a99f035ddfbeb09597662f7c7ece0d5d70f0a6e
954a05bc3b8561a7d66d17ac8811b3a0317596f1058972a33d4efa81eedbe89a68598b57462532080b6e323e56e08c8e35d34038d44f03ff18b26b681b35020bfb9adacdec9ee1f564613a2509cf6997e0f0a2160234c1af701c38a81f15d51c002bacfcab248a89158f015c4e6552ff8b6f32f30c8b43fac68a7924969b7ec6f4aac6d33604cabbe96553a6c891a6f2661a072bda1cd190f2f040ba8e713bffcf7bb49e0165732fa2a05fb92e846e7757a06939d377d1b604d94650a661541a5ca42570e58bed229819c8b19c7f05e02b8761f06c7681adeb22aa2edb822ee6d53c4af398cb5f3c1a541ceedd6364a8e344da189ab5c227b9e14e4b42c215b9a9d054cf7ba1bd11dae857087673fd529a995fa1cb5153eede7fc24ca093c05910c33735845fe36a52483ce558556a53984ba0115658b0e8bb85657663aa18afe250deda53b9b5218e57e70f817eb9e5d1a0827e74b651fd486822cbc43682c1bbb0d5a5c6e33766abd1e09b1ed76157f705966503ef1121ed4f378f5d975b5314b561e26e71da1dd74b8c1f9fcdaa3053261fa02831c0b0672348a82cfdd3a6e55d863af03546b65fa7a52e5224f2dc46ed883bde43f39f4b9baa4215209bdb26b699029e7b0e2bcb1d5f5e868e15186d2b57e2d4f2934db9f47ae9525e01424f8964dea5a31798896fe6b439b910a434c6c736b3fb90c85a72e3494c55467caa7303ea70cc84f41da98be3697be5c7f76fda8cbf630031cfb6ca07f07619414a0e7f21f7fcbf96b05e5f06385746d5a0f1e91b4b00c0b570d9ce84bc1b6760b06908b980f7951f790b46c3abc9aa72cb80a629de279051053e8fa34d903411b35a195eab6c1718a0a6ec1060e82175f3f37002043242a9573581d86960687e8ffa6fd58d1b391423fa33548970e8f7db0041fd9fdec37e7735849c14dbc1bf8573b17c4a62b71b2a3d248d1b87854d809d14d18f4391169086235d0a204360380d22dbf039e7825fba97a0551f866e139bb1451368634daa6f38d947ee65d967c54e4fa6929a1bbe6c251c7a1c553890b90f6dab0d210beee76f3543a6a45c216d760fce811ee5ab4130a636c2117b82fd5f6c530131312ffb562093fc95677a7138b11f31e39b9980c6d6a395f17e181001ebefb7a1930af230dbc173c11e1ac6ba1826253e5810184abbb9c84e36d02ee89a29a171c46abaf3e73fe94919cb3d3463cd6db7ecbed7990dbf0edd78124f3c14682350ccef816991bab25d0cd7004526f295bbebe4d7d36aeaada1e3afca97f262b793e757581ab99eddc126102f5c0893c7603566474b6f7a8092fcdc332023d67ec2d9ead93af5a9edec892494eb995a3b91772af009fe86612f6823ee8e66951e52183a2ef996f74e6366f6db5584bd65d529
b5ce8fa4aaef1ad3713371871d84a2a1015b0dd09f6c38e817740e06f073b9f5f418fb7ed938b49b4e123a02e92c31d6d70c4464820c3ba58911f280d4db87a50a4a61353fed18a478e5a75e8cae92f7abe9a2c40eb25b3fc5663c91100807bc8c60d95322108ca2c20ca80ba1e910e344ea984408904ab320e0906908bbc90ae0a7bf23e59bc0bf0fa5bca1375a212c55ef2c41419351cdb9ba6a39c56fbd2f4881f24b9f588daed0f962b3312130799193fb84e73e6ee89e6cf262379e1081735cfa8ae526fc064e96aeb790d731b967975696c48e1c9ccb17eaf1ddf583ea2ee3bae97e82c2aefcc1a17adeeac2e278620bf8271931d42a4001d9f6fb458bded0d14aa1b580a830914b16d8fb92cace284d9bbd676e62c6304fd8feeffc9e27f1aab9b4956982b2917fb479f64265390f90e4c9030d9339622e82f5f92e532d1cf69bf23a06491a59a270d31642fb6cf29a550f1e45ce826f98262da2b544fd118f781b6feaf28668caf887132175413471bcfb634df93c59429443c405fdeb7431dd386b9ec22b2094d4e64351314f5a5019825caf2b9fce1031356c2cdda5ddd6a439da00640ce69bde7118924682697c24cc27bf7e866aa83590692fbde22ff7613394538b8bec2f03f425d23fb9654ff21c77f56f1b5049e5978694bf93c77b1cf8309bc8afcbdfb6713e6284fb12cd73087224587f9533995b8bced6f9a090498d33910d89b230fc9442f2a546e2f13419a40c6a5b6eb02da31f2de570ec8ae55ece61397ff8ae5ef2f360c1050e8a7656ed65f9e94162939d5d5d8386f31373af19b3268235f21b8756c3194debe689798f75ba33ce83e9eca3ae259bc3b34cc94b0913cff2241493ae3170b8ae967e8b4c29ce10f29862a06e83f0afe808dba20c131cafd146c2691e2256a8f93c80cfa1ac69c58ef6ee3af2291a6dd8b440e61d802baab0c7c1fec7a21b084eed03c867c87b73def8ce7e3d3608aef3fecd498f2c435864044a0cf913b0298d75c80fe7ed5ef8593571f37ca4dfd032b4f0cdfd03dfd002ef8ee923317ceb7ecb86231895e82d6d788dca42a9f41af3a6fcdf90e9771574fafe259cd2cc185f88cba33a9383a05086eecd1485932a748f14410fe94dfb66397dd5f3b1650e88604e22f13ce24b8bc565a00bfea08c87ea82ada4a1fbfb0ade30059ff184307dfd2f20835801861234303e39602c7228986d690c96cd187e91011933412e2272c473f32e33a84723037ee13a31bff07f6fc5db3f5eec34f20fd9fbfa5262faa303ad7e50dfed014c35de58e2e5c78986c682c5f686d7bed198ecf81da1be062f585f33b8bd6d301bd34f10056b994cfeb05ca9e8c09af504f5a8c2cbd2546c2e75f81cc5232386805c3041f307881c6ebe0878afd683
fd96dca89e09125d08cc84611601cb63559750252e3ea1e04a76f34da7860f6d45003795779e21f00f29ec0b17730bf7d087a0becc192ef9368f7be4cc344cf1854bb0189eacb2e47446d9f01ad23443afefb76c9e63c12a5008c4f24aa5e62b0e76ee83e55213705605ad000530c537f411509a1280c5398146bb099e5424572e2322e1f9e491287394206b5bf6d9d28ccd063fcef8bec83e1a3048b6a6081af8b52394e6eb21010dda60946fb067e3047486f4d53f5def86e7c86790e0acb0802fc67d3e869047bb78550926ede36cb837301f4ddeec40250e585e34d4be31110b7f4557160192bf547152d31c8e2656fbabe4cc12464def614f3be63ef60cd892e1681311112cab68f703e6f763fc2605713e191d4cdcf64d437712580c39945955f937a3e2cabfbb8158e3f0fd2457255cfe7b7facbdfaf43a5a68065d8e4f528720e73e934b3a09bf89ec50b6fb4079bcddd4f2b33161e2bddd09ee3ef9bb0783aedf77c2c107b035ac294be3f8ce876fb72131c8f184a81dc27f9fc4a2b781f703429aa09c1115ef818c248c8c282b06cb9efacc7064a8c4c22cd9c3bdfdb97791c2aa824b7b1db5711efb2083a65c694b4b798d47660f05646c45a1790401ce59631dd1b90b1ca9b9b6f4b2a4d818dae416e77a26a29e6fa3fcc98ea6b200cafbf43153cd36125bffde60f25be312c34982bbaca2cdf6a3a0582c295bfd75f2553c12a2c59e83727a1a1530cfa326c1854f50206eaccb040867c0b313d57de592566c736f8d69f64dd9711e672801f2bafbcf1b72bd657fa510d9eb196b9b3f6f06f3108641afb127992537548c1b9ccf8c2a36f778bb796158bc513294b928b1f6782168d31eaa54569d7c93b2d019f14b265514bda89a9496a3e70e8d80945dcc0e82a70ab902b1973ee512919bc641a663d9d8c5d4ce39226e1ea8a83fab702ecd71fe28d803ee86cabb36dfd4e7d956b2e7083e6931f6c46d748abb11fd442c65a02be3b6fd18ff517041270be9d3e6bc87cc000fd6b6d732f3c0c2a22c1acf416282bfad57240495a26169d9eff47713d9e7ea3ab26dd4fa1b3cbcccf4b200e742ff42fec90c88602b37682a68dc9f6636c1f565b5957fc3c427c235d34b8bd11128c8bf29a4d84a2e7ca1535b594be71ce29aac1ea03f74bcd449da0462f60d8265f4a3eec753be315afeb342e0bffb1cf5b39f7cc95176d060429b6fdb77eff18aa3c69c94145e66522a0143a3d2a1abfd45ebd038b165868dc745c576d69632ed0b4ca10813df2b565632c30fe8525fb2557389faf5a2838998210a53af4982c21ae7ce50c3130e78c5b3da4994d25766079e3bc0cb53762467f57b0a5b4de32971d166f7c9bce8e5e46a359364b352f41b6d80d43364e2bb66173563ee288075
f7c1b6996d7da9388563d90bb65bf122c9b9fb97fe28f369f8afebc89229824a145e5bb51b48083506c2c146c43f9d25e4b0e15d7feff89c6389aeec0f6bf23b40fd03984a25a78f247449d0c965cf0a2ac463ae6e851383a818674ed31fa339f81cd5975e70b8f43d58a0f3c0d0faf838e3252e363628ba6f6760d93171b26f0417185ad74df3d0309dc76a960f7b5b99bf7b3ffcd45cbd70e7243de2a6220b856037157318e4f7d8fbdb7d796d85cf17fbf6d0d1697a089c4b5f0b2abce05569005b683ee4b1efe47d674d598b21f11b0f494a7211868b167d40e0788b832f40748230a11ab10ff0ad057de3090db178955561d36ef277f7fddd0dbf60f0d9bbd988a4edb46d1153c712b962e42c92d283fe6b58105fb101397d92a8b252155854229ecde4ebd18de687698977ac7ac7b1632d05cf42293ebb622f80011393be29d7aae75acdc0e302ae73ce708715d24fc4826582afeda1eed03ab314b27fc20684d922f09b7f4489c5b72434f2fbb8c7140381640a42f142f828adf8501f8fb9d945487a64d6c0bfcadea79c4722e2622374229097a587c4be4831f883ac28d28dbd91f85f8c1ac74abe6c2e25f74d4de8eb62c23b7ff4c4fcdbeab1b020006be2729c2215a5d52226af969a4168ff1cfc0c1f5d3c3138dc387ceb44416319b7c9bade454beae37e37fb6dc7b958c0dad957456f1dc3043293c51cec83e2cd8d4e073c5415efbf218c03435dec3e5730acf690651e69d3e0fba1e68030f3f1b6dc53328a6357e3934011aafb05a4698e1981a87049f30cdab7f9f9e4630a323e047e49ade617154689f4c65adbfb438a7565f4fe3f21a753893f51ae5287288dfb849c909c24cd7a3e127d954517264519e9e489b1944b94e39a526d93d1b8114687beaa70d2ee85e0045129dc8b74d3f38b2637651652ece61b878bd05564b0ed928d9530fae30958c11ad21682d1de2e50908b7d21f83558a010cc5227603fcad1ce54c01b92518a8731352d3c9590779569d376b0245838790a9011901ad92ec9cc9c2c572a8da71f7d0a3dee61eeed9edff5f25416568909ba5b92b93524c51d240cf23cd82387d51ef3cd12f690f58be29ada89ca38f41fa6d210def60f3a6181627b53bf06cb59c7c9e64071e62aeb414b0353ae764c792ee982eb8bbda6c17afa24ed2ced5d5249b2c45ca3a21d2745af8ca91657997dba92b268f634851a616b8a5dea3fac5d0c2a86dadccec7d230cd02bc92637fc89e55f1260a1a9817851ae10d396e48828dc826b7803624d2d4306d1a23888285aa1b30bbfd0547806ec6ca930d10c461dbea8600af9624e3812ada2dd7eee922c6ba90b5a5c22c3e5941376672675d632d3c51528903902f712c791b303d512fc8ef734748e847584f491169
782c36dbbe4acac5342bfb2487b2f29ad6a30391a9ef0098f6e5ea3dcffd48ec43247fdd8a6ec774abb1c71baaf1dd7033a974c8b410504833cbb15d1927e32c8d76e3f0f28c119bfccdd81ef1a971cc3c6384792a9dd13c7b5ca93b8abaa6326f27d7bb51e6cd88feb6e9079db8523bf9bb902b73f96103b6ba1bdac09a84b08e8560262a9d5f6b1c51212dcbc696d9cc1edfc2d99591c29c5027077875f513e137ea76726f7f07b9367578d39b81086fc2db92db0810309b4c565ecb1a49da84d22f64fd3e000bc9f3c97630c3c8e6bc655bbd6bd0e22f4f7f6513dc785cacb5be865659abdff30290468799401daf07c57560abae524ede3804fef5aee90da91799fa11e3b6c134d21c71f2dee4faa311a3bba47df3b8335d5f94e52f21de9fa43ad7077136515159084a196358ca542b164cf108b20555e661662c89e6311d8071002189ae1257b585156119ac4a431ff48dbb20b3c5fdbc21f1388749525aa67475b0b18ddf83123361c5f3eae430ca1ffa05c7b3ec31bbbf3dd534f3b148cfc3eed6aa5d60105392653fd0c489ea2783f3138aa19146afd785ef46417aca24367a9e1d2c5839a3f3ace9245d261b358d7341d0a8650463e5bf4af7a444719151b9c7bbef98aae724a0bf26ad65387e9980dc78da41a175ffa3f78bc7219b154e411685527a5eb0013b424fcf7be534b190e94861ec09b2f04b51df593c9a0995631e95c86c341cb437a89f354f9b4c274c5b1ce09737a2d7cff5a5acf3db79a7eab2567ea54ac2724be80fb311ac3c782d1ab1a82188e1fa66b478bdb175b316335b2104f3dc2d9f0f3be0eb50e08718a98384b21d2a5caf2aa0caddd63e3381ff01396c44a8d6e4744c339ee118f3589b13d9097ea9d7450d7ae247e0a49b7b3955a65e5d65ab700973627d1fbb9fb9f09528428f58bdd1b72d672356b3b69789f2bc0f6c8a130e0f5e813cac477cb99c31798b5eb7431d2f8703e71246490e261fca3c830b684302a6290529ef220641b7e001f96674377d594e62708d7cb07576f2c039b78cdb700641c393ee48e52bd4c98987b6d70d02ed7d8dff72d98494145fb560049a3d328d6ec10efec9566cca1ef1faf363ec582ea76fa85a7f5a1a2e59542ffaf9f01442754bebefd5170486720947a5e32e3c3ffc29d8ad4a739c1ef734519bf74bd82d959af6ddfa5cbee81bfab738bf1a2dca2986de7c2324fbab7d0f1aada28b63345100b505d76b6ffd0019ece2c0968733587ba5c275dff2efa9fc957c01c2a7272e7677582492db32ac39efba005c578bded90db0acc3104db1eb5b080ac7c2c6555b4b2afd158bc7bdb93ee00ba350b0b89568275e90f1d0051652e811a4cf85ad15ebe5bb5ceca59bb2283c23983b8270f5d9f991648f0d73da73
a8e0c10cedb15852cf40c1a24ecf2842dc7b0c96691d0ce9748877dac297750e9476a3f76be2d5d97181530accdf9998b8f7e715586df7b7a53638b9f37c93e54f3bbb198846d3f227eab93da95db95c92dc1816f30920200360fd05ce671ce2a99044d9a9ec45267b97dce95739382b1b51cbffaffe03dcb52580a40ebf4f9ccfb9c4846af3143ddd8b16ca1c8af2aa5c1d0644cb3940f3eb11c713a00cd1464b7548045e0511752860abe5e30ba25bc11f08b46e5b0616fb057e608c7ebbcaad6fda20eaea7bca461b9d68e15f1bfd36182e2c09bd0fbd8c58c85e6e51bbb72897a91cd6dac94cd19afece5e91502e88f9bf8bf4c211ba4e6e017ae8af33a30838e3c38987463a2dbc855db8e588f0cb4d1fa17ed78f5f9b3a96655640f76bd7158864953c8bfe6c7f74932262a009ee07674b41e33ea058adc477d91ea5ed936048ad900e3b78ca71de75f1d1881e77d872dab1df246cb2d7bf5083d52e75db251f2bc951d6f75ff31dfcf927716 diff --git a/zebra-test/src/vectors/block-test-1-842-432.txt b/zebra-test/src/vectors/block-test-1-842-432.txt new file mode 100644 index 00000000000..870d51aff02 --- /dev/null +++ b/zebra-test/src/vectors/block-test-1-842-432.txt @@ -0,0 +1 @@ +040000008395e2bf8323ee55fd4853311fed42fc0757ff69c8c5c3375cefbab0777a2a008dbf276f5f07ff5737a2b287ac5a07ec0ff60c8a7a38e31930efc885327ddc2443a7be97a03c3901fb2d2392986769cd65569a5468ea8dd6b74bf0ba42cd0acf3c835e62f5cb0020060055ddeeaa5f9c527d0c8f5aae2a10c31b4b2a4a620020d5ec5f7ec9ca0000fd400501ecad6c3d279569df01934f14a5e739d32d38797e375e59f3d662064ff8101bb62aef48ef3b78ddcc081ea68a63cbded733bce37272a2cb5569fe937ea0064bbf978b02a6d3fb939f27b7496d09a637707602c003892bcd39a934df9899d42ce1548c3fc3f2bfc13808deb763a48a2e6ab12000de83d4bf1c62e0f25e74128da6a473937cfd4ba71ad2d3ef0323565bbb2afd348e14f6b3ed450b6aa905d8c057e996319b3ebaca0214d690a5ae2b479ab60611d438a70283821cb7231b8d87df844b9cbcb7e292801239b47b1064fdd7930748ea509c43fda2ec4d15785acb249e555abae7b4272eace12da2f4f1760ff8747b7148e7d0bcff3c780b3b48a12b45435248fa16813bc973aaae9e9d65282ca60cce958cdd99178fe4233977d34dc568756e3b172caf5ebe92b8d0a64da2482547287ddbdc7b887d36db41dbaa694d07cabfd37c033f44c4f38df7a94e02afd0f36f466cac989fa040a1a25e7c9c7f3d5a38145b96e95b7b8fd1f8
1ea2241ec29b71f2bf1bb47734b12c7db59f1f570957379904f0c4f745957e4b2d4e74b49b2b1f76dfc35a763cf0c2b059b4b35114c6041f4dc42a09b8905306320db09b9a05bdb2befa2b129a5bc73bc9564df12a91d340aa28be0544bad8fe211fa75da3670535392d75356d467fddee8df2a615683b3c684ceb1a999416f75d01dde8f9e51e8fcbdb04a0d24333559db5622eb1fd193cc9f90c73f139803008546447983a17d47634d5bf58d8b1ebfeb65ced11ff69f8a748e767c118f1766beca75dec7fbc4221277cb4c942ed4b51ea26e5f16bcc67fb6b3fbe69000594db7a7588e4c37ef5f2176af37fa8ea5159ad260fefac0b050ea32b8319729aacfde1196d7f8d7d59392019cb53a0a8ed5f5333bb7b6c1bb904af7178e079f04e5e7da3db6b349a4a2df8f24c76b3a3595f36022a3cce82409cede2f9b5b4c4ea350b388f3ced9911be18ff9356a5150fdff3eccfb30e5b0308de4fc402882196659c2b8f2d8d14cd48b619977b523f53b407494ad6066579b776a292fdfa3195865c27b96b13089233e7c819a011cfedc0b901a843f29b0d7a89e23ff0c3f1326c6799e8e8548cb841d8755fee35dfec355893f4c7cf4a92ef1e7a7f73fc75cb693efb68d15416cf14269d0b5b128b858c56d719634280dc091806102f4d8b893452a24592872437ac44e3cf9d93712abf496caedb326d221b9635215af385dc1d31755524e7c310e5df62e8fdecf52fefc5831a1ec655c4217f387609e6663e4b40154850d1c837a6a5f4b873f7075603c71a767a33c244b56308f906725e44ddd04e1e605dd1b99b40a376af57303a7a0646ad543e9b5a35cb6e932638254fee90d3f5f8a89d12a28f1ee20f47ed6ac7c8969dcbf52a76e69ce5b14de31217a1a10396b75a39a5cc79d62795bc7932470281cbb7db823c7db643eb203cd3edee14c6a5734cc27c42d5f59d12837a07db1514130b854251f2a5dd0c94d51bd7be1fd5df5c779897e9bba3a2f779a43409659fd6c06c03eb4ddf248133d733bd03cafdf9abaa334898febe0798099ffc574279c52661f1c94d9d3efbd0fe8a9c0df791d0f1c465257152e8c90dd8ba3311631ff8fd1ca04ae4501d3e6119a5a1ecb471079314ec1e5c3d08a37d38ef5a84f10034c544a6fc09f5a7eb3749330d785f72ae680bebc826c22bdd3c37dd59f47a21fc0a2f7fd3a6b61357efbfd23ae34a4027c21fbf083d373455c14c34264dc119a4aca750131603deb5b5760a10a9bf800d2905c9e67639ddf666ce86faf76421264ed22e29267ac5f3e7d53d867293b640f53d4ed31613cac32d46d23843b133103429ffc5dd756f2cc32ee163ee57eb65b9b7baa2fc95f6344a22355c346802050000800a27a726b4d0d6c200000000001d1c0001
0000000000000000000000000000000000000000000000000000000000000000ffffffff0603001d1c0101ffffffff0468b6e60e000000001976a91437b3e8484c6345d9686fccad9d72d877715cd38b88ac286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c8787000000050000800a27a726b4d0d6c200000000281d1c003200fba3b3e2ce56ca367f924c065616ba0c87eb6bbe9519417098c4fe7c77c3a4000000006b483045022100ffc574054e14129af8386569be2f0327be039a5b5ec735068f92bdba29301ae9022063a6a34483e4d95d9ccdf5c024b93f70698b5b3f1472129e9e1cde8b11a4df3b012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff0139332039803fc3c74555fb12fde73147287d27156974df37edee8739e6eb51000000006b483045022100a9ebad4db4853d32cc1d749c5268ff54eb5ee8590f2c6d6335157cb7f5cd51db02206630c4b40b7011f32aa55c7427ca5d0260fae48f87f91338543b8d32c57d552a012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff01ccb77bf931ff31bccd76ba1ea34128a925a671a5e6252b7435510fc07b9aa7000000006b4830450221009f14520613c49c12706eaa767decbf461abb2b550daed19daf9f7a08b83e93b0022064f3d3b8dd4dac724b6a52874020e0f1e9aaf2e9fa148949325159ed7e18bf8e012103af84350c47a535791d7305ec8e9a8a3cbee9b7b01ba2d70337fde97ae34fa47fffffffff0208a69d6450dae266d76a702f94ef4cfffd9694318ceece44a04a0426ac5838000000006a4730440220189d682c4987871abc3c97b75972625d11cdf0b7f26dac39e76bc37c237930c002202c5e32114b2c0c3c340f5369aa707dada0eae41e28040c027f59c0b40c7c0d57012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff0334966b2d7d7fc3da3c2486e9d7b8df1d71b78c05b38881593dc4de04c3697d000000006a4730440220490d1046b08ed67df247e81bd6faccb8306aff575a3535d618379af3d19a0b8002203b7905904b8b5b36ee736a5e7a2e16bdd36a7323f48453ec9934bb1a6e59da4c0121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff03728a3bb4dc6f7cb95c2b7117734dcf6c313191df69ce5c7b5742727eb0d1a3000000006a47304402202d898031760da22aec94c783bb835d79a7d0
6483818aa2660a9d7ee1277882d7022014f8ec22bb6670f0cf35f06800cecb999020583a7fcdfbdca076425f014487f0012102b8bdf9887d85b002b29fbe7eda85fe7d5b118982b7cea39ad8a205a0408a5524ffffffff04c8d9f1656a554b6fd2852a847da242423f51aa06edb5f085e8dd3dd91722e1000000006b483045022100c3878f6b172e04aa995b275889c3d800646de167cde636ee47198dfb1c0994d00220443e09d88cb5de88e4ef46854d688926ad56b6ea6f6001950663563db6c507ca0121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff08c9afaad5ce912b9a5fecab1e6fb266d6b07e78b74df44a8b049516722038e3000000006a4730440220592eeb4bbbcc2016c55f31d5bc85c21445a941a683307569e7cc7a4015c6ac8c02202f0cfedd5c9d58b52b31c5223dcc9d149d9b3010933af337e9e9af4658f8d0aa0121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff0a7cab563edccc0d4461aa8a8bb74d4e85a9810fd26367651b3ed6b81471cbcc000000006b483045022100b62920428d5be6520a88148c68f5e9cdd9790a0b2f92dc22c7b4c916e130b052022051c4c2eed513928c6fafefd08b698bc8ede19b5032d9550db2aedc5172da240c0121037cfd1285e9c3354ea7691af655b66c435658d798662316a5ca5d51aebb6e27e5ffffffff0b1e6ef25c5cd69ed8da982584e7cf5b7fa8f7980277400d9c212c0a166ece8a000000006a473044022004f7223ea2b448f35e360d2e74fe8e4ee4921793cdfd714479aab3f18ad7974502203d0d9fcc08fd8d34f8125007f8743f5d78927d8106f8c59ba30323309779865e0121029aae8dd393c519f5e567c1b84290b9111bf6243495934ffdca06ea8de44d64ffffffffff0c7f129fe07d9de95163b25106edd08800b4c855038c84358fc0198cc02ccc0b000000006b483045022100daa1826d462675b45ef1cef4e8e4473c888a933d197783db39ec337f705dd1d1022011a6bd0dd8b87c75e52fa05a0447036e297e72dd565f2fe6e5efabcb6f9a19ed012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff0cc873c1882bb38226c9c7f8c19aa7bc90fc5775c804e1fdd573d806fe9eec5d000000006a4730440220364c76c6c8aea7dd55a43dc556e300edc8b935539139c19f786ce77dd6010455022058e8034b3e0018aec6b9fe65c73dc83811ceac11659a61a94fd0291ac8688a5a0121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff0ce92a4f4684e06bb1ed07ac4ea272670c7cac0485e5898ce9c01077
8ea10f3f000000006a47304402201e9e0bbfad4115894d314c5052cf86b0944a3b8fd5be752e1802cb68e557d7a102203b0c2d87e509ee6232dd6be86bc417f41e4dd6289082bd0eb422165becd98b5f0121037cfd1285e9c3354ea7691af655b66c435658d798662316a5ca5d51aebb6e27e5ffffffff0d4f2003e507a5dd71e18b3e0dda1906356d09a910d600925e32933cc16b22e3000000006a473044022023fe3cb7ba8ab3056c8d91fe304ac6e866505f9dda8521a720bb14e7c8b9c90702205e6746d36cf21086199a909a17697deb58533135fb338adb3d935c56b207554c0121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff10aca633239ce9fe0d1e50d9e83bc9bb2bd23463d536b7bcf56a0b266be1ca17000000006b483045022100b61c06d87c6b9993427c21c2d076909dd15f9a0dcd7e72a6526db1655ab1f23102204a7435d7425806f6e9ccc1387ca13bab88049905d44031960c05841e1cd9e0300121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff12359ff9e7360c704804323c7ddda09e38affed9f72d57090096f9786118f60b000000006b4830450221009b93a0e542c74a1158485a155e7136e251567fa48d0606052866e07857007b0702205677fc54c74c948a85aab9c2cb77254bf12952d31478efeb6611d822da8e676e0121037cfd1285e9c3354ea7691af655b66c435658d798662316a5ca5d51aebb6e27e5ffffffff12bef78ec65c9cc426376761003768535e52157f86002d71b7ed69d0f4c7a4dd000000006a473044022022e5e49b262a61a1fff7d3b65afaf8ea54c74ebf884cb692b771ae68d9cff9a002207d33b4d372106f6db2e3c6777dc1afe15d4b86399312eaaee1ca0c50b86ea517012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff1424e70584a9d6063c79a904856fc37edc91391b206c5b53bc7217ea146cb8f0000000006a47304402202034487b42e9d0e568c72657986e8d4314736333338719cece8575962ca3d1080220522baa434a966145537d970cb98979eb4b16bb21a54844006c83e121623ae203012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff14a17f021f31c5074fa1ef19122723ed6db3526e646b6fc3f356a479bbfdb6f3000000006b4830450221009dd10d69ccb2b5f9611526a1f436553369e8733fdcd240464a8b38194212de3102204a5dd0dd1c394c04629fd0ba951af64b637c27601744f1a481b71eca599781c50121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3b
ffffffff14e0433b9ecbc33a3ff660cb29fb9f96e129b78ac29a7118a0f3a8cd7d2bcd7f000000006b483045022100a7f13e9f914bc15e8a22ea96287a938dbed1a187e908de7d291dc73c1c2fe2b3022038b947cc4f889eb75c277ee73207d019ab3fe33d15504ab587f7fd7f4de39b0e012102b8bdf9887d85b002b29fbe7eda85fe7d5b118982b7cea39ad8a205a0408a5524ffffffff16546743c50fd9e46305ccc59751b6a0e93641adf47c43fa057874114ace5303000000006b483045022100812a7218db3cffb0578987290b17c793f859051deb3dac5e35c92e2f2d875a85022045f25f6feaea98685ed6fe015ad5dcd82ac8b9ac8e3b0599d7513b2706051f990121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff169b6bc5130cae247bdf5b9673af7a468a122a91b7c65f39a5cf99ceeeb5a215000000006b483045022100ab5bc56c9fff9a0ceec39e57be04a18aa0a966d62ec124632fa4ced7fcca605b02205305186c6b93a81664f6a58f9de03174646f6dc3419d79451b5a55060272ba4c012103d86da9e0edb0b617b12c1946984ee761440a00dfb8d475cda0ec311f28c032d2ffffffff18ce29d9364b3289db6d67574c8762653ca5f8d2d8838f8d0a2b5268e9b07418000000006b4830450221008c7bf5710bdf9ac63cf888b643cee9e02bd4e69731b45c0efc2ed7d5ff5e0de502202aa1f0bd7be56ca2a97425ffb7001ea52dfd522e51f4f8c799d552ef66b90efe012102d108f2a4b018b4705b1b0b17a364c8d12f503795284ef96e2ee53ae97d6b1f18ffffffff1b3c67e61054667ca56bea12dd85f028af67323559784d0efc4ece0e93e68545000000006b483045022100c65d9ed6adafbab42e288cb2f8adeb4ab447bd8c68b5fed2342c7feb3952fd1e02202c3c7e79d96bec5b64e1698dc096d36c98c927a205ae5f8a4a2c4df3b792da89012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff1b645ed7bb10cf9baf9daa83be033d9acd1a25e132c595c5a3caca94a459fccd000000006b4830450221008c0bb877bea021eb8e976350c98b86ab8e4845ff8d79ba35de0f23a6fe1850f0022072b775abf1fd5d8be7c209077fe7dc35dd4e92c2021bf06b666659548d79a0b10121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff1c3731c9f82765d7fc35ed59d7edcbf3b91a43d0d7ed5c80bea9178bacd0e964000000006b483045022100ff456ef5ff3c039f7c05dfb3b70e8b8f17d1b5b2f0e7bba18f179aaf003031580220190dd79718f49d2856aa7baacdc8a424864c0f31090f523955c4a6c637b324
120121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff1c5739be29eec0e4e453bb27afb43036705e36096a5ff70eb0c9836ac26b6ec8000000006b483045022100fb61a92061fa30af1865b38855ab0725f548df8ea53a58b94ff0aad143677c3802203db3a9e183ba7d6c459a0e6b7529973910b5a0f13a7347fc08389fae38279fa8012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff1cd242ed1ca8cd740b6410fbe70368b59b75ba7817294baafe9bec071dd1cc7b000000006a47304402204393102a0fd73051d087b78cfe0dccee0afd0cfb48a4a90792144a9a8d939eed022061615a50903106a67b39204597882c4d722130487811cdc39d40ec80376f8945012102c25f2cbe93d23fd2b75960cccfb28ec6845be5871a444953a9a9b09e727bc60dffffffff1d7cf664b518ed83c983cad3efda17756ad4ecf71c92c85e66e443d34c2aff4c000000006b483045022100f287288297148a5f5476560cc0598a463db78d47e5854dd2461649ac6e29e675022005229ca4fd5136ab09f0fe8602e13da21294a93a3538af98491103753cdbb9a9012103af84350c47a535791d7305ec8e9a8a3cbee9b7b01ba2d70337fde97ae34fa47fffffffff1ee7972df84d812817adcee6a84c47b59819722b77283da608e327fa2ce56767000000006b483045022100ed3d43e62c8d4dfe8b92609d2a21f3f90e5d273be69cdb82953a6024907aed59022018e71e6b10738a862ceed90d35cc41fdd67f874e28021459b6769681ad4c6aed01210365d3da5c8bf7db81720b3da2d9b5bc3a78cdf02675a713b4f3c87394588a8d3fffffffff21895eeb19bd1bbdd52392917201171eb44c8f30f0d685483dd835d1dfcdf848000000006b48304502210087f104698b85b2987aa4c1329ddf2b6c5184e8c9751e1f1fa95cf8e6035373eb022071c0fd2fd53802f4eba72c319d75cbe3dfb95001ae82d205d57f307a3abc685c012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff21bea67d90f94a2d9364263d7f6ac7e0bfc2e21b3d59099891f5bcfb6396119b000000006a47304402202480d8eda2a30bcf7f23b113cd66f6096595616cd8ed7db4ff024d78d058d8610220134346e068e8d88b59ac4743417a1121157b3b790a624076a76e081a4cd2a343012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff22d4dc938040fecc88cc22a41a80c6b5b3c60ef8582eeb3af743a6a23f538049000000006b483045022100d18801393eb6fcec7e6f20fa8d9c3101b1b57a32a777b1d104bd7fa4f923bf
9802205d854a0b5b72dcd82b99a2b65e0cf91402ff2f4fc2742c4612408625f53aa51a012103af84350c47a535791d7305ec8e9a8a3cbee9b7b01ba2d70337fde97ae34fa47fffffffff22f523a5e7715e0a907c5e8b816846def93f8dd127962cba95754e893998ecda000000006a4730440220500585e103b18f23eaf379e5be302233ab06691749166959f1f8d1bb0b25eedc022034e85c183c3e9de866599556dac41c137a2e3f5e90bd7f9bb2693623a6d8992e012103adaff6af6db9f0085f9ba8c703f93858d3e0eeb4d8cdf5badc088e3ab517da4dffffffff235e82e7a435e5c9693e87de26f3b577cb2daa1dcefa925ec566d4adff4eab37000000006b4830450221009515f190a150fffe7649cba77b61d914bc39b6d37ef74ea724674911918e6a28022049a52c449012d2abfe489fc3abb65300e6666a74151fc7e6c8b94567e1f030820121029aae8dd393c519f5e567c1b84290b9111bf6243495934ffdca06ea8de44d64ffffffffff2382e3ee684c93096a669e6894947971303d8c17f5b0ace892f91efd63628d6d000000006b483045022100e1b5931e85099a048987298ef266517d593f731d01dabb471f74f7c21327f83a02200d7979619407a79b61c66f14561bed2078068d3c5979b8baeb2edf565a95a32b0121037cfd1285e9c3354ea7691af655b66c435658d798662316a5ca5d51aebb6e27e5ffffffff2472a5ef880da793dd3d0f62deb300f964f7346cc8c6cc0675415f8602373f02000000006a4730440220283de63b77a875eea560bbafddb82134a3b285c0b374d5c1fc3404aba79e922d02207ad99e7850c4154002bc19164c6a0d83384f70852d046cd21040174875253f390121028fce702e13eabc883303acd81508d7b6394ed14ca2cce874b3aeb1025b720b3bffffffff279f655c563b053941d6da18df39400f4c802e267db713a771ef16926bcc6038000000006a47304402204dffa1c60b92cf67dea6f4f864cffc2daeb7d16f125f5ceabb2a26b1735f8fc002203b389a2fc7631fad824ea0eb9c5ed2a4cc80b916ba3dcc69bbe494851790df3d012103bb2f845897f4cd8b00ce4897641c13efe35b4ddc4802fe2634d6103d9cd52b38ffffffff27de8aecbe6603aaf2291873b707dfe3939edfc33e2a08b43c10a177366f3de4000000006b483045022100ca28347ac6ed27cf9a2c885e67ddf642f839476b5cf389823afbd2fc0b5295430220562cfc370b450b47eee4315776ae362c07a17a9da34ff66c35885b67f3fcec04012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff280c516049c3d569c2a37f2985bde5482bd964d4eeaed00b9414fad3b5b85416000000006a47304402
2008d33b2493bdbf5badce1c061cb8def1335ec6baf2755f28e917bcc76ecd661f022023cfa5ecc804a1704fb57f40664a6ab90a98aa134ddd4eea4a417ec703460d30012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff28fd80badf03a1968d76ccabc5de6a09e361ce8a27fc90c49a6c5c5fbb0f9568000000006b483045022100cac7adc66e60fdddeea7cf08465af579e8308cdbea814b289f5010db050e644b022054ccb52207237aea255873ce4a902b92b75c3d6b8f9e2c5bf61ca9fb723f9888012103ac6ec71c7ddbb44054e3ecee5098848bb5bba14949d0c45b99a2ebe3f7743542ffffffff2b3d15c3f214052b414abd15d86612c21f93a69cc5abdfd8e17097e81ce3fd40000000006b4830450221009dd658d122e55e923fd86adf18b3c19f5a98b0e10fee56044b54525c6965c3c50220420341a5b45148e0e3d5314dfef62abdf47f4321f99f30f372f37afe5c81aede012103af84350c47a535791d7305ec8e9a8a3cbee9b7b01ba2d70337fde97ae34fa47fffffffff2cc012eb2cc139146aaf4ca62740defd05444d2ad34f3fc23104ca3a04a36c7c000000006b483045022100d6a767de24a4471953d52eada71719a8cb0aeec23201d3a65e324c888bd35a2302201fb389c291c1fcbf87d963515a934fc72c4e8c703258c02a05c092e1e4244531012102b8bdf9887d85b002b29fbe7eda85fe7d5b118982b7cea39ad8a205a0408a5524ffffffff2db946078cc7db145c63a4a059953be8d499fb327b255206ad76117b9fe8b68b000000006b483045022100e1e330c1971ec6d2c6bc680842933e1bb1d88e9f548ab914341bdc5183204d4c02207c950930155d44a2df347079e5510b27cf9f06a254677d297cb7d877349063c701210365d3da5c8bf7db81720b3da2d9b5bc3a78cdf02675a713b4f3c87394588a8d3fffffffff2e1dafd02a9c53862c1f5376189cb905a4cce51556e3ef09f31d864b6a91b0d1000000006b483045022100bf686c6452b9d447ee9ea20207ee4abb5d56398fa76792af9460f077ccbc6629022016089f17466292b45691ec0a125854a8bba40e7903f87dbaa701e13177f4e12501210387943defe7526b5a4cd71e55b4513bbdf7f08effc01864bf5748fc853c1d7917ffffffff2f9424c1cd018ceca13154fb325b34817e93616e3ff3e2302479f5ba8188ee43000000006b483045022100e1e1574a9a39ce0741b90404c46280c621b7c6c28e9f775e70b9e662c31c2899022033a68d300d3280ec911bf5d1834ca812676e346bd2a0f83288c286c6d2878e6d012102b8bdf9887d85b002b29fbe7eda85fe7d5b118982b7cea39ad8a205a0408a5524ffffffff2fb872e80fd5
ee65368ef39111934d12570fb729abc2dfcdbf3ff24db0685a04000000006b483045022100856bf2d7c12a306f30b0cc61e775781be53d289860ffa069f6f8c777e10d247b02201d988c1a464d2629829b5964f3f9e048586e785243aa9ff0dc983d71c1b8a403012102df36783bfff09266f79711204629ae71a1ef3f0f96b29daee9b4a50fdff5b805ffffffff2fecd5b85e968bb7e3652c5971f0aa84e59bae4911217f3743b6006113b2a9bb000000006a473044022071ac6cfd5baffb9aba1426cb76f9568a2dc6d1d15b0b0a73fef57c6c49ca8f60022062c9d753c75f00a044b88289486a084a43dc8e0a8c31c39ebb6da496aecd48e60121037cfd1285e9c3354ea7691af655b66c435658d798662316a5ca5d51aebb6e27e5ffffffff3040a11e9bdedd1798ecb6d85af3c38e0f0e6a0417344db6d168b778518bf7bf000000006b483045022100f175c6972371e6c4298de8e3dd5dc446e2c53c422af179ac4ca11567508575310220518611abc6222d1cd8c9507cce48f93ab50fc8f74da83c8a68486fe65d36ad3a0121024e265ec3d2fd6ab2837a0b39d453b533d4bc46de8bd3c118cfc2353e6c48204cffffffff3080ecd18e45edf0890c3491bebf0e53dbe8514d9dd1512bbea467bfb535ec3e000000006a473044022100c6020d6e60e99552e0d6d4e75d0c226142ea88615ebb13f8832992285fb16808021f38a11b61dbb52bf28a263a0126340e8b894ca5a5a61ff5e733b93dce23971a012102d108f2a4b018b4705b1b0b17a364c8d12f503795284ef96e2ee53ae97d6b1f18ffffffff00000101d1e730f122b874ea2e1e0565e944b352fb7604f421ed6e9c382b2fd97dc89a035f713b35b3ed911f159c161bc777a2ee1951d0fdfdeea9fe1a9335782c8e2962af7efc7564fff81a8025d7d68088abb8ef7c97831e56d463c02fcad13cd936a93b31e75c95cfc8876e5aff7e5ed4a2950fc7cefab3fee9431c3e439863cfacc0ecea5e6a79308cfebc1cdb035237f9be157be2e6f13aa295c9a96f257097dac4fb3f9da0f196468e73a74ab6eba99f4500caa9e18cb09bbebdf34fe199ed077c6d6880a7d9fee5926114be3c318c84393840578442ff50aefef6bbf0170350a8e7bd846b142a050bbce207a91600cfe3339f166fe792fa3a9b1ecca625a130f7f58a5490861f46a6d2bed74db7e73d552ad4e3daf4a9b96625bd48a4c3a368bd4181e0b7dbb4724168cf482c79da368bb85aa7955cdee7ca54b224272a3f14215ba09168756895829fc36576a11bb7282d84c764425556d8e63804db8a7d92efb776f593855367625c8177960d1e9b4a39a7228fc1e02abb28740bf37781c9daf10cd6676835751191d350b1fed0a7ee1cb6c426aaffc80f257f082a
c251023792cdf38ebae0cfb6038a3af384a1fef51884bd9cc2c0981286e807ed95ea8f33da6dc2d473129b31e21aee2baef6ce27961fb0f73c254f4c2ba1780f82a7572230cbd97696f1d17778426397c36aa41aa35a5303223223c15a9259e564cf0a796a9a98a7bea63d4638f817a6f2d8d884faaa65f95afd44714497dbd6a99b741fca2a08ed2ee0fc3d59c2595d748c61d06528dca0d61dce733823ca7aa0286bff8a6092045f1af4fdb64a80476671952bd023d0742cc452cfb948a64b8f49a4f3f629076551ec9d77388c3d5cbc8328639f51570b8cf9e075196e47637481a0f5f48d3e9098179bc9143bed4f9f03675e687ab21f67fea579a6d76db3153dffee941016e1e39f2ed1b9b1ea497d5276a38e8a9350bc000a6e3021be6b51729acb266cf24707607dc9c255c5b0b75695d19419119bae07e8bc653b18fabfa66794eb52e8a746eb50797c8ff9a2a938092306a7f5e826f116fdffffffa3834db46be375bf6f6824e34d96b0fa98487bf7c4af7986d33d0d173b1684c4b3fb14e2599fddcab7ccad820f9a15cb97d779ef07b9cbfe31f88d893119f54f407afc5211b20a4f82c1835deacf84b9cbd2c1f87f07f9de8cbd2434bdb3b8ae05e13cbff0e155d253e479e93ce8770e051a2bc0716ff502500eb1beecd6be322152f177954ba0c86d940d6fe76841fb843da01c6f170f502afeb712eaa00cde3608444c44e520807894650a47bab937f9240dfc18a9022b5a322d272c1c78f86bef6c0b77742a1f136daafb99e17dcd176d94f1d14bf9e4eb622941569f2081f1fe02c765b304d6b22425069784c4cc8625a3a4da454278c9591988cf5c6e0500 diff --git a/zebra-test/src/vectors/block-test-1-842-462.txt b/zebra-test/src/vectors/block-test-1-842-462.txt new file mode 100644 index 00000000000..33c41019b43 --- /dev/null +++ b/zebra-test/src/vectors/block-test-1-842-462.txt @@ -0,0 +1 @@ 
+0400000044c12df7f19ebf11fa54cd21c4f73b45746bfb7b8e07836f51900b2e8dd25200c6abda6bf0f8ca118e9a1edca40a772e274f759e942a46b4c11518352b44847103caf9d43e0e589c89c488e47774cc3f7afdf0c05c55f2a122bb43bfe5a86a07ec845e620582002003008fc8c1f526f88455358b0f1234e5ebf4272cbde710e1730e5e7da4d20000fd40050055b4c81fc54bb68a19f55635deb07e95bb782f7630299ae2c6b1651790dde3f8acf11882e7731ca9990152b1d73a59f6d7ed77014994eef6dee0d03ad63710bd52568f8a5763254361be422623cebbb39da73b0402be75c7c77cddf234307744faf4e5caaddb330427799748858edf89d2f7c2ca0ce0558d4659779d040ed0ef60c7cd52ddaae4e1eac5b6ba7cc14798ad2933ea34d0fe13ddff72924508327c3fe24e52137d2c028fa1e91d6591a1ba2bd3e99cbeb395dcdd6f99ff2f7a42ba7fcc5cf3be6955443654500e6ba5164a88186e5a628cf5cb31ee55f209da610aed60a558cca7259c8734d1a41a012fbd45d1e3ef0a2a48f8942a660a2759ea9ce5f6718db5b0b4f50a9e2ebd5bb71e071ef46d18eee05bc953502443cb63edfd5bb53cce300cd0fc8a2ec39ad14b7f928ec7cd72b574e2123f393634db511b9e97a16e2783ace7c4b009099bc93375008a4462380145aefc80a16b714e81ba8d799d85e80c1e85d2f1e5bb59c35c244c8fdbf4a934ec317d770cbfe176f7ce8be165b9929921c0caf2333c1a5a6d5758536141f6c9e5b95b17638047bedba6f63d7d3e0306130e6d13dd1b4f1c0a3510f01e12d3b8bec2ba158cf15ce5ac7025f9e0c242883eda3ceca49c2d3323d879880bdb47cf533ab5701d74ed7dec27ba093333d2b4501e91df097f31337599247b1cf566970bf100c7df5a264f526b951bc61798cb07d5f57352722106f4f6721cdae5613893839b3539605dd306393ce4125e4c5d1e59140102ee352844791b71920aced70f2fdda74536fa2b9ffc1bb31c9ffaa7752298d638b902934e8a8a22f3f589b8b12d8cf524a9a30e6f58c2066c0e973d174e42d4629330465689c59ae098d4370caa10a973c5b64f0ab1f0cc6fc99c603e03955a551406a48b8587a1abfc14187db6e680be9adef97fbe026696198eadf685ff40631d85293632435613e9520ceb4edf661924d142f576d6c3f27dfaad58fbe8bb1277edb87ea2b8a117dc05c504d08c59e146522e6716fb254cb01a1b99dadad2cdc6ecf03603c455c721064049befd99df1b5448323c0d979b80adeda8eecd23ecb2de35f9ed7bfd3842cdaa24ed1515b2be9a750beefc5aab8ff4919f08f12bccc29b6dffab30c11e15ee4631df8bcd0f901ff514dac9a34a6bf059a8050334fea735741919cb5932fd52a430f2b
bd77dd0290c952e9c7e36b2c1e466271d7adf455fbd6dfdf1bb0746ccba0186f37bd2c040e1a74b99dae58f79f6462205c5aad41e2c09c93be2507cc014e0e038d79ab00c7521f425cd225fa791f261ec75d563ba8f7f410616148419d8cf3346866a12cc843fb039546fdba50f13078511078ab5aca351892f27e6d25ba496fd52583e45240f0faf9da59210f43e204d3d9254c452f44d0aec6b41339afd36db8e962887f0bc225e311d9ef4253cf97834e69e558361c4213b772ff964b514f05b14509e9eb38977c9465ef67481cbb34e46ceb615d22f5ffa813cf0c3e1ed53f549bb35d07d868875a37f109c12cc8d0dd15cfd0261cdb4b316487b7e3baa072f66bd245cf48a6bb968762d81dba0865faafba2271612a77ce52b6983bc16c29e55b437a0cb617d47b53f35cb28186e5bcff9e0552ec7f0e47ecba0da58b00f2aa0e5025428270bc071594d2f8de17e30dbd05053b52b92b9987c67202bf7e2db6338f62b911ab5dd14c2abf5bf0e05b574b061eeaf607cb0cd7aab85282ecfff7b50da1e98f4cc13be467d2618fa251410bc0ff0bedd63efb5ffba87632f7cff4fb91890799ff04ab93aec0195105bff0c95982e4177ef66825d355610abe9aa28c99429ac407271813b8e6211201b7d726522d4fe116bf13b8706f8120361e80f71011ee56402050000800a27a726b4d0d6c2000000001e1d1c00010000000000000000000000000000000000000000000000000000000000000000ffffffff06031e1d1c0101ffffffff0468b6e60e000000001976a91414285f65e316bcf26a88133403119bac457aedb288ac286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c8787000000050000800a27a726b4d0d6c200000000451d1c0002bf56659b19a1a300ef317c484a1aecf66319622a6e5cb0397ef6c3593f0b70d6000000006a473044022000c433559fffbd5678ec1ba3196db8b6d41c3964bdacce7ab2c679e27f50356602207a959a441d6352b825061ef59d5b68a608c8dab284cd26c2db52f7ad0dc5b95f0121020ea64863e0c7fc37d065a518c7dc74fd8315147f46255e0920d11344c4b173e1fffffffffe8cc557128e8f007eb1e0bf8a79eee8e314280ef966f972edb5db2ebd450537000000006a47304402202e2dfe1048534bf0edeab9ff8a7f02ab24d84fc87e8eefdcee551e8cfc16966f02205253592c63b510a12ad767b19f5f1643a8d656ed55f5897023d0a0654d7837e1012103afc0fa433ee54a03a3d95d0a0e4dfc1c41477f942a1fcdc09c04d8e2e4d8e
fd0ffffffff00000002865f9e118b18e21aa2fcf80c669af57273641a60df384ffffb001fa31d676b34442590097a391f55633dbcfa02194c336125a710ce58630bdd304113dc922213579b85120f0d8e396824a895aa6dd26372b85eef6453890f2e28ca490590f62285c0871fd4d613e50f129cdf1b3fef0945d8a9eabce54955ac8d0dd7619b891f515786bd6f952c38b88b7bd47be574bede0165ee0c011e61bc3ff6e0a4b691b338da8053ddb7cde919d4f3599996f3df1d621fc28b98cdf9b3762c6dd0e70235061de6176d479cab30522b8b63ba7e2265afe99c66f74e50fbd7798937c860fff3c2c7d0233d711cee91c8511d1b1e5210c18f0b3415a28b1bdbc6e7f864773925ed04a8634daef85ad06c817d0c76eb781acc405aa37423bf71bd54cd5e96eb17813ff30b0d8be590ba6ae954c6ee43bcc6f765c05676f55e510490fe4adbcbea5fbf223144a59b17ecc5ec64ae05c393289dc2e0fc8f0a34c0ad4aab6ddd356620ef94d7fbb019a3fa465090dd71b6796e5ecc2e7cf333a35cfbfffebb7135dfe7711fc96dafec4a9e3ffe5481a8cca2083cec275e536443d9efbbc4db3acecb9a59109c0b1c6cbe6355c5f93076aec7c270a2124adec53017c9e25fd3d21a0ecd99628596efe2027e57fa0665a17e5d2dc43995faee7bc0425105ed5b415b9824ec3d1bcdbd6bcdf1d288205fdf1be912118eb227d70732470e5fca0de9d9cfc5a1a176f207168a49af18795668de723984af0e70da73a577e5671f6514993613c3a3614f7153177df1e9c7991fb3b4104a796db3e8f2e3fc3deba622414d57e5293cb473d00f6337c965f086fd75cad5cb5f2da1b71ebe57831a598fb41960107268eb3967a41335b81f4f13905dd1e065043532f4bae6c0301b9fb76719725a304561c8237c5ae7e9bda780bea6a74f74f4f57eb1f1aa60100418eb52de74f9673c68bc43c0911519d489c191d56710d3ce8479666c71042bafe1e442fe0e9c8cca9dd0c0344e9c0b6e0bf4f7900fd47309255c6d951e4f5160480e415cfb4560f1eeddbd16000d9d5f7ccc1e1f5592a104a140a81f90286a884e943a0a1ac36966e252a73a819b540ebdf729428b200255cfce4e3cc5dd45d4a26e274abab416b3456c6c946317e62ab96f017d4a0c459aaa8154f7ece6a554edd2e54e220528afb044c1c268e6ca67c7ee994c9a1ab105e5cb96c9a02a45346b751099768a1d1edf6e6e97170a3928d86bef75d21c4c2a3b241971402540226a6125c2f6c2f8fe800daca28de59c6ac2a55d36ab16c23d7adb4be74ac02b1fbbafdccbf9bdffa261824707d685c23566a4057433e4083187eea9e666010af8d02ab83914ce40928708308c23d5b9c65c2f8c3e35814e32a420643bba9a60b8a2926
43a160a7b34f1b023cb3879edff22126fd5f50de6a90da4bf24646324d3e73a7d396667c1c1278948ae2827474f17f917173c12b3b6729cae9951a1a98cbf7d5a47f7eb4309365929e5bca04b20bcae27daa6e5111c041f68b53eedbfecb445b1a52ffad0cad3632fc6022dc6078e221d852cf3ee8fd2c207a60759899bb5bf6bd461c780c17ac513a8288ff8d6ad80e569485abc109548e37759f26ff19cb530912b3c2a398d8d7e039738b102bda941ba76194165110b5291a5ec0ea83ed93da401769d2fe4c4a12856dcb8a675d8601691b29dc192b19d475622dc42667614610076a29d08f86604034e8fa21cce183bc3457e020c8c87fcec82d34d924d17daab6324dcca4a2e36f6b9c8dc972b7d787c3414195a36b1849470f4f1ebabac8efdd448255cc919610e39f7ada69bb7af120966ba88f62836be393d7acc72a4714fe79b9f9f4693e60bb1a38c4db5973a31565e3dc755d668a0271053f19647c18fe583ec1b609606472c9581ae03ac9d8c8783747c48f9641304d30fe1d81a4567dced1dea25e99cc2f9f7dffc22ec5a99ce9e35486c5eaa55156549c520ab0daa14e3d3cac9f15ede379dfc9c2d134b0fbf6d2593454b9d0df3d59df92aa428e5950cb6dcb5b83f1ed6c30bca467951678fbbccfb3ab9f56075546aaf731b59a526e28b900c0e1ab842227ab4035dadc352476f05ce0abbf09afbc6ebf0f578005e8862d79655b4c5563078e51d84fe42deacc51e5433b376039e39cf941af0fa4eba71d9a25ee582009db8f46d56a7ce733319e9c3b45eaf6fd9bf6b19d9a59949fcacebb545e9373ade758c5800ee3d7b9af84265bc39c6649858604484f76ae5b1922a3bf5fd9f35dd31f6f32c0650f5573ad2be2a85ee42306731de067547f224af735674e003a02d32e2ffffffff28dfaa94b74670863beb1088ee3d97b38960c6c297c9dcf3d57d5a9259616523fd601c7b26666f7115a8dbbd70b7477ef56a06dfec59caf59e7cd1cb9cfd3cf040dc9f41593d57ae78d30c5b3f0267aaad2b188573ee256d603761dedbd3d5b806a513be60cf1154ac485abecb8703a24b6c4e710a4e938364fe6530ddf0ad0a62be0673ee5f4585244b03cd2a1a7828823df2a38b9ac5370b6dba0e3284a7c2bf8e15a16d0da499fb137b4bfd0b003e73b88a95de5ea06984069fb423476320330f91aec6f656ce967bab922ce5309a6e951790cbf45929b0f2966a0a44a6428730a303c09d05a8cc52c95162223513e5a81882d2076c925ef16f17881208c185cab21d1b903adc4d1753688baa40dc8063f6a92d2552c0898b40ebdf14b9ad6c63203e991f2478cb87b9b626d3b2d1503bfa5e40a17ea8896d6db18eaed266c875b241968ab864c0f0e13c069ce3ea873393da896
1719c7429610b60cbdf8cd0f21f3f08ea2b1e1c0003ea326fa342aed3bdd629e03760c8ca248eda15cfc188ee324a21d207cf57fc32b1a559f232e3938727f07073ab47032b836b0f33effd59a9e6e6c2c1970f74db18b191d6428d1770e8c26929fd02c5b348b79eb8dac8a495286f621fab431917bfc852a60f27ab81584fc052692d318f86909b5f47c63316f305419d1e4c6ee18891e6fbaf9cde0e2beb407aaa0daa1afd655ef06340770c95f018ec956c79ccbf85bed468a5c21b6580fb6cfa8b5aaeb39e5c3e8beb1b02e8773920dd00fee07e5de0a0d176d214e852509344c2ea7fee18f3b3700163aeaf59b7f0788b63d118fef1a3122179cc68487147e5d08b603a9d710ca1464528699bd3eb4d112a66ed6d4596037a526cbc6c03bad20a530dd407e0a286954022fdb8a3abb11593bd2a363cd815fefb228e6a5d08d7a436d80dc50973b77c863818a3b2b7dacc82ea91f8eaca18406573de1e725e3b37d5d8702f70abff68b32bf059f15ed1a9dbcae1030c7036e34fbead919d426bac722664c308d702b1fa1d5073ce81669ff446e87e152aecaa11bf947ed143c91e3d76293421c3851ed13de75b381be44a5abdf80acd31b5e8db8ef46c02a5e24807767d4fa269e7c165249dfca0cf23e2dbce0ac67931a56e063a0ce1d5649795bc39f599b5b5f2d28b90f02894fd6a7acb03b4c4d24a75124f6c1d5df4a6953735cffd69d48985893fbe65687d30047ea4b712ff02251c886db37da95432419f700b412035e9a81203944e73d8e81857f30061f52956d3ed15b320a02df0bf064d41260e7ab55ec27e162ea4b86f9afbc604e3931c74dd10937beb1e96c8fba027fa325b8d906d093bb93e27c7c0383b9d7c7ba8702df8a0c6585a81151362332d5606b35c1b38bb7d256cb7c3649fdf3c587670b2004de04c35ef51de305be110d9e6e0fb4df7e4e8083fa8542bb1e8539de8b9785ccde80dca72af9b68277c86245770129ff838fe01d3c2dd302074b719e01f2b2fc40301ba94e0b7a63b9dd10b84a3ff78aa460a990d9c89a6df3e43e7f6b3c1df2d80d9a904ce0e27f94df04114833b761cea1623f85adbf90051d7d3554d5a215942a575cca69dc4065cf958669217b3674ea9032c784a99c1c4b7c55905a72c63d2cc2b07e35625ab31fb6eff43e571d8314e33ebf5a473b7ecead608679eb54221e25b62f2b9a13c534d64e5cd450d9bf227b6e88ccb51a66aae9d3ee9810e5457589c6d1f79ffa04e5809a4ba21d3ddb40604e91760881af9bfc2263a1d48dbb4a9a1082ebdefb067adeb28302067586e1f3cd34c0f01db89642c2e5d4872f13134a8eb7df81b2bc8fd87385ec805c9cd5eb11ed3deae05b64306dd07b9b2e2098da56cfc4c2c7bb54f663965a
4165b6df01602ba936ee9bc1facb35343301f9e0ea2e24ed439a213cd485298d9884ca45d3260e0d9fc4556df10e6896dfd10d5267dc0d3f2a4e4c3622741eecee981c7d08130d6f2322eec000441e8bd05367a051a0e9640ea6d9c40bd4e9c0d1e8ed59dbabaa12586a8b45cb1da0eb3b1d1c62c9486edf3b0b25df02e806366fa1278fe2922a2953baa7456a24a0927baac5a0be6d34435e08ed6d9385d0ddc478bcf0331104d6e77c1505ca64389224daf4f997b8bd9c923d9fc2de50ffaded4c05c558c06f8b76a8f9a29d8d4863910afba3de6c87ffa2d3922e5f6fd26c3b29ad1291f60790c3ee043eb5990788c96f54d2be92b8aeb33cb1254515d075a8e9c980d34ee1c5f5ba4e6ba06a02a22ed5c6d1a8394f6dc295557101c1c9ac2118ba2c026b5ed059f4b5af1bd00031b698e160df5136a1c454123c86c6ff0fbc942c686ac02e0c289952ab92169ca0ebb5743560d3a2f0736533c2e2de40faf866e2c761038c001daa17a4268900603a506a032c821037752331557659be604ce619e3ca6c54a3378a591992b8b62395fe53d2e64468443e40de9dcd8b2ec3e2caf58a721671065f862fefca5f3111f03fbf3b36984825b3b73334c229d401d45c17e8a24f4bceef1e329d5357bc9d3c4ae8cac531452efcee0b62d48a3bba9fed392cd2e07b8273a3b9661672f21e8efbffd0b4bc90ad901fab648a2aacb0d38c72b1317f5f02780b4d36f104fb0a41bea3fd2ce8947d65e678daa4387e723903261b91aee628b2a90aec4590fe394b5d61d502944ca607c0aae398cec1b2744a145813c3d0275e4a778b08899bed376c90a704f2f6dbebeb220309fa983db942f1dfe20ef7aba77d33998daaceb8354ebe17420495e23097191ab3c6d2dbfbbd7bae90ebccbf2c0a96dc699fd3f5c79015f65560fd97ace818f0fa7c159957f4f9e4f311c88d006427eb842d1fc44f63a5ec1b3c97b8580424118a74663eac13d56ce0d33f6bd4b40db249ac8d8f2f6bae7beaba04c18877da1b4512f65d274dd61fe376e71acb10cf3a9835e0a6c8b32c6472bf1a944384a1772dea14917b94631933502f96a52e01ff057ca248732516ff8dab4b0d8b869cced03e4379dd690d73031025aa495ddf24502245055894ac09a7714b0fbb1dca1f05cb1229a89dba7bb2d27405843312ba580bd6ff16e5a5e4a3cc206674e17a9cae8bd4a156a5e41a43df1e913feceb16f78880b0b3334e18d354b5060408d17d6c57b7e48402f2edd237bc7e25bf5e032eb2d8e2ac389dec09637067b359b641cecd12a65d88e9cb11acf9e7306762e295042f0d588b83e64f652080957c469b19f4f3c82849100ce1d7a2da695c602ecaf6b5cf7308d0442a3490bd351bbdd8106710f4508e1a2322b9f834
d7524d41eff49a97299812d8d956d961f6d9a91765e602751347be79017190bb4f04890e4c805f97a942625ccc163b5f8477c287c6e46d3d64022ce5e3fef5c93079cdc691f5b530f756d6e54e5ca03667fae1c9fe3c1c30ec11bfaba37e2d39b6e586336bcb14849bb94a661e0915ca4b35ed5282fcb7cffe76532a709c1aa63fb054c59e25a7ab5fcbbf7175f44cf2dd4cc095eac2acf3144af4749118464cbfea1da1c7d9e07264b3937684141a20377d350a00d7dc3a455b2a8d13698c1fda6c8be37435a694ca1c422cb9950e192fa7049184c0f5bfb312e69e31e91bcad897c2e0bb148548f49675e5434e1183d4d1d1513e2287310bec8b926058737056de50023acb2e81b6abe4c21bc671099ba9afc949a6a9aacb2af377b39e666e47edce9e4e8b62a6fac32db5d4d95c7f1ebce5329eda97de00bb1b8112bf914924a13e48b7d0162893e8f07a6b8ac42d9f6be1c7c4476e0e7a2e012ca093c92acac3ca5fcbd256b0c3b81ce1e7ab7fab007219d35e210aeb46cf6b5af1c8cef5941f3a928cb3a1ed12f901c79585afaba994cec51b48fe3237374487729e1ea0ae5534731f2eec8cddc8faa1478323df31c3a80ac81a91fa358f20ba53364018f2e06f94f77a066c791067d02058a7dcb491f205a181c2d5f3cf5147938ceac262902da0ce6b799089ddb6d0d944136c33f9a196a4b9b06720af4dbef0617dc4b4be83b2c2b92de85d5193ff2a3322c68ec6ee25c38551cba5fc1df7510ce1c5e27a9b0863a26152052f6c47261fb9a4aea0594c9ceff6b18f96cb3223c10c5f71ed0e613de78ebbf9e2da71bccd39a3ba3151a6a220e23734b3c9e6220e634e69340159c5664d2bc999abd6b9d9a1d3117ccd8135ffc4b1e6eb4671010b01dc19b03c91beab3bb2eba6a5a664b7f28fed01c57bd5d3040e0f83c6a3f2047a58e95250c6cde5143deb005ff6540fc06df9f158cbb0dfa000751725b53275b459788eba519bddcc356d086c7eef13ad0b4f7cb8f23c7f9aeeaa93ca0b20d27ac7ed78a29331181dca3fc0d4f940a39bc0a30a089472c69d6952571f878110881d6639bc1a67b4f33521af6b4dedd5befb79434ec47f15248f0c8bbdb74053386bb215315c24869d5f10f315996c3551d572f031d9d2388edb1145fdf5d3874f02390cadcd2db56b5cda1750b674b4d822dd506a0fb45b6e12026ef17273b5157f5bdcce8be42bed6ea960287f8c0b7283fc9d3e9e9d894c71cae048efd3862b6aafe43241284f972b4045ed76544e02d850b8a3e58f28fc0911a681cef1ec4e5a597f644a177d03836822d09c9fd3994c054e60f4ac6c5c6383d9ea0081db116ef022f3dcd875d5d53b8d33bbbf41dc0800104ed78c80e46ee860011662d32e7f249ce0019fd43444
dd499ff563daeb5279fa90ff765c47883fc850aaf1ee3fc6726b10c29b61577dacb5431eb68acfb0515cd55f1935ce764da512fd01539561069ec91d07922e668995561a32fb58d7f3eb6d7339a2a210fa6d6cdbd20be775bfaed3053797b0c3ca01e081d789891359a18eb288f80c65465efe7181c1a8db32a14d9c79d146466e758f34f70b442721a8cff1e6b7304571ecd60912951862a8d98acf6ff16a080d35e488ae3f1f23d2a4cde0958914daab0f14c9c1981c584cea707a178052a84fb3bb302fb7f69d866493aa46b503b819030251015bd37f673ac756d4cfe55f2b78aa99dec7c8f9516e151ac8e3395ebf0f8f8c033e7a9a622a330fb7e920016704bfd3b41780c6337c56800eea63b48eba41bc11ff6bf5ce4325c5933b1470e3dff35dc010c06928e8ff6179bdfe742cb68ef4b0466ddb28fdb71903423faa3ee6487a526fefa9e54c3f2266f9c4f101900d2792df3012bd413f8d7047c4d5f1d7a4c60cd4fc5929b93159d012f3f07196fca7c307447aab9d405ab04f280bc25684d24336e53fe6d09752db0bd2d014561ab1b0b4334bfd4aaacfc557b7787d53684e1c7f471e717d874017b8d47b3c08553ee16d0d24dd8ed252029406beba89eaec7f2317fa2d356e112299d8e23fae3de931a5b6c297153dcb89b25d669ce8dd422e8ec7639ce9ef76a6735223e92e717c62a17d177a08e3d157f62ff873b7d97703930e29feba00ca4267faf7bc6aadc06096426390ba4237d48b6b9c0e7ff07b4380a1a43e60e0ed77e43690d957378fc218e7bc0ad5464a4703198402ab8c6a75234beb4ddd89af624acefc06836f2940c0ffdf16318e5daeba6c0e0b946480c1d4affffd039c2eadf8bf48c4b1d50920fe0bfd422b0bbc2b1f35d7dd2efe6f4041648145eb32a689c756f219accb7fa34892a41fbd8bf2648b0863eb02a5961ee3ff763a6a2733db0f5d354d37f163e1948839615ef3d2b5f659c48cd50731ab5292aec492f922e65f82bd98f40454910199c4440a119c1a8d78e96d2510b1c509c252e17178d057c415fd1dd5cf4eb1404d86cfb545200946d3059f6dc20ec256459bc83366e2fa39209a18f9c986f3352e238701ce18cbe70579f728896174e71c7641ee7ecaa577099dc79e13dc908e08f816dcf219840d4c762c6bdee23a59d94ad9e021d5f7b93e661d2e4fc6e033fd7bb8f86800a23c5bbf64758ebd31a4706d12e6dfb7ff14604182575d9140f6716364b0ebe2665faf0ea61401db5b3abc701d037c0153676896934ef96b13ae18c8cd6c454d32eb7b8106faac148ee722f47ed59e29bce36061ec2007c3a21e71886c02816e91d37cd699f0dc7cbb527b7cf83451353e983193df45ae74c1bec7a8bb15a4d55a62da59760102b5d75a5791
10b1f1745afbee60bb3fd8d49201505d9627e96a16095597978bb7e9040ba719779edfd746a30c4743a8136180fcdfb0ef325ae2d398d6051912b0cf0cecd85d36bcc3705c24950132fb2a3801ba507f7e6025635f2ee540e7e6c046e4ef832588716a8713393f67f9462a57925ec6e3c1f0a133100b4ec28f5695fa1917260dd08132aea09e413bf3e6d61983c946529050d53b8c0ca5faa7193b4b744979e55d027dc0d9813e085bf8d1b58131300c7d55bc97896f08f4c196c51770d41fce3f48053e7849fda0c67f67b6e09027d90008355093d08b2035b05e00d85ad1261dbe80e30eb146400de8de41b18f076f392c7ecc73e56eaaba1a839fb2c8407e7bd21005e27e4adebd4eb786c3d862ff3318353664c8a3717baf52b7db96d1b8d046f098ff37051aede8e83f70c38e85bc0424ae90eaaff4aa04f2905595b8efae6d59d1af751f775d190095b291093a8bc1248e29c633e645c888b7637105e1f7063ea57743b9cc9cb8d9d0f1564121956483cda7caf716bf1ca5a8361c1644e08299a802c6c8145214fc90b2c2795c4978502f3545daad1ae566507be1a38124aff9683d24f77596ca652a33984a892971974c4fa6ba3d9ea89982c5f54b8283f814d55e23a42212c5e36bf2bc502ca69cb7d026087c60d49105dba409e6e3f7f43fcf3688a724d13eaff982a16823b9f86479e95e119f44ab524f2b1030f770d3747485cf576e22d527b1b22a5d0de3eefb04160d2aebd143d631f5646ef3335bdff4383f2de6f42aedc002c54755d7948604eb6d8569289dc5298140f200fe08d0abdfc720581b2aa55781de8a56ce1f40b548486b96aebef6ddbb4b04ca8c299329ff29924fb585b03bd3f141870e723f65e3907ff48d57d51b7a3f1a425f678a9c3a9650349f221df4f3b6597932b258f2a940eaf72eefbb60985e6a9ea2ecc723ebfb518b48e9683dd3007c521a060ed5d9ecdb4a6c8e5f9ff953a0858dfc74de5c265d9a850c9e0211b15221e5507b05f3d97edfe3ad826a17e51571d2cf53ca1ced0e5f8395671a4150f93c3728cb6613d980db180328676b74f15ecf87a2a7f67266570a9f163f41cccd356fba2d4cf9f411d127418f069f964eb53e25922db63d662554c5401f20b7c2c887e78cb6cf3d7752e041c410771ca03db1865090b3cc54ace06fe40902fac9b2357873b1f758a653dec38dcb65b9c47b9244355a3d33ba9ef49fa745a1dd75153858df79d3a4be41626aa39b9a9b83530579698b2a797729a08349d3e2a0191c36c334c79a7c20ccc96896398f7968e71bdc2486eb643d4adea33515a256ce572490da586be27be370e6189373148754d2f842b74221681a76b13e2d91b0f39f5f7b0ba6ae37c05422d91479819d89dbbb8c332e368c0492
04b1f257000d7f24172ec1258b8edd42a975053ed245f47fc4d3811f09a72a8cfebd83cfc25c16e0e8988da519079260e9f5cdd56616dc8f9a34e5da14fe6bf4e8569d95f0c4d57a03ba3ce7f65dcecec6183dc801962ce22d2c8906a945ff87e3f4552242cc995ce454b3de80a3584b37e9700bf11520a79860ec21f24b5eb33101acf8f31d365a58bc11954ccffd0ad0d859220d6cdb5875436fcf8942a0f2da2a241521ffc2713c387c9f2711b0760b7e89f4f40a3db55631819e5c460944e788800d81b5df05dfb44d806964c14b18749309a717d5ae70dafd9fb5208b534e613e22337b720bc10eb361c74d816dd53e9ea2ae13830916f069f249083430b4a5d6daf3531767ee80c8554f96c6fba5449451241f1c0b35f32b8e46333660d0dd671de1e3deb84fa053a30c2fe1cca174edd78d872b0d93525afbda869e9d54b760e1e29bd0cce99a5a1449ddbb6237d2b7258996c37b2b39ecb99a1598489ff1345c90c5bab2b2e54f014ca6b6b2ab3b317a4fbd195269282cc18e5cc3e1c0734831d22b7bfacc1c89af7f6fb6ccafa3df74f341afff19b263d5dee8f0103a1dc18e61a314721ab3374125c6f0815d4501da7486a43baeeecad5e7cba670b1f7eb39c02c7f4c3cd36156cafae2c9d9c5ccb7d56c5c0ba4bd0c2d5ffc9f6a67bc148ac28274087a9b3a4d11e7c6d6c1a79dd86a546e30d1afc6739df4cd993595f59650121ae626645a815580acb9033184b9874da1386f476e75d8df9732b960b75530e13f16f4a78b7a708c912e0e950d99045a8287698ce75d02dc1cdd8391f71b91c1baaf2bfd69d3dd6bacb630d24511ce6833b785b1e6b8f57d68d1dbadf7bbc3127e8b5ac9fb2a87d09d0d6a617cabcb371972e755f596437181449a332dce32454a7a214887a017371d93c5c1c35622001fb8755e855cb3ecb9a7036daa3720f33badfabd0001b0059f172d9a1115611f56dc26ca6ace2c3855856618c0fb2058652ccec09926e1c9489de974cd9087a28fe3d75931b14fffa818ef77c46bc0998507c3a133ba4058ae030c2483d647f53c909efbb4c8d108ff1952c105b4b06b2e2cecb19c6d4fee66c08145e0b25ad9c15a5e749043f1b4f1452fc2dc67e1bba396e037cd40a825c7f994a2e4a1922373d6832358dec820bbd063336c9920330b93fa10e1afa9909a457d282c5f03ffff7a5e460e034347230cdeae1113a27cd1a5f9770837663c7a17f45547e8a64ea7b46b23d48d983892123b8f951e6102d06e75ea81558e112b1e9f374e332211c9f0508933789481993011b7d3c70044aa7438c6259b080fc4e68f0cef127974fca20dc96e4fed3d62e079ca4a27b04f11a807197e9e9200b1654a0c7e6d8c22240949d540d74210ea3f957f643430bef2da
2abbbeddb7d8081746157faafe88e805a48dede60c5d25237ad702c2c3bb46fd3c41f7f672e8f992df1233db54e0ecf37505426b402f3231c86720ca31f1846ff2377f010890ef72013453ee3460de63d17919909045b6cfdd4223a8b1fc6f6ec872b5b882052a2a321b1225cd585c41db8e21430b309cfee92ff6ac138e275e939cc6b3b98e2b59f68f6020b83d10f0d410f34d68d2a306b1589912d1cd6f69dcc3f73782b8eba6888ddd11c6dda5f032b5b5ab0b11c11d6e29b1b2faf26a6c617b393936df8046c3916145b6d3c510c6abc8d501e98f50ffb73dc3b3d4eefe44325e483c88730365549f136f9afe5334660b0ecb8282c88cee0e635a930d8ac7c83195b7f72d43a9c10da40434dc35bef94ea8464d718a6423cd7a5991db34e7d07e13f7d4d70ae9fbc22a80335cfd70774e6d248c814bc34534029b1556616171d3efd7e581eb226ba3be35b59ff097af5ae8e08eec26590dece9fa4821532ca496989c34ceb1e3125435bf9463c94b5fc7d597731c33f35afacdc82fa9455cc1e994706eb75611735009a0e68d0326bc5358abdb1d1818373e01e13ba3150d2b7f00cd29815063f17b97cd28f7191f671cf2fb665e5cf879a329f33dadc483b9865b48d9c2ebce639fd41f8ce3d1cb10849d56076e55bafdbfe6e8271ff9c4ffef05d25995230379b17c91872ffd9a9807c655f380030b13c2e8a090f3eb72c696927c277225b8214bb4ce0e5f36632c0f5871016870ecf8ab960283769bab1a3a57cdf6336102bbda8997633f616e150559a04b35a4534389a44273127f3e72b830e9b352b9b8a604044c5da2e10286705f2e27256af588f11eb0189c8dcceb90c3efa56f4d342bde229f8d7dac30c476d865d47783b98be537c8ca5e1029bdf19e49eb291fd3a9e28c75ebd864498970dd43bbd29177910b138b0607d27cbc6d9f98dfa0ba875809260b0240bd204d1b67f858db48d04b80bf4a332182a239de9a326211dea965ff41fb6662b0e5d42cb71398e402fd8d62b1a9105a08af4e86f6ba0e386405e9c559b74a6122cda0ab1c530c1907129c8e464baa7f8de8db947c7e0503e4f5889950b27c7fc6096152f2a76daa08e7ee171c4095ba9376d490dddc28bd6ac8337e37c7acff3fcf6e4c46c4ff112324f73bcc02e7866172744c018764e35a69100020ef039726615fb63f78eed59b580025bc48ccde0c8abe27ee2bbbed89ab72225f5922c78baeffef461be5a0bcc908229c1a212aaec91e23b6e7a03933236e30a9bac2d4536e0bb7d868ee24c595e92c40419583da2b5c5494be02c3064c60448a2b3dce2dbaed8431c77c4fae84167aadb0f2a48d1e468cfbb5b6f450e31c69a5bacf1ee6c9c8b807452338d8471b70fac26601871fe59fb08cd9c92f
6e075d44d4777c5c48f7be7b6aa6322f71f8b45992562029af2e5a5d0c69d1c791f93c2377410ea072193af26046935a1491b2f37afe579a4e748b78292a698c19e179db2d24a877121a079f62a140d021de8c4f32d10ab5e00ab7e89cecbbf489b5a5d0e7952e03f064bff79dcd913bd7bd6a77693406f2df4461b7daf4a2e5ec5aa6d39c458aec725bfcba2fc56a80ee93ce1b30c diff --git a/zebra-test/src/vectors/block-test-1-842-467.txt b/zebra-test/src/vectors/block-test-1-842-467.txt new file mode 100644 index 00000000000..da93d738c0c --- /dev/null +++ b/zebra-test/src/vectors/block-test-1-842-467.txt @@ -0,0 +1 @@ +040000009b0cc61db12789e71f38f2670acd5897cec3a3045001546a2dad6f44636238006d453e8ef73f6f6d7b0b16e30af30b6630bff5681a73934844784cd2859ada3db42e0633681cca334bb046e4e2bab6ac5460b9d4199d92195ba0ced0729a8b9f63855e62acfe761f240077b915a8ed7ad4c080720a50514c57acea2e8ee0405b4213959fb4f40000fd400500b77b138a22750db5fcd202d6aeaeddffaa56b7d6031d67791501d37c196156246c427e56c50b1c47d80219bf91c95314bebf1b3651525d4942c8fa5d5b6f221fbf788f6117d79e0502f9a1ff185ed9029c2ec012f90695c889ef7459f2219ac43a3d48b094584d5b2c3aea0af18d41509a4802cc75c7f0fa32281cd8d52427ca259b31748d990ba4df29dbf1ff396e7f04942ad3160e7de9f945cca3e51ab83f0e61bab1361d0c0166b70aada45fd3580f92c582186b30b98cae90b9310a7fcb1a661a3dc9c334ba1d650d4e1e27f96ccf1e5b8bc5355eb8c78d88221eb66babe12e34f79afa1ec261550ac8bce311d4669c88512293612d1e12a506bafb1e08c908aa6471893582ed3fc7013e9b0a2b09ff2e8fbf8330c26fb0135cea614700e2fd53e9d636eaba39242adc6789dc196397f63aa68f52feb483440e36463f1a7aa5d30c6653ba3ea7459e2e6e9df709efac05308ba932e8378a9ddffb0e975dbade66781a5f0a4b23fd5b15f4ca881665c5ec1ac8ef3c361b1701048d5b1408051f2bd2cd7d466814c61eb560ab19e73e478892ed89c6b131d0ea32e18105c68c9e800f33a095de892368ba35f1517e7c41a0f80ab5637122aa5c391a67ebb5987cd302c06dc51efd013af75a1965318e812287f125b752e2006bb650fe31e886b424a78a2e754efd92df42952a95b73081a6661487ba0ef164c67013ff4dce5316ade06219bfa0a45e32ba53972cbf03553baaef1f78e260cfc55b120538baf318c2e261715f013f1d2d21aab51152853a1bf4be1c1bc4aa24899a1a274ea5923009c3b7aa3b
75fb2e2514097ccee2865e2cefc6733c1bd3d2d12e8f6dcd73150b4d14fb8a805ea781531d0bf0eb6da0605b3e33230dae5b3e4db7e4940ef7e700d65d931badfaf40f24fb627aeea014854734a3720970eee90e9ed2d20702547106d7ac1d35aa4d60ddadd2a0855251f9840e15799c3dd19236f17ffaa1f40e9e3f80ab3ddb8a8206e07861dfcd85cfc91fd2f50048c3c24b4a5e91c11df2f59487127b5b1965648497a5e2d5267f7d77070612e7b0b620d11ba5e9567abf5cfabb14659ef6fa1d833c19135f28b1a1ba64e4cd77a62540dc9a6e24067f3a942fa34d3f1feb01e5741065a32f397f8baf3b478a019e92c9e9774383ba0a60e502a30ddea2a508b61de2c2cf8ad745b7060f2fbb5d576a927fab9e13ce6d28b9ab3d55f027a9330affb47ef9a47a0321153d9739d9560837a240134123ab46e704b35e397b1c19e73da262553726d852983f7f9b22eaff5d896e10cea3b88093af46b1b858d737d4cd9b70b59ea5b82f422aa0cfa5446150d82620bdf85341e577d3a9221fabdf618b4e0959d640d2856ccbb5d6010fb14cb2390a86d7625e65499d8299788cdff6e28abb5705e6026593bfc257fca9f82af5c64dc897527988b579be2bf6471aadcef0ef43e155518140c801e8a05561eb16644da15627911940af04219659d7fa5f3fbc2db620a65de4e78a86708463a95295d61eb6aeb0b7ab7b04e5a1094e3b4145e1ba82be49f31a5195bdb0328c1c3c95a261a6b1979b5cc6656754ce5a1fd69a24710ec7bb04e42e3b23fb0f91c8b1e7701219e67858d1639bf64e919e49ad6a308c028f778affa004fd3dca029b8218fd4ae20d6d022566e56023795e5fd18aa90add8a042b2dd8899261d144258fa495d46d120dcd06906a726491ef51819852bdc05b3f877869ff6050190b48dbfa1d5303125882bce897e86577ea3b0c6a07df42ba4182af3eeabbd25de5504485ec0c39403256ffefd96d2df42fe11db89a6d71d7376a53ddd5b63590d60fa21b1ef769eb93a4322124ba4065336ce06976bf0967a5214f6efaf75ba1dd0cae182178fc1c02050000800a27a726b4d0d6c200000000231d1c00010000000000000000000000000000000000000000000000000000000000000000ffffffff0603231d1c0101ffffffff0468b6e60e000000001976a9149875aea085c4ac8a3493e14002a9d383e07ee9c988ac286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c8787000000050000800a27a726b4d0d6c2000000004a1d1c0000000000029c62e202c9a610454d9f91fe
8027c0bea78ba3953c8930fe69c362969f3286192e590c0aa41166bba9da77350eb368f60ff890997fac1f5f8f85302426fa2910f7d88660aa3ddda6b17004b3e2856d4627da84309e1db250aa1ba86e4af6d93ece0ca4f2f5f0044d5ea149db9dceb9c5812e35ddab702caab9977885cddd3416fa38f4c6e84d69de5faf60ac3333a13cdd0db248e7e0c7a3ed6c8a4b77700807a091a4a61d4531a38e4635b9e97e6f139611729a648ba90b6bd7cac7facb8a0389d15e3bc7e211df4c8d4eddcf436ca54812c35c8b63f29ef2626d31c6919f3cc93ef98c7af35b0d008264ae1ebaca52df3efbdbac97fdac49a3a04007c195be9304301d153d008d0a8c468fb20aa4597141f9bc1feaf98da5c02bcd341a9c974ecb7d6b319c1b64a6de09ff8e04bc1e173f64fb8ec2fd9284b1f86dfe1927fb223e15661103a754e48f1914c672f1eea038434b113dda720e002814045c7c3b7b02cb9538fefc8fbfa2eb35f55f9d3fb18b5244e54777fe86f568d6c9bbcba2a9e64fc29979c517b61675064f26e5261dcf3c138cc264abcd44f7452a3fb3ef799b37460f2375165bb7959d1d94a015a8875faf5242ac606ed5e3f554e44e2211dd6a5982d3bd7fe4172dd1b7011760d88fa2751c8120d1ffe90eb408c72869815ebdff4b128c9667073328d31795a5941a9deb14666923837e8983a3aa9c4461637cdfe4447fcd00bc9ed9cfa262a1e8f17c07c99bd083f64214499ad162eb012a141b0435b086f501ee1c752e36c2544e058b69915866ff8e00094bc6ad30bef08a2128148eadbf756794b99eb615f5fc6794ea86076ba40da60fc1f8b883e7521bf211b5aa92b634a8f74c7eff847ed1c1fb33a05cad34d87cab30b36eae29cb48d6791e7872bb3edf08dd57a11ab1feb89416fb38ade3261a66c9d89ea95a55ca7fa7fe850e1b204683c3aba928e18ede7f7a1a52b1da4cd96151e39be9068b7daeebafe86f9ecefd818e5c7917ba76aee6c045c1255c7dd55e31da61a2b9d9a96f1a1b7632092209e25f42b9803598732db168ba523cf173418f03300b3a9087dbdc83bf59b32a17e511b84345d142690a0319aec301e51db5d381d84d3e973a1d3f4a9d6abffee55e5e31b70c74d4620abbef002666fbf20790784f702fb82b0fc4525ea4d7824bf30a07ce492465a88e9ed142dd1152b60095367fec3295d55e45959b18cbf2d72fe8f45a3f48b889104fde804c57689ca9d2ef24c78c04a212e0545ef60848015057623d3cdd66f6bf4242814356272f8cb7a06debb9bce63d8408f7c8478f0612207ca1d5952d293e2e15d4309540bafee8b25983e2648851c9d09e06da98fca0aa0c7ad72c6d29262323157b772d49b3734012364ff0a5b8a6d1b5165fd7007009d427a9842d034f
9a68c35c8a5bb5a36d473d3c16c16d30f04e74eb0695ffecbc67a0bb5376777bcbd073f0ae7e8bb59ec20e0e188dcc3d52315b6df96879b458e9f07be8cb4934c29f2f5f324da73108d4a8720b561a9547c422f3ef4082e6be0ae5f915e54b6825c72ab71c4eecdf13282f2bc1c0a1f83e81ed611bace753130736884598fc843b0e168954b54885370bc83be9d645d8788e9ad4947fea54e54cb7adfe401432fd3e224d6fa1293a722b7dd43a39e7823a8feee66f599abda22ada2cdb02f10c6961b043e3c6f97cda5cfef8d9d8528b79ce2d7994e8c2c4f105506579d1c05387583c32674b8774de9e3f097e25caa187624fdae00b055e2a9c0cf749cc5d38a7471cae87f876fc5fc471e89c6357dafe05c68df18740be43d1e15436a84a973fd3e6f3c146924ede02963391e53a7ca2674f1628dcc205dc10a6693ebf34417cb1f8e2b3e966a90019624c82a8dff18655e6ddb7d152c7e15d07a6189a67615cc024258b7ce89fe88fe9acf97d64470f3259f7141655913c99fe14f70a3e357384fc651dc0e7cf073ba44c940598d3efb87a47e9797a6b029edc433c4879984b963d274197f7786b0070066dcde74a8d9fa550c4fc5976e1c83a665696c9ea5181053ae09e5f2058156d7fbb6df52b9aa796a520b77e6c646c95c19a091857fa18c699b6c57529ca5a828847b63a445de63404c40588ff5c8594fcd34cd1373f7083e57cc8df3e9608dcf188dc3a99c23029b5393d1a034d8b0cf7c2ffa9ea76a93c7e520595031d5825c61af2510ea7906de9aaafd74d6776198cf8d65e429144b4d1620811de3ec25c5ccfa96adde2f9a02692e41f8b97de77e22f08b938bd3c60fe732e8e68470d829e22037c82a15a832a03e8030000000000007750114ca6b95e44197e30ec98cadfb0dec1f1fb1a416eb4ef87f1dffb0f5937fd601cf5d3020280513b3b842f110bb6c41801df68172fc4ddc485cac6d8f1d7a7393de705c64e8a0d149345b9faee3ea2f651e2912b958ff4d2c875f0316d12fc380798d0a6f54cbbb3437f94be9000f1446adfff51e9f0e5c8421167e344ca325a2c1bcc8378b3bba36eba52a734613a33207115e4b3a092eb0c49abe3131174cb2aaebb5b7b4f19c1f0a08ad146844b8727439693e906cca86d1edc1ee8f50cfcbe2a6ed0ecc7912d6dc55ef60cf1440a397c49f6d275e75bef2eb9e1de5c2753046625d20d3703b66ae91314861986e5f534418dc42a2ecd0e74f77a83ea94ee8e490f3dbf83c16cf4b005d2e3176fa212c0177b41ade2f1bfb02705ccaff6f31f4242384bae9f850f869e354fc22a6ac63445ddcb783be55acc132f2c0d86be9e8745c0d5a0208e2a3cf0dad2698f35f87f4e6c0de883182f67ef186fae44749632f7b24a0c3e0d49
bfb2ebb331b8df43ad44d45e51f20abcdf6fd9ae56a1f49f0c0c093135fcb2740c7cb883039a2f0422bf82c229f118c7f0d988e43e40cf3348994b5342df8d261256a91c13ff5a64093b9cbea98d00107f3699cb0052fd8db3f1779f002809cf1bf557c6513489736728ba71699567dfdbcf033fc25ee59b8f4fc561858c4f0de2fe9b845179554b54a8944e42fa2ee349abcb768543fc0d3eb1fd0ae55a7cbd750374c27fa9c5b5e446ffd892e22dd9b2a8a24e9aada795dd3f9bb28e04ea79f96ac4a78715577c3d425bc6c26cc499ac0fb1fe29ffbfbe44a36bca75ad2ae7ec92d17189e18da63692d714dbab10bb4bd15119e7a80d120afcdb8cef930c7ebd687c7efb1ebee4fd172c471f3d0b65597481f69d7c7204dac50b3e722b3d48b4102617331de42fc01b6eddb4f8a6c9546884ecf86d3e809a84d29f676321d68061ef88a532bf30f609a76a04b3636924988778e3522407c497db99a59cc1ea180d41ae40eae00a668b26f2fbb3e435a2f95e1f09dd490b8750ba658e5b914f0b7f86459645297e50add4f78f05fee6e9e332381e4ff02a2818d16290b147f23d948c1ae47dd1c740c168b3099d3a3ad648d8808e620ebcde8cb6b6bfb3218f22f76b9505d04ef13f9634f9059d0a3cc8f0a135631f8e9c1b517cce90ff33d0f764150a33c616b93ae91c70e0bf8b51a1a64fb826f72f3def91759bfd962beef74b66cf3d4a8e2211dc00e92dc671a89a6c13dea67540b35f5dcbc59c9bc5f0956fd19894cdf6080e0671e1bebbef8ce45fbecaf3da728d07fa37d9912b45c1cae01ed809be59dfc990c78bcd904bbd1dd4fa1beaf7c1a20f02652510e4b75022e07ae59443c2a46804ba87354e092f857e6ced4e038e9862ebe90fa962f8a8639002cec1f100c09194f4c31e1de788e67f7acb9dfdea346d07fc0fb222725f1a39bde88a8e0cc1de9e8c4dbc6205edbebc146e9f02ffb63a66907a489ad10d2b954c92577d2d7113c71f95fd0102b7a852fed23d6cfe9e1417ba26c1202f9129216056dd71dc37a4f5ebc55c7ef628aabf88ee3dd20c82b3ad7fb9f1a8839a2a5bfa5ec07a7086e973572b7686e8f54375ac07c4abad95f2c47189e321c111e6b9dc1fd7c05ff3312209a07e126417b17e6821f909bab90d9f9d6fc31bd82234a487e85c5fcd0e4b99989e07da9cd5bef1264c0bf0a515cab456d9cc93a18d0197612c9bf07b131c8fe7cce5c21f4f856d8f47e9bc29aaddf9f54ac6af190f53088de0086254a104ef79607ad6f44db59a95e1fe14b18d0155c371faae6444af02cc30a97f2e46f7af5e2a80b122c357564b3b33ba7cbaabd496c73d953d90f11d4a15cdbffc96abd4cba7372a080b62c07a712f6634be5bc753d4f899cd3973ed045a9859f48d
711799b6708a98c224fa400eea0a65a0e7d880ac2d15dd0e4a202088f25bb28f43270b4c73554f1158e2d36a82f48f9403c1378b6f8505948996dccf4d0ec28d2d11370194510ff4ad19a70af2dd950bb41f7e52835b244903bb6c14c8bbb8ab494789863e70d453a7fc91ae5bc51a2f31a7646487ebfe6af0a74137e504b901be336febda7871c4eb6f925881f824a2462240e9837af1a9bdf1a1139d206d11adbdaa473f820ad142d12e786c5fc700f9101e9a6a8926bd63eec01a66c5be156429887d271c2989c2020b2991c7b917727f17fede1d32bb3b1f7e3b3883d72186ccad95b35889bfd5949a10b8a1190bf22227cc1454dd0a178d1460483c3555b3b93fb916482ac908a820a786cfb7a5c6a974ca232dfbd9f8a7d977e2f0ca52081971ba112b23db41293ead8e1c7b096782d95c20f6938a40353483c69004a6ee29c31ee62394b561370ec41efcc014d940509163fea64459f3816ce200ea165d522523708775b5aa33132c62ae04bde9dfc1157cabec969752b2a8e3842232f34047d1392ee19300f6176c23a8363707c55ffe51d07b354697426306cc6fe1110f908d7e2c02e3d30362ea2353673621f5b816b8416478ad8b8419c0233b2eb703a6906dff297dd5d88e68fa71e01e5cf08a5b89508a56e19208ad15f297cf6bee2a8fc2570322c26f482d8eadc52898cad1cc7878270ca33f7c6253e4e10eee337761877a40dc0b69b8109a41201ab1ff94e3996ddb6f34c3371dd730bb9a2d82f29f8472b3930e96e7749aeb922740de83edfb610b36b92e839fa9a4f670bfcb0e22459ea4a8157ac542904209372f5f444d016113321a55a4a4d8ea02a16048bc1b11a8701be2e98d5f7c9e251b036f55478e5358fe910f99cb918a8507d4f25dcfe50f6978fbc57ffe4582aa0ee0d4f3a5f0f92ec14ff0c72d0e773e18b6d1fcbb8c8976af284c8ad24fc94d35cc13bdaeb093848848b1f825576a979b010997e2bd27cef8c59cb1489ae3a81702db8fd230cd9dc2e32e8ff06e749894455eef10cf8b5d67d98b86c10c80e40198fcdd8c936ec96ca5276df5405a7fac32ecdcc1f2852202fc07dd43d5a9fb1c4b2f4ef0c1468f03f817fe536e637d153ff409a37f11de7ab17b82b2150ed326f0e8059238515b6a7292701f497af3aa95c6019a30f26a3f7b9033e9ff9def0b6301b8b84bed4e3052f7331585d7c4c74b80db7b2f9d17e93ed6c62302414c009c470961e9e29a0b3f73ba5de2dc681ec67806c2c8eddc3719bd83f47299ec03c307445123ce3f3d764be0537939b1351ff4079ac4e9be0c9cebf83cb7df162fb80190199ed8c9c98d7e1a694256d2fc7548aa82ad56dfad89c18c3964a95f2d605978ed1a90cded19768db138a640cbcf3532e83c3b9338
f01fc0909c790510adb09143e042bb2acf0067816c7d37bbaa6b579ec770bec8f82f547c9b9638235f022808e487541023da32acba5df746aeb5aee81d56b338a9a37e1fc8fbaf0c9ea873e280e92c272a23c3f5046eacdcfb5f0aee25db9e19b000450949d7b7358c817524b32a8f8c1cfe1b3b9aecc7be9d6046488cc25be63ee446e57fb46b045f7fbb94510b3a62273035265f978e7a74c65004b90dafb745757b76374098251778d96aaa8e9b6a1cc0cba4d170a03d4518251f255a17080fbbf0de177b183dea6592ac563f7d5bfeee5d4c48dfdd9a00884b1363edd6f4c7de9dfc1d43eb0518b9b56dd0779428c2aaf2549e3f587684a5b71cd42bd11de3af34fe1eed0f28f80c0d2ac168e2cb9916a327dd180a2c21a22bdb4d9bd5a5535942df9ef4411a26f5baf926c0a52de1471cb2fadc53643823911feee68bdf847a663b523d2e0767b0555f34b62ab133d5d674cd9313331551f623435ac5210d7cc5157454172992745a570890d91c6af2516518882303018200f0e47abb3b6cf3f136da97e01220c2de0532080a1fa913a126af1df9235442489996e246bec56f08d8dee5633467b40cd1d8afc96279bb9c0630b2fa799d9a752e7ef000cecba7dc9be0720b3f3017ff076080507a54efae0468e1daafcc6ddb4479c662e8ee3a735d8bf7900e36aa48aa440327dfe4e04030b4765d58ae997de160643af18d264943c6bbf613da8ae8e5e0f15769665a148382ea56837aeed4cd2bd19cf6c345046e2823761e275765a6027b78519334081620fd6162e99144fff94736b54470bc605984431f892cfcbd72fcf6e2c79d9e0855a1a8df9472b74667049918ece9852f5f92372126e1d306713ef877165f4ce1ed5549244898ad4b369e8892fbb2a99849494301c229c8702894c0fb44f9cc05a76d425c45ea95dfb13a89560fa068fd10a390049208b9123f2ed7b9132bbe419648c706d7e73aca4d203b5c89d9c1249bb75016cbcd700d3e411579b0510887291ad2680a508aec4c038276da8214925eefc024325b57ab559778c0d871c2bf56e213548b61f68f1a5ddab55a585c323b90ce007d2f6583f5cffd8749e5047a8b1dec3e66c829692dd7bea5e1501020069b033f6756f3042406986309a4de2ce974cfc69ab9d7b8919e189e0999828f63c7462f0d5f7c31eb0d91893e0a1a11b91548971e7f289d548cd8f3f26dc78aea05ab15ab968ed08a99e6d6c6ba5f916be7ccf039a8c50768cf07cb61dfc7bd3807233d821f03656e636f2c151d9385c86455ae50cce8f864bf19692dfe62cf1d8c272dc8eb27edcb72168f2b0f01b010bc4bb3ea49a8206edc0c26b2552ec2979e390364e5ffd1c2c22316e3f663a94d5f42367db79056c20d9e6a5365a6fe00b0af31
98b4b5e1aabf54691162a03b367c39793b7a5e1f93b6c4ce3ffe05bf64475501f922ec8d0f245837907cad5ef7a94708d433331f76c55e380afae9a24a148229cb2ac496efb05aff747f7ae67c9078179e441163763adac89aa2aba3d0277b0b8de1108ebbbbfb60ada6d074c382fbacff71f0b7d088824d7a594f45fc75ba28ae0419cd0d2a06f959d7d87c8c1b19cae73491848d4aaa1bd0e60ad67ab0483d22948a88e00b61144b6a40970b063c82a850290f5b2aaec29d68dc7bc40b0f20da23af3bf7558cd04347e075e9b4f66678d38c1fb77c9ef15d1ef9b2bc256812ce98a67a8e7b44cbc78bc66727a60fc0bc42aab4b308bbc318b182bfeafc5c35540f31757245429bf27434cf75c345e149cf2acd94ec2342f4e47fc29a5e590167753ce3141e658b52988e38c2e634cf7a248aecee8f1e2faa1d467021fc472f4e14e33ba3b1ea762131097eeb6848b37bf863290f7f1662c510cff1e230ff1bae6c6e5a34bf675bd03973b06b88ff19cfc2f991b6f93da82644472fc7db7c38651011c22cc98b59215b1a3f2cb5933befde876c7de938b742f74f5931c55105bae7be33f752539d644f01aa6923c008b1ce14ca034ae77766df27859ccad611b0b312a73a77caa268c0fc5935a411dc82ab6d2b25a735bd96b58c66788a5f13ba023ecafc0a92d7bb69fe0e05f29fa6420a1a9c737cbd00f9554050d51b841b136bc03d55bf00b7a31e5290c62249698980cef426cf9edbe639f0f76eebd42a2951cc666b3206cf4ef42ddc5c601be0af8f17d04efcd2da6648c95e2a60e103cd5b67cd76a49da3b1cbe259e02600d10758d59b3f4a14301ba4cece6400d6108661a40a3f004618f01fbdcf52ad52d5fb025fa504e899e0b522b3649611ac089f0b3ca371863eb2403a8efeea6d5e3db8e2c3e99a8f548da2d88540f7e6a336cd2fd6a74ee42709ae46f56e6465d18072b9d66e3367de5aae7118f1fd0d8f1d3416dadbe66654bab3c02cf9c950bca84c85065aad3c25a828630435d9b38f316be1bfd77113ce9d1019e9d6d204da75858d57d716c375eb8d065baf7d4b53172ea0d6dd6e66bc892741bd229bdb983b5c9240e4604e09b29108c03b793318147bfa68a31ce5b561eb350512ee7468ef130d6c47ed0b2666b8f5b95f9988e60c0dfe7e29e98d083bcfc819358a881a84b3b95346003b3ef9d6fde9d063525f0f0f83eb8181bd6237a82caeb4483d19c29e108abf0b11832e43c411be944e912f83e06756eae99aea997b18f56067f4ff3754cba50d60b155a6b546b33ae97b11534f84a1c9c06ebb34a4527565e8a09e006bcdb2a3bd9200dbb608c3d5bba21d3d321a48929093d4d1bd85c3ea022cead5f80160980c424795b72db77496c903389d1277b74ad086
5cbf795abd0e09fde89fa8fecdcec0d128b774451058572b2a9d9afe4fe835de4a338278b54b0a35e7f13d07418f9fcec61653d5dcbab620412e2b7ac612c978d68e58ec5933c14cff7b84647084cb3205ddcc7a903bcd17c28e0f5c58b15bdc7faa1276a85eac6d3ff312d2e019ac90c891b7869836dc178cf9d1e813141a382e5e76c896e346d50c5bd3571371a1438ffce5bcb97414332b1c3aab309e4148e969469cfeaa014fbcfaa7f598fe4a9e326c7af6a5d70d270bf0b5684709c03f0d0489678a6b60acbb04ab3ca7846a6942bf77f67471d819a47010f7ef07eb088ebc166f11b5c6d0e7287d81eba0e275b26785952b2f79264b93ef0e99b88c3fcf1c579088bb7d451db4e95d6bf29969e3ad871bd9a9a408aaf50065a775b9ab7a5520b65ce70b916ab95fcabf58205af0ff5f2bcc172c2fa981c9da8e9e5cafb8d615b00afe9139f0b40f50fa8e3800cc90ba06d0030800e11ab8c1d7dce4c5065d85835aea84bce1efbeaef49a59e7481cf8ed529f8214317230d5a7e96d75e1b6a82dfb04752be5890e09491f08c4b05fc3b9ca8b42025082bf305dac7397cb63a2b211741a7fd06ad090fb60f0de6d4039a93e8a7d26ffbdf318eff7aa6002497842f05c2fbc833b104ff1b6887e496bd92fdb9b413deea321cd57f62b39e4df3b1590172648bef9f2734c8fde8ca8654bbb515a5e37ed9dfd173140d821e3fcfc143ecdaacbee56c1a19eb9e5712bd91523e2784e21b53ef906d2ad1b932d355301fee3657e469cad6214dd39317a573c369b11e51d0fc1f56b79c75d81721bc5cab0e6252c7d8d0b3b1d8a520529abaa6da5dbab06a67b08901426dafe3979ebe6d3ca8c138bc24c800f30d8b74093d025008f8b3758aa4da7ba35332e4890ed905b091b2cd87aef36e8573acde206df8d8bda742ef0573cbf399c248c934350017b324532fe7f8d0c660d170f9ffb03084964e73cff2e4e8271bc558216856cb3866998abf29ff5d5ead749b367f4a6297b04a502bc01d5767878ea7ec7fd9e38446f6ab9d09843535bc017bd577e29ed1a7e113872a074d887b61ea9616b98f06753e088f2c21fc450717a469bd31677740a96153e60778956db7dee05f7278e81332ace82776b1b853361ae4e0ec8e847325b10bddfe5af6a4d042e1829af67b6b3c57ac4198fd876dd5b16ed6072da149ceb39539f140b59ad46f5431604ac32a5388f335d027a9d738de883b0ad3b250ffa251124e82eea2165931909e447307c9807a5e8c0e9f3495e6155fa9362df3b372f7b170511baff68f1e6d63354b8e9f9a0eca122f5f788cc26b361d9a901b94a0b8df91d3c8a01f2663bd03808b42dc8eeeb7fe967a2f309f93cacb13f9f6df63fc05896d0b656ab73c72fbcb7c5b66839
80073b47ef72bd2460356b32736a1c2ad1c313220d19b4e53f18bc5e3577f7612e818c9cbd0e0d26f14694f03f0fb0199e2361ce86461293188d4e8d72324ba463c60236972b241d24086a25410e020bdf4da0520eba8711fd1183a4c8bfe6a4a61fcdee667d89f170437d39a691e10d9022f72af6d4a5301aea4eb45d37af8c77cf584d569441a17485a0c04995150e24204142f3f5369daaae0a5ba87bcb45ce83e9a1313f4f4c8a0e7b61da168b1186ef5bb454b79885bcb43d32ca7aa65b5e89446c061546f900a383ee1d0eda303d2d6ce36d573f645bac783d97ed78de629110b3a3e1bec99c4256114b3c4a0c17930fea792519b9c2e127cced3708c19d10546833e8fda2dfd668346a519000b93df6e63263d2f483f1729ffe455adf60856c4ec7feca95f542dab2e1c1ca11d0b725297c05ac2069825027160f245a7b8288094514a5289ccf4570019e36285b81d0ebd56ac13cb48861eef63b210cbc6eee052efdc3f920c50dc8101f2128ce4e9ba7e6ac99cb136e49e790a696cf2486019bd17fa30cfe8cc1864f4054384398cb82afb35b7a273cc7c1718259801cfb048377796fd47eac4a417f6f8d0a589764eb6f361b8d470f805ec6b8f500ecf07dac2ec63b686deea11ee41aeb3a5fd5d0605e8618fca6b88a400143677fa5182c7f8677740131845bd6e5f88624ff38aeea14bbf3f32c63645dd08762d00be006e3021e907ed81330d4ad626914a0caab7a1dd95cac9601a8ed59376274002abb083cd971ab23efcc6a2ed01c269d6ba376c95abb3c5b6f9fbf94789817004adce0d9af5bb726b38b72bff50f064d2de7cc9f73d5028e81089a0f5274f62a4a8238a80751a5e011a5f91422a915846dca42bf08a4feac02ad15dff34555c592aec714be2e201e670602b41ef51d8d2078c1c8b1f68e0e593fc9e97a2f5e3fafae2af17b043f9667f34b9c45ac1b729ceb226c3e807fe1ecfceced365e4951abfe8c8be9702c13d7b20cca8d4e37b76396dea4d341f71fb6a80165106ef4463167b968f9122dfe8f2306de530c2782536813e89e0da61b2441b82b395aab8486d96e0a26285a76d9506c6a670b3a08d238c274d0be8a6a49af3ee372119fd91d6107d92cf7a04444496aeace252b76dfa43b3c7b517ea3583ed364c60f9b62484867c67c9ab24ce8a12c902da70cebf3d7b20d720a39bb17f7a3358b55d698980cf24d0867f11935eb3d5d57e822c2338da1b2b03ca8c57f08d85bc8c80017c39dd4ab2bdb82b8a6cb3f55155935a521ea0ec13889d568a0fe166caa7166345eafc2664ba38b3183c57943c52a36caf414888446067abedbdfa9e400ba06cc5e4c6c32bd3da2ac7d5b52a0ed0907d4d841e77e076eec1e3b614ecfb9851fd8e1f04f7fc1e163
ca640d36f03f221f8449365b89a3ce25af1cf5364196e9b7e3316ba5c182b556a95b3d99cac0681a7c56c31995434b0a52f776b75b53a52462bed15764fd475905d07408fda2a801c7e9e461682378f6e9522bea6fcfa128bc20a90acca07ecd249e6410536ad2325bb9a1e64e9d57c1860089a67c9555c6411891eb092ade116593345435cac7006b978322a9a083bfb3098fab435c3dcff27f054c5567584b6f2a32ed14db8a8ee33f65619f4f94e50e01a7fad4c89ec36855627656b0193ef2732509a2749aaab9323934a2de31f52e54b275e2f67145d15e0483cfcb9b3d3bdacc39e8ae05ad0b80903260c774c2a00648e79c7b2ba1c922d2fab2920d07c709938efcf5271c5fca3f4f510726a40e08ec6b78fb047f88c0ca17779ed0d593e848707b43f9a2029b9dac5b74659cc36a392556c3862cc42b7d7a69d198460df905d45e22409a82e0dfad4b6d43ba2862d05556791c95eec67a48ba559db687356fb50bb290b0854727786e8ab99d6d22cb9b8994794048319684d197125e2b6fd7c8f6d2c79826687458cb895f575bcafc15d2729c6ceeadbc1ae9550720f0c91e1f10db2f0099cb49fc52813cef0a15f6eff5bf062da794e9f273f9c717489d26fc2cc6b08526d0fd20a957b8920cb417d412f379b7f596d7357fc08f0c3e48c6719cd385298cde947a1322d3d5d8762d3a7c0798b1f90282e52bb8bc75b775e2894db46309cdb77693352a45b7ce40aee3fb8c8d4fdc7e7910430e8de1aa80d5920bbd8600ca5de2327c3f2fbfa11b279b5a3f1f918b23ec821ea3049c80f92b9d57a63a378773df1468f6746c52df9884baba73982c7fdd9422436f495093438484d9259eaf71ff5f5313f9b591c9a09ce57aed7d5dd093a5e3b25393e73376a6b59c5328c31ba658a1b58bfe3025f157edf0ae8ca44226ca0b47ebc8194e6821534f8d3b79de6f0691214996310abf40acdf523bd3d0b95ebd39a3d90fb68e03394328aace307326e95008e0391b332b53e2046853f2a5f68bc9e404eeaec70a4e3ce738c3cfccde267e5f5e12627eb3b539d7f87de08da22bc86c57e7990d6a3bba7e1d3b9ff7a0ed8d5d6394e07eb3f03e9262618f9e623c5bc22f939cadd4c6356e920a74f847a5338565ba62593674fb85b8a4952764436cfb14e915ce22c0f76e98d445d331a9a3061e14430d9cc279fd44186602e240a8e5ff94b5e4e45458e08b8a1d6900ce851e422c6ff7d7626651626f55ae906c6e94f09ea524eab40cb731c3d7ad51a87e2f4f64253dc789a05be894ae77ae228f9c2b08bb7481fed98b2a34c02c4fac262048e57abf8bca1570185618dd6238d4ddc7b4de497108b88626f157ee15790855474d0eda8103700ae31befb158d84de105cda43fb54465ad26
49a527996db0cc01fb004f579eafcbab9b0c61db6720a09e775313c3f4ebbc90aff349b5abd036c761e66f9b48b2ea08a9c6998707f8afa492be11d1b997c81bdf7c84ec85d2c3363d4ba12c2b6b87d895150c62803630d37fc2cab6b7380886e3422d442084a3a69f74b67870da31eaa52c3d7f175262b36b1f3e720edc5b1e diff --git a/zebra-test/src/vectors/block-test-1-842-468.txt b/zebra-test/src/vectors/block-test-1-842-468.txt new file mode 100644 index 00000000000..48a2aa4a885 --- /dev/null +++ b/zebra-test/src/vectors/block-test-1-842-468.txt @@ -0,0 +1 @@ +0400000015f951449e379d8dc6d0c0233f3262a43b468e8cc5e05f2386288e64af730600adf8a03a0bd272f2c43ee6870de9ea00ecf35ef30642cb320ca58bea730a6dc3689d96e44cf54be6b4bdd127188d7b913713fcd3a1b0bbaeac7687c79d59855d64855e62199a741f00001dc94b3d9ffce0f80c525f8e5c652195a4f88663af17458ace6b82d90000fd40050013c526a655ab42ec05a0e77b5f3f988df43bf2cf0fe50fa01760785910b63cc9ce7381ef3d533be5b22d822cdb796cafb7b56eb4b73dabe24a90619ac46341e6d4c7faf8f57fffe7d583b8b029e68a78b644fa044c4a882d8e6e8b167e7627d043d67a4d573d0e4939842cb91b55e161512264415570d4b3d2403f7453046c8b22c9e08ff76b5474b254bbdf29956799b67a2d10a6564da24a9bf487659352318bfb6bdd7fad02005bf8b9f895ada315cd10c6e79dd6812bcae9c96f0d0e914965f5292bbb7ad4e9d6e4c6cdbd14f38ad4012e0d178186ecf78290b08724e02e4d2a505690de07404a0b61f2c3b39d02b3354ac9ad67a0827f717b056e28fb24c6e2a85b9400fe553ffd04e58d33cbd40f8c12f705b62d73ca530232c328ec9b6c121c94991ce7af9884dee081d74533fbc54b40cabd225f9e831ff361970e32fd9bd841622d9a53c4130e593f022a014a7599a190b24ac6dff7a1806cbe8deca31333f61e510bb2e76e5d1bda19691d594cb416d46cbcfeff0985940cb1c9ed4ce7c5c1276b77f7d103fc8cd14f122537beb46727b9cbfe01c39e6e2cd4ca592a751c03de19b06eea2e2b9cce63483bd2f839cb29b1acbe2275ce92015384c1bf76822e87428f91c74df93d030c53728a9997eeb12740d2b0b8e348465f289e8ea76109cdc967606131cfa4b71640547366b3b299dc0c03f6df51b78ed95f5aa66244d261fad587a330e538390cda6d67dff0f341df0632b75af6da1e5bd36fff0820d2054bcfb20bcd01e317981cd8b22999b4aa177a4555e68ee76d1b5dd7980da4ec17ba184fff665507cfc3748b46718f38d0e83453d2498a6ad
895a2571087ea3827558a45a91cd27646e346f0d326d4c0ae10812c31f04d83dd8d15e63a1d3871f743c49edd6a2dab9e99e87060f1cbf1a53715ce88ce02b0d1ce62001db9158a99fdf3d95772b2271ea1514aab9e289d240914235058fab6d1f40365cfdd493aa443d5cc8f336b3bb4008debab01aed3bf74be8986dee43b22c03d404b0846a396b93de416340c5a514a24c698000a03b5de6dd7a9b585dc18e26e95deb5f62f84590bb34c5f5726a21b8d337f00d50e8bd7c51a68b678cb530763d412b59a9af7494bc26f0c169e05bdeed0986d2838d3e32a37617bc2ea53b9d7ff7cbd39190a641402bc25e5cb1c16b9d022b320d2cde78d0c70b0663e1a5cc1b0975cb3fded78e25ed3fd60f0ae334a7b1e086bc3e0536cf9afbce0f0bed2d59c994d209e08a516dec66761270c5b5ef5b260beba29d229cdf5d2d30f481e7ece508123209d317598acfad8e23a5f8ef912ce92fd374baac8867d82ad317529a0b89c1217831a502c479848f1f12783f59bba536e0e37bd1721211f7266de566af9b963a86202ffc8e3317598b4639304a096efc0a9c10fba1f429072c52105d2d0bfc1781b8465e02d491fdb1346c627195573dabeec39bc283ecaf46bcfd572ad9ce3e3fca6e61541b278afe93c4461beb3679c3cfcfa92c4c32ad1b3eab2a7559acc10b7abc68d0b95fb4698c4a88bec32d695b219c1981e2e3a01c0a00773b9d9e35bf83e08d8fbb62ee2f21326456bd4057fa53d68115402c0d77d4b526d0e774570d2cc01b61073b45bc53be248cddad4b77e3a18059f4861840a640ea46d6b14baeed05af2967f8a522d88ffbc305c1db7ee1262dbcaec5484d92d9d335209ba3fa26d86e97d1c95f0fba891e5f6492bbe320e4aa273b6185aba23239955ef19d70ba1ba6bae4fc7063dcecac9823cc7bcc9f2d8b124a4ad1d36b74a562756318724611b911e6f231a6f6fa7e28adc95986214122a555b1346cebf5d85662cd8272a5c1795f7991f6d6948026dd7b387efc532d3c199498314ed1bd002050000800a27a726b4d0d6c200000000241d1c00010000000000000000000000000000000000000000000000000000000000000000ffffffff0603241d1c0102ffffffff0468b6e60e000000001976a9142e6b134dc2153ffb1cd4082cc8f3dba14a2b9a2c88ac286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c8787000000050000800a27a726b4d0d6c2000000004a1d1c000156dc2007b77b862a1a757406b4dc1a13c3bbdb6994c98af902f769420dfff279000000006a4
7304402206cf644a4db4581736378a8a9d8204d77c1e4404bede98361cd247339dd610f5502204f79d73ef108ab6cff2bd5ce2104f408721f24c301b9c1bffc622e752c20c81d012103325fb5096076275d7e7a13098c15672cf37e2727b636e301123e0ca4da8cbc6affffffff0000017d622b31eff4449daa3a22925207914ca7bf2605c1e99b1d74459f3e08197b286fe126f1661eec9cc30583c697df690230a3bf05e9aaf25a0d3a2230b9833c34ba6a20d628b4b507caffa20ffd1c8aeabfc00556c28d1920df816c7e2e66aaa3ea0497b686f4e03ee0d91516ffe26a56853b96e5d3a7e1be5ad99acdca0f69f49157b004e73401e2787791da0a761bdc588648502d26ae2b1cd47557df9e733cececc511847c66ac638ae6ce71a1d1790c0ca462034ec7739bcf86b0884626faa1e9c4183a5e7194ef27b5f1561e98e761227a975b113b9712387eb145968a8c20bc7026b461db2035bf07cd64d775f3aaf5554d66f912ca38852b26c8c4baf09d3f1bb084c31535064f02b99f7f51df9ff9b5d525da7f1d8ba8eb910db0dc70299b8553b831ce185ff9b6769735fd4d20c63adbaf971b3d4cb2398b8ed6010b663640c912a40d826a1ffdb830aeb6189d9e4cf7e2369e74e6e2a6fd012c1ebdb21067362c858f369a06b5ff7001d422828c71f472ebed69c9cb55bcb4519760d2047e0654fd34d7daf0aac02e88895727f84338a04927f4eb690837eb0669b25a1ee4cc6337c1a16b456ab44409b692776a44ed0dced02867f1c5a525a40ab4e04ba33b41894def3ec34c319d5cce0ceaaece1c04af41eb3b235e363cac3a8793a9a2425a373ec5c0159dfea92594d9f39fb73ceffdec49c3011b6e5daa60d869df30b1b20bfea3a4f93a1fb3e67694d33ae13f02e4ddfa937e100347fabe52bbba38269eb5a851bbfb330ccfea5ff0aad1c5ad40fee1e3031800364ffcc42b250fb7d0c14dec91ad7bc3442d4ce2c03cd31109b9bd14856f685d8056e38e16992f841b1bd24305f8d4b3325715b40281a6cce290ee9b8c2e7a83062d89f3ba968aeea54c2dce7b480aa5880642795f768c3ffe1e6c0eec831bf48a097ff2a38a73f918c667cade52605b32fcc2ba9ba128dcfe0179235dffb7e961fbd8310a5f7f1586f18e4bb5d47cf54536a46503fcbc1551a4c1e43e6023e11f0f4216afe78bc5bcc7db4628f2b38f96577b7bf578c56693685119f1ffffffffa5d8f7f46fe6d85076fbdb0ab255159733461ff511746363d271df57d7e76a69457c182b738736a6d00f605b84d4c67a8970dca1877f967fdbcbb1901838b6bfc78e695ef37897bf3612d21c726d0b3943c9dc8c7bcfb0d7e99b039ac5bdcc2e048648bd36563255eee23712f69665fda5510dace0da0647c84e577
2d4f38ac537d2f1460ac4167016b22bf454c84801a125361aacb271e4870c4bfaf23363001c7e342ba7636067a7e1f49f77e5f8ec8caabb0c5a9af0e5dfb38bef2733b8ab589d06efc190d81f3791dacbde27749a317a150f2265dd79d31ccf0f5c8ac9adf61d51fe0348c89e7688c1a72865db2c96ed43d05f2f27d0e6a8d701726b4701029982f3d60f71d1b51b41757a5e464d9359e9e4a3c90a829bed3e3083af62fc0379b2bde7cf3bd06ac7ebaf39b3d62bfe0f0ea6722527b66b8b3129d9d1309f1694de33a61ad19de15fcdc5ffe02f5888fa02430198d680519200cad24f2ec0925c1088f97c252c3d72c033d2accb507714d550d37ba34a127df1d956261b1d0942d1fc79318287c84c0e53ed394c385d7fc05c77223780973248228fca6259329359a22c33f2b1c5cf62cfe5b1f6b06deb0309eb0c6fcd4305b61ae4dae6a533f861b66919464c70ab204a8ad6d5d4a63e523b4dc7fdd664c8ef4d8805eee4d91f4aacc8220ae525cab2bbece4a49424c87d93195934963aec7132f45d59eee784468ae704a9ab9e933a226300ffaed98ac353d11f8b33b4ee5f015f0fdb7059711c12c1a1e026534c5c52921edf46551eb4e14903e61a5bb0fa7078332cb5ae27a4095ce0f2fb69b5d6cde4d77f2fe0f5ede5cdeb57abef4286736eaeb2a323c756572ec3aaf3183216df818d71258cfc1ddc27637e6dad715ab6dd0a514ee3c08866eddfeffe79de3a584c926249a9613e1553d154e8b0fa3b38520ef8ba4beacd2795daa6693e9df50c88fca2dce6603280221981147ef87c297a6d0ca3f710039afb4562e729ac691590d181c715a39592faf27861acdba99cc6f215669e80d73277a48ddee2d090fbc161017e0fcdbb5e592b4ab77a991660a399e3b0b38cf7d52e90d48a04622f757b14d35b852ef394fca3282ad14e518cbed3666662cb2b884c8044747abbaebb44e33249e75f0b4cfc43f9f6bf790baa4f7ca2e562423252cb1080ce21ea3428e74c45cba987279fb39ba7fffbc1ebdbdcc41fe04931abd1c569ebe133cbde8a830d1357bcd90ca29c0dfc929dc824c0d70903270aba445661ce5552e91ffc41505c29bb0ff3ff09fa1d3385c8648089c7b79f326998064bfd6c6c2f60cc3013792ed4926b3368dbbe938874de6ca28d16ada7a89a7f5fd91e19f20db93d528bc07590edeb7423b5e615cd6bc594461a3f5e1a8f503a4c60047a4c07ac3ef04036a9a6f4b1ab2b9a50000909637cc93c6d593ca3bffa309485a5dbf777839a6e85585252c75954ecfaa9a23c9ffff4f4c7278149bab277fd33c89df7240472b761e337afbc2871abf208f7631d409f1ef1e509a6d4526683bb808fd72bf65c2621676f9e34bda11f9d306626604bc14029de426288f9779
a58546c71d36f154adbe9e404aa013d8e309f78d1a789b3843e278a9a18f5a9f7edf918ae7e97e6714ce189e3f716d371aedada20b8cf39e7f97b990dc7a0e58c4f9916e6de8e88096266a46cd5a2eda137be3e222f49a3b720e9bc8eb16f74d2e01cf7f7a99e410cb070c2765627c14cb514997e42b62c9633379d5ff350ead7c5b22e3c77449d38b2106274b8ab39a18c0c2847b825ae5c3b40a60577fa3670f378f38effefc2eb30acce29a0c906e84165b6620f94de99449ee3f58f53cb081ce8520d902ecde3881252f876ed4261cc1729ce54a91c45216c78e367f8d181c68f1b6f07a7ea11a2b8340be6c3d91eb25974e9bf65cf214282b9f6f096446287b4fadee73fa897fbbf886e923f37228f5c7a9a5f836dc12dc9210cd8bf76a61a743605b97c15c7053412fe2aa70beb8a23aea535a1079ee79a8f81e913c8941f558291888d6dcdb2d6df28c3b8a4cb198f4329751801ee5cb27a42a50cb9f02e4024170535644068f78fc952c8c65a2e475de530347ba096d587786d09be1770d9b81148424bcaa1031ba16c5742b201229253692e95f326727183df76ff9848039cc9a78ae7ec9ec3bc1ba4a3380c12a004c47ad861b55310e904cf1d84edc2c20294d47ddd54025e7d8d4923bc3c23162c8df5040453c3d3ee7ed94dda77f40fe15383135956d6e0a0f54dde0e4810fe6c097caa975f1870ae1de7dd704a732efa3f5a6fd20c366d5952d0878f796568022e3c02fbeb5f2210b781e7e647f3be0b3df71b3da76c137ab6c07412501aa54811a6c6b39e6f0a973b7ca9abd9934f53f8e1739fe47e8ffe1eb3ac19de88aef73ba92e81c73caf2b34e12724bdd212e88a76408f2c15f5e30e5b7c048e0e851d6e1366844fdcb1025a56024d27914e2cb727032d1f392686f3731f922179b8855185d5f0c7a5198a8fddf6bad85dfb3fbe314f55a48fb2cb91df3552e85c45dff2613102a852946c784b5b6efc88843aa92c3ad1cf3914334ad54f064148c479e3f87c70974e1d33dd07b785a69ada45ca37402744369823880337a194245ee28a6d8507ad6c3ffa14070f0fbbda4366b0b40300000000000000007750114ca6b95e44197e30ec98cadfb0dec1f1fb1a416eb4ef87f1dffb0f5937fd601c421b8b084728add9698d08419db23324fc75d2115af9fcaff3b875fffc9f1422a29f7c183940420fa423b177fada5cfcac953efd5c380ee6292dcd4f7f932d80cba382aa4b693238617ef1ecc7fa20a2826a6540dd27c30bc8466f361d12e39da3758ef67497122d02752f129f65b66be4a476800fa562c04265f0148fadd929daf5893742b782186bd5bdcee9a21bbb3b24d14fead882abbae1ef4d5a368d84fe2cf6ff812b08e173279d46eb6cafd39818b20f0a5cf
9daff96e6344b7eedb5aa4dd06215318354709f07e65487e5a3c2f95e2a03bda287dab6dab184012bbf4f9d84528ba58174832f17bf41a33d048f47c80f4f476743c0f15f10502c9711a7b03510244c9a474d27476a7aea308f65e55d3f7b7f5daab77112cd6733b41233b42f60b168c3e5195df9de7e6266fc05ff1e5dace8c8e6708ddbcd4e0ef808f6b789b7dada2113ef47d0ed585ef8ac56e55201a55b8516f98c5605aa913dabf471ee2ccfa90d608481861098e08d2767f683cd857c311cb9800eb4f6290383a93864979f096c5399926de4436120abacf91c24a6d256b55318761c8b91450178f9ddfa381cced12b3f46c9b7aae1b2078ea1ffb53a61c3377638946944bd9add71e3767931e345fdc1219716f814b6810d480f7d2c7a4209dfd1d128d0ce1592ed1b581c22c2fba3935840acdbb6496ac4f6cc68c9170ae4a2fab855b12d3069d632b9362cc28c29b282b48695ecd6b3b4e976cf1506e807cc1a2d024b928aa4fdcaff2498dc73eda0f39b0648ada0dbffb1aa2fe175d07775488e2524730de2438591e88c97d286188d69a8bb53ffffdaecbce96b1acd2a0a9b4f0461572ee61d6f6258e73665b8fed9f5634e25f1e0bd9278b5ed5d08db9cf04fb8213236d5ca45220529630aa94617026ee3e46410662b06208e9b7bd1bc486da5272d18b430b8d1249ca117a8a6786f0511720e537c711db46d62f4105c74bf82fa4a24b99b12e250b4a06370bf1b9b7859b36d8523118fbe6fd840ae7ae556627bfc8596f20c643d5b47284aa93b40cae20fa4a31f355940a48448ccab5662dc9e929301479943f4b7bf7b32439b9fd1945d6bf3341371f3891c58e2256807ac9aedbb91856589bd27f8281197659e89eeda0d8ac56639859db408ba7dfc104ff2ffb661a098f878b7971ae3bc442f73fd513f387c4a4099c7c3e7b54012b1e72105a61a7451eb3eed59f45c7ef0b3b11b1826d4e3aea60810bc6676b277335276e708d89ea3f0879dc28fe862fc4b7923af1358113d5614b2630d26edcbad4f83dcb3d9f49d89df42189dff9107e306fe49c811b05f9188b7487354272653abb134349356f2cb9d84b4402fc1bd26b8ea6cca4f2f2eeb98a43521374298902eb0911285bf07accc2b8e79457fed13d66b09e933c235714807812785ab884aacf62c2a535939ddcb2c0bc227111fca434860d11d08d2ea0385431b4ecf666fb0071233e4fdd1d956484f214dd0b396ddec71880c10c9a2c2aa5a3ce1db207b6146219598c7db5872f132b30c9649ef45ef9977b8ecc0e884f9b25616b01068ed06fd00f3c23f51e418c6398e316f8dd9f8fb1c885ecf80bd9858c6f27e6438f4c88d2648a1c2376f17b8218b3dd7a8a9ddb4b6c18290fbbe92b6a523febbd5d223a
539f71013f8418695d016c11251c78738b7c5e75ae0d0ff14c218d455c7a234a3a9f65bd7801df25d1815033be94862ac3ccd52ed7864d89cb5f563525ad51d48bc8cef50a10dccc6526cf8ec840057c26041b07fd7203f7f50fdc7e6ae4b5a093aa55d13eb185b568190c85588f6d8c8b9ec408ea0ee26ae5987a2ebd687a3581cc66879f76289a6f5ca8d761cc4acb18a71fd9930969d7ec731354575322e5b10f1c34dd3258db70bb2097e3dd256e97552479271aeb672e15e322bc539eb72ac069685fecad2a2b022b789288572a086a090c198a713c1371e465c47ff9c34935919e04d264f95f50744d38fb913501177a6d57c8138f705f135ec556422803b6be9eaaf6822982a78e74965cdf0f510aaf225fb021870edc98cf2b786e85e2cbf61a0d4296645ec5627a9b03926c83ddd3dcb68527cbbe7bdda7a6256d5613b6e99cabc2f0eede224b85f5ab0ada796ea160d3f08c0642f793ecf6b81b0db89f4e402a1faac001045e6b1203e146d142122cdf6fb8655bb6b2c6ebcba3bb22b84df145ef9b9f05692943150015cd8915800a9078935f5a7ed726dcc8f213aa69bbee0e83b9627c6edd1e9e4cd8f22eb2bc6fd711d4d765c23a47e724123bc1d5fa554645257c6c030f8d9584e5dae58c063810566fa08a268d8c319a55caf88bab269fc6d78c8bb45b73f4f112e35806247dfdbbf786ffbc1a4066e6b1a9a34ac1afacc6cc9cd230be226a17bc66a93a0d10691fae579bac69a466014024d31e5e60ebdde430f662e7eb87c4df5f8c54a4c927dcd16b65ddcaaf18866511a1e7cade3305093cd82c70f5ad698c8749fe235f97c6c891261debae0a7f0486a1e845cb5db8f84f36b1439195fb1a32679397982af048b03c78c82288c04820710353c1665ab73083f91c0dc81df6ee098ec3f122e49072f077d5da3d0c8ca790d8cd6c32e9b3960f4206041f1b48f7cc5481adde13c8c31f3a33bfbed7bcca2192c321154589aca25be769687bf551c012cdd5a27c5ef6c7b1fcc5487d0a82337cf6c2ee0578c2074691a8e00cc8b99166469edbeb71b0d53328565ad8f106010ea66b69b4e57955c261768bb07735ba491343d569e70022a723a6f8418a71410c5b37a0be013363960143d684a1059c204732f4541c6d1cc846473a060de3002fcac97b350bce25aa4017dcafd64631b9f5bce6fc218a64f48f1e098d33df63626cedb5044493bef12e0ab832a02dd207cd26abf206923026e0667a70e5c2e3792d5b33f393c20dffa1c1fa6bbeab827222c639a1680b2ba159c4e88619aec327f63e472a31a2d44d3e9a69af5d5158fbebbcb7fa43e512c6ce2097f6715c513964bcd6b625398401494297db34fe184e296b7d4f94b71e3e0ed79446ae8013eeef162174f023
b76496869e2ee0a91c74ff6357507122bac35659487f77494337533731cf9cdeb03bf677b7e73557c875f8aeaa3d3cb060af82360f8adfae104f807d09530990910b82ed0970bbead909a2931c8712690bc6fe66fdac789ed3929e87fbd0aacea812948cf3d0770695d239896b1c9fca042d78d30a14cf910316c3ce3f86829a47c6b71501b43168fee737ff7e5d6f217ab037cc5c22ccacc09de1c4746cccf60917de3d26f1667e038a8703f3d0a7d02beb17969ab2d8ae12daed94d5975b1c2fe347db644a62a38648aac649aa7eea8f52e0fd08d27f8f3002aac50e1f9a0b1b080b59bf412183fd9e7be8e73b691ca1ed0179c55ffcbad0434cc46d2530977bf912988036d131b54a9d7fbdd82722dbc696b5fec47daf23d3afc663ef605edf4a5591048a3ef6af5d7642ee4a8fef70395a7bb29986f1e367c91198ac8ac7a31e91f91ee5a903f23e391de003ccf32be86d3e4f7fa06c3269d9fd76cfb69e872a3338c6720fcae24c9aacde198699317212fe9a6603a78298bd737204c129f1df94ed5f86802674734825fd40cad96fd985a11ed8b005221765046d283414c5a552abe98e39f73e2e06e94a76704bf871c333b9ee2f544264676ff2a19ed3adf4c7600c77c3fc1a642d3a84de986d16e4e61c3ab0fa40c2d0e44acd964e77c015aeaea518a0c203bd79f8f2a307d17c17fd203adde81233ebe2e98a7e1e52dc684a6eeaa7d229398dd9d306f6b8ec219616e6aa6316dea3f7872909f945d7023e4809ec5c319d66759a7c26976e9b4834c95a13572581531e332b0f048d6d4a9ea41ac5e715f553b17bd50ca5c1471a4a7bfb9955c21b70f2570d62c972ebb474648912d318facca1cd8e57c08c02673125e21097b54403844873428d83b5a8b1e68e0a3a7f5e544a00f34cd93436da493573e43ba63153bc549856ce496ff174934b512727df6cbbddeeb7cafb0a142c9c4daa7d4513a166645197690ef0eb3bd8e9457e33422f65042d9a29dfe30d44619c79d1d7aaf3616b0adbdc4518eb19478ee6bb97bd182d38909b4d18c1067bcd2e99c0ca4303044f52e1c3f5f554ab2e03be2a50edadaa0bcfd8f031bd3f064142cf4566c531d626cac889e0ad77b1e31a8b4251a62673b454475c19abcbc74a4a933a4756d3a696cad536d60d7c3aa35835351428c4c56525c51b7d9dc8d0d6b2b9178fc1827669b4e7dbbe06b0aef3dd9214440e23c59e70f089aaa3c3ae1a099bfca18e932eda632e18fa54785388ec40b8cfda7684562c26483a06848d789fe8cad2c550dd9d6b9b325e43d2f7b89b9724b4dcfe92aea6c31ce89c98ea0b33958cbe8341a74b5f2e07292bdccd44ab223b0b53c67c67cf215cf995aa2472e3c1b15db001c09629c7ce3fe042206163a6f6918c
a17f00b6d887ce36fa43113f73213ec15111c5749bef6afde88d4163da98a6008103822b3d82be6f4a4166a603f58eddf3ccce73384cc0564c10ee7fce4c08891148e70e4233c97cde0a7878798b28a272cc34b575cc1a21e8a8ca4eec190329c90c9eefe89f804a2e2b8a93ecf3704ec3b398f4d154737ece445a0bb03cd56237fc836dd5fe82c3534b2c767e1c8159109ee2668a1e0eb6b329a3f7bae2d35ccc96a1e336e73819a06358363f9ebfbe3092f0c72f9afa1645192b29e2820c6b8dd86cb3cde530c2d51f2daca8c395ff519e1901e910ff8844b472bf00beed3a82c4623b65612173defaf3cde6c80200c0b0b69be7dd3cbf1aa0d806ef1958a7188b739742b1ba8c04ce1329c906433da04e11ea04251b1f499a880670080612c0a8128f3bca12c3fd7667c1a725438e418919123606a1ff8e03a9dba54903e86da0864ef56e2a61e1d095f207ce7f5251156d84dc18fcdb8d654ac0ff855fa345f4383f2cd8ebca2be4b3ff90b8363f200ecf4ef0bbe9bf0a05bd3e22f4a5a7655cf6b168dd485a6c849e6198ff463c838ea937056a1ee68fdbf5e8026e334a518c20134364eeed39a8318fdb7fa46ef37f5de27e1a5a32e3c69c55ca1d7680847ea022fdf09d9160bb73615b5df30250bfb1db06e23a81c151c9b94907591744a9428c1465bcd97a79c5437632bd1212d2f6e6486e39f8c5ffcbacf9c61ee6e342db24ae0827497a7cffd063ebc1ead1cf9e6e679caebfc1a86941096b9b913c8ea8bee1ddafb04781481759c3c607a2cd23b16629e9336864c2fc3258fe239e19d3f661b6307c269e563b687871ab2083ee344635c313c2b8ae7491461738791d2336b223aa6e9e060748f0162a01524bcdcb943186498ef6b8e9c864598739f6351c5f28f0f034f5bcec4e855dc8611c0520047d16af1b1b445b03361a5e1a319dd0024e7f5160f3c8553e0c36f461f64d9cc6f4646a206778259af0505985bafc859f96bf998f967c6940e1dab2a0313a2a8a2765e3a1eb216aebfa6c9e1ed629c31390f42e5902bda6299cdb8a30adf242d69ce550b8e971ceb41c33c4a5243c8131cdc227400339bb5eaf7f4cd2d5c32c608eda6c824b12a488948f2813857aaa69699991addee8dfa5e1650a5038f0d3371b4a22e9a0c3f497ed05a647f66a6a328cca31c99b40dbc3732905e339588fa6d94cc0dd8cacc2625baf75c2f457ad72bbb2e55b243ee9f3a46d5f23fb7e3331bc80ce32ae321819124573d62afeaa0e54fa2a3507072186f3212d00a3a37595c2d8336fa04e70e702cfb70299f525b4f4c86f39b8bc05415c94b3609b9cb1776212b696a9b4679333b93757e05af24fcf1b537265a200b57e8a4ae3a81a190b0fa255e4c2a29c165b58e692e557a35797d894
5251ca4b91bd6c5911b197ddf603276da1433459236254bd3514f1efac134f85c31b7287ce94fa92f3781f66be7b2e5d469072270cb1a6ab6a5de22f49906b5fea6295ae5634e9629232eb6504b72cfee55991deaa9f1baa8e387814f6d90c940259acb8f0028241d1b6a4056d0ad645d372e2b90c166756a8d22524959824938e96f62e9df5188660e10bc5e2894282d07a7fdc9d1c1439255ae1a5e18ca615d96704c94ce3469b6345460e0b54f769a9a120e6c5cac8f86d52049f7634525773785a76bf45a1a3004bf80091750863bd4a10d22402ec085ef9313065cd0d1c0f94589a323f1bf0e007666f7b0876485d1b8ca37abaf8f1767efa532f51fd0528ec47df6a779c5b1240d6907f8826296432d08d33e1068f8b6b33038e0d9d3f1371be01253212f7b225f62f2fa3637c948f887d1e6922f9e79d15f646395fdd62a27fb737fe192fa2ce988f1414c65fe13b59067363dabc08464b5996511e93337bc414af13cf13c3352f0b10700e12546430db0a4caef9c58272d92f837d0495fc42680a07ce5bb34e6957be78539b1fe175c1b7de25be0c937e84066a57206f25f9ab3aab4ba7419b26d54150c41427d73af07e4a2785f19b4cef2373c8b8c3922c789cac45008064397e716d21db8f54e91fd60229167cc923f7ee9400221daec5e79f9961d7d17c4654074241d2055ed48d41f37d9405365fd2d8abb5a7d8f9efdc96e839a8c2c9cbae8d5e8848c0292192d65a79ca323e2ac6933420772e1c9f837e3eea80d28f63ec5449bfea033b571f3b7e983d44c64ec184bfeba24c0c6b928fbe69ff42fab62aa40b411a2c810028399eec6efebbd445e7ee6eda386958767ac400f352c221955dbc3c00fa692d21f066e0369cffed2ec4dc28ebb39a523b67a74d0220496fb42408789e49753674a1f71a25a8e0942d2a8d40ea3df517aa5242e7d2f216247e693a415ac634346ab79b26d279edbc3607af8f6d2967552ae33a0db9c02e2fa9b61e1fc48ec84407b13b9c6ef51e9563a81d3cc7edbc8f21ba3a76bbf308df20ddf7927227ef07e8d758424f1821bb369df813bacbff566d2a4c044fa371d898e78a4d77e7334e0a7b575e9aa9d99202e9befd7357f710c570a058f3d3185f815b6a39e60dd0f1fe393d7007121d1327717637c6182c224ccc2fd748c16d61df0f8bd64d6c8365c4504bacc4570e6bf744bda4ac3519729a5ba8027012bb2a65a86f7b318828c0d498db681b3420d30873d42135719ede16965212bf31382322243f870842b4ffb72bb1f163a83dad4d88f2392ce7badc3867cad59c21eae8f73309d536ae3c4459c4bb7e5083622c4604a6d0d0825f8284b8e1b9ffd2ac04bce34ed12ef5fc47118bb11a0ba9d7c6d06464751a81a689e8bb55fc05
618d138629010126a638be5316e08bfd00c5c06350147fe7fda62c59a3e6bb0bb2d65c9d8ff292079ef52028a4431e27ab077748d84583b8a57f8511b91bf48ba2b19a18b166a79d0b90a665314e8f6693ce76385be97a000c30664754917617c1d7928eb6e005c7fa17e060bc500612d49647e039faa91c648b0390ec3cc90ad300e0eb1e9488abf29cb36f3bc837f0deb6aa93f1e84bae29f2186615398158322eb76e817576e5f26be6df809217836f4fc13d140e3728d3699f995f58743760f5c123d62e8f26e0cfdc4fd080f66ded0b3bb9beadf75289ed50909300948d702e39052a7a565240c29617555093412488adf8d20c243447ebd6e213b0ed92608f8bd3335204bb58e4cc93c126d87d6fdb464f07e35be3095d066a24e91aea4023dc49570ea89e18a25648843e941cf07357d416e1d087e71c35a846ef35373271b8090efc0de6ef3f3e6567e3c672cb4a67008a4ec567c6b9000e04205debe0017f437f4c273f33faaeb703bdd18133aa3936e74a37f832814be0d7ade20a739fb2b35a4888a499a0938ad7ea4178600f93bfc7f4204564c9c250540e85b9a3c2c9929e2bf87f5a93e677afb6545d68fb975c41d5905b50b0c4a9853ecce312fbdb15a5fd2f4ffe05d604e0cdbea25f9ded9324124e4a54e3364b99491aa57084b7e79447819ab8ae2a279f1eaeeb88c84323f63e0d6cb56536f79c15906fd1bab049e7c816e402cfc54ee72ec07fc0e51ac6032bf6d1478a661cfe916e0ce0bccf3701ee088269033749d4185a34cbda25d427451657eb8ef9c50c24c566610445ba7c0c73a76cd621a2ef59e258eef23a350a248b0ce83cc0163a26820d5041f81647bee1ca6bb71152a26246eaeaeab14632eaf6e18fadee1079b5933a23cdcbef368045603f084521e2edf2930f6c1a6e2b3507cc50266706a6314258a21b989110e34c541613453ca8cbe700dff6f13f4a1ebc47b3143bdc0b0fc7c872aea1f28e799ab58f8885ee1accda08002eee671fa174d7c4977d5272c7d115733bc4883b0b1161fff9fa1712aef03c5b2b0cec13cd85b45817fcf9ffe47db4e0ad7e502426bacd039bcdae8395b2288c53aa9b617787088038466407f6a8eeb018e9b5609e1387d5b842b80a98e3e36a4fcad3f610484391e8573084dfab2592ecb9fe441591179b276a6b1ec97cdb7de99acc865ffe3c1f93dc285eebde471005b64fd7686721db6b5bf36651c3a6c84160ce14204b77857c397c2878ed9051a7cdba68578c2737505ba85d6305b1cd80faf6e94ba75368ec44ada7114ffb30c3c78eea5f2896460a7ed9a096bc5c60ed8ff88d3a8562db0fb924c5337565b02df2813db922f97dfe5fa4196f62bedb13572a5027ce48475230a3846e7966f2309def3d8ad6c8
f1030964653c6fc84ecef8d8e397befa635755c9e6975975b05fdef9c63c7c52add64c1ebf1c5700874e622cbc2c9fc7efeb6e3850d4b90ed0b533aa4a7501bb9394bd3701063698b87e6f3dbf08ccdf333287dfeec5e74890f0bcf800cbc7a25a611d8db7f89aae569dcb72be999b63ed44624c13fe63c96a797f4c66220d0f3a95e36005db7bd08c5af8b78742c8bd00fdfe937ae56b4f821f68d0a3a819ad9ac4352a5880ccfa76f0f0ab4115767e18e26ce106e012ca71637e6e175c24afc7158a18c38af446c992f7af34f5d8bb2ed748818bb6a9733001b567272a874d059a516c4a08844ef7426a620870450f3cc1b1028d72d984e24f25fa423c8ed3b5a7d0b51388ab408b0039641f85b076326c97fde742b0aa2008a63f9341d0233b5049972ee463ba69ca2d3f352b35620d4c7786e2139c5fe2ef0488855e62a258ceca5c7cf0b10daeef04fa8dd94206998bb51d0b88d22601a81ee8c317e96bbff11bbfcb31eb96816ea251d187522f0d5c73cca584da617b7e1ef6eba086f75ad8aae79251e3b8115cdbb8d5a800886da2960c2dfc7b1603a39694f5050225c1a78ded7bc11a1f3923a8615d14e2b62e29a04a9bb7b8021959c626fd6be3973eb3c302209e2ab14e152bb2c56bab63e7e4b0a881c09045414efac609349b744837dc3a17701e4307bb5ef6f7f35e613d8ba047b7bdfe94b348238f18da0dd213cbabeb6da0509c79f359a7b03d837d2b259a81b822bb0bf3fcba9a931fffac2d1f5f8227724daacdf93730549ad515c3a24759f492e8c9caaec7b9d68a453f4c4c0ea8eb357ce4609bc8260e37774f592fe21f50462d1520a7cf37bb494a8bb58fc63720e2b55d9cf4eae8001638f55d6a1e20f20ff638e9f49435ef0b0bf720730d222a03232b8cc7c29b4843803aa809933d1533580d321d955e075c8b8b1aa035ecbefa20d0ae42ae269c41cbed4232068bff806509812ea57acfc4895cedbc32d47c290a0901dc365d1d5fa5f9b20c55e7af793e2d22a2ef2c8cff9202592d6ca0a254110fdd2a34bb3bf78adac99d0ce1fdd6b21b83a8e1072642430840f82dd3ec1cd24fa1109c7f81a53453a604b282899bd4910300ee28f19547466caa742e5df0fd70d149e1c7a8e9c056e08649afddb1ebaeea3112733d68a1f0020a6b0f607eb1a7814b89148a35b0d6c88a655132fb8213ea7f9c1701475e15033305de59709452d1d3abae815f8e52f7a175d27ee4d24130756a6c3b30b3b77a47320ad00e54442408920474fa7a78f29e6830caba0e50e0a8e1fcb2e5413d687d6570014eaf0a3b2f620b80dcf5f3ce977e9a414a5dde206335e28fcf5230c749501edd77a1c0ffb966a8d77886db859adfee8355c36ab0e8ec2c868aace30a511255fab236a5
ac8b7116e0daaa64c66eceb961774cf7722d26b8f262e83bd1d36383430d12ec8db00ba416b8c4cd6f72570b746c678c10400ddee09b452725187687fc783c09554c82a2ce4557741edc8ab894f05e5051ae4360705ff0d54c9434fc5685627e5702559fcdbf5953285cec72810b7c90f2f56a0d472014ba85a1280a2ae558b2b1ab0fd72b7f941bcbe129d2871490a3514d88277df7efb99a1f45a2027c58c7d7199f1022a3cb96d6d710c6176c29779135877033243a148dee5e8706757a06a4962833ac65490e785b14345720a8fab2f10f461ec1bfe738557065eab277e0a76f632fa9055898bddea2f56ea09f002aa526cd7f0eda4b18bd920f84531164e995c23a1845fb14d1d390969bd9ad6f801 diff --git a/zebra-test/src/vectors/block.rs b/zebra-test/src/vectors/block.rs index d25eb5d7c29..04d43d506e1 100644 --- a/zebra-test/src/vectors/block.rs +++ b/zebra-test/src/vectors/block.rs @@ -113,6 +113,26 @@ lazy_static! { (1_046_400, BLOCK_MAINNET_1046400_BYTES.as_ref()), (1_046_401, BLOCK_MAINNET_1046401_BYTES.as_ref()), (1_180_900, BLOCK_MAINNET_1180900_BYTES.as_ref()), + + // NU5 + // + // Contains: + // + // - First Sapling to Sapling V5 txs. + // - First Sapling to Sapling V4 tx after NU5. + (1_687_106, BLOCK_MAINNET_1687106_BYTES.as_ref()), + // - First transparent to Sapling V5 tx. + // - First Sapling to Orchard tx. + (1_687_107, BLOCK_MAINNET_1687107_BYTES.as_ref()), + // - First transparent V5 txs. + // - First Sapling to transparent V5 txs. + (1_687_108, BLOCK_MAINNET_1687108_BYTES.as_ref()), + // - First transparent to (transparent + Sapling) V5 tx. + (1_687_113, BLOCK_MAINNET_1687113_BYTES.as_ref()), + // - First Sapling to (Sapling + Orchard) tx. + (1_687_118, BLOCK_MAINNET_1687118_BYTES.as_ref()), + // - First Orchard to Orchard tx. + (1_687_121, BLOCK_MAINNET_1687121_BYTES.as_ref()), ].iter().cloned().collect(); /// Mainnet final Sprout roots, indexed by height. @@ -160,6 +180,27 @@ lazy_static! 
{ (1_046_400, SAPLING_FINAL_ROOT_MAINNET_1046400_BYTES.as_ref().try_into().unwrap()), (1_046_401, SAPLING_FINAL_ROOT_MAINNET_1046401_BYTES.as_ref().try_into().unwrap()), (1_180_900, SAPLING_FINAL_ROOT_MAINNET_1180900_BYTES.as_ref().try_into().unwrap()), + // NU5 + (1_687_106, SAPLING_FINAL_ROOT_MAINNET_1687106_BYTES.as_ref().try_into().unwrap()), + (1_687_107, SAPLING_FINAL_ROOT_MAINNET_1687107_BYTES.as_ref().try_into().unwrap()), + (1_687_108, SAPLING_FINAL_ROOT_MAINNET_1687108_BYTES.as_ref().try_into().unwrap()), + (1_687_113, SAPLING_FINAL_ROOT_MAINNET_1687113_BYTES.as_ref().try_into().unwrap()), + (1_687_118, SAPLING_FINAL_ROOT_MAINNET_1687118_BYTES.as_ref().try_into().unwrap()), + (1_687_121, SAPLING_FINAL_ROOT_MAINNET_1687121_BYTES.as_ref().try_into().unwrap()), + ].iter().cloned().collect(); + + /// Mainnet final Orchard roots (anchors), indexed by height. + /// + /// Pre-Orchard anchors are all-zeroes. If there are no Orchard actions in a block, the anchor + /// is the same as the one for the previous block. + pub static ref MAINNET_FINAL_ORCHARD_ROOTS: BTreeMap = [ + // NU5 + (1_687_106, ORCHARD_FINAL_ROOT_MAINNET_1687106_BYTES.as_ref().try_into().unwrap()), + (1_687_107, ORCHARD_FINAL_ROOT_MAINNET_1687107_BYTES.as_ref().try_into().unwrap()), + (1_687_108, ORCHARD_FINAL_ROOT_MAINNET_1687108_BYTES.as_ref().try_into().unwrap()), + (1_687_113, ORCHARD_FINAL_ROOT_MAINNET_1687113_BYTES.as_ref().try_into().unwrap()), + (1_687_118, ORCHARD_FINAL_ROOT_MAINNET_1687118_BYTES.as_ref().try_into().unwrap()), + (1_687_121, ORCHARD_FINAL_ROOT_MAINNET_1687121_BYTES.as_ref().try_into().unwrap()), ].iter().cloned().collect(); /// Testnet blocks, indexed by height @@ -223,7 +264,34 @@ lazy_static! 
{ (1_116_000, BLOCK_TESTNET_1116000_BYTES.as_ref()), (1_116_001, BLOCK_TESTNET_1116001_BYTES.as_ref()), (1_326_100, BLOCK_TESTNET_1326100_BYTES.as_ref()), - (1_599_199, BLOCK_TESTNET_1599199_BYTES.as_ref()), + + // NU5 + // + // Contains: + + // First V5 tx with: + // + // - 4 Sapling spends, and + // - 2 Orchard actions. + (1_842_421, BLOCK_TESTNET_1842421_BYTES.as_ref()), + // First V5 tx with: + // + // - 50 transparent inputs, and + // - 1 Sapling output. + (1_842_432, BLOCK_TESTNET_1842432_BYTES.as_ref()), + // First V5 tx with: + // + // - 2 transparent inputs, and + // - 2 Orchard actions. + (1_842_462, BLOCK_TESTNET_1842462_BYTES.as_ref()), + // First V5 tx with two Orchard actions and no other transfers. + (1_842_467, BLOCK_TESTNET_1842467_BYTES.as_ref()), + // First V5 tx with: + // + // - 1 transparent input, + // - 1 Sapling output, and + // - 2 Orchard actions. + (1_842_468, BLOCK_TESTNET_1842468_BYTES.as_ref()), ].iter().cloned().collect(); /// Testnet final Sprout roots, indexed by height. @@ -277,7 +345,25 @@ lazy_static! { (1_116_000, SAPLING_FINAL_ROOT_TESTNET_1116000_BYTES.as_ref().try_into().unwrap()), (1_116_001, SAPLING_FINAL_ROOT_TESTNET_1116001_BYTES.as_ref().try_into().unwrap()), (1_326_100, SAPLING_FINAL_ROOT_TESTNET_1326100_BYTES.as_ref().try_into().unwrap()), - (1_599_199, SAPLING_FINAL_ROOT_TESTNET_1599199_BYTES.as_ref().try_into().unwrap()), + // NU5 + (1_842_421, SAPLING_FINAL_ROOT_TESTNET_1842421_BYTES.as_ref().try_into().unwrap()), + (1_842_432, SAPLING_FINAL_ROOT_TESTNET_1842432_BYTES.as_ref().try_into().unwrap()), + (1_842_462, SAPLING_FINAL_ROOT_TESTNET_1842462_BYTES.as_ref().try_into().unwrap()), + (1_842_467, SAPLING_FINAL_ROOT_TESTNET_1842467_BYTES.as_ref().try_into().unwrap()), + (1_842_468, SAPLING_FINAL_ROOT_TESTNET_1842468_BYTES.as_ref().try_into().unwrap()), + ].iter().cloned().collect(); + + /// Testnet final Orchard roots (anchors), indexed by height. + /// + /// Pre-Orchard anchors are all-zeroes. 
If there are no Orchard actions in a block, the anchor + /// is the same as the one for the previous block. + pub static ref TESTNET_FINAL_ORCHARD_ROOTS: BTreeMap = [ + // NU5 + (1_842_421, ORCHARD_FINAL_ROOT_TESTNET_1842421_BYTES.as_ref().try_into().unwrap()), + (1_842_432, ORCHARD_FINAL_ROOT_TESTNET_1842432_BYTES.as_ref().try_into().unwrap()), + (1_842_462, ORCHARD_FINAL_ROOT_TESTNET_1842462_BYTES.as_ref().try_into().unwrap()), + (1_842_467, ORCHARD_FINAL_ROOT_TESTNET_1842467_BYTES.as_ref().try_into().unwrap()), + (1_842_468, ORCHARD_FINAL_ROOT_TESTNET_1842468_BYTES.as_ref().try_into().unwrap()), ].iter().cloned().collect(); // Mainnet @@ -550,6 +636,66 @@ lazy_static! { <[u8; 32]>::from_hex("4a51c1b879f49637873ac4b261e9c625e16d9400b22d8aa4f27cd6fd1138ddda") .expect("final root bytes are in valid hex representation").rev(); + // NU5 + pub static ref BLOCK_MAINNET_1687106_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-106.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687106_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("41f3adfe94f336b6581923c9d20e07bccab0c47cd57ecb66cb9f35bb3e62066f") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687106_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_MAINNET_1687107_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-107.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687107_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("4b013ee86a4a7b19871aa9b7a1dff0b29e0a737812c8970356c18340752957c7") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687107_BYTES: [u8; 32] = + <[u8; 
32]>::from_hex("7b61fc613cea5c2c84c5e2c64d4fd4afb8c8c9d10dce9bcad49431c9cf32f131") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_MAINNET_1687108_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-108.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687108_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("174d81c4d858ab7932c424ee44f5e47ceafc50fcc44fb39bfaf74258570eb9b2") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687108_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("7b61fc613cea5c2c84c5e2c64d4fd4afb8c8c9d10dce9bcad49431c9cf32f131") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_MAINNET_1687113_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-113.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687113_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("53189fd8855fb8d8f49a4cf505669ce6fd95e28abfe8f3def7c2493b1ae4fc1f") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687113_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("ed17182c783c649f53c0ecfe900a58cd818e89d2784f750df6c906999337dc10") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_MAINNET_1687118_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-118.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687118_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("50f2481034cb80ec7e3e56177981ab47739e5a0c650638b2ac29909468c5f225") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687118_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("d5226e91ca7f9275d7f62711fa52875cdbae6cfc689815c9872aebfe64b60f34") + .expect("final 
root bytes are in valid hex representation").rev(); + pub static ref BLOCK_MAINNET_1687121_BYTES: Vec = + >::from_hex(include_str!("block-main-1-687-121.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_MAINNET_1687121_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("39e29473df7f0692ab6072d65389f6626611fea9f00aea14c0d80add9e9ef6e5") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_MAINNET_1687121_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("09b17d8907236adea12b83ed69054f4996686d7c4ca76db25c4c48b90e272b0f") + .expect("final root bytes are in valid hex representation").rev(); + + // Sapling treestate. + pub static ref SAPLING_TREESTATE_MAINNET_419201_STRING: String = + String::from(include_str!("sapling-treestate-main-0-419-201.txt")); + // Testnet // Genesis/BeforeOverwinter @@ -852,20 +998,52 @@ lazy_static! { <[u8; 32]>::from_hex("2b30b19f4254709fe365bd0b381b2e3d9d0c933eb4dba4dd1d07f0f6e196a183") .expect("final root bytes are in valid hex representation").rev(); - // Nu5 transition - // for i in 1599199 1599200 1599201; do - // zcash-cli -testnet getblock $i 0 > block-test-$[i/1000000]-$[i/1000%1000]-$[i%1000].txt - // done - pub static ref BLOCK_TESTNET_1599199_BYTES: Vec = - >::from_hex(include_str!("block-test-1-599-199.txt").trim()) + // NU5 + pub static ref BLOCK_TESTNET_1842421_BYTES: Vec = + >::from_hex(include_str!("block-test-1-842-421.txt").trim()) .expect("Block bytes are in valid hex representation"); - pub static ref SAPLING_FINAL_ROOT_TESTNET_1599199_BYTES: [u8; 32] = - <[u8; 32]>::from_hex("4de75d10def701ad22ecc17517a3adc8789ea8c214ac5bfc917b8924377e6c89") + pub static ref SAPLING_FINAL_ROOT_TESTNET_1842421_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("5cad695f17ae06cc165e01938fe95cac3ea0fbd236dc9c806f3411e0929309a6") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_TESTNET_1842421_BYTES: [u8; 
32] = + <[u8; 32]>::from_hex("28dfaa94b74670863beb1088ee3d97b38960c6c297c9dcf3d57d5a9259616523") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_TESTNET_1842432_BYTES: Vec = + >::from_hex(include_str!("block-test-1-842-432.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_TESTNET_1842432_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("68626c76ad774e3982450c20900415fd85b33d820627565d588c0c22437d8c62") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_TESTNET_1842432_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("28dfaa94b74670863beb1088ee3d97b38960c6c297c9dcf3d57d5a9259616523") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_TESTNET_1842462_BYTES: Vec = + >::from_hex(include_str!("block-test-1-842-462.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_TESTNET_1842462_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("4c06beb18bb0cb22879f04c2c80aa33450d8e02ddd3d2cbc441f40f8cd8ea0a8") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_TESTNET_1842462_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("7750114ca6b95e44197e30ec98cadfb0dec1f1fb1a416eb4ef87f1dffb0f5937") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_TESTNET_1842467_BYTES: Vec = + >::from_hex(include_str!("block-test-1-842-467.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_TESTNET_1842467_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("13a33099eb9b8dafc031271938f35b627eb0811f124847abbfb43b587c6e11ae") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_TESTNET_1842467_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("580aceb586f30a5801a23414464aa8482add97b6d55c06483b4ffb725e0e793b") + 
.expect("final root bytes are in valid hex representation").rev(); + pub static ref BLOCK_TESTNET_1842468_BYTES: Vec = + >::from_hex(include_str!("block-test-1-842-468.txt").trim()) + .expect("Block bytes are in valid hex representation"); + pub static ref SAPLING_FINAL_ROOT_TESTNET_1842468_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("555af4b0b2fa18162d6280c942fae6cdc846b9c4a3126df682821db250dc2bb3") + .expect("final root bytes are in valid hex representation").rev(); + pub static ref ORCHARD_FINAL_ROOT_TESTNET_1842468_BYTES: [u8; 32] = + <[u8; 32]>::from_hex("66c47f2160474363948150cbb5d53f3c17efa7456bd843dd9b851ddbcb6fb002") .expect("final root bytes are in valid hex representation").rev(); - - // Sapling note commitment tree. - pub static ref SAPLING_TREESTATE_MAINNET_419201_STRING: String = - String::from(include_str!("sapling-treestate-main-0-419-201.txt")); } #[cfg(test)] From 050cdc39e89ae9ccf25bbf6f0598f386cb7307a7 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 15 Jan 2025 13:02:08 +0100 Subject: [PATCH 049/245] Remove the "release crates" workflow (#9120) ## Motivation Fix https://github.com/ZcashFoundation/zebra/actions/runs/12751834865/job/35539829251?pr=9114. ## Solution - Remove the workflow and its associated bash script. ### Rationale > It didn't catch publishing issues in the past, and it skips a bunch of things anyway. > ..., it wasn't doing much with `--no-verify` anyway. > It was working fine with the previous version of Rust. I think the latest stable version broke `--no-verify`, because it wasn't pulling dependencies from [crates.io](http://crates.io/) before (which is how we ended up with git sources on main when we were trying to publish Zebra), but now it does even if `--no-verify` is passed in explicitly. 
From 91bfb2df3eb589b8459f3682e430e3f7a800aa12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 17:58:39 +0000 Subject: [PATCH 050/245] build(deps): bump docker/build-push-action in the devops group (#9125) Bumps the devops group with 1 update: [docker/build-push-action](https://github.com/docker/build-push-action). Updates `docker/build-push-action` from 6.11.0 to 6.12.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.11.0...v6.12.0) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/sub-build-docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 31e965976a8..aba8e160313 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -161,7 +161,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.11.0 + uses: docker/build-push-action@v6.12.0 with: target: ${{ inputs.dockerfile_target }} context: . 
From 43869bfcb0bb3fe5f0b36bf26bc6c51004a5b943 Mon Sep 17 00:00:00 2001 From: idky137 <150072198+idky137@users.noreply.github.com> Date: Wed, 15 Jan 2025 20:22:27 +0000 Subject: [PATCH 051/245] Add public getter / setter func to Zebra-RPC::methods.rs (#9113) * added getters and setter for GetInfo and GetBlockchainInfo structs * updated BlockHeader * added fetcher for address strings * publicised address balance * added getter and setter for SentTransactionHash * finished adding pub func * added pub constructor for addressstrings * added debug to legacycode * review changes --- zebra-rpc/src/methods.rs | 162 +++++++++++++++++++++++++++++++++- zebra-rpc/src/server/error.rs | 2 +- 2 files changed, 161 insertions(+), 3 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 8634ec43ef5..7b3611c72a8 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -1489,6 +1489,18 @@ impl Default for GetInfo { } } +impl GetInfo { + /// Constructs [`GetInfo`] from its constituent parts. + pub fn from_parts(build: String, subversion: String) -> Self { + Self { build, subversion } + } + + /// Returns the contents of ['GetInfo']. + pub fn into_parts(self) -> (String, String) { + (self.build, self.subversion) + } +} + /// Response to a `getblockchaininfo` RPC request. /// /// See the notes for the [`Rpc::get_blockchain_info` method]. @@ -1538,6 +1550,68 @@ impl Default for GetBlockChainInfo { } } +impl GetBlockChainInfo { + /// Creates a new [`GetBlockChainInfo`] instance. + pub fn new( + chain: String, + blocks: Height, + best_block_hash: block::Hash, + estimated_height: Height, + value_pools: [types::ValuePoolBalance; 5], + upgrades: IndexMap, + consensus: TipConsensusBranch, + ) -> Self { + Self { + chain, + blocks, + best_block_hash, + estimated_height, + value_pools, + upgrades, + consensus, + } + } + + /// Returns the current network name as defined in BIP70 (main, test, regtest). 
+ pub fn chain(&self) -> String { + self.chain.clone() + } + + /// Returns the current number of blocks processed in the server. + pub fn blocks(&self) -> Height { + self.blocks + } + + /// Returns the hash of the current best chain tip block, in big-endian order, hex-encoded. + pub fn best_block_hash(&self) -> &block::Hash { + &self.best_block_hash + } + + /// Returns the estimated height of the chain. + /// + /// If syncing, the estimated height of the chain, else the current best height, numeric. + /// + /// In Zebra, this is always the height estimate, so it might be a little inaccurate. + pub fn estimated_height(&self) -> Height { + self.estimated_height + } + + /// Returns the value pool balances. + pub fn value_pools(&self) -> &[types::ValuePoolBalance; 5] { + &self.value_pools + } + + /// Returns the network upgrades. + pub fn upgrades(&self) -> &IndexMap { + &self.upgrades + } + + /// Returns the Branch IDs of the current and upcoming consensus rules. + pub fn consensus(&self) -> &TipConsensusBranch { + &self.consensus + } +} + /// A wrapper type with a list of transparent address strings. /// /// This is used for the input parameter of [`RpcServer::get_address_balance`], @@ -1555,6 +1629,13 @@ impl AddressStrings { AddressStrings { addresses } } + /// Creates a new [`AddessStrings`] from a given vector, returns an error if any addresses are incorrect. + pub fn new_valid(addresses: Vec) -> Result { + let address_strings = Self { addresses }; + address_strings.clone().valid_addresses()?; + Ok(address_strings) + } + /// Given a list of addresses as strings: /// - check if provided list have all valid transparent addresses. /// - return valid addresses as a set of `Address`. @@ -1573,13 +1654,21 @@ impl AddressStrings { Ok(valid_addresses) } + + /// Given a list of addresses as strings: + /// - check if provided list have all valid transparent addresses. + /// - return valid addresses as a vec of strings. 
+ pub fn valid_address_strings(self) -> Result> { + self.clone().valid_addresses()?; + Ok(self.addresses) + } } /// The transparent balance of a set of addresses. #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, serde::Serialize)] pub struct AddressBalance { /// The total transparent balance. - balance: u64, + pub balance: u64, } /// A hex-encoded [`ConsensusBranchId`] string. @@ -1696,6 +1785,18 @@ impl Default for SentTransactionHash { } } +impl SentTransactionHash { + /// Constructs a new [`SentTransactionHash`]. + pub fn new(hash: transaction::Hash) -> Self { + SentTransactionHash(hash) + } + + /// Returns the contents of ['SentTransactionHash']. + pub fn inner(&self) -> transaction::Hash { + self.0 + } +} + /// Response to a `getblock` RPC request. /// /// See the notes for the [`RpcServer::get_block`] method. @@ -1881,7 +1982,7 @@ pub struct GetBlockHeaderObject { /// The Equihash solution in the requested block header. #[serde(with = "hex")] - solution: Solution, + pub solution: Solution, /// The difficulty threshold of the requested block header displayed in compact form. #[serde(with = "hex")] @@ -2071,6 +2172,48 @@ impl Default for GetAddressUtxos { } } +impl GetAddressUtxos { + /// Constructs a new instance of [`GetAddressUtxos`]. + pub fn from_parts( + address: transparent::Address, + txid: transaction::Hash, + output_index: OutputIndex, + script: transparent::Script, + satoshis: u64, + height: Height, + ) -> Self { + GetAddressUtxos { + address, + txid, + output_index, + script, + satoshis, + height, + } + } + + /// Returns the contents of [`GetAddressUtxos`]. + pub fn into_parts( + &self, + ) -> ( + transparent::Address, + transaction::Hash, + OutputIndex, + transparent::Script, + u64, + Height, + ) { + ( + self.address.clone(), + self.txid, + self.output_index, + self.script.clone(), + self.satoshis, + self.height, + ) + } +} + /// A struct to use as parameter of the `getaddresstxids`. 
/// /// See the notes for the [`Rpc::get_address_tx_ids` method]. @@ -2084,6 +2227,21 @@ pub struct GetAddressTxIdsRequest { end: u32, } +impl GetAddressTxIdsRequest { + /// Constructs [`GetAddressTxIdsRequest`] from its constituent parts. + pub fn from_parts(addresses: Vec, start: u32, end: u32) -> Self { + GetAddressTxIdsRequest { + addresses, + start, + end, + } + } + /// Returns the contents of [`GetAddressTxIdsRequest`]. + pub fn into_parts(&self) -> (Vec, u32, u32) { + (self.addresses.clone(), self.start, self.end) + } +} + /// Information about the sapling and orchard note commitment trees if any. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] pub struct GetBlockTrees { diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs index 5130a16d533..835e3c4581c 100644 --- a/zebra-rpc/src/server/error.rs +++ b/zebra-rpc/src/server/error.rs @@ -8,7 +8,7 @@ use jsonrpsee_types::{ErrorCode, ErrorObject, ErrorObjectOwned}; /// ## Notes /// /// - All explicit discriminants fit within `i64`. 
-#[derive(Default)] +#[derive(Default, Debug)] pub enum LegacyCode { // General application defined errors /// `std::exception` thrown in command handling From 522d955a2ad783464390bd034d54366a5aa98b9f Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 15 Jan 2025 21:22:31 +0100 Subject: [PATCH 052/245] change(consensus): Optimize checks for coinbase transactions (#9126) * Avoid tx conversion in coinbase tx * Update a section number * Refactor the test for decrypting coinbase outputs --- zebra-consensus/src/error.rs | 3 + zebra-consensus/src/transaction/check.rs | 15 ++- zebra-consensus/src/transaction/tests.rs | 139 ++++++++++++++--------- 3 files changed, 102 insertions(+), 55 deletions(-) diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index ac7e339eb55..caf68bad225 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -83,6 +83,9 @@ pub enum TransactionError { #[error("non-coinbase transactions MUST NOT have coinbase inputs")] NonCoinbaseHasCoinbaseInput, + #[error("the tx is not coinbase, but it should be")] + NotCoinbase, + #[error("transaction is locked until after block height {}", _0.0)] LockedUntilAfterBlockHeight(block::Height), diff --git a/zebra-consensus/src/transaction/check.rs b/zebra-consensus/src/transaction/check.rs index b7338bbdadd..3e12578cfe6 100644 --- a/zebra-consensus/src/transaction/check.rs +++ b/zebra-consensus/src/transaction/check.rs @@ -309,7 +309,7 @@ where /// # Consensus /// /// > [Heartwood onward] All Sapling and Orchard outputs in coinbase transactions MUST decrypt to a note -/// > plaintext, i.e. the procedure in § 4.19.3 ‘Decryption using a Full Viewing Key ( Sapling and Orchard )’ on p. 67 +/// > plaintext, i.e. the procedure in § 4.20.3 ‘Decryption using a Full Viewing Key (Sapling and Orchard)’ /// > does not return ⊥, using a sequence of 32 zero bytes as the outgoing viewing key. 
(This implies that before /// > Canopy activation, Sapling outputs of a coinbase transaction MUST have note plaintext lead byte equal to /// > 0x01.) @@ -330,6 +330,14 @@ pub fn coinbase_outputs_are_decryptable( network: &Network, height: Height, ) -> Result<(), TransactionError> { + // Do quick checks first so we can avoid an expensive tx conversion + // in `zcash_note_encryption::decrypts_successfully`. + + // The consensus rule only applies to coinbase txs with shielded outputs. + if !transaction.has_shielded_outputs() { + return Ok(()); + } + // The consensus rule only applies to Heartwood onward. if height < NetworkUpgrade::Heartwood @@ -339,6 +347,11 @@ pub fn coinbase_outputs_are_decryptable( return Ok(()); } + // The passed tx should have been be a coinbase tx. + if !transaction.is_coinbase() { + return Err(TransactionError::NotCoinbase); + } + if !zcash_note_encryption::decrypts_successfully(transaction, network, height) { return Err(TransactionError::CoinbaseOutputsNotDecryptable); } diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 044c8b01842..597f6b9335f 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -3289,69 +3289,100 @@ fn add_to_sprout_pool_after_nu() { ); } +/// Checks that Heartwood onward, all Sapling and Orchard outputs in coinbase txs decrypt to a note +/// plaintext, i.e. the procedure in § 4.20.3 ‘Decryption using a Full Viewing Key (Sapling and +/// Orchard )’ does not return ⊥, using a sequence of 32 zero bytes as the outgoing viewing key. We +/// will refer to such a sequence as the _zero key_. 
#[test] -fn coinbase_outputs_are_decryptable_for_historical_blocks() -> Result<(), Report> { +fn coinbase_outputs_are_decryptable() -> Result<(), Report> { let _init_guard = zebra_test::init(); - for network in Network::iter() { - coinbase_outputs_are_decryptable_for_historical_blocks_for_network(network)?; - } + for net in Network::iter() { + let mut tested_post_heartwood_shielded_coinbase_tx = false; + let mut tested_pre_heartwood_shielded_coinbase_tx = false; - Ok(()) -} + let mut tested_post_heartwood_unshielded_coinbase_tx = false; + let mut tested_pre_heartwood_unshielded_coinbase_tx = false; -fn coinbase_outputs_are_decryptable_for_historical_blocks_for_network( - network: Network, -) -> Result<(), Report> { - let block_iter = network.block_iter(); - - let mut tested_coinbase_txs = 0; - let mut tested_non_coinbase_txs = 0; - - for (height, block) in block_iter { - let block = block - .zcash_deserialize_into::() - .expect("block is structurally valid"); - let height = Height(*height); - let heartwood_onward = height - >= NetworkUpgrade::Heartwood - .activation_height(&network) - .unwrap(); - let coinbase_tx = block - .transactions - .first() - .expect("must have coinbase transaction"); - - // Check if the coinbase outputs are decryptable with an all-zero key. - if heartwood_onward - && (coinbase_tx.sapling_outputs().count() > 0 - || coinbase_tx.orchard_actions().count() > 0) - { - // We are only truly decrypting something if it's Heartwood-onward - // and there are relevant outputs. - tested_coinbase_txs += 1; - } - check::coinbase_outputs_are_decryptable(coinbase_tx, &network, height) - .expect("coinbase outputs must be decryptable with an all-zero key"); - - // For remaining transactions, check if existing outputs are NOT decryptable - // with an all-zero key, if applicable. 
- for tx in block.transactions.iter().skip(1) { - let has_outputs = tx.sapling_outputs().count() > 0 || tx.orchard_actions().count() > 0; - if has_outputs && heartwood_onward { - tested_non_coinbase_txs += 1; - check::coinbase_outputs_are_decryptable(tx, &network, height).expect_err( - "decrypting a non-coinbase output with an all-zero key should fail", + let mut tested_post_heartwood_shielded_non_coinbase_tx = false; + let mut tested_pre_heartwood_shielded_non_coinbase_tx = false; + + let mut tested_post_heartwood_unshielded_non_coinbase_tx = false; + let mut tested_pre_heartwood_unshielded_non_coinbase_tx = false; + + for (height, block) in net.block_iter() { + let block = block.zcash_deserialize_into::().expect("block"); + let height = Height(*height); + let is_heartwood = height >= NetworkUpgrade::Heartwood.activation_height(&net).unwrap(); + let coinbase = block.transactions.first().expect("coinbase transaction"); + + if coinbase.has_shielded_outputs() && is_heartwood { + tested_post_heartwood_shielded_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(coinbase, &net, height).expect( + "post-Heartwood shielded coinbase outputs must be decryptable with the zero key", ); - } else { - check::coinbase_outputs_are_decryptable(tx, &network, height) - .expect("a transaction without outputs, or pre-Heartwood, must be considered 'decryptable'"); + } + + if coinbase.has_shielded_outputs() && !is_heartwood { + tested_pre_heartwood_shielded_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(coinbase, &net, height) + .expect("the consensus rule does not apply to pre-Heartwood txs"); + } + + if !coinbase.has_shielded_outputs() && is_heartwood { + tested_post_heartwood_unshielded_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(coinbase, &net, height) + .expect("the consensus rule does not apply to txs with no shielded outputs"); + } + + if !coinbase.has_shielded_outputs() && !is_heartwood { + tested_pre_heartwood_unshielded_coinbase_tx = 
true; + check::coinbase_outputs_are_decryptable(coinbase, &net, height) + .expect("the consensus rule does not apply to pre-Heartwood txs"); + } + + // For non-coinbase txs, check if existing outputs are NOT decryptable with an all-zero + // key, if applicable. + for non_coinbase in block.transactions.iter().skip(1) { + if non_coinbase.has_shielded_outputs() && is_heartwood { + tested_post_heartwood_shielded_non_coinbase_tx = true; + assert_eq!( + check::coinbase_outputs_are_decryptable(non_coinbase, &net, height), + Err(TransactionError::NotCoinbase) + ) + } + + if non_coinbase.has_shielded_outputs() && !is_heartwood { + tested_pre_heartwood_shielded_non_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(non_coinbase, &net, height) + .expect("the consensus rule does not apply to pre-Heartwood txs"); + } + + if !non_coinbase.has_shielded_outputs() && is_heartwood { + tested_post_heartwood_unshielded_non_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(non_coinbase, &net, height).expect( + "the consensus rule does not apply to txs with no shielded outputs", + ); + } + + if !non_coinbase.has_shielded_outputs() && !is_heartwood { + tested_pre_heartwood_unshielded_non_coinbase_tx = true; + check::coinbase_outputs_are_decryptable(non_coinbase, &net, height) + .expect("the consensus rule does not apply to pre-Heartwood txs"); + } } } - } - assert!(tested_coinbase_txs > 0, "ensure it was actually tested"); - assert!(tested_non_coinbase_txs > 0, "ensure it was actually tested"); + assert!(tested_post_heartwood_shielded_coinbase_tx); + // We have no pre-Heartwood shielded coinbase txs. 
+ assert!(!tested_pre_heartwood_shielded_coinbase_tx); + assert!(tested_post_heartwood_unshielded_coinbase_tx); + assert!(tested_pre_heartwood_unshielded_coinbase_tx); + assert!(tested_post_heartwood_shielded_non_coinbase_tx); + assert!(tested_pre_heartwood_shielded_non_coinbase_tx); + assert!(tested_post_heartwood_unshielded_non_coinbase_tx); + assert!(tested_pre_heartwood_unshielded_non_coinbase_tx); + } Ok(()) } From 1976a569709451cc16d9b002f62d6f0c5fb4a5d9 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 16 Jan 2025 01:37:28 +0000 Subject: [PATCH 053/245] feat(actions): use GitHub `environments` for infra deployments (#9003) * feat(actions): use GitHub `environments` for infra deployments * chore(workflows): enhance environment handling in CI/CD configurations - Added dynamic environment assignment in `cd-deploy-nodes-gcp.yml` based on event type (release or dev). - Updated `sub-build-docker-image.yml` to utilize the `inputs.environment` for environment configuration. - Introduced a strategy matrix for environment selection in `sub-deploy-integration-tests-gcp.yml`, allowing for both dev and prod environments based on the branch. - Ensured `sub-find-cached-disks.yml` uses the `inputs.environment` for consistency across workflows. * fix(workflows): streamline environment input handling in CI configurations - Removed the required environment input from `sub-ci-integration-tests-gcp.yml`. - Updated comments in `sub-deploy-integration-tests-gcp.yml` to clarify the strategy for creating images in dev and prod environments based on the main branch. * test: just set the environmet when diff to dev * refactor(workflows): unify environment handling across CI configurations - Removed the optional environment input from multiple workflow files, including `manual-zcashd-deploy.yml`, `sub-build-docker-image.yml`, and `sub-deploy-integration-tests-gcp.yml`. 
- Updated environment assignment logic to consistently use 'prod' for release events across `cd-deploy-nodes-gcp.yml`, `sub-build-docker-image.yml`, `sub-find-cached-disks.yml`, and `sub-deploy-integration-tests-gcp.yml`. - Enhanced clarity in comments regarding environment strategies in `sub-deploy-integration-tests-gcp.yml`. * fix(workflows): update environment assignment logic for CI configurations - Changed environment assignment in `cd-deploy-nodes-gcp.yml`, `sub-build-docker-image.yml`, and `sub-find-cached-disks.yml` to use 'dev' as a fallback when the event is not a release, to avoid a `false` fallback --- .github/workflows/cd-deploy-nodes-gcp.yml | 1 + .github/workflows/chore-delete-gcp-resources.yml | 8 ++++++++ .github/workflows/sub-build-docker-image.yml | 1 + .github/workflows/sub-deploy-integration-tests-gcp.yml | 7 ++++++- .github/workflows/sub-find-cached-disks.yml | 1 + 5 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 6eb3f10e9d6..b515179732b 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -239,6 +239,7 @@ jobs: timeout-minutes: 60 env: CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} + environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} permissions: contents: 'read' id-token: 'write' diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index b4e9eda2f64..fe0c42de7c6 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -38,6 +38,10 @@ jobs: permissions: contents: 'read' id-token: 'write' + strategy: + matrix: + environment: [dev, prod] + environment: ${{ matrix.environment }} steps: - uses: actions/checkout@v4.2.2 with: @@ -105,6 +109,10 @@ jobs: permissions: contents: 'read' id-token: 'write' + strategy: + matrix: + 
environment: [dev, prod] + environment: ${{ matrix.environment }} steps: - uses: actions/checkout@v4.2.2 with: diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index aba8e160313..95b472b18e4 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -70,6 +70,7 @@ jobs: name: Build images timeout-minutes: 210 runs-on: ubuntu-latest + environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} outputs: image_digest: ${{ steps.docker_build.outputs.digest }} image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 05b2c42019f..fd737de9460 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -384,6 +384,11 @@ jobs: permissions: contents: 'read' id-token: 'write' + # We want to create main branch images for both dev and prod environments + strategy: + matrix: + environment: ${{ github.ref_name == 'main' && fromJSON('["dev", "prod"]') }} + environment: ${{ matrix.environment }} steps: - uses: actions/checkout@v4.2.2 with: @@ -663,7 +668,7 @@ jobs: --source-disk-zone=${{ vars.GCP_ZONE }} \ --storage-location=us \ --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ - --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ 
inputs.test_id }},app-name=${{ inputs.app_name }}" + --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},branch=${{ env.GITHUB_REF_SLUG_URL }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" else echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME" fi diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 9c2ee919d32..a45e3f731fa 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -48,6 +48,7 @@ jobs: get-cached-disks: name: Get ${{ inputs.test_id || inputs.network }} cached disk runs-on: ubuntu-latest + environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} outputs: state_version: ${{ steps.get-available-disks.outputs.state_version }} cached_disk_name: ${{ steps.get-available-disks.outputs.cached_disk_name }} From 926993aa8561ee20316ca92c62e85e89c3858280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Thu, 16 Jan 2025 10:06:19 +0100 Subject: [PATCH 054/245] Update copyright year to 2025 (#9127) --- LICENSE-APACHE | 2 +- LICENSE-MIT | 2 +- tower-batch-control/LICENSE | 2 +- zebra-network/LICENSE | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/LICENSE-APACHE b/LICENSE-APACHE index 456a4aa21ad..26d4cc05323 100644 --- a/LICENSE-APACHE +++ b/LICENSE-APACHE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2024 Zcash Foundation +Copyright (c) 2019-2025 Zcash 
Foundation Apache License Version 2.0, January 2004 diff --git a/LICENSE-MIT b/LICENSE-MIT index d85e83e16ab..7f46ce5537a 100644 --- a/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2019-2024 Zcash Foundation +Copyright (c) 2019-2025 Zcash Foundation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/tower-batch-control/LICENSE b/tower-batch-control/LICENSE index 5428318519b..c4e34c0f7c6 100644 --- a/tower-batch-control/LICENSE +++ b/tower-batch-control/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2024 Zcash Foundation +Copyright (c) 2019-2025 Zcash Foundation Copyright (c) 2019 Tower Contributors Permission is hereby granted, free of charge, to any diff --git a/zebra-network/LICENSE b/zebra-network/LICENSE index 5428318519b..c4e34c0f7c6 100644 --- a/zebra-network/LICENSE +++ b/zebra-network/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2024 Zcash Foundation +Copyright (c) 2019-2025 Zcash Foundation Copyright (c) 2019 Tower Contributors Permission is hereby granted, free of charge, to any From baf20fa9f9968104fc5e3d40775fae95f6338e41 Mon Sep 17 00:00:00 2001 From: Pili Guerra <1311133+mpguerra@users.noreply.github.com> Date: Fri, 17 Jan 2025 12:24:03 +0100 Subject: [PATCH 055/245] fix: typos with `address` (#9134) * Fix typos in RPC docs * Fixing other typos in zebra-state --- zebra-rpc/src/methods.rs | 2 +- zebra-state/src/request.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 7b3611c72a8..450b8c9cfa9 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -1629,7 +1629,7 @@ impl AddressStrings { AddressStrings { addresses } } - /// Creates a new [`AddessStrings`] from a given vector, returns an error if any addresses are incorrect. + /// Creates a new [`AddressStrings`] from a given vector, returns an error if any addresses are incorrect. 
pub fn new_valid(addresses: Vec) -> Result { let address_strings = Self { addresses }; address_strings.clone().valid_addresses()?; diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 5894b7da55a..0eeb9b940b2 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -1099,8 +1099,8 @@ impl ReadRequest { ReadRequest::SaplingSubtrees { .. } => "sapling_subtrees", ReadRequest::OrchardSubtrees { .. } => "orchard_subtrees", ReadRequest::AddressBalance { .. } => "address_balance", - ReadRequest::TransactionIdsByAddresses { .. } => "transaction_ids_by_addesses", - ReadRequest::UtxosByAddresses(_) => "utxos_by_addesses", + ReadRequest::TransactionIdsByAddresses { .. } => "transaction_ids_by_addresses", + ReadRequest::UtxosByAddresses(_) => "utxos_by_addresses", ReadRequest::CheckBestChainTipNullifiersAndAnchors(_) => { "best_chain_tip_nullifiers_anchors" } From 94e0d81a71ce718f04df6fbcb85b0a49b66a79a9 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 17 Jan 2025 21:19:45 +0800 Subject: [PATCH 056/245] chore: delete repetitive words (#9131) * repeat words * Update zebra-chain/src/block/serialize.rs Co-authored-by: Pili Guerra <1311133+mpguerra@users.noreply.github.com> --------- Co-authored-by: Pili Guerra <1311133+mpguerra@users.noreply.github.com> --- book/src/dev/zebra-dependencies-for-audit.md | 2 +- tower-batch-control/src/worker.rs | 2 +- zebra-chain/src/history_tree/tests/vectors.rs | 2 +- zebra-chain/src/transaction/builder.rs | 2 +- zebra-consensus/src/checkpoint.rs | 2 +- zebra-rpc/src/queue.rs | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/book/src/dev/zebra-dependencies-for-audit.md b/book/src/dev/zebra-dependencies-for-audit.md index f33aba321b8..7655ff275de 100644 --- a/book/src/dev/zebra-dependencies-for-audit.md +++ b/book/src/dev/zebra-dependencies-for-audit.md @@ -63,7 +63,7 @@ The following consensus, security, and functional changes are in Zebra's develop The following list of dependencies is out 
of scope for the audit. -Please ignore the dependency versions in these tables, some of them are are outdated. All versions of these dependencies are out of scope. +Please ignore the dependency versions in these tables, some of them are outdated. All versions of these dependencies are out of scope. The latest versions of Zebra's dependencies are in [`Cargo.lock`](https://github.com/ZcashFoundation/zebra/tree/audit-v1.0.0-rc.0/Cargo.lock), including transitive dependencies. They can be viewed using `cargo tree`. diff --git a/tower-batch-control/src/worker.rs b/tower-batch-control/src/worker.rs index f2266e67100..865a2f37009 100644 --- a/tower-batch-control/src/worker.rs +++ b/tower-batch-control/src/worker.rs @@ -323,7 +323,7 @@ where // We don't schedule any batches on an errored service self.pending_batch_timer = None; - // By closing the mpsc::Receiver, we know that that the run() loop will + // By closing the mpsc::Receiver, we know that the run() loop will // drain all pending requests. 
We just need to make sure that any // requests that we receive before we've exhausted the receiver receive // the error: diff --git a/zebra-chain/src/history_tree/tests/vectors.rs b/zebra-chain/src/history_tree/tests/vectors.rs index e65da99b676..e8aeeeb447d 100644 --- a/zebra-chain/src/history_tree/tests/vectors.rs +++ b/zebra-chain/src/history_tree/tests/vectors.rs @@ -55,7 +55,7 @@ fn push_and_prune_for_network_upgrade( assert_eq!(first_commitment, ChainHistoryActivationReserved); } - // Build initial history tree tree with only the first block + // Build initial history tree with only the first block let first_sapling_root = sapling::tree::Root::try_from(**sapling_roots.get(&height).expect("test vector exists"))?; let mut tree = NonEmptyHistoryTree::from_block( diff --git a/zebra-chain/src/transaction/builder.rs b/zebra-chain/src/transaction/builder.rs index 37397353aab..bcf1570d595 100644 --- a/zebra-chain/src/transaction/builder.rs +++ b/zebra-chain/src/transaction/builder.rs @@ -26,7 +26,7 @@ impl Transaction { // > (nActionsOrchard > 0 and enableSpendsOrchard = 1). // // > A coinbase transaction for a block at block height greater than 0 MUST have - // > a script that, as its first item, encodes the block height height as follows. ... + // > a script that, as its first item, encodes the block height as follows. ... // > let heightBytes be the signed little-endian representation of height, // > using the minimum nonzero number of bytes such that the most significant byte // > is < 0x80. The length of heightBytes MUST be in the range {1 .. 5}. diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 36b3a76d57f..e9fc2b616cb 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -1059,7 +1059,7 @@ where #[instrument(name = "checkpoint", skip(self, block))] fn call(&mut self, block: Arc) -> Self::Future { // Reset the verifier back to the state tip if requested - // (e.g. 
due to an error when committing a block to to the state) + // (e.g. due to an error when committing a block to the state) if let Ok(tip) = self.reset_receiver.try_recv() { self.reset_progress(tip); } diff --git a/zebra-rpc/src/queue.rs b/zebra-rpc/src/queue.rs index 4504cd2070f..78c9fdd9033 100644 --- a/zebra-rpc/src/queue.rs +++ b/zebra-rpc/src/queue.rs @@ -211,7 +211,7 @@ impl Runner { /// Remove transactions that are expired according to number of blocks and current spacing between blocks. fn remove_expired(&mut self, spacing: Duration) { - // Have some extra time to to make sure we re-submit each transaction `NUMBER_OF_BLOCKS_TO_EXPIRE` + // Have some extra time to make sure we re-submit each transaction `NUMBER_OF_BLOCKS_TO_EXPIRE` // times, as the main loop also takes some time to run. let extra_time = Duration::seconds(5); From c7d8d712698c182b3ba46e1b82ff19c2b51a45c4 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 20 Jan 2025 13:28:38 +0000 Subject: [PATCH 057/245] refactor(workflows): avoid using cached states for release instances (#9137) - Renamed `no_cached_disk` input to `need_cached_disk` in `cd-deploy-nodes-gcp.yml` to clarify its purpose. - Adjusted conditional logic for cached disk usage based on event types (release vs. non-release) in `cd-deploy-nodes-gcp.yml`. - Removed the environment strategy matrix from `sub-deploy-integration-tests-gcp.yml` to simplify the workflow configuration. - Enhanced comments for better clarity on caching and environment handling across workflows. 
--- .github/workflows/cd-deploy-nodes-gcp.yml | 18 +++++++++++------- .../sub-deploy-integration-tests-gcp.yml | 5 ----- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index b515179732b..cccdd4af542 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -53,9 +53,9 @@ on: description: 'Prefer cached state from the main branch' required: false type: boolean - no_cached_disk: - default: false - description: 'Do not use a cached state disk' + need_cached_disk: + default: true + description: 'Use a cached state disk' required: false type: boolean no_cache: @@ -153,7 +153,7 @@ jobs: uses: ./.github/workflows/sub-find-cached-disks.yml # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. # This workflow also runs on release tags, the event name check will run it on releases. - if: ${{ (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && !inputs.no_cached_disk }} + if: ${{ inputs.need_cached_disk && github.event_name != 'release' && !(github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} disk_prefix: zebrad-cache @@ -279,11 +279,15 @@ jobs: - name: Create instance template for ${{ matrix.network }} run: | - DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" + if [ "${{ github.event_name }}" == "release" ]; then + DISK_NAME="zebrad-cache-${NETWORK}" + else + DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" + fi DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" - elif [ ${{ 
inputs.no_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then + elif [ ${{ !inputs.need_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then echo "No cached disk required" else echo "No cached disk found for ${{ matrix.network }} in main branch" @@ -395,7 +399,7 @@ jobs: DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" - elif [ ${{ inputs.no_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then + elif [ ${{ !inputs.need_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then echo "No cached disk required" else echo "No cached disk found for ${{ matrix.network }} in main branch" diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index fd737de9460..bd23da1f31b 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -384,11 +384,6 @@ jobs: permissions: contents: 'read' id-token: 'write' - # We want to create main branch images for both dev and prod environments - strategy: - matrix: - environment: ${{ github.ref_name == 'main' && fromJSON('["dev", "prod"]') }} - environment: ${{ matrix.environment }} steps: - uses: actions/checkout@v4.2.2 with: From c103c2d948f773e867447069f57ab6e413cbfd4f Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 20 Jan 2025 18:33:47 -0300 Subject: [PATCH 058/245] fix(rpc): Use `jsonrpsee` in `zebra-node-services` (#9151) * replace jsonrpc-core with jsonrpsee-types in zebra-node-services * remove non needed feature from dependency * remove jsonrpc-core as a dev dependency in zebra-node-services * add jsonrpsee-types as a dev dependency --- Cargo.lock | 17 +---------------- zebra-node-services/Cargo.toml | 6 +++--- zebra-node-services/src/rpc_client.rs | 13 +++++++------ zebra-rpc/Cargo.toml | 4 +--- 4 files 
changed, 12 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53b5a13ba46..8d2a7518d32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2149,21 +2149,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "jsonrpsee" version = "0.24.7" @@ -5895,7 +5880,7 @@ name = "zebra-node-services" version = "1.0.0-beta.44" dependencies = [ "color-eyre", - "jsonrpc-core", + "jsonrpsee-types", "reqwest", "serde", "serde_json", diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 6aa11ad45e9..42d51c117b3 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -28,7 +28,7 @@ getblocktemplate-rpcs = [ rpc-client = [ "color-eyre", - "jsonrpc-core", + "jsonrpsee-types", "reqwest", "serde", "serde_json", @@ -43,7 +43,7 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } # Tool and test feature rpc-client color-eyre = { version = "0.6.3", optional = true } -jsonrpc-core = { version = "18.0.0", optional = true } +jsonrpsee-types = { version = "0.24.7", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } serde = { version = "1.0.215", optional = true } @@ -53,7 +53,7 @@ tokio = { version = "1.42.0", features = ["time", "sync"] } [dev-dependencies] color-eyre = "0.6.3" -jsonrpc-core = "18.0.0" reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"] } serde = "1.0.215" serde_json = "1.0.133" +jsonrpsee-types = "0.24.7" diff --git a/zebra-node-services/src/rpc_client.rs b/zebra-node-services/src/rpc_client.rs index 
7f5ffbf192e..f61381b2d21 100644 --- a/zebra-node-services/src/rpc_client.rs +++ b/zebra-node-services/src/rpc_client.rs @@ -108,12 +108,13 @@ impl RpcRequestClient { fn json_result_from_response_text( response_text: &str, ) -> std::result::Result { - use jsonrpc_core::Output; - - let output: Output = serde_json::from_str(response_text)?; - match output { - Output::Success(success) => Ok(serde_json::from_value(success.result)?), - Output::Failure(failure) => Err(failure.error.into()), + let output: jsonrpsee_types::Response = + serde_json::from_str(response_text)?; + match output.payload { + jsonrpsee_types::ResponsePayload::Success(success) => { + Ok(serde_json::from_value(success.into_owned())?) + } + jsonrpsee_types::ResponsePayload::Error(failure) => Err(failure.to_string().into()), } } } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 270d7a63ecf..0fb6d4176af 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -64,9 +64,7 @@ jsonrpsee-types = "0.24.7" jsonrpsee-proc-macros = "0.24.7" hyper = "1.5.0" http-body-util = "0.1.2" - -# zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.133", features = ["preserve_order"] } +serde_json = "1.0.133" indexmap = { version = "2.7.0", features = ["serde"] } # RPC endpoint basic auth From c537bae35242b05baeeacbfc87d7596be95ddc64 Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Wed, 22 Jan 2025 12:58:19 +0200 Subject: [PATCH 059/245] fix: fix RUSTSEC-2024-0402 (#9141) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d2a7518d32..c7a22a6093f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1664,9 +1664,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = 
"bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "foldhash", ] @@ -2006,7 +2006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.2", "serde", ] @@ -2477,7 +2477,7 @@ checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.15.0", + "hashbrown 0.15.2", "metrics", "quanta", "sketches-ddsketch", From c30187a8f847eebf6a2333ed9f25a8b24ff0cf88 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 22 Jan 2025 18:58:23 +0800 Subject: [PATCH 060/245] fix typos (#9153) --- .github/ISSUE_TEMPLATE/usability_testing_plan.md | 4 ++-- book/src/dev/rfcs/0000-template.md | 2 +- book/src/dev/rfcs/0009-zebra-client.md | 2 +- book/src/dev/rfcs/drafts/xxxx-basic-integration-testing.md | 2 +- zebra-network/src/address_book.rs | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/usability_testing_plan.md b/.github/ISSUE_TEMPLATE/usability_testing_plan.md index c93f413b605..570f1393d1f 100644 --- a/.github/ISSUE_TEMPLATE/usability_testing_plan.md +++ b/.github/ISSUE_TEMPLATE/usability_testing_plan.md @@ -31,7 +31,7 @@ assignees: '' ### Method - + ### Test environment, equipment and logistics @@ -56,7 +56,7 @@ assignees: '' ## Session Outline and timing - + ### 1.Introduction to the session (5\') diff --git a/book/src/dev/rfcs/0000-template.md b/book/src/dev/rfcs/0000-template.md index 72bcd9affcf..cdf4022bc24 100644 --- a/book/src/dev/rfcs/0000-template.md +++ b/book/src/dev/rfcs/0000-template.md @@ -122,7 +122,7 @@ Think about what the natural extension and evolution of your proposal would be and how it would affect Zebra and Zcash as a whole. 
Try to use this section as a tool to more fully consider all possible interactions with the project and cryptocurrency ecosystem in your proposal. -Also consider how the this all fits into the roadmap for the project +Also consider how this all fits into the roadmap for the project and of the relevant sub-team. This is also a good place to "dump ideas", if they are out of scope for the diff --git a/book/src/dev/rfcs/0009-zebra-client.md b/book/src/dev/rfcs/0009-zebra-client.md index 3aa153daea6..771aa7d3a2d 100644 --- a/book/src/dev/rfcs/0009-zebra-client.md +++ b/book/src/dev/rfcs/0009-zebra-client.md @@ -341,7 +341,7 @@ endpoint - + diff --git a/book/src/dev/rfcs/drafts/xxxx-basic-integration-testing.md b/book/src/dev/rfcs/drafts/xxxx-basic-integration-testing.md index 7f9c0594e3c..dd77cc14cdc 100644 --- a/book/src/dev/rfcs/drafts/xxxx-basic-integration-testing.md +++ b/book/src/dev/rfcs/drafts/xxxx-basic-integration-testing.md @@ -124,7 +124,7 @@ Think about what the natural extension and evolution of your proposal would be and how it would affect Zebra and Zcash as a whole. Try to use this section as a tool to more fully consider all possible interactions with the project and cryptocurrency ecosystem in your proposal. -Also consider how the this all fits into the roadmap for the project +Also consider how this all fits into the roadmap for the project and of the relevant sub-team. This is also a good place to "dump ideas", if they are out of scope for the diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 9ecf5aa24b0..aee8629f3bc 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -265,7 +265,7 @@ impl AddressBook { /// Get the active addresses in `self` in random order with sanitized timestamps, /// including our local listener address. /// - /// Limited to a the number of peer addresses Zebra should give out per `GetAddr` request. 
+ /// Limited to the number of peer addresses Zebra should give out per `GetAddr` request. pub fn fresh_get_addr_response(&self) -> Vec { let now = Utc::now(); let mut peers = self.sanitized(now); From 05ab2501292e1ac8a56a9e1f334cb5ea0133fe4e Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Wed, 22 Jan 2025 12:58:29 +0200 Subject: [PATCH 061/245] fix: fix typos (#9140) --- book/src/dev/rfcs/0006-contextual-difficulty.md | 8 ++++---- zebra-chain/src/amount.rs | 2 +- zebra-chain/src/amount/tests/vectors.rs | 4 ++-- .../common/get_block_template_rpcs/get_block_template.rs | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/book/src/dev/rfcs/0006-contextual-difficulty.md b/book/src/dev/rfcs/0006-contextual-difficulty.md index dc27692adb2..accc8f00332 100644 --- a/book/src/dev/rfcs/0006-contextual-difficulty.md +++ b/book/src/dev/rfcs/0006-contextual-difficulty.md @@ -421,7 +421,7 @@ fn mean_target_difficulty(&self) -> ExpandedDifficulty { ... } Since the `PoWLimit`s are `2^251 − 1` for Testnet, and `2^243 − 1` for Mainnet, the sum of these difficulty thresholds will be less than or equal to `(2^251 − 1)*17 = 2^255 + 2^251 - 17`. Therefore, this calculation can not -overflow a `u256` value. So the function is infalliable. +overflow a `u256` value. So the function is infallible. In Zebra, contextual validation starts after Canopy activation, so we can assume that the relevant chain contains at least 17 blocks. Therefore, the `PoWLimit` @@ -499,7 +499,7 @@ that the relevant chain contains at least 28 blocks. Therefore: * there is always an odd number of blocks in `MedianTime()`, so the median is always the exact middle of the sequence. -Therefore, the function is infalliable. +Therefore, the function is infallible. 
### Test network minimum difficulty calculation [test-net-min-difficulty-calculation]: #test-net-min-difficulty-calculation @@ -580,7 +580,7 @@ In Zcash, the Testnet minimum difficulty rule starts at block 299188, and in Zebra, contextual validation starts after Canopy activation. So we can assume that there is always a previous block. -Therefore, this function is infalliable. +Therefore, this function is infallible. ### Block difficulty threshold calculation [block-difficulty-threshold-calculation]: #block-difficulty-threshold-calculation @@ -647,7 +647,7 @@ Note that the multiplication by `ActualTimespanBounded` must happen after the division by `AveragingWindowTimespan`. Performing the multiplication first could overflow. -If implemented in this way, the function is infalliable. +If implemented in this way, the function is infallible. `zcashd` truncates the `MeanTarget` after the mean calculation, and after dividing by `AveragingWindowTimespan`. But as long as there is no overflow, diff --git a/zebra-chain/src/amount.rs b/zebra-chain/src/amount.rs index f4a81c14893..4b10699102a 100644 --- a/zebra-chain/src/amount.rs +++ b/zebra-chain/src/amount.rs @@ -416,7 +416,7 @@ where } } -// TODO: add infalliable impls for NonNegative <-> NegativeOrZero, +// TODO: add infallible impls for NonNegative <-> NegativeOrZero, // when Rust uses trait output types to disambiguate overlapping impls. 
impl std::ops::Neg for Amount where diff --git a/zebra-chain/src/amount/tests/vectors.rs b/zebra-chain/src/amount/tests/vectors.rs index 933b2824d41..e3b1fb78d47 100644 --- a/zebra-chain/src/amount/tests/vectors.rs +++ b/zebra-chain/src/amount/tests/vectors.rs @@ -180,12 +180,12 @@ fn deserialize_checks_bounds() -> Result<()> { let mut big_bytes = Vec::new(); (&mut big_bytes) .write_u64::(big) - .expect("unexpected serialization failure: vec should be infalliable"); + .expect("unexpected serialization failure: vec should be infallible"); let mut neg_bytes = Vec::new(); (&mut neg_bytes) .write_i64::(neg) - .expect("unexpected serialization failure: vec should be infalliable"); + .expect("unexpected serialization failure: vec should be infallible"); Amount::::zcash_deserialize(big_bytes.as_slice()) .expect_err("deserialization should reject too large values"); diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index b13c7f04236..a91c500834f 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -34,7 +34,7 @@ use crate::common::{ /// This ensures that a new template can be deserialized and sent to interrupt the /// block proposal requests if the old template is no longer valid in edge-cases where /// an old template becomes invalid right after it's returned. We've seen the getblocktemplate -/// respond within ~50ms of a request locallly, and this test is run on GCP compute instances +/// respond within ~50ms of a request locally, and this test is run on GCP compute instances /// that should offer comparable latency in CI. 
pub const EXTRA_LONGPOLL_WAIT_TIME: Duration = Duration::from_millis(150);

From 8b840178076da0839827676ebd98a5b47806a087 Mon Sep 17 00:00:00 2001
From: Elijah Hampton
Date: Thu, 23 Jan 2025 09:54:05 +0000
Subject: [PATCH 062/245] Respond to getblockchaininfo with genesis block when
 empty state (#9138)

* Refactors getblockchaininfo to return genesis block if tip pool request
fails. Adds test in prop.rs

* clippy, check and fmt

* Removes unused imports. Refactors code for correctness in genesis bytes
reference and tokio pause() call

---------

Co-authored-by: Elijah Hampton
---
 zebra-rpc/src/methods.rs            |  68 ++++++++++-------
 zebra-rpc/src/methods/tests/prop.rs | 112 +++++++++++++++++++++++++++-
 2 files changed, 151 insertions(+), 29 deletions(-)

diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs
index 450b8c9cfa9..4c6de27b69b 100644
--- a/zebra-rpc/src/methods.rs
+++ b/zebra-rpc/src/methods.rs
@@ -29,6 +29,7 @@ use zebra_chain::{
     subtree::NoteCommitmentSubtreeIndex,
     transaction::{self, SerializedTransaction, Transaction, UnminedTx},
     transparent::{self, Address},
+    value_balance::ValueBalance,
     work::{
         difficulty::{CompactDifficulty, ExpandedDifficulty},
         equihash::Solution,
@@ -550,35 +551,52 @@ where
             // `chain` field
             let chain = network.bip70_network_name();

-            let request = zebra_state::ReadRequest::TipPoolValues;
-            let response: zebra_state::ReadResponse = state
+            let (tip_height, tip_hash, tip_block_time, value_balance) = match state
                 .ready()
-                .and_then(|service| service.call(request))
-                .await
-                .map_misc_error()?;
-
-            let zebra_state::ReadResponse::TipPoolValues {
-                tip_height,
-                tip_hash,
-                value_balance,
-            } = response
-            else {
-                unreachable!("unmatched response to a TipPoolValues request")
-            };
-
-            let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into());
-            let response: zebra_state::ReadResponse = state
-                .ready()
-                .and_then(|service| service.call(request))
+                .and_then(|service| service.call(zebra_state::ReadRequest::TipPoolValues))
.await - .map_misc_error()?; - - let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { - unreachable!("unmatched response to a BlockHeader request") + { + Ok(zebra_state::ReadResponse::TipPoolValues { + tip_height, + tip_hash, + value_balance, + }) => { + let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + if let zebra_state::ReadResponse::BlockHeader { header, .. } = response { + (tip_height, tip_hash, header.time, value_balance) + } else { + unreachable!("unmatched response to a TipPoolValues request") + } + } + _ => { + let request = + zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(Height::MIN)); + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + if let zebra_state::ReadResponse::BlockHeader { + header, + hash, + height, + .. 
+ } = response + { + (height, hash, header.time, ValueBalance::zero()) + } else { + unreachable!("unmatched response to a BlockHeader request") + } + } }; - let tip_block_time = header.time; - let now = Utc::now(); let zebra_estimated_height = NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 8753d514c23..cf7aec6c67a 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -14,18 +14,18 @@ use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, Height}, chain_tip::{mock::MockChainTip, ChainTip, NoChainTip}, - parameters::{Network, NetworkUpgrade}, - serialization::{ZcashDeserialize, ZcashSerialize}, + parameters::{ConsensusBranchId, Network, NetworkUpgrade}, + serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}, transaction::{self, Transaction, UnminedTx, VerifiedUnminedTx}, transparent, value_balance::ValueBalance, }; use zebra_node_services::mempool; -use zebra_state::BoxError; +use zebra_state::{BoxError, HashOrHeight}; use zebra_test::mock_service::MockService; -use crate::methods; +use crate::methods::{self, types::ValuePoolBalance}; use super::super::{ AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, @@ -370,6 +370,8 @@ proptest! { .await .expect("getblockchaininfo should call mock state service with correct request") .respond(Err(BoxError::from("no chain tip available yet"))); + + state.expect_request(zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(block::Height(0)))).await.expect("no chain tip available yet").respond(Err(BoxError::from("no chain tip available yet"))); } }; @@ -493,6 +495,108 @@ proptest! { })?; } + /// Test the `get_blockchain_info` response when tip_pool request fails. 
+ #[test] + fn get_blockchain_info_returns_genesis_when_tip_pool_fails(network in any::()) { + let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network.clone(), NoChainTip); + + // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. + tokio::time::pause(); + + let genesis_block = match network { + Network::Mainnet => { + let block_bytes = &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES; + let block: Arc = block_bytes.zcash_deserialize_into().expect("block is valid"); + block + }, + Network::Testnet(_) => { + let block_bytes = &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES; + let block: Arc = block_bytes.zcash_deserialize_into().expect("block is valid"); + block + }, + }; + + // Genesis block fields + let block_time = genesis_block.header.time; + let block_version = genesis_block.header.version; + let block_prev_block_hash = genesis_block.header.previous_block_hash; + let block_merkle_root = genesis_block.header.merkle_root; + let block_commitment_bytes = genesis_block.header.commitment_bytes; + let block_difficulty_threshold = genesis_block.header.difficulty_threshold; + let block_nonce = genesis_block.header.nonce; + let block_solution = genesis_block.header.solution; + let block_hash = genesis_block.header.hash(); + + runtime.block_on(async move { + let response_fut = rpc.get_blockchain_info(); + let mock_state_handler = { + let mut state = state.clone(); + async move { + state.expect_request(zebra_state::ReadRequest::TipPoolValues) + .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(Err(BoxError::from("tip values not available"))); + + + state + .expect_request(zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(Height::MIN))) + .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(zebra_state::ReadResponse::BlockHeader { 
+ header: Arc::new(block::Header { + time: block_time, + version: block_version, + previous_block_hash: block_prev_block_hash, + merkle_root: block_merkle_root, + commitment_bytes: block_commitment_bytes, + difficulty_threshold: block_difficulty_threshold, + nonce: block_nonce, + solution: block_solution + }), + hash: block_hash, + height: Height::MIN, + next_block_hash: None, + }); + } + }; + + let (response, _) = tokio::join!(response_fut, mock_state_handler); + + let response = response.expect("should succeed with genesis block info"); + + prop_assert_eq!(response.best_block_hash, genesis_block.header.hash()); + prop_assert_eq!(response.chain, network.bip70_network_name()); + prop_assert_eq!(response.blocks, Height::MIN); + prop_assert_eq!(response.value_pools, ValuePoolBalance::from_value_balance(ValueBalance::zero())); + + let genesis_branch_id = NetworkUpgrade::current(&network, Height::MIN).branch_id().unwrap_or(ConsensusBranchId::RPC_MISSING_ID); + let next_height = (Height::MIN + 1).expect("genesis height plus one is next height and valid"); + let next_branch_id = NetworkUpgrade::current(&network, next_height).branch_id().unwrap_or(ConsensusBranchId::RPC_MISSING_ID); + + prop_assert_eq!(response.consensus.chain_tip.0, genesis_branch_id); + prop_assert_eq!(response.consensus.next_block.0, next_branch_id); + + for (_, upgrade_info) in response.upgrades { + let status = if Height::MIN < upgrade_info.activation_height { + NetworkUpgradeStatus::Pending + } else { + NetworkUpgradeStatus::Active + }; + prop_assert_eq!(upgrade_info.status, status); + } + + mempool.expect_no_requests().await?; + state.expect_no_requests().await?; + + // The queue task should continue without errors or panics + prop_assert!(mempool_tx_queue.now_or_never().is_none()); + + Ok(()) + })?; + } + /// Test the `get_address_balance` RPC using an arbitrary set of addresses. 
#[test] fn queries_balance_for_valid_addresses( From 79fbc03ece01c1efab1a243219b4cc49b7fb149f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 11:44:39 +0000 Subject: [PATCH 063/245] build(deps): bump codecov/codecov-action in the devops group (#9155) Bumps the devops group with 1 update: [codecov/codecov-action](https://github.com/codecov/codecov-action). Updates `codecov/codecov-action` from 5.1.2 to 5.2.0 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.1.2...v5.2.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index d2dfdd9ff1a..e1a774db11a 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.1.2 + uses: codecov/codecov-action@v5.2.0 From f0c497160a17d776306356fa28b4acb7156ccfbe Mon Sep 17 00:00:00 2001 From: Arya Date: Mon, 27 Jan 2025 09:34:48 -0500 Subject: [PATCH 064/245] add(state): Track spending transaction ids by spent outpoints and revealed nullifiers (#8895) * Adds new column family for [spent_out_loc] -> [spending_tx_loc] with a read method and an update to `prepare_spending_transparent_tx_ids_batch()` for maintaining it when committing blocks to the finalized state. 
Adds TODOs for remaining production changes needed for issue #8837.

* add spending tx ids for spent outpoints to non-finalized chains

* adds a `spending_transaction_hash()` read fn for the new column family

* Adds a `TransactionIdForSpentOutPoint` ReadRequest and a `TransactionId` ReadResponse

* Updates snapshots, removes outdated TODOs, moves a TODO.

* Clarifies `spent_utxos` field docs, fixes an assertion

* import TypedColumnFamily from `finalized_state` instead of from the crate.

* adds db format upgrade for spent outpoints -> tx hash

* adds revealing tx ids for nullifiers in finalized and non-finalized states

* updates nullifiers column families to include revealing transaction locations in db format upgrade

* Renames new read state request to `SpendingTransactionId` and updates its type to a `Spend` enum

* refactor db format upgrade and prepare_nullifiers_batch() to use ZebraDb instead of DiskDb, checks cancel_receiver before every db operation

* Adds acceptance test for checking that the finalized state has spending transaction ids

* Adds variant docs to zebra_state::request::Spend enum

* Updates Zebra book with the latest changes to the rocks db column families

* Updates acceptance test to check non-finalized state

* adds a few log messages to the acceptance test, reduces frequency of logs for progress updates

* fixes docs lint and skips test when there is no cached state

* Avoids returning genesis coinbase tx hash when indexes are missing

* Adds `indexer` compilation feature in zebra-state and build metadata in db format version file

* stops tracking new indexes in finalized state when feature is unselected

* stops tracking new indexes in non-finalized state when indexer feature is unselected

* condenses imports

* - adds build metadata when writing db version file, if any.
- adds the build metadata to the db version file before adding indexes.
- deletes indexes when running without the `indexer` feature * Replaces dropping cf with deleting range of all items to avoid a panic when trying to open the db with that column family. * Fixes lint, avoids reading coinbase transactions from disk * updates db column families table * Document need for having an indexed cached state and use a multi-threaded tokio runtime in has_spending_transaction_ids test * fixes call to renamed `future_blocks` test fn * improves test logs and fixes a disk format deserialization bug * Replaces a new expr with a previously existing constant, fixes typo --- Cargo.lock | 1 + book/src/dev/state-db-upgrades.md | 13 ++ zebra-rpc/Cargo.toml | 1 + zebra-state/Cargo.toml | 4 + zebra-state/src/config.rs | 16 +- zebra-state/src/constants.rs | 15 +- zebra-state/src/lib.rs | 4 + zebra-state/src/request.rs | 54 +++++++ zebra-state/src/response.rs | 9 ++ zebra-state/src/service.rs | 29 ++++ zebra-state/src/service/check/nullifier.rs | 28 ++-- zebra-state/src/service/check/tests/utxo.rs | 10 +- zebra-state/src/service/check/utxo.rs | 10 +- zebra-state/src/service/finalized_state.rs | 2 + .../finalized_state/disk_format/block.rs | 10 ++ .../tests/snapshots/column_family_names.snap | 2 +- .../empty_column_families@mainnet_0.snap | 1 + .../empty_column_families@mainnet_1.snap | 1 + .../empty_column_families@mainnet_2.snap | 1 + .../empty_column_families@no_blocks.snap | 2 +- .../empty_column_families@testnet_0.snap | 1 + .../empty_column_families@testnet_1.snap | 1 + .../empty_column_families@testnet_2.snap | 1 + .../finalized_state/disk_format/upgrade.rs | 89 ++++++++-- .../disk_format/upgrade/add_subtrees.rs | 29 ++-- .../upgrade/cache_genesis_roots.rs | 15 +- .../upgrade/drop_tx_locs_by_spends.rs | 74 +++++++++ .../disk_format/upgrade/fix_tree_key_type.rs | 11 +- .../upgrade/track_tx_locs_by_spends.rs | 109 +++++++++++++ .../src/service/finalized_state/zebra_db.rs | 9 +- .../service/finalized_state/zebra_db/block.rs | 105 ++++++++---- 
.../finalized_state/zebra_db/shielded.rs | 71 ++++++-- .../finalized_state/zebra_db/transparent.rs | 76 ++++++++- .../src/service/non_finalized_state.rs | 16 +- .../src/service/non_finalized_state/chain.rs | 153 ++++++++++++++---- zebra-state/src/service/read.rs | 4 + zebra-state/src/service/read/block.rs | 23 ++- zebrad/Cargo.toml | 6 +- zebrad/src/commands/start.rs | 4 +- zebrad/tests/acceptance.rs | 151 +++++++++++++++++ zebrad/tests/common/cached_state.rs | 10 +- .../get_block_template_rpcs/submit_block.rs | 4 +- .../lightwalletd/send_transaction_test.rs | 19 ++- 43 files changed, 1008 insertions(+), 186 deletions(-) create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/drop_tx_locs_by_spends.rs create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/track_tx_locs_by_spends.rs diff --git a/Cargo.lock b/Cargo.lock index c7a22a6093f..a33bbb48c3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5994,6 +5994,7 @@ dependencies = [ "bincode", "chrono", "color-eyre", + "crossbeam-channel", "dirs", "elasticsearch", "futures", diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md index 15f962e88b4..65eb4744b76 100644 --- a/book/src/dev/state-db-upgrades.md +++ b/book/src/dev/state-db-upgrades.md @@ -326,6 +326,19 @@ We use the following rocksdb column families: | `history_tree` | `()` | `NonEmptyHistoryTree` | Update | | `tip_chain_value_pool` | `()` | `ValueBalance` | Update | +With the following additional modifications when compiled with the `indexer` feature: + +| Column Family | Keys | Values | Changes | +| ---------------------------------- | ---------------------- | ----------------------------- | ------- | +| *Transparent* | | | | +| `tx_loc_by_spent_out_loc` | `OutputLocation` | `TransactionLocation` | Create | +| *Sprout* | | | | +| `sprout_nullifiers` | `sprout::Nullifier` | `TransactionLocation` | Create | +| *Sapling* | | | | +| `sapling_nullifiers` | `sapling::Nullifier` | 
`TransactionLocation` | Create | +| *Orchard* | | | | +| `orchard_nullifiers` | `orchard::Nullifier` | `TransactionLocation` | Create | + ### Data Formats [rocksdb-data-format]: #rocksdb-data-format diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 0fb6d4176af..0e58deaeee5 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -27,6 +27,7 @@ indexer-rpcs = [ "tonic-reflection", "prost", "tokio-stream", + "zebra-state/indexer" ] # Production features that activate extra dependencies, or extra features in dependencies diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index fc2a6505791..e7d8dc54215 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -27,6 +27,9 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +# Indexes spending transaction ids by spent outpoints and revealed nullifiers +indexer = [] + # Test-only features proptest-impl = [ "proptest", @@ -63,6 +66,7 @@ regex = "1.11.0" rlimit = "0.10.2" rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } semver = "1.0.23" +crossbeam-channel = "0.5.13" serde = { version = "1.0.215", features = ["serde_derive"] } tempfile = "3.14.0" thiserror = "2.0.6" diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index 4cd800f3975..7d175d4d614 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -431,15 +431,7 @@ pub(crate) fn database_format_version_at_path( // The database has a version file on disk if let Some(version) = disk_version_file { - let (minor, patch) = version - .split_once('.') - .ok_or("invalid database format version file")?; - - return Ok(Some(Version::new( - major_version, - minor.parse()?, - patch.parse()?, - ))); + return Ok(Some(format!("{major_version}.{version}").parse()?)); } // There's no version file on disk, so we need to guess the version @@ -508,7 +500,11 @@ pub(crate) mod hidden { ) -> Result<(), BoxError> { let version_path = config.version_file_path(db_kind, 
changed_version.major, network); - let version = format!("{}.{}", changed_version.minor, changed_version.patch); + let mut version = format!("{}.{}", changed_version.minor, changed_version.patch); + + if !changed_version.build.is_empty() { + version.push_str(&format!("+{}", changed_version.build)); + } // Write the version file atomically so the cache is not corrupted if Zebra shuts down or // crashes. diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 167ce011955..2c3671d838f 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -66,11 +66,16 @@ const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; /// This is the version implemented by the Zebra code that's currently running, /// the minor and patch versions on disk can be different. pub fn state_database_format_version_in_code() -> Version { - Version::new( - DATABASE_FORMAT_VERSION, - DATABASE_FORMAT_MINOR_VERSION, - DATABASE_FORMAT_PATCH_VERSION, - ) + Version { + major: DATABASE_FORMAT_VERSION, + minor: DATABASE_FORMAT_MINOR_VERSION, + patch: DATABASE_FORMAT_PATCH_VERSION, + pre: semver::Prerelease::EMPTY, + #[cfg(feature = "indexer")] + build: semver::BuildMetadata::new("indexer").expect("hard-coded value should be valid"), + #[cfg(not(feature = "indexer"))] + build: semver::BuildMetadata::EMPTY, + } } /// Returns the highest database version that modifies the subtree index format. 
diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index e93a3b8f905..848d30950ec 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -44,6 +44,10 @@ pub use error::{ pub use request::{ CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock, }; + +#[cfg(feature = "indexer")] +pub use request::Spend; + pub use response::{KnownBlock, MinedTx, ReadResponse, Response}; pub use service::{ chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip, TipAction}, diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 0eeb9b940b2..6336438412d 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -29,6 +29,51 @@ use crate::{ ReadResponse, Response, }; +/// Identify a spend by a transparent outpoint or revealed nullifier. +/// +/// This enum implements `From` for [`transparent::OutPoint`], [`sprout::Nullifier`], +/// [`sapling::Nullifier`], and [`orchard::Nullifier`]. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[cfg(feature = "indexer")] +pub enum Spend { + /// A spend identified by a [`transparent::OutPoint`]. + OutPoint(transparent::OutPoint), + /// A spend identified by a [`sprout::Nullifier`]. + Sprout(sprout::Nullifier), + /// A spend identified by a [`sapling::Nullifier`]. + Sapling(sapling::Nullifier), + /// A spend identified by a [`orchard::Nullifier`]. 
+ Orchard(orchard::Nullifier), +} + +#[cfg(feature = "indexer")] +impl From for Spend { + fn from(outpoint: transparent::OutPoint) -> Self { + Self::OutPoint(outpoint) + } +} + +#[cfg(feature = "indexer")] +impl From for Spend { + fn from(sprout_nullifier: sprout::Nullifier) -> Self { + Self::Sprout(sprout_nullifier) + } +} + +#[cfg(feature = "indexer")] +impl From for Spend { + fn from(sapling_nullifier: sapling::Nullifier) -> Self { + Self::Sapling(sapling_nullifier) + } +} + +#[cfg(feature = "indexer")] +impl From for Spend { + fn from(orchard_nullifier: orchard::Nullifier) -> Self { + Self::Orchard(orchard_nullifier) + } +} + /// Identify a block by hash or height. /// /// This enum implements `From` for [`block::Hash`] and [`block::Height`], @@ -1020,6 +1065,13 @@ pub enum ReadRequest { height_range: RangeInclusive, }, + /// Looks up a spending transaction id by its spent transparent input. + /// + /// Returns [`ReadResponse::TransactionId`] with the hash of the transaction + /// that spent the output at the provided [`transparent::OutPoint`]. + #[cfg(feature = "indexer")] + SpendingTransactionId(Spend), + /// Looks up utxos for the provided addresses. /// /// Returns a type with found utxos and transaction information. @@ -1106,6 +1158,8 @@ impl ReadRequest { } ReadRequest::BestChainNextMedianTimePast => "best_chain_next_median_time_past", ReadRequest::BestChainBlockHash(_) => "best_chain_block_hash", + #[cfg(feature = "indexer")] + ReadRequest::SpendingTransactionId(_) => "spending_transaction_id", #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::ChainInfo => "chain_info", #[cfg(feature = "getblocktemplate-rpcs")] diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index daa2fbe2829..73b2ab09a96 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -175,6 +175,12 @@ pub enum ReadResponse { /// or `None` if the block was not found. 
TransactionIdsForBlock(Option>), + /// Response to [`ReadRequest::SpendingTransactionId`], + /// with an list of transaction hashes in block order, + /// or `None` if the block was not found. + #[cfg(feature = "indexer")] + TransactionId(Option), + /// Response to [`ReadRequest::BlockLocator`] with a block locator object. BlockLocator(Vec), @@ -343,6 +349,9 @@ impl TryFrom for Response { Err("there is no corresponding Response for this ReadResponse") } + #[cfg(feature = "indexer")] + ReadResponse::TransactionId(_) => Err("there is no corresponding Response for this ReadResponse"), + #[cfg(feature = "getblocktemplate-rpcs")] ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal), diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index be3a78f0772..487d152c62c 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1383,6 +1383,35 @@ impl Service for ReadStateService { .wait_for_panics() } + #[cfg(feature = "indexer")] + ReadRequest::SpendingTransactionId(spend) => { + let state = self.clone(); + + tokio::task::spawn_blocking(move || { + span.in_scope(move || { + let spending_transaction_id = state + .non_finalized_state_receiver + .with_watch_data(|non_finalized_state| { + read::spending_transaction_hash( + non_finalized_state.best_chain(), + &state.db, + spend, + ) + }); + + // The work is done in the future. + timer.finish( + module_path!(), + line!(), + "ReadRequest::TransactionIdForSpentOutPoint", + ); + + Ok(ReadResponse::TransactionId(spending_transaction_id)) + }) + }) + .wait_for_panics() + } + ReadRequest::UnspentBestChainUtxo(outpoint) => { let state = self.clone(); diff --git a/zebra-state/src/service/check/nullifier.rs b/zebra-state/src/service/check/nullifier.rs index 809e78383ba..bd7e2b834be 100644 --- a/zebra-state/src/service/check/nullifier.rs +++ b/zebra-state/src/service/check/nullifier.rs @@ -1,13 +1,16 @@ //! Checks for nullifier uniqueness. 
-use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use tracing::trace; use zebra_chain::transaction::Transaction; use crate::{ error::DuplicateNullifierError, - service::{finalized_state::ZebraDb, non_finalized_state::Chain}, + service::{ + finalized_state::ZebraDb, + non_finalized_state::{Chain, SpendingTransactionId}, + }, SemanticallyVerifiedBlock, ValidateContextError, }; @@ -105,19 +108,22 @@ pub(crate) fn tx_no_duplicates_in_chain( find_duplicate_nullifier( transaction.sprout_nullifiers(), |nullifier| finalized_chain.contains_sprout_nullifier(nullifier), - non_finalized_chain.map(|chain| |nullifier| chain.sprout_nullifiers.contains(nullifier)), + non_finalized_chain + .map(|chain| |nullifier| chain.sprout_nullifiers.contains_key(nullifier)), )?; find_duplicate_nullifier( transaction.sapling_nullifiers(), |nullifier| finalized_chain.contains_sapling_nullifier(nullifier), - non_finalized_chain.map(|chain| |nullifier| chain.sapling_nullifiers.contains(nullifier)), + non_finalized_chain + .map(|chain| |nullifier| chain.sapling_nullifiers.contains_key(nullifier)), )?; find_duplicate_nullifier( transaction.orchard_nullifiers(), |nullifier| finalized_chain.contains_orchard_nullifier(nullifier), - non_finalized_chain.map(|chain| |nullifier| chain.orchard_nullifiers.contains(nullifier)), + non_finalized_chain + .map(|chain| |nullifier| chain.orchard_nullifiers.contains_key(nullifier)), )?; Ok(()) @@ -156,8 +162,9 @@ pub(crate) fn tx_no_duplicates_in_chain( /// [5]: service::non_finalized_state::Chain #[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))] pub(crate) fn add_to_non_finalized_chain_unique<'block, NullifierT>( - chain_nullifiers: &mut HashSet, + chain_nullifiers: &mut HashMap, shielded_data_nullifiers: impl IntoIterator, + revealing_tx_id: SpendingTransactionId, ) -> Result<(), ValidateContextError> where NullifierT: DuplicateNullifierError + Copy + std::fmt::Debug + Eq + std::hash::Hash + 'block, @@ 
-166,7 +173,10 @@ where trace!(?nullifier, "adding nullifier"); // reject the nullifier if it is already present in this non-finalized chain - if !chain_nullifiers.insert(*nullifier) { + if chain_nullifiers + .insert(*nullifier, revealing_tx_id) + .is_some() + { Err(nullifier.duplicate_nullifier_error(false))?; } } @@ -200,7 +210,7 @@ where /// [1]: service::non_finalized_state::Chain #[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))] pub(crate) fn remove_from_non_finalized_chain<'block, NullifierT>( - chain_nullifiers: &mut HashSet, + chain_nullifiers: &mut HashMap, shielded_data_nullifiers: impl IntoIterator, ) where NullifierT: std::fmt::Debug + Eq + std::hash::Hash + 'block, @@ -209,7 +219,7 @@ pub(crate) fn remove_from_non_finalized_chain<'block, NullifierT>( trace!(?nullifier, "removing nullifier"); assert!( - chain_nullifiers.remove(nullifier), + chain_nullifiers.remove(nullifier).is_some(), "nullifier must be present if block was added to chain" ); } diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index 57d087c552d..3e37fdc8173 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -221,7 +221,7 @@ proptest! { .unwrap(); prop_assert!(!chain.unspent_utxos().contains_key(&expected_outpoint)); prop_assert!(chain.created_utxos.contains_key(&expected_outpoint)); - prop_assert!(chain.spent_utxos.contains(&expected_outpoint)); + prop_assert!(chain.spent_utxos.contains_key(&expected_outpoint)); // the finalized state does not have the UTXO prop_assert!(finalized_state.utxo(&expected_outpoint).is_none()); @@ -310,14 +310,14 @@ proptest! 
{ if use_finalized_state_output { // the chain has spent the UTXO from the finalized state prop_assert!(!chain.created_utxos.contains_key(&expected_outpoint)); - prop_assert!(chain.spent_utxos.contains(&expected_outpoint)); + prop_assert!(chain.spent_utxos.contains_key(&expected_outpoint)); // the finalized state has the UTXO, but it will get deleted on commit prop_assert!(finalized_state.utxo(&expected_outpoint).is_some()); } else { // the chain has spent its own UTXO prop_assert!(!chain.unspent_utxos().contains_key(&expected_outpoint)); prop_assert!(chain.created_utxos.contains_key(&expected_outpoint)); - prop_assert!(chain.spent_utxos.contains(&expected_outpoint)); + prop_assert!(chain.spent_utxos.contains_key(&expected_outpoint)); // the finalized state does not have the UTXO prop_assert!(finalized_state.utxo(&expected_outpoint).is_none()); } @@ -650,12 +650,12 @@ proptest! { // the finalized state has the unspent UTXO prop_assert!(finalized_state.utxo(&expected_outpoint).is_some()); // the non-finalized state has spent the UTXO - prop_assert!(chain.spent_utxos.contains(&expected_outpoint)); + prop_assert!(chain.spent_utxos.contains_key(&expected_outpoint)); } else { // the non-finalized state has created and spent the UTXO prop_assert!(!chain.unspent_utxos().contains_key(&expected_outpoint)); prop_assert!(chain.created_utxos.contains_key(&expected_outpoint)); - prop_assert!(chain.spent_utxos.contains(&expected_outpoint)); + prop_assert!(chain.spent_utxos.contains_key(&expected_outpoint)); // the finalized state does not have the UTXO prop_assert!(finalized_state.utxo(&expected_outpoint).is_none()); } diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index df3981ec0b8..4f1a307c402 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -1,6 +1,6 @@ //! Consensus rule checks for the finalized state. 
-use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use zebra_chain::{ amount, @@ -9,7 +9,7 @@ use zebra_chain::{ use crate::{ constants::MIN_TRANSPARENT_COINBASE_MATURITY, - service::finalized_state::ZebraDb, + service::{finalized_state::ZebraDb, non_finalized_state::SpendingTransactionId}, SemanticallyVerifiedBlock, ValidateContextError::{ self, DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend, @@ -38,7 +38,7 @@ use crate::{ pub fn transparent_spend( semantically_verified: &SemanticallyVerifiedBlock, non_finalized_chain_unspent_utxos: &HashMap, - non_finalized_chain_spent_utxos: &HashSet, + non_finalized_chain_spent_utxos: &HashMap, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { let mut block_spends = HashMap::new(); @@ -128,7 +128,7 @@ fn transparent_spend_chain_order( spend_tx_index_in_block: usize, block_new_outputs: &HashMap, non_finalized_chain_unspent_utxos: &HashMap, - non_finalized_chain_spent_utxos: &HashSet, + non_finalized_chain_spent_utxos: &HashMap, finalized_state: &ZebraDb, ) -> Result { if let Some(output) = block_new_outputs.get(&spend) { @@ -148,7 +148,7 @@ fn transparent_spend_chain_order( } } - if non_finalized_chain_spent_utxos.contains(&spend) { + if non_finalized_chain_spent_utxos.contains_key(&spend) { // reject the spend if its UTXO is already spent in the // non-finalized parent chain return Err(DuplicateTransparentSpend { diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index f8c9bade5c1..57d22493cef 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -20,6 +20,7 @@ use std::{ }; use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; +use zebra_db::transparent::TX_LOC_BY_SPENT_OUT_LOC; use crate::{ constants::{state_database_format_version_in_code, STATE_DATABASE_KIND}, @@ -77,6 +78,7 @@ pub const STATE_COLUMN_FAMILIES_IN_CODE: 
&[&str] = &[ "tx_loc_by_transparent_addr_loc", "utxo_by_out_loc", "utxo_loc_by_transparent_addr_loc", + TX_LOC_BY_SPENT_OUT_LOC, // Sprout "sprout_nullifiers", "sprout_anchors", diff --git a/zebra-state/src/service/finalized_state/disk_format/block.rs b/zebra-state/src/service/finalized_state/disk_format/block.rs index 22495ebf332..ed57dae03fc 100644 --- a/zebra-state/src/service/finalized_state/disk_format/block.rs +++ b/zebra-state/src/service/finalized_state/disk_format/block.rs @@ -331,6 +331,16 @@ impl IntoDisk for TransactionLocation { } } +impl FromDisk for Option { + fn from_bytes(disk_bytes: impl AsRef<[u8]>) -> Self { + if disk_bytes.as_ref().len() == TRANSACTION_LOCATION_DISK_BYTES { + Some(TransactionLocation::from_bytes(disk_bytes)) + } else { + None + } + } +} + impl FromDisk for TransactionLocation { fn from_bytes(disk_bytes: impl AsRef<[u8]>) -> Self { let (height_bytes, index_bytes) = disk_bytes.as_ref().split_at(HEIGHT_DISK_BYTES); diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap index d37e037cac7..3a1191beda9 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap @@ -1,6 +1,5 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs -assertion_line: 81 expression: cf_names --- [ @@ -25,6 +24,7 @@ expression: cf_names "tip_chain_value_pool", "tx_by_loc", "tx_loc_by_hash", + "tx_loc_by_spent_out_loc", "tx_loc_by_transparent_addr_loc", "utxo_by_out_loc", "utxo_loc_by_transparent_addr_loc", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap index 
3c333a9fc43..5511807d28c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap @@ -11,6 +11,7 @@ expression: empty_column_families "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", + "tx_loc_by_spent_out_loc: no entries", "tx_loc_by_transparent_addr_loc: no entries", "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_1.snap index cb8ac5f6aed..8fcb84c844f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_1.snap @@ -9,4 +9,5 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", + "tx_loc_by_spent_out_loc: no entries", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_2.snap index cb8ac5f6aed..8fcb84c844f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_2.snap @@ -9,4 +9,5 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", + "tx_loc_by_spent_out_loc: no entries", ] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap index a2abce2083b..e461b0d0f1e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap @@ -1,6 +1,5 @@ --- source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs -assertion_line: 166 expression: empty_column_families --- [ @@ -24,6 +23,7 @@ expression: empty_column_families "tip_chain_value_pool: no entries", "tx_by_loc: no entries", "tx_loc_by_hash: no entries", + "tx_loc_by_spent_out_loc: no entries", "tx_loc_by_transparent_addr_loc: no entries", "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap index 3c333a9fc43..5511807d28c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap @@ -11,6 +11,7 @@ expression: empty_column_families "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", + "tx_loc_by_spent_out_loc: no entries", "tx_loc_by_transparent_addr_loc: no entries", "utxo_by_out_loc: no entries", "utxo_loc_by_transparent_addr_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_1.snap index 
cb8ac5f6aed..8fcb84c844f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_1.snap @@ -9,4 +9,5 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", + "tx_loc_by_spent_out_loc: no entries", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_2.snap index cb8ac5f6aed..8fcb84c844f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_2.snap @@ -9,4 +9,5 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", + "tx_loc_by_spent_out_loc: no entries", ] diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index f8ce127843f..93625a848dc 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -2,10 +2,11 @@ use std::{ cmp::Ordering, - sync::{mpsc, Arc}, + sync::Arc, thread::{self, JoinHandle}, }; +use crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender, TryRecvError}; use semver::Version; use tracing::Span; @@ -28,6 +29,12 @@ pub(crate) mod add_subtrees; pub(crate) mod cache_genesis_roots; pub(crate) mod fix_tree_key_type; +#[cfg(not(feature = "indexer"))] +pub(crate) mod drop_tx_locs_by_spends; + +#[cfg(feature = "indexer")] +pub(crate) mod track_tx_locs_by_spends; + /// The kind of database 
format change or validity check we're performing. #[derive(Clone, Debug, Eq, PartialEq)] pub enum DbFormatChange { @@ -96,7 +103,7 @@ pub struct DbFormatChangeThreadHandle { update_task: Option>>>, /// A channel that tells the running format thread to finish early. - cancel_handle: mpsc::SyncSender, + cancel_handle: Sender, } /// Marker type that is sent to cancel a format upgrade, and returned as an error on cancellation. @@ -121,7 +128,7 @@ impl DbFormatChange { return NewlyCreated { running_version }; }; - match disk_version.cmp(&running_version) { + match disk_version.cmp_precedence(&running_version) { Ordering::Less => { info!( %running_version, @@ -228,7 +235,7 @@ impl DbFormatChange { // // Cancel handles must use try_send() to avoid blocking waiting for the format change // thread to shut down. - let (cancel_handle, cancel_receiver) = mpsc::sync_channel(1); + let (cancel_handle, cancel_receiver) = bounded(1); let span = Span::current(); let update_task = thread::spawn(move || { @@ -256,7 +263,7 @@ impl DbFormatChange { self, db: ZebraDb, initial_tip_height: Option, - cancel_receiver: mpsc::Receiver, + cancel_receiver: Receiver, ) -> Result<(), CancelFormatChange> { self.run_format_change_or_check(&db, initial_tip_height, &cancel_receiver)?; @@ -269,7 +276,7 @@ impl DbFormatChange { // But return early if there is a cancel signal. if !matches!( cancel_receiver.recv_timeout(debug_validity_check_interval), - Err(mpsc::RecvTimeoutError::Timeout) + Err(RecvTimeoutError::Timeout) ) { return Err(CancelFormatChange); } @@ -288,7 +295,7 @@ impl DbFormatChange { &self, db: &ZebraDb, initial_tip_height: Option, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { match self { // Perform any required upgrades, then mark the state as upgraded. @@ -337,6 +344,60 @@ impl DbFormatChange { } } + #[cfg(feature = "indexer")] + if let ( + Upgrade { .. } | CheckOpenCurrent { .. } | Downgrade { .. 
}, + Some(initial_tip_height), + ) = (self, initial_tip_height) + { + // Indexing transaction locations by their spent outpoints and revealed nullifiers. + let timer = CodeTimer::start(); + + // Add build metadata to on-disk version file just before starting to add indexes + let mut version = db + .format_version_on_disk() + .expect("unable to read database format version file") + .expect("should write database format version file above"); + version.build = db.format_version_in_code().build; + + db.update_format_version_on_disk(&version) + .expect("unable to write database format version file to disk"); + + info!("started checking/adding indexes for spending tx ids"); + track_tx_locs_by_spends::run(initial_tip_height, db, cancel_receiver)?; + info!("finished checking/adding indexes for spending tx ids"); + + timer.finish(module_path!(), line!(), "indexing spending transaction ids"); + }; + + #[cfg(not(feature = "indexer"))] + if let ( + Upgrade { .. } | CheckOpenCurrent { .. } | Downgrade { .. }, + Some(initial_tip_height), + ) = (self, initial_tip_height) + { + let mut version = db + .format_version_on_disk() + .expect("unable to read database format version file") + .expect("should write database format version file above"); + + if version.build.contains("indexer") { + // Indexing transaction locations by their spent outpoints and revealed nullifiers. + let timer = CodeTimer::start(); + + info!("started removing indexes for spending tx ids"); + drop_tx_locs_by_spends::run(initial_tip_height, db, cancel_receiver)?; + info!("finished removing indexes for spending tx ids"); + + // Remove build metadata to on-disk version file after indexes have been dropped. 
+ version.build = db.format_version_in_code().build; + db.update_format_version_on_disk(&version) + .expect("unable to write database format version file to disk"); + + timer.finish(module_path!(), line!(), "removing spending transaction ids"); + } + }; + // These checks should pass for all format changes: // - upgrades should produce a valid format (and they already do that check) // - an empty state should pass all the format checks @@ -381,7 +442,7 @@ impl DbFormatChange { &self, db: &ZebraDb, initial_tip_height: Option, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { let Upgrade { newer_running_version, @@ -433,7 +494,7 @@ impl DbFormatChange { // The block after genesis is the first possible duplicate. for (height, tree) in db.sapling_tree_by_height_range(Height(1)..=initial_tip_height) { // Return early if there is a cancel signal. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -460,7 +521,7 @@ impl DbFormatChange { // The block after genesis is the first possible duplicate. for (height, tree) in db.orchard_tree_by_height_range(Height(1)..=initial_tip_height) { // Return early if there is a cancel signal. 
- if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -598,7 +659,7 @@ impl DbFormatChange { #[allow(clippy::vec_init_then_push)] pub fn format_validity_checks_detailed( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { let timer = CodeTimer::start(); let mut results = Vec::new(); @@ -635,7 +696,7 @@ impl DbFormatChange { #[allow(clippy::unwrap_in_result)] fn check_for_duplicate_trees( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { // Runtime test: make sure we removed all duplicates. // We always run this test, even if the state has supposedly been upgraded. @@ -645,7 +706,7 @@ impl DbFormatChange { let mut prev_tree = None; for (height, tree) in db.sapling_tree_by_height_range(..) { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -667,7 +728,7 @@ impl DbFormatChange { let mut prev_tree = None; for (height, tree) in db.orchard_tree_by_height_range(..) { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs index d84392ebf84..8f47e4f28d7 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs @@ -1,7 +1,8 @@ //! 
Fully populate the Sapling and Orchard note commitment subtrees for existing blocks in the database. -use std::sync::{mpsc, Arc}; +use std::sync::Arc; +use crossbeam_channel::{Receiver, TryRecvError}; use hex_literal::hex; use itertools::Itertools; use tracing::instrument; @@ -30,7 +31,7 @@ use crate::service::finalized_state::{ pub fn run( initial_tip_height: Height, upgrade_db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { // # Consensus // @@ -65,7 +66,7 @@ pub fn run( for (prev_end_height, prev_tree, end_height, tree) in subtrees { // Return early if the upgrade is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -90,7 +91,7 @@ pub fn run( for (prev_end_height, prev_tree, end_height, tree) in subtrees { // Return early if the upgrade is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -110,10 +111,10 @@ pub fn run( pub fn reset( _initial_tip_height: Height, upgrade_db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { // Return early if the upgrade is cancelled. 
- if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -127,7 +128,7 @@ pub fn reset( .write_batch(batch) .expect("deleting old sapling note commitment subtrees is a valid database operation"); - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -306,7 +307,7 @@ fn quick_check_orchard_subtrees(db: &ZebraDb) -> Result<(), &'static str> { /// Check that note commitment subtrees were correctly added. pub fn subtree_format_validity_checks_detailed( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { // This is redundant in some code paths, but not in others. But it's quick anyway. let quick_result = subtree_format_calculation_pre_checks(db); @@ -332,7 +333,7 @@ pub fn subtree_format_validity_checks_detailed( /// Returns an error if a note commitment subtree is missing or incorrect. fn check_sapling_subtrees( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) = db.sapling_tree_for_tip().subtree_index() @@ -348,7 +349,7 @@ fn check_sapling_subtrees( let mut result = Ok(()); for index in 0..first_incomplete_subtree_index { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -418,7 +419,7 @@ fn check_sapling_subtrees( }) { // Return early if the format check is cancelled. 
- if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -462,7 +463,7 @@ fn check_sapling_subtrees( /// Returns an error if a note commitment subtree is missing or incorrect. fn check_orchard_subtrees( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) = db.orchard_tree_for_tip().subtree_index() @@ -478,7 +479,7 @@ fn check_orchard_subtrees( let mut result = Ok(()); for index in 0..first_incomplete_subtree_index { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -548,7 +549,7 @@ fn check_orchard_subtrees( }) { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs index 57fcacb9d5b..186cfe5f51c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs @@ -3,8 +3,7 @@ //! This reduces CPU usage when the genesis tree roots are used for transaction validation. //! Since mempool transactions are cheap to create, this is a potential remote denial of service. 
-use std::sync::mpsc; - +use crossbeam_channel::{Receiver, TryRecvError}; use zebra_chain::{block::Height, sprout}; use crate::service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb}; @@ -23,7 +22,7 @@ use super::CancelFormatChange; pub fn run( _initial_tip_height: Height, upgrade_db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { let sprout_genesis_tree = sprout::tree::NoteCommitmentTree::default(); let sprout_tip_tree = upgrade_db.sprout_tree_for_tip(); @@ -50,7 +49,7 @@ pub fn run( batch.create_orchard_tree(upgrade_db, &Height(0), &orchard_genesis_tree); // Return before we write if the upgrade is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -126,7 +125,7 @@ pub fn quick_check(db: &ZebraDb) -> Result<(), String> { /// If the state is empty. pub fn detailed_check( db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { // This is redundant in some code paths, but not in others. But it's quick anyway. // Check the entire format before returning any errors. @@ -134,7 +133,7 @@ pub fn detailed_check( for (root, tree) in db.sprout_trees_full_map() { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -149,7 +148,7 @@ pub fn detailed_check( for (height, tree) in db.sapling_tree_by_height_range(..) { // Return early if the format check is cancelled. 
- if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -164,7 +163,7 @@ pub fn detailed_check( for (height, tree) in db.orchard_tree_by_height_range(..) { // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/drop_tx_locs_by_spends.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/drop_tx_locs_by_spends.rs new file mode 100644 index 00000000000..cfc82b1aec1 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/drop_tx_locs_by_spends.rs @@ -0,0 +1,74 @@ +//! Tracks transaction locations by their inputs and revealed nullifiers. + +use crossbeam_channel::{Receiver, TryRecvError}; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +use zebra_chain::block::Height; + +use crate::service::finalized_state::ZebraDb; + +use super::{super::super::DiskWriteBatch, CancelFormatChange}; + +/// Runs disk format upgrade for tracking transaction locations by their inputs and revealed nullifiers. +/// +/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. 
+#[allow(clippy::unwrap_in_result)] +#[instrument(skip(zebra_db, cancel_receiver))] +pub fn run( + initial_tip_height: Height, + zebra_db: &ZebraDb, + cancel_receiver: &Receiver, +) -> Result<(), CancelFormatChange> { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + let _ = zebra_db + .tx_loc_by_spent_output_loc_cf() + .new_batch_for_writing() + .zs_delete_range( + &crate::OutputLocation::from_output_index(crate::TransactionLocation::MIN, 0), + &crate::OutputLocation::from_output_index(crate::TransactionLocation::MAX, u32::MAX), + ) + .write_batch(); + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + (0..=initial_tip_height.0) + .into_par_iter() + .try_for_each(|height| { + let height = Height(height); + let mut batch = DiskWriteBatch::new(); + + let transactions = zebra_db.transactions_by_location_range( + crate::TransactionLocation::from_index(height, 1) + ..=crate::TransactionLocation::max_for_height(height), + ); + + for (_tx_loc, tx) in transactions { + if tx.is_coinbase() { + continue; + } + + batch + .prepare_nullifier_batch(zebra_db, &tx) + .expect("method should never return an error"); + } + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + zebra_db + .write_batch(batch) + .expect("unexpected database write failure"); + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + Ok(()) + }) +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs index 4bcd5d8cd4c..25665f419c1 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs @@ -2,8 +2,9 @@ //! //! 
This avoids a potential concurrency bug, and a known database performance issue. -use std::sync::{mpsc, Arc}; +use std::sync::Arc; +use crossbeam_channel::{Receiver, TryRecvError}; use zebra_chain::{block::Height, history_tree::HistoryTree, sprout}; use crate::service::finalized_state::{ @@ -20,7 +21,7 @@ use super::CancelFormatChange; pub fn run( _initial_tip_height: Height, upgrade_db: &ZebraDb, - cancel_receiver: &mpsc::Receiver, + cancel_receiver: &Receiver, ) -> Result<(), CancelFormatChange> { let sprout_tip_tree = upgrade_db.sprout_tree_for_tip(); let history_tip_tree = upgrade_db.history_tree(); @@ -33,7 +34,7 @@ pub fn run( batch.update_history_tree(upgrade_db, &history_tip_tree); // Return before we write if the upgrade is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -51,7 +52,7 @@ pub fn run( batch.delete_range_history_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); // Return before we write if the upgrade is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { return Err(CancelFormatChange); } @@ -144,7 +145,7 @@ pub fn quick_check(db: &ZebraDb) -> Result<(), String> { /// If the state is empty. pub fn detailed_check( db: &ZebraDb, - _cancel_receiver: &mpsc::Receiver, + _cancel_receiver: &Receiver, ) -> Result, CancelFormatChange> { // This upgrade only changes two key-value pairs, so checking it is always quick. Ok(quick_check(db)) diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/track_tx_locs_by_spends.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/track_tx_locs_by_spends.rs new file mode 100644 index 00000000000..be754fac452 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/track_tx_locs_by_spends.rs @@ -0,0 +1,109 @@ +//! 
Tracks transaction locations by their inputs and revealed nullifiers. + +use std::sync::Arc; + +use crossbeam_channel::{Receiver, TryRecvError}; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +use zebra_chain::block::Height; + +use crate::{ + service::{finalized_state::ZebraDb, non_finalized_state::Chain, read}, + Spend, +}; + +use super::{super::super::DiskWriteBatch, CancelFormatChange}; + +/// Runs disk format upgrade for tracking transaction locations by their inputs and revealed nullifiers. +/// +/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. +#[allow(clippy::unwrap_in_result)] +#[instrument(skip(zebra_db, cancel_receiver))] +pub fn run( + initial_tip_height: Height, + zebra_db: &ZebraDb, + cancel_receiver: &Receiver, +) -> Result<(), CancelFormatChange> { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + (0..=initial_tip_height.0) + .into_par_iter() + .try_for_each(|height| { + let height = Height(height); + let mut batch = DiskWriteBatch::new(); + let mut should_index_at_height = false; + + let transactions = zebra_db.transactions_by_location_range( + crate::TransactionLocation::from_index(height, 1) + ..=crate::TransactionLocation::max_for_height(height), + ); + + for (tx_loc, tx) in transactions { + if tx.is_coinbase() { + continue; + } + + if !should_index_at_height { + if let Some(spend) = tx + .inputs() + .iter() + .filter_map(|input| Some(input.outpoint()?.into())) + .chain(tx.sprout_nullifiers().cloned().map(Spend::from)) + .chain(tx.sapling_nullifiers().cloned().map(Spend::from)) + .chain(tx.orchard_nullifiers().cloned().map(Spend::from)) + .next() + { + if read::spending_transaction_hash::>(None, zebra_db, spend) + .is_some() + { + // Skip transactions in blocks with existing indexes + return Ok(()); + } else { + should_index_at_height = true + } + } else { + continue; + }; + } + + for input in tx.inputs() { + if 
!matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + let spent_outpoint = input + .outpoint() + .expect("should filter out coinbase transactions"); + + let spent_output_location = zebra_db + .output_location(&spent_outpoint) + .expect("should have location for spent outpoint"); + + let _ = zebra_db + .tx_loc_by_spent_output_loc_cf() + .with_batch_for_writing(&mut batch) + .zs_insert(&spent_output_location, &tx_loc); + } + + batch + .prepare_nullifier_batch(zebra_db, &tx, tx_loc) + .expect("method should never return an error"); + } + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + zebra_db + .write_batch(batch) + .expect("unexpected database write failure"); + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + Ok(()) + }) +} diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index b7ae76ea3a1..951c78ec93c 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -9,12 +9,11 @@ //! [`crate::constants::state_database_format_version_in_code()`] must be incremented //! each time the database format (column, serialization, etc) changes. -use std::{ - path::Path, - sync::{mpsc, Arc}, -}; +use std::{path::Path, sync::Arc}; +use crossbeam_channel::bounded; use semver::Version; + use zebra_chain::parameters::Network; use crate::{ @@ -292,7 +291,7 @@ impl ZebraDb { if let Some(disk_version) = disk_version { // We need to keep the cancel handle until the format check has finished, // because dropping it cancels the format check. - let (_never_cancel_handle, never_cancel_receiver) = mpsc::sync_channel(1); + let (_never_cancel_handle, never_cancel_receiver) = bounded(1); // We block here because the checks are quick and database validity is // consensus-critical. 
diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 4dc3a801ef3..6ad4cd93a60 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -11,6 +11,7 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, + ops::RangeBounds, sync::Arc, }; @@ -42,6 +43,9 @@ use crate::{ BoxError, HashOrHeight, }; +#[cfg(feature = "indexer")] +use crate::request::Spend; + #[cfg(test)] mod tests; @@ -139,25 +143,17 @@ impl ZebraDb { let header = self.block_header(height.into())?; // Transactions - let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); - - // Manually fetch the entire block's transactions - let mut transactions = Vec::new(); // TODO: // - split disk reads from deserialization, and run deserialization in parallel, // this improves performance for blocks with multiple large shielded transactions // - is this loop more efficient if we store the number of transactions? // - is the difference large enough to matter? - for tx_index in 0..=Transaction::max_allocation() { - let tx_loc = TransactionLocation::from_u64(height, tx_index); - - if let Some(tx) = self.db.zs_get(&tx_by_loc, &tx_loc) { - transactions.push(tx); - } else { - break; - } - } + let transactions = self + .transactions_by_height(height) + .map(|(_, tx)| tx) + .map(Arc::new) + .collect(); Some(Arc::new(Block { header, @@ -212,6 +208,45 @@ impl ZebraDb { // Read transaction methods + /// Returns the [`Transaction`] with [`transaction::Hash`], and its [`Height`], + /// if a transaction with that hash exists in the finalized chain. 
+ #[allow(clippy::unwrap_in_result)] + pub fn transaction(&self, hash: transaction::Hash) -> Option<(Arc, Height)> { + let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); + + let transaction_location = self.transaction_location(hash)?; + + self.db + .zs_get(&tx_by_loc, &transaction_location) + .map(|tx| (tx, transaction_location.height)) + } + + /// Returns an iterator of all [`Transaction`]s for a provided block height in finalized state. + #[allow(clippy::unwrap_in_result)] + pub fn transactions_by_height( + &self, + height: Height, + ) -> impl Iterator + '_ { + self.transactions_by_location_range( + TransactionLocation::min_for_height(height) + ..=TransactionLocation::max_for_height(height), + ) + } + + /// Returns an iterator of all [`Transaction`]s in the provided range + /// of [`TransactionLocation`]s in finalized state. + #[allow(clippy::unwrap_in_result)] + pub fn transactions_by_location_range( + &self, + range: R, + ) -> impl Iterator + '_ + where + R: RangeBounds, + { + let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); + self.db.zs_forward_range_iter(tx_by_loc, range) + } + /// Returns the [`TransactionLocation`] for [`transaction::Hash`], /// if it exists in the finalized chain. #[allow(clippy::unwrap_in_result)] @@ -229,19 +264,18 @@ impl ZebraDb { self.db.zs_get(&hash_by_tx_loc, &location) } - /// Returns the [`Transaction`] with [`transaction::Hash`], and its [`Height`], - /// if a transaction with that hash exists in the finalized chain. 
- // - // TODO: move this method to the start of the section - #[allow(clippy::unwrap_in_result)] - pub fn transaction(&self, hash: transaction::Hash) -> Option<(Arc, Height)> { - let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); - - let transaction_location = self.transaction_location(hash)?; - - self.db - .zs_get(&tx_by_loc, &transaction_location) - .map(|tx| (tx, transaction_location.height)) + /// Returns the [`transaction::Hash`] of the transaction that spent or revealed the given + /// [`transparent::OutPoint`] or nullifier, if it is spent or revealed in the finalized state. + #[cfg(feature = "indexer")] + pub fn spending_transaction_hash(&self, spend: &Spend) -> Option { + let tx_loc = match spend { + Spend::OutPoint(outpoint) => self.spending_tx_loc(outpoint)?, + Spend::Sprout(nullifier) => self.sprout_revealing_tx_loc(nullifier)?, + Spend::Sapling(nullifier) => self.sapling_revealing_tx_loc(nullifier)?, + Spend::Orchard(nullifier) => self.orchard_revealing_tx_loc(nullifier)?, + }; + + self.transaction_hash(tx_loc) } /// Returns the [`transaction::Hash`]es in the block with `hash_or_height`, @@ -355,6 +389,13 @@ impl ZebraDb { .iter() .map(|(outpoint, _output_loc, utxo)| (*outpoint, utxo.clone())) .collect(); + + // TODO: Add `OutputLocation`s to the values in `spent_utxos_by_outpoint` to avoid creating a second hashmap with the same keys + #[cfg(feature = "indexer")] + let out_loc_by_outpoint: HashMap = spent_utxos + .iter() + .map(|(outpoint, out_loc, _utxo)| (*outpoint, *out_loc)) + .collect(); let spent_utxos_by_out_loc: BTreeMap = spent_utxos .into_iter() .map(|(_outpoint, out_loc, utxo)| (out_loc, utxo)) @@ -392,6 +433,8 @@ impl ZebraDb { new_outputs_by_out_loc, spent_utxos_by_outpoint, spent_utxos_by_out_loc, + #[cfg(feature = "indexer")] + out_loc_by_outpoint, address_balances, self.finalized_value_pool(), prev_note_commitment_trees, @@ -448,6 +491,10 @@ impl DiskWriteBatch { new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, 
spent_utxos_by_out_loc: BTreeMap, + #[cfg(feature = "indexer")] out_loc_by_outpoint: HashMap< + transparent::OutPoint, + OutputLocation, + >, address_balances: HashMap, value_pool: ValueBalance, prev_note_commitment_trees: Option, @@ -463,7 +510,7 @@ impl DiskWriteBatch { // which is already present from height 1 to the first shielded transaction. // // In Zebra we include the nullifiers and note commitments in the genesis block because it simplifies our code. - self.prepare_shielded_transaction_batch(db, finalized)?; + self.prepare_shielded_transaction_batch(zebra_db, finalized)?; self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; // # Consensus @@ -479,12 +526,14 @@ impl DiskWriteBatch { if !finalized.height.is_min() { // Commit transaction indexes self.prepare_transparent_transaction_batch( - db, + zebra_db, network, finalized, &new_outputs_by_out_loc, &spent_utxos_by_outpoint, &spent_utxos_by_out_loc, + #[cfg(feature = "indexer")] + &out_loc_by_outpoint, address_balances, )?; diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 4bba75b1891..b4036a31e5f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -29,11 +29,11 @@ use zebra_chain::{ use crate::{ request::{FinalizedBlock, Treestate}, service::finalized_state::{ - disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, + disk_db::{DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::RawBytes, zebra_db::ZebraDb, }, - BoxError, + BoxError, TransactionLocation, }; // Doc-only items @@ -61,6 +61,42 @@ impl ZebraDb { self.db.zs_contains(&orchard_nullifiers, &orchard_nullifier) } + /// Returns the [`TransactionLocation`] of the transaction that revealed + /// the given [`sprout::Nullifier`], if it is revealed in the finalized state and its + /// spending transaction hash has been indexed. 
+ #[allow(clippy::unwrap_in_result)] + pub fn sprout_revealing_tx_loc( + &self, + sprout_nullifier: &sprout::Nullifier, + ) -> Option { + let sprout_nullifiers = self.db.cf_handle("sprout_nullifiers").unwrap(); + self.db.zs_get(&sprout_nullifiers, &sprout_nullifier)? + } + + /// Returns the [`TransactionLocation`] of the transaction that revealed + /// the given [`sapling::Nullifier`], if it is revealed in the finalized state and its + /// spending transaction hash has been indexed. + #[allow(clippy::unwrap_in_result)] + pub fn sapling_revealing_tx_loc( + &self, + sapling_nullifier: &sapling::Nullifier, + ) -> Option { + let sapling_nullifiers = self.db.cf_handle("sapling_nullifiers").unwrap(); + self.db.zs_get(&sapling_nullifiers, &sapling_nullifier)? + } + + /// Returns the [`TransactionLocation`] of the transaction that revealed + /// the given [`orchard::Nullifier`], if it is revealed in the finalized state and its + /// spending transaction hash has been indexed. + #[allow(clippy::unwrap_in_result)] + pub fn orchard_revealing_tx_loc( + &self, + orchard_nullifier: &orchard::Nullifier, + ) -> Option { + let orchard_nullifiers = self.db.cf_handle("orchard_nullifiers").unwrap(); + self.db.zs_get(&orchard_nullifiers, &orchard_nullifier)? + } + /// Returns `true` if the finalized state contains `sprout_anchor`. #[allow(dead_code)] pub fn contains_sprout_anchor(&self, sprout_anchor: &sprout::tree::Root) -> bool { @@ -437,14 +473,22 @@ impl DiskWriteBatch { /// - Propagates any errors from updating note commitment trees pub fn prepare_shielded_transaction_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, finalized: &FinalizedBlock, ) -> Result<(), BoxError> { - let FinalizedBlock { block, .. } = finalized; + #[cfg(feature = "indexer")] + let FinalizedBlock { block, height, .. 
} = finalized; // Index each transaction's shielded data - for transaction in &block.transactions { - self.prepare_nullifier_batch(db, transaction)?; + #[cfg(feature = "indexer")] + for (tx_index, transaction) in block.transactions.iter().enumerate() { + let tx_loc = TransactionLocation::from_usize(*height, tx_index); + self.prepare_nullifier_batch(zebra_db, transaction, tx_loc)?; + } + + #[cfg(not(feature = "indexer"))] + for transaction in &finalized.block.transactions { + self.prepare_nullifier_batch(zebra_db, transaction)?; } Ok(()) @@ -459,22 +503,29 @@ impl DiskWriteBatch { #[allow(clippy::unwrap_in_result)] pub fn prepare_nullifier_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, transaction: &Transaction, + #[cfg(feature = "indexer")] transaction_location: TransactionLocation, ) -> Result<(), BoxError> { + let db = &zebra_db.db; let sprout_nullifiers = db.cf_handle("sprout_nullifiers").unwrap(); let sapling_nullifiers = db.cf_handle("sapling_nullifiers").unwrap(); let orchard_nullifiers = db.cf_handle("orchard_nullifiers").unwrap(); + #[cfg(feature = "indexer")] + let insert_value = transaction_location; + #[cfg(not(feature = "indexer"))] + let insert_value = (); + // Mark sprout, sapling and orchard nullifiers as spent for sprout_nullifier in transaction.sprout_nullifiers() { - self.zs_insert(&sprout_nullifiers, sprout_nullifier, ()); + self.zs_insert(&sprout_nullifiers, sprout_nullifier, insert_value); } for sapling_nullifier in transaction.sapling_nullifiers() { - self.zs_insert(&sapling_nullifiers, sapling_nullifier, ()); + self.zs_insert(&sapling_nullifiers, sapling_nullifier, insert_value); } for orchard_nullifier in transaction.orchard_nullifiers() { - self.zs_insert(&orchard_nullifiers, orchard_nullifier, ()); + self.zs_insert(&orchard_nullifiers, orchard_nullifier, insert_value); } Ok(()) diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 
edfcf509b76..06a09d803e6 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -1,5 +1,6 @@ //! Provides high-level access to database: -//! - unspent [`transparent::Output`]s (UTXOs), and +//! - unspent [`transparent::Output`]s (UTXOs), +//! - spent [`transparent::Output`]s, and //! - transparent address indexes. //! //! This module makes sure that: @@ -40,9 +41,42 @@ use crate::{ BoxError, }; +use super::super::TypedColumnFamily; + +/// The name of the transaction hash by spent outpoints column family. +/// +/// This constant should be used so the compiler can detect typos. +pub const TX_LOC_BY_SPENT_OUT_LOC: &str = "tx_loc_by_spent_out_loc"; + +/// The type for reading value pools from the database. +/// +/// This constant should be used so the compiler can detect incorrectly typed accesses to the +/// column family. +pub type TransactionLocationBySpentOutputLocationCf<'cf> = + TypedColumnFamily<'cf, OutputLocation, TransactionLocation>; + impl ZebraDb { + // Column family convenience methods + + /// Returns a typed handle to the transaction location by spent output location column family. + pub(crate) fn tx_loc_by_spent_output_loc_cf( + &self, + ) -> TransactionLocationBySpentOutputLocationCf { + TransactionLocationBySpentOutputLocationCf::new(&self.db, TX_LOC_BY_SPENT_OUT_LOC) + .expect("column family was created when database was created") + } + // Read transparent methods + /// Returns the [`TransactionLocation`] for a transaction that spent the output + /// at the provided [`OutputLocation`], if it is in the finalized state. + pub fn tx_location_by_spent_output_location( + &self, + output_location: &OutputLocation, + ) -> Option { + self.tx_loc_by_spent_output_loc_cf().zs_get(output_location) + } + /// Returns the [`AddressBalanceLocation`] for a [`transparent::Address`], /// if it is in the finalized state. 
#[allow(clippy::unwrap_in_result)] @@ -90,6 +124,14 @@ impl ZebraDb { self.utxo_by_location(output_location) } + /// Returns the [`TransactionLocation`] of the transaction that spent the given + /// [`transparent::OutPoint`], if it is unspent in the finalized state and its + /// spending transaction hash has been indexed. + pub fn spending_tx_loc(&self, outpoint: &transparent::OutPoint) -> Option { + let output_location = self.output_location(outpoint)?; + self.tx_location_by_spent_output_location(&output_location) + } + /// Returns the transparent output for an [`OutputLocation`], /// if it is unspent in the finalized state. #[allow(clippy::unwrap_in_result)] @@ -342,14 +384,19 @@ impl DiskWriteBatch { #[allow(clippy::too_many_arguments)] pub fn prepare_transparent_transaction_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, network: &Network, finalized: &FinalizedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, + #[cfg(feature = "indexer")] out_loc_by_outpoint: &HashMap< + transparent::OutPoint, + OutputLocation, + >, mut address_balances: HashMap, ) -> Result<(), BoxError> { + let db = &zebra_db.db; let FinalizedBlock { block, height, .. 
} = finalized; // Update created and spent transparent outputs @@ -371,11 +418,13 @@ impl DiskWriteBatch { let spending_tx_location = TransactionLocation::from_usize(*height, tx_index); self.prepare_spending_transparent_tx_ids_batch( - db, + zebra_db, network, spending_tx_location, transaction, spent_utxos_by_outpoint, + #[cfg(feature = "indexer")] + out_loc_by_outpoint, &address_balances, )?; } @@ -531,16 +580,21 @@ impl DiskWriteBatch { /// # Errors /// /// - This method doesn't currently return any errors, but it might in future - #[allow(clippy::unwrap_in_result)] + #[allow(clippy::unwrap_in_result, clippy::too_many_arguments)] pub fn prepare_spending_transparent_tx_ids_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, network: &Network, spending_tx_location: TransactionLocation, transaction: &Transaction, spent_utxos_by_outpoint: &HashMap, + #[cfg(feature = "indexer")] out_loc_by_outpoint: &HashMap< + transparent::OutPoint, + OutputLocation, + >, address_balances: &HashMap, ) -> Result<(), BoxError> { + let db = &zebra_db.db; let tx_loc_by_transparent_addr_loc = db.cf_handle("tx_loc_by_transparent_addr_loc").unwrap(); @@ -569,6 +623,18 @@ impl DiskWriteBatch { AddressTransaction::new(sending_address_location, spending_tx_location); self.zs_insert(&tx_loc_by_transparent_addr_loc, address_transaction, ()); } + + #[cfg(feature = "indexer")] + { + let spent_output_location = out_loc_by_outpoint + .get(&spent_outpoint) + .expect("spent outpoints must already have output locations"); + + let _ = zebra_db + .tx_loc_by_spent_output_loc_cf() + .with_batch_for_writing(self) + .zs_insert(spent_output_location, &spending_tx_location); + } } Ok(()) diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 08d64455024..ebcbb2cfd35 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -26,7 +26,7 @@ mod chain; #[cfg(test)] mod tests; -pub(crate) use 
chain::Chain; +pub(crate) use chain::{Chain, SpendingTransactionId}; /// The state of the chains in memory, including queued blocks. /// @@ -540,7 +540,7 @@ impl NonFinalizedState { #[allow(dead_code)] pub fn best_contains_sprout_nullifier(&self, sprout_nullifier: &sprout::Nullifier) -> bool { self.best_chain() - .map(|best_chain| best_chain.sprout_nullifiers.contains(sprout_nullifier)) + .map(|best_chain| best_chain.sprout_nullifiers.contains_key(sprout_nullifier)) .unwrap_or(false) } @@ -552,7 +552,11 @@ impl NonFinalizedState { sapling_nullifier: &zebra_chain::sapling::Nullifier, ) -> bool { self.best_chain() - .map(|best_chain| best_chain.sapling_nullifiers.contains(sapling_nullifier)) + .map(|best_chain| { + best_chain + .sapling_nullifiers + .contains_key(sapling_nullifier) + }) .unwrap_or(false) } @@ -564,7 +568,11 @@ impl NonFinalizedState { orchard_nullifier: &zebra_chain::orchard::Nullifier, ) -> bool { self.best_chain() - .map(|best_chain| best_chain.orchard_nullifiers.contains(orchard_nullifier)) + .map(|best_chain| { + best_chain + .orchard_nullifiers + .contains_key(orchard_nullifier) + }) .unwrap_or(false) } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 0dfcd585c12..6ad284a23f5 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -33,6 +33,9 @@ use crate::{ TransactionLocation, ValidateContextError, }; +#[cfg(feature = "indexer")] +use crate::request::Spend; + use self::index::TransparentTransfers; pub mod index; @@ -67,6 +70,14 @@ pub struct Chain { pub(super) last_fork_height: Option, } +/// Spending transaction id type when the `indexer` feature is selected. +#[cfg(feature = "indexer")] +pub(crate) type SpendingTransactionId = transaction::Hash; + +/// Spending transaction id type when the `indexer` feature is not selected. 
+#[cfg(not(feature = "indexer"))] +pub(crate) type SpendingTransactionId = (); + /// The internal state of [`Chain`]. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ChainInner { @@ -90,9 +101,11 @@ pub struct ChainInner { // // TODO: replace OutPoint with OutputLocation? pub(crate) created_utxos: HashMap, - /// The [`transparent::OutPoint`]s spent by `blocks`, - /// including those created by earlier transactions or blocks in the chain. - pub(crate) spent_utxos: HashSet, + /// The spending transaction ids by [`transparent::OutPoint`]s spent by `blocks`, + /// including spent outputs created by earlier transactions or blocks in the chain. + /// + /// Note: Spending transaction ids are only tracked when the `indexer` feature is selected. + pub(crate) spent_utxos: HashMap, // Note commitment trees // @@ -176,12 +189,15 @@ pub struct ChainInner { // Nullifiers // - /// The Sprout nullifiers revealed by `blocks`. - pub(crate) sprout_nullifiers: HashSet, - /// The Sapling nullifiers revealed by `blocks`. - pub(crate) sapling_nullifiers: HashSet, - /// The Orchard nullifiers revealed by `blocks`. - pub(crate) orchard_nullifiers: HashSet, + /// The Sprout nullifiers revealed by `blocks` and, if the `indexer` feature is selected, + /// the id of the transaction that revealed them. + pub(crate) sprout_nullifiers: HashMap, + /// The Sapling nullifiers revealed by `blocks` and, if the `indexer` feature is selected, + /// the id of the transaction that revealed them. + pub(crate) sapling_nullifiers: HashMap, + /// The Orchard nullifiers revealed by `blocks` and, if the `indexer` feature is selected, + /// the id of the transaction that revealed them. + pub(crate) orchard_nullifiers: HashMap, // Transparent Transfers // TODO: move to the transparent section @@ -1234,7 +1250,7 @@ impl Chain { /// and removed from the relevant chain(s). 
pub fn unspent_utxos(&self) -> HashMap { let mut unspent_utxos = self.created_utxos.clone(); - unspent_utxos.retain(|outpoint, _utxo| !self.spent_utxos.contains(outpoint)); + unspent_utxos.retain(|outpoint, _utxo| !self.spent_utxos.contains_key(outpoint)); unspent_utxos } @@ -1244,11 +1260,23 @@ impl Chain { /// /// UTXOs are returned regardless of whether they have been spent. pub fn created_utxo(&self, outpoint: &transparent::OutPoint) -> Option { - if let Some(utxo) = self.created_utxos.get(outpoint) { - return Some(utxo.utxo.clone()); + self.created_utxos + .get(outpoint) + .map(|utxo| utxo.utxo.clone()) + } + + /// Returns the [`Hash`](transaction::Hash) of the transaction that spent an output at + /// the provided [`transparent::OutPoint`] or revealed the provided nullifier, if it exists + /// and is spent or revealed by this chain. + #[cfg(feature = "indexer")] + pub fn spending_transaction_hash(&self, spend: &Spend) -> Option { + match spend { + Spend::OutPoint(outpoint) => self.spent_utxos.get(outpoint), + Spend::Sprout(nullifier) => self.sprout_nullifiers.get(nullifier), + Spend::Sapling(nullifier) => self.sapling_nullifiers.get(nullifier), + Spend::Orchard(nullifier) => self.orchard_nullifiers.get(nullifier), } - - None + .cloned() } // Address index queries @@ -1536,10 +1564,17 @@ impl Chain { self.update_chain_tip_with(&(inputs, &transaction_hash, spent_outputs))?; // add the shielded data - self.update_chain_tip_with(joinsplit_data)?; - self.update_chain_tip_with(sapling_shielded_data_per_spend_anchor)?; - self.update_chain_tip_with(sapling_shielded_data_shared_anchor)?; - self.update_chain_tip_with(orchard_shielded_data)?; + + #[cfg(not(feature = "indexer"))] + let transaction_hash = (); + + self.update_chain_tip_with(&(joinsplit_data, &transaction_hash))?; + self.update_chain_tip_with(&( + sapling_shielded_data_per_spend_anchor, + &transaction_hash, + ))?; + self.update_chain_tip_with(&(sapling_shielded_data_shared_anchor, &transaction_hash))?; + 
self.update_chain_tip_with(&(orchard_shielded_data, &transaction_hash))?; } // update the chain value pool balances @@ -1694,10 +1729,20 @@ impl UpdateWith for Chain { ); // remove the shielded data - self.revert_chain_with(joinsplit_data, position); - self.revert_chain_with(sapling_shielded_data_per_spend_anchor, position); - self.revert_chain_with(sapling_shielded_data_shared_anchor, position); - self.revert_chain_with(orchard_shielded_data, position); + + #[cfg(not(feature = "indexer"))] + let transaction_hash = &(); + + self.revert_chain_with(&(joinsplit_data, transaction_hash), position); + self.revert_chain_with( + &(sapling_shielded_data_per_spend_anchor, transaction_hash), + position, + ); + self.revert_chain_with( + &(sapling_shielded_data_shared_anchor, transaction_hash), + position, + ); + self.revert_chain_with(&(orchard_shielded_data, transaction_hash), position); } // TODO: move these to the shielded UpdateWith.revert...()? @@ -1838,10 +1883,18 @@ impl continue; }; + #[cfg(feature = "indexer")] + let insert_value = *spending_tx_hash; + #[cfg(not(feature = "indexer"))] + let insert_value = (); + // Index the spent outpoint in the chain - let first_spend = self.spent_utxos.insert(spent_outpoint); + let was_spend_newly_inserted = self + .spent_utxos + .insert(spent_outpoint, insert_value) + .is_none(); assert!( - first_spend, + was_spend_newly_inserted, "unexpected duplicate spent output: should be checked earlier" ); @@ -1889,9 +1942,9 @@ impl }; // Revert the spent outpoint in the chain - let spent_outpoint_was_removed = self.spent_utxos.remove(&spent_outpoint); + let was_spent_outpoint_removed = self.spent_utxos.remove(&spent_outpoint).is_some(); assert!( - spent_outpoint_was_removed, + was_spent_outpoint_removed, "spent_utxos must be present if block was added to chain" ); @@ -1926,11 +1979,19 @@ impl } } -impl UpdateWith>> for Chain { +impl + UpdateWith<( + &Option>, + &SpendingTransactionId, + )> for Chain +{ #[instrument(skip(self, 
joinsplit_data))] fn update_chain_tip_with( &mut self, - joinsplit_data: &Option>, + &(joinsplit_data, revealing_tx_id): &( + &Option>, + &SpendingTransactionId, + ), ) -> Result<(), ValidateContextError> { if let Some(joinsplit_data) = joinsplit_data { // We do note commitment tree updates in parallel rayon threads. @@ -1938,6 +1999,7 @@ impl UpdateWith>> for Chain { check::nullifier::add_to_non_finalized_chain_unique( &mut self.sprout_nullifiers, joinsplit_data.nullifiers(), + *revealing_tx_id, )?; } Ok(()) @@ -1951,7 +2013,10 @@ impl UpdateWith>> for Chain { #[instrument(skip(self, joinsplit_data))] fn revert_chain_with( &mut self, - joinsplit_data: &Option>, + &(joinsplit_data, _revealing_tx_id): &( + &Option>, + &SpendingTransactionId, + ), _position: RevertPosition, ) { if let Some(joinsplit_data) = joinsplit_data { @@ -1967,14 +2032,21 @@ impl UpdateWith>> for Chain { } } -impl UpdateWith>> for Chain +impl + UpdateWith<( + &Option>, + &SpendingTransactionId, + )> for Chain where AnchorV: sapling::AnchorVariant + Clone, { #[instrument(skip(self, sapling_shielded_data))] fn update_chain_tip_with( &mut self, - sapling_shielded_data: &Option>, + &(sapling_shielded_data, revealing_tx_id): &( + &Option>, + &SpendingTransactionId, + ), ) -> Result<(), ValidateContextError> { if let Some(sapling_shielded_data) = sapling_shielded_data { // We do note commitment tree updates in parallel rayon threads. 
@@ -1982,6 +2054,7 @@ where check::nullifier::add_to_non_finalized_chain_unique( &mut self.sapling_nullifiers, sapling_shielded_data.nullifiers(), + *revealing_tx_id, )?; } Ok(()) @@ -1995,7 +2068,10 @@ where #[instrument(skip(self, sapling_shielded_data))] fn revert_chain_with( &mut self, - sapling_shielded_data: &Option>, + &(sapling_shielded_data, _revealing_tx_id): &( + &Option>, + &SpendingTransactionId, + ), _position: RevertPosition, ) { if let Some(sapling_shielded_data) = sapling_shielded_data { @@ -2011,11 +2087,14 @@ where } } -impl UpdateWith> for Chain { +impl UpdateWith<(&Option, &SpendingTransactionId)> for Chain { #[instrument(skip(self, orchard_shielded_data))] fn update_chain_tip_with( &mut self, - orchard_shielded_data: &Option, + &(orchard_shielded_data, revealing_tx_id): &( + &Option, + &SpendingTransactionId, + ), ) -> Result<(), ValidateContextError> { if let Some(orchard_shielded_data) = orchard_shielded_data { // We do note commitment tree updates in parallel rayon threads. 
@@ -2023,6 +2102,7 @@ impl UpdateWith> for Chain { check::nullifier::add_to_non_finalized_chain_unique( &mut self.orchard_nullifiers, orchard_shielded_data.nullifiers(), + *revealing_tx_id, )?; } Ok(()) @@ -2036,7 +2116,10 @@ impl UpdateWith> for Chain { #[instrument(skip(self, orchard_shielded_data))] fn revert_chain_with( &mut self, - orchard_shielded_data: &Option, + (orchard_shielded_data, _revealing_tx_id): &( + &Option, + &SpendingTransactionId, + ), _position: RevertPosition, ) { if let Some(orchard_shielded_data) = orchard_shielded_data { diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index 0188ca1bf5e..0b0ece3a358 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -33,6 +33,10 @@ pub use address::{ pub use block::{ any_utxo, block, block_header, mined_transaction, transaction_hashes_for_block, unspent_utxo, }; + +#[cfg(feature = "indexer")] +pub use block::spending_transaction_hash; + pub use find::{ best_tip, block_locator, depth, finalized_state_contains_block_hash, find_chain_hashes, find_chain_headers, hash_by_height, height_by_hash, next_median_time_past, diff --git a/zebra-state/src/service/read/block.rs b/zebra-state/src/service/read/block.rs index 283fe9ddc4f..99d4189a3e4 100644 --- a/zebra-state/src/service/read/block.rs +++ b/zebra-state/src/service/read/block.rs @@ -30,6 +30,9 @@ use crate::{ HashOrHeight, }; +#[cfg(feature = "indexer")] +use crate::request::Spend; + /// Returns the [`Block`] with [`block::Hash`] or /// [`Height`], if it exists in the non-finalized `chain` or finalized `db`. 
pub fn block(chain: Option, db: &ZebraDb, hash_or_height: HashOrHeight) -> Option> @@ -176,11 +179,29 @@ where C: AsRef, { match chain { - Some(chain) if chain.as_ref().spent_utxos.contains(&outpoint) => None, + Some(chain) if chain.as_ref().spent_utxos.contains_key(&outpoint) => None, chain => utxo(chain, db, outpoint), } } +/// Returns the [`Hash`](transaction::Hash) of the transaction that spent an output at +/// the provided [`transparent::OutPoint`] or revealed the provided nullifier, if it exists +/// and is spent or revealed in the non-finalized `chain` or finalized `db` and its +/// spending transaction hash has been indexed. +#[cfg(feature = "indexer")] +pub fn spending_transaction_hash( + chain: Option, + db: &ZebraDb, + spend: Spend, +) -> Option +where + C: AsRef, +{ + chain + .and_then(|chain| chain.as_ref().spending_transaction_hash(&spend)) + .or_else(|| db.spending_transaction_hash(&spend)) +} + /// Returns the [`Utxo`] for [`transparent::OutPoint`], if it exists in any chain /// in the `non_finalized_state`, or in the finalized `db`. 
/// diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index ce5fbde2a21..c0bb6f0873d 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -48,6 +48,8 @@ features = [ "journald", "prometheus", "sentry", + "indexer", + "getblocktemplate-rpcs" ] [features] @@ -59,8 +61,8 @@ default-release-binaries = ["default", "sentry"] # Production features that activate extra dependencies, or extra features in dependencies -# Indexer RPC support -indexer-rpcs = ["zebra-rpc/indexer-rpcs"] +# Indexer support +indexer = ["zebra-rpc/indexer-rpcs", "zebra-state/indexer"] # Mining RPC support getblocktemplate-rpcs = [ diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index ab06e546fc8..035b9a20100 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -271,7 +271,7 @@ impl StartCmd { // TODO: Add a shutdown signal and start the server with `serve_with_incoming_shutdown()` if // any related unit tests sometimes crash with memory errors - #[cfg(feature = "indexer-rpcs")] + #[cfg(feature = "indexer")] let indexer_rpc_task_handle = if let Some(indexer_listen_addr) = config.rpc.indexer_listen_addr { info!("spawning indexer RPC server"); @@ -289,7 +289,7 @@ impl StartCmd { tokio::spawn(std::future::pending().in_current_span()) }; - #[cfg(not(feature = "indexer-rpcs"))] + #[cfg(not(feature = "indexer"))] // Spawn a dummy indexer rpc task which doesn't do anything and never finishes. let indexer_rpc_task_handle: tokio::task::JoinHandle> = tokio::spawn(std::future::pending().in_current_span()); diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index ef2de55dc83..34cd0ba682d 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -122,6 +122,12 @@ //! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture //! ``` //! +//! Example of how to run the has_spending_transaction_ids test: +//! +//! ```console +//! 
RUST_LOG=info ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test has_spending_transaction_ids --features "indexer" --release -- --ignored --nocapture +//! ``` +//! //! Please refer to the documentation of each test for more information. //! //! ## Checkpoint Generation Tests @@ -3539,6 +3545,151 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { Ok(()) } +/// Checks that the cached finalized state has the spending transaction ids for every +/// spent outpoint and revealed nullifier in the last 100 blocks of a cached state. +// +// Note: This test is meant to be run locally with a prepared finalized state that +// has spending transaction ids. This can be done by starting Zebra with the +// `indexer` feature and waiting until the db format upgrade is complete. It +// can be undone (removing the indexes) by starting Zebra without the feature +// and waiting until the db format downgrade is complete. +#[tokio::test(flavor = "multi_thread")] +#[ignore] +#[cfg(feature = "indexer")] +async fn has_spending_transaction_ids() -> Result<()> { + use std::sync::Arc; + use tower::Service; + use zebra_chain::{chain_tip::ChainTip, transparent::Input}; + use zebra_state::{ + ReadRequest, ReadResponse, Request, Response, SemanticallyVerifiedBlock, Spend, + }; + + use common::cached_state::future_blocks; + + let _init_guard = zebra_test::init(); + let test_type = UpdateZebraCachedStateWithRpc; + let test_name = "has_spending_transaction_ids_test"; + let network = Mainnet; + + let Some(zebrad_state_path) = test_type.zebrad_state_path(test_name) else { + // Skip test if there's no cached state. 
+ return Ok(()); + }; + + tracing::info!("loading blocks for non-finalized state"); + + let non_finalized_blocks = future_blocks(&network, test_type, test_name, 100).await?; + + let (mut state, mut read_state, latest_chain_tip, _chain_tip_change) = + common::cached_state::start_state_service_with_cache_dir(&Mainnet, zebrad_state_path) + .await?; + + tracing::info!("committing blocks to non-finalized state"); + + for block in non_finalized_blocks { + let expected_hash = block.hash(); + let block = SemanticallyVerifiedBlock::with_hash(Arc::new(block), expected_hash); + let Response::Committed(block_hash) = state + .ready() + .await + .map_err(|err| eyre!(err))? + .call(Request::CommitSemanticallyVerifiedBlock(block)) + .await + .map_err(|err| eyre!(err))? + else { + panic!("unexpected response to Block request"); + }; + + assert_eq!( + expected_hash, block_hash, + "state should respond with expected block hash" + ); + } + + let mut tip_hash = latest_chain_tip + .best_tip_hash() + .expect("cached state must not be empty"); + + tracing::info!("checking indexes of spending transaction ids"); + + // Read the last 500 blocks - should be greater than the MAX_BLOCK_REORG_HEIGHT so that + // both the finalized and non-finalized state are checked. + let num_blocks_to_check = 500; + let mut is_failure = false; + for i in 0..num_blocks_to_check { + let ReadResponse::Block(block) = read_state + .ready() + .await + .map_err(|err| eyre!(err))? + .call(ReadRequest::Block(tip_hash.into())) + .await + .map_err(|err| eyre!(err))? 
+ else { + panic!("unexpected response to Block request"); + }; + + let block = block.expect("should have block with latest_chain_tip hash"); + + let spends_with_spending_tx_hashes = block.transactions.iter().cloned().flat_map(|tx| { + let tx_hash = tx.hash(); + tx.inputs() + .iter() + .filter_map(Input::outpoint) + .map(Spend::from) + .chain(tx.sprout_nullifiers().cloned().map(Spend::from)) + .chain(tx.sapling_nullifiers().cloned().map(Spend::from)) + .chain(tx.orchard_nullifiers().cloned().map(Spend::from)) + .map(|spend| (spend, tx_hash)) + .collect::>() + }); + + for (spend, expected_transaction_hash) in spends_with_spending_tx_hashes { + let ReadResponse::TransactionId(transaction_hash) = read_state + .ready() + .await + .map_err(|err| eyre!(err))? + .call(ReadRequest::SpendingTransactionId(spend)) + .await + .map_err(|err| eyre!(err))? + else { + panic!("unexpected response to Block request"); + }; + + let Some(transaction_hash) = transaction_hash else { + tracing::warn!( + ?spend, + depth = i, + height = ?block.coinbase_height(), + "querying spending tx id for spend failed" + ); + is_failure = true; + continue; + }; + + assert_eq!( + transaction_hash, expected_transaction_hash, + "spending transaction hash should match expected transaction hash" + ); + } + + if i % 25 == 0 { + tracing::info!( + height = ?block.coinbase_height(), + "has all spending tx ids at and above block" + ); + } + + tip_hash = block.header.previous_block_hash; + } + + assert!( + !is_failure, + "at least one spend was missing a spending transaction id" + ); + + Ok(()) +} + /// Check that Zebra does not depend on any crates from git sources. 
#[test] #[ignore] diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 58f6064cdf5..c290cde2cd9 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -167,13 +167,13 @@ pub async fn load_tip_height_from_state_directory( /// ## Panics /// /// If the provided `test_type` doesn't need an rpc server and cached state, or if `max_num_blocks` is 0 -pub async fn get_future_blocks( +pub async fn future_blocks( network: &Network, test_type: TestType, test_name: &str, max_num_blocks: u32, ) -> Result> { - let blocks: Vec = get_raw_future_blocks(network, test_type, test_name, max_num_blocks) + let blocks: Vec = raw_future_blocks(network, test_type, test_name, max_num_blocks) .await? .into_iter() .map(hex::decode) @@ -198,7 +198,7 @@ pub async fn get_future_blocks( /// ## Panics /// /// If the provided `test_type` doesn't need an rpc server and cached state, or if `max_num_blocks` is 0 -pub async fn get_raw_future_blocks( +pub async fn raw_future_blocks( network: &Network, test_type: TestType, test_name: &str, @@ -211,13 +211,13 @@ pub async fn get_raw_future_blocks( assert!( test_type.needs_zebra_cached_state() && test_type.needs_zebra_rpc_server(), - "get_raw_future_blocks needs zebra cached state and rpc server" + "raw_future_blocks needs zebra cached state and rpc server" ); let should_sync = true; let (zebrad, zebra_rpc_address) = spawn_zebrad_for_rpc(network.clone(), test_name, test_type, should_sync)? 
- .ok_or_else(|| eyre!("get_raw_future_blocks requires a cached state"))?; + .ok_or_else(|| eyre!("raw_future_blocks requires a cached state"))?; let rpc_address = zebra_rpc_address.expect("test type must have RPC port"); let mut zebrad = check_sync_logs_until( diff --git a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs index 399efc8d99e..d135a4d533e 100644 --- a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs +++ b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs @@ -14,7 +14,7 @@ use zebra_chain::parameters::Network; use zebra_node_services::rpc_client::RpcRequestClient; use crate::common::{ - cached_state::get_raw_future_blocks, + cached_state::raw_future_blocks, launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, test_type::TestType, }; @@ -42,7 +42,7 @@ pub(crate) async fn run() -> Result<()> { ); let raw_blocks: Vec = - get_raw_future_blocks(&network, test_type, test_name, MAX_NUM_FUTURE_BLOCKS).await?; + raw_future_blocks(&network, test_type, test_name, MAX_NUM_FUTURE_BLOCKS).await?; tracing::info!("got raw future blocks, spawning isolated zebrad...",); diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index 6ac031e491b..3a2fdeeb0f0 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -32,7 +32,7 @@ use zebra_rpc::queue::CHANNEL_AND_QUEUE_CAPACITY; use zebrad::components::mempool::downloads::MAX_INBOUND_CONCURRENCY; use crate::common::{ - cached_state::get_future_blocks, + cached_state::future_blocks, launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, lightwalletd::{ can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc, @@ -92,15 +92,14 @@ pub async fn run() -> Result<()> { ); let mut count = 0; - let blocks: Vec = - get_future_blocks(&network, test_type, test_name, 
MAX_NUM_FUTURE_BLOCKS) - .await? - .into_iter() - .take_while(|block| { - count += block.transactions.len() - 1; - count <= max_sent_transactions() - }) - .collect(); + let blocks: Vec = future_blocks(&network, test_type, test_name, MAX_NUM_FUTURE_BLOCKS) + .await? + .into_iter() + .take_while(|block| { + count += block.transactions.len() - 1; + count <= max_sent_transactions() + }) + .collect(); tracing::info!( blocks_count = ?blocks.len(), From 0dcc4205ee1758daf2475ffc5609a94e4f5acc05 Mon Sep 17 00:00:00 2001 From: Pili Guerra <1311133+mpguerra@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:05:48 +0100 Subject: [PATCH 065/245] Run dependabot monthly (#9171) This is the next possible interval after weekly --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index da2e1f1206a..6fa1f4ef67e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ updates: directory: '/' # serde, clap, and other dependencies sometimes have multiple updates in a week schedule: - interval: weekly + interval: monthly day: monday timezone: America/New_York # Limit dependabot to 1 PR per reviewer @@ -53,4 +53,4 @@ updates: groups: devops: patterns: - - "*" \ No newline at end of file + - "*" From 451ad3549bad8f7f49b28747d501d51cfd7271e3 Mon Sep 17 00:00:00 2001 From: futreall <86553580+futreall@users.noreply.github.com> Date: Thu, 30 Jan 2025 10:28:22 +0200 Subject: [PATCH 066/245] fix spelling issues (#9177) * Update chore-delete-gcp-resources.yml * Update data-flow-2020-07-22.md --- .github/workflows/chore-delete-gcp-resources.yml | 2 +- book/src/dev/rfcs/drafts/data-flow-2020-07-22.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index fe0c42de7c6..962442fc8d8 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ 
b/.github/workflows/chore-delete-gcp-resources.yml @@ -136,7 +136,7 @@ jobs: # Deletes all images older than $DELETE_IMAGE_HOURS days. - uses: 'docker://us-docker.pkg.dev/gcr-cleaner/gcr-cleaner/gcr-cleaner-cli' - continue-on-error: true # TODO: remove after fixig https://github.com/ZcashFoundation/zebra/issues/5933 + continue-on-error: true # TODO: remove after fixing https://github.com/ZcashFoundation/zebra/issues/5933 # Refer to the official documentation to understand available arguments: # https://github.com/GoogleCloudPlatform/gcr-cleaner with: diff --git a/book/src/dev/rfcs/drafts/data-flow-2020-07-22.md b/book/src/dev/rfcs/drafts/data-flow-2020-07-22.md index 5818ad119a1..8c366472ad4 100644 --- a/book/src/dev/rfcs/drafts/data-flow-2020-07-22.md +++ b/book/src/dev/rfcs/drafts/data-flow-2020-07-22.md @@ -23,7 +23,7 @@ - nullifiers (within a single transaction) - // Transactions containing empty `vin` must have either non-empty `vJoinSplit` or non-empty `vShieldedSpend`. - // Transactions containing empty `vout` must have either non-empty `vJoinSplit` or non-empty `vShieldedOutput`. - - Moar: https://github.com/zcash/zcash/blob/ab2b7c0969391d8a57d90d008665da02f3f618e7/src/main.cpp#L1091 + - More: https://github.com/zcash/zcash/blob/ab2b7c0969391d8a57d90d008665da02f3f618e7/src/main.cpp#L1091 - Sum up "LegacySigOps" for each transaction and check that it's less than some maximum - Acquires a lock, then calls `MarkBlockAsReceived` (networking?) 
- Calls `AcceptBlock`, defined at: https://github.com/zcash/zcash/blob/ab2b7c0969391d8a57d90d008665da02f3f618e7/src/main.cpp#L4180 From ba50a6c1d167b556b507c04d9905be120b289688 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:26:46 +0000 Subject: [PATCH 067/245] build(deps): bump the ecc group across 1 directory with 3 updates (#9092) Bumps the ecc group with 3 updates in the / directory: [incrementalmerkletree](https://github.com/zcash/incrementalmerkletree), [zcash_encoding](https://github.com/zcash/librustzcash) and [zcash_note_encryption](https://github.com/zcash/librustzcash). Updates `incrementalmerkletree` from 0.7.0 to 0.7.1 - [Commits](https://github.com/zcash/incrementalmerkletree/compare/incrementalmerkletree-v0.7.0...incrementalmerkletree-v0.7.1) Updates `zcash_encoding` from 0.2.1 to 0.2.2 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_encoding-0.2.1...zcash_encoding-0.2.2) Updates `zcash_note_encryption` from 0.4.0 to 0.4.1 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_note_encryption-0.4.0...zcash_protocol-0.4.1) --- updated-dependencies: - dependency-name: incrementalmerkletree dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc - dependency-name: zcash_encoding dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc - dependency-name: zcash_note_encryption dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 23 ++++++++++++++++------- Cargo.toml | 4 ++-- zebra-chain/Cargo.toml | 2 +- zebra-scan/Cargo.toml | 4 ++-- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a33bbb48c3a..11bae743368 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -944,6 +944,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core2" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239fa3ae9b63c2dc74bd3fa852d4792b8b305ae64eeede946265b6af62f1fff3" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.14" @@ -1975,9 +1984,9 @@ dependencies = [ [[package]] name = "incrementalmerkletree" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d45063fbc4b0a37837f6bfe0445f269d13d730ad0aa3b5a7f74aa7bf27a0f4df" +checksum = "216c71634ac6f6ed13c2102d64354c0a04dcbdc30e31692c5972d3974d8b6d97" dependencies = [ "either", ] @@ -5552,11 +5561,11 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052d8230202f0a018cd9b5d1b56b94cd25e18eccc2d8665073bcea8261ab87fc" +checksum = "3654116ae23ab67dd1f849b01f8821a8a156f884807ff665eac109bf28306c4d" dependencies = [ - "byteorder", + "core2", "nonempty", ] @@ -5599,9 +5608,9 @@ dependencies = [ [[package]] name = "zcash_note_encryption" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4580cd6cee12e44421dac43169be8d23791650816bdb34e6ddfa70ac89c1c5" +checksum = "77efec759c3798b6e4d829fcc762070d9b229b0f13338c40bf993b7b609c2272" dependencies = [ 
"chacha20", "chacha20poly1305", diff --git a/Cargo.toml b/Cargo.toml index c50c93ad414..dccad95c218 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,12 +22,12 @@ resolver = "2" # `cargo release` settings [workspace.dependencies] -incrementalmerkletree = { version = "0.7.0", features = ["legacy-api"] } +incrementalmerkletree = { version = "0.7.1", features = ["legacy-api"] } orchard = "0.10.0" sapling-crypto = "0.3.0" zcash_address = "0.6.0" zcash_client_backend = "0.14.0" -zcash_encoding = "0.2.1" +zcash_encoding = "0.2.2" zcash_history = "0.4.0" zcash_keys = "0.4.0" zcash_primitives = "0.19.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index eb8f098dcf0..25b6006240c 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -98,7 +98,7 @@ halo2 = { package = "halo2_proofs", version = "0.3.0" } orchard.workspace = true zcash_encoding.workspace = true zcash_history.workspace = true -zcash_note_encryption = "0.4.0" +zcash_note_encryption = "0.4.1" zcash_primitives = { workspace = true, features = ["transparent-inputs"] } sapling-crypto.workspace = true zcash_protocol.workspace = true diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 42cf878a8e6..57a6f12fd2c 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -94,7 +94,7 @@ ff = { version = "0.13.0", optional = true } group = { version = "0.13.0", optional = true } jubjub = { version = "0.10.0", optional = true } rand = { version = "0.8.5", optional = true } -zcash_note_encryption = { version = "0.4.0", optional = true } +zcash_note_encryption = { version = "0.4.1", optional = true } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44", optional = true } @@ -121,7 +121,7 @@ group = "0.13.0" jubjub = "0.10.0" rand = "0.8.5" tempfile = "3.14.0" -zcash_note_encryption = "0.4.0" +zcash_note_encryption = "0.4.1" toml = "0.8.19" tonic = "0.12.3" From 433633e700465ac542a02aa3aeb95af15cbab225 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:26:51 +0000 Subject: [PATCH 068/245] build(deps): bump the devops group with 2 updates (#9174) Bumps the devops group with 2 updates: [codecov/codecov-action](https://github.com/codecov/codecov-action) and [docker/build-push-action](https://github.com/docker/build-push-action). Updates `codecov/codecov-action` from 5.2.0 to 5.3.1 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.2.0...v5.3.1) Updates `docker/build-push-action` from 6.12.0 to 6.13.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.12.0...v6.13.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index e1a774db11a..3314bf0959c 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.2.0 + uses: codecov/codecov-action@v5.3.1 diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 95b472b18e4..743b3e1565c 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -162,7 +162,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.12.0 + uses: docker/build-push-action@v6.13.0 with: target: ${{ inputs.dockerfile_target }} context: . 
From 8bec4f376646e71dd4c24bb9b43c21a8994967e8 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 30 Jan 2025 23:03:17 -0300 Subject: [PATCH 069/245] chore(checkpoints): Update the mainnet and testnet checkpoints for v2.2.0 (#9180) * add checkpoints for 2.2.0 release * update automatic checkpoint readme --- .../src/checkpoint/main-checkpoints.txt | 162 +++++++++++ .../src/checkpoint/test-checkpoints.txt | 255 ++++++++++++++++++ zebra-utils/README.md | 8 +- 3 files changed, 421 insertions(+), 4 deletions(-) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index e43aede25c7..0e3ef46075a 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -12462,3 +12462,165 @@ 2738406 0000000000e0fce4549ce421eec68895d377bd63404846e1640b82a19d93164e 2738806 0000000000355369814abacc697c0ac355f938c32cf6b3cdfa47857a71685717 2739206 000000000090219b6c7c7fcc5890c632ee9322f77c193239208bb74bac90df53 +2739606 00000000013cc21e7c3d33de49e5237251907b312e44d77c764fdf297c219496 +2740006 000000000082ba3f41ddd21ef7c0d300ce4a8b7d1c31bfb179f2cd6ab06af13a +2740406 0000000000f8e618708ef54bb4661a37cc8cae8094a0e344b3bfaa590dbf7816 +2740806 0000000000e85d5a761f33fa075d6b0bd5b00bdab479dde676ace4231194e6f8 +2741206 0000000001879d712f455e12f41a2182399c73ee63114af998af75afa1f2a819 +2741606 0000000000fd54ec9a0f733b8c149fc9cab24b04f692f54ffe8b784dff0da3d7 +2742006 000000000150f053fe5d6dbf0386bd5c289041f87cbd57e29638242b16d8d8da +2742406 0000000000b8fb9c2430317aa3a9c3ec064bfa49a30dffa85dbbd08ed00f24e8 +2742806 0000000000af0c7ee3ccfbcaf737c14ec8c4bb0cc7917dc32daaa6a9c786b74c +2743206 0000000000eb95abfa70d209990a726fb335760da343d13ca4a2ba0e182baccf +2743606 0000000001aa44c15f5eaeeb6e5641c86e5f5486555db0418fc5a1e1a2479080 +2744006 0000000000ddb2a5bd07883bd0ab3f577095da1aac693892671669bd12b73406 +2744406 00000000017b56837c4d33a1563af4a9712618c381145098937b99e2b8db4026 
+2744806 00000000004080e34ac20d917297f715fad8dd0a5fc482a8bf87483cc4e8275a +2745206 00000000003aa9b6609d875e09c18099b38a80e37ac3b0867e851e743f0eb6d0 +2745606 000000000043469ac6bb28884f1878e4df9f05c2258e55b14bca2fb93f6ea07a +2746006 00000000009d387b9d9fec9b8a34ebf6f25904da01e24d30144d9190ed15429e +2746406 0000000000fe9a9049caec453daba726ea81b909a3593365b25a1cc49fa99339 +2746806 00000000011c2417859f5168054aa720bec72fd544ca8952b7e96037b3f4851b +2747206 0000000000e77371176c985aadd484b0fcca3db41ff70790fcbbd0b29b0687d2 +2747606 0000000001330236f1a5b7e811a9c1e4c677f617b85d030c0a2e10a7761f4c38 +2748006 00000000005cfaa963ab271c90236b00842741410f439ffc5f9cbdc98d7f7ddb +2748406 0000000000da3de32ba931e7b77cf6f76441965eafd16cee1684c1e6c84952f5 +2748806 00000000005964129dc447f984994b33099bc3a8ebb026637ab32e90de4a543c +2749206 000000000090bcc87a23353f2b834db0e2575003877368105db17d982abbd853 +2749606 00000000011bb237a6d460b86bec31aad00137a1d9420d03c71c92c65ddd077b +2750006 0000000000c082ac2649db2a2855557951830db0268ef179a3253b850cbc3d08 +2750406 00000000003a5c570dc438b2ce852d38bc2880940cb9a5b7d9c38a8dd6c6d5d7 +2750806 00000000008437f29608f282c925fd5ae0e651df989f4ae5a7d7e954baab403b +2751206 0000000000c4b2bdda356ee7bcc4eeebc1949850d7720de3b3be6bf14dc71af3 +2751606 00000000014b19ca15f5ec7a31cd6a4a8fb97c1b5b09afa6f66ea4d37ccfa83e +2752006 000000000100d26278441055366f165b3cdc5830a569069ddf42a4b4c150b860 +2752406 00000000006bb0782c3ee11f6b732eb37038f689807e43d8911f7150fc985318 +2752806 0000000000f10669c0e8c75d1d89055ee1976735247fc2d3b4f9fe38fa0a6100 +2753206 00000000003328234818184ab9070175afbe940acb160bb9eddd282275487b3c +2753606 000000000067bb014c984ef6f175f698a9510f77ae679669e69c00c34ec582eb +2754006 000000000050fb9e10183e6b2f4f1bfb1d31fb598b7d33dc43d1786a80b6565c +2754406 0000000001100186bf9db5be4615228ed498f16309c9855b8300fe5d76866325 +2754806 0000000000a14386d5f56c4ca1c8736e1d245fc2251e1cdb9e22795da74a9806 +2755206 0000000000f9115870e7db62bbad23a04472ceee8d543bdbcba7065d794d53ef 
+2755606 00000000004b67c88942438e8189f328f4f4118fe443e901654ef384b3a2a840 +2756006 0000000000b7d5bf6e198deb99a1fe6c7df97c77c6cfb03a6ef3f3f61e5e693e +2756406 000000000055177b156c499cf1fe81eb078aec679391dd9b4f905af52314db5d +2756806 000000000122f65a04a1b96877d3e490e69e88468e968b46432c8fc0966f151b +2757206 00000000016e0c6e86a6a434e5c4395ca5803ea8238e9dfc3191fbba5ecad010 +2757606 0000000001de945c2173f34493a09efb3d4426564862ae2e15ed7aa7a09d8460 +2758006 00000000006bda0aa4f972e8f0b5784df8e01fff92388cb6d2b5472b315a0726 +2758406 00000000002fa3526897b04ba2807b40b63c9e800486fd10f84f82b92ecc05c4 +2758806 0000000000ab77f2a43b444f687fe0ceef107fc70cc9ecfd76db44d3494857f4 +2759206 0000000000549abb350e013cf65bb4f93da2476173f141d29fa115d40f7d56f4 +2759606 0000000000c4aa948722f531a59214de30f0a6f10e37ac8c8938b582e0488018 +2760006 0000000000e9f3dbd6806433be71dcd247b95b42fb4343b01ffe9ae330b14290 +2760406 00000000015ff92772c8830a7d288a21eb0d9bb01cd6a4508a3719fc4a8d4807 +2760806 0000000000f3a77b38213adf4357f616cb13f64282c81d9ab9caa6a67ffebafa +2761206 000000000121ae054658d98e635017a9c208d5de4a0b8e9b92bd6a558a262377 +2761606 0000000000975ac669dd07a22cd58d7472091c6db6f0dd2140e44abdd4bbf2d8 +2762006 000000000151ec3b5378c894a5abe21081bd421e2999e260478d50c9656c3d14 +2762406 0000000000a73097e6282ee2265d14fa434f6b4c3339c5ea48b681b4b6f1d1cf +2762806 00000000002219b28797fc2a11e8813cc6c6d6639be98976acf5dab6e369d1d2 +2763206 00000000018531da38d2fa89f97982688938c5be99a9b4f213d49c481045ff79 +2763606 0000000000d36b131d019a60fd2ef2013d70264d326f4a6a25530c508f070a2b +2764006 00000000009a321964bea6fd56417859b8e7807c932090aa6d1783bcf442ff44 +2764406 0000000000dbb62ce4bc23847b73c1f86e78d9e02d2f225b62922601f70353ff +2764806 00000000010083bc11f21995fc30a115eb6537c6f450937409ad7ea1a5c1c9dd +2765206 00000000013778830a0277f41a157caf0b793e7e9e393846265fe3d20d59d7d6 +2765606 0000000001161051c1dc38be36456d57dd957698f8eec8bdc00f84dbb8c4a5a1 +2766006 00000000004ea17cfc83b347ff98b2ad24b283399646dd835c4f6a6eae3aafe8 
+2766406 000000000031a81a08f0906a0f07c32b1019f8017c9a07ee2639c27d36924e58 +2766806 000000000074bee8f1a490de7eafd9385d8c4e31819d3a434765c7fe9fe1066a +2767206 0000000000ea4f976cd91dea1a3e0fe2008abf0c4b0f3ed3cbb9a05835eb8425 +2767606 00000000003e06ece480e38ccf6a499598c7f5910cadea49a967a5ac6d775d77 +2768006 0000000000a4347db0f9f0b6d21274a324603d819322eaa0128d69c3ca429440 +2768406 00000000012b13bb625aedc62ea44b9da224e415df589005d0f67c28eeb0d628 +2768806 000000000034824cb54bed1dfd11a05a26087af3073fe629c78374e97a3342a2 +2769206 00000000016b8b79abb21505eca7e4c9d3c7c76cf9201fdb288baba79912dfd1 +2769606 00000000018c8519015e146cce3efb3a5f9e07bc6a67c236a62b4f10807dd4ad +2770006 0000000001a59b7507d26ddc65d2642c44c8f9e40a7cf3bd3fc68c5fe327d154 +2770406 0000000000172ba426ff0d2086e6f9f04a55841b6c5ba4b986b6862977f703a6 +2770806 0000000000ebd4f87dabfecb1890eae2b33f6aa8a26b12a3d40f053f8b782fe1 +2771206 000000000100d9e6b2ed1e5fc63c78471ae4d8a8a2908441e4884b23a7ff9e9b +2771606 00000000001d858d0e3d62e4522e601688f8177a61ed8446e56b0715e8e75411 +2772006 0000000000042d0c6bdee199af7d32a4609063344533e05c5fb76f0f169a2410 +2772406 0000000001246031615d106182ba5c730f086ab32c7efa01ce93dcbe20e5c2f6 +2772806 00000000010c459ab6df4d22c36782025e75bd40ce3d1270cf34b7e13f40e452 +2773206 000000000161dc5f33a99aef1c516659eb920f10431d120b530fdbcb22a89432 +2773606 000000000125be153bb3923f103c35cea14d660f72f48bdbd4e0a5ef0dbe7d10 +2774006 0000000000b0eb0202388511109d67db6d3de027347d4415505e50e92165c3cd +2774406 000000000086e54999bf9eecf79f1c52459ded1cb59b0458d03c5e798eddd8ae +2774806 00000000005c218287c121821f70c56dc1ef5d2039a680d498b96937384b057b +2775206 0000000000e636160773a7e53ec29425557e139008c8ac99dbb7ee48a9b03a7e +2775606 0000000000d7c608e305585a801a44839d89d55f9ce3180342c750923a3d618f +2776006 00000000010eb7f31655d75606c76a3e6ad2f5840e08651659e35c0cf339c808 +2776406 0000000000fdc5af708ba37e0dae6351d25f7a9dc275d4a6f65c049f4c34558b +2776806 00000000006bcad70b931fa3c3df8243104e25cc8d0c31cc86348be9368fd7d6 
+2777206 00000000000990695a35f4db2443129037e54b8b931e7f4c6ce137badab39431 +2777606 000000000162df86f0a89222cdfec5a7779e647d38cab71544978efb921c3106 +2778006 000000000129e6b55eae63eec9f0e2238a3971cee8a7c730672c59a8761c5486 +2778406 0000000001d36d0c7a5fadc374756c7bf777e50e7169e09e2795c8a330bd18eb +2778806 0000000000513909b622848405f2ec7b12d90f3e09ad8f718017cb82b6870c05 +2779206 00000000002ac760b9faff309ae89de9021b5af6f06091ba3e01156ccd12dc75 +2779606 00000000008367259d4d05b1d481d2d5f257ee709c74ba5e9743e560459ae5a9 +2780006 0000000001d48430feb0b333ea620a2da0a1bb689d30798e4a6f17b612fa4239 +2780406 0000000001404f1fc4a82d32a1110d95bf6887f25001ccc605b1ee7b2fc6ed3c +2780806 000000000058cd8b790c3178c444be02a92b0f183072f5b307c98afb88ca8afa +2781206 000000000122d08c6b2cc820b8efa753eb8e948859aee5151d76eddfd2f386d7 +2781606 0000000001d202804a9edc0ceef7f92db81d67b66421b2f91fbadc44884fd05a +2782006 000000000016f80ac8e1ec101ed551cb72190dd9275831841a858b008667c684 +2782406 00000000006c233036f65c30bcfdb46b8b29b9fd207acc5fe025a23e3e956231 +2782806 00000000018454bc5832a2960b0e936098ccbb72eec403b63c6975202bb8bd95 +2783206 000000000002dfac03e3ef121362920c9936e655a0f6f0c9aad3f582edf87768 +2783606 0000000000a93a65b195c115acbd5af532793b9b1b937f576d645349b7d594d1 +2784006 00000000006b7ea9bd52ce358010610793e880a96e91dd08272f8083d60856f5 +2784406 00000000002afdb3374dc38ee73f017d835bef1add61d0fe4eb73b2433043065 +2784806 00000000014ed47c96bae08fd6a4bd61b892dc2233274083d25f2d6351a82356 +2785206 00000000009a90d52b88aae799ab70c39b53c2d763bc7b8b95ee1ff3862356c0 +2785606 0000000000d035307cf87895f043739506cf0b351df6c70ea33964611c6b7ab6 +2786006 00000000001d382e649602981542858a56657045342ed630afb036afece1e747 +2786406 0000000001b5f54d1995ded771bb397527e5da30da92b7cb1bc1bdd71402d97c +2786806 0000000000765c04c56ffebc48f7b897bb03f659dd6d1020011f8b91abb4814b +2787206 0000000000e822d5e1506572198d01021c5b90c69de88759cac6b08db538f65d +2787606 0000000001d5f6f9f97ed24d1d5236afdf113ba76495ea9049eee7bf4e28cc20 
+2788006 000000000181fb7b8843015cc23b75ec8d52920007a2e8e4ee38160d75376336 +2788406 00000000016bebc3407c1a4f00b7e526f07181af1c51880777ba8cd00a545ec8 +2788806 000000000038682687c07cc772ec0541b1f18a1f7a2728a89267436a24605218 +2789206 0000000001c8a00c409d33bddd8ac16c2a952e380b9e2018ac7eaa5ab5736553 +2789606 00000000008ec7ddb741c93145da93524afb755e6d1a9d075e0fb997bac333d4 +2790006 0000000000c8f57bcbcd07a85194fb5a743808b80dfdf920cb864b6d1e05872a +2790406 0000000000cfa81555b46f3c56a0f0e7b5640eb6ed5d6ee3177d3f49b9e1c45f +2790806 000000000167790d1c6ab306663b2ed532ba50fc187c9587eb0e68f83030dbeb +2791206 000000000057ab98368983c0676b19e7334e87d8aee34ad8095d68cc7a7d6167 +2791606 0000000000456d8b7473cf96f6d5ba03b1e41f1dede79538a2b46e90499c0050 +2792006 0000000000233e5c66bf0869280377239f8d1c7e6e6458d9748e50bbc6bbda27 +2792406 00000000015d19a5b49bc6809b102d4327b159b0c8c752b79e9b9b9ddecb39e4 +2792806 0000000000b898620c407c7d1fad826ae40afa544901e8facdbfb1d398df7cc4 +2793206 0000000001a217c785745bcf27675cc5a3b68df36057284f230fc99c8392e6d0 +2793606 000000000115a58218d2810a1ca2cf0bd2e150bbfef26fe5826b974fcda9f385 +2794006 0000000000995c14e64db2c59d5b89fea6d4d48cce24b0655a3cf0f931aa1b12 +2794406 000000000041e61a4cd46c2b3321c3b701b5f0ef10231f9eaea7eca441353f98 +2794806 00000000012041aedac13f5cd09dae6770ec01f75966207b8c5a270d145341ad +2795206 0000000000b8612c653d1565e4bac227315fc75bf37789c1f946ae9aa2a0823f +2795606 00000000016796486431d2dce09dc2f9f8a5b8f471b3fd4453734f5339ea248f +2796006 000000000012b9d254187d11f8f021efc52375552e2b776eba70151b7eaedf50 +2796406 0000000000b11557fb28670570d803387fd573d64b6732446df657d7eb5eb1d9 +2796806 0000000001354f7f855d126578a4a655427ba701ca53e1baa1988f89e61e3a7b +2797206 00000000011679a50bbf35aa05ede31fdc1d4fb70ed896ca16be6a3a42739493 +2797606 000000000160f0910deee959725bed749edf2034ea9a0fb638a1c6909e32906c +2798006 000000000045f8ee48a39702fe70afab79a49fb7626e0ed61fb338e492246746 +2798406 0000000000349045ebee83e2a21eb74ecb8ca0d1e90ad3c9ff1caf2369ca4f9e 
+2798806 00000000018683cca36928a9b4446734e5967846b2f25d5bafd840fa01940628 +2799206 000000000033a4ff562b60094109353e67baad3b91d39dd245cab9b9a6a458cb +2799606 0000000001642c1c6408c1f4a749664905514c0e3afcbf3ad2a75aba70b1222a +2800006 00000000016f6717dc6d22b358a3ca1b77253351dc9147b4b1dbf62fcb434680 +2800406 0000000000706b2cf66c9438e13b34d5b73ebf0c011acb5ca60f03190420a7af +2800806 000000000153d4cb21766798424cfeaa9d1e694232485215f09f9d18522eeea0 +2801206 000000000166cfd84abdf857777f7940994b0ec38c4d78e633049ce3d4acd144 +2801606 00000000017fad570b0115392ebc673d93a974b8796636513c421447b675bcc9 +2802006 000000000087cfa6f8df85663d34a32fad11bcb7a942add0672ec096ef7b4a84 +2802406 00000000010a97f10a2c791f70d410bdecc3ce662436d0c5e513faa05bf76769 +2802806 0000000000f8bfdbb9a7938a7265af95b3a604141d60592208a37b582c417337 +2803206 000000000143df11dc312516155284d98a3061be2866e69354c5ab68f2d9bc78 +2803606 00000000013c64db870430b5c9e3605bca034bebf15d2292197037beca9f326d +2804006 00000000007e3643c215f1175cb93f70ace9510e6e515745914072065c3896c8 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index 5108ce28405..9ebb1282ec6 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -7785,3 +7785,258 @@ 3113600 0020e6d001c5a143b75e728a77ac192224aed587b6986bfa3ec3a118b32faa25 3114000 0000e4217cdd650fce03725c64ec76fba633ac45b66ef651d1357927ef5bdc81 3114400 0089707b70b58f2db28a000c346e66a7367a9b76cac122dfb469e1c6d52cf48a +3114800 00801befdc19a9d2f1deb80f0a2ac3d3196a375a6dbdc5f71071633e59fd8de7 +3115200 000d23d0a3c2f461d5f093621d417c37863ced661d662ffd98755c59b56f44b9 +3115600 003a92bac3e35ae205aa9d53174caf7de3e9823d1cb72958cde243d0c110ac9c +3116000 000d451f45a6557e0cddd33fcc084412719decb8903768fb727bfc889dc84515 +3116400 005bde2a8adedd006e33a0d95e393b522984b767b4a5d66103fc0018d43c75ee +3116800 00273f371ca6d1764362a1ea17cd2a671c76b5a8d280045eb373ca7cb6fe2175 
+3117200 000dadb32c53c5b26060471c03acd91e170da2de1d50466a321cc0213a351fb8 +3117600 00809bb474722e1fc072a4e39998e151a2a0c8949dea326b73b98c2dcb92c324 +3118000 000345ef9d4c86213f961d3f627996945f4773434f83aaf0d4cee157ced86eb4 +3118400 0013c50f7c2c92ec45552d6bdc3f6ff484a0dd7af2fd901574d386798825a776 +3118800 00139d73a38562f2d041d69fabe9ec9bd2c9f458becc0e007dc33ba112b62ce7 +3119200 0002e27ce6e5d70d2daabcef6bce9bceda8194df59675f5768e052684d54457a +3119600 0006e4342d64ec65f4094c6b47f1ac36afb28dd247805b6e0daa99781e1dee98 +3120000 001401bf335c8926ef6be415b395519bbcf062553c84208996cc2b4e7147f0d0 +3120400 000ce2e1b28c453cbb82639aee66ba1d08d5d6e36ca8c9bd4f5d960a380ef285 +3120800 00260cd12e67a455088a10c78cb3772120e6b29ef344ce5ef660d8cab54a109b +3121200 0007e596d5f8f4174abbbd3fbbaaf378614a18e07530cac8526dbb7e6f2ac53e +3121600 003a83b38ca4bc5b3e28b8cae51fafd22bb31ff00c946cbc9585e8865964b6d8 +3122000 001166eaa283761e6aafbe1ef7145d9f16e0547b24d99c3e0b026f0307710a72 +3122400 00178c51fb7aa005b16874fc81ec67bb5f1b8c4a0c64f2e09fc714cce34a44ec +3122800 0007b3fe8c03569e375a395ca089279f798ade04b4cf4961f604380d5ba37def +3123200 0016c1e139741953b84a4a055414e7503693a3e282708144440193e0cb37998b +3123600 002258e7813d88762174f04055b0c1057c09fd798be78abfbcb19bfed6fd39f2 +3124000 000495582a85d1d1b11fc45f4585f85f4b77d1b7f3160276e849efa4a0081e2d +3124400 00250bf6ccf22208a176913189e2eb172e3d761af4c23fbf58213b5906ba6c9c +3124800 0013be85f78ccd6c589d6d17c9823a5aa69c8d055d4f749e2a91db8a457f960b +3125200 001389eb7311499ed559c5bf5f91bb1a30746e68b8ac80fa1eca771d6c03cdc5 +3125600 0009f7eff120ac721cb7a22273c8974cae93f35f398bf998d8d20d307f9fe9f6 +3126000 000de13f8a41d6206eeffa50b484e5316f0560ac7e05c5394500af0da398f118 +3126400 00045480f80f870b9922ccc975c52b9c06720ef1c4236f71c51d8cdf5d45e8e4 +3126800 000b96c4a7815038ad7f0f85be4490a91ef4c0e74b395b70e26339b28fdc2e09 +3127200 001f3d754bae48722b66c1829530761a9432ce81d137c83a8250e940dea2208c +3127600 0006871a4de19d185c5ad2f5cdeb209e763cc5529ed8604a3302a47e6ac967d6 
+3128000 000ea0cac045930d047f9cdce936ce9c5ba8b05ab6193ae6178a70b572cde3f8 +3128400 0018ba08e8ee23c266d83848c7a21de0008c001b41439b2a955137641fb331d0 +3128800 000f0984aa7bd1bd6ad931dfeab9dd7c6b5507809a09575eac16456e3a5c1d2c +3129200 003f9e26005626c86bbc2a9867d3f713c17af2080619316168af4d9afa420401 +3129600 004c80a23c1ca9c271892dc60e68c87f9ed99927a4ce65a425831c3ad43f8da5 +3130000 00063844e1af63384456562b5571bbe4c3a100626f3131eb7b533c13e429e70e +3130400 000dd0c02ae18396e6eb5affcc610b633391f62c4130a43da4bfeabd070bc342 +3130800 0021a2dd2b640635b278b026e459c26b94f43970779891900ed31f732478ffb1 +3131200 001762cdc2703e764f215bdfcf80964a9375ed9ff15571613c9a99ef541454e4 +3131600 0010310df839ae9c8a66ce02c1cc1c6d571587747e0170ad89fa4182ec9fd00f +3132000 001de3b4eee0cf23c18b9ad5b4eb52ddb93a91ce9c8ebf8b6536554866be27c9 +3132400 00050e398655b7e0b603af3e38e9874e8ce722c93a9b6bc7cf2f9c8c30fdce6c +3132800 00377ddf89eed90f3677e8fa933d4aea894a825e62daaf91041a64fdf2940525 +3133200 00661de5222dcff36ef68e4852b910fdf50148021902a21e2ad5f4e44b5ab72e +3133600 0020ed1d8f84e50aa37804f2ea84a8fc720808310cf32ef5f343f36c2ca99ec2 +3134000 0014d067346f4c66ec9b7294b3a70a2ec742320a14bf03206d6b3014c7c24120 +3134400 00159358f3c9dc13a905e198b328cfe2d27d2cdce20ff0080adb1b2c087ddd0a +3134800 0006d66c1ea6ccbf6b33bfb304d19dda4327e423d174034c7c8bce8503177a45 +3135200 0014f7ec4eb8920d2005d4862e193d7cf73d001544586e666a0a08d98a8c1ed3 +3135600 00066ad8507194ae384895ce8286685972a02ee4dd7256b6f2ba730de5a4a7f0 +3136000 000d8b7e3cff5d367849e9dbe422a28170765c4dce06df047b69444d70b4bcc9 +3136400 006519f84ce51d70664e7b3acde0be69a413be4c0e40339dc5d235465f39c20d +3136800 001c675dbef2240d470d1cf257cf384974ed0c14f615efa6e86a64f64d843262 +3137200 00269a2cde7beb811dca31778263cffd50df1a3fb05c485ec5bfa039e47fe2c3 +3137600 000b64dc66badc601e239c05f140ed082c778ec82215e0930d1058b3cb657023 +3138000 00147f29a1823bf200876c8fc04b8ec0d3874231d9b879e1859cb8b8d39298dc +3138400 00287a971ba389b24b3b688221ff10ef2f8b2f7f864cb5af734f46f7cf578249 
+3138800 001baa7005d14a2cb69490b5b4dcc9e9d2098b0786f434002bc9ea8560f34571 +3139200 001b7315b7749b7b352df6d3f497b800274da04b75babf7ab97dc8199c4bbdfd +3139600 003d88d368703c31b825ea7c42d53ed073de57da2e3066585d6c654f2b97d1e7 +3140000 008420fd68d99001d3a82429c46ce5ee56bb2d40729b4657882bb068a13bd22e +3140400 00329682fba76839b5526d7270e2dc2963a06f754116bdbef55cfe9134d4796a +3140800 00320771952926e6f8bddb5fead0f6d142d19b96654b14a58e5dc9077b1cfaa8 +3141200 0054835db4608784d2a60b77ef5b51bddb724314a3d4e33b8eb6f2ae8ceb78f4 +3141600 0004e86082b7efbe88ac6d1bec196413f41e990aadfd3355370ecabeb7830200 +3142000 00d94a32436e4770372b187d701136894ced4fd717a13251fdab5e9dd6ec7bf4 +3142400 0050c0471916135ae9b016a0e84958892ca9cd68bf84d42b0d99fcc6eb6ce18e +3142800 000b22b7042bc3233d823b4a1e98334dcae5cc7e7a5a99ce757df0ffa9df16ca +3143200 001b1c563968a9470b3bd8c1f077458d240be125194f061280abea38f208d569 +3143600 00b3780ea939e7037edffc9a9ffa11af349ec4279c8750c6e37273cb15f4e642 +3144000 0002b96bc106f90be22bd18e6f49e1e26be7a0908594666a853843b8c599f29a +3144400 0011cad16b55dd630dbde8ab5d39c7e686b379bcdf9c98919838eb874bc62fb2 +3144800 0002cdb179bc36788972f05a080b5ae7c411a49f5fecd03a34a854a3dfbed926 +3145200 0045958bcd3cf25f329c8d5ad7934522c265f9bc839f58db034acec44fc92dac +3145600 000c23aa7057c2132bf0daf902d5ca50997e85f2fc74e8b068b0e88ac1251a94 +3146000 002f62711ab94339aa80fb60c82fb0d197100b09ee30a78f5a46ad01cd6aa58e +3146400 00bd63046164c0a412578977553c937cc01da37bded2bc4c287c15448ec90cdc +3146800 01189101bd4b304b0fb6bbcb2b6785f4ba761230e7efb00ace363f7119a063dc +3147200 0003941f5d517da847b10ad60a30c642b9c93cce0b15fdf0faab71bb156f3786 +3147600 0000307fc721f72341a6ddf017302208684aa4be539fa2f7c23ee3bc7714f704 +3148000 000f9a45edee52e5abb2f25d490236dab1590018658ad41d54c2b41fdcbad2dc +3148400 000ecc61fb7dee915bee5203b89f7faee0571decd5ee241febc23521a747ce84 +3148800 008254e34e67c3cc42deabae84035b7d8ca5ec4e064a31528dc1408bc852670a +3149200 001f2267ed7a82a9a1d7f709c5f3f3f076a982b573fe4d654a2b4a9854240cef 
+3149600 002491fd3484f7253f62129dabf5332d7c41ca79902618b6316e0bdf62818af1 +3150000 000ec4e45b2e58f057b04846d38c8be40a469b41644342f5e7d7f3306ea702fb +3150400 00080581882c6e85bccd97501d4be883cdaa1e8500d55793d421f7b0ec9040ab +3150800 007badc68d71d559ca506241d51aa2da327a658402ff3bcc945262c128aabb9c +3151200 000f7d44038975e5a549b07d42ddca85aec9a2ae8cac3242e39de378c47eb8ad +3151600 000c47d384afd0ebb7f369051bf81b66f935e514ff24371ad633ef3cc6305ced +3152000 006001f82489e412816c01ce9224d63e4eae445f7a99c29de48fa77a98490d9f +3152400 009a71adf38a7fcdfcfdf2b9476d654d681a6ed196acd87dde2a73c5f57e9ad6 +3152800 00008371e9468a68f8cd8c2bea4adc056317d92939557461c0b09e5111aaaca8 +3153200 0060b61595d73a8e9a13d37cac4be0d74a0f86742136a0967e7283433f7dd176 +3153600 003232ee43990d93eb31fc40938398888f8e026dc0aa6d23e55a494bafed15e3 +3154000 001b889288b9376f775be772a9b6bdecc60054142b745035cbc8aaa7cecbbae0 +3154400 0007baf87c7b212781f8e277bc027a13c518c62434bd2bc2fb4d3f06a0b4ecf0 +3154800 000943b3aeff89717af471e30eb4dc311ccc30a4a6437e1cb27fb4ec7d453e13 +3155200 0008a35d371ed781b2a1ce0fd6f4f7767296e4a0457314c4b05f3c11b47e8e03 +3155600 0028020f8af86077e8f4451a18bd1e20b6e1bee9391693163685be8790e38666 +3156000 000f9410d17a24aa4d8676af46a1c8a9c73b88ebf3a155a9dcf24a43624d949d +3156400 0084e33387122604e4e425da979794afa2a0d7d06b44d01181b6d8d4aed0a550 +3156800 00006f926fd768b2e3821868fd74b81122ea970fd0da35eccd8d66e109c8e438 +3157200 0042e2a1c578420d188c4a8a572f64fa6f27ce8e03e84fc6592a0341a73571a9 +3157600 00274d26b1bc7ef5993f4f19b69c577c3343c524e0b60e81fc9f48c594d0f6cc +3158000 0003e7cff4d92a31abff825ae6befedb3f1d17f3d646c32deeb56f4e047d0105 +3158400 009ab786993658e04f194d9528a2816705a95767f87e33e043128b35521df712 +3158800 000b15a9f16ea77c69220b2e3878598291a4351b3fe7bfa9bfb3abc26571c84b +3159200 0010bfc5c72033d4d3a04e6a71f599ed038851e27ead650c55fe963301a6cd47 +3159600 00d7a336a5683f2507ad4ed8360be6d22e2f20bf13ed3c7baef5e813a8839d70 +3160000 001337835b4e8867d09c16af753f6ce1b2aba733f26919f53dbfcfe6345afa76 
+3160400 003a2eb613f8fa1430ee8e836957d08312504bbf8abea4a0373eca62e7fe1f7d +3160800 0020f13fe3d59dda1d8e0d4dd642e11ba2db718ac3f737f09e29cb0409674347 +3161200 001c165f7a40d1ca4ee4abf0535f9f296a1f2eb35db2e90957106443bfd0eb5d +3161600 008953b30c170bef78f24aa96c5c3bbcdc46096f828eebe02d098686a1ef8e7f +3162000 000d84236466e1d5b1516d1cb5109804fff7ab56a845356d9778f14aa41127c1 +3162400 001027a69b873a0aa47a97dd6ca4b3769f7419bb11e805991b1152ea768fab03 +3162800 00026148cf4b02445674fffc1f30347de15b16cf97605a8df3ee96d72bc0df47 +3163200 003387aad998ebb2722d485dd434d96394181fc8e0397b6ceb04a873f4ddef1a +3163600 003428140959666f756289ae63546d2cd553666e533b25365010c1785eaadf49 +3164000 0032475f5cb6d010e01efb93ce5c5c5cbacd6b426ddc8aa5c6aa69d107792f86 +3164400 003b0325127121f5b94b2df65cb03f0174009504589ca170d41f6806d88b3434 +3164800 0065b4fdf75a8164d85a53034436d74a276a995ceb92ad2014df347a94fbdf67 +3165200 002a320edc63bf133807f33d88c12163c481c4ec1c212dfe7a5e52eec250536e +3165600 0005af01a34d4005f058dbf9e88321dc728c4c0f5ecd732943a7bcad53fced34 +3166000 0013170f5ccc6bf3d7c1d86b436f758de3b39592cc85aaa463fa7a02f43d3a49 +3166400 00251619213951d9c441943b6a319e46a474740ccf5f00a081494582cc45430a +3166800 00332b2e009021eaf2b8c6184856be3033f93134abf4f55e1d4dc9fd01fdc99a +3167200 000724fa56e98bb89fdc4fc790907a7c4a649078f0bd64eaee472bca2a1c46cd +3167600 000157b5ef54335463eca3b253ea688e45a00b04c539c5c4aa641ca21d60928b +3168000 00adc7c0a35acc3665487415d41a7d6926045334d7bbc6879d498f680a4f1b41 +3168400 0000e1c1703691d6bad5ea123dfe852f7d79e1a2cea00b535b6f04f7ee2b2cbf +3168800 001e735c30c72c2b5382124c1a2a96f0aeace5c053c07f54bd507d4690b2b439 +3169200 002a96b73bcb192395d8d140db998760c773e7b55754d8f3cce74e3b84f24816 +3169600 0009e48a1069420163b0d558c89f3c176ef2c694f579ffa2af966cbec2ef69aa +3170000 00005a7b729eb675c94faacd3a7fe3af25089d7a40b122f279f13d2979fadec9 +3170400 0075121828c5358a68594b8e5aa0208ba6f3df59cd8ed2218ce16bd9f07d5f1e +3170800 0009cc439721f9dbc74f36d5abe24f7d32759183daaf92b6e20d0cc3151d0720 
+3171200 0025ea31cbb55d2e5892561bff7f3d582d613807f95479bffe4107d7c49337ee +3171600 0015e644848af1cb65b0c0bc090d954cb033195ae42b054dca2e01afabe14ac1 +3172000 004c9150f95291bd33f086943ea6d23859058ee6e867ba26a03a56c16a21fe2d +3172400 00026731a1c37eef7c2ce7c588f388c548bf38d9269ff87b34fe73dc3b66f8e9 +3172800 0012c8f6924d5f66e61719ed16bf5f95c2415a9644b361c63095a1f08bcaa2e1 +3173200 0047f26f592db3c4fcd3daa9061a5ab8ae6aedd6c5f0482167351483fcfe7877 +3173600 007fecf668f422baa9d9a799d90e57ccd830de7fdc6052a00ce6ef49f07822ca +3174000 0075c2f35ee49937d1867cea406259fdb1d1ac918c0a009251ee22145d809475 +3174400 0015d802f21469e19157244b83cd2b35ebb4b61e5c3d3e1609daac5d0be407b3 +3174800 0005c1237d386ea57d714b8a83e37ec2b027ae04258f050001001c7ec4e43a81 +3175200 00fcadee65cc520bd8b50d9f63aab585d671dfe753e6e9d1bd8bf603b7a0b2c1 +3175600 001a0d8ae6855cf80d4bad82d148b0a82c31e6515c4ed2048f4edd8b67101762 +3176000 002f058b30767f0fd982be39ef9415364bcb783b6ed479d501b5d7b0bd200008 +3176400 0071d415275bfaaf0fa8e2834ac26de003ba595101a4a05a687cf1b0954e7653 +3176800 000d079218ce7bec1e0168f93a24c9c46c9dc00cda858766a78f8686a7260485 +3177200 00099466c235c720b49132255dc3f90a70743b829b93018e228b846e5a81428f +3177600 001ca03ee9e9ea1689639875ea6dc22faefa739b75cdbc599b538504ad4c975d +3178000 002a36650c4b810c63a2cb9dd6c3f85e21d82750e1734403b33ebc45937f2b40 +3178400 00262360ae847b3b7719e75fc38f0f160cf1f5b11912ad49756099d582c8dcc1 +3178800 001a491e3926a3fe6217636a88016696a0676c4ec4d24de65829a17ab41fcbd2 +3179200 0002b8095c580ec054419b070dd4ac600cfd979a4a02a4fb51b6c5f7ae1b366b +3179600 004147fd1abd8f02ac63d301aff5546349dfffb046f5e0119456b9df46e82fcd +3180000 00233058a28cc90a279021bd6a3462a9f634a02de00a22094c71a192536b210b +3180400 008be7ec9a4affac7a28d229db5812665f21bc4dc8ce9e6a35c1a26e6c5fa349 +3180800 002691eb666cea2a90255cd3c476c39d89a9e0d50da4164366a1ffeae6370e50 +3181200 004d102bf4723e6b51bc1772ca934e716e08730b6040d8735a92b9a36e67e03d +3181600 00224305970bf8e1edd169810b029ef9d435a4deda06f19a156084b29b887f6c 
+3182000 00063f6abb74b82ed9b8bcef489a56012c9aca2d84c51455e2794ce7e57f874d +3182400 0020e6fa33cf56c6d09f6969f41c3fbaed7e1c8d293f2de52301dae17c966edb +3182800 00618e7633d9e2dccf404dd274bbadd18f5f6310d926245e412e2cdc5e699f7e +3183200 00432c36f7272734cd4e0087fb1121b0a164762de3503df994586ff1d4e37f30 +3183600 001b4f084eb4aec028b74a2df00670b9be55b5770593e20be98626c6608bc124 +3184000 008003aca9b3506bdd75e207308c76b0e8b5723b032e22a52ee2d2a882636dfe +3184400 000645e9bf4c10376635b0f51132da928ce5eb2e71715669ec8e9c7fd83d765b +3184800 0036eddbeba21c506b45789793ebc33cd4e0f02cd5ed36b1423ffafbdd2bdb9d +3185200 003f0e1f3b681de67c90506794e35355f4eb98aa6243df58c8ac33ada58e67cd +3185600 006be1d0784f644f6cc308b163a81beacc2d06a398a883d45fb73af8798e8d11 +3186000 00613a2db0989feb01f229bbf0a2267c47e3fdccbc7cbe1ce963678ef4128092 +3186400 000baabe982cb43ae18cb6b1d2150ba573729c8f88fa16e2e57274d031799539 +3186800 0012a5110fbd8e1ae00fe21d6cac31b86fb9363ca49b6e103d15eb05f5bf3c72 +3187200 0129e91d147adc43b73f0fd193074be99975137d933e1a5f35dfb510358e8538 +3187600 002c8821bf8373c2d24daacaeabeed47c30f3facc5499c07e162838af99cc929 +3188000 0029fd939e7270e24428dd48ea34c7773d7ed9c228fec01b01e8233da30a95ef +3188400 000b58e87d95e2bd5b7429a5bba7a62a701200dcd2310cbbb781f1b4dd175df9 +3188800 00290522758fc70c140437fb4b3a9af371a51d83537ac3c20d8e69b0641ad430 +3189200 0003f7d8cdc1567eca71cf3b255d7803a6abf8a39b7340ff9ac7e8da689deb93 +3189600 0020149fe848580f2d5ae09c49f518d4ffb22815da8714dbfd76f95502fb0b5f +3190000 0085c044a5bd47a5f35189c3868ed0ad79958889ffd0d3bab642c45dcb3daccd +3190400 0050f62cdcccb52f8270e0c6f49bb6cf1a02b2548d2dcf8a8808022479d24c01 +3190800 001d8a372722cc4e751e3ec0bfecd8a07ea0e108b98af5a95ca895df0779ac9e +3191200 0036855e00fae0cee06f95766c35988dac8ef6914c442e8eed50f3cc75954cb0 +3191600 000ddf25dd35020750de3fa676508c7f2d482ce08cbaceee0b12e06d647aa9b6 +3192000 000ffb9eba654d8a1e4a68784e09da6e96df43b1048932a6cdce0199718d1f9d +3192400 00138b8b6f600b58ca255632a37dbf5a7a09e7f9d1714ede6579588761a45a7b 
+3192800 0031e8b9c34bb8b85284bb0a2c4c3960c8b1b63efa003f54112704d1ea41fa54 +3193200 007a0f7d624ff12265c9053178d1b2c7d47682f659b67d5f514ff06cf62cfb92 +3193600 00101087c41863fda9afeff1e289f5b3c43ea6443470af6a79b664f1ecbe2681 +3194000 00a80629693d8bd39c256ae6ff29af7dd5d5829fa304f5652ed86ac71c5790c8 +3194400 0003c2fb96a6b14a09d0f19af36f7a842cd2a6268130465cef19e14ff48fb4f2 +3194800 000964db3495d4119b12f156ab510650fbebc90c76866dd97a723d95dcb33c3b +3195200 0024c77abb529c2180a7a64c2709b8c96af99aeeb1d2ce7b497e8bac864e721c +3195600 003139976a450a78120dd1ff6afd6d07ebbe3ecbbaf2c754c6baf3c7df7bc1c0 +3196000 000bc0a3335a7dfc978e037d6fcf199d938c410c1f78cc020800622e507d24fc +3196400 000459d3709d12fccb0bc0cf0e56a3c384741303d8bade13bba82f3b67c323c4 +3196800 0006e9912b22efc94f6348dbbe0352f1ea6284b5d567ab3441fcc3074edd2c89 +3197200 001d7b3efd08340c08aa5cd8f4fd6d64100a94d798986f9aad2b01bc82d26874 +3197600 001cf35aed1eb7ec936d540e68bed8bff673521bbe370f907b1b74ee1bc6b9fe +3198000 001240ffe75a8123b9b63355f05a27a2d0f5eaa7129f1ca5cd59268b386929c2 +3198400 007178947bf9ec65bf48dba195f408b2ad2f02cf707c6f6248166c189a92f42a +3198800 0004b602b01a66ca88f5221b9579c6c45160ca26d83f1ca6db78dc86b8685979 +3199200 000a44c189e55543e141c235c57f510ddf7bcfef45841d0e991b2e86803c0be3 +3199600 0011d52a8c7f2dba60dfe802c24126f66620785281a0102d1e8aed12a0de793e +3200000 001ad7bc5984af656740fb2964c4396fa5c0b06fc717bfe855e4c6ac43449767 +3200400 0014a7b38f354632c3a4ec3086dc3e881c9363b7b7862abf3c7e522d117e6e6a +3200800 001d12ae1bb6b863d9ab7ec3059828f2b2d458f69f0a10df9da680beccea4153 +3201200 001084d745c14701ec0ec3a3914823993c3192e011234fcb1795ca2d60a62ed5 +3201600 005931bf3f11c526b47552e27de389a556bb5708f0d09789c9b222793683b344 +3202000 0011bd7d37ef18031f3c9e2bb4a1cee9af38352598a1f3aad88ff3a41361e58c +3202400 005424bf3c48c8ecfbfdb21c51516bc30698f83af7522119b641701062694d9e +3202800 00d51f21ecc4a9a565711d4920dc542a9ed557917bb01457f0c89ce414dbaa25 +3203200 0022c587ad5cd2c43c5c5e3a14d985f9ec76b7531e68c000894076f076044501 
+3203600 00121b2f456210c9b43fb92818b0fae154b8ca45b238a1c92751f98c442e920a +3204000 001084d49326dc0b1a53ea3219ee64e4f1b5ab8871d31f26c03d379cefb70c62 +3204400 001c8a5c65e5487a00be325042dcbb435c2fd712d1c840ef842dd3609c5c9a5c +3204800 0007864a20b64694d70976ebb80566766da44af9361f795c69c059fc1f87156d +3205200 000d3184729b81146ac72f23126e63f06d87cb7272a6614d7c1958aaaacbcdc7 +3205600 00b5941c916b92ddb2bad1858102f14b5227ca8ac51d3a2a3a8d080cf5425c88 +3206000 00d9d311769245db1caf4681c4e18258b91c6d69e3dcf1ffb1c1db801bb621a6 +3206400 0012a0abb218f8399d0974809537eeb16c6e4e39b98c7adf4ff98899d68c03d8 +3206800 003298428f6de8c3a39f8c621d02debebda618b59fa21e8e77be599a7cf0b852 +3207200 00047c27f2593d25a5f181640405f4924343829d65b6ba9803886bd22787c9ba +3207600 0032b36ba17060dff3f3b0eb0a0bc3a989d9b36bf1c6ce88a73449f27ea48ddd +3208000 00ac093dbbb933326561dabbe83a97c770e69ad397259397bc3f85b868c6091b +3208400 000eca197a98d0bec6d145f1e393d889da31f4b2c2924c36880b1f64c2385963 +3208800 006f35efe3be683efaf881d6654f637d603c3d8c50a405ed82413472a13b2970 +3209200 0029d297ed4c3d739a33a1bc51185905341eaddde11fd153aa4790d04418348e +3209600 000da383a79b7c2338f431d901372d14751ca6f97cafb4426a4b9ebe90587714 +3210000 00145629a160dc87593dbf2a4c0882b1e15c3b83d279d8d9bdb19ec66708633a +3210400 003b6698d63f22c41d41b496b8958539d5a5c2abf08e9a8339e57ee3a9f15378 +3210800 003a95a9d2351f616add3ea7b77de8452d36ad326e8de3daf9d12e5b0fab69c7 +3211200 0025584c1e1f7bdcc4675f6945c330806bf8f5ef1b705a451d24480ebecc5655 +3211600 001c9940460d636494526cc06531fe2bc91172062ae60390cfe3cd0a00fe0eb2 +3212000 002d20ea5264b39b7ae5b3cb717bfc09344109b4ee751212976b47ba48edc394 +3212400 0044c4572e3d32ee30a1e0948da95fcca2261e35f52661203cb55dcc43956dad +3212800 00053f13d2bf42c04209d24813b6f2356d01a8c2cd596d8e3f030c9b91c92556 +3213200 0013ec6ccc4f21e843f6abb574ef5ae27fe2ca88754cb4a5a0117a59873774e7 +3213600 0020b01462364b7f8c3479255dfd73c355a3f1a321cce5d17cd63050fe4583a1 +3214000 0081cd9c7106ea2c89b22b80e131633ff0121f34c8103ba3d51d1300c6a342cc 
+3214400 00270632bf3b8ab109c80fc7a3ae38114a235489719f4e43ed7a6900489726d8 +3214800 000d2dc891a5ea86569dff3f8281b38fdaf34f01b67237aa24a1d43ec4a4cd18 +3215200 003b85f19f78ace3aa0da06ad218cd76b7c0b1e0af314c52662547ce4e3eac14 +3215600 002a34a04dc75173ce838e0146b78ef0419add9bdcb852867d5efe6af0b92e95 +3216000 00116b95243e992cc66091d2b9de0ae07c5f75439bbbe92123ebd7f6a48fa816 +3216400 00c93a145bc882ca20dd4f622a718fcff52bd1ce09577a2dd8a7f4308f46afdf diff --git a/zebra-utils/README.md b/zebra-utils/README.md index 2422264ea4a..bef84569447 100644 --- a/zebra-utils/README.md +++ b/zebra-utils/README.md @@ -21,12 +21,12 @@ Zebra's GitHub workflows automatically generate checkpoints after every `main` b These checkpoints can be copied into the `main-checkpoints.txt` and `test-checkpoints.txt` files. To find the latest checkpoints on the `main` branch: -1. Find the [latest completed `CI Docker` workflow run on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain). +1. Find the [latest completed `Run Tests` workflow run on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml?query=branch%3Amain). Due to GitHub UI issues, some runs will show as waiting, cancelled, or failed, but the checkpoints have still been generated. -2. Go to the `Result of checkpoints-mainnet` step in the - `Run checkpoints-mainnet` job, in the `Generate checkpoints mainnet` job -3. Scroll down until you see the list of checkpoints, it should start around line 200 +2. From the list on the left, go to `Integration tests` and find the `Run checkpoints-mainnet` test, then click on the + `Result of checkpoints-mainnet` step. +3. Scroll down until you see the list of checkpoints. 4. Add those checkpoints to the end of `zebra-consensus/src/checkpoint/main-checkpoints.txt` 5. Repeat steps 2 to 4 for `Generate checkpoints testnet` 6. 
Open a pull request at https://github.com/ZcashFoundation/zebra/pulls From af89544b697fd6f05744f0f9fd007a2897cc2e2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 04:11:33 +0000 Subject: [PATCH 070/245] build(deps): bump the prod group across 1 directory with 36 updates (#9179) * build(deps): bump the prod group across 1 directory with 36 updates Bumps the prod group with 36 updates in the / directory: | Package | From | To | | --- | --- | --- | | [clap](https://github.com/clap-rs/clap) | `4.5.23` | `4.5.27` | | [indexmap](https://github.com/indexmap-rs/indexmap) | `2.7.0` | `2.7.1` | | [semver](https://github.com/dtolnay/semver) | `1.0.23` | `1.0.25` | | [serde](https://github.com/serde-rs/serde) | `1.0.215` | `1.0.217` | | [tokio](https://github.com/tokio-rs/tokio) | `1.42.0` | `1.43.0` | | [tower](https://github.com/tower-rs/tower) | `0.4.13` | `0.5.1` | | [pin-project](https://github.com/taiki-e/pin-project) | `1.1.7` | `1.1.8` | | [tinyvec](https://github.com/Lokathor/tinyvec) | `1.8.0` | `1.8.1` | | [thiserror](https://github.com/dtolnay/thiserror) | `2.0.6` | `2.0.11` | | [dirs](https://github.com/soc/dirs-rs) | `5.0.1` | `6.0.0` | | [rand](https://github.com/rust-random/rand) | `0.8.5` | `0.9.0` | | [sentry](https://github.com/getsentry/sentry-rust) | `0.35.0` | `0.36.0` | | [inferno](https://github.com/jonhoo/inferno) | `0.12.0` | `0.12.1` | | [hyper](https://github.com/hyperium/hyper) | `1.5.1` | `1.6.0` | | [metrics-exporter-prometheus](https://github.com/metrics-rs/metrics) | `0.16.0` | `0.16.1` | | [log](https://github.com/rust-lang/log) | `0.4.22` | `0.4.25` | | [indicatif](https://github.com/console-rs/indicatif) | `0.17.9` | `0.17.11` | | [proptest](https://github.com/proptest-rs/proptest) | `1.5.0` | `1.6.0` | | [proptest-derive](https://github.com/proptest-rs/proptest) | `0.5.0` | `0.5.1` | | [jsonrpsee-types](https://github.com/paritytech/jsonrpsee) | `0.24.7` | `0.24.8` 
| | [insta](https://github.com/mitsuhiko/insta) | `1.41.1` | `1.42.1` | | [serde_json](https://github.com/serde-rs/json) | `1.0.133` | `1.0.138` | | [tempfile](https://github.com/Stebalien/tempfile) | `3.14.0` | `3.16.0` | | [bitflags](https://github.com/bitflags/bitflags) | `2.6.0` | `2.8.0` | | [primitive-types](https://github.com/paritytech/parity-common) | `0.12.2` | `0.13.1` | | [rand_core](https://github.com/rust-random/rand) | `0.6.4` | `0.9.0` | | [serde_with](https://github.com/jonasbb/serde_with) | `3.11.0` | `3.12.0` | | [itertools](https://github.com/rust-itertools/itertools) | `0.13.0` | `0.14.0` | | [rand_chacha](https://github.com/rust-random/rand) | `0.3.1` | `0.9.0` | | [rocksdb](https://github.com/rust-rocksdb/rust-rocksdb) | `0.22.0` | `0.23.0` | | [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) | `0.5.13` | `0.5.14` | | [elasticsearch](https://github.com/elastic/elasticsearch-rs) | `8.16.0-alpha.1` | `8.17.0-alpha.1` | | [jsonrpsee](https://github.com/paritytech/jsonrpsee) | `0.24.7` | `0.24.8` | | [jsonrpsee-proc-macros](https://github.com/paritytech/jsonrpsee) | `0.24.7` | `0.24.8` | | [syn](https://github.com/dtolnay/syn) | `2.0.90` | `2.0.96` | | [quote](https://github.com/dtolnay/quote) | `1.0.37` | `1.0.38` | Updates `clap` from 4.5.23 to 4.5.27 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.23...clap_complete-v4.5.27) Updates `indexmap` from 2.7.0 to 2.7.1 - [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.7.0...2.7.1) Updates `semver` from 1.0.23 to 1.0.25 - [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.23...1.0.25) Updates `serde` from 1.0.215 to 1.0.217 - [Release 
notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.215...v1.0.217) Updates `tokio` from 1.42.0 to 1.43.0 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.42.0...tokio-1.43.0) Updates `tower` from 0.4.13 to 0.5.1 - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/tower-0.4.13...tower-0.5.1) Updates `pin-project` from 1.1.7 to 1.1.8 - [Release notes](https://github.com/taiki-e/pin-project/releases) - [Changelog](https://github.com/taiki-e/pin-project/blob/main/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project/compare/v1.1.7...v1.1.8) Updates `tinyvec` from 1.8.0 to 1.8.1 - [Changelog](https://github.com/Lokathor/tinyvec/blob/main/CHANGELOG.md) - [Commits](https://github.com/Lokathor/tinyvec/compare/v1.8.0...v1.8.1) Updates `thiserror` from 2.0.6 to 2.0.11 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/2.0.6...2.0.11) Updates `dirs` from 5.0.1 to 6.0.0 - [Commits](https://github.com/soc/dirs-rs/commits) Updates `rand` from 0.8.5 to 0.9.0 - [Release notes](https://github.com/rust-random/rand/releases) - [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/rand/compare/0.8.5...0.9.0) Updates `sentry` from 0.35.0 to 0.36.0 - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.35.0...0.36.0) Updates `inferno` from 0.12.0 to 0.12.1 - [Changelog](https://github.com/jonhoo/inferno/blob/main/CHANGELOG.md) - [Commits](https://github.com/jonhoo/inferno/compare/v0.12.0...v0.12.1) Updates `hyper` from 1.5.1 to 1.6.0 - [Release notes](https://github.com/hyperium/hyper/releases) 
- [Changelog](https://github.com/hyperium/hyper/blob/master/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper/compare/v1.5.1...v1.6.0) Updates `metrics-exporter-prometheus` from 0.16.0 to 0.16.1 - [Changelog](https://github.com/metrics-rs/metrics/blob/main/release.toml) - [Commits](https://github.com/metrics-rs/metrics/compare/metrics-exporter-prometheus-v0.16.0...metrics-exporter-prometheus-v0.16.1) Updates `log` from 0.4.22 to 0.4.25 - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.22...0.4.25) Updates `indicatif` from 0.17.9 to 0.17.11 - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.9...0.17.11) Updates `proptest` from 1.5.0 to 1.6.0 - [Release notes](https://github.com/proptest-rs/proptest/releases) - [Changelog](https://github.com/proptest-rs/proptest/blob/main/CHANGELOG.md) - [Commits](https://github.com/proptest-rs/proptest/compare/v1.5.0...v1.6.0) Updates `proptest-derive` from 0.5.0 to 0.5.1 - [Release notes](https://github.com/proptest-rs/proptest/releases) - [Changelog](https://github.com/proptest-rs/proptest/blob/0.5.1/CHANGELOG.md) - [Commits](https://github.com/proptest-rs/proptest/compare/proptest-derive-0.5.0...0.5.1) Updates `jsonrpsee-types` from 0.24.7 to 0.24.8 - [Release notes](https://github.com/paritytech/jsonrpsee/releases) - [Changelog](https://github.com/paritytech/jsonrpsee/blob/v0.24.8/CHANGELOG.md) - [Commits](https://github.com/paritytech/jsonrpsee/compare/v0.24.7...v0.24.8) Updates `insta` from 1.41.1 to 1.42.1 - [Release notes](https://github.com/mitsuhiko/insta/releases) - [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md) - [Commits](https://github.com/mitsuhiko/insta/compare/1.41.1...1.42.1) Updates `serde_json` from 1.0.133 to 1.0.138 - [Release 
notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.133...v1.0.138) Updates `tempfile` from 3.14.0 to 3.16.0 - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.14.0...v3.16.0) Updates `bitflags` from 2.6.0 to 2.8.0 - [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/2.6.0...2.8.0) Updates `primitive-types` from 0.12.2 to 0.13.1 - [Commits](https://github.com/paritytech/parity-common/commits/primitive-types-v0.13.1) Updates `rand_core` from 0.6.4 to 0.9.0 - [Release notes](https://github.com/rust-random/rand/releases) - [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/rand/compare/rand_core-0.6.4...0.9.0) Updates `serde_with` from 3.11.0 to 3.12.0 - [Release notes](https://github.com/jonasbb/serde_with/releases) - [Commits](https://github.com/jonasbb/serde_with/compare/v3.11.0...v3.12.0) Updates `itertools` from 0.13.0 to 0.14.0 - [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-itertools/itertools/compare/v0.13.0...v0.14.0) Updates `rand_chacha` from 0.3.1 to 0.9.0 - [Release notes](https://github.com/rust-random/rand/releases) - [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/rand/compare/rand_chacha-0.3.1...0.9.0) Updates `rocksdb` from 0.22.0 to 0.23.0 - [Release notes](https://github.com/rust-rocksdb/rust-rocksdb/releases) - [Changelog](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-rocksdb/rust-rocksdb/compare/v0.22.0...v0.23.0) Updates `crossbeam-channel` from 0.5.13 to 0.5.14 - [Release 
notes](https://github.com/crossbeam-rs/crossbeam/releases) - [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md) - [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.13...crossbeam-channel-0.5.14) Updates `elasticsearch` from 8.16.0-alpha.1 to 8.17.0-alpha.1 - [Release notes](https://github.com/elastic/elasticsearch-rs/releases) - [Commits](https://github.com/elastic/elasticsearch-rs/compare/v8.16.0-alpha.1...v8.17.0-alpha.1) Updates `jsonrpsee` from 0.24.7 to 0.24.8 - [Release notes](https://github.com/paritytech/jsonrpsee/releases) - [Changelog](https://github.com/paritytech/jsonrpsee/blob/v0.24.8/CHANGELOG.md) - [Commits](https://github.com/paritytech/jsonrpsee/compare/v0.24.7...v0.24.8) Updates `jsonrpsee-proc-macros` from 0.24.7 to 0.24.8 - [Release notes](https://github.com/paritytech/jsonrpsee/releases) - [Changelog](https://github.com/paritytech/jsonrpsee/blob/v0.24.8/CHANGELOG.md) - [Commits](https://github.com/paritytech/jsonrpsee/compare/v0.24.7...v0.24.8) Updates `syn` from 2.0.90 to 2.0.96 - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.90...2.0.96) Updates `quote` from 1.0.37 to 1.0.38 - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.37...1.0.38) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor 
dependency-group: prod - dependency-name: tower dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: pin-project dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tinyvec dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: dirs dependency-type: direct:production update-type: version-update:semver-major dependency-group: prod - dependency-name: rand dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: inferno dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: hyper dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: metrics-exporter-prometheus dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: proptest dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: proptest-derive dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: jsonrpsee-types dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: insta dependency-type: 
direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: bitflags dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: primitive-types dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: rand_core dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: serde_with dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: itertools dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: rand_chacha dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: rocksdb dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: crossbeam-channel dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: elasticsearch dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: jsonrpsee dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: jsonrpsee-proc-macros dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch 
dependency-group: prod ... Signed-off-by: dependabot[bot] * downgrade some dependencies, fixes where needed with what is left * update denies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 503 ++++++++++---------- deny.toml | 17 - tower-batch-control/Cargo.toml | 8 +- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 30 +- zebra-chain/src/parameters/network/magic.rs | 2 +- zebra-consensus/Cargo.toml | 18 +- zebra-grpc/Cargo.toml | 6 +- zebra-network/Cargo.toml | 28 +- zebra-node-services/Cargo.toml | 14 +- zebra-rpc/Cargo.toml | 26 +- zebra-scan/Cargo.toml | 26 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 36 +- zebra-test/Cargo.toml | 16 +- zebra-utils/Cargo.toml | 18 +- zebrad/Cargo.toml | 50 +- zebrad/tests/acceptance.rs | 2 - 18 files changed, 382 insertions(+), 424 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11bae743368..a2d61944a5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.5.23", + "clap 4.5.27", "color-eyre", "fs-err", "once_cell", @@ -246,7 +246,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -257,7 +257,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -411,7 +411,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -422,7 +422,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -431,10 +431,10 @@ 
version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.12.1", "log", "prettyplease", "proc-macro2", @@ -442,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -463,18 +463,18 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" @@ -484,9 +484,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "bitflags-serde-legacy" @@ -494,7 +494,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "serde", ] @@ -733,7 +733,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -802,9 +802,9 @@ dependencies = [ [[package]] name 
= "clap" -version = "4.5.23" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" dependencies = [ "clap_builder", "clap_derive", @@ -812,9 +812,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -824,14 +824,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -980,7 +980,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.23", + "clap 4.5.27", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1009,9 +1009,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] @@ -1082,7 +1082,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1106,7 +1106,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1117,7 +1117,7 @@ 
checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1163,23 +1163,23 @@ dependencies = [ [[package]] name = "dirs" -version = "5.0.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" dependencies = [ "libc", "option-ext", "redox_users", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -1232,13 +1232,14 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elasticsearch" -version = "8.16.0-alpha.1" +version = "8.17.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774166217d4f9b96e9ab9d6832302e3d47196de507fddddb21de8184a39e2c6d" +checksum = "52be486463ef0b89e45191803db146387d5f594c26a0c8790807bb3e988ec5f6" dependencies = [ "base64 0.22.1", "bytes", "dyn-clone", + "flate2", "lazy_static", "percent-encoding", "reqwest", @@ -1482,7 +1483,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1549,6 +1550,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", +] + 
[[package]] name = "gimli" version = "0.28.1" @@ -1561,7 +1574,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", "libgit2-sys", "log", @@ -1598,7 +1611,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -1854,9 +1867,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -2010,9 +2023,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2021,9 +2034,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.9" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" +checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" dependencies = [ "console", "number_prefix", @@ -2034,9 +2047,9 @@ dependencies = [ [[package]] name = "inferno" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a5d75fee4d36809e6b021e4b96b686e763d365ffdb03af2bd00786353f84fe" +checksum = "692eda1cc790750b9f5a5e3921ef9c117fd5498b97cfacbc910693e5b29002dc" dependencies = [ "ahash", "itoa", @@ -2059,15 +2072,16 @@ 
dependencies = [ [[package]] name = "insta" -version = "1.41.1" +version = "1.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" +checksum = "71c1b125e30d93896b365e156c33dadfffab45ee8400afcbba4752f59de08a86" dependencies = [ "console", - "lazy_static", "linked-hash-map", + "once_cell", "pest", "pest_derive", + "pin-project", "ron", "serde", "similar", @@ -2116,9 +2130,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] @@ -2160,9 +2174,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" +checksum = "834af00800e962dee8f7bfc0f60601de215e73e78e5497d733a2919da837d3c8" dependencies = [ "jsonrpsee-core", "jsonrpsee-server", @@ -2195,15 +2209,15 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" +checksum = "6fcae0c6c159e11541080f1f829873d8f374f81eda0abc67695a13fc8dc1a580" dependencies = [ "heck 0.5.0", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2235,9 +2249,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" +checksum = 
"ddb81adb1a5ae9182df379e374a79e24e992334e7346af4d065ae5b2acb8d4c6" dependencies = [ "http", "serde", @@ -2285,9 +2299,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.161" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libgit2-sys" @@ -2308,7 +2322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -2323,7 +2337,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", ] @@ -2394,9 +2408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "lz4-sys" @@ -2460,15 +2474,15 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" +checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" dependencies = [ "base64 0.22.1", "http-body-util", "hyper", "hyper-util", - "indexmap 2.7.0", + "indexmap 2.7.1", "ipnet", "metrics", "metrics-util", @@ -2480,15 +2494,17 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.18.0" +version = 
"0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" +checksum = "dbd4884b1dd24f7d6628274a2f5ae22465c337c5ba065ec9b6edccddf8acc673" dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", "metrics", "quanta", + "rand 0.8.5", + "rand_xoshiro", "sketches-ddsketch", ] @@ -2561,7 +2577,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -2635,7 +2651,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -2821,7 +2836,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -2876,7 +2891,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2897,27 +2912,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.7.0", + "indexmap 2.7.1", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = 
"d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3015,7 +3030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3073,13 +3088,13 @@ dependencies = [ [[package]] name = "proptest" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.8.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -3093,13 +3108,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3120,7 +3135,7 @@ checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.13.0", + "itertools 0.12.1", "log", "multimap", "once_cell", @@ -3129,7 +3144,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.90", + "syn 2.0.96", "tempfile", ] @@ -3140,10 +3155,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3259,9 +3274,9 @@ dependencies = [ [[package]] name = "quote" -version = 
"1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -3352,13 +3367,22 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "raw-cpuid" version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -3418,18 +3442,18 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] name = "redox_users" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror 1.0.69", + "thiserror 2.0.11", ] [[package]] @@ -3629,7 +3653,7 @@ version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno", "libc", "linux-raw-sys", @@ -3779,18 +3803,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" dependencies = [ "serde", ] [[package]] name = "sentry" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" +checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" dependencies = [ "httpdate", "reqwest", @@ -3806,9 +3830,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" +checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" dependencies = [ "backtrace", "once_cell", @@ -3818,9 +3842,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" +checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" dependencies = [ "hostname", "libc", @@ -3832,9 +3856,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" +checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" dependencies = [ "once_cell", "rand 0.8.5", @@ -3845,9 +3869,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" +checksum = "64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" dependencies = [ 
"sentry-backtrace", "sentry-core", @@ -3857,9 +3881,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" +checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" dependencies = [ "debugid", "hex", @@ -3874,9 +3898,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -3892,22 +3916,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "memchr", "ryu", @@ -3937,15 +3961,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.0", + 
"indexmap 2.7.1", "serde", "serde_derive", "serde_json", @@ -3955,14 +3979,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3971,7 +3995,7 @@ version = "0.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "libyml", "memchr", @@ -4017,7 +4041,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5f2390975ebfe8838f9e861f7a588123d49a7a7a0a08568ea831d8ad53fc9b4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "either", "incrementalmerkletree", "tracing", @@ -4208,9 +4232,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", @@ -4252,12 +4276,13 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if", "fastrand", + "getrandom 0.3.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -4292,11 +4317,11 @@ dependencies = [ [[package]] name = "thiserror" 
-version = "2.0.6" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.11", ] [[package]] @@ -4307,18 +4332,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4327,7 +4352,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "libc", "log", @@ -4390,9 +4415,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -4405,9 +4430,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -4424,13 +4449,13 @@ 
dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4519,7 +4544,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", @@ -4567,7 +4592,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4711,7 +4736,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4824,7 +4849,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5035,7 +5060,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5134,6 +5159,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.95" @@ -5156,7 +5190,7 @@ dependencies = [ "once_cell", "proc-macro2", 
"quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-shared", ] @@ -5190,7 +5224,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5280,7 +5314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -5289,7 +5323,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -5300,7 +5334,7 @@ checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ "windows-result", "windows-strings", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -5309,7 +5343,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -5319,16 +5353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ "windows-result", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", + "windows-targets", ] [[package]] @@ -5337,7 +5362,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 
dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -5346,22 +5371,7 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows-targets", ] [[package]] @@ -5370,46 +5380,28 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -5422,48 +5414,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = 
"windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -5479,6 +5447,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "wyz" version = "0.5.1" @@ -5714,7 +5691,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.44" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -5734,7 +5711,7 @@ dependencies = [ "hex", "humantime", "incrementalmerkletree", - "itertools 0.13.0", + "itertools 0.14.0", "jubjub", "lazy_static", "num-integer", @@ -5759,7 +5736,7 @@ dependencies = [ "spandoc", "static_assertions", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing", @@ -5802,7 +5779,7 @@ dependencies = [ "sapling-crypto", "serde", "spandoc", - "thiserror 2.0.6", + "thiserror 2.0.11", "tinyvec", "tokio", "tower 0.4.13", @@ -5847,7 +5824,7 @@ dependencies = [ name = "zebra-network" version = "1.0.0-beta.44" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "byteorder", "bytes", "chrono", @@ -5856,8 +5833,8 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.7.0", - "itertools 0.13.0", + "indexmap 2.7.1", + "itertools 0.14.0", "lazy_static", "metrics", "num-integer", @@ -5871,7 +5848,7 @@ dependencies = [ "serde", "static_assertions", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -5908,7 +5885,7 @@ dependencies = [ "hex", "http-body-util", "hyper", - "indexmap 2.7.0", + "indexmap 2.7.1", "insta", "jsonrpsee", "jsonrpsee-proc-macros", @@ -5919,7 +5896,7 @@ 
dependencies = [ "rand 0.8.5", "serde", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.11", "tokio", "tokio-stream", "tonic", @@ -5949,9 +5926,9 @@ dependencies = [ "futures", "group", "hex", - "indexmap 2.7.0", + "indexmap 2.7.1", "insta", - "itertools 0.13.0", + "itertools 0.14.0", "jsonrpc", "jubjub", "lazy_static", @@ -5990,7 +5967,7 @@ version = "1.0.0-beta.44" dependencies = [ "hex", "lazy_static", - "thiserror 2.0.6", + "thiserror 2.0.11", "zcash_script", "zebra-chain", "zebra-test", @@ -6013,9 +5990,9 @@ dependencies = [ "howudoin", "human_bytes", "humantime-serde", - "indexmap 2.7.0", + "indexmap 2.7.1", "insta", - "itertools 0.13.0", + "itertools 0.14.0", "jubjub", "lazy_static", "metrics", @@ -6033,7 +6010,7 @@ dependencies = [ "serde_json", "spandoc", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.11", "tinyvec", "tokio", "tower 0.4.13", @@ -6050,9 +6027,9 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.7.0", + "indexmap 2.7.1", "insta", - "itertools 0.13.0", + "itertools 0.14.0", "lazy_static", "once_cell", "owo-colors 4.1.0", @@ -6061,7 +6038,7 @@ dependencies = [ "regex", "spandoc", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.11", "tinyvec", "tokio", "tower 0.4.13", @@ -6076,8 +6053,8 @@ version = "1.0.0-beta.44" dependencies = [ "color-eyre", "hex", - "indexmap 2.7.0", - "itertools 0.13.0", + "indexmap 2.7.1", + "itertools 0.14.0", "jsonrpc", "quote", "rand 0.8.5", @@ -6087,8 +6064,8 @@ dependencies = [ "serde_json", "serde_yml", "structopt", - "syn 2.0.90", - "thiserror 2.0.6", + "syn 2.0.96", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing-error", @@ -6109,7 +6086,7 @@ dependencies = [ "atty", "bytes", "chrono", - "clap 4.5.23", + "clap 4.5.27", "color-eyre", "console-subscriber", "dirs", @@ -6121,7 +6098,7 @@ dependencies = [ "humantime-serde", "hyper", "hyper-util", - "indexmap 2.7.0", + "indexmap 2.7.1", "indicatif", "inferno", "insta", @@ -6144,7 +6121,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - 
"thiserror 2.0.6", + "thiserror 2.0.11", "thread-priority", "tinyvec", "tokio", @@ -6191,7 +6168,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -6211,7 +6188,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] diff --git a/deny.toml b/deny.toml index 3ae46206943..33e9872da29 100644 --- a/deny.toml +++ b/deny.toml @@ -52,9 +52,6 @@ skip-tree = [ # wait for ordered-map to release a dependency fix { name = "ordered-map", version = "=0.4.2" }, - # wait for primitive-types to upgrade - { name = "proc-macro-crate", version = "=0.1.5" }, - # wait for `color-eyre` to upgrade { name = "owo-colors", version = "=3.5.0" }, @@ -64,16 +61,10 @@ skip-tree = [ # wait for abscissa_core to upgrade {name = "tracing-log", version = "=0.1.4" }, - # wait for tokio-test -> tokio-stream to upgrade - { name = "tokio-util", version = "=0.6.10" }, - # wait for console-subscriber and tower to update hdrhistogram. # also wait for ron to update insta, and wait for tonic update. 
{ name = "base64", version = "=0.13.1" }, - # wait for elasticsearch to update base64, darling, rustc_version, serde_with - { name = "elasticsearch", version = "=8.5.0-alpha.1" }, - # wait for reqwest to update base64 { name = "base64", version = "=0.21.7" }, { name = "sync_wrapper", version = "0.1.2" }, @@ -91,9 +82,6 @@ skip-tree = [ # wait for halo2_gadgets and primitive-types to update uint { name = "uint", version = "=0.9.5" }, - # wait for dirs-sys to update windows-sys - { name = "windows-sys", version = "=0.48.0" }, - # wait for zebra to update tower { name = "tower", version = "=0.4.13" }, { name = "hashbrown", version = "=0.14.5" }, @@ -105,11 +93,6 @@ skip-tree = [ # Remove after release candicate period is over and the ECC crates are not patched anymore { name = "equihash", version = "=0.2.0" }, { name = "f4jumble", version = "=0.1.0" }, - { name = "incrementalmerkletree", version = "=0.6.0" }, - { name = "zcash_address", version = "=0.4.0" }, - { name = "zcash_keys", version = "=0.3.0" }, - { name = "zcash_primitives", version = "=0.16.0" }, - { name = "zcash_protocol", version = "=0.2.0" } ] # This section is considered when running `cargo deny check sources`. diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index fb76c19f499..6170840c098 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -24,9 +24,9 @@ categories = ["algorithms", "asynchronous"] [dependencies] futures = "0.3.31" futures-core = "0.3.28" -pin-project = "1.1.6" +pin-project = "1.1.8" rayon = "1.10.0" -tokio = { version = "1.42.0", features = ["time", "sync", "tracing", "macros"] } +tokio = { version = "1.43.0", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.13" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.41" @@ -36,12 +36,12 @@ tracing-futures = "0.2.5" color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. 
# Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } ed25519-zebra = "4.0.3" rand = "0.8.5" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.4" tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } tower-test = "0.4.0" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 61a0258d4d4..2141c2bdc4b 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -16,12 +16,12 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.6" +pin-project = "1.1.8" tower = "0.4.13" futures-core = "0.3.28" tracing = "0.1.41" [dev-dependencies] -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 25b6006240c..07a38ac0818 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -64,7 +64,7 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" -bitflags = "2.5.0" +bitflags = "2.8.0" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.2" blake2s_simd = "1.0.2" @@ -81,8 +81,8 @@ group = "0.13.0" incrementalmerkletree.workspace = true jubjub = "0.10.0" lazy_static = "1.4.0" -tempfile = "3.14.0" -dirs = "5.0.1" +tempfile = "3.16.0" +dirs = "6.0.0" num-integer = "0.1.46" primitive-types = "0.12.2" rand_core = "0.6.4" @@ -110,18 +110,18 @@ humantime = "2.1.0" # Error Handling & Formatting static_assertions = "1.1.0" -thiserror = "2.0.6" +thiserror = "2.0.11" tracing = "0.1.41" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.215", features = 
["serde_derive", "rc"] } -serde_with = "3.11.0" +serde = { version = "1.0.217", features = ["serde_derive", "rc"] } +serde_with = "3.12.0" serde-big-array = "0.5.1" # Processing futures = "0.3.31" -itertools = "0.13.0" +itertools = "0.14.0" rayon = "1.10.0" # ZF deps @@ -130,17 +130,17 @@ redjubjub = "0.7.0" reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.133", optional = true } +serde_json = { version = "1.0.138", optional = true } # Production feature async-error and testing feature proptest-impl -tokio = { version = "1.42.0", optional = true } +tokio = { version = "1.43.0", optional = true } # Experimental feature shielded-scan zcash_client_backend = { workspace = true, optional = true } # Optional testing dependencies -proptest = { version = "1.4.0", optional = true } -proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } @@ -155,18 +155,18 @@ criterion = { version = "0.5.1", features = ["html_reports"] } color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } spandoc = "0.2.2" tracing = "0.1.41" # Make the optional testing dependencies required -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-chain/src/parameters/network/magic.rs b/zebra-chain/src/parameters/network/magic.rs index 692a1b8d1ab..236e0149091 100644 --- a/zebra-chain/src/parameters/network/magic.rs +++ b/zebra-chain/src/parameters/network/magic.rs @@ -29,7 +29,7 @@ impl Network { } #[cfg(test)] -mod proptest { +mod magic_proptest { use proptest::prelude::*; diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index a6d86e30a60..8a01cbf44ad 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -46,13 +46,13 @@ rayon = "1.10.0" chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } lazy_static = "1.4.0" once_cell = "1.20.2" -serde = { version = "1.0.215", features = ["serde_derive"] } +serde = { version = "1.0.217", features = ["serde_derive"] } futures = "0.3.31" futures-util = "0.3.28" metrics = "0.24.1" -thiserror = "2.0.6" -tokio = { version = "1.42.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } +thiserror = "2.0.11" +tokio = { version = "1.43.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.41" tracing-futures = "0.2.5" @@ -75,22 +75,22 @@ zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } howudoin = { version = "0.1.2", optional = true } # Test-only dependencies -proptest = { version = "1.4.0", optional = true } 
-proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } [dev-dependencies] color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } hex = "0.4.3" num-integer = "0.1.46" -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" spandoc = "0.2.2" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.1" tracing-subscriber = "0.3.19" diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index cef52a4c584..411abc378ad 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -20,8 +20,8 @@ futures-util = "0.3.28" tonic = "0.12.3" tonic-reflection = "0.12.3" prost = "0.13.4" -serde = { version = "1.0.215", features = ["serde_derive"] } -tokio = { version = "1.42.0", features = ["macros", "rt-multi-thread"] } +serde = { version = "1.0.217", features = ["serde_derive"] } +tokio = { version = "1.43.0", features = ["macros", "rt-multi-thread"] } tokio-stream = "0.1.17" tower = { version = "0.4.13", features = ["util", "buffer", "timeout"] } color-eyre = "0.6.3" @@ -35,7 +35,7 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } tonic-build = "0.12.3" [dev-dependencies] -insta = { version = "1.41.1", features = ["redactions", "json", "ron"] } +insta = { version = "1.42.1", features = ["redactions", "json", "ron"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-state = { path = "../zebra-state" } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 7ccf4f1f259..c7410b6b02a 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ 
-40,28 +40,28 @@ progress-bar = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.5.0" +bitflags = "2.8.0" byteorder = "1.5.0" bytes = "1.9.0" chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } -dirs = "5.0.1" +dirs = "6.0.0" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "2.7.0", features = ["serde"] } -itertools = "0.13.0" +indexmap = { version = "2.7.1", features = ["serde"] } +itertools = "0.14.0" lazy_static = "1.4.0" num-integer = "0.1.46" ordered-map = "0.4.2" -pin-project = "1.1.6" +pin-project = "1.1.8" rand = "0.8.5" rayon = "1.10.0" regex = "1.11.0" -serde = { version = "1.0.215", features = ["serde_derive"] } -tempfile = "3.14.0" -thiserror = "2.0.6" +serde = { version = "1.0.217", features = ["serde_derive"] } +tempfile = "3.16.0" +thiserror = "2.0.11" futures = "0.3.31" -tokio = { version = "1.42.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.43.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.17", features = ["sync", "time"] } tokio-util = { version = "0.7.13", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } @@ -80,17 +80,17 @@ howudoin = { version = "0.1.2", optional = true } # tor-rtcompat = { version = "0.0.2", optional = true } # proptest dependencies -proptest = { version = "1.4.0", optional = true } -proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } [dev-dependencies] -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" static_assertions = "1.1.0" -tokio = { 
version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } toml = "0.8.19" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 42d51c117b3..c84e8e9e2f2 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -43,17 +43,17 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } # Tool and test feature rpc-client color-eyre = { version = "0.6.3", optional = true } -jsonrpsee-types = { version = "0.24.7", optional = true } +jsonrpsee-types = { version = "0.24.8", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.215", optional = true } -serde_json = { version = "1.0.133", optional = true } -tokio = { version = "1.42.0", features = ["time", "sync"] } +serde = { version = "1.0.217", optional = true } +serde_json = { version = "1.0.138", optional = true } +tokio = { version = "1.43.0", features = ["time", "sync"] } [dev-dependencies] color-eyre = "0.6.3" reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"] } -serde = "1.0.215" -serde_json = "1.0.133" -jsonrpsee-types = "0.24.7" +serde = "1.0.217" +serde_json = "1.0.138" +jsonrpsee-types = "0.24.8" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 0e58deaeee5..0c93ba9b4e9 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -60,13 +60,13 @@ chrono = { version = "0.4.39", default-features = false, features = [ ] } futures = "0.3.31" -jsonrpsee = { version = "0.24.7", features = ["server"] } -jsonrpsee-types = "0.24.7" -jsonrpsee-proc-macros = "0.24.7" -hyper = "1.5.0" +jsonrpsee = { version = "0.24.8", features = ["server"] } +jsonrpsee-types = "0.24.8" +jsonrpsee-proc-macros = "0.24.8" +hyper = 
"1.6.0" http-body-util = "0.1.2" -serde_json = "1.0.133" -indexmap = { version = "2.7.0", features = ["serde"] } +serde_json = "1.0.138" +indexmap = { version = "2.7.1", features = ["serde"] } # RPC endpoint basic auth base64 = "0.22.1" @@ -75,7 +75,7 @@ rand = "0.8.5" # Error handling color-eyre = "0.6.3" -tokio = { version = "1.42.0", features = [ +tokio = { version = "1.43.0", features = [ "time", "rt-multi-thread", "macros", @@ -92,7 +92,7 @@ tokio-stream = { version = "0.1.17", optional = true } tracing = "0.1.41" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.215", features = ["serde_derive"] } +serde = { version = "1.0.217", features = ["serde_derive"] } # For the `stop` RPC method. nix = { version = "0.29.0", features = ["signal"] } @@ -103,7 +103,7 @@ zcash_primitives = { workspace = true, features = ["transparent-inputs"] } zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl -proptest = { version = "1.4.0", optional = true } +proptest = { version = "1.6.0", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ "json-conversion", @@ -120,12 +120,12 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } tonic-build = { version = "0.12.3", optional = true } [dev-dependencies] -insta = { version = "1.41.1", features = ["redactions", "json", "ron"] } +insta = { version = "1.42.1", features = ["redactions", "json", "ron"] } -proptest = "1.4.0" +proptest = "1.6.0" -thiserror = "2.0.6" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +thiserror = "2.0.11" +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ "proptest-impl", diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 57a6f12fd2c..e1e4e2618ff 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -61,11 +61,11 @@ results-reader 
= [ [dependencies] color-eyre = "0.6.3" -indexmap = { version = "2.7.0", features = ["serde"] } -itertools = "0.13.0" -semver = "1.0.23" -serde = { version = "1.0.215", features = ["serde_derive"] } -tokio = { version = "1.42.0", features = ["time"] } +indexmap = { version = "2.7.1", features = ["serde"] } +itertools = "0.14.0" +semver = "1.0.25" +serde = { version = "1.0.217", features = ["serde_derive"] } +tokio = { version = "1.43.0", features = ["time"] } tower = "0.4.13" tracing = "0.1.41" futures = "0.3.31" @@ -86,8 +86,8 @@ zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } # test feature proptest-impl -proptest = { version = "1.4.0", optional = true } -proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } bls12_381 = { version = "0.8.0", optional = true } ff = { version = "0.13.0", optional = true } @@ -102,7 +102,7 @@ zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44", optional = tru tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } structopt = "0.3.26" lazy_static = "1.4.0" -serde_json = "1.0.133" +serde_json = "1.0.138" jsonrpc = { version = "0.18.0", optional = true } hex = { version = "0.4.3", optional = true } @@ -110,17 +110,17 @@ hex = { version = "0.4.3", optional = true } zebrad = { path = "../zebrad", version = "2.1.1" } [dev-dependencies] -insta = { version = "1.41.1", features = ["ron", "redactions"] } -tokio = { version = "1.42.0", features = ["test-util"] } +insta = { version = "1.42.1", features = ["ron", "redactions"] } +tokio = { version = "1.43.0", features = ["test-util"] } -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" bls12_381 = "0.8.0" ff = "0.13.0" group = "0.13.0" jubjub = "0.10.0" rand = "0.8.5" -tempfile = "3.14.0" +tempfile = 
"3.16.0" zcash_note_encryption = "0.4.1" toml = "0.8.19" tonic = "0.12.3" diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 0a187676798..5aed0b0a949 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -18,7 +18,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] zcash_script = "0.2.0" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } -thiserror = "2.0.6" +thiserror = "2.0.11" [dev-dependencies] hex = "0.4.3" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index e7d8dc54215..ab236e830d1 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -51,35 +51,35 @@ elasticsearch = [ [dependencies] bincode = "1.3.3" chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } -dirs = "5.0.1" +dirs = "6.0.0" futures = "0.3.31" hex = "0.4.3" hex-literal = "0.4.1" humantime-serde = "1.1.1" human_bytes = { version = "0.4.3", default-features = false } -indexmap = "2.7.0" -itertools = "0.13.0" +indexmap = "2.7.1" +itertools = "0.14.0" lazy_static = "1.4.0" metrics = "0.24.1" mset = "0.1.1" regex = "1.11.0" rlimit = "0.10.2" rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } -semver = "1.0.23" -crossbeam-channel = "0.5.13" -serde = { version = "1.0.215", features = ["serde_derive"] } -tempfile = "3.14.0" -thiserror = "2.0.6" +semver = "1.0.25" +crossbeam-channel = "0.5.14" +serde = { version = "1.0.217", features = ["serde_derive"] } +tempfile = "3.16.0" +thiserror = "2.0.11" rayon = "1.10.0" -tokio = { version = "1.42.0", features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { version = "1.43.0", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.41" # elasticsearch specific dependencies. 
# Security: avoid default dependency on openssl -elasticsearch = { version = "8.16.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.133", package = "serde_json", optional = true } +elasticsearch = { version = "8.17.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } +serde_json = { version = "1.0.138", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } @@ -88,29 +88,29 @@ howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } -proptest = { version = "1.4.0", optional = true } -proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } [dev-dependencies] color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } once_cell = "1.20.2" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.41.1", features = ["ron", "redactions"] } +insta = { version = "1.42.1", features = ["ron", "redactions"] } -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index d7e22448a7f..b23711733c6 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -16,32 +16,32 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" -indexmap = "2.7.0" +indexmap = "2.7.1" lazy_static = "1.4.0" -insta = "1.41.1" -itertools = "0.13.0" -proptest = "1.4.0" +insta = "1.42.1" +itertools = "0.14.0" +proptest = "1.6.0" once_cell = "1.20.2" rand = "0.8.5" regex = "1.11.0" -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.31" color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "4.1.0" spandoc = "0.2.2" -thiserror = "2.0.6" +thiserror = "2.0.11" tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } tracing-error = "0.2.1" tracing = "0.1.41" [dev-dependencies] -tempfile = "3.14.0" +tempfile = "3.16.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 51929caff71..49b1db4c65b 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -85,14 +85,14 @@ openapi-generator = [ color-eyre = "0.6.3" # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.133" +serde_json = "1.0.138" tracing-error = "0.2.1" tracing-subscriber = "0.3.19" -thiserror = "2.0.6" +thiserror = "2.0.11" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } @@ -101,7 +101,7 @@ zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.13.0", optional = true } +itertools = { version = "0.14.0", optional = true } # These crates are needed for the search-issue-refs binary regex = { version = "1.11.0", optional = true } @@ -109,7 +109,7 @@ regex = { version = "1.11.0", optional = true } reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.42.0", features = ["full"], optional = true } +tokio = { version = "1.43.0", features = ["full"], 
optional = true } jsonrpc = { version = "0.18.0", optional = true } @@ -119,9 +119,9 @@ zcash_protocol.workspace = true # For the openapi generator rand = "0.8.5" -syn = { version = "2.0.79", features = ["full"], optional = true } -quote = { version = "1.0.37", optional = true } +syn = { version = "2.0.96", features = ["full"], optional = true } +quote = { version = "1.0.38", optional = true } serde_yml = { version = "0.0.12", optional = true } -serde = { version = "1.0.215", features = ["serde_derive"], optional = true } -indexmap = "2.7.0" +serde = { version = "1.0.217", features = ["serde_derive"], optional = true } +indexmap = "2.7.1" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index c0bb6f0873d..63e3372c1a5 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -170,28 +170,28 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.5.23", features = ["cargo"] } +clap = { version = "4.5.27", features = ["cargo"] } chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "2.7.0" +indexmap = "2.7.1" lazy_static = "1.4.0" -semver = "1.0.23" -serde = { version = "1.0.215", features = ["serde_derive"] } +semver = "1.0.25" +serde = { version = "1.0.217", features = ["serde_derive"] } toml = "0.8.19" futures = "0.3.31" rayon = "1.10.0" -tokio = { version = "1.42.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.43.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tokio-stream = { version = "0.1.17", features = ["time"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.6" +pin-project = "1.1.8" color-eyre = { version = "0.6.3", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. 
# Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.0", features = ["rustc_1_55"] } +tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } -thiserror = "2.0.6" +thiserror = "2.0.11" tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } tracing-appender = "0.2.3" @@ -201,7 +201,7 @@ tracing = "0.1.41" metrics = "0.24.1" -dirs = "5.0.1" +dirs = "6.0.0" atty = "0.2.14" num-integer = "0.1.46" @@ -211,37 +211,37 @@ rand = "0.8.5" thread-priority = { version = "1.2.0", optional = true } # prod feature sentry -sentry = { version = "0.35.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.36.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.12.0", default-features = false, optional = true } +inferno = { version = "0.12.1", default-features = false, optional = true } # prod feature journald tracing-journald = { version = "0.3.0", optional = true } # prod feature filter-reload -hyper = { version = "1.5.1", features = ["http1", "http2", "server"], optional = true } +hyper = { version = "1.6.0", features = ["http1", "http2", "server"], optional = true } http-body-util = { version = "0.1.2", optional = true } hyper-util = { version = "0.1.9", optional = true } bytes = { version = "1.9.0", optional = true } # prod feature prometheus -metrics-exporter-prometheus = { version = "0.16.0", default-features = false, features = ["http-listener"], optional = true } +metrics-exporter-prometheus = { version = "0.16.1", default-features = false, features = ["http-listener"], optional = true } # prod feature release_max_level_info # # zebrad uses tracing for logging, # we only use `log` to set and print the static log levels in transitive dependencies -log = "0.4.22" +log = 
"0.4.25" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.9", optional = true } +indicatif = { version = "0.17.11", optional = true } # test feature proptest-impl -proptest = { version = "1.4.0", optional = true } -proptest-derive = { version = "0.5.0", optional = true } +proptest = { version = "1.6.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } # test feature tokio-console console-subscriber = { version = "0.4.0", optional = true } @@ -256,27 +256,27 @@ tonic-build = { version = "0.12.3", optional = true } abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" hex-literal = "0.4.1" -jsonrpsee-types = "0.24.7" +jsonrpsee-types = "0.24.8" once_cell = "1.20.2" regex = "1.11.0" -insta = { version = "1.41.1", features = ["json"] } +insta = { version = "1.42.1", features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.133", features = ["preserve_order"] } -tempfile = "3.14.0" +serde_json = { version = "1.0.138", features = ["preserve_order"] } +tempfile = "3.16.0" -hyper = { version = "1.5.1", features = ["http1", "http2", "server"]} +hyper = { version = "1.6.0", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.42.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.17" # test feature lightwalletd-grpc-tests prost = "0.13.4" tonic = "0.12.3" -proptest = "1.4.0" -proptest-derive = "0.5.0" +proptest = "1.6.0" +proptest-derive = "0.5.1" # enable span traces and track caller in tests color-eyre = { version = "0.6.3" } diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 34cd0ba682d..584a28f7705 100644 --- a/zebrad/tests/acceptance.rs +++ 
b/zebrad/tests/acceptance.rs @@ -1726,8 +1726,6 @@ fn non_blocking_logger() -> Result<()> { let (done_tx, done_rx) = mpsc::channel(); let test_task_handle: tokio::task::JoinHandle> = rt.spawn(async move { - let _init_guard = zebra_test::init(); - let mut config = os_assigned_rpc_port_config(false, &Mainnet)?; config.tracing.filter = Some("trace".to_string()); config.tracing.buffer_limit = 100; From 13e8e7991c5ca2b2d49ece9f9e95214fe6286c66 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 31 Jan 2025 07:13:22 -0500 Subject: [PATCH 071/245] fix(consensus): Avoid a concurrency bug when verifying transactions in blocks that are already present in the mempool (#9118) * Wait for spent UTXOs to arrive in the state if they're not found elsewhere instead of returning an error when a transaction in a candidate block has already been verified in the mempool * updates test, avoids returning InternalDowncastErrors when AwaitUtxo requests time out * return early if there are no missing deps * Applies suggestions from code review * Updates field documentation --- zebra-consensus/src/transaction.rs | 56 ++++++++++++++----- zebra-consensus/src/transaction/tests.rs | 46 ++++++++++----- .../src/mempool/transaction_dependencies.rs | 7 +++ zebra-state/src/request.rs | 3 +- 4 files changed, 82 insertions(+), 30 deletions(-) diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 044a9569f9a..c0c735871e7 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -402,7 +402,7 @@ where async move { tracing::trace!(?tx_id, ?req, "got tx verify request"); - if let Some(result) = Self::try_find_verified_unmined_tx(&req, mempool.clone()).await { + if let Some(result) = Self::find_verified_unmined_tx(&req, mempool.clone(), state.clone()).await { let verified_tx = result?; return Ok(Response::Block { @@ -645,17 +645,22 @@ where } /// Attempts to find a transaction in the mempool by its transaction hash and checks - /// that all of 
its dependencies are available in the block. + /// that all of its dependencies are available in the block or in the state. Waits + /// for UTXOs being spent by the given transaction to arrive in the state if they're + /// not found elsewhere. /// /// Returns [`Some(Ok(VerifiedUnminedTx))`](VerifiedUnminedTx) if successful, /// None if the transaction id was not found in the mempool, /// or `Some(Err(TransparentInputNotFound))` if the transaction was found, but some of its - /// dependencies are missing in the block. - async fn try_find_verified_unmined_tx( + /// dependencies were not found in the block or state after a timeout. + async fn find_verified_unmined_tx( req: &Request, mempool: Option>, + state: Timeout, ) -> Option> { - if req.is_mempool() || req.transaction().is_coinbase() { + let tx = req.transaction(); + + if req.is_mempool() || tx.is_coinbase() { return None; } @@ -664,7 +669,7 @@ where let tx_id = req.tx_mined_id(); let mempool::Response::TransactionWithDeps { - transaction, + transaction: verified_tx, dependencies, } = mempool .oneshot(mempool::Request::TransactionWithDepsByMinedId(tx_id)) @@ -676,17 +681,35 @@ where // Note: This does not verify that the spends are in order, the spend order // should be verified during contextual validation in zebra-state. - let has_all_tx_deps = dependencies + let missing_deps: HashSet<_> = dependencies .into_iter() - .all(|dependency_id| known_outpoint_hashes.contains(&dependency_id)); + .filter(|dependency_id| !known_outpoint_hashes.contains(dependency_id)) + .collect(); - let result = if has_all_tx_deps { - Ok(transaction) - } else { - Err(TransactionError::TransparentInputNotFound) - }; + if missing_deps.is_empty() { + return Some(Ok(verified_tx)); + } + + let missing_outpoints = tx.inputs().iter().filter_map(|input| { + if let transparent::Input::PrevOut { outpoint, .. 
} = input { + missing_deps.contains(&outpoint.hash).then_some(outpoint) + } else { + None + } + }); + + for missing_outpoint in missing_outpoints { + let query = state + .clone() + .oneshot(zebra_state::Request::AwaitUtxo(*missing_outpoint)); + match query.await { + Ok(zebra_state::Response::Utxo(_)) => {} + Err(_) => return Some(Err(TransactionError::TransparentInputNotFound)), + _ => unreachable!("AwaitUtxo always responds with Utxo"), + }; + } - Some(result) + Some(Ok(verified_tx)) } /// Wait for the UTXOs that are being spent by the given transaction. @@ -732,7 +755,10 @@ where .clone() .oneshot(zs::Request::UnspentBestChainUtxo(*outpoint)); - let zebra_state::Response::UnspentBestChainUtxo(utxo) = query.await? else { + let zebra_state::Response::UnspentBestChainUtxo(utxo) = query + .await + .map_err(|_| TransactionError::TransparentInputNotFound)? + else { unreachable!("UnspentBestChainUtxo always responds with Option") }; diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 597f6b9335f..96ddaaa8903 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -753,8 +753,9 @@ async fn skips_verification_of_block_transactions_in_mempool() { transparent::Input::Coinbase { .. 
} => panic!("requires a non-coinbase transaction"), }; + let mut state_clone = state.clone(); tokio::spawn(async move { - state + state_clone .expect_request(zebra_state::Request::BestChainNextMedianTimePast) .await .expect("verifier should call mock state service with correct request") @@ -762,13 +763,13 @@ async fn skips_verification_of_block_transactions_in_mempool() { DateTime32::MAX, )); - state + state_clone .expect_request(zebra_state::Request::UnspentBestChainUtxo(input_outpoint)) .await .expect("verifier should call mock state service with correct request") .respond(zebra_state::Response::UnspentBestChainUtxo(None)); - state + state_clone .expect_request_that(|req| { matches!( req, @@ -780,20 +781,20 @@ async fn skips_verification_of_block_transactions_in_mempool() { .respond(zebra_state::Response::ValidBestChainTipNullifiersAndAnchors); }); + let utxo = known_utxos + .get(&input_outpoint) + .expect("input outpoint should exist in known_utxos") + .utxo + .clone(); + let mut mempool_clone = mempool.clone(); + let output = utxo.output.clone(); tokio::spawn(async move { mempool_clone .expect_request(mempool::Request::AwaitOutput(input_outpoint)) .await .expect("verifier should call mock state service with correct request") - .respond(mempool::Response::UnspentOutput( - known_utxos - .get(&input_outpoint) - .expect("input outpoint should exist in known_utxos") - .utxo - .output - .clone(), - )); + .respond(mempool::Response::UnspentOutput(output)); }); let verifier_response = verifier @@ -825,11 +826,11 @@ async fn skips_verification_of_block_transactions_in_mempool() { let mut mempool_clone = mempool.clone(); tokio::spawn(async move { - for _ in 0..2 { + for _ in 0..3 { mempool_clone .expect_request(mempool::Request::TransactionWithDepsByMinedId(tx_hash)) .await - .expect("verifier should call mock state service with correct request") + .expect("verifier should call mock mempool service with correct request") .respond(mempool::Response::TransactionWithDeps { 
transaction: transaction.clone(), dependencies: [input_outpoint.hash].into(), @@ -855,6 +856,23 @@ async fn skips_verification_of_block_transactions_in_mempool() { panic!("unexpected response variant from transaction verifier for Block request") }; + tokio::spawn(async move { + state + .expect_request(zebra_state::Request::AwaitUtxo(input_outpoint)) + .await + .expect("verifier should call mock state service with correct request") + .respond(zebra_state::Response::Utxo(utxo)); + }); + + let crate::transaction::Response::Block { .. } = verifier + .clone() + .oneshot(make_request.clone()(Arc::new(HashSet::new()))) + .await + .expect("should succeed after calling state service") + else { + panic!("unexpected response variant from transaction verifier for Block request") + }; + let verifier_response_err = *verifier .clone() .oneshot(make_request(Arc::new(HashSet::new()))) @@ -875,7 +893,7 @@ async fn skips_verification_of_block_transactions_in_mempool() { // already the mempool. assert_eq!( mempool.poll_count(), - 4, + 5, "the mempool service should have been polled 4 times" ); } diff --git a/zebra-node-services/src/mempool/transaction_dependencies.rs b/zebra-node-services/src/mempool/transaction_dependencies.rs index 2b333060b77..dfab5b6c208 100644 --- a/zebra-node-services/src/mempool/transaction_dependencies.rs +++ b/zebra-node-services/src/mempool/transaction_dependencies.rs @@ -11,6 +11,13 @@ pub struct TransactionDependencies { /// a mempool transaction. Used during block template construction /// to exclude transactions from block templates unless all of the /// transactions they depend on have been included. + /// + /// # Note + /// + /// Dependencies that have been mined into blocks are not removed here until those blocks have + /// been committed to the best chain. 
Dependencies that have been committed onto side chains, or + /// which are in the verification pipeline but have not yet been committed to the best chain, + /// are not removed here unless and until they arrive in the best chain, and the mempool is polled. dependencies: HashMap>, /// Lists of transaction ids in the mempool that spend UTXOs created diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 6336438412d..ee7ddedc73f 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -733,7 +733,8 @@ pub enum Request { /// /// This request is purely informational, and there are no guarantees about /// whether the UTXO remains unspent or is on the best chain, or any chain. - /// Its purpose is to allow asynchronous script verification. + /// Its purpose is to allow asynchronous script verification or to wait until + /// the UTXO arrives in the state before validating dependant transactions. /// /// # Correctness /// From bb7ef30d968d3671f9e69baf0a08d7fc7ed09418 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:15:40 +0100 Subject: [PATCH 072/245] docs: corrections (#9189) * Update sub-test-zebra-config.yml * Update sub-ci-unit-tests-docker.yml --- .github/workflows/sub-ci-unit-tests-docker.yml | 2 +- .github/workflows/sub-test-zebra-config.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index da69d12e286..0847806f610 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -151,7 +151,7 @@ jobs: test_variables: '-e NETWORK' network: 'Mainnet' - # Test reconfiguring the the docker image for tesnet. + # Test reconfiguring the docker image for tesnet. 
test-configuration-file-testnet: name: Test CI testnet Docker config file # Make sure Zebra can sync the genesis block on testnet diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 41586052cd4..69990bdb3e1 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -25,7 +25,7 @@ on: test_variables: required: true type: string - description: 'Environmental variables used to select and configure the test' + description: 'Environment variables used to select and configure the test' network: required: false type: string From 09538d417c598b718d932e242e90372b63e7a91d Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Fri, 31 Jan 2025 20:08:21 -0300 Subject: [PATCH 073/245] release(maintenance): Run cargo update for v2.2.0 (#9188) * cargo update foir v2.2.0 * revert zcash_protocol to 0.4.0 (which requires zcash_address 0.6.0) --------- Co-authored-by: Conrado Gouvea --- Cargo.lock | 829 +++++++++++++++++++++++++++++++++++++---------------- deny.toml | 14 +- 2 files changed, 585 insertions(+), 258 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a2d61944a5c..11dcdfaa92c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -38,7 +38,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -107,9 +107,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -143,9 +143,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" 
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -158,9 +158,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -182,19 +182,20 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", + "once_cell", "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arc-swap" @@ -216,9 +217,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "flate2", "futures-core", @@ -251,9 +252,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies 
= [ "proc-macro2", "quote", @@ -285,9 +286,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -304,8 +305,8 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 1.0.1", - "tower 0.5.1", + "sync_wrapper", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -325,7 +326,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper", "tower-layer", "tower-service", ] @@ -427,29 +428,29 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.70.1" +version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ "bitflags 2.8.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash 2.1.0", "shlex", "syn 2.0.96", ] [[package]] name = "bip32" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa13fae8b6255872fd86f7faf4b41168661d7d78609f7bfe6771b85c6739a15b" +checksum = "db40d3dfbeab4e031d78c844642fa0caa0b0db11ce1607ac9d2986dff1405c69" dependencies = [ "bs58", "hmac", @@ -575,9 +576,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = 
"1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byte-slice-cast" @@ -587,9 +588,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -631,9 +632,9 @@ checksum = "e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -669,9 +670,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -876,15 +877,15 @@ checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width 0.1.14", - "windows-sys 0.52.0", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", ] [[package]] @@ -955,9 +956,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" +version = 
"0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1018,9 +1019,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1037,15 +1038,15 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-common" @@ -1182,6 +1183,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "document-features" version = "0.2.10" @@ -1254,9 +1266,9 @@ dependencies = [ [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = 
"34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "env_logger" @@ -1286,12 +1298,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1306,18 +1318,18 @@ dependencies = [ [[package]] name = "f4jumble" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a83e8d7fd0c526af4aad893b7c9fe41e2699ed8a776a6c74aecdeafe05afc75" +checksum = "0d42773cb15447644d170be20231a3268600e0c4cea8987d013b93ac973d3cf7" dependencies = [ "blake2b_simd", ] [[package]] name = "fastrand" -version = "2.1.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" @@ -1356,12 +1368,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide 0.8.3", ] [[package]] @@ -1385,9 +1397,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "form_urlencoded" @@ -1562,6 +1574,18 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "getset" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded738faa0e88d3abc9d1a13cb11adc2073c400969eeb8793cf7132589959fc" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "gimli" version = "0.28.1" @@ -1583,9 +1607,9 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "group" @@ -1601,9 +1625,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -1630,18 +1654,20 @@ dependencies = [ [[package]] name = "halo2_gadgets" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126a150072b0c38c7b573fe3eaf0af944a7fed09e154071bf2436d3f016f7230" +checksum = "73a5e510d58a07d8ed238a5a8a436fe6c2c79e1bb2611f62688bc65007b4e6e7" dependencies = [ "arrayvec", "bitvec", "ff", "group", + "halo2_poseidon", "halo2_proofs", "lazy_static", "pasta_curves", "rand 0.8.5", + "sinsemilla", "subtle", "uint 0.9.5", ] @@ -1652,6 +1678,18 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47716fe1ae67969c5e0b2ef826f32db8c3be72be325e1aa3c1951d06b5575ec5" 
+[[package]] +name = "halo2_poseidon" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa3da60b81f02f9b33ebc6252d766f843291fb4d2247a07ae73d20b791fc56f" +dependencies = [ + "bitvec", + "ff", + "group", + "pasta_curves", +] + [[package]] name = "halo2_proofs" version = "0.3.0" @@ -1768,11 +1806,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1799,9 +1837,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1833,9 +1871,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1888,9 +1926,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", "http", @@ -1906,9 +1944,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -1959,6 +1997,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1967,12 +2123,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = 
[ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1986,13 +2153,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.96", ] [[package]] @@ -2089,19 +2256,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2128,6 +2295,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -2139,9 +2315,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -2154,10 +2330,11 @@ dependencies 
= [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -2186,9 +2363,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" +checksum = "76637f6294b04e747d68e69336ef839a3493ca62b35bf488ead525f7da75c5bb" dependencies = [ "async-trait", "bytes", @@ -2199,7 +2376,7 @@ dependencies = [ "jsonrpsee-types", "parking_lot", "rand 0.8.5", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "thiserror 1.0.69", @@ -2222,9 +2399,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" +checksum = "66b7a3df90a1a60c3ed68e7ca63916b53e9afa928e33531e87f61a9c8e9ae87b" dependencies = [ "futures-util", "http", @@ -2317,9 +2494,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets", @@ -2368,9 +2545,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" 
dependencies = [ "cc", "libc", @@ -2386,9 +2563,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "litrs" @@ -2455,12 +2638,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memuse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2145869435ace5ea6ea3d35f59be559317ec9a0d04e1812d5f185a87b6d36f1a" -dependencies = [ - "nonempty", -] +checksum = "3d97bbf43eb4f088f8ca469930cde17fa036207c9a5e02ccc5107c4e8b17c964" [[package]] name = "metrics" @@ -2531,20 +2711,19 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", @@ -2713,17 +2892,20 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" 
-version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f18e997fa121de5c73e95cdc7e8512ae43b7de38904aeea5e5713cc48f3c0ba" +checksum = "02f7152474406422f572de163e0bc63b2126cdbfe17bc849efbbde36fcfe647e" dependencies = [ "aes", "bitvec", "blake2b_simd", + "core2", "ff", "fpe", + "getset", "group", "halo2_gadgets", + "halo2_poseidon", "halo2_proofs", "hex", "incrementalmerkletree", @@ -2734,6 +2916,7 @@ dependencies = [ "rand 0.8.5", "reddsa", "serde", + "sinsemilla", "subtle", "tracing", "visibility", @@ -2754,9 +2937,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.8.2" +version = "3.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" +checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" dependencies = [ "log", "serde", @@ -2862,20 +3045,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.11", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" +checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" dependencies = [ "pest", "pest_generator", @@ -2883,9 +3066,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" +checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" dependencies = [ "pest", "pest_meta", @@ -2896,9 +3079,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" +checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" dependencies = [ "once_cell", "pest", @@ -2937,9 +3120,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -3004,9 +3187,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -3025,9 +3208,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", "syn 2.0.96", @@ -3077,11 +3260,33 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + 
+[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -3129,13 +3334,12 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +checksum = "d0f3e5beed80eb580c68e2c600937ac2c4eedabdfd5ef1e5b7ea4f3fba84497b" dependencies = [ - "bytes", "heck 0.5.0", - "itertools 0.12.1", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -3155,7 +3359,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.96", @@ -3163,18 +3367,18 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" dependencies = [ "prost", ] [[package]] name = "quanta" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +checksum = 
"3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" dependencies = [ "crossbeam-utils", "libc", @@ -3193,9 +3397,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.37.1" +version = "0.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22f29bdff3987b4d8632ef95fd6424ec7e4e0a57e2f4fc63e489e75357f6a03" +checksum = "165859e9e55f79d67b96c5d96f4e88b6f2695a1972849c15a6a3f5c59fc2c003" dependencies = [ "memchr", ] @@ -3225,44 +3429,47 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom 0.2.15", "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", + "rustls-pki-types", "slab", - "thiserror 1.0.69", + "thiserror 2.0.11", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.6" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -3378,9 +3585,9 @@ dependencies = [ [[package]] name = 
"raw-cpuid" -version = "11.2.0" +version = "11.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" dependencies = [ "bitflags 2.8.0", ] @@ -3438,9 +3645,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.8.0", ] @@ -3464,7 +3671,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -3479,9 +3686,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -3502,9 +3709,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "async-compression", "base64 0.22.1", @@ -3532,10 +3739,11 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper", "tokio", "tokio-rustls", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -3628,9 +3836,9 @@ checksum = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc-hex" @@ -3649,22 +3857,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags 2.8.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "log", "once_cell", @@ -3686,9 +3894,12 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -3703,9 +3914,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -3721,9 +3932,9 @@ dependencies = [ 
[[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "same-file" @@ -4073,9 +4284,20 @@ dependencies = [ [[package]] name = "similar" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + +[[package]] +name = "sinsemilla" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d268ae0ea06faafe1662e9967cd4f9022014f5eeb798e0c302c876df8b7af9c" +dependencies = [ + "group", + "pasta_curves", + "subtle", +] [[package]] name = "sketches-ddsketch" @@ -4100,9 +4322,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4165,6 +4387,12 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -4243,15 +4471,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "sync_wrapper" -version = "1.0.1" 
+version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -4268,6 +4490,17 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "tap" version = "1.0.1" @@ -4372,9 +4605,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -4395,14 +4628,24 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -4460,12 +4703,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] @@ -4540,9 +4782,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ "indexmap 2.7.1", "serde", @@ -4631,14 +4873,15 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper", + "tokio", "tower-layer", "tower-service", ] @@ -4782,9 +5025,9 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" dependencies = [ "libc", "tracing-core", @@ -4909,26 +5152,11 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-segmentation" @@ -4972,9 +5200,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.1" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" dependencies = [ "base64 0.22.1", "log", @@ -4987,9 +5215,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -4997,6 +5225,18 @@ dependencies = [ "serde", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -5005,18 +5245,18 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ "serde", ] [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -5170,24 +5410,24 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.96", @@ -5196,21 +5436,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = 
"7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5218,9 +5459,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", @@ -5231,15 +5472,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -5257,9 +5501,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" dependencies = [ "rustls-pki-types", ] @@ -5440,9 +5684,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = 
"7e49d2d35d3fad69b39b94139037ecfb4f359f08958b9c11e7315ce770462419" dependencies = [ "memchr", ] @@ -5456,6 +5700,18 @@ dependencies = [ "bitflags 2.8.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -5483,6 +5739,30 @@ version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", + "synstructure 0.13.1", +] + [[package]] name = "zcash_address" version = "0.6.0" @@ -5674,7 +5954,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2122a042c77d529d3c60b899e74705eda39ae96a8a992460caeb06afa76990a2" dependencies = [ - "bindgen 0.70.1", + "bindgen 0.71.1", "cc", ] @@ -6171,6 +6451,27 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = 
"0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", + "synstructure 0.13.1", +] + [[package]] name = "zeroize" version = "1.8.1" @@ -6191,11 +6492,33 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "zip32" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92022ac1e47c7b78f9cee29efac8a1a546e189506f3bb5ad46d525be7c519bf6" +checksum = "2e9943793abf9060b68e1889012dafbd5523ab5b125c0fcc24802d69182f2ac9" dependencies = [ "blake2b_simd", "memuse", diff --git a/deny.toml b/deny.toml index 33e9872da29..7b73f984f69 100644 --- a/deny.toml +++ b/deny.toml @@ -65,10 +65,6 @@ skip-tree = [ # also wait for ron to update insta, and wait for tonic update. 
{ name = "base64", version = "=0.13.1" }, - # wait for reqwest to update base64 - { name = "base64", version = "=0.21.7" }, - { name = "sync_wrapper", version = "0.1.2" }, - # wait for abscissa_core to update toml { name = "toml", version = "=0.5.11" }, @@ -92,7 +88,15 @@ skip-tree = [ # Remove after release candicate period is over and the ECC crates are not patched anymore { name = "equihash", version = "=0.2.0" }, - { name = "f4jumble", version = "=0.1.0" }, + + # wait for zcash_client_backend to update bech32 + { name = "bech32", version = "=0.9.1" }, + + # wait for zcash_script to update itertools + { name = "itertools", version = "=0.13.0" }, + + # wait for abscissa_core to update synstructure + { name = "synstructure", version = "=0.12.6" }, ] # This section is considered when running `cargo deny check sources`. From fcf5565b2ee981f0c37bf9a9a9910d5bb7d03dbe Mon Sep 17 00:00:00 2001 From: Marek Date: Sat, 1 Feb 2025 13:49:59 +0100 Subject: [PATCH 074/245] fix(consensus): Verify consensus branch ID in SIGHASH precomputation (#9139) * Add `has_foo` fns to `Transaction` * Add V5 SIGHASH test based on consensus branch ID * Guard `skip_checks` by test features * Enable `proptest-impl` for `zebrad` in tests * Simplify conditional compilation * Enable `proptest-impl` in scanner's dev deps * Fix conditional compilation in `zebra-chain` tests * Add error types for `zebra-chain` * `impl TryFrom for NetworkUpgrade` * `impl TryFrom for BranchId` * Rm `fn from_branch_id() -> Option` * Check consensus branch ID in SIGHASH computation * Simplify tx deserialization * Rm `impl TryFrom<&Trans> for zp_tx::Trans` * Update tests * Update tests * Add docs for `to_librustzcash` * Update docs for `PrecomputedTxData::new` * Document the SIGHASH consensus rules we missed * Update docs for script validation * Fix script verification tests In a previous commit, I erroneously edited the tests so that they'd expect `Ok`s instead of `Err`s. This commit fixes that. 
* Fix spelling * Impl `NetworkUpgrade::iter()` * Refactor `Network_upgrade::next_upgrade` * Impl `NetworkUpgrade::previous_upgrade` * Impl `Transaction::hash_shielded_data` * Don't make `NETWORK_UPGRADES_IN_ORDER` `pub` * Test `Transaction::sighash` with cons branch ids * Extend the `consensus_branch_id` test * Derive `Debug` for `SigHasher` * Remove the beautiful test for tx verifier * Remove the "skip check" functionality * Revert the compilation adjustments * Apply suggestions from code review Co-authored-by: Arya * Fix docs * Clarify panic conditions in docs * remove duplicated verification Co-authored-by: Arya --------- Co-authored-by: Arya Co-authored-by: Alfredo Garcia --- book/src/dev/rfcs/0003-inventory-tracking.md | 2 +- .../dev/rfcs/0006-contextual-difficulty.md | 4 +- book/src/dev/rfcs/drafts/0005-treestate.md | 2 +- zebra-chain/src/error.rs | 20 +++ zebra-chain/src/lib.rs | 2 + zebra-chain/src/parameters/network/testnet.rs | 6 +- .../src/parameters/network/tests/vectors.rs | 11 +- zebra-chain/src/parameters/network_upgrade.rs | 51 ++++--- .../src/primitives/zcash_note_encryption.rs | 27 ++-- .../src/primitives/zcash_primitives.rs | 122 +++++++--------- zebra-chain/src/serialization/error.rs | 10 ++ zebra-chain/src/tests/vectors.rs | 2 +- zebra-chain/src/transaction.rs | 69 +++++++-- zebra-chain/src/transaction/serialize.rs | 7 +- zebra-chain/src/transaction/sighash.rs | 16 ++- zebra-chain/src/transaction/tests/vectors.rs | 134 ++++++++++-------- zebra-chain/src/transaction/txid.rs | 22 ++- zebra-consensus/src/script.rs | 6 +- zebra-consensus/src/transaction.rs | 24 ++-- zebra-consensus/src/transaction/tests.rs | 18 +-- .../src/peer_set/initialize/tests/vectors.rs | 2 +- zebra-scan/Cargo.toml | 1 - zebra-script/src/lib.rs | 69 ++++----- zebra-test/src/net.rs | 4 +- zebrad/src/commands/copy_state.rs | 2 +- .../components/inbound/tests/fake_peer_set.rs | 8 +- .../components/mempool/storage/tests/prop.rs | 8 +- .../mempool/storage/tests/vectors.rs | 2 +- 
zebrad/src/components/sync.rs | 2 +- zebrad/tests/common/test_type.rs | 2 +- 30 files changed, 354 insertions(+), 301 deletions(-) diff --git a/book/src/dev/rfcs/0003-inventory-tracking.md b/book/src/dev/rfcs/0003-inventory-tracking.md index ff7bac4d18a..a562843070f 100644 --- a/book/src/dev/rfcs/0003-inventory-tracking.md +++ b/book/src/dev/rfcs/0003-inventory-tracking.md @@ -192,7 +192,7 @@ specific inventory request is ready, because until we get the request, we can't determine which peers might be required to process it. We could attempt to ensure that the peer set would be ready to process a -specific inventory request would be to pre-emptively "reserve" a peer as soon +specific inventory request would be to preemptively "reserve" a peer as soon as it advertises an inventory item. But this doesn't actually work to ensure readiness, because a peer could advertise two inventory items, and only be able to service one request at a time. It also potentially locks the peer diff --git a/book/src/dev/rfcs/0006-contextual-difficulty.md b/book/src/dev/rfcs/0006-contextual-difficulty.md index accc8f00332..b23fbab8c24 100644 --- a/book/src/dev/rfcs/0006-contextual-difficulty.md +++ b/book/src/dev/rfcs/0006-contextual-difficulty.md @@ -753,10 +753,10 @@ would be a security issue. # Future possibilities [future-possibilities]: #future-possibilities -## Re-using the relevant chain API in other contextual checks +## Reusing the relevant chain API in other contextual checks [relevant-chain-api-reuse]: #relevant-chain-api-reuse -The relevant chain iterator can be re-used to implement other contextual +The relevant chain iterator can be reused to implement other contextual validation checks. 
For example, responding to peer requests for block locators, which means diff --git a/book/src/dev/rfcs/drafts/0005-treestate.md b/book/src/dev/rfcs/drafts/0005-treestate.md index 67ad62262e8..d18654375dc 100644 --- a/book/src/dev/rfcs/drafts/0005-treestate.md +++ b/book/src/dev/rfcs/drafts/0005-treestate.md @@ -126,7 +126,7 @@ finished validating everything that can be validated without the context of their anchor's finalization state. So for each transaction, for both `Spend` descriptions and `JoinSplit`s, we can -pre-emptively try to do our consensus check by looking up the anchors in our +preemptively try to do our consensus check by looking up the anchors in our finalized set first. For `Spend`s, we then trigger the remaining validation and when that finishes we are full done with those. For `JoinSplit`s, the anchor state check may pass early if it's a previous block Sprout `NoteCommitment` tree diff --git a/zebra-chain/src/error.rs b/zebra-chain/src/error.rs index a3182a21feb..755e508e402 100644 --- a/zebra-chain/src/error.rs +++ b/zebra-chain/src/error.rs @@ -1,6 +1,10 @@ //! Errors that can occur inside any `zebra-chain` submodule. + +use std::io; use thiserror::Error; +// TODO: Move all these enums into a common enum at the bottom. + /// Errors related to random bytes generation. #[derive(Error, Copy, Clone, Debug, PartialEq, Eq)] pub enum RandError { @@ -51,3 +55,19 @@ pub enum AddressError { #[error("Randomness did not hash into the Jubjub group for producing a new diversifier")] DiversifierGenerationFailure, } + +/// `zebra-chain`'s errors +#[derive(Error, Debug)] +pub enum Error { + /// Invalid consensus branch ID. + #[error("invalid consensus branch id")] + InvalidConsensusBranchId, + + /// Zebra's type could not be converted to its librustzcash equivalent. + #[error("Zebra's type could not be converted to its librustzcash equivalent: ")] + Conversion(#[from] io::Error), + + /// The transaction is missing a network upgrade. 
+ #[error("the transaction is missing a network upgrade")] + MissingNetworkUpgrade, +} diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index 460d3a850f0..0dd4a57c2d7 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -41,6 +41,8 @@ pub mod transparent; pub mod value_balance; pub mod work; +pub use error::Error; + #[cfg(any(test, feature = "proptest-impl"))] pub use block::LedgerState; diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 1f77e95e750..6045a7e2581 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -7,7 +7,7 @@ use crate::{ constants::{magics, SLOW_START_INTERVAL, SLOW_START_SHIFT}, network_upgrade::TESTNET_ACTIVATION_HEIGHTS, subsidy::{funding_stream_address_period, FUNDING_STREAM_RECEIVER_DENOMINATOR}, - Network, NetworkKind, NetworkUpgrade, NETWORK_UPGRADES_IN_ORDER, + Network, NetworkKind, NetworkUpgrade, }, work::difficulty::{ExpandedDifficulty, U256}, }; @@ -369,7 +369,7 @@ impl ParametersBuilder { // Check that the provided network upgrade activation heights are in the same order by height as the default testnet activation heights let mut activation_heights_iter = activation_heights.iter(); - for expected_network_upgrade in NETWORK_UPGRADES_IN_ORDER { + for expected_network_upgrade in NetworkUpgrade::iter() { if !network_upgrades.contains(&expected_network_upgrade) { continue; } else if let Some((&height, &network_upgrade)) = activation_heights_iter.next() { @@ -381,7 +381,7 @@ impl ParametersBuilder { assert!( network_upgrade == expected_network_upgrade, - "network upgrades must be activated in order, the correct order is {NETWORK_UPGRADES_IN_ORDER:?}" + "network upgrades must be activated in order specified by the protocol" ); } } diff --git a/zebra-chain/src/parameters/network/tests/vectors.rs b/zebra-chain/src/parameters/network/tests/vectors.rs index 4282c86844f..8845090b50c 100644 --- 
a/zebra-chain/src/parameters/network/tests/vectors.rs +++ b/zebra-chain/src/parameters/network/tests/vectors.rs @@ -15,8 +15,7 @@ use crate::{ self, ConfiguredActivationHeights, ConfiguredFundingStreamRecipient, ConfiguredFundingStreams, MAX_NETWORK_NAME_LENGTH, RESERVED_NETWORK_NAMES, }, - Network, NetworkUpgrade, MAINNET_ACTIVATION_HEIGHTS, NETWORK_UPGRADES_IN_ORDER, - TESTNET_ACTIVATION_HEIGHTS, + Network, NetworkUpgrade, MAINNET_ACTIVATION_HEIGHTS, TESTNET_ACTIVATION_HEIGHTS, }, }; @@ -124,7 +123,7 @@ fn activates_network_upgrades_correctly() { "activation height for all networks after Genesis and BeforeOverwinter should match NU5 activation height" ); - for nu in NETWORK_UPGRADES_IN_ORDER.into_iter().skip(1) { + for nu in NetworkUpgrade::iter().skip(1) { let activation_height = nu .activation_height(&network) .expect("must return an activation height"); @@ -286,8 +285,8 @@ fn check_full_activation_list() { }) .to_network(); - // We expect the first 8 network upgrades to be included, up to NU5 - let expected_network_upgrades = &NETWORK_UPGRADES_IN_ORDER[..8]; + // We expect the first 8 network upgrades to be included, up to and including NU5 + let expected_network_upgrades = NetworkUpgrade::iter().take(8); let full_activation_list_network_upgrades: Vec<_> = network .full_activation_list() .into_iter() @@ -296,7 +295,7 @@ fn check_full_activation_list() { for expected_network_upgrade in expected_network_upgrades { assert!( - full_activation_list_network_upgrades.contains(expected_network_upgrade), + full_activation_list_network_upgrades.contains(&expected_network_upgrade), "full activation list should contain expected network upgrade" ); } diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs index b08cfec520d..74bc59cf162 100644 --- a/zebra-chain/src/parameters/network_upgrade.rs +++ b/zebra-chain/src/parameters/network_upgrade.rs @@ -15,7 +15,7 @@ use hex::{FromHex, ToHex}; use proptest_derive::Arbitrary; 
 /// A list of network upgrades in the order that they must be activated.
-pub const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 9] = [
+const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 9] = [
     Genesis,
     BeforeOverwinter,
     Overwinter,
@@ -63,6 +63,18 @@ pub enum NetworkUpgrade {
     Nu6,
 }
 
+impl TryFrom<u32> for NetworkUpgrade {
+    type Error = crate::Error;
+
+    fn try_from(branch_id: u32) -> Result<Self, Self::Error> {
+        CONSENSUS_BRANCH_IDS
+            .iter()
+            .find(|id| id.1 == ConsensusBranchId(branch_id))
+            .map(|nu| nu.0)
+            .ok_or(Self::Error::InvalidConsensusBranchId)
+    }
+}
+
 impl fmt::Display for NetworkUpgrade {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         // Same as the debug representation for now
@@ -204,6 +216,15 @@ impl fmt::Display for ConsensusBranchId {
     }
 }
 
+impl TryFrom<ConsensusBranchId> for zcash_primitives::consensus::BranchId {
+    type Error = crate::Error;
+
+    fn try_from(id: ConsensusBranchId) -> Result<Self, Self::Error> {
+        zcash_primitives::consensus::BranchId::try_from(u32::from(id))
+            .map_err(|_| Self::Error::InvalidConsensusBranchId)
+    }
+}
+
 /// Network Upgrade Consensus Branch Ids.
 ///
 /// Branch ids are the same for mainnet and testnet. If there is a testnet
@@ -327,19 +348,14 @@ impl NetworkUpgrade {
             .expect("every height has a current network upgrade")
     }
 
-    /// Returns the next expected network upgrade after this network upgrade
+    /// Returns the next expected network upgrade after this network upgrade.
     pub fn next_upgrade(self) -> Option<NetworkUpgrade> {
-        match self {
-            Genesis => Some(BeforeOverwinter),
-            BeforeOverwinter => Some(Overwinter),
-            Overwinter => Some(Sapling),
-            Sapling => Some(Blossom),
-            Blossom => Some(Heartwood),
-            Heartwood => Some(Canopy),
-            Canopy => Some(Nu5),
-            Nu5 => Some(Nu6),
-            Nu6 => None,
-        }
+        Self::iter().skip_while(|&nu| self != nu).nth(1)
+    }
+
+    /// Returns the previous network upgrade before this network upgrade.
+    pub fn previous_upgrade(self) -> Option<NetworkUpgrade> {
+        Self::iter().rev().skip_while(|&nu| self != nu).nth(1)
     }
 
     /// Returns the next network upgrade for `network` and `height`.
@@ -518,12 +534,9 @@ impl NetworkUpgrade {
         NetworkUpgrade::current(network, height).averaging_window_timespan()
     }
 
-    /// Returns the NetworkUpgrade given an u32 as ConsensusBranchId
-    pub fn from_branch_id(branch_id: u32) -> Option<NetworkUpgrade> {
-        CONSENSUS_BRANCH_IDS
-            .iter()
-            .find(|id| id.1 == ConsensusBranchId(branch_id))
-            .map(|nu| nu.0)
+    /// Returns an iterator over [`NetworkUpgrade`] variants.
+    pub fn iter() -> impl DoubleEndedIterator<Item = NetworkUpgrade> {
+        NETWORK_UPGRADES_IN_ORDER.into_iter()
     }
 }
 
diff --git a/zebra-chain/src/primitives/zcash_note_encryption.rs b/zebra-chain/src/primitives/zcash_note_encryption.rs
index cbc19afc5d2..ae802beb0f7 100644
--- a/zebra-chain/src/primitives/zcash_note_encryption.rs
+++ b/zebra-chain/src/primitives/zcash_note_encryption.rs
@@ -4,38 +4,29 @@
 use crate::{
     block::Height,
     parameters::{Network, NetworkUpgrade},
-    primitives::zcash_primitives::convert_tx_to_librustzcash,
     transaction::Transaction,
 };
 
 /// Returns true if all Sapling or Orchard outputs, if any, decrypt successfully with
 /// an all-zeroes outgoing viewing key.
-///
-/// # Panics
-///
-/// If passed a network/height without matching consensus branch ID (pre-Overwinter),
-/// since `librustzcash` won't be able to parse it.
-pub fn decrypts_successfully(transaction: &Transaction, network: &Network, height: Height) -> bool { - let network_upgrade = NetworkUpgrade::current(network, height); - let alt_tx = convert_tx_to_librustzcash( - transaction, - network_upgrade - .branch_id() - .expect("should have a branch ID"), - ) - .expect("zcash_primitives and Zebra transaction formats must be compatible"); +pub fn decrypts_successfully(tx: &Transaction, network: &Network, height: Height) -> bool { + let nu = NetworkUpgrade::current(network, height); + + let Ok(tx) = tx.to_librustzcash(nu) else { + return false; + }; let null_sapling_ovk = sapling_crypto::keys::OutgoingViewingKey([0u8; 32]); // Note that, since this function is used to validate coinbase transactions, we can ignore // the "grace period" mentioned in ZIP-212. - let zip_212_enforcement = if network_upgrade >= NetworkUpgrade::Canopy { + let zip_212_enforcement = if nu >= NetworkUpgrade::Canopy { sapling_crypto::note_encryption::Zip212Enforcement::On } else { sapling_crypto::note_encryption::Zip212Enforcement::Off }; - if let Some(bundle) = alt_tx.sapling_bundle() { + if let Some(bundle) = tx.sapling_bundle() { for output in bundle.shielded_outputs().iter() { let recovery = sapling_crypto::note_encryption::try_sapling_output_recovery( &null_sapling_ovk, @@ -48,7 +39,7 @@ pub fn decrypts_successfully(transaction: &Transaction, network: &Network, heigh } } - if let Some(bundle) = alt_tx.orchard_bundle() { + if let Some(bundle) = tx.orchard_bundle() { for act in bundle.actions() { if zcash_note_encryption::try_output_recovery_with_ovk( &orchard::note_encryption::OrchardDomain::for_action(act), diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 7ab2f32d751..149ca423cd4 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -8,7 +8,7 @@ use zcash_protocol::value::BalanceError; use crate::{ amount::{Amount, 
NonNegative}, - parameters::{ConsensusBranchId, Network}, + parameters::{Network, NetworkUpgrade}, serialization::ZcashSerialize, transaction::{AuthDigest, HashType, SigHash, Transaction}, transparent::{self, Script}, @@ -150,48 +150,6 @@ impl<'a> zp_tx::Authorization for PrecomputedAuth<'a> { // End of (mostly) copied code -impl TryFrom<&Transaction> for zp_tx::Transaction { - type Error = io::Error; - - /// Convert a Zebra transaction into a librustzcash one. - /// - /// # Panics - /// - /// If the transaction is not V5. (Currently there is no need for this - /// conversion for other versions.) - #[allow(clippy::unwrap_in_result)] - fn try_from(trans: &Transaction) -> Result { - let network_upgrade = match trans { - Transaction::V5 { - network_upgrade, .. - } => network_upgrade, - Transaction::V1 { .. } - | Transaction::V2 { .. } - | Transaction::V3 { .. } - | Transaction::V4 { .. } => panic!("Zebra only uses librustzcash for V5 transactions"), - }; - - convert_tx_to_librustzcash( - trans, - network_upgrade.branch_id().expect("V5 txs have branch IDs"), - ) - } -} - -pub(crate) fn convert_tx_to_librustzcash( - trans: &Transaction, - branch_id: ConsensusBranchId, -) -> Result { - let serialized_tx = trans.zcash_serialize_to_vec()?; - let branch_id: u32 = branch_id.into(); - // We've already parsed this transaction, so its network upgrade must be valid. - let branch_id: zcash_primitives::consensus::BranchId = branch_id - .try_into() - .expect("zcash_primitives and Zebra have the same branch ids"); - let alt_tx = zp_tx::Transaction::read(&serialized_tx[..], branch_id)?; - Ok(alt_tx) -} - /// Convert a Zebra transparent::Output into a librustzcash one. impl TryFrom<&transparent::Output> for zp_tx::components::TxOut { type Error = io::Error; @@ -251,31 +209,59 @@ pub(crate) struct PrecomputedTxData<'a> { } impl<'a> PrecomputedTxData<'a> { - /// Compute data used for sighash or txid computation. + /// Computes the data used for sighash or txid computation. 
/// /// # Inputs /// - /// - `tx`: the relevant transaction - /// - `branch_id`: the branch ID of the transaction - /// - `all_previous_outputs` the transparent Output matching each - /// transparent input in the transaction. + /// - `tx`: the relevant transaction. + /// - `nu`: the network upgrade to which the transaction belongs. + /// - `all_previous_outputs`: the transparent Output matching each transparent input in `tx`. + /// + /// # Panics + /// + /// - If `tx` can't be converted to its `librustzcash` equivalent. + /// - If `nu` doesn't contain a consensus branch id convertible to its `librustzcash` + /// equivalent. + /// + /// # Consensus + /// + /// > [NU5 only, pre-NU6] All transactions MUST use the NU5 consensus branch ID `0xF919A198` as + /// > defined in [ZIP-252]. + /// + /// > [NU6 only] All transactions MUST use the NU6 consensus branch ID `0xC8E71055` as defined + /// > in [ZIP-253]. + /// + /// # Notes + /// + /// The check that ensures compliance with the two consensus rules stated above takes place in + /// the [`Transaction::to_librustzcash`] method. If the check fails, the tx can't be converted + /// to its `librustzcash` equivalent, which leads to a panic. The check relies on the passed + /// `nu` parameter, which uniquely represents a consensus branch id and can, therefore, be used + /// as an equivalent to a consensus branch id. The desired `nu` is set either by the script or + /// tx verifier in `zebra-consensus`. 
+ /// + /// [ZIP-252]: + /// [ZIP-253]: pub(crate) fn new( tx: &'a Transaction, - branch_id: ConsensusBranchId, + nu: NetworkUpgrade, all_previous_outputs: &'a [transparent::Output], ) -> PrecomputedTxData<'a> { - let alt_tx = convert_tx_to_librustzcash(tx, branch_id) - .expect("zcash_primitives and Zebra transaction formats must be compatible"); - let txid_parts = alt_tx.deref().digest(zp_tx::txid::TxIdDigester); + let tx = tx + .to_librustzcash(nu) + .expect("`zcash_primitives` and Zebra tx formats must be compatible"); + + let txid_parts = tx.deref().digest(zp_tx::txid::TxIdDigester); let f_transparent = MapTransparent { auth: TransparentAuth { all_prev_outputs: all_previous_outputs, }, }; - let tx_data: zp_tx::TransactionData = alt_tx - .into_data() - .map_authorization(f_transparent, IdentityMap, IdentityMap); + + let tx_data: zp_tx::TransactionData = + tx.into_data() + .map_authorization(f_transparent, IdentityMap, IdentityMap); PrecomputedTxData { tx_data, @@ -331,26 +317,24 @@ pub(crate) fn sighash( ) } -/// Compute the authorizing data commitment of this transaction as specified -/// in [ZIP-244]. +/// Compute the authorizing data commitment of this transaction as specified in [ZIP-244]. /// /// # Panics /// /// If passed a pre-v5 transaction. 
/// /// [ZIP-244]: https://zips.z.cash/zip-0244 -pub(crate) fn auth_digest(trans: &Transaction) -> AuthDigest { - let alt_tx: zp_tx::Transaction = trans - .try_into() - .expect("zcash_primitives and Zebra transaction formats must be compatible"); - - let digest_bytes: [u8; 32] = alt_tx - .auth_commitment() - .as_ref() - .try_into() - .expect("digest has the correct size"); - - AuthDigest(digest_bytes) +pub(crate) fn auth_digest(tx: &Transaction) -> AuthDigest { + let nu = tx.network_upgrade().expect("V5 tx has a network upgrade"); + + AuthDigest( + tx.to_librustzcash(nu) + .expect("V5 tx is convertible to its `zcash_params` equivalent") + .auth_commitment() + .as_ref() + .try_into() + .expect("digest has the correct size"), + ) } /// Return the destination address from a transparent output. diff --git a/zebra-chain/src/serialization/error.rs b/zebra-chain/src/serialization/error.rs index 17566548d1a..411e2649537 100644 --- a/zebra-chain/src/serialization/error.rs +++ b/zebra-chain/src/serialization/error.rs @@ -51,3 +51,13 @@ pub enum SerializationError { #[error("transaction balance is non-zero but doesn't have Sapling shielded spends or outputs")] BadTransactionBalance, } + +impl From for SerializationError { + fn from(value: crate::Error) -> Self { + match value { + crate::Error::InvalidConsensusBranchId => Self::Parse("invalid consensus branch id"), + crate::Error::Conversion(e) => Self::Io(e), + crate::Error::MissingNetworkUpgrade => Self::Parse("missing network upgrade"), + } + } +} diff --git a/zebra-chain/src/tests/vectors.rs b/zebra-chain/src/tests/vectors.rs index 69e4955f6a0..44fe6644fbe 100644 --- a/zebra-chain/src/tests/vectors.rs +++ b/zebra-chain/src/tests/vectors.rs @@ -63,7 +63,7 @@ impl Network { .filter_map(|transaction| { VerifiedUnminedTx::new( transaction, - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .ok() diff --git a/zebra-chain/src/transaction.rs 
b/zebra-chain/src/transaction.rs index c04f4155b4f..025bd22881d 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -37,11 +37,12 @@ pub use sighash::{HashType, SigHash, SigHasher}; pub use unmined::{ zip317, UnminedTx, UnminedTxId, VerifiedUnminedTx, MEMPOOL_TRANSACTION_COST_THRESHOLD, }; +use zcash_protocol::consensus; use crate::{ amount::{Amount, Error as AmountError, NegativeAllowed, NonNegative}, block, orchard, - parameters::{ConsensusBranchId, Network, NetworkUpgrade}, + parameters::{Network, NetworkUpgrade}, primitives::{ed25519, Bctv14Proof, Groth16Proof}, sapling, serialization::ZcashSerialize, @@ -215,24 +216,28 @@ impl Transaction { /// - if called on a v1 or v2 transaction /// - if the input index points to a transparent::Input::CoinBase /// - if the input index is out of bounds for self.inputs() + /// - if the tx contains `nConsensusBranchId` field and `nu` doesn't match it + /// - if the tx is not convertible to its `librustzcash` equivalent + /// - if `nu` doesn't contain a consensus branch id convertible to its `librustzcash` + /// equivalent pub fn sighash( &self, - branch_id: ConsensusBranchId, + nu: NetworkUpgrade, hash_type: sighash::HashType, all_previous_outputs: &[transparent::Output], input_index_script_code: Option<(usize, Vec)>, ) -> SigHash { - sighash::SigHasher::new(self, branch_id, all_previous_outputs) + sighash::SigHasher::new(self, nu, all_previous_outputs) .sighash(hash_type, input_index_script_code) } /// Return a [`SigHasher`] for this transaction. 
pub fn sighasher<'a>( &'a self, - branch_id: ConsensusBranchId, + nu: NetworkUpgrade, all_previous_outputs: &'a [transparent::Output], ) -> sighash::SigHasher<'a> { - sighash::SigHasher::new(self, branch_id, all_previous_outputs) + sighash::SigHasher::new(self, nu, all_previous_outputs) } /// Compute the authorizing data commitment of this transaction as specified @@ -258,6 +263,16 @@ impl Transaction { !self.inputs().is_empty() } + /// Does this transaction have transparent outputs? + pub fn has_transparent_outputs(&self) -> bool { + !self.outputs().is_empty() + } + + /// Does this transaction have transparent inputs or outputs? + pub fn has_transparent_inputs_or_outputs(&self) -> bool { + self.has_transparent_inputs() || self.has_transparent_outputs() + } + /// Does this transaction have transparent or shielded inputs? pub fn has_transparent_or_shielded_inputs(&self) -> bool { self.has_transparent_inputs() || self.has_shielded_inputs() @@ -276,11 +291,6 @@ impl Transaction { .contains(orchard::Flags::ENABLE_SPENDS)) } - /// Does this transaction have transparent or shielded outputs? - pub fn has_transparent_or_shielded_outputs(&self) -> bool { - !self.outputs().is_empty() || self.has_shielded_outputs() - } - /// Does this transaction have shielded outputs? /// /// See [`Self::has_transparent_or_shielded_outputs`] for details. @@ -294,6 +304,11 @@ impl Transaction { .contains(orchard::Flags::ENABLE_OUTPUTS)) } + /// Does this transaction have transparent or shielded outputs? + pub fn has_transparent_or_shielded_outputs(&self) -> bool { + self.has_transparent_outputs() || self.has_shielded_outputs() + } + /// Does this transaction has at least one flag when we have at least one orchard action? 
pub fn has_enough_orchard_flags(&self) -> bool { if self.version() < 5 || self.orchard_actions().count() == 0 { @@ -1217,6 +1232,40 @@ impl Transaction { ) -> Result, ValueBalanceError> { self.value_balance_from_outputs(&outputs_from_utxos(utxos.clone())) } + + /// Converts [`Transaction`] to [`zcash_primitives::transaction::Transaction`]. + /// + /// If the tx contains a network upgrade, this network upgrade must match the passed `nu`. The + /// passed `nu` must also contain a consensus branch id convertible to its `librustzcash` + /// equivalent. + pub(crate) fn to_librustzcash( + &self, + nu: NetworkUpgrade, + ) -> Result { + if self.network_upgrade().is_some_and(|tx_nu| tx_nu != nu) { + return Err(crate::Error::InvalidConsensusBranchId); + } + + let Some(branch_id) = nu.branch_id() else { + return Err(crate::Error::InvalidConsensusBranchId); + }; + + let Ok(branch_id) = consensus::BranchId::try_from(branch_id) else { + return Err(crate::Error::InvalidConsensusBranchId); + }; + + Ok(zcash_primitives::transaction::Transaction::read( + &self.zcash_serialize_to_vec()?[..], + branch_id, + )?) + } + + // Common Sapling & Orchard Properties + + /// Does this transaction have shielded inputs or outputs? + pub fn has_shielded_data(&self) -> bool { + self.has_shielded_inputs() || self.has_shielded_outputs() + } } #[cfg(any(test, feature = "proptest-impl"))] diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index 47d1a4e4ad8..a8a6b8ed8e5 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -888,12 +888,7 @@ impl ZcashDeserialize for Transaction { // Denoted as `nConsensusBranchId` in the spec. // Convert it to a NetworkUpgrade let network_upgrade = - NetworkUpgrade::from_branch_id(limited_reader.read_u32::()?) 
- .ok_or({ - SerializationError::Parse( - "expected a valid network upgrade from the consensus branch id", - ) - })?; + NetworkUpgrade::try_from(limited_reader.read_u32::()?)?; // Denoted as `lock_time` in the spec. let lock_time = LockTime::zcash_deserialize(&mut limited_reader)?; diff --git a/zebra-chain/src/transaction/sighash.rs b/zebra-chain/src/transaction/sighash.rs index 24d58a77b38..c2460a7101c 100644 --- a/zebra-chain/src/transaction/sighash.rs +++ b/zebra-chain/src/transaction/sighash.rs @@ -2,7 +2,7 @@ use super::Transaction; -use crate::parameters::ConsensusBranchId; +use crate::parameters::NetworkUpgrade; use crate::transparent; use crate::primitives::zcash_primitives::{sighash, PrecomputedTxData}; @@ -41,20 +41,28 @@ impl AsRef<[u8]> for SigHash { /// A SigHasher context which stores precomputed data that is reused /// between sighash computations for the same transaction. +#[derive(Debug)] pub struct SigHasher<'a> { precomputed_tx_data: PrecomputedTxData<'a>, } impl<'a> SigHasher<'a> { /// Create a new SigHasher for the given transaction. + /// + /// # Panics + /// + /// - If `trans` can't be converted to its `librustzcash` equivalent. This could happen, for + /// example, if `trans` contains the `nConsensusBranchId` field, and `nu` doesn't match it. + /// More details in [`PrecomputedTxData::new`]. + /// - If `nu` doesn't contain a consensus branch id convertible to its `librustzcash` + /// equivalent. 
pub fn new( trans: &'a Transaction, - branch_id: ConsensusBranchId, + nu: NetworkUpgrade, all_previous_outputs: &'a [transparent::Output], ) -> Self { - let precomputed_tx_data = PrecomputedTxData::new(trans, branch_id, all_previous_outputs); SigHasher { - precomputed_tx_data, + precomputed_tx_data: PrecomputedTxData::new(trans, nu, all_previous_outputs), } } diff --git a/zebra-chain/src/transaction/tests/vectors.rs b/zebra-chain/src/transaction/tests/vectors.rs index 7daff649f3a..d8dd2e54097 100644 --- a/zebra-chain/src/transaction/tests/vectors.rs +++ b/zebra-chain/src/transaction/tests/vectors.rs @@ -1,8 +1,10 @@ //! Fixed test vectors for transactions. +use arbitrary::v5_transactions; use chrono::DateTime; use color_eyre::eyre::Result; use lazy_static::lazy_static; +use rand::{seq::IteratorRandom, thread_rng}; use crate::{ block::{Block, Height, MAX_BLOCK_BYTES}, @@ -320,7 +322,9 @@ fn empty_v5_librustzcash_round_trip() { let _init_guard = zebra_test::init(); let tx: &Transaction = &EMPTY_V5_TX; - let _alt_tx: zcash_primitives::transaction::Transaction = tx.try_into().expect( + let nu = tx.network_upgrade().expect("network upgrade"); + + tx.to_librustzcash(nu).expect( "librustzcash deserialization might work for empty zebra serialized transactions. 
\ Hint: if empty transactions fail, but other transactions work, delete this test", ); @@ -417,9 +421,10 @@ fn fake_v5_librustzcash_round_trip_for_network(network: Network) { "v1-v4 transaction data must change when converted to fake v5" ); - let _alt_tx: zcash_primitives::transaction::Transaction = fake_tx - .as_ref() - .try_into() + let nu = fake_tx.network_upgrade().expect("network upgrade"); + + fake_tx + .to_librustzcash(nu) .expect("librustzcash deserialization must work for zebra serialized transactions"); } } @@ -430,14 +435,14 @@ fn zip244_round_trip() -> Result<()> { let _init_guard = zebra_test::init(); for test in zip0244::TEST_VECTORS.iter() { - let transaction = test.tx.zcash_deserialize_into::()?; - let reencoded = transaction.zcash_serialize_to_vec()?; + let tx = test.tx.zcash_deserialize_into::()?; + let reencoded = tx.zcash_serialize_to_vec()?; + assert_eq!(test.tx, reencoded); - // The borrow is actually needed to call the correct trait impl - #[allow(clippy::needless_borrow)] - let _alt_tx: zcash_primitives::transaction::Transaction = (&transaction) - .try_into() + let nu = tx.network_upgrade().expect("network upgrade"); + + tx.to_librustzcash(nu) .expect("librustzcash deserialization must work for zebra serialized transactions"); } @@ -449,9 +454,10 @@ fn zip244_txid() -> Result<()> { let _init_guard = zebra_test::init(); for test in zip0244::TEST_VECTORS.iter() { - let transaction = test.tx.zcash_deserialize_into::()?; - let hasher = TxIdBuilder::new(&transaction); - let txid = hasher.txid()?; + let txid = TxIdBuilder::new(&test.tx.zcash_deserialize_into::()?) 
+ .txid() + .expect("txid"); + assert_eq!(txid.0, test.txid); } @@ -482,11 +488,7 @@ fn test_vec143_1() -> Result<()> { let transaction = ZIP143_1.zcash_deserialize_into::()?; - let hasher = SigHasher::new( - &transaction, - NetworkUpgrade::Overwinter.branch_id().unwrap(), - &[], - ); + let hasher = SigHasher::new(&transaction, NetworkUpgrade::Overwinter, &[]); let hash = hasher.sighash(HashType::ALL, None); let expected = "a1f1a4e5cd9bd522322d661edd2af1bf2a7019cfab94ece18f4ba935b0a19073"; @@ -520,7 +522,7 @@ fn test_vec143_2() -> Result<()> { let hasher = SigHasher::new( &transaction, - NetworkUpgrade::Overwinter.branch_id().unwrap(), + NetworkUpgrade::Overwinter, &all_previous_outputs, ); @@ -549,11 +551,7 @@ fn test_vec243_1() -> Result<()> { let transaction = ZIP243_1.zcash_deserialize_into::()?; - let hasher = SigHasher::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - &[], - ); + let hasher = SigHasher::new(&transaction, NetworkUpgrade::Sapling, &[]); let hash = hasher.sighash(HashType::ALL, None); let expected = "63d18534de5f2d1c9e169b73f9c783718adbef5c8a7d55b5e7a37affa1dd3ff3"; @@ -567,11 +565,7 @@ fn test_vec243_1() -> Result<()> { let _guard = span.enter(); assert_eq!(expected, result); - let precomputed_tx_data = PrecomputedTxData::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - &[], - ); + let precomputed_tx_data = PrecomputedTxData::new(&transaction, NetworkUpgrade::Sapling, &[]); let alt_sighash = crate::primitives::zcash_primitives::sighash(&precomputed_tx_data, HashType::ALL, None); let result = hex::encode(alt_sighash); @@ -595,11 +589,7 @@ fn test_vec243_2() -> Result<()> { }; let all_previous_outputs = mock_pre_v5_output_list(output, input_ind); - let hasher = SigHasher::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - &all_previous_outputs, - ); + let hasher = SigHasher::new(&transaction, NetworkUpgrade::Sapling, &all_previous_outputs); let hash = hasher.sighash( HashType::NONE, 
@@ -624,11 +614,8 @@ fn test_vec243_2() -> Result<()> { let index = input_ind; let all_previous_outputs = mock_pre_v5_output_list(prevout, input_ind); - let precomputed_tx_data = PrecomputedTxData::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - &all_previous_outputs, - ); + let precomputed_tx_data = + PrecomputedTxData::new(&transaction, NetworkUpgrade::Sapling, &all_previous_outputs); let alt_sighash = crate::primitives::zcash_primitives::sighash( &precomputed_tx_data, HashType::NONE, @@ -656,11 +643,7 @@ fn test_vec243_3() -> Result<()> { lock_script: lock_script.clone(), }]; - let hasher = SigHasher::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - &all_previous_outputs, - ); + let hasher = SigHasher::new(&transaction, NetworkUpgrade::Sapling, &all_previous_outputs); let hash = hasher.sighash( HashType::ALL, @@ -687,11 +670,8 @@ fn test_vec243_3() -> Result<()> { let index = input_ind; let all_previous_outputs = &[prevout]; - let precomputed_tx_data = PrecomputedTxData::new( - &transaction, - NetworkUpgrade::Sapling.branch_id().unwrap(), - all_previous_outputs, - ); + let precomputed_tx_data = + PrecomputedTxData::new(&transaction, NetworkUpgrade::Sapling, all_previous_outputs); let alt_sighash = crate::primitives::zcash_primitives::sighash( &precomputed_tx_data, HashType::ALL, @@ -724,7 +704,7 @@ fn zip143_sighash() -> Result<()> { None => vec![], }; let result = hex::encode(transaction.sighash( - ConsensusBranchId(test.consensus_branch_id), + NetworkUpgrade::try_from(test.consensus_branch_id).expect("network upgrade"), HashType::from_bits(test.hash_type).expect("must be a valid HashType"), &all_previous_outputs, input_index.map(|input_index| { @@ -762,7 +742,7 @@ fn zip243_sighash() -> Result<()> { None => vec![], }; let result = hex::encode(transaction.sighash( - ConsensusBranchId(test.consensus_branch_id), + NetworkUpgrade::try_from(test.consensus_branch_id).expect("network upgrade"), 
HashType::from_bits(test.hash_type).expect("must be a valid HashType"), &all_previous_outputs, input_index.map(|input_index| { @@ -797,7 +777,7 @@ fn zip244_sighash() -> Result<()> { .collect(); let result = hex::encode(transaction.sighash( - NetworkUpgrade::Nu5.branch_id().unwrap(), + NetworkUpgrade::Nu5, HashType::ALL, &all_previous_outputs, None, @@ -808,7 +788,7 @@ fn zip244_sighash() -> Result<()> { if let Some(sighash_all) = test.sighash_all { let result = hex::encode( transaction.sighash( - NetworkUpgrade::Nu5.branch_id().unwrap(), + NetworkUpgrade::Nu5, HashType::ALL, &all_previous_outputs, test.transparent_input @@ -823,6 +803,46 @@ fn zip244_sighash() -> Result<()> { Ok(()) } +#[test] +fn consensus_branch_id() { + for net in Network::iter() { + for tx in v5_transactions(net.block_iter()).filter(|tx| { + !tx.has_transparent_inputs() && tx.has_shielded_data() && tx.network_upgrade().is_some() + }) { + let tx_nu = tx + .network_upgrade() + .expect("this test shouldn't use txs without a network upgrade"); + + let any_other_nu = NetworkUpgrade::iter() + .filter(|&nu| nu != tx_nu) + .choose(&mut thread_rng()) + .expect("there must be a network upgrade other than the tx one"); + + // All computations should succeed under the tx nu. + + tx.to_librustzcash(tx_nu) + .expect("tx is convertible under tx nu"); + PrecomputedTxData::new(&tx, tx_nu, &[]); + sighash::SigHasher::new(&tx, tx_nu, &[]); + tx.sighash(tx_nu, HashType::ALL, &[], None); + + // All computations should fail under an nu other than the tx one. 
+ + tx.to_librustzcash(any_other_nu) + .expect_err("tx is not convertible under nu other than the tx one"); + + std::panic::catch_unwind(|| PrecomputedTxData::new(&tx, any_other_nu, &[])) + .expect_err("precomputing tx sighash data panics under nu other than the tx one"); + + std::panic::catch_unwind(|| sighash::SigHasher::new(&tx, any_other_nu, &[])) + .expect_err("creating the sighasher panics under nu other than the tx one"); + + std::panic::catch_unwind(|| tx.sighash(any_other_nu, HashType::ALL, &[], None)) + .expect_err("the sighash computation panics under nu other than the tx one"); + } + } +} + #[test] fn binding_signatures() { let _init_guard = zebra_test::init(); @@ -840,9 +860,7 @@ fn binding_signatures() { .block_iter() .skip_while(|(height, _)| **height < sapling_activation_height) { - let branch_id = NetworkUpgrade::current(&net, Height(*height)) - .branch_id() - .expect("consensus branch ID"); + let nu = NetworkUpgrade::current(&net, Height(*height)); for tx in block .zcash_deserialize_into::() @@ -856,7 +874,7 @@ fn binding_signatures() { .. } => { if let Some(sapling_shielded_data) = sapling_shielded_data { - let sighash = tx.sighash(branch_id, HashType::ALL, &[], None); + let sighash = tx.sighash(nu, HashType::ALL, &[], None); let bvk = redjubjub::VerificationKey::try_from( sapling_shielded_data.binding_verification_key(), @@ -885,7 +903,7 @@ fn binding_signatures() { continue; } - let sighash = tx.sighash(branch_id, HashType::ALL, &[], None); + let sighash = tx.sighash(nu, HashType::ALL, &[], None); let bvk = redjubjub::VerificationKey::try_from( sapling_shielded_data.binding_verification_key(), diff --git a/zebra-chain/src/transaction/txid.rs b/zebra-chain/src/transaction/txid.rs index f67f6dee58d..abaffdd4d45 100644 --- a/zebra-chain/src/transaction/txid.rs +++ b/zebra-chain/src/transaction/txid.rs @@ -1,6 +1,5 @@ //! Transaction ID computation. Contains code for generating the Transaction ID //! from the transaction. 
-use std::io; use super::{Hash, Transaction}; use crate::serialization::{sha256d, ZcashSerialize}; @@ -22,7 +21,7 @@ impl<'a> TxIdBuilder<'a> { } /// Compute the Transaction ID for the previously specified transaction. - pub(super) fn txid(self) -> Result { + pub(super) fn txid(self) -> Option { match self.trans { Transaction::V1 { .. } | Transaction::V2 { .. } @@ -34,22 +33,19 @@ impl<'a> TxIdBuilder<'a> { /// Compute the Transaction ID for transactions V1 to V4. /// In these cases it's simply the hash of the serialized transaction. - #[allow(clippy::unwrap_in_result)] - fn txid_v1_to_v4(self) -> Result { + fn txid_v1_to_v4(self) -> Option { let mut hash_writer = sha256d::Writer::default(); - self.trans - .zcash_serialize(&mut hash_writer) - .expect("Transactions must serialize into the hash."); - Ok(Hash(hash_writer.finish())) + self.trans.zcash_serialize(&mut hash_writer).ok()?; + Some(Hash(hash_writer.finish())) } /// Compute the Transaction ID for a V5 transaction in the given network upgrade. /// In this case it's the hash of a tree of hashes of specific parts of the /// transaction, as specified in ZIP-244 and ZIP-225. - fn txid_v5(self) -> Result { - // The v5 txid (from ZIP-244) is computed using librustzcash. Convert the zebra - // transaction to a librustzcash transaction. - let alt_tx: zcash_primitives::transaction::Transaction = self.trans.try_into()?; - Ok(Hash(*alt_tx.txid().as_ref())) + fn txid_v5(self) -> Option { + let nu = self.trans.network_upgrade()?; + + // We compute v5 txid (from ZIP-244) using librustzcash. 
+ Some(Hash(*self.trans.to_librustzcash(nu).ok()?.txid().as_ref())) } } diff --git a/zebra-consensus/src/script.rs b/zebra-consensus/src/script.rs index 5adbc18c105..4432575e211 100644 --- a/zebra-consensus/src/script.rs +++ b/zebra-consensus/src/script.rs @@ -59,10 +59,6 @@ impl tower::Service for Verifier { upgrade, } = req; let input = &cached_ffi_transaction.inputs()[input_index]; - let branch_id = upgrade - .branch_id() - .expect("post-Sapling NUs have a consensus branch ID"); - match input { transparent::Input::PrevOut { outpoint, .. } => { let outpoint = *outpoint; @@ -71,7 +67,7 @@ impl tower::Service for Verifier { let span = tracing::trace_span!("script", ?outpoint); async move { - cached_ffi_transaction.is_valid(branch_id, input_index)?; + cached_ffi_transaction.is_valid(upgrade, input_index)?; tracing::trace!("script verification succeeded"); Ok(()) diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index c0c735871e7..d9184035aa5 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -575,12 +575,10 @@ where miner_fee, legacy_sigop_count, }, - Request::Mempool { transaction, .. } => { + Request::Mempool { transaction: ref tx, .. 
} => { let transaction = VerifiedUnminedTx::new( - transaction, - miner_fee.expect( - "unexpected mempool coinbase transaction: should have already rejected", - ), + tx.clone(), + miner_fee.expect("fee should have been checked earlier"), legacy_sigop_count, )?; @@ -874,14 +872,12 @@ where sapling_shielded_data: &Option>, ) -> Result { let tx = request.transaction(); - let upgrade = request.upgrade(network); + let nu = request.upgrade(network); - Self::verify_v4_transaction_network_upgrade(&tx, upgrade)?; + Self::verify_v4_transaction_network_upgrade(&tx, nu)?; let shielded_sighash = tx.sighash( - upgrade - .branch_id() - .expect("Overwinter-onwards must have branch ID, and we checkpoint on Canopy"), + nu, HashType::ALL, cached_ffi_transaction.all_previous_outputs(), None, @@ -970,14 +966,12 @@ where orchard_shielded_data: &Option, ) -> Result { let transaction = request.transaction(); - let upgrade = request.upgrade(network); + let nu = request.upgrade(network); - Self::verify_v5_transaction_network_upgrade(&transaction, upgrade)?; + Self::verify_v5_transaction_network_upgrade(&transaction, nu)?; let shielded_sighash = transaction.sighash( - upgrade - .branch_id() - .expect("Overwinter-onwards must have branch ID, and we checkpoint on Canopy"), + nu, HashType::ALL, cached_ffi_transaction.all_previous_outputs(), None, diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 96ddaaa8903..122dd57c700 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -1769,7 +1769,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected let _init_guard = zebra_test::init(); zebra_test::MULTI_THREADED_RUNTIME.block_on(async { let network = Network::Mainnet; - let network_upgrade = NetworkUpgrade::Canopy; + let nu = NetworkUpgrade::Canopy; let canopy_activation_height = NetworkUpgrade::Canopy .activation_height(&network) @@ -1796,12 +1796,7 @@ fn 
v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected }; // Sign the transaction - let sighash = transaction.sighash( - network_upgrade.branch_id().expect("must have branch ID"), - HashType::ALL, - &[], - None, - ); + let sighash = transaction.sighash(nu, HashType::ALL, &[], None); match &mut transaction { Transaction::V4 { @@ -1841,7 +1836,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte let _init_guard = zebra_test::init(); zebra_test::MULTI_THREADED_RUNTIME.block_on(async { let network = Network::Mainnet; - let network_upgrade = NetworkUpgrade::Canopy; + let nu = NetworkUpgrade::Canopy; let canopy_activation_height = NetworkUpgrade::Canopy .activation_height(&network) @@ -1874,12 +1869,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte }; // Sign the transaction - let sighash = transaction.sighash( - network_upgrade.branch_id().expect("must have branch ID"), - HashType::ALL, - &[], - None, - ); + let sighash = transaction.sighash(nu, HashType::ALL, &[], None); match &mut transaction { Transaction::V4 { diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 8568c7ea13a..6a53b2ed0ab 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1575,7 +1575,7 @@ where let nil_peer_set = service_fn(move |req| async move { let rsp = match req { // Return the correct response variant for Peers requests, - // re-using one of the peers we already provided. + // reusing one of the peers we already provided. 
Request::Peers => Response::Peers(vec![fake_peer.unwrap()]), _ => unreachable!("unexpected request: {:?}", req), }; diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index e1e4e2618ff..9c785ceae0b 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -127,4 +127,3 @@ tonic = "0.12.3" zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } - diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index 5b6e5f2a846..3747b84c46e 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -21,7 +21,7 @@ use zcash_script::{ }; use zebra_chain::{ - parameters::ConsensusBranchId, + parameters::NetworkUpgrade, transaction::{HashType, SigHasher, Transaction}, transparent, }; @@ -152,11 +152,10 @@ impl CachedFfiTransaction { &self.all_previous_outputs } - /// Verify if the script in the input at `input_index` of a transaction correctly - /// spends the matching [`transparent::Output`] it refers to, with the [`ConsensusBranchId`] - /// of the block containing the transaction. + /// Verify if the script in the input at `input_index` of a transaction correctly spends the + /// matching [`transparent::Output`] it refers to. #[allow(clippy::unwrap_in_result)] - pub fn is_valid(&self, branch_id: ConsensusBranchId, input_index: usize) -> Result<(), Error> { + pub fn is_valid(&self, nu: NetworkUpgrade, input_index: usize) -> Result<(), Error> { let previous_output = self .all_previous_outputs .get(input_index) @@ -200,7 +199,7 @@ impl CachedFfiTransaction { let ctx = Box::new(SigHashContext { input_index: n_in, - sighasher: SigHasher::new(&self.transaction, branch_id, &self.all_previous_outputs), + sighasher: SigHasher::new(&self.transaction, nu, &self.all_previous_outputs), }); // SAFETY: The `script_*` fields are created from a valid Rust `slice`. 
let ret = unsafe { @@ -271,7 +270,7 @@ mod tests { use hex::FromHex; use std::sync::Arc; use zebra_chain::{ - parameters::{ConsensusBranchId, NetworkUpgrade::*}, + parameters::NetworkUpgrade, serialization::{ZcashDeserialize, ZcashDeserializeInto}, transaction::Transaction, transparent::{self, Output}, @@ -286,7 +285,7 @@ mod tests { } fn verify_valid_script( - branch_id: ConsensusBranchId, + nu: NetworkUpgrade, tx: &[u8], amount: u64, pubkey: &[u8], @@ -301,7 +300,7 @@ mod tests { let previous_output = vec![output]; let verifier = super::CachedFfiTransaction::new(transaction, previous_output); - verifier.is_valid(branch_id, input_index)?; + verifier.is_valid(nu, input_index)?; Ok(()) } @@ -311,7 +310,7 @@ mod tests { let _init_guard = zebra_test::init(); verify_valid_script( - Blossom.branch_id().unwrap(), + NetworkUpgrade::Blossom, &SCRIPT_TX, 212 * u64::pow(10, 8), &SCRIPT_PUBKEY, @@ -344,12 +343,10 @@ mod tests { lock_script: transparent::Script::new(&SCRIPT_PUBKEY.clone()[..]), }; let input_index = 0; - let branch_id = Blossom - .branch_id() - .expect("Blossom has a ConsensusBranchId"); - let verifier = super::CachedFfiTransaction::new(transaction, vec![output]); - verifier.is_valid(branch_id, input_index).unwrap_err(); + verifier + .is_valid(NetworkUpgrade::Blossom, input_index) + .expect_err("verification should fail"); Ok(()) } @@ -370,13 +367,9 @@ mod tests { let verifier = super::CachedFfiTransaction::new(transaction, vec![output]); let input_index = 0; - let branch_id = Blossom - .branch_id() - .expect("Blossom has a ConsensusBranchId"); - - verifier.is_valid(branch_id, input_index)?; - verifier.is_valid(branch_id, input_index)?; + verifier.is_valid(NetworkUpgrade::Blossom, input_index)?; + verifier.is_valid(NetworkUpgrade::Blossom, input_index)?; Ok(()) } @@ -397,13 +390,11 @@ mod tests { let verifier = super::CachedFfiTransaction::new(transaction, vec![output]); let input_index = 0; - let branch_id = Blossom - .branch_id() - .expect("Blossom has a 
ConsensusBranchId"); - verifier.is_valid(branch_id, input_index)?; - - verifier.is_valid(branch_id, input_index + 1).unwrap_err(); + verifier.is_valid(NetworkUpgrade::Blossom, input_index)?; + verifier + .is_valid(NetworkUpgrade::Blossom, input_index + 1) + .expect_err("verification should fail"); Ok(()) } @@ -424,13 +415,11 @@ mod tests { let verifier = super::CachedFfiTransaction::new(transaction, vec![output]); let input_index = 0; - let branch_id = Blossom - .branch_id() - .expect("Blossom has a ConsensusBranchId"); - - verifier.is_valid(branch_id, input_index + 1).unwrap_err(); - verifier.is_valid(branch_id, input_index)?; + verifier + .is_valid(NetworkUpgrade::Blossom, input_index + 1) + .expect_err("verification should fail"); + verifier.is_valid(NetworkUpgrade::Blossom, input_index)?; Ok(()) } @@ -451,13 +440,10 @@ mod tests { let verifier = super::CachedFfiTransaction::new(transaction, vec![output]); let input_index = 0; - let branch_id = Blossom - .branch_id() - .expect("Blossom has a ConsensusBranchId"); - verifier.is_valid(branch_id, input_index + 1).unwrap_err(); - - verifier.is_valid(branch_id, input_index + 1).unwrap_err(); + verifier + .is_valid(NetworkUpgrade::Blossom, input_index + 1) + .expect_err("verification should fail"); Ok(()) } @@ -471,12 +457,15 @@ mod tests { let serialized_output = "4065675c0000000017a914c117756dcbe144a12a7c33a77cfa81aa5aeeb38187"; let tx = Transaction::zcash_deserialize(&hex::decode(serialized_tx).unwrap().to_vec()[..]) .unwrap(); + let previous_output = Output::zcash_deserialize(&hex::decode(serialized_output).unwrap().to_vec()[..]) .unwrap(); let verifier = super::CachedFfiTransaction::new(Arc::new(tx), vec![previous_output]); - verifier.is_valid(Nu5.branch_id().unwrap(), 0)?; + + verifier.is_valid(NetworkUpgrade::Nu5, 0)?; + Ok(()) } } diff --git a/zebra-test/src/net.rs b/zebra-test/src/net.rs index 161a5ce80aa..1b66ff466c4 100644 --- a/zebra-test/src/net.rs +++ b/zebra-test/src/net.rs @@ -58,7 +58,7 @@ pub fn 
zebra_skip_ipv6_tests() -> bool { /// to - it has a small risk of port conflicts. /// /// Use this function when you need to use the same random port multiple -/// times. For example: setting up both ends of a connection, or re-using +/// times. For example: setting up both ends of a connection, or reusing /// the same port multiple times. pub fn random_known_port() -> u16 { use rand::Rng; @@ -99,7 +99,7 @@ pub fn random_known_port() -> u16 { /// between this fn call and binding the tcp listener. /// /// Use this function when you need to use the same random port multiple -/// times. For example: setting up both ends of a connection, or re-using +/// times. For example: setting up both ends of a connection, or reusing /// the same port multiple times. /// /// ## Panics diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index 56e7eea6263..a285209b62d 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -280,7 +280,7 @@ impl CopyStateCmd { // then deserializes bytes into new `Block` structs when reading. // So these checks are sufficient to detect block data corruption. // - // If Zebra starts re-using cached `Block` structs after writing them, + // If Zebra starts reusing cached `Block` structs after writing them, // we'll also need to check `Block` structs created from the actual database bytes. 
if source_block_hash != target_block_commit_hash || source_block_hash != target_block_data_hash diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 176ec8c1c57..07402dafb50 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -166,7 +166,7 @@ async fn mempool_push_transaction() -> Result<(), crate::BoxError> { responder.respond(transaction::Response::from( VerifiedUnminedTx::new( transaction, - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), @@ -271,7 +271,7 @@ async fn mempool_advertise_transaction_ids() -> Result<(), crate::BoxError> { responder.respond(transaction::Response::from( VerifiedUnminedTx::new( transaction, - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), @@ -373,7 +373,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { responder.respond(transaction::Response::from( VerifiedUnminedTx::new( transaction, - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), @@ -512,7 +512,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { responder.respond(transaction::Response::from( VerifiedUnminedTx::new( transaction, - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), diff --git a/zebrad/src/components/mempool/storage/tests/prop.rs b/zebrad/src/components/mempool/storage/tests/prop.rs index 398ba0925f9..bac143349a5 100644 --- a/zebrad/src/components/mempool/storage/tests/prop.rs +++ b/zebrad/src/components/mempool/storage/tests/prop.rs @@ -478,14 +478,14 @@ impl 
SpendConflictTestInput { VerifiedUnminedTx::new( first.0.into(), // make sure miner fee is big enough for all cases - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), VerifiedUnminedTx::new( second.0.into(), // make sure miner fee is big enough for all cases - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), @@ -508,14 +508,14 @@ impl SpendConflictTestInput { VerifiedUnminedTx::new( first.0.into(), // make sure miner fee is big enough for all cases - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), VerifiedUnminedTx::new( second.0.into(), // make sure miner fee is big enough for all cases - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), diff --git a/zebrad/src/components/mempool/storage/tests/vectors.rs b/zebrad/src/components/mempool/storage/tests/vectors.rs index 30ce35bb832..e4c1cd471fc 100644 --- a/zebrad/src/components/mempool/storage/tests/vectors.rs +++ b/zebrad/src/components/mempool/storage/tests/vectors.rs @@ -267,7 +267,7 @@ fn mempool_expired_basic_for_network(network: Network) -> Result<()> { storage.insert( VerifiedUnminedTx::new( tx.into(), - Amount::try_from(1_000_000).expect("invalid value"), + Amount::try_from(1_000_000).expect("valid amount"), 0, ) .expect("verification should pass"), diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 06ab0b70cef..bc35656dcc3 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -463,7 +463,7 @@ where // The Hedge middleware is the outermost layer, hedging requests // between two retry-wrapped networks. 
The innermost timeout // layer is relatively unimportant, because slow requests will - // probably be pre-emptively hedged. + // probably be preemptively hedged. // // The Hedge goes outside the Retry, because the Retry layer // abstracts away spurious failures from individual peers diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs index bc0cd8ef417..5621b12b690 100644 --- a/zebrad/tests/common/test_type.rs +++ b/zebrad/tests/common/test_type.rs @@ -200,7 +200,7 @@ impl TestType { if !use_internet_connection { config.network.initial_mainnet_peers = IndexSet::new(); config.network.initial_testnet_peers = IndexSet::new(); - // Avoid re-using cached peers from disk when we're supposed to be a disconnected instance + // Avoid reusing cached peers from disk when we're supposed to be a disconnected instance config.network.cache_dir = CacheDir::disabled(); // Activate the mempool immediately by default From d26b0c188fcd577ab319ffcc694e0be935d8734b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 4 Feb 2025 00:29:46 -0300 Subject: [PATCH 075/245] run `cargo autoinherit` (#9041) Co-authored-by: zancas --- Cargo.toml | 118 +++++++++++++++++++++++++++++ tower-batch-control/Cargo.toml | 32 ++++---- tower-fallback/Cargo.toml | 10 +-- zebra-chain/Cargo.toml | 108 +++++++++++++-------------- zebra-consensus/Cargo.toml | 68 ++++++++--------- zebra-grpc/Cargo.toml | 22 +++--- zebra-network/Cargo.toml | 76 +++++++++---------- zebra-node-services/Cargo.toml | 22 +++--- zebra-rpc/Cargo.toml | 64 +++++++--------- zebra-scan/Cargo.toml | 76 +++++++++---------- zebra-script/Cargo.toml | 8 +- zebra-state/Cargo.toml | 84 ++++++++++----------- zebra-test/Cargo.toml | 44 +++++------ zebra-utils/Cargo.toml | 38 +++++----- zebrad/Cargo.toml | 132 ++++++++++++++++----------------- 15 files changed, 506 insertions(+), 396 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dccad95c218..6aef936bc39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 
+33,124 @@ zcash_keys = "0.4.0" zcash_primitives = "0.19.0" zcash_proofs = "0.19.0" zcash_protocol = "0.4.0" +abscissa_core = "0.7.0" +atty = "0.2.14" +base64 = "0.22.1" +bellman = "0.14.0" +bincode = "1.3.3" +bitflags = "2.8.0" +bitflags-serde-legacy = "0.1.1" +bitvec = "1.0.1" +blake2b_simd = "1.0.2" +blake2s_simd = "1.0.2" +bls12_381 = "0.8.0" +bridgetree = "0.6.0" +bs58 = "0.5.1" +byteorder = "1.5.0" +bytes = "1.9.0" +chrono = { version = "0.4.39", default-features = false } +clap = "4.5.27" +color-eyre = { version = "0.6.3", default-features = false } +console-subscriber = "0.4.0" +criterion = "0.5.1" +crossbeam-channel = "0.5.14" +dirs = "6.0.0" +ed25519-zebra = "4.0.3" +elasticsearch = { version = "8.17.0-alpha.1", default-features = false } +equihash = "0.2.0" +ff = "0.13.0" +futures = "0.3.31" +futures-core = "0.3.28" +futures-util = "0.3.28" +group = "0.13.0" +halo2 = "0.3.0" +hex = "0.4.3" +hex-literal = "0.4.1" +howudoin = "0.1.2" +http-body-util = "0.1.2" +human_bytes = { version = "0.4.3", default-features = false } +humantime = "2.1.0" +humantime-serde = "1.1.1" +hyper = "1.6.0" +hyper-util = "0.1.9" +indexmap = "2.7.1" +indicatif = "0.17.11" +inferno = { version = "0.12.1", default-features = false } +insta = "1.42.1" +itertools = "0.14.0" +jsonrpc = "0.18.0" +jsonrpsee = "0.24.8" +jsonrpsee-proc-macros = "0.24.8" +jsonrpsee-types = "0.24.8" +jubjub = "0.10.0" +lazy_static = "1.4.0" +log = "0.4.25" +metrics = "0.24.1" +metrics-exporter-prometheus = { version = "0.16.1", default-features = false } +mset = "0.1.1" +nix = "0.29.0" +num-integer = "0.1.46" +once_cell = "1.20.2" +ordered-map = "0.4.2" +owo-colors = "4.1.0" +pin-project = "1.1.8" +primitive-types = "0.12.2" +proptest = "1.6.0" +proptest-derive = "0.5.1" +prost = "0.13.4" +quote = "1.0.38" +rand = "0.8.5" +rand_chacha = "0.3.1" +rand_core = "0.6.4" +rayon = "1.10.0" +reddsa = "0.5.1" +redjubjub = "0.7.0" +regex = "1.11.0" +reqwest = { version = "0.12.9", default-features = false } +ripemd = 
"0.1.3" +rlimit = "0.10.2" +rocksdb = { version = "0.22.0", default-features = false } +secp256k1 = "0.27.0" +semver = "1.0.25" +sentry = { version = "0.36.0", default-features = false } +serde = "1.0.217" +serde-big-array = "0.5.1" +serde_json = "1.0.138" +serde_with = "3.12.0" +serde_yml = "0.0.12" +sha2 = "0.10.7" +spandoc = "0.2.2" +static_assertions = "1.1.0" +structopt = "0.3.26" +syn = "2.0.96" +tempfile = "3.16.0" +thiserror = "2.0.11" +thread-priority = "1.2.0" +tinyvec = "1.8.1" +tokio = "1.43.0" +tokio-stream = "0.1.17" +tokio-test = "0.4.4" +tokio-util = "0.7.13" +toml = "0.8.19" +tonic = "0.12.3" +tonic-build = "0.12.3" +tonic-reflection = "0.12.3" +tower = "0.4.13" +tower-test = "0.4.0" +tracing = "0.1.41" +tracing-appender = "0.2.3" +tracing-error = "0.2.1" +tracing-flame = "0.2.0" +tracing-futures = "0.2.5" +tracing-journald = "0.3.0" +tracing-subscriber = "0.3.19" +tracing-test = "0.2.4" +uint = "0.10.0" +vergen = { version = "8.3.2", default-features = false } +wagyu-zcash-parameters = "0.2.0" +x25519-dalek = "2.0.1" +zcash_note_encryption = "0.4.1" +zcash_script = "0.2.0" [workspace.metadata.release] diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 6170840c098..631c22d68d8 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -22,29 +22,29 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -futures = "0.3.31" -futures-core = "0.3.28" -pin-project = "1.1.8" -rayon = "1.10.0" -tokio = { version = "1.43.0", features = ["time", "sync", "tracing", "macros"] } -tokio-util = "0.7.13" -tower = { version = "0.4.13", features = ["util", "buffer"] } -tracing = "0.1.41" -tracing-futures = "0.2.5" +futures = { workspace = true } +futures-core = { workspace = true } +pin-project = { workspace = true } +rayon = { workspace = true } +tokio = { workspace = true, features = ["time", "sync", "tracing", "macros"] } +tokio-util = { workspace = true } +tower = { 
workspace = true, features = ["util", "buffer"] } +tracing = { workspace = true } +tracing-futures = { workspace = true } [dev-dependencies] -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -ed25519-zebra = "4.0.3" -rand = "0.8.5" +ed25519-zebra = { workspace = true } +rand = { workspace = true } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } -tokio-test = "0.4.4" +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +tokio-test = { workspace = true } tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } -tower-test = "0.4.0" +tower-test = { workspace = true } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 2141c2bdc4b..c291bfa6f15 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -16,12 +16,12 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.8" -tower = "0.4.13" -futures-core = "0.3.28" -tracing = "0.1.41" +pin-project = { workspace = true } +tower = { workspace = true } +futures-core = { workspace = true } +tracing = { workspace = true } [dev-dependencies] -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 07a38ac0818..f8b9c6c69a1 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -63,110 +63,110 @@ bench = ["zebra-test"] [dependencies] # Cryptography -bitvec = "1.0.1" -bitflags = "2.8.0" -bitflags-serde-legacy = "0.1.1" -blake2b_simd 
= "1.0.2" -blake2s_simd = "1.0.2" -bridgetree = "0.6.0" -bs58 = { version = "0.5.1", features = ["check"] } -byteorder = "1.5.0" +bitvec = { workspace = true } +bitflags = { workspace = true } +bitflags-serde-legacy = { workspace = true } +blake2b_simd = { workspace = true } +blake2s_simd = { workspace = true } +bridgetree = { workspace = true } +bs58 = { workspace = true, features = ["check"] } +byteorder = { workspace = true } # TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 # See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L73-L85 # Restore support when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 -equihash = "0.2.0" +equihash = { workspace = true } -group = "0.13.0" +group = { workspace = true } incrementalmerkletree.workspace = true -jubjub = "0.10.0" -lazy_static = "1.4.0" -tempfile = "3.16.0" -dirs = "6.0.0" -num-integer = "0.1.46" -primitive-types = "0.12.2" -rand_core = "0.6.4" -ripemd = "0.1.3" +jubjub = { workspace = true } +lazy_static = { workspace = true } +tempfile = { workspace = true } +dirs = { workspace = true } +num-integer = { workspace = true } +primitive-types = { workspace = true } +rand_core = { workspace = true } +ripemd = { workspace = true } # Matches version used by hdwallet -secp256k1 = { version = "0.27.0", features = ["serde"] } -sha2 = { version = "0.10.7", features = ["compress"] } -uint = "0.10.0" -x25519-dalek = { version = "2.0.1", features = ["serde"] } +secp256k1 = { workspace = true, features = ["serde"] } +sha2 = { workspace = true, features = ["compress"] } +uint = { workspace = true } +x25519-dalek = { workspace = true, features = ["serde"] } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } orchard.workspace = true zcash_encoding.workspace = true zcash_history.workspace = true -zcash_note_encryption = "0.4.1" +zcash_note_encryption = { workspace = true } 
zcash_primitives = { workspace = true, features = ["transparent-inputs"] } sapling-crypto.workspace = true zcash_protocol.workspace = true zcash_address.workspace = true # Time -chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } -humantime = "2.1.0" +chrono = { workspace = true, features = ["clock", "std", "serde"] } +humantime = { workspace = true } # Error Handling & Formatting -static_assertions = "1.1.0" -thiserror = "2.0.11" -tracing = "0.1.41" +static_assertions = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } # Serialization -hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.217", features = ["serde_derive", "rc"] } -serde_with = "3.12.0" -serde-big-array = "0.5.1" +hex = { workspace = true, features = ["serde"] } +serde = { workspace = true, features = ["serde_derive", "rc"] } +serde_with = { workspace = true } +serde-big-array = { workspace = true } # Processing -futures = "0.3.31" -itertools = "0.14.0" -rayon = "1.10.0" +futures = { workspace = true } +itertools = { workspace = true } +rayon = { workspace = true } # ZF deps -ed25519-zebra = "4.0.3" -redjubjub = "0.7.0" -reddsa = "0.5.1" +ed25519-zebra = { workspace = true } +redjubjub = { workspace = true } +reddsa = { workspace = true } # Production feature json-conversion -serde_json = { version = "1.0.138", optional = true } +serde_json = { workspace = true, optional = true } # Production feature async-error and testing feature proptest-impl -tokio = { version = "1.43.0", optional = true } +tokio = { workspace = true, optional = true } # Experimental feature shielded-scan zcash_client_backend = { workspace = true, optional = true } # Optional testing dependencies -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } -rand = { version 
= "0.8.5", optional = true } -rand_chacha = { version = "0.3.1", optional = true } +rand = { workspace = true, optional = true } +rand_chacha = { workspace = true, optional = true } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } [dev-dependencies] # Benchmarks -criterion = { version = "0.5.1", features = ["html_reports"] } +criterion = { workspace = true, features = ["html_reports"] } # Error Handling & Formatting -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } -spandoc = "0.2.2" -tracing = "0.1.41" +tinyvec = { workspace = true, features = ["rustc_1_55"] } +spandoc = { workspace = true } +tracing = { workspace = true } # Make the optional testing dependencies required -proptest = "1.6.0" -proptest-derive = "0.5.1" +proptest = { workspace = true } +proptest-derive = { workspace = true } -rand = "0.8.5" -rand_chacha = "0.3.1" +rand = { workspace = true } +rand_chacha = { workspace = true } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 8a01cbf44ad..ba2dd14e8d6 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -35,33 +35,33 @@ getblocktemplate-rpcs = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "zebra-state/proptest-impl"] [dependencies] -blake2b_simd = "1.0.2" -bellman = "0.14.0" -bls12_381 = "0.8.0" +blake2b_simd = { workspace = true } +bellman = { workspace = true } +bls12_381 = { workspace = true } halo2 = { package = "halo2_proofs", version = "0.3.0" } -jubjub = "0.10.0" -rand = "0.8.5" -rayon = "1.10.0" - -chrono = { version = "0.4.39", 
default-features = false, features = ["clock", "std"] } -lazy_static = "1.4.0" -once_cell = "1.20.2" -serde = { version = "1.0.217", features = ["serde_derive"] } - -futures = "0.3.31" -futures-util = "0.3.28" -metrics = "0.24.1" -thiserror = "2.0.11" -tokio = { version = "1.43.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } -tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } -tracing = "0.1.41" -tracing-futures = "0.2.5" +jubjub = { workspace = true } +rand = { workspace = true } +rayon = { workspace = true } + +chrono = { workspace = true, features = ["clock", "std"] } +lazy_static = { workspace = true } +once_cell = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } + +futures = { workspace = true } +futures-util = { workspace = true } +metrics = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["time", "sync", "tracing", "rt-multi-thread"] } +tower = { workspace = true, features = ["timeout", "util", "buffer"] } +tracing = { workspace = true } +tracing-futures = { workspace = true } sapling-crypto.workspace = true orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } -wagyu-zcash-parameters = "0.2.0" +wagyu-zcash-parameters = { workspace = true } tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.20" } @@ -72,27 +72,27 @@ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.4 zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } # prod feature progress-bar -howudoin = { version = "0.1.2", optional = true } +howudoin = { workspace = true, optional = true } # Test-only dependencies -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = 
true, optional = true } [dev-dependencies] -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -hex = "0.4.3" -num-integer = "0.1.46" -proptest = "1.6.0" -proptest-derive = "0.5.1" -spandoc = "0.2.2" +hex = { workspace = true } +num-integer = { workspace = true } +proptest = { workspace = true } +proptest-derive = { workspace = true } +spandoc = { workspace = true } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } -tracing-error = "0.2.1" -tracing-subscriber = "0.3.19" +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +tracing-error = { workspace = true } +tracing-subscriber = { workspace = true } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index 411abc378ad..dfb9a0cff8a 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -16,15 +16,15 @@ categories = ["cryptography::cryptocurrencies"] [dependencies] -futures-util = "0.3.28" -tonic = "0.12.3" -tonic-reflection = "0.12.3" -prost = "0.13.4" -serde = { version = "1.0.217", features = ["serde_derive"] } -tokio = { version = "1.43.0", features = ["macros", "rt-multi-thread"] } -tokio-stream = "0.1.17" -tower = { version = "0.4.13", features = ["util", "buffer", "timeout"] } -color-eyre = "0.6.3" +futures-util = { workspace = true } +tonic = { workspace = true } +tonic-reflection = { workspace = true } +prost = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-stream = { workspace = true } +tower = { workspace 
= true, features = ["util", "buffer", "timeout"] } +color-eyre = { workspace = true } zcash_primitives.workspace = true @@ -32,10 +32,10 @@ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.4 zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } [build-dependencies] -tonic-build = "0.12.3" +tonic-build = { workspace = true } [dev-dependencies] -insta = { version = "1.42.1", features = ["redactions", "json", "ron"] } +insta = { workspace = true, features = ["redactions", "json", "ron"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-state = { path = "../zebra-state" } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index c7410b6b02a..c74f2d35149 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -40,39 +40,39 @@ progress-bar = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.8.0" -byteorder = "1.5.0" -bytes = "1.9.0" -chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } -dirs = "6.0.0" -hex = "0.4.3" -humantime-serde = "1.1.1" -indexmap = { version = "2.7.1", features = ["serde"] } -itertools = "0.14.0" -lazy_static = "1.4.0" -num-integer = "0.1.46" -ordered-map = "0.4.2" -pin-project = "1.1.8" -rand = "0.8.5" -rayon = "1.10.0" -regex = "1.11.0" -serde = { version = "1.0.217", features = ["serde_derive"] } -tempfile = "3.16.0" -thiserror = "2.0.11" - -futures = "0.3.31" -tokio = { version = "1.43.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } -tokio-stream = { version = "0.1.17", features = ["sync", "time"] } -tokio-util = { version = "0.7.13", features = ["codec"] } -tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } - -metrics = "0.24.1" -tracing-futures = "0.2.5" -tracing-error = { version = "0.2.1", features = ["traced-error"] } -tracing = "0.1.41" 
+bitflags = { workspace = true } +byteorder = { workspace = true } +bytes = { workspace = true } +chrono = { workspace = true, features = ["clock", "std"] } +dirs = { workspace = true } +hex = { workspace = true } +humantime-serde = { workspace = true } +indexmap = { workspace = true, features = ["serde"] } +itertools = { workspace = true } +lazy_static = { workspace = true } +num-integer = { workspace = true } +ordered-map = { workspace = true } +pin-project = { workspace = true } +rand = { workspace = true } +rayon = { workspace = true } +regex = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } +tempfile = { workspace = true } +thiserror = { workspace = true } + +futures = { workspace = true } +tokio = { workspace = true, features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio-stream = { workspace = true, features = ["sync", "time"] } +tokio-util = { workspace = true, features = ["codec"] } +tower = { workspace = true, features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } + +metrics = { workspace = true } +tracing-futures = { workspace = true } +tracing-error = { workspace = true, features = ["traced-error"] } +tracing = { workspace = true } # prod feature progress-bar -howudoin = { version = "0.1.2", optional = true } +howudoin = { workspace = true, optional = true } # tor dependencies # Wait until `arti-client`'s dependency `x25519-dalek v1.2.0` is updated to a higher version. 
(#5492) @@ -80,18 +80,18 @@ howudoin = { version = "0.1.2", optional = true } # tor-rtcompat = { version = "0.0.2", optional = true } # proptest dependencies -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } [dev-dependencies] -proptest = "1.6.0" -proptest-derive = "0.5.1" +proptest = { workspace = true } +proptest-derive = { workspace = true } -static_assertions = "1.1.0" -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } -toml = "0.8.19" +static_assertions = { workspace = true } +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +toml = { workspace = true } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index c84e8e9e2f2..93dd206121f 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -42,18 +42,18 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } # Optional dependencies # Tool and test feature rpc-client -color-eyre = { version = "0.6.3", optional = true } -jsonrpsee-types = { version = "0.24.8", optional = true } +color-eyre = { workspace = true, optional = true } +jsonrpsee-types = { workspace = true, optional = true } # Security: avoid default dependency on openssl -reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.217", optional = true } -serde_json = { version = "1.0.138", optional = true } -tokio = { version = "1.43.0", features = ["time", "sync"] } +reqwest = { workspace = true, features = ["rustls-tls"], optional = true } +serde = { workspace = true, optional = true } 
+serde_json = { workspace = true, optional = true } +tokio = { workspace = true, features = ["time", "sync"] } [dev-dependencies] -color-eyre = "0.6.3" -reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"] } -serde = "1.0.217" -serde_json = "1.0.138" -jsonrpsee-types = "0.24.8" +color-eyre = { workspace = true } +reqwest = { workspace = true, features = ["rustls-tls"] } +serde = { workspace = true } +serde_json = { workspace = true } +jsonrpsee-types = { workspace = true } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 0c93ba9b4e9..188b6c32048 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -54,48 +54,40 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.39", default-features = false, features = [ - "clock", - "std", -] } -futures = "0.3.31" +chrono = { workspace = true, features = ["clock", "std"] } +futures = { workspace = true } -jsonrpsee = { version = "0.24.8", features = ["server"] } -jsonrpsee-types = "0.24.8" -jsonrpsee-proc-macros = "0.24.8" -hyper = "1.6.0" -http-body-util = "0.1.2" -serde_json = "1.0.138" -indexmap = { version = "2.7.1", features = ["serde"] } +jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee-types = { workspace = true } +jsonrpsee-proc-macros = { workspace = true } +hyper = { workspace = true } +http-body-util = { workspace = true } +serde_json = { workspace = true } +indexmap = { workspace = true, features = ["serde"] } # RPC endpoint basic auth -base64 = "0.22.1" -rand = "0.8.5" +base64 = { workspace = true } +rand = { workspace = true } # Error handling -color-eyre = "0.6.3" +color-eyre = { workspace = true } -tokio = { version = "1.43.0", features = [ - "time", - "rt-multi-thread", - "macros", - "tracing", -] } -tower = "0.4.13" +tokio = { workspace = true, features = ["time", "rt-multi-thread", "macros", "tracing"] } +tower = { workspace = true } # indexer-rpcs dependencies -tonic = { version = "0.12.3", optional = true } 
-tonic-reflection = { version = "0.12.3", optional = true } -prost = { version = "0.13.4", optional = true } -tokio-stream = { version = "0.1.17", optional = true } +tonic = { workspace = true, optional = true } +tonic-reflection = { workspace = true, optional = true } +prost = { workspace = true, optional = true } +tokio-stream = { workspace = true, optional = true } -tracing = "0.1.41" +tracing = { workspace = true } -hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.217", features = ["serde_derive"] } +hex = { workspace = true, features = ["serde"] } +serde = { workspace = true, features = ["serde_derive"] } # For the `stop` RPC method. -nix = { version = "0.29.0", features = ["signal"] } +nix = { workspace = true, features = ["signal"] } zcash_primitives = { workspace = true, features = ["transparent-inputs"] } @@ -103,7 +95,7 @@ zcash_primitives = { workspace = true, features = ["transparent-inputs"] } zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl -proptest = { version = "1.6.0", optional = true } +proptest = { workspace = true, optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ "json-conversion", @@ -117,15 +109,15 @@ zebra-script = { path = "../zebra-script", version = "1.0.0-beta.44" } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } [build-dependencies] -tonic-build = { version = "0.12.3", optional = true } +tonic-build = { workspace = true, optional = true } [dev-dependencies] -insta = { version = "1.42.1", features = ["redactions", "json", "ron"] } +insta = { workspace = true, features = ["redactions", "json", "ron"] } -proptest = "1.6.0" +proptest = { workspace = true } -thiserror = "2.0.11" -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", 
version = "1.0.0-beta.44", features = [ "proptest-impl", diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 9c785ceae0b..fedb26f74ff 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -60,15 +60,15 @@ results-reader = [ [dependencies] -color-eyre = "0.6.3" -indexmap = { version = "2.7.1", features = ["serde"] } -itertools = "0.14.0" -semver = "1.0.25" -serde = { version = "1.0.217", features = ["serde_derive"] } -tokio = { version = "1.43.0", features = ["time"] } -tower = "0.4.13" -tracing = "0.1.41" -futures = "0.3.31" +color-eyre = { workspace = true } +indexmap = { workspace = true, features = ["serde"] } +itertools = { workspace = true } +semver = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } +tokio = { workspace = true, features = ["time"] } +tower = { workspace = true } +tracing = { workspace = true } +futures = { workspace = true } # ECC dependencies. zcash_client_backend.workspace = true @@ -83,47 +83,47 @@ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.4 zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } -chrono = { version = "0.4.39", default-features = false, features = ["clock", "std", "serde"] } +chrono = { workspace = true, features = ["clock", "std", "serde"] } # test feature proptest-impl -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } -bls12_381 = { version = "0.8.0", optional = true } -ff = { version = "0.13.0", optional = true } -group = { version = "0.13.0", optional = true } -jubjub = { version = "0.10.0", optional = true } -rand = { version = "0.8.5", optional = true } -zcash_note_encryption = { version = "0.4.1", optional = true } +bls12_381 = { workspace = true, optional = true } +ff = { workspace = 
true, optional = true } +group = { workspace = true, optional = true } +jubjub = { workspace = true, optional = true } +rand = { workspace = true, optional = true } +zcash_note_encryption = { workspace = true, optional = true } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44", optional = true } # zebra-scanner binary dependencies -tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } -structopt = "0.3.26" -lazy_static = "1.4.0" -serde_json = "1.0.138" +tracing-subscriber = { workspace = true, features = ["env-filter"] } +structopt = { workspace = true } +lazy_static = { workspace = true } +serde_json = { workspace = true } -jsonrpc = { version = "0.18.0", optional = true } -hex = { version = "0.4.3", optional = true } +jsonrpc = { workspace = true, optional = true } +hex = { workspace = true, optional = true } zebrad = { path = "../zebrad", version = "2.1.1" } [dev-dependencies] -insta = { version = "1.42.1", features = ["ron", "redactions"] } -tokio = { version = "1.43.0", features = ["test-util"] } - -proptest = "1.6.0" -proptest-derive = "0.5.1" -bls12_381 = "0.8.0" -ff = "0.13.0" -group = "0.13.0" -jubjub = "0.10.0" -rand = "0.8.5" -tempfile = "3.16.0" -zcash_note_encryption = "0.4.1" -toml = "0.8.19" -tonic = "0.12.3" +insta = { workspace = true, features = ["ron", "redactions"] } +tokio = { workspace = true, features = ["test-util"] } + +proptest = { workspace = true } +proptest-derive = { workspace = true } +bls12_381 = { workspace = true } +ff = { workspace = true } +group = { workspace = true } +jubjub = { workspace = true } +rand = { workspace = true } +tempfile = { workspace = true } +zcash_note_encryption = { workspace = true } +toml = { workspace = true } +tonic = { workspace = true } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 
5aed0b0a949..df20563dc6f 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -15,12 +15,12 @@ keywords = ["zebra", "zcash"] categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] -zcash_script = "0.2.0" +zcash_script = { workspace = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } -thiserror = "2.0.11" +thiserror = { workspace = true } [dev-dependencies] -hex = "0.4.3" -lazy_static = "1.4.0" +hex = { workspace = true } +lazy_static = { workspace = true } zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index ab236e830d1..cd46a0c5f7b 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -49,68 +49,68 @@ elasticsearch = [ ] [dependencies] -bincode = "1.3.3" -chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } -dirs = "6.0.0" -futures = "0.3.31" -hex = "0.4.3" -hex-literal = "0.4.1" -humantime-serde = "1.1.1" -human_bytes = { version = "0.4.3", default-features = false } -indexmap = "2.7.1" -itertools = "0.14.0" -lazy_static = "1.4.0" -metrics = "0.24.1" -mset = "0.1.1" -regex = "1.11.0" -rlimit = "0.10.2" -rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } -semver = "1.0.25" -crossbeam-channel = "0.5.14" -serde = { version = "1.0.217", features = ["serde_derive"] } -tempfile = "3.16.0" -thiserror = "2.0.11" - -rayon = "1.10.0" -tokio = { version = "1.43.0", features = ["rt-multi-thread", "sync", "tracing"] } -tower = { version = "0.4.13", features = ["buffer", "util"] } -tracing = "0.1.41" +bincode = { workspace = true } +chrono = { workspace = true, features = ["clock", "std"] } +dirs = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } +humantime-serde = { workspace = true } +human_bytes = { workspace = true } +indexmap = { workspace = true } +itertools = { workspace = true } 
+lazy_static = { workspace = true } +metrics = { workspace = true } +mset = { workspace = true } +regex = { workspace = true } +rlimit = { workspace = true } +rocksdb = { workspace = true, features = ["lz4"] } +semver = { workspace = true } +crossbeam-channel = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } +tempfile = { workspace = true } +thiserror = { workspace = true } + +rayon = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "sync", "tracing"] } +tower = { workspace = true, features = ["buffer", "util"] } +tracing = { workspace = true } # elasticsearch specific dependencies. # Security: avoid default dependency on openssl -elasticsearch = { version = "8.17.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.138", package = "serde_json", optional = true } +elasticsearch = { workspace = true, features = ["rustls-tls"], optional = true } +serde_json = { workspace = true, optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } # prod feature progress-bar -howudoin = { version = "0.1.2", optional = true } +howudoin = { workspace = true, optional = true } # test feature proptest-impl zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } [dev-dependencies] -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -once_cell = "1.20.2" -spandoc = "0.2.2" +once_cell = { workspace = true } +spandoc = { workspace = true } -hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.42.1", features = ["ron", "redactions"] } +hex = { workspace = true, features = ["serde"] } +insta = { workspace = true, features = ["ron", "redactions"] } -proptest = "1.6.0" -proptest-derive = "0.5.1" -rand = "0.8.5" +proptest = { workspace = true } +proptest-derive = { workspace = true } +rand = { workspace = true } halo2 = { package = "halo2_proofs", version = "0.3.0" } -jubjub = "0.10.0" +jubjub = { workspace = true } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index b23711733c6..b436ee57fe7 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -15,33 +15,33 @@ keywords = ["zebra", "zcash"] categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] -hex = "0.4.3" -indexmap = "2.7.1" -lazy_static = "1.4.0" -insta = "1.42.1" -itertools = "0.14.0" -proptest = "1.6.0" -once_cell = "1.20.2" -rand = "0.8.5" -regex = "1.11.0" +hex = { workspace = true } +indexmap = { workspace = true } +lazy_static = { workspace = true } +insta = { workspace = true } +itertools = { workspace = true } +proptest = { workspace = true } +once_cell = { workspace = true } +rand = { workspace = true } +regex = { workspace = true } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } -tower = { version = "0.4.13", features = ["util"] } -futures = "0.3.31" +tokio = { workspace = true, features = ["full", 
"tracing", "test-util"] } +tower = { workspace = true, features = ["util"] } +futures = { workspace = true } -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. -tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -humantime = "2.1.0" -owo-colors = "4.1.0" -spandoc = "0.2.2" -thiserror = "2.0.11" +humantime = { workspace = true } +owo-colors = { workspace = true } +spandoc = { workspace = true } +thiserror = { workspace = true } -tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } -tracing-error = "0.2.1" -tracing = "0.1.41" +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing-error = { workspace = true } +tracing = { workspace = true } [dev-dependencies] -tempfile = "3.16.0" +tempfile = { workspace = true } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 49b1db4c65b..0ff1d14f5aa 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -82,17 +82,17 @@ openapi-generator = [ ] [dependencies] -color-eyre = "0.6.3" +color-eyre = { workspace = true } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -structopt = "0.3.26" -hex = "0.4.3" -serde_json = "1.0.138" -tracing-error = "0.2.1" -tracing-subscriber = "0.3.19" -thiserror = "2.0.11" +structopt = { workspace = true } +hex = { workspace = true } +serde_json = { workspace = true } +tracing-error = { workspace = true } +tracing-subscriber = { workspace = true } +thiserror = { workspace = true } zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } @@ -101,27 +101,27 @@ zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.14.0", optional = true } +itertools = { workspace = true, optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.11.0", optional = true } +regex = { workspace = true, optional = true } # Avoid default openssl dependency to reduce the dependency tree and security alerts. 
-reqwest = { version = "0.12.9", default-features = false, features = ["rustls-tls"], optional = true } +reqwest = { workspace = true, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.43.0", features = ["full"], optional = true } +tokio = { workspace = true, features = ["full"], optional = true } -jsonrpc = { version = "0.18.0", optional = true } +jsonrpc = { workspace = true, optional = true } zcash_primitives = { workspace = true, optional = true } zcash_client_backend = { workspace = true, optional = true } zcash_protocol.workspace = true # For the openapi generator -rand = "0.8.5" -syn = { version = "2.0.96", features = ["full"], optional = true } -quote = { version = "1.0.38", optional = true } -serde_yml = { version = "0.0.12", optional = true } -serde = { version = "1.0.217", features = ["serde_derive"], optional = true } -indexmap = "2.7.1" +rand = { workspace = true } +syn = { workspace = true, features = ["full"], optional = true } +quote = { workspace = true, optional = true } +serde_yml = { workspace = true, optional = true } +serde = { workspace = true, features = ["serde_derive"], optional = true } +indexmap = { workspace = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 63e3372c1a5..5493deddf4f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -169,117 +169,117 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } # Required for crates.io publishing, but it's only used in tests zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44", optional = true } -abscissa_core = "0.7.0" -clap = { version = "4.5.27", features = ["cargo"] } -chrono = { version = "0.4.39", default-features = false, features = ["clock", "std"] } -humantime-serde = "1.1.1" -indexmap = "2.7.1" -lazy_static = "1.4.0" -semver = "1.0.25" -serde = { version = "1.0.217", features = ["serde_derive"] } -toml = "0.8.19" - -futures = 
"0.3.31" -rayon = "1.10.0" -tokio = { version = "1.43.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } -tokio-stream = { version = "0.1.17", features = ["time"] } -tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.8" - -color-eyre = { version = "0.6.3", default-features = false, features = ["issue-url"] } +abscissa_core = { workspace = true } +clap = { workspace = true, features = ["cargo"] } +chrono = { workspace = true, features = ["clock", "std"] } +humantime-serde = { workspace = true } +indexmap = { workspace = true } +lazy_static = { workspace = true } +semver = { workspace = true } +serde = { workspace = true, features = ["serde_derive"] } +toml = { workspace = true } + +futures = { workspace = true } +rayon = { workspace = true } +tokio = { workspace = true, features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio-stream = { workspace = true, features = ["time"] } +tower = { workspace = true, features = ["hedge", "limit"] } +pin-project = { workspace = true } + +color-eyre = { workspace = true, features = ["issue-url"] } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { version = "1.8.1", features = ["rustc_1_55"] } +tinyvec = { workspace = true, features = ["rustc_1_55"] } -thiserror = "2.0.11" +thiserror = { workspace = true } -tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } -tracing-appender = "0.2.3" -tracing-error = "0.2.1" -tracing-futures = "0.2.5" -tracing = "0.1.41" +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing-appender = { workspace = true } +tracing-error = { workspace = true } +tracing-futures = { workspace = true } +tracing = { workspace = true } -metrics = "0.24.1" +metrics = { workspace = true } -dirs = "6.0.0" -atty = "0.2.14" +dirs = { workspace = true } +atty = { workspace = true } -num-integer = "0.1.46" -rand = "0.8.5" +num-integer = { workspace = true } +rand = { workspace = true } # prod feature internal-miner -thread-priority = { version = "1.2.0", optional = true } +thread-priority = { workspace = true, optional = true } # prod feature sentry -sentry = { version = "0.36.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { workspace = true, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph -tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.12.1", default-features = false, optional = true } +tracing-flame = { workspace = true, optional = true } +inferno = { workspace = true, optional = true } # prod feature journald -tracing-journald = { version = "0.3.0", optional = true } +tracing-journald = { workspace = true, optional = true } # prod feature filter-reload -hyper = { version = "1.6.0", features = ["http1", "http2", "server"], optional = true } -http-body-util = { version = "0.1.2", optional = true } -hyper-util = { version = "0.1.9", optional = true } -bytes = { version = "1.9.0", optional = true } +hyper = { workspace = true, features = ["http1", "http2", "server"], 
optional = true } +http-body-util = { workspace = true, optional = true } +hyper-util = { workspace = true, optional = true } +bytes = { workspace = true, optional = true } # prod feature prometheus -metrics-exporter-prometheus = { version = "0.16.1", default-features = false, features = ["http-listener"], optional = true } +metrics-exporter-prometheus = { workspace = true, features = ["http-listener"], optional = true } # prod feature release_max_level_info # # zebrad uses tracing for logging, # we only use `log` to set and print the static log levels in transitive dependencies -log = "0.4.25" +log = { workspace = true } # prod feature progress-bar -howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.11", optional = true } +howudoin = { workspace = true, features = ["term-line"], optional = true } +indicatif = { workspace = true, optional = true } # test feature proptest-impl -proptest = { version = "1.6.0", optional = true } -proptest-derive = { version = "0.5.1", optional = true } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } # test feature tokio-console -console-subscriber = { version = "0.4.0", optional = true } +console-subscriber = { workspace = true, optional = true } [build-dependencies] -vergen = { version = "8.3.2", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { workspace = true, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests -tonic-build = { version = "0.12.3", optional = true } +tonic-build = { workspace = true, optional = true } [dev-dependencies] -abscissa_core = { version = "0.7.0", features = ["testing"] } -hex = "0.4.3" -hex-literal = "0.4.1" -jsonrpsee-types = "0.24.8" -once_cell = "1.20.2" -regex = "1.11.0" -insta = { version = "1.42.1", features = ["json"] } +abscissa_core = { workspace = true, features = ["testing"] } +hex = { workspace = true } 
+hex-literal = { workspace = true } +jsonrpsee-types = { workspace = true } +once_cell = { workspace = true } +regex = { workspace = true } +insta = { workspace = true, features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.138", features = ["preserve_order"] } -tempfile = "3.16.0" +serde_json = { workspace = true, features = ["preserve_order"] } +tempfile = { workspace = true } -hyper = { version = "1.6.0", features = ["http1", "http2", "server"]} -tracing-test = { version = "0.2.4", features = ["no-env-filter"] } +hyper = { workspace = true, features = ["http1", "http2", "server"] } +tracing-test = { workspace = true, features = ["no-env-filter"] } -tokio = { version = "1.43.0", features = ["full", "tracing", "test-util"] } -tokio-stream = "0.1.17" +tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +tokio-stream = { workspace = true } # test feature lightwalletd-grpc-tests -prost = "0.13.4" -tonic = "0.12.3" +prost = { workspace = true } +tonic = { workspace = true } -proptest = "1.6.0" -proptest-derive = "0.5.1" +proptest = { workspace = true } +proptest-derive = { workspace = true } # enable span traces and track caller in tests -color-eyre = { version = "0.6.3" } +color-eyre = { workspace = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44", features = ["proptest-impl"] } From a5a1cdfebe799eee4f791b26906f98990ed6da34 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 4 Feb 2025 04:29:49 +0100 Subject: [PATCH 076/245] chore: Minor cleanups (#9198) * Remove unnecessary clone * Fix test in zebra-script * Document that RPC server addr must be set * Change the log level of missing port msg to info Having the RPC port not specified is part of Zebra's normal operation, and is the default config. 
* Fix links in docs for tracing * Update zebrad/src/commands/start.rs Co-authored-by: Alfredo Garcia --------- Co-authored-by: Alfredo Garcia --- book/src/user/tracing.md | 8 ++++---- zebra-consensus/src/transaction.rs | 4 ++-- zebra-rpc/src/server.rs | 4 ++++ zebra-script/src/lib.rs | 4 ++++ zebrad/src/commands/start.rs | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/book/src/user/tracing.md b/book/src/user/tracing.md index d1b984f05df..cbacddb9ad4 100644 --- a/book/src/user/tracing.md +++ b/book/src/user/tracing.md @@ -36,10 +36,10 @@ and the [`flamegraph`][flamegraph] runtime config option. Compile Zebra with `--features sentry` to monitor it using [Sentry][sentry] in production. -[tracing_section]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html -[filter]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.filter -[flamegraph]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.flamegraph +[tracing_section]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.InnerConfig.html +[filter]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.InnerConfig.html#structfield.filter +[flamegraph]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.InnerConfig.html#structfield.flamegraph [flamegraphs]: http://www.brendangregg.com/flamegraphs.html [systemd_journald]: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -[use_journald]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.use_journald +[use_journald]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.InnerConfig.html#structfield.use_journald [sentry]: https://sentry.io/welcome/ diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index d9184035aa5..661b0be14d2 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ 
-575,9 +575,9 @@ where miner_fee, legacy_sigop_count, }, - Request::Mempool { transaction: ref tx, .. } => { + Request::Mempool { transaction: tx, .. } => { let transaction = VerifiedUnminedTx::new( - tx.clone(), + tx, miner_fee.expect("fee should have been checked earlier"), legacy_sigop_count, )?; diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index c787071d74d..87c565f4a3e 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -86,6 +86,10 @@ impl RpcServer { /// /// Returns [`JoinHandle`]s for the RPC server and `sendrawtransaction` queue tasks, /// and a [`RpcServer`] handle, which can be used to shut down the RPC server task. + /// + /// # Panics + /// + /// - If [`Config::listen_addr`] is `None`. // // TODO: // - put some of the configs or services in their own struct? diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index 3747b84c46e..096a36be9e8 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -441,6 +441,10 @@ mod tests { let input_index = 0; + verifier + .is_valid(NetworkUpgrade::Blossom, input_index + 1) + .expect_err("verification should fail"); + verifier .is_valid(NetworkUpgrade::Blossom, input_index + 1) .expect_err("verification should fail"); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 035b9a20100..5d346e42c77 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -262,7 +262,7 @@ impl StartCmd { ); rpc_task_handle.await.unwrap() } else { - warn!("configure an listen_addr to start the RPC server"); + info!("configure a listen_addr to start the RPC server"); ( tokio::spawn(std::future::pending().in_current_span()), tokio::spawn(std::future::pending().in_current_span()), From 6f24c9cc1e66f9a4d426fcee476e9cdea7e124fc Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 4 Feb 2025 11:51:47 +0000 Subject: [PATCH 077/245] docs: add CI/CD architecture documentation (#9202) ## Motivation Our CI/CD infrastructure is complex 
and was lacking proper documentation, making it challenging for newcomers and contributors to understand the system. This PR adds comprehensive documentation to: - Make our CI/CD architecture more accessible to new contributors - Document workflow relationships and dependencies - Explain infrastructure components and their roles - Provide best practices and known limitations Closes #9112 ### Specifications & References - [GitHub Actions Path Filtering Limitation](https://github.com/orgs/community/discussions/44490) - [GitHub Actions Variables in Forks Limitation](https://github.com/orgs/community/discussions/44322) - [Rust Caching Limitations](https://github.com/ZcashFoundation/zebra/issues/6169#issuecomment-1712776391) ## Solution 1. Added detailed CI/CD documentation in `.github/workflows/README.md`: - Table of contents for easy navigation - System overview and core infrastructure details - Mermaid diagram showing workflow relationships and parallel execution - Detailed sections on: - Core infrastructure components - Workflow organization - Test execution strategy - Infrastructure details - Best practices - Known issues 2. Added CI/CD section to main `README.md`: - Brief overview of CI/CD capabilities - Strategic placement between Getting Started and Documentation sections - Direct link to detailed documentation 3. 
Documentation improvements: - Clear explanation of patch workflows and their rationale - Detailed infrastructure dependencies and requirements - Comprehensive coverage of test execution patterns - External contributor considerations and limitations --- .github/workflows/README.md | 360 ++++++++++++++++++++++++++++++++++++ README.md | 16 ++ 2 files changed, 376 insertions(+) create mode 100644 .github/workflows/README.md diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 00000000000..68d83f0fd1e --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,360 @@ +# Zebra CI/CD Architecture + +This document provides a comprehensive overview of Zebra's Continuous Integration and Continuous Deployment (CI/CD) system. It serves as a guide for contributors, maintainers, and new team members. + +## Table of Contents + +1. [System Overview](#system-overview) +2. [CI/CD Workflow Diagram](#cicd-workflow-diagram) +3. [Core Infrastructure](#core-infrastructure) +4. [Workflow Organization](#workflow-organization) +5. [Test Execution Strategy](#test-execution-strategy) +6. [Infrastructure Details](#infrastructure-details) +7. [Best Practices](#best-practices) +8. [Known Issues](#known-issues) + +## System Overview + +Zebra's CI/CD system is built on GitHub Actions, providing a unified platform for automation. The system ensures code quality, maintains stability, and automates routine tasks through specialized workflows. + +## CI/CD Workflow Diagram + +Below is a Mermaid diagram illustrating how our CI workflows relate to each other, with a focus on parallel execution patterns and job dependencies. The diagram shows the main CI pipeline, integration test flow, unit test flow, underlying infrastructure, and the various triggers that initiate the pipeline. 
+ +```mermaid +graph TB + %% Define Triggers subgraph with parallel triggers + subgraph "Triggers" + direction TB + P[Pull Request] & Q[Push to main] & R[Weekly Schedule] & S[Manual Trigger] & T[Merge Queue] + end + + %% Main CI Pipeline with parallel flows after build + subgraph "Main CI Pipeline" + direction TB + A[ci-tests.yml] + B[sub-build-docker-image.yml] + A --> B + end + + %% Infrastructure dependencies + subgraph "Infrastructure" + direction TB + M[Docker Build Cloud] + N[GCP Resources] + O[GitHub Runners] + end + + %% Unit Test Flow with parallel test execution + subgraph "Unit Test Flow" + direction TB + C[sub-ci-unit-tests-docker.yml] + H[test-all] & I[test-fake-activation-heights] & J[test-empty-sync] & K[test-lightwalletd-integration] & L[test-configuration-file] + C --> H + C --> I + C --> J + C --> K + C --> L + end + + %% Integration Test Flow with some parallel and some sequential steps + subgraph "Integration Test Flow" + direction TB + D[sub-ci-integration-tests-gcp.yml] + E[sub-find-cached-disks.yml] + F[sub-deploy-integration-tests-gcp.yml] + G[sub-test-zebra-config.yml] + D --> E + D --> F + E --> F + F --> G + end + + %% Connect triggers to main pipeline + P --> A + Q --> A + R --> A + S --> A + T --> A + + %% Connect infrastructure to respective components + M --> B + N --> D + O --> C + + %% Connect main pipeline to test flows + B --> C + B --> D + + %% Style definitions + classDef primary fill:#2374ab,stroke:#2374ab,color:white + classDef secondary fill:#48a9a6,stroke:#48a9a6,color:white + classDef infra fill:#4b4e6d,stroke:#4b4e6d,color:white + classDef trigger fill:#95a5a6,stroke:#95a5a6,color:white + + %% Apply styles + class A,B primary + class C,D,E,F,G secondary + class H,I,J,K,L secondary + class M,N,O infra + class P,Q,R,S,T trigger +``` + +*The diagram above illustrates the parallel execution patterns in our CI/CD system. 
All triggers can initiate the pipeline concurrently, unit tests run in parallel after the Docker image build, and integration tests follow a mix of parallel and sequential steps. The infrastructure components support their respective workflow parts concurrently.* + +## Core Infrastructure + +### 1. GitHub Actions + +- Primary CI/CD platform +- Workflow automation and orchestration +- Integration with other services + +### 2. Infrastructure as Code + +- Uses [Cloud Foundation Fabric](https://github.com/ZcashFoundation/cloud-foundation-fabric) for GCP infrastructure +- Terraform-based architecture, networking, and permissions +- Resources (VMs, Disks, Images, etc.) deployed via GitHub Actions pipelines + +### 3. Build and Registry Services + +#### Docker-based Testing + +- Most tests run in containers defined by our [Dockerfile](../../docker/Dockerfile) +- The [entrypoint script](../../docker/entrypoint.sh) manages: + - Test execution + - Environment configuration + - Resource cleanup + +#### [Docker Build Cloud](https://www.docker.com/products/build-cloud/) + +- Optimized build times (~10 min for non-cached, ~30 sec for cached) +- More efficient than GitHub Runners +- Addresses [Rust caching limitations](https://github.com/ZcashFoundation/zebra/issues/6169#issuecomment-1712776391) + +#### Container Registries + +- Google Cloud Registry: Internal CI artifacts +- [Docker Hub](https://hub.docker.com/): Public release artifacts +- Ensures proper artifact distribution + +### 4. 
Test Infrastructure + +#### GitHub-hosted Runners + +- All Unit Tests jobs +- Standard CI/CD operations +- Limited to 6-hour runtime + +#### Self-hosted Runners (GKE) + +- All Integration Tests jobs (deployed to GCP) +- Support for tests exceeding 6 hours +- Extended logging capabilities +- Full GitHub Actions console integration + +**Note**: Self-hosted Runners are just used to keep the logs running in the GitHub Actions UI for over 6 hours; the Integration Tests are not run in the Self-hosted Runner itself, but in the deployed VMs in GCP through GitHub Actions. + +### 5. Queue Management + +[Mergify](https://mergify.com) + +- Automated PR merging and queue-based testing +- Priority management +- Ensures code quality before merge +- See our [`.mergify.yml`](../../.mergify.yml) for configuration + +## Workflow Organization + +### Main Workflows + +- **CI Tests** (`ci-*.yml`): Core testing workflows + - Unit tests + - Integration tests + - Code coverage + - Linting +- **CD Deployments** (`cd-*.yml`): Deployment workflows + - Node deployment to GCP + - Documentation deployment +- **Release Management** (`release-*.yml`): Version and release workflows + +### Supporting Workflows + +- **Sub-workflows** (`sub-*.yml`): Reusable workflow components + - Docker image building + - Test configurations + - GCP resource management +- **Patch Workflows** (`*.patch.yml`, `*.patch-external.yml`): Handle GitHub Actions limitations for required checks + +### Patch Workflows Rationale + +Our use of patch workflows (`.patch.yml` and `.patch-external.yml`) is a workaround for a [known limitation in GitHub Actions](https://github.com/orgs/community/discussions/44490) regarding path filters and required checks. When a workflow is marked as required for PR merging: + +1. **Path Filtering Limitation**: GitHub Actions does not properly handle the case where a required workflow is skipped due to path filters. 
Instead of marking the check as "skipped" or "passed", it remains in a "pending" state, blocking PR merges. + +2. **Our Solution**: We maintain parallel "patch" workflows that: + + - Run without path filters + - Contain minimal steps that always pass when the original workflow would have been skipped + - Allow PRs to merge when changes don't affect relevant paths + +3. **Impact**: + + - Doubled number of workflow files to maintain + - Additional complexity in workflow management + - Extra status checks in PR UI + +## Test Execution Strategy + +### Test Orchestration + +Our test execution is centralized through our Docker [entrypoint script](../../docker/entrypoint.sh), providing a unified way to run tests both in CI and locally. + +#### Environment Variable-driven Testing + +```bash +# Full test suite +docker run --rm -e RUN_ALL_TESTS=1 zebra-tests + +# Specific test suites +docker run --rm -e TEST_LWD_INTEGRATION=1 zebra-tests +``` + +#### Test Categories + +- Full suite (`RUN_ALL_TESTS`) +- Experimental features (`RUN_ALL_EXPERIMENTAL_TESTS`) +- Integration tests (`TEST_LWD_INTEGRATION`) +- Network sync (`TEST_ZEBRA_EMPTY_SYNC`, `TEST_UPDATE_SYNC`) +- State management (`TEST_DISK_REBUILD`) + +### Pull Request Testing + +#### Continuous Validation + +- Tests run automatically on each commit +- Contributors get immediate feedback on their changes +- Regressions are caught early in the development process +- Reduces manual testing burden on reviewers + +#### Fast Feedback Loop + +- Linting: Code style and formatting +- Unit tests: Function and component behavior +- Basic integration tests: Core functionality +- All results are reported directly in the PR interface + +#### Deep Validation + +- Full integration test suite +- Cross-platform compatibility checks +- Performance benchmarks +- State management validation + +### Scheduled Testing + +Weekly runs include: + +- Full Mainnet synchronization +- Extended integration suites +- Resource cleanup + +## 
Infrastructure Details + +### VM-based Test Infrastructure + +#### Test-specific Requirements + +- Some integration tests need a fully synced network +- Certain tests validate against specific chain heights +- Network state persistence between test runs +- Not all tests require this infrastructure - many run in standard containers + +#### State Management Complexity + +- **Creation**: Initial sync and state building for test environments +- **Versioning**: Multiple state versions for different test scenarios +- **Caching**: Reuse of existing states to avoid re-sync +- **Attachment**: Dynamic VM disk mounting for tests +- **Cleanup**: Automated state and resource cleanup + +#### Infrastructure Implications + +- GCP VM infrastructure for state-dependent tests +- Complex disk image management for test states +- State versioning and compatibility checks +- Resource lifecycle management + +#### Future Considerations + +- Potential migration of state-dependent tests to container-native environments +- Would require solving state persistence in Kubernetes +- Need to balance containerization benefits with test requirements +- Opportunity to reduce infrastructure complexity + +## Best Practices + +### For Contributors + +#### Local Testing + +```bash +# Build and run tests +docker build -t zebra-tests --target tests . 
+docker run --rm zebra-tests +``` + +#### PR Guidelines + +- Use descriptive labels +- Mark as draft when needed +- Address test failures + +### For Maintainers + +#### Workflow Maintenance + +- Regular review of workflow efficiency +- Update resource allocations as needed +- Monitor test execution times + +#### Security Considerations + +- Regular secret rotation +- Access control review +- Dependency updates + +## Known Issues + +### External Contributor Limitations + +#### GCP Dependencies + +- Most CI workflows depend on Google Cloud Platform resources +- Docker artifacts and VM images are tied to GCP +- External contributors cannot run full CI suite in their forks +- Integration tests require GCP infrastructure access +- This particularly impacts: + - Integration test execution + - Node deployment testing + - State storage and caching validation + +#### GitHub Actions Variables Restriction + +- Due to a [GitHub Actions limitation](https://github.com/orgs/community/discussions/44322), workflows in forked repositories cannot access repository variables +- This affects external contributors' ability to run CI workflows +- Required configuration values are not available in fork-based PRs +- Currently no workaround available from GitHub +- Impact on external contributors: + - Cannot run workflows requiring GCP credentials + - Unable to access configuration variables + - Limited ability to test infrastructure changes + +### Mitigation Through Mergify + +- When external PRs enter the merge queue, they are tested with full access to variables and resources +- All CI workflows run in the context of our repository, not the fork +- This provides a safety net, ensuring no untested code reaches production +- External contributors can still get feedback through code review before their changes are tested in the queue + +These safeguards help maintain code quality while working around the platform limitations for external contributions. 
diff --git a/README.md b/README.md index 569b6f46d4b..de401540ba5 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,9 @@ - [Getting Started](#getting-started) - [Docker](#docker) - [Manual Build](#manual-build) + - [General instructions for installing dependencies](#general-instructions-for-installing-dependencies) + - [Dependencies on Arch](#dependencies-on-arch) +- [CI/CD Architecture](#cicd-architecture) - [Documentation](#documentation) - [User support](#user-support) - [Security](#security) @@ -73,6 +76,7 @@ Below are quick summaries for installing the dependencies on your machine.
#### General instructions for installing dependencies + 1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install). @@ -98,6 +102,7 @@ Below are quick summaries for installing the dependencies on your machine.
#### Dependencies on Arch + ```sh @@ -124,6 +129,17 @@ Refer to the [Installing Zebra](https://zebra.zfnd.org/user/install.html) and [Running Zebra](https://zebra.zfnd.org/user/run.html) sections in the book for enabling optional features, detailed configuration and further details. +## CI/CD Architecture + +Zebra uses a comprehensive CI/CD system built on GitHub Actions to ensure code quality, maintain stability, and automate routine tasks. Our CI/CD infrastructure: + +- Runs automated tests on every PR and commit +- Manages deployments to various environments +- Handles cross-platform compatibility checks +- Automates release processes + +For a detailed understanding of our CI/CD system, including workflow diagrams, infrastructure details, and best practices, see our [CI/CD Architecture Documentation](.github/workflows/README.md). + ## Documentation The Zcash Foundation maintains the following resources documenting Zebra: From 6d01f052c990b06f0f351fed39bff5a0a33f2b40 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 4 Feb 2025 13:50:10 -0300 Subject: [PATCH 078/245] chore(release): Zebra release v2.2.0 (#9195) * add changelog section for 2.2.0 * chore: Release * update end of support * Apply suggestions from code review Co-authored-by: Arya * Update CHANGELOG.md Co-authored-by: Conrado Gouvea --------- Co-authored-by: Arya Co-authored-by: Conrado Gouvea --- CHANGELOG.md | 33 ++++++++++++++++++++ Cargo.lock | 28 ++++++++--------- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +-- tower-batch-control/Cargo.toml | 6 ++-- tower-fallback/Cargo.toml | 4 +-- zebra-chain/Cargo.toml | 6 ++-- zebra-consensus/Cargo.toml | 20 ++++++------ zebra-grpc/Cargo.toml | 6 ++-- zebra-network/Cargo.toml | 4 +-- zebra-node-services/Cargo.toml | 4 +-- zebra-rpc/Cargo.toml | 24 +++++++------- zebra-scan/Cargo.toml | 20 ++++++------ zebra-script/Cargo.toml | 6 ++-- zebra-state/Cargo.toml | 10 +++--- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 ++--- 
zebrad/Cargo.toml | 30 +++++++++--------- zebrad/src/components/sync/end_of_support.rs | 2 +- 19 files changed, 126 insertions(+), 93 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f12c495bb75..43c189f398b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,39 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 2.2.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.2.0) - 2025-02-03 + +In this release, Zebra introduced an additional consensus check on the branch ID of Nu6 transactions +(which is currently also checked elsewhere; but we believe it's important to check on its own to protect +against future code changes), along with important refactors and improvements. + +### Added + +- An index to track spending transaction ids by spent outpoints and revealed nullifiers ([#8895](https://github.com/ZcashFoundation/zebra/pull/8895)) +- Accessor methods to `zebra-rpc` request/response types ([#9113](https://github.com/ZcashFoundation/zebra/pull/9113)) +- `getblock` RPC method now can return transaction details with verbosity=2 ([#9083](https://github.com/ZcashFoundation/zebra/pull/9083)) +- Serialized NU5 blocks to test vectors ([#9098](https://github.com/ZcashFoundation/zebra/pull/9098)) + +### Changed + +- Migrated from deprecated `jsonrpc_*` crates to `jsonrpsee` ([#9059](https://github.com/ZcashFoundation/zebra/pull/9059), [#9151](https://github.com/ZcashFoundation/zebra/pull/9151)) +- Optimized checks for coinbase transactions ([#9126](https://github.com/ZcashFoundation/zebra/pull/9126)) +- Avoid re-verifying transactions in blocks if those transactions are in the mempool ([#8951](https://github.com/ZcashFoundation/zebra/pull/8951), [#9118](https://github.com/ZcashFoundation/zebra/pull/9118)) +- Allow transactions spending coinbase outputs to have transparent outputs on 
Regtest ([#9085](https://github.com/ZcashFoundation/zebra/pull/9085)) + +### Fixed + +- Respond to getblockchaininfo with genesis block when empty state ([#9138](https://github.com/ZcashFoundation/zebra/pull/9138)) +- Verify consensus branch ID in SIGHASH precomputation ([#9139](https://github.com/ZcashFoundation/zebra/pull/9139)) +- More closely match zcashd RPC errors and `getrawtransaction` RPC behaviour ([#9049](https://github.com/ZcashFoundation/zebra/pull/9049)) +- Fixes bugs in the lightwalletd integration tests ([#9052](https://github.com/ZcashFoundation/zebra/pull/9052)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@Fallengirl, @arya2, @conradoplg, @elijahhampton, @futreall, @gustavovalverde, @idky137, @mpguerra, @oxarbitrage, @rex4539, @rootdiae, @sandakersmann and @upbqdn + + ## [Zebra 2.1.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.1.0) - 2024-12-06 This release adds a check to verify that V5 transactions in the mempool have the correct consensus branch ID; diff --git a/Cargo.lock b/Cargo.lock index 11dcdfaa92c..0d4b5a3719b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4888,7 +4888,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.20" +version = "0.2.41-beta.21" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4911,7 +4911,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.20" +version = "0.2.41-beta.21" dependencies = [ "futures-core", "pin-project", @@ -5969,7 +5969,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "bitflags 2.8.0", "bitflags-serde-legacy", @@ -6034,7 +6034,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "bellman", "blake2b_simd", @@ -6080,7 +6080,7 @@ dependencies = [ [[package]] name = "zebra-grpc" -version = 
"0.1.0-alpha.11" +version = "0.1.0-alpha.12" dependencies = [ "color-eyre", "futures-util", @@ -6102,7 +6102,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "bitflags 2.8.0", "byteorder", @@ -6143,7 +6143,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "color-eyre", "jsonrpsee-types", @@ -6156,7 +6156,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "base64 0.22.1", "chrono", @@ -6197,7 +6197,7 @@ dependencies = [ [[package]] name = "zebra-scan" -version = "0.1.0-alpha.13" +version = "0.1.0-alpha.14" dependencies = [ "bls12_381", "chrono", @@ -6243,7 +6243,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "hex", "lazy_static", @@ -6255,7 +6255,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "bincode", "chrono", @@ -6301,7 +6301,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "color-eyre", "futures", @@ -6329,7 +6329,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" dependencies = [ "color-eyre", "hex", @@ -6360,7 +6360,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "2.1.1" +version = "2.2.0" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index bee81533175..1349ee6b322 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -37,7 +37,7 @@ docker run -d --platform linux/amd64 \ ### Build it locally ```shell -git clone --depth 1 --branch v2.1.1 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v2.2.0 
https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index f9e0b03c5c9..272a99cba7a 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -76,7 +76,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v2.1.1 +git checkout v2.2.0 ``` 3. Build and Run `zebrad` @@ -89,7 +89,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.1.1 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.2.0 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 631c22d68d8..90cfae1fc3e 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.20" +version = "0.2.41-beta.21" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -43,10 +43,10 @@ rand = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } tokio-test = { workspace = true } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.21" } tower-test = { workspace = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index c291bfa6f15..745574c898b 100644 --- a/tower-fallback/Cargo.toml +++ 
b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.20" +version = "0.2.41-beta.21" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" @@ -24,4 +24,4 @@ tracing = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index f8b9c6c69a1..fb4833027d3 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -145,7 +145,7 @@ proptest-derive = { workspace = true, optional = true } rand = { workspace = true, optional = true } rand_chacha = { workspace = true, optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45", optional = true } [dev-dependencies] # Benchmarks @@ -168,7 +168,7 @@ rand_chacha = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } [[bench]] name = "block" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index ba2dd14e8d6..7fe94c1b380 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = 
"Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -63,13 +63,13 @@ orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } wagyu-zcash-parameters = { workspace = true } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.20" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.20" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.21" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.21" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.44" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.45" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } # prod feature progress-bar howudoin = { workspace = true, optional = true } @@ -94,6 +94,6 @@ tokio = { workspace = true, features = ["full", "tracing", "test-util"] } tracing-error = { workspace = true } tracing-subscriber = { workspace = true } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml 
index dfb9a0cff8a..ad696e03482 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-grpc" -version = "0.1.0-alpha.11" +version = "0.1.0-alpha.12" authors = ["Zcash Foundation "] description = "Zebra gRPC interface" license = "MIT OR Apache-2.0" @@ -28,8 +28,8 @@ color-eyre = { workspace = true } zcash_primitives.workspace = true -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["shielded-scan"] } -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.45" } [build-dependencies] tonic-build = { workspace = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index c74f2d35149..69d41648286 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { workspace = true, optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["async-error"] } [dev-dependencies] proptest = { workspace = true } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 93dd206121f..ce9e48100f3 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" 
license = "MIT OR Apache-2.0" @@ -37,7 +37,7 @@ rpc-client = [ shielded-scan = [] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.45" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 188b6c32048..463b9288c69 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -97,16 +97,16 @@ zcash_address = { workspace = true, optional = true} # Test-only feature proptest-impl proptest = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = [ "json-conversion", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = [ "rpc-client", ] } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.44" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.45" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } [build-dependencies] tonic-build = { workspace = true, optional = true } @@ -119,17 +119,17 @@ proptest = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full", 
"tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = [ "proptest-impl", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45", features = [ "proptest-impl", ] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44", features = [ +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45", features = [ "proptest-impl", ] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = [ +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = [ "proptest-impl", ] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index fedb26f74ff..bdc1f40f870 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-scan" -version = "0.1.0-alpha.13" +version = "0.1.0-alpha.14" authors = ["Zcash Foundation "] description = "Shielded transaction scanner for the Zcash blockchain" license = "MIT OR Apache-2.0" @@ -77,11 +77,11 @@ zcash_primitives.workspace = true zcash_address.workspace = true sapling-crypto.workspace = true -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["shielded-scan"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["shielded-scan"] } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["shielded-scan"] } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["shielded-scan"] } +zebra-state = { 
path = "../zebra-state", version = "1.0.0-beta.45", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.12" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } chrono = { workspace = true, features = ["clock", "std", "serde"] } @@ -96,7 +96,7 @@ jubjub = { workspace = true, optional = true } rand = { workspace = true, optional = true } zcash_note_encryption = { workspace = true, optional = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44", optional = true } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45", optional = true } # zebra-scanner binary dependencies tracing-subscriber = { workspace = true, features = ["env-filter"] } @@ -107,7 +107,7 @@ serde_json = { workspace = true } jsonrpc = { workspace = true, optional = true } hex = { workspace = true, optional = true } -zebrad = { path = "../zebrad", version = "2.1.1" } +zebrad = { path = "../zebrad", version = "2.2.0" } [dev-dependencies] insta = { workspace = true, features = ["ron", "redactions"] } @@ -125,5 +125,5 @@ zcash_note_encryption = { workspace = true } toml = { workspace = true } tonic = { workspace = true } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index df20563dc6f..1813b7c2629 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's 
zcash_script library" license = "MIT OR Apache-2.0" @@ -16,11 +16,11 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = { workspace = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } thiserror = { workspace = true } [dev-dependencies] hex = { workspace = true } lazy_static = { workspace = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index cd46a0c5f7b..2b62be81751 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -81,13 +81,13 @@ tracing = { workspace = true } elasticsearch = { workspace = true, features = ["rustls-tls"], optional = true } serde_json = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["async-error"] } # prod feature progress-bar howudoin = { workspace = true, optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.44", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45", optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } @@ -112,5 +112,5 @@ jubjub = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", 
version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index b436ee57fe7..10c455d5b49 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 0ff1d14f5aa..eef1f4397a3 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.44" +version = "1.0.0-beta.45" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -94,11 +94,11 @@ tracing-error = { workspace = true } tracing-subscriber = { workspace = true } thiserror = { workspace = true } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { workspace = true, optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 5493deddf4f..832a3a4cfb5 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "2.1.1" +version = "2.2.0" authors = ["Zcash 
Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -159,15 +159,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.44", features = ["rpc-client"] } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.44" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["rpc-client"] } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.45", optional = true } abscissa_core = { workspace = true } clap = { workspace = true, features = ["cargo"] } @@ -281,13 +281,13 @@ proptest-derive = { workspace = true } # enable span traces and track caller in tests color-eyre = { workspace = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.44", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.44", features = 
["proptest-impl"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.44", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.44" } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.12" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). @@ -298,7 +298,7 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.11" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.44" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.45" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 284c266fb6c..9fa98425531 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. 
-pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_742_000; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_809_400; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. From 343656cb3c82e05349d8ace024fd507534dd464b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 4 Feb 2025 23:24:37 -0300 Subject: [PATCH 079/245] fix(mining): Advertise mined blocks (#9176) * add a channel for submit_block notifications to gossip task * fix tests and gossip logic * remove the network discriminant and add a test * clippy suggestions * fix unused variable * attempt to fix the conditional compilation issues * fix default * Suggestions for "fix(mining): Advertise mined blocks" (#9183) * refactor error conversions in GetBlockTemplateRpcImpl and rewords documentation * Replaces polling mined block receiver with a select * Skip checking that Zebra is likely synced to the network tip before returning block templates on Testnet. * fixes a clippy lint and a concurrency bug --------- Co-authored-by: Arya --- .../src/methods/get_block_template_rpcs.rs | 21 ++- .../get_block_template.rs | 5 +- .../types/submit_block.rs | 36 ++++++ .../tests/snapshot/get_block_template_rpcs.rs | 3 + zebra-rpc/src/methods/tests/vectors.rs | 15 ++- zebra-rpc/src/server.rs | 5 +- zebra-rpc/src/server/error.rs | 23 ++++ zebra-rpc/src/server/tests/vectors.rs | 4 + zebrad/src/commands/start.rs | 17 +++ .../components/inbound/tests/fake_peer_set.rs | 23 +++- .../components/inbound/tests/real_peer_set.rs | 121 ++++++++++++++++++ zebrad/src/components/sync/gossip.rs | 69 +++++++--- zebrad/tests/acceptance.rs | 22 +++- 13 files changed, 327 insertions(+), 37 deletions(-) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 2bb9a0ca393..3cdb8ef079c 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -6,6 +6,7 @@ use 
futures::{future::OptionFuture, TryFutureExt}; use jsonrpsee::core::{async_trait, RpcResult as Result}; use jsonrpsee_proc_macros::rpc; use jsonrpsee_types::ErrorObject; +use tokio::sync::watch; use tower::{Service, ServiceExt}; use zcash_address::{unified::Encoding, TryFromAddress}; @@ -63,7 +64,10 @@ use crate::{ hex_data::HexData, GetBlockHash, }, - server::{self, error::MapError}, + server::{ + self, + error::{MapError, OkOrError}, + }, }; pub mod constants; @@ -375,6 +379,10 @@ pub struct GetBlockTemplateRpcImpl< /// Address book of peers, used for `getpeerinfo`. address_book: AddressBook, + + /// A channel to send successful block submissions to the block gossip task, + /// so they can be advertised to peers. + mined_block_sender: watch::Sender<(block::Hash, block::Height)>, } impl Debug @@ -465,6 +473,7 @@ where block_verifier_router: BlockVerifierRouter, sync_status: SyncStatus, address_book: AddressBook, + mined_block_sender: Option>, ) -> Self { // Prevent loss of miner funds due to an unsupported or incorrect address type. if let Some(miner_address) = mining_config.miner_address.clone() { @@ -527,6 +536,8 @@ where block_verifier_router, sync_status, address_book, + mined_block_sender: mined_block_sender + .unwrap_or(submit_block::SubmitBlockChannel::default().sender()), } } } @@ -937,8 +948,7 @@ where let block_height = block .coinbase_height() - .map(|height| height.0.to_string()) - .unwrap_or_else(|| "invalid coinbase height".to_string()); + .ok_or_error(0, "coinbase height not found")?; let block_hash = block.hash(); let block_verifier_router_response = block_verifier_router @@ -957,6 +967,11 @@ where // The difference is important to miners, because they want to mine on the best chain. 
Ok(block_hash) => { tracing::info!(?block_hash, ?block_height, "submit block accepted"); + + self.mined_block_sender + .send((block_hash, block_height)) + .map_error_with_prefix(0, "failed to send mined block")?; + return Ok(submit_block::Response::Accepted); } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index baa0200db1f..2a8a9f60109 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -161,10 +161,7 @@ where Tip: ChainTip + Clone + Send + Sync + 'static, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { - // TODO: - // - Add a `disable_peers` field to `Network` to check instead of `disable_pow()` (#8361) - // - Check the field in `sync_status` so it applies to the mempool as well. - if network.disable_pow() { + if network.is_a_test_network() { return Ok(()); } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs index cec806901bb..54f593eb867 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs @@ -1,5 +1,9 @@ //! Parameter and response types for the `submitblock` RPC. +use tokio::sync::watch; + +use zebra_chain::{block, parameters::GENESIS_PREVIOUS_BLOCK_HASH}; + // Allow doc links to these imports. #[allow(unused_imports)] use crate::methods::get_block_template_rpcs::GetBlockTemplate; @@ -64,3 +68,35 @@ impl From for Response { Self::ErrorResponse(error_response) } } + +/// A submit block channel, used to inform the gossip task about mined blocks. 
+pub struct SubmitBlockChannel { + /// The channel sender + sender: watch::Sender<(block::Hash, block::Height)>, + /// The channel receiver + receiver: watch::Receiver<(block::Hash, block::Height)>, +} + +impl SubmitBlockChannel { + /// Create a new submit block channel + pub fn new() -> Self { + let (sender, receiver) = watch::channel((GENESIS_PREVIOUS_BLOCK_HASH, block::Height::MIN)); + Self { sender, receiver } + } + + /// Get the channel sender + pub fn sender(&self) -> watch::Sender<(block::Hash, block::Height)> { + self.sender.clone() + } + + /// Get the channel receiver + pub fn receiver(&self) -> watch::Receiver<(block::Hash, block::Height)> { + self.receiver.clone() + } +} + +impl Default for SubmitBlockChannel { + fn default() -> Self { + Self::new() + } +} diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index a512faf7cfc..ec6d2fe4870 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -151,6 +151,7 @@ pub async fn test_responses( block_verifier_router.clone(), mock_sync_status.clone(), mock_address_book, + None, ); if network.is_a_test_network() && !network.is_default_testnet() { @@ -286,6 +287,7 @@ pub async fn test_responses( block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), + None, ); // Basic variant (default mode and no extra features) @@ -395,6 +397,7 @@ pub async fn test_responses( mock_block_verifier_router.clone(), mock_sync_status, MockAddressBookPeers::default(), + None, ); let get_block_template_fut = get_block_template_rpc_mock_state_verifier.get_block_template( diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index e1f559b8e4f..7d2f88f1983 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -6,13 +6,12 @@ use std::sync::Arc; use 
futures::FutureExt; use tower::buffer::Buffer; -use zebra_chain::serialization::ZcashSerialize; use zebra_chain::{ amount::Amount, block::Block, chain_tip::{mock::MockChainTip, NoChainTip}, parameters::Network::*, - serialization::ZcashDeserializeInto, + serialization::{ZcashDeserializeInto, ZcashSerialize}, transaction::UnminedTxId, }; use zebra_node_services::BoxError; @@ -1195,6 +1194,7 @@ async fn rpc_getblockcount() { block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); // Get the tip height using RPC method `get_block_count` @@ -1244,6 +1244,7 @@ async fn rpc_getblockcount_empty_state() { block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); // Get the tip height using RPC method `get_block_count @@ -1312,6 +1313,7 @@ async fn rpc_getpeerinfo() { block_verifier_router, MockSyncStatus::default(), mock_address_book, + None, ); // Call `get_peer_info` @@ -1372,6 +1374,7 @@ async fn rpc_getblockhash() { tower::ServiceBuilder::new().service(block_verifier_router), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); // Query the hashes using positive indexes @@ -1428,6 +1431,7 @@ async fn rpc_getmininginfo() { MockService::build().for_unit_tests(), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); get_block_template_rpc @@ -1464,6 +1468,7 @@ async fn rpc_getnetworksolps() { MockService::build().for_unit_tests(), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); let get_network_sol_ps_inputs = [ @@ -1595,6 +1600,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), + None, ); // Fake the ChainInfo response @@ -1870,6 +1876,7 @@ async fn rpc_submitblock_errors() { block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); // Try to submit pre-populated blocks and assert that it responds with 
duplicate. @@ -1922,6 +1929,7 @@ async fn rpc_validateaddress() { MockService::build().for_unit_tests(), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); let validate_address = get_block_template_rpc @@ -1967,6 +1975,7 @@ async fn rpc_z_validateaddress() { MockService::build().for_unit_tests(), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); let z_validate_address = get_block_template_rpc @@ -2055,6 +2064,7 @@ async fn rpc_getdifficulty() { block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), + None, ); // Fake the ChainInfo response: smallest numeric difficulty @@ -2176,6 +2186,7 @@ async fn rpc_z_listunifiedreceivers() { MockService::build().for_unit_tests(), MockSyncStatus::default(), MockAddressBookPeers::default(), + None, ); // invalid address diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 87c565f4a3e..3d54ab8ea54 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -12,7 +12,7 @@ use std::{fmt, panic}; use cookie::Cookie; use jsonrpsee::server::middleware::rpc::RpcServiceBuilder; use jsonrpsee::server::{Server, ServerHandle}; -use tokio::task::JoinHandle; +use tokio::{sync::watch, task::JoinHandle}; use tower::Service; use tracing::*; @@ -120,6 +120,8 @@ impl RpcServer { address_book: AddressBook, latest_chain_tip: Tip, network: Network, + #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] + mined_block_sender: Option>, ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> where VersionString: ToString + Clone + Send + 'static, @@ -170,6 +172,7 @@ impl RpcServer { block_verifier_router, sync_status, address_book, + mined_block_sender, ); // Initialize the rpc methods with the zebra version diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs index 835e3c4581c..cf54de4e8b2 100644 --- a/zebra-rpc/src/server/error.rs +++ b/zebra-rpc/src/server/error.rs @@ -69,6 +69,14 @@ pub(crate) trait MapError: 
Sized { /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a specific error code. fn map_error(self, code: impl Into) -> std::result::Result; + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a prefixed message and a specific error code. + #[cfg(feature = "getblocktemplate-rpcs")] + fn map_error_with_prefix( + self, + code: impl Into, + msg_prefix: impl ToString, + ) -> Result; + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a [`LegacyCode::Misc`] error code. fn map_misc_error(self) -> std::result::Result { self.map_error(LegacyCode::Misc) @@ -98,6 +106,21 @@ where fn map_error(self, code: impl Into) -> Result { self.map_err(|error| ErrorObject::owned(code.into().code(), error.to_string(), None::<()>)) } + + #[cfg(feature = "getblocktemplate-rpcs")] + fn map_error_with_prefix( + self, + code: impl Into, + msg_prefix: impl ToString, + ) -> Result { + self.map_err(|error| { + ErrorObject::owned( + code.into().code(), + format!("{}: {}", msg_prefix.to_string(), error.to_string()), + None::<()>, + ) + }) + } } impl OkOrError for Option { diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index bf850661a09..6cb83f98326 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -56,6 +56,7 @@ async fn rpc_server_spawn() { MockAddressBookPeers::default(), NoChainTip, Mainnet, + None, ); info!("spawned RPC server, checking services..."); @@ -115,6 +116,7 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { MockAddressBookPeers::default(), NoChainTip, Mainnet, + None, ) .await .expect(""); @@ -170,6 +172,7 @@ async fn rpc_server_spawn_port_conflict() { MockAddressBookPeers::default(), NoChainTip, Mainnet, + None, ) .await; @@ -189,6 +192,7 @@ async fn rpc_server_spawn_port_conflict() { MockAddressBookPeers::default(), NoChainTip, Mainnet, + None, ) .await; diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 
5d346e42c77..ba6ddce82c9 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -86,6 +86,9 @@ use zebra_chain::block::genesis::regtest_genesis_block; use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; use zebra_rpc::server::RpcServer; +#[cfg(feature = "getblocktemplate-rpcs")] +use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; + use crate::{ application::{build_version, user_agent}, components::{ @@ -242,6 +245,10 @@ impl StartCmd { ); } + #[cfg(feature = "getblocktemplate-rpcs")] + // Create a channel to send mined blocks to the gossip task + let submit_block_channel = SubmitBlockChannel::new(); + // Launch RPC server let (rpc_task_handle, mut rpc_tx_queue_task_handle) = if let Some(listen_addr) = config.rpc.listen_addr { @@ -259,6 +266,10 @@ impl StartCmd { address_book.clone(), latest_chain_tip.clone(), config.network.network.clone(), + #[cfg(feature = "getblocktemplate-rpcs")] + Some(submit_block_channel.sender()), + #[cfg(not(feature = "getblocktemplate-rpcs"))] + None, ); rpc_task_handle.await.unwrap() } else { @@ -301,6 +312,10 @@ impl StartCmd { sync_status.clone(), chain_tip_change.clone(), peer_set.clone(), + #[cfg(feature = "getblocktemplate-rpcs")] + Some(submit_block_channel.receiver()), + #[cfg(not(feature = "getblocktemplate-rpcs"))] + None, ) .in_current_span(), ); @@ -382,6 +397,7 @@ impl StartCmd { #[cfg(feature = "internal-miner")] let miner_task_handle = if config.mining.is_internal_miner_enabled() { info!("spawning Zcash miner"); + let rpc = zebra_rpc::methods::get_block_template_rpcs::GetBlockTemplateRpcImpl::new( &config.network.network, config.mining.clone(), @@ -391,6 +407,7 @@ impl StartCmd { block_verifier_router, sync_status, address_book, + Some(submit_block_channel.sender()), ); crate::components::miner::spawn_init(&config.network.network, &config.mining, rpc) diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs 
b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 07402dafb50..f3c05315258 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -5,7 +5,7 @@ use std::{collections::HashSet, iter, net::SocketAddr, str::FromStr, sync::Arc, use futures::FutureExt; use tokio::{sync::oneshot, task::JoinHandle, time::timeout}; use tower::{buffer::Buffer, builder::ServiceBuilder, util::BoxService, Service, ServiceExt}; -use tracing::Span; +use tracing::{Instrument, Span}; use zebra_chain::{ amount::Amount, @@ -24,6 +24,8 @@ use zebra_network::{ AddressBook, InventoryResponse, Request, Response, }; use zebra_node_services::mempool; +#[cfg(feature = "getblocktemplate-rpcs")] +use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -974,11 +976,20 @@ async fn setup( // Pretend we're close to tip SyncStatus::sync_close_to_tip(&mut recent_syncs); - let sync_gossip_task_handle = tokio::spawn(sync::gossip_best_tip_block_hashes( - sync_status.clone(), - chain_tip_change.clone(), - peer_set.clone(), - )); + #[cfg(feature = "getblocktemplate-rpcs")] + let submitblock_channel = SubmitBlockChannel::new(); + let sync_gossip_task_handle = tokio::spawn( + sync::gossip_best_tip_block_hashes( + sync_status.clone(), + chain_tip_change.clone(), + peer_set.clone(), + #[cfg(feature = "getblocktemplate-rpcs")] + Some(submitblock_channel.receiver()), + #[cfg(not(feature = "getblocktemplate-rpcs"))] + None, + ) + .in_current_span(), + ); let tx_gossip_task_handle = tokio::spawn(gossip_mempool_transaction_id( transaction_receiver, diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index e4c0f08659e..11211ccabed 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs 
+++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -21,6 +21,8 @@ use zebra_network::{ Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, }; use zebra_node_services::mempool; +#[cfg(feature = "getblocktemplate-rpcs")] +use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use zebra_state::Config as StateConfig; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -725,10 +727,17 @@ async fn setup( // We can't expect or unwrap because the returned Result does not implement Debug assert!(r.is_ok(), "unexpected setup channel send failure"); + #[cfg(feature = "getblocktemplate-rpcs")] + let submitblock_channel = SubmitBlockChannel::new(); + let block_gossip_task_handle = tokio::spawn(sync::gossip_best_tip_block_hashes( sync_status.clone(), chain_tip_change, peer_set.clone(), + #[cfg(feature = "getblocktemplate-rpcs")] + Some(submitblock_channel.receiver()), + #[cfg(not(feature = "getblocktemplate-rpcs"))] + None, )); let tx_gossip_task_handle = tokio::spawn(gossip_mempool_transaction_id( @@ -782,3 +791,115 @@ async fn setup( listen_addr, ) } + +#[cfg(feature = "getblocktemplate-rpcs")] +mod submitblock_test { + use std::io; + use std::sync::{Arc, Mutex}; + use tracing::{Instrument, Level}; + use tracing_subscriber::fmt; + + use super::*; + + use crate::components::sync::PEER_GOSSIP_DELAY; + + // Custom in-memory writer to capture logs + struct TestWriter(Arc>>); + + impl io::Write for TestWriter { + #[allow(clippy::unwrap_in_result)] + fn write(&mut self, buf: &[u8]) -> io::Result { + let mut logs = self.0.lock().unwrap(); + logs.extend_from_slice(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + #[tokio::test] + async fn submitblock_channel() -> Result<(), crate::BoxError> { + let logs = Arc::new(Mutex::new(Vec::new())); + let log_sink = logs.clone(); + + // Set up a tracing subscriber with a custom writer + let subscriber = fmt() 
+ .with_max_level(Level::INFO) + .with_writer(move || TestWriter(log_sink.clone())) // Write logs to an in-memory buffer + .finish(); + + let _guard = tracing::subscriber::set_default(subscriber); + + let (sync_status, _recent_syncs) = SyncStatus::new(); + + // State + let state_config = StateConfig::ephemeral(); + let (_state_service, _read_only_state_service, latest_chain_tip, chain_tip_change) = + zebra_state::init(state_config, &Network::Mainnet, Height::MAX, 0); + + let config_listen_addr = "127.0.0.1:0".parse().unwrap(); + + // Network + let network_config = NetworkConfig { + network: Network::Mainnet, + listen_addr: config_listen_addr, + + // Stop Zebra making outbound connections + initial_mainnet_peers: IndexSet::new(), + initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), + + ..NetworkConfig::default() + }; + + // Inbound + let (_setup_tx, setup_rx) = oneshot::channel(); + let inbound_service = Inbound::new(MAX_INBOUND_CONCURRENCY, setup_rx); + let inbound_service = ServiceBuilder::new() + .load_shed() + .buffer(10) + .service(BoxService::new(inbound_service)); + + let (peer_set, _address_book) = zebra_network::init( + network_config, + inbound_service.clone(), + latest_chain_tip.clone(), + "Zebra user agent".to_string(), + ) + .await; + + // Start the block gossip task with a SubmitBlockChannel + let submitblock_channel = SubmitBlockChannel::new(); + let gossip_task_handle = tokio::spawn( + sync::gossip_best_tip_block_hashes( + sync_status.clone(), + chain_tip_change, + peer_set.clone(), + Some(submitblock_channel.receiver()), + ) + .in_current_span(), + ); + + // Send a block top the channel + submitblock_channel + .sender() + .send((block::Hash([1; 32]), block::Height(1))) + .unwrap(); + + // Wait for the block gossip task to process the block + tokio::time::sleep(PEER_GOSSIP_DELAY).await; + + // Check that the block was processed as a mnined block by the gossip task + let captured_logs = logs.lock().unwrap(); + let log_output = 
String::from_utf8(captured_logs.clone()).unwrap(); + + assert!(log_output.contains("initializing block gossip task")); + assert!(log_output.contains("sending mined block broadcast")); + + std::mem::drop(gossip_task_handle); + + Ok(()) + } +} diff --git a/zebrad/src/components/sync/gossip.rs b/zebrad/src/components/sync/gossip.rs index 9cb02c6529f..5fbbbbd65ec 100644 --- a/zebrad/src/components/sync/gossip.rs +++ b/zebrad/src/components/sync/gossip.rs @@ -2,10 +2,13 @@ //! //! [`block::Hash`]: zebra_chain::block::Hash +use futures::TryFutureExt; use thiserror::Error; use tokio::sync::watch; use tower::{timeout::Timeout, Service, ServiceExt}; +use tracing::Instrument; +use zebra_chain::block; use zebra_network as zn; use zebra_state::ChainTipChange; @@ -43,9 +46,10 @@ pub enum BlockGossipError { /// /// [`block::Hash`]: zebra_chain::block::Hash pub async fn gossip_best_tip_block_hashes( - mut sync_status: SyncStatus, + sync_status: SyncStatus, mut chain_state: ChainTipChange, broadcast_network: ZN, + mut mined_block_receiver: Option>, ) -> Result<(), BlockGossipError> where ZN: Service + Send + Clone + 'static, @@ -58,27 +62,56 @@ where let mut broadcast_network = Timeout::new(broadcast_network, TIPS_RESPONSE_TIMEOUT); loop { - // wait for at least one tip change, to make sure we have a new block hash to broadcast - let tip_action = chain_state.wait_for_tip_change().await.map_err(TipChange)?; - - // wait until we're close to the tip, because broadcasts are only useful for nodes near the tip - // (if they're a long way from the tip, they use the syncer and block locators) - sync_status - .wait_until_close_to_tip() - .await - .map_err(SyncStatus)?; - - // get the latest tip change - it might be different to the change we awaited, - // because the syncer might take a long time to reach the tip - let tip_action = chain_state.last_tip_change().unwrap_or(tip_action); + let mut sync_status = sync_status.clone(); + let mut chain_tip = chain_state.clone(); + let 
tip_change_close_to_network_tip_fut = async move { + // wait for at least one tip change, to make sure we have a new block hash to broadcast + let tip_action = chain_tip.wait_for_tip_change().await.map_err(TipChange)?; + + // wait until we're close to the tip, because broadcasts are only useful for nodes near the tip + // (if they're a long way from the tip, they use the syncer and block locators), unless a mined block + // hash is received before `wait_until_close_to_tip()` is ready. + sync_status + .wait_until_close_to_tip() + .map_err(SyncStatus) + .await?; + + // get the latest tip change when close to tip - it might be different to the change we awaited, + // because the syncer might take a long time to reach the tip + let best_tip = chain_tip + .last_tip_change() + .unwrap_or(tip_action) + .best_tip_hash_and_height(); + + Ok((best_tip, "sending committed block broadcast", chain_tip)) + } + .in_current_span(); + + let ((hash, height), log_msg, updated_chain_state) = if let Some(mined_block_receiver) = + mined_block_receiver.as_mut() + { + tokio::select! { + tip_change_close_to_network_tip = tip_change_close_to_network_tip_fut => { + mined_block_receiver.mark_unchanged(); + tip_change_close_to_network_tip? + }, + + Ok(_) = mined_block_receiver.changed() => { + // we have a new block to broadcast from the `submitblock `RPC method, get block data and release the channel. + (*mined_block_receiver.borrow_and_update(), "sending mined block broadcast", chain_state) + } + } + } else { + tip_change_close_to_network_tip_fut.await? 
+ }; + + chain_state = updated_chain_state; // block broadcasts inform other nodes about new blocks, // so our internal Grow or Reset state doesn't matter to them - let request = zn::Request::AdvertiseBlock(tip_action.best_tip_hash()); - - let height = tip_action.best_tip_height(); - debug!(?height, ?request, "sending committed block broadcast"); + let request = zn::Request::AdvertiseBlock(hash); + info!(?height, ?request, log_msg); // broadcast requests don't return errors, and we'd just want to ignore them anyway let _ = broadcast_network .ready() diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 584a28f7705..270a0e955c7 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -162,8 +162,8 @@ use color_eyre::{ }; use semver::Version; use serde_json::Value; - use tower::ServiceExt; + use zebra_chain::{ block::{self, genesis::regtest_genesis_block, Height}, parameters::Network::{self, *}, @@ -3270,8 +3270,10 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { fetch_state_tip_and_local_time, generate_coinbase_and_roots, proposal_block_from_template, GetBlockTemplate, GetBlockTemplateRequestMode, }, - types::get_block_template, - types::submit_block, + types::{ + get_block_template, + submit_block::{self, SubmitBlockChannel}, + }, }, hex_data::HexData, GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, @@ -3340,6 +3342,8 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); + let submitblock_channel = SubmitBlockChannel::new(); + let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( &network, mining_config, @@ -3349,6 +3353,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { block_verifier_router, mock_sync_status, MockAddressBookPeers::default(), + Some(submitblock_channel.sender()), ); let make_mock_mempool_request_handler = || async move { @@ -3406,6 
+3411,17 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { "valid block should be accepted" ); + // Check that the submitblock channel received the submitted block + let submit_block_channel_data = *submitblock_channel.receiver().borrow_and_update(); + assert_eq!( + submit_block_channel_data, + ( + proposal_block.hash(), + proposal_block.coinbase_height().unwrap() + ), + "submitblock channel should receive the submitted block" + ); + // Use an invalid coinbase transaction (with an output value greater than the `block_subsidy + miner_fees - expected_lockbox_funding_stream`) let make_configured_recipients_with_lockbox_numerator = |numerator| { From bbb8e3d650fb51afee75b81d979f5e8f6cca3452 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 5 Feb 2025 14:06:36 +0000 Subject: [PATCH 080/245] fix(ci): pin release-drafter to v6.0.0 to prevent duplicate drafts (#9207) Release Drafter v6.1.0 introduced a regression that causes it to create multiple duplicate draft releases instead of updating existing ones. This was causing ~160 duplicate draft releases to be created for Zebra 2.2.0. Pin to v6.0.0 which correctly finds and updates existing draft releases. This is a known issue tracked in release-drafter/release-drafter#1425. 
--- .github/workflows/release-drafter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index fd4c39a728c..6b1e21364d3 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes - - uses: release-drafter/release-drafter@v6 + - uses: release-drafter/release-drafter@v6.0.0 with: config-name: release-drafter.yml commitish: main From e0c3a55998979a713ef44daa338f80d7479a34f0 Mon Sep 17 00:00:00 2001 From: cryptoraph Date: Thu, 6 Feb 2025 03:04:18 +0100 Subject: [PATCH 081/245] Fix Typos in Continuous Integration and Release Process Documentation (#9209) * typo continuous-integration.md * typo release-process.md --- book/src/dev/continuous-integration.md | 2 +- book/src/dev/release-process.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index d59ad00aeee..7e2a452d03c 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -16,7 +16,7 @@ Some of our builds and tests are repeated on the `main` branch, due to: - our cached state sharing rules, or - generating base coverage for PR coverage reports. -Currently, each Zebra and lightwalletd full and update sync will updates cached state images, +Currently, each Zebra and lightwalletd full and update sync will update cached state images, which are shared by all tests. Tests prefer the latest image generated from the same commit. But if a state from the same commit is not available, tests will use the latest image from any branch and commit, as long as the state version is the same.
diff --git a/book/src/dev/release-process.md b/book/src/dev/release-process.md index 606e9a03682..abcf90d11f3 100644 --- a/book/src/dev/release-process.md +++ b/book/src/dev/release-process.md @@ -65,7 +65,7 @@ We let you preview what's coming by providing Release Candidate \(`rc`\) pre-rel ### Distribution tags -Zebras's tagging relates directly to versions published on Docker. We will reference these [Docker Hub distribution tags](https://hub.docker.com/r/zfnd/zebra/tags) throughout: +Zebra's tagging relates directly to versions published on Docker. We will reference these [Docker Hub distribution tags](https://hub.docker.com/r/zfnd/zebra/tags) throughout: | Tag | Description | |:--- |:--- | From 97460cfccafa59aa3f6d88e3752c74d4ea657536 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 7 Feb 2025 10:00:51 -0500 Subject: [PATCH 082/245] fix(chain): Use network kind of `TestnetKind` in transparent addresses on Regtest (#9175) * Use `TestnetKind` as transparent addr network kinds on Regtest * Updates outdated error message --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/parameters/network.rs | 13 +++++++++++-- zebra-chain/src/primitives/zcash_primitives.rs | 4 ++-- zebra-consensus/src/error.rs | 2 +- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index e8571340b7d..71ede1ba7f6 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -29,8 +29,7 @@ pub enum NetworkKind { /// A test network. Testnet, - /// Regtest mode, not yet implemented - // TODO: Add `new_regtest()` and `is_regtest` methods on `Network`. + /// Regtest mode Regtest, } @@ -186,6 +185,16 @@ impl Network { } } + /// Returns [`NetworkKind::Testnet`] on Testnet and Regtest, or [`NetworkKind::Mainnet`] on Mainnet. 
+ /// + /// This is used for transparent addresses, as the address prefix is the same on Regtest as it is on Testnet. + pub fn t_addr_kind(&self) -> NetworkKind { + match self { + Network::Mainnet => NetworkKind::Mainnet, + Network::Testnet(_) => NetworkKind::Testnet, + } + } + /// Returns an iterator over [`Network`] variants. pub fn iter() -> impl Iterator { [Self::Mainnet, Self::new_default_testnet()].into_iter() diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 149ca423cd4..6dbff2df09c 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -351,10 +351,10 @@ pub(crate) fn transparent_output_address( match alt_addr { Some(zcash_primitives::legacy::TransparentAddress::PublicKeyHash(pub_key_hash)) => Some( - transparent::Address::from_pub_key_hash(network.kind(), pub_key_hash), + transparent::Address::from_pub_key_hash(network.t_addr_kind(), pub_key_hash), ), Some(zcash_primitives::legacy::TransparentAddress::ScriptHash(script_hash)) => Some( - transparent::Address::from_script_hash(network.kind(), script_hash), + transparent::Address::from_script_hash(network.t_addr_kind(), script_hash), ), None => None, } diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index caf68bad225..9c7307d5ee1 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -204,7 +204,7 @@ pub enum TransactionError { #[error("could not find a mempool transaction input UTXO in the best chain")] TransparentInputNotFound, - #[error("could not validate nullifiers and anchors on best chain: {0}")] + #[error("could not contextually validate transaction on best chain: {0}")] #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] // This error variant is at least 128 bytes ValidateContextError(Box), From 9bda2c8474c161004da9a22e714ed47136c4d824 Mon Sep 17 00:00:00 2001 From: Elijah Hampton Date: Tue, 11 Feb 2025 12:12:24 
+0000 Subject: [PATCH 083/245] Add invalidate block method and invalidated_blocks field to NonFinalizedState (#9167) * Adds new invalidate_block method to non finalized state. Adds test case in vectors.rs. Updates non finalized state to track invalidated_blocks * Removes InvalidatedBlockData struct. Wraps invalidated_blocks HashMap values with Arc. Optimizies invalidate_block fn to return early if root has hash of desired block. * Update metrics whenever chain_set is modified * Adds child_blocks method and refactors invalidate_block in chain.rs. Refactors invalidate_block in NonFinalizedState. --------- Co-authored-by: Elijah Hampton --- .../src/service/non_finalized_state.rs | 44 ++++++++- .../src/service/non_finalized_state/chain.rs | 24 ++++- .../non_finalized_state/tests/vectors.rs | 90 ++++++++++++++++++- 3 files changed, 154 insertions(+), 4 deletions(-) diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index ebcbb2cfd35..91ae30ae23d 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -9,7 +9,7 @@ use std::{ }; use zebra_chain::{ - block::{self, Block}, + block::{self, Block, Hash}, parameters::Network, sprout, transparent, }; @@ -45,6 +45,10 @@ pub struct NonFinalizedState { /// callers should migrate to `chain_iter().next()`. chain_set: BTreeSet>, + /// Blocks that have been invalidated in, and removed from, the non finalized + /// state. + invalidated_blocks: HashMap>>, + // Configuration // /// The configured Zcash network. 
@@ -92,6 +96,7 @@ impl Clone for NonFinalizedState { Self { chain_set: self.chain_set.clone(), network: self.network.clone(), + invalidated_blocks: self.invalidated_blocks.clone(), #[cfg(feature = "getblocktemplate-rpcs")] should_count_metrics: self.should_count_metrics, @@ -112,6 +117,7 @@ impl NonFinalizedState { NonFinalizedState { chain_set: Default::default(), network: network.clone(), + invalidated_blocks: Default::default(), #[cfg(feature = "getblocktemplate-rpcs")] should_count_metrics: true, #[cfg(feature = "progress-bar")] @@ -264,6 +270,37 @@ impl NonFinalizedState { Ok(()) } + /// Invalidate block with hash `block_hash` and all descendants from the non-finalized state. Insert + /// the new chain into the chain_set and discard the previous. + pub fn invalidate_block(&mut self, block_hash: Hash) { + let Some(chain) = self.find_chain(|chain| chain.contains_block_hash(block_hash)) else { + return; + }; + + let invalidated_blocks = if chain.non_finalized_root_hash() == block_hash { + self.chain_set.remove(&chain); + chain.blocks.values().cloned().collect() + } else { + let (new_chain, invalidated_blocks) = chain + .invalidate_block(block_hash) + .expect("already checked that chain contains hash"); + + // Add the new chain fork or updated chain to the set of recent chains, and + // remove the chain containing the hash of the block from chain set + self.insert_with(Arc::new(new_chain.clone()), |chain_set| { + chain_set.retain(|c| !c.contains_block_hash(block_hash)) + }); + + invalidated_blocks + }; + + self.invalidated_blocks + .insert(block_hash, Arc::new(invalidated_blocks)); + + self.update_metrics_for_chains(); + self.update_metrics_bars(); + } + /// Commit block to the non-finalized state as a new chain where its parent /// is the finalized tip. #[tracing::instrument(level = "debug", skip(self, finalized_state, prepared))] @@ -586,6 +623,11 @@ impl NonFinalizedState { self.chain_set.len() } + /// Return the invalidated blocks. 
+ pub fn invalidated_blocks(&self) -> HashMap>> { + self.invalidated_blocks.clone() + } + /// Return the chain whose tip block hash is `parent_hash`. /// /// The chain can be an existing chain in the non-finalized state, or a freshly diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 6ad284a23f5..c7d0d2877c6 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -359,6 +359,26 @@ impl Chain { (block, treestate) } + // Returns the block at the provided height and all of its descendant blocks. + pub fn child_blocks(&self, block_height: &block::Height) -> Vec { + self.blocks + .range(block_height..) + .map(|(_h, b)| b.clone()) + .collect() + } + + // Returns a new chain without the invalidated block or its descendants. + pub fn invalidate_block( + &self, + block_hash: block::Hash, + ) -> Option<(Self, Vec)> { + let block_height = self.height_by_hash(block_hash)?; + let mut new_chain = self.fork(block_hash)?; + new_chain.pop_tip(); + new_chain.last_fork_height = None; + Some((new_chain, self.child_blocks(&block_height))) + } + /// Returns the height of the chain root. pub fn non_finalized_root_height(&self) -> block::Height { self.blocks @@ -1600,7 +1620,7 @@ impl DerefMut for Chain { /// The revert position being performed on a chain. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum RevertPosition { +pub(crate) enum RevertPosition { /// The chain root is being reverted via [`Chain::pop_root`], when a block /// is finalized. Root, @@ -1619,7 +1639,7 @@ enum RevertPosition { /// and [`Chain::pop_tip`] functions, and fear that it would be easy to /// introduce bugs when updating them, unless the code was reorganized to keep /// related operations adjacent to each other. 
-trait UpdateWith { +pub(crate) trait UpdateWith { /// When `T` is added to the chain tip, /// update [`Chain`] cumulative data members to add data that are derived from `T`. fn update_chain_tip_with(&mut self, _: &T) -> Result<(), ValidateContextError>; diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index b489d6f94f0..5b392e4a0b9 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use zebra_chain::{ amount::NonNegative, - block::{Block, Height}, + block::{self, Block, Height}, history_tree::NonEmptyHistoryTree, parameters::{Network, NetworkUpgrade}, serialization::ZcashDeserializeInto, @@ -216,6 +216,94 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { Ok(()) } +fn invalidate_block_removes_block_and_descendants_from_chain_for_network( + network: Network, +) -> Result<()> { + let block1: Arc = Arc::new(network.test_block(653599, 583999).unwrap()); + let block2 = block1.make_fake_child().set_work(10); + let block3 = block2.make_fake_child().set_work(1); + + let mut state = NonFinalizedState::new(&network); + let finalized_state = FinalizedState::new( + &Config::ephemeral(), + &network, + #[cfg(feature = "elasticsearch")] + false, + ); + + let fake_value_pool = ValueBalance::::fake_populated_pool(); + finalized_state.set_finalized_value_pool(fake_value_pool); + + state.commit_new_chain(block1.clone().prepare(), &finalized_state)?; + state.commit_block(block2.clone().prepare(), &finalized_state)?; + state.commit_block(block3.clone().prepare(), &finalized_state)?; + + assert_eq!( + state + .best_chain() + .unwrap_or(&Arc::new(Chain::default())) + .blocks + .len(), + 3 + ); + + state.invalidate_block(block2.hash()); + + let post_invalidated_chain = state.best_chain().unwrap(); + + assert_eq!(post_invalidated_chain.blocks.len(), 
1); + assert!( + post_invalidated_chain.contains_block_hash(block1.hash()), + "the new modified chain should contain block1" + ); + + assert!( + !post_invalidated_chain.contains_block_hash(block2.hash()), + "the new modified chain should not contain block2" + ); + assert!( + !post_invalidated_chain.contains_block_hash(block3.hash()), + "the new modified chain should not contain block3" + ); + + let invalidated_blocks_state = &state.invalidated_blocks; + assert!( + invalidated_blocks_state.contains_key(&block2.hash()), + "invalidated blocks map should reference the hash of block2" + ); + + let invalidated_blocks_state_descendants = + invalidated_blocks_state.get(&block2.hash()).unwrap(); + + match network { + Network::Mainnet => assert!( + invalidated_blocks_state_descendants + .iter() + .any(|block| block.height == block::Height(653601)), + "invalidated descendants vec should contain block3" + ), + Network::Testnet(_parameters) => assert!( + invalidated_blocks_state_descendants + .iter() + .any(|block| block.height == block::Height(584001)), + "invalidated descendants vec should contain block3" + ), + } + + Ok(()) +} + +#[test] +fn invalidate_block_removes_block_and_descendants_from_chain() -> Result<()> { + let _init_guard = zebra_test::init(); + + for network in Network::iter() { + invalidate_block_removes_block_and_descendants_from_chain_for_network(network)?; + } + + Ok(()) +} + #[test] // This test gives full coverage for `take_chain_if` fn commit_block_extending_best_chain_doesnt_drop_worst_chains() -> Result<()> { From 7445d81295101886ae39183634e2f3037c3b6a8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 13:27:13 +0000 Subject: [PATCH 084/245] build(deps): bump the devops group with 5 updates (#9208) Bumps the devops group with 5 updates: | Package | From | To | | --- | --- | --- | | [google-github-actions/auth](https://github.com/google-github-actions/auth) | `2.1.7` | `2.1.8` | | 
[google-github-actions/setup-gcloud](https://github.com/google-github-actions/setup-gcloud) | `2.1.2` | `2.1.4` | | [tj-actions/changed-files](https://github.com/tj-actions/changed-files) | `45.0.6` | `45.0.7` | | [jontze/action-mdbook](https://github.com/jontze/action-mdbook) | `3.0.1` | `4.0.0` | | [release-drafter/release-drafter](https://github.com/release-drafter/release-drafter) | `6.0.0` | `6.1.0` | Updates `google-github-actions/auth` from 2.1.7 to 2.1.8 - [Release notes](https://github.com/google-github-actions/auth/releases) - [Changelog](https://github.com/google-github-actions/auth/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/auth/compare/v2.1.7...v2.1.8) Updates `google-github-actions/setup-gcloud` from 2.1.2 to 2.1.4 - [Release notes](https://github.com/google-github-actions/setup-gcloud/releases) - [Changelog](https://github.com/google-github-actions/setup-gcloud/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/setup-gcloud/compare/v2.1.2...v2.1.4) Updates `tj-actions/changed-files` from 45.0.6 to 45.0.7 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v45.0.6...v45.0.7) Updates `jontze/action-mdbook` from 3.0.1 to 4.0.0 - [Release notes](https://github.com/jontze/action-mdbook/releases) - [Changelog](https://github.com/jontze/action-mdbook/blob/master/CHANGELOG.md) - [Commits](https://github.com/jontze/action-mdbook/compare/v3.0.1...v4.0.0) Updates `release-drafter/release-drafter` from 6.0.0 to 6.1.0 - [Release notes](https://github.com/release-drafter/release-drafter/releases) - [Commits](https://github.com/release-drafter/release-drafter/compare/v6.0.0...v6.1.0) --- updated-dependencies: - dependency-name: google-github-actions/auth dependency-type: direct:production update-type: version-update:semver-patch 
dependency-group: devops - dependency-name: google-github-actions/setup-gcloud dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: jontze/action-mdbook dependency-type: direct:production update-type: version-update:semver-major dependency-group: devops - dependency-name: release-drafter/release-drafter dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cd-deploy-nodes-gcp.yml | 8 ++++---- .github/workflows/chore-delete-gcp-resources.yml | 6 +++--- .github/workflows/ci-lint.yml | 4 ++-- .github/workflows/docs-deploy-firebase.yml | 6 +++--- .github/workflows/manual-zcashd-deploy.yml | 4 ++-- .github/workflows/release-drafter.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- .../workflows/sub-deploy-integration-tests-gcp.yml | 12 ++++++------ .github/workflows/sub-find-cached-disks.yml | 4 ++-- 9 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index cccdd4af542..315a7dc4464 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -269,13 +269,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 - name: Create instance template for ${{ matrix.network }} run: | @@ -384,13 +384,13 @@ 
jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Create instance template from container image - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 962442fc8d8..661c8c05093 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -50,13 +50,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Deletes all mainnet and testnet instances older than $DELETE_INSTANCE_DAYS days. 
# @@ -121,7 +121,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 0e40daa7c7e..df13ae1b1f7 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.6 + uses: tj-actions/changed-files@v45.0.7 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.6 + uses: tj-actions/changed-files@v45.0.7 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 0154ffe1bd7..eca70c4d98b 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -92,7 +92,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Setup mdBook - uses: jontze/action-mdbook@v3.0.1 + uses: jontze/action-mdbook@v4.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} mdbook-version: '~0.4' @@ -106,7 +106,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' @@ -164,7 +164,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 8fc5951d142..8d6541ff370 100644 --- 
a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -52,13 +52,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Create instance template from container image - name: Create instance template diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 6b1e21364d3..b5025a4b463 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes - - uses: release-drafter/release-drafter@v6.0.0 + - uses: release-drafter/release-drafter@v6.1.0 with: config-name: release-drafter.yml commitish: main diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 743b3e1565c..c5142babe31 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -127,7 +127,7 @@ jobs: - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_ARTIFACTS_SA }}' diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index bd23da1f31b..1a8854febd0 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -172,13 +172,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: 
google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME env as the source image to populate the disk cached state @@ -429,13 +429,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, # and the empty string otherwise. @@ -695,13 +695,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Deletes the instances that has been recently deployed in the actual commit after all # previous jobs have run, no matter the outcome of the job. 
diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index a45e3f731fa..d0dd52d6c1e 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -67,13 +67,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.7 + uses: google-github-actions/auth@v2.1.8 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.2 + uses: google-github-actions/setup-gcloud@v2.1.4 # Performs formatting on disk name components. # From 3a0269d625c3d6a8cd495c40afc97edd13c5879f Mon Sep 17 00:00:00 2001 From: Metalcape Date: Tue, 11 Feb 2025 17:25:20 +0100 Subject: [PATCH 085/245] feat(rpc); Add `blockcommitments` field to `getblock` output (#9217) * Add blockcommitments field to getblock output * Parse commitment_bytes from block header * Update tests * Rustfmt --------- Co-authored-by: Marek --- zebra-chain/src/block/header.rs | 13 +++++- zebra-rpc/src/methods.rs | 31 +++++++++++++- ..._block_header_hash_verbose@mainnet_10.snap | 1 + ..._block_header_hash_verbose@testnet_10.snap | 1 + ...lock_header_height_verbose@mainnet_10.snap | 1 + ...lock_header_height_verbose@testnet_10.snap | 1 + ...k_verbose_hash_verbosity_1@mainnet_10.snap | 1 + ...k_verbose_hash_verbosity_1@testnet_10.snap | 1 + ...k_verbose_hash_verbosity_2@mainnet_10.snap | 1 + ...k_verbose_hash_verbosity_2@testnet_10.snap | 1 + ...ose_hash_verbosity_default@mainnet_10.snap | 1 + ...ose_hash_verbosity_default@testnet_10.snap | 1 + ...verbose_height_verbosity_1@mainnet_10.snap | 1 + ...verbose_height_verbosity_1@testnet_10.snap | 1 + ...verbose_height_verbosity_2@mainnet_10.snap | 1 + ...verbose_height_verbosity_2@testnet_10.snap | 1 + ...e_height_verbosity_default@mainnet_10.snap | 1 + ...e_height_verbosity_default@testnet_10.snap | 1 + 
zebra-rpc/src/methods/tests/vectors.rs | 42 ++++++++++++++----- 19 files changed, 89 insertions(+), 13 deletions(-) diff --git a/zebra-chain/src/block/header.rs b/zebra-chain/src/block/header.rs index 1bbec3b471c..c72a9bba7af 100644 --- a/zebra-chain/src/block/header.rs +++ b/zebra-chain/src/block/header.rs @@ -7,11 +7,12 @@ use thiserror::Error; use crate::{ fmt::HexDebug, + parameters::Network, serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, work::{difficulty::CompactDifficulty, equihash::Solution}, }; -use super::{merkle, Hash, Height}; +use super::{merkle, Commitment, CommitmentError, Hash, Height}; #[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; @@ -124,6 +125,16 @@ impl Header { } } + /// Get the parsed block [`Commitment`] for this header. + /// Its interpretation depends on the given `network` and block `height`. + pub fn commitment( + &self, + network: &Network, + height: Height, + ) -> Result { + Commitment::from_bytes(*self.commitment_bytes, network, height) + } + /// Compute the hash of this header. 
pub fn hash(&self) -> Hash { Hash::from(self) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 4c6de27b69b..df42dcbb3b6 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -22,7 +22,7 @@ use tracing::Instrument; use zcash_primitives::consensus::Parameters; use zebra_chain::{ - block::{self, Height, SerializedBlock}, + block::{self, Commitment, Height, SerializedBlock}, chain_tip::{ChainTip, NetworkChainTipHeightEstimator}, parameters::{ConsensusBranchId, Network, NetworkUpgrade}, serialization::{ZcashDeserialize, ZcashSerialize}, @@ -797,6 +797,7 @@ where height, version, merkle_root, + block_commitments, final_sapling_root, sapling_tree_size, time, @@ -908,6 +909,7 @@ where tx, trees, size: None, + block_commitments: Some(block_commitments), final_sapling_root: Some(final_sapling_root), final_orchard_root, previous_block_hash: Some(previous_block_hash), @@ -1005,12 +1007,25 @@ where let difficulty = header.difficulty_threshold.relative_to_network(&network); + let block_commitments = match header.commitment(&network, height).expect( + "Unexpected failure while parsing the blockcommitments field in get_block_header", + ) { + Commitment::PreSaplingReserved(bytes) => bytes, + Commitment::FinalSaplingRoot(_) => final_sapling_root, + Commitment::ChainHistoryActivationReserved => [0; 32], + Commitment::ChainHistoryRoot(root) => root.bytes_in_display_order(), + Commitment::ChainHistoryBlockTxAuthCommitment(hash) => { + hash.bytes_in_display_order() + } + }; + let block_header = GetBlockHeaderObject { hash: GetBlockHash(hash), confirmations, height, version: header.version, merkle_root: header.merkle_root, + block_commitments, final_sapling_root, sapling_tree_size, time: header.time.timestamp(), @@ -1850,7 +1865,12 @@ pub enum GetBlock { #[serde(skip_serializing_if = "Option::is_none")] merkle_root: Option, - // `blockcommitments` would be here. Undocumented. 
TODO: decide if we want to support it + /// The blockcommitments field of the requested block. Its interpretation changes + /// depending on the network and height. + #[serde(with = "opthex", rename = "blockcommitments")] + #[serde(skip_serializing_if = "Option::is_none")] + block_commitments: Option<[u8; 32]>, + // `authdataroot` would be here. Undocumented. TODO: decide if we want to support it // /// The root of the Sapling commitment tree after applying this block. @@ -1924,6 +1944,7 @@ impl Default for GetBlock { size: None, version: None, merkle_root: None, + block_commitments: None, final_sapling_root: None, final_orchard_root: None, nonce: None, @@ -1982,6 +2003,11 @@ pub struct GetBlockHeaderObject { #[serde(with = "hex", rename = "merkleroot")] pub merkle_root: block::merkle::Root, + /// The blockcommitments field of the requested block. Its interpretation changes + /// depending on the network and height. + #[serde(with = "hex", rename = "blockcommitments")] + pub block_commitments: [u8; 32], + /// The root of the Sapling commitment tree after applying this block. 
#[serde(with = "hex", rename = "finalsaplingroot")] pub final_sapling_root: [u8; 32], @@ -2035,6 +2061,7 @@ impl Default for GetBlockHeaderObject { height: Height::MIN, version: 4, merkle_root: block::merkle::Root([0; 32]), + block_commitments: Default::default(), final_sapling_root: Default::default(), sapling_tree_size: Default::default(), time: 0, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap index 723bf78e62e..78b6db434b9 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "time": 1477671596, "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap index 0d76afbbb96..03ae04e2e84 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_hash_verbose@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "time": 1477674473, "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", diff 
--git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap index 723bf78e62e..78b6db434b9 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "time": 1477671596, "nonce": "9057977ea6d4ae867decc96359fcf2db8cdebcbfb3bd549de4f21f16cfe83475", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap index 0d76afbbb96..03ae04e2e84 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_header_height_verbose@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "time": 1477674473, "nonce": "0000e5739438a096ca89cde16bcf6001e0c5a7ce6f7c591d26314c26c2560000", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap index 93010ad42d4..6a5c246fefb 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap +++ 
b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap index 5bd22590f1b..9054e0f3ec9 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 51729b13293..a974f13a67b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": 
"0000000000000000000000000000000000000000000000000000000000000000", "tx": [ { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 51bbfc72f05..c0da399be69 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap index 93010ad42d4..6a5c246fefb 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap index 5bd22590f1b..9054e0f3ec9 100644 --- 
a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap index 93010ad42d4..6a5c246fefb 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap index 5bd22590f1b..9054e0f3ec9 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + 
"blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index 51729b13293..a974f13a67b 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index 51bbfc72f05..c0da399be69 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap index 
93010ad42d4..6a5c246fefb 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap index 5bd22590f1b..9054e0f3ec9 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap @@ -8,6 +8,7 @@ expression: block "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + "blockcommitments": "0000000000000000000000000000000000000000000000000000000000000000", "finalsaplingroot": "0000000000000000000000000000000000000000000000000000000000000000", "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 7d2f88f1983..c2f3700b3bb 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -57,13 +57,13 @@ async fn rpc_getinfo() { assert!(rpc_tx_queue_task_result.is_none()); } -// Helper function that returns the nonce and final sapling root of a given -// Block. +// Helper function that returns the nonce, final sapling root and +// block commitments of a given Block. 
async fn get_block_data( read_state: &ReadStateService, block: Arc, height: usize, -) -> ([u8; 32], [u8; 32]) { +) -> ([u8; 32], [u8; 32], [u8; 32]) { let zebra_state::ReadResponse::SaplingTree(sapling_tree) = read_state .clone() .oneshot(zebra_state::ReadRequest::SaplingTree(HashOrHeight::Height( @@ -85,7 +85,22 @@ async fn get_block_data( } else { [0; 32] }; - (expected_nonce, expected_final_sapling_root) + + let expected_block_commitments = match block + .commitment(&Mainnet) + .expect("Unexpected failure while parsing the blockcommitments field in get_block_data") + { + Commitment::PreSaplingReserved(bytes) => bytes, + Commitment::FinalSaplingRoot(_) => expected_final_sapling_root, + Commitment::ChainHistoryActivationReserved => [0; 32], + Commitment::ChainHistoryRoot(root) => root.bytes_in_display_order(), + Commitment::ChainHistoryBlockTxAuthCommitment(hash) => hash.bytes_in_display_order(), + }; + ( + expected_nonce, + expected_final_sapling_root, + expected_block_commitments, + ) } #[tokio::test(flavor = "multi_thread")] @@ -165,7 +180,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -184,6 +199,7 @@ async fn rpc_getblock() { size: None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -208,7 +224,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -227,6 +243,7 @@ async fn rpc_getblock() { size: 
None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -251,7 +268,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -274,6 +291,7 @@ async fn rpc_getblock() { size: None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -298,7 +316,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -321,6 +339,7 @@ async fn rpc_getblock() { size: None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -345,7 +364,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -364,6 +383,7 @@ async fn rpc_getblock() { size: None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: 
Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -388,7 +408,7 @@ async fn rpc_getblock() { .await .expect("We should have a GetBlock struct"); - let (expected_nonce, expected_final_sapling_root) = + let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; assert_eq!( @@ -407,6 +427,7 @@ async fn rpc_getblock() { size: None, version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), + block_commitments: Some(expected_block_commitments), final_sapling_root: Some(expected_final_sapling_root), final_orchard_root: None, nonce: Some(expected_nonce), @@ -602,6 +623,7 @@ async fn rpc_getblockheader() { height, version: 4, merkle_root: block.header.merkle_root, + block_commitments: block.header.commitment_bytes.0, final_sapling_root: expected_final_sapling_root, sapling_tree_size: sapling_tree.count(), time: block.header.time.timestamp(), From f0ad47101543a0d38e96dc1f2299da15fc83d872 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 12 Feb 2025 16:20:45 +0100 Subject: [PATCH 086/245] Fix doc ref (#9236) --- zebra-chain/src/block/header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-chain/src/block/header.rs b/zebra-chain/src/block/header.rs index c72a9bba7af..39b265e0304 100644 --- a/zebra-chain/src/block/header.rs +++ b/zebra-chain/src/block/header.rs @@ -59,7 +59,7 @@ pub struct Header { /// without incrementing the block [`version`](Self::version). Therefore, /// this field cannot be parsed without the network and height. Use /// [`Block::commitment`](super::Block::commitment) to get the parsed - /// [`Commitment`](super::Commitment). + /// [`Commitment`]. 
pub commitment_bytes: HexDebug<[u8; 32]>, /// The block timestamp is a Unix epoch time (UTC) when the miner From 35e400beafc96d7455fdad4bc2cabf96a8b208f5 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 12 Feb 2025 12:34:19 -0500 Subject: [PATCH 087/245] correctly serialize testnet params into config (#9224) --- zebra-chain/src/parameters/network/subsidy.rs | 2 +- zebra-chain/src/parameters/network/testnet.rs | 82 ++++++++- zebra-network/src/config.rs | 155 ++++++++++++------ zebra-network/src/config/tests/vectors.rs | 19 +++ 4 files changed, 204 insertions(+), 54 deletions(-) diff --git a/zebra-chain/src/parameters/network/subsidy.rs b/zebra-chain/src/parameters/network/subsidy.rs index df3d8c966f4..2fd60a5afbc 100644 --- a/zebra-chain/src/parameters/network/subsidy.rs +++ b/zebra-chain/src/parameters/network/subsidy.rs @@ -54,7 +54,7 @@ pub(crate) const FIRST_HALVING_TESTNET: Height = Height(1_116_000); const FIRST_HALVING_REGTEST: Height = Height(287); /// The funding stream receiver categories. -#[derive(Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] pub enum FundingStreamReceiver { /// The Electric Coin Company (Bootstrap Foundation) funding stream. #[serde(rename = "ECC")] diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 6045a7e2581..2b7fc4920b3 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -1,5 +1,5 @@ //! Types and implementation for Testnet consensus parameters -use std::{collections::BTreeMap, fmt}; +use std::{collections::BTreeMap, fmt, sync::Arc}; use crate::{ block::{self, Height, HeightDiff}, @@ -57,7 +57,7 @@ const TESTNET_GENESIS_HASH: &str = const PRE_BLOSSOM_REGTEST_HALVING_INTERVAL: HeightDiff = 144; /// Configurable funding stream recipient for configured Testnets. 
-#[derive(Deserialize, Clone, Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub struct ConfiguredFundingStreamRecipient { /// Funding stream receiver, see [`FundingStreams::recipients`] for more details. @@ -79,7 +79,7 @@ impl ConfiguredFundingStreamRecipient { } /// Configurable funding streams for configured Testnets. -#[derive(Deserialize, Clone, Default, Debug)] +#[derive(Serialize, Deserialize, Clone, Default, Debug)] #[serde(deny_unknown_fields)] pub struct ConfiguredFundingStreams { /// Start and end height for funding streams see [`FundingStreams::height_range`] for more details. @@ -88,6 +88,71 @@ pub struct ConfiguredFundingStreams { pub recipients: Option>, } +impl From<&FundingStreams> for ConfiguredFundingStreams { + fn from(value: &FundingStreams) -> Self { + Self { + height_range: Some(value.height_range().clone()), + recipients: Some( + value + .recipients() + .iter() + .map(|(receiver, recipient)| ConfiguredFundingStreamRecipient { + receiver: *receiver, + numerator: recipient.numerator(), + addresses: Some( + recipient + .addresses() + .iter() + .map(ToString::to_string) + .collect(), + ), + }) + .collect(), + ), + } + } +} + +impl From<&BTreeMap> for ConfiguredActivationHeights { + fn from(activation_heights: &BTreeMap) -> Self { + let mut configured_activation_heights = ConfiguredActivationHeights::default(); + + for (height, network_upgrade) in activation_heights.iter() { + match network_upgrade { + NetworkUpgrade::BeforeOverwinter => { + configured_activation_heights.before_overwinter = Some(height.0); + } + NetworkUpgrade::Overwinter => { + configured_activation_heights.overwinter = Some(height.0); + } + NetworkUpgrade::Sapling => { + configured_activation_heights.sapling = Some(height.0); + } + NetworkUpgrade::Blossom => { + configured_activation_heights.blossom = Some(height.0); + } + NetworkUpgrade::Heartwood => { + configured_activation_heights.heartwood = Some(height.0); + } + 
NetworkUpgrade::Canopy => { + configured_activation_heights.canopy = Some(height.0); + } + NetworkUpgrade::Nu5 => { + configured_activation_heights.nu5 = Some(height.0); + } + NetworkUpgrade::Nu6 => { + configured_activation_heights.nu6 = Some(height.0); + } + NetworkUpgrade::Genesis => { + continue; + } + } + } + + configured_activation_heights + } +} + impl ConfiguredFundingStreams { /// Returns an empty [`ConfiguredFundingStreams`]. fn empty() -> Self { @@ -185,7 +250,7 @@ fn check_funding_stream_address_period(funding_streams: &FundingStreams, network } /// Configurable activation heights for Regtest and configured Testnets. -#[derive(Deserialize, Default, Clone)] +#[derive(Serialize, Deserialize, Default, Clone)] #[serde(rename_all = "PascalCase", deny_unknown_fields)] pub struct ConfiguredActivationHeights { /// Activation height for `BeforeOverwinter` network upgrade. @@ -754,6 +819,15 @@ impl Parameters { } impl Network { + /// Returns the parameters of this network if it is a Testnet. + pub fn parameters(&self) -> Option> { + if let Self::Testnet(parameters) = self { + Some(parameters.clone()) + } else { + None + } + } + /// Returns true if proof-of-work validation should be disabled for this network pub fn disable_pow(&self) -> bool { if let Self::Testnet(params) = self { diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 8619507fa0d..b401a5d4c93 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -4,6 +4,7 @@ use std::{ collections::HashSet, io::{self, ErrorKind}, net::{IpAddr, SocketAddr}, + sync::Arc, time::Duration, }; @@ -51,7 +52,7 @@ const MAX_SINGLE_SEED_PEER_DNS_RETRIES: usize = 0; /// Configuration for networking code. #[derive(Clone, Debug, Eq, PartialEq, Serialize)] -#[serde(deny_unknown_fields, default)] +#[serde(deny_unknown_fields, default, into = "DConfig")] pub struct Config { /// The address on which this node should listen for connections. 
/// @@ -580,60 +581,116 @@ impl Default for Config { } } -impl<'de> Deserialize<'de> for Config { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - #[serde(deny_unknown_fields)] - struct DTestnetParameters { - network_name: Option, - network_magic: Option<[u8; 4]>, - slow_start_interval: Option, - target_difficulty_limit: Option, - disable_pow: Option, - genesis_hash: Option, - activation_heights: Option, - pre_nu6_funding_streams: Option, - post_nu6_funding_streams: Option, - pre_blossom_halving_interval: Option, +#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +struct DTestnetParameters { + network_name: Option, + network_magic: Option<[u8; 4]>, + slow_start_interval: Option, + target_difficulty_limit: Option, + disable_pow: Option, + genesis_hash: Option, + activation_heights: Option, + pre_nu6_funding_streams: Option, + post_nu6_funding_streams: Option, + pre_blossom_halving_interval: Option, +} + +#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields, default)] +struct DConfig { + listen_addr: String, + external_addr: Option, + network: NetworkKind, + testnet_parameters: Option, + initial_mainnet_peers: IndexSet, + initial_testnet_peers: IndexSet, + cache_dir: CacheDir, + peerset_initial_target_size: usize, + #[serde(alias = "new_peer_interval", with = "humantime_serde")] + crawl_new_peer_interval: Duration, + max_connections_per_ip: Option, +} + +impl Default for DConfig { + fn default() -> Self { + let config = Config::default(); + Self { + listen_addr: "0.0.0.0".to_string(), + external_addr: None, + network: Default::default(), + testnet_parameters: None, + initial_mainnet_peers: config.initial_mainnet_peers, + initial_testnet_peers: config.initial_testnet_peers, + cache_dir: config.cache_dir, + peerset_initial_target_size: config.peerset_initial_target_size, + crawl_new_peer_interval: config.crawl_new_peer_interval, + max_connections_per_ip: 
Some(config.max_connections_per_ip), } + } +} - #[derive(Deserialize)] - #[serde(deny_unknown_fields, default)] - struct DConfig { - listen_addr: String, - external_addr: Option, - network: NetworkKind, - testnet_parameters: Option, - initial_mainnet_peers: IndexSet, - initial_testnet_peers: IndexSet, - cache_dir: CacheDir, - peerset_initial_target_size: usize, - #[serde(alias = "new_peer_interval", with = "humantime_serde")] - crawl_new_peer_interval: Duration, - max_connections_per_ip: Option, +impl From> for DTestnetParameters { + fn from(params: Arc) -> Self { + Self { + network_name: Some(params.network_name().to_string()), + network_magic: Some(params.network_magic().0), + slow_start_interval: Some(params.slow_start_interval().0), + target_difficulty_limit: Some(params.target_difficulty_limit().to_string()), + disable_pow: Some(params.disable_pow()), + genesis_hash: Some(params.genesis_hash().to_string()), + activation_heights: Some(params.activation_heights().into()), + pre_nu6_funding_streams: Some(params.pre_nu6_funding_streams().into()), + post_nu6_funding_streams: Some(params.post_nu6_funding_streams().into()), + pre_blossom_halving_interval: Some( + params + .pre_blossom_halving_interval() + .try_into() + .expect("should convert"), + ), } + } +} - impl Default for DConfig { - fn default() -> Self { - let config = Config::default(); - Self { - listen_addr: "0.0.0.0".to_string(), - external_addr: None, - network: Default::default(), - testnet_parameters: None, - initial_mainnet_peers: config.initial_mainnet_peers, - initial_testnet_peers: config.initial_testnet_peers, - cache_dir: config.cache_dir, - peerset_initial_target_size: config.peerset_initial_target_size, - crawl_new_peer_interval: config.crawl_new_peer_interval, - max_connections_per_ip: Some(config.max_connections_per_ip), - } - } +impl From for DConfig { + fn from( + Config { + listen_addr, + external_addr, + network, + initial_mainnet_peers, + initial_testnet_peers, + cache_dir, + 
peerset_initial_target_size, + crawl_new_peer_interval, + max_connections_per_ip, + }: Config, + ) -> Self { + let testnet_parameters = network + .parameters() + .filter(|params| !params.is_default_testnet() && !params.is_regtest()) + .map(Into::into); + + DConfig { + listen_addr: listen_addr.to_string(), + external_addr: external_addr.map(|addr| addr.to_string()), + network: network.into(), + testnet_parameters, + initial_mainnet_peers, + initial_testnet_peers, + cache_dir, + peerset_initial_target_size, + crawl_new_peer_interval, + max_connections_per_ip: Some(max_connections_per_ip), } + } +} +impl<'de> Deserialize<'de> for Config { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let DConfig { listen_addr, external_addr, diff --git a/zebra-network/src/config/tests/vectors.rs b/zebra-network/src/config/tests/vectors.rs index 6927698338f..3cef8c2df91 100644 --- a/zebra-network/src/config/tests/vectors.rs +++ b/zebra-network/src/config/tests/vectors.rs @@ -1,6 +1,7 @@ //! Fixed test vectors for zebra-network configuration. 
use static_assertions::const_assert; +use zebra_chain::parameters::testnet; use crate::{ constants::{INBOUND_PEER_LIMIT_MULTIPLIER, OUTBOUND_PEER_LIMIT_MULTIPLIER}, @@ -46,3 +47,21 @@ fn ensure_peer_connection_limits_consistent() { "default config should allow more inbound connections, to avoid connection exhaustion", ); } + +#[test] +fn testnet_params_serialization_roundtrip() { + let _init_guard = zebra_test::init(); + + let config = Config { + network: testnet::Parameters::build() + .with_disable_pow(true) + .to_network(), + initial_testnet_peers: [].into(), + ..Config::default() + }; + + let serialized = toml::to_string(&config).unwrap(); + let deserialized: Config = toml::from_str(&serialized).unwrap(); + + assert_eq!(config, deserialized); +} From ece9a8ee59994937ebc2eef490dd5b25f1d64d50 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 12 Feb 2025 16:16:33 -0500 Subject: [PATCH 088/245] Adds unused and optional `_allow_high_fees` param on `sendrawtransaction` RPC (#9242) --- zebra-rpc/src/methods.rs | 3 +++ zebra-rpc/src/methods/tests/prop.rs | 16 ++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index df42dcbb3b6..e3c4eb6e21f 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -131,6 +131,7 @@ pub trait Rpc { /// # Parameters /// /// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes. + /// - `allow_high_fees`: (bool, optional) A legacy parameter accepted by zcashd but ignored by Zebra. /// /// # Notes /// @@ -140,6 +141,7 @@ pub trait Rpc { async fn send_raw_transaction( &self, raw_transaction_hex: String, + _allow_high_fees: Option, ) -> Result; /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. 
@@ -688,6 +690,7 @@ where async fn send_raw_transaction( &self, raw_transaction_hex: String, + _allow_high_fees: Option, ) -> Result { let mempool = self.mempool.clone(); let queue_sender = self.queue_sender.clone(); diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index cf7aec6c67a..fb373c3468f 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -49,7 +49,7 @@ proptest! { let transaction_hex = hex::encode(&transaction_bytes); - let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex).await }); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex, None).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]); @@ -93,7 +93,7 @@ proptest! { let _rpc = rpc.clone(); let _transaction_hex = transaction_hex.clone(); - let send_task = tokio::spawn(async move { _rpc.send_raw_transaction(_transaction_hex).await }); + let send_task = tokio::spawn(async move { _rpc.send_raw_transaction(_transaction_hex, None).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -109,7 +109,7 @@ proptest! { check_err_code(result, ErrorCode::ServerError(-1))?; - let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex.clone()).await }); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex.clone(), None).await }); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -147,7 +147,7 @@ proptest! 
{ let rsp = mempool::Response::Queued(vec![Err(DummyError.into())]); let mempool_query = mempool.expect_request(req).map_ok(|r| r.respond(rsp)); - let (rpc_rsp, _) = tokio::join!(rpc.send_raw_transaction(tx), mempool_query); + let (rpc_rsp, _) = tokio::join!(rpc.send_raw_transaction(tx, None), mempool_query); check_err_code(rpc_rsp, ErrorCode::ServerError(-1))?; @@ -175,7 +175,7 @@ proptest! { tokio::time::pause(); runtime.block_on(async move { - let send_task = rpc.send_raw_transaction(non_hex_string); + let send_task = rpc.send_raw_transaction(non_hex_string, None); // Check that there are no further requests. mempool.expect_no_requests().await?; @@ -206,7 +206,7 @@ proptest! { prop_assume!(Transaction::zcash_deserialize(&*random_bytes).is_err()); runtime.block_on(async move { - let send_task = rpc.send_raw_transaction(hex::encode(random_bytes)); + let send_task = rpc.send_raw_transaction(hex::encode(random_bytes), None); mempool.expect_no_requests().await?; state.expect_no_requests().await?; @@ -711,7 +711,7 @@ proptest! { let tx_hex = hex::encode(&tx_bytes); let send_task = { let rpc = rpc.clone(); - tokio::task::spawn(async move { rpc.send_raw_transaction(tx_hex).await }) + tokio::task::spawn(async move { rpc.send_raw_transaction(tx_hex, None).await }) }; let tx_unmined = UnminedTx::from(tx); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); @@ -790,7 +790,7 @@ proptest! 
{ // send a transaction let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); - let send_task = tokio::task::spawn(async move { rpc_clone.send_raw_transaction(tx_hex).await }); + let send_task = tokio::task::spawn(async move { rpc_clone.send_raw_transaction(tx_hex, None).await }); let tx_unmined = UnminedTx::from(tx.clone()); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); From cf653313dd3335fc09a47e2614010052b693728b Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 12 Feb 2025 18:20:05 -0500 Subject: [PATCH 089/245] feat(rpc): Add fields to `getblockchaininfo` RPC output (#9215) * Adds some of the required fields on `getblockchaininfo` output. * Adds state request/response variants for querying disk usage * Adds `size_on_disk`, `chain_supply`, and `monitored` fields. * Updates snapshots * fixes prop tests * fixes doc lints * Adds missing `size()` method * Fixes lwd integration test issue by updating get_blockchain_info to fallback on default values instead of returning an error if the state is empty. Related: Runs state queries in parallel from getblockchaininfo RPC and removes the BlockHeader query by getting the tip block time from the latest chain tip channel. 
* Updates failing proptests * fixes lint --- zebra-rpc/src/methods.rs | 276 +++++++++++++----- .../src/methods/get_block_template_rpcs.rs | 65 +---- zebra-rpc/src/methods/tests/prop.rs | 121 ++++---- zebra-rpc/src/methods/tests/snapshot.rs | 6 + .../get_blockchain_info@mainnet_10.snap | 27 +- .../get_blockchain_info@testnet_10.snap | 27 +- ..._info_future_nu6_height@nu6testnet_10.snap | 27 +- zebra-rpc/src/methods/types.rs | 2 +- .../src/methods/types/get_blockchain_info.rs | 42 ++- zebra-state/src/lib.rs | 5 +- zebra-state/src/request.rs | 7 +- zebra-state/src/response.rs | 14 +- zebra-state/src/service.rs | 21 +- .../src/service/finalized_state/disk_db.rs | 21 ++ .../src/service/finalized_state/zebra_db.rs | 5 + zebra-state/src/service/read.rs | 4 +- zebra-state/src/service/read/difficulty.rs | 1 + zebra-state/src/service/read/tree.rs | 1 - 18 files changed, 431 insertions(+), 241 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index e3c4eb6e21f..45abd973a72 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -29,14 +29,16 @@ use zebra_chain::{ subtree::NoteCommitmentSubtreeIndex, transaction::{self, SerializedTransaction, Transaction, UnminedTx}, transparent::{self, Address}, - value_balance::ValueBalance, work::{ - difficulty::{CompactDifficulty, ExpandedDifficulty}, + difficulty::{CompactDifficulty, ExpandedDifficulty, ParameterDifficulty, U256}, equihash::Solution, }, }; +use zebra_consensus::ParameterCheckpoint; use zebra_node_services::mempool; -use zebra_state::{HashOrHeight, OutputIndex, OutputLocation, TransactionLocation}; +use zebra_state::{ + HashOrHeight, OutputIndex, OutputLocation, ReadRequest, ReadResponse, TransactionLocation, +}; use crate::{ methods::trees::{GetSubtrees, GetTreestate, SubtreeRpcData}, @@ -546,75 +548,65 @@ where #[allow(clippy::unwrap_in_result)] async fn get_blockchain_info(&self) -> Result { - let network = self.network.clone(); let debug_force_finished_sync = 
self.debug_force_finished_sync; - let mut state = self.state.clone(); + let network = &self.network; + + let (usage_info_rsp, tip_pool_values_rsp, chain_tip_difficulty) = { + use zebra_state::ReadRequest::*; + let state_call = |request| self.state.clone().oneshot(request); + tokio::join!( + state_call(UsageInfo), + state_call(TipPoolValues), + chain_tip_difficulty(network.clone(), self.state.clone()) + ) + }; - // `chain` field - let chain = network.bip70_network_name(); + let (size_on_disk, (tip_height, tip_hash), value_balance, difficulty) = { + use zebra_state::ReadResponse::*; - let (tip_height, tip_hash, tip_block_time, value_balance) = match state - .ready() - .and_then(|service| service.call(zebra_state::ReadRequest::TipPoolValues)) - .await - { - Ok(zebra_state::ReadResponse::TipPoolValues { - tip_height, - tip_hash, - value_balance, - }) => { - let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - if let zebra_state::ReadResponse::BlockHeader { header, .. } = response { - (tip_height, tip_hash, header.time, value_balance) - } else { - unreachable!("unmatched response to a TipPoolValues request") - } - } - _ => { - let request = - zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(Height::MIN)); - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - if let zebra_state::ReadResponse::BlockHeader { - header, - hash, - height, - .. - } = response - { - (height, hash, header.time, ValueBalance::zero()) - } else { - unreachable!("unmatched response to a BlockHeader request") - } - } + let UsageInfo(size_on_disk) = usage_info_rsp.map_misc_error()? 
else { + unreachable!("unmatched response to a TipPoolValues request") + }; + + let (tip, value_balance) = match tip_pool_values_rsp { + Ok(TipPoolValues { + tip_height, + tip_hash, + value_balance, + }) => ((tip_height, tip_hash), value_balance), + Ok(_) => unreachable!("unmatched response to a TipPoolValues request"), + Err(_) => ((Height::MIN, network.genesis_hash()), Default::default()), + }; + + let difficulty = chain_tip_difficulty.unwrap_or_else(|_| { + (U256::from(network.target_difficulty_limit()) >> 128).as_u128() as f64 + }); + (size_on_disk, tip, value_balance, difficulty) }; let now = Utc::now(); - let zebra_estimated_height = - NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) - .estimate_height_at(now); - - // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, - // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is - // later than the current time on the local clock. - let estimated_height = if tip_block_time > now - || zebra_estimated_height < tip_height - || debug_force_finished_sync - { - tip_height - } else { - zebra_estimated_height - }; + let (estimated_height, verification_progress) = self + .latest_chain_tip + .best_tip_height_and_block_time() + .map(|(tip_height, tip_block_time)| { + let height = + NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, network) + .estimate_height_at(now); + + // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, + // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is + // later than the current time on the local clock. 
+ let height = + if tip_block_time > now || height < tip_height || debug_force_finished_sync { + tip_height + } else { + height + }; + + (height, f64::from(tip_height.0) / f64::from(height.0)) + }) + // TODO: Add a `genesis_block_time()` method on `Network` to use here. + .unwrap_or((Height::MIN, 0.0)); // `upgrades` object // @@ -647,29 +639,40 @@ where (tip_height + 1).expect("valid chain tips are a lot less than Height::MAX"); let consensus = TipConsensusBranch { chain_tip: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, tip_height) + NetworkUpgrade::current(network, tip_height) .branch_id() .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), ), next_block: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, next_block_height) + NetworkUpgrade::current(network, next_block_height) .branch_id() .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), ), }; let response = GetBlockChainInfo { - chain, + chain: network.bip70_network_name(), blocks: tip_height, best_block_hash: tip_hash, estimated_height, - value_pools: types::ValuePoolBalance::from_value_balance(value_balance), + chain_supply: types::Balance::chain_supply(value_balance), + value_pools: types::Balance::value_pools(value_balance), upgrades, consensus, + headers: tip_height, + difficulty, + verification_progress, + // TODO: store work in the finalized state for each height (#7109) + chain_work: 0, + pruned: false, + size_on_disk, + // TODO: Investigate whether this needs to be implemented (it's sprout-only in zcashd) + commitments: 0, }; Ok(response) } + async fn get_address_balance(&self, address_strings: AddressStrings) -> Result { let state = self.state.clone(); @@ -1540,7 +1543,7 @@ impl GetInfo { /// Response to a `getblockchaininfo` RPC request. /// /// See the notes for the [`Rpc::get_blockchain_info` method]. 
-#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct GetBlockChainInfo { /// Current network name as defined in BIP70 (main, test, regtest) chain: String, @@ -1548,6 +1551,30 @@ pub struct GetBlockChainInfo { /// The current number of blocks processed in the server, numeric blocks: Height, + /// The current number of headers we have validated in the best chain, that is, + /// the height of the best chain. + headers: Height, + + /// The estimated network solution rate in Sol/s. + difficulty: f64, + + /// The verification progress relative to the estimated network chain tip. + #[serde(rename = "verificationprogress")] + verification_progress: f64, + + /// The total amount of work in the best chain, hex-encoded. + #[serde(rename = "chainwork")] + chain_work: u64, + + /// Whether this node is pruned, currently always false in Zebra. + pruned: bool, + + /// The estimated size of the block and undo files on disk + size_on_disk: u64, + + /// The current number of note commitments in the commitment tree + commitments: u64, + /// The hash of the currently best block, in big-endian order, hex-encoded #[serde(rename = "bestblockhash", with = "hex")] best_block_hash: block::Hash, @@ -1558,9 +1585,13 @@ pub struct GetBlockChainInfo { #[serde(rename = "estimatedheight")] estimated_height: Height, + /// Chain supply balance + #[serde(rename = "chainSupply")] + chain_supply: types::Balance, + /// Value pool balances #[serde(rename = "valuePools")] - value_pools: [types::ValuePoolBalance; 5], + value_pools: [types::Balance; 5], /// Status of network upgrades upgrades: IndexMap, @@ -1576,35 +1607,60 @@ impl Default for GetBlockChainInfo { blocks: Height(1), best_block_hash: block::Hash([0; 32]), estimated_height: Height(1), - value_pools: types::ValuePoolBalance::zero_pools(), + chain_supply: types::Balance::chain_supply(Default::default()), + value_pools: types::Balance::zero_pools(), 
upgrades: IndexMap::new(), consensus: TipConsensusBranch { chain_tip: ConsensusBranchIdHex(ConsensusBranchId::default()), next_block: ConsensusBranchIdHex(ConsensusBranchId::default()), }, + headers: Height(1), + difficulty: 0.0, + verification_progress: 0.0, + chain_work: 0, + pruned: false, + size_on_disk: 0, + commitments: 0, } } } impl GetBlockChainInfo { /// Creates a new [`GetBlockChainInfo`] instance. + #[allow(clippy::too_many_arguments)] pub fn new( chain: String, blocks: Height, best_block_hash: block::Hash, estimated_height: Height, - value_pools: [types::ValuePoolBalance; 5], + chain_supply: types::Balance, + value_pools: [types::Balance; 5], upgrades: IndexMap, consensus: TipConsensusBranch, + headers: Height, + difficulty: f64, + verification_progress: f64, + chain_work: u64, + pruned: bool, + size_on_disk: u64, + commitments: u64, ) -> Self { Self { chain, blocks, best_block_hash, estimated_height, + chain_supply, value_pools, upgrades, consensus, + headers, + difficulty, + verification_progress, + chain_work, + pruned, + size_on_disk, + commitments, } } @@ -1633,7 +1689,7 @@ impl GetBlockChainInfo { } /// Returns the value pool balances. - pub fn value_pools(&self) -> &[types::ValuePoolBalance; 5] { + pub fn value_pools(&self) -> &[types::Balance; 5] { &self.value_pools } @@ -2456,3 +2512,73 @@ mod opthex { } } } +/// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. 
+pub(crate) async fn chain_tip_difficulty(network: Network, mut state: State) -> Result +where + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + State::Future: Send, +{ + let request = ReadRequest::ChainInfo; + + // # TODO + // - add a separate request like BestChainNextMedianTimePast, but skipping the + // consistency check, because any block's difficulty is ok for display + // - return 1.0 for a "not enough blocks in the state" error, like `zcashd`: + // + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let chain_info = match response { + ReadResponse::ChainInfo(info) => info, + _ => unreachable!("unmatched response to a chain info request"), + }; + + // This RPC is typically used for display purposes, so it is not consensus-critical. + // But it uses the difficulty consensus rules for its calculations. + // + // Consensus: + // https://zips.z.cash/protocol/protocol.pdf#nbits + // + // The zcashd implementation performs to_expanded() on f64, + // and then does an inverse division: + // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 + // + // But in Zebra we divide the high 128 bits of each expanded difficulty. This gives + // a similar result, because the lower 128 bits are insignificant after conversion + // to `f64` with a 53-bit mantissa. + // + // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation + // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. + // + // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's + // difficulty currently uses 68 bits, so even it would still have full precision + // using this calculation.) 
+ + // Get expanded difficulties (256 bits), these are the inverse of the work + let pow_limit: U256 = network.target_difficulty_limit().into(); + let Some(difficulty) = chain_info.expected_difficulty.to_expanded() else { + return Ok(0.0); + }; + + // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) + let pow_limit = pow_limit >> 128; + let difficulty = U256::from(difficulty) >> 128; + + // Convert to u128 then f64. + // We could also convert U256 to String, then parse as f64, but that's slower. + let pow_limit = pow_limit.as_u128() as f64; + let difficulty = difficulty.as_u128() as f64; + + // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` + Ok(pow_limit / difficulty) +} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 3cdb8ef079c..2405be36a0a 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -25,7 +25,6 @@ use zebra_chain::{ transparent::{ self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN, }, - work::difficulty::{ParameterDifficulty as _, U256}, }; use zebra_consensus::{ block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, RouterError, @@ -36,7 +35,7 @@ use zebra_state::{ReadRequest, ReadResponse}; use crate::{ methods::{ - best_chain_tip_height, + best_chain_tip_height, chain_tip_difficulty, get_block_template_rpcs::{ constants::{ DEFAULT_SOLUTION_RATE_WINDOW_SIZE, GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, @@ -1261,67 +1260,7 @@ where } async fn get_difficulty(&self) -> Result { - let network = self.network.clone(); - let mut state = self.state.clone(); - - let request = ReadRequest::ChainInfo; - - // # TODO - // - add a separate request like BestChainNextMedianTimePast, but skipping the - // consistency check, because any block's difficulty is ok for display - // - return 1.0 for a "not enough blocks in 
the state" error, like `zcashd`: - // - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - - let chain_info = match response { - ReadResponse::ChainInfo(info) => info, - _ => unreachable!("unmatched response to a chain info request"), - }; - - // This RPC is typically used for display purposes, so it is not consensus-critical. - // But it uses the difficulty consensus rules for its calculations. - // - // Consensus: - // https://zips.z.cash/protocol/protocol.pdf#nbits - // - // The zcashd implementation performs to_expanded() on f64, - // and then does an inverse division: - // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 - // - // But in Zebra we divide the high 128 bits of each expanded difficulty. This gives - // a similar result, because the lower 128 bits are insignificant after conversion - // to `f64` with a 53-bit mantissa. - // - // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation - // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. - // - // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's - // difficulty currently uses 68 bits, so even it would still have full precision - // using this calculation.) - - // Get expanded difficulties (256 bits), these are the inverse of the work - let pow_limit: U256 = network.target_difficulty_limit().into(); - let difficulty: U256 = chain_info - .expected_difficulty - .to_expanded() - .expect("valid blocks have valid difficulties") - .into(); - - // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) - let pow_limit = pow_limit >> 128; - let difficulty = difficulty >> 128; - - // Convert to u128 then f64. - // We could also convert U256 to String, then parse as f64, but that's slower. 
- let pow_limit = pow_limit.as_u128() as f64; - let difficulty = difficulty.as_u128() as f64; - - // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` - Ok(pow_limit / difficulty) + chain_tip_difficulty(self.network.clone(), self.state.clone()).await } async fn z_list_unified_receivers(&self, address: String) -> Result { diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index fb373c3468f..e2ff0bc68f5 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -12,20 +12,22 @@ use tower::buffer::Buffer; use zebra_chain::{ amount::{Amount, NonNegative}, - block::{self, Block, Height}, + block::{Block, Height}, chain_tip::{mock::MockChainTip, ChainTip, NoChainTip}, parameters::{ConsensusBranchId, Network, NetworkUpgrade}, - serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}, + serialization::{DateTime32, ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}, transaction::{self, Transaction, UnminedTx, VerifiedUnminedTx}, transparent, value_balance::ValueBalance, }; + +use zebra_consensus::ParameterCheckpoint; use zebra_node_services::mempool; -use zebra_state::{BoxError, HashOrHeight}; +use zebra_state::{BoxError, GetBlockTemplateChainInfo}; use zebra_test::mock_service::MockService; -use crate::methods::{self, types::ValuePoolBalance}; +use crate::methods::{self, types::Balance}; use super::super::{ AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, @@ -355,31 +357,51 @@ proptest! 
{ fn get_blockchain_info_response_without_a_chain_tip(network in any::()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); - let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); + let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network.clone(), NoChainTip); // CORRECTNESS: Nothing in this test depends on real time, so we can speed it up. tokio::time::pause(); + let genesis_hash = network.genesis_hash(); + runtime.block_on(async move { let response_fut = rpc.get_blockchain_info(); let mock_state_handler = { let mut state = state.clone(); async move { + state + .expect_request(zebra_state::ReadRequest::UsageInfo) + .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(zebra_state::ReadResponse::UsageInfo(0)); + state .expect_request(zebra_state::ReadRequest::TipPoolValues) .await .expect("getblockchaininfo should call mock state service with correct request") .respond(Err(BoxError::from("no chain tip available yet"))); - state.expect_request(zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(block::Height(0)))).await.expect("no chain tip available yet").respond(Err(BoxError::from("no chain tip available yet"))); + state + .expect_request(zebra_state::ReadRequest::ChainInfo) + .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(zebra_state::ReadResponse::ChainInfo(GetBlockTemplateChainInfo { + tip_hash: genesis_hash, + tip_height: Height::MIN, + history_tree: Default::default(), + expected_difficulty: Default::default(), + cur_time: DateTime32::now(), + min_time: DateTime32::now(), + max_time: DateTime32::now() + })); } }; let (response, _) = tokio::join!(response_fut, mock_state_handler); prop_assert_eq!( - response.err().unwrap().message().to_string(), - "no chain tip available yet".to_string() + response.unwrap().best_block_hash, + genesis_hash ); 
mempool.expect_no_requests().await?; @@ -409,7 +431,7 @@ proptest! { // get arbitrary chain tip data let block_height = block.coinbase_height().unwrap(); let block_hash = block.hash(); - let block_time = block.header.time; + let expected_size_on_disk = 1_000; // check no requests were made during this test runtime.block_on(async move { @@ -417,6 +439,12 @@ proptest! { let mock_state_handler = { let mut state = state.clone(); async move { + state + .expect_request(zebra_state::ReadRequest::UsageInfo) + .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(zebra_state::ReadResponse::UsageInfo(expected_size_on_disk)); + state .expect_request(zebra_state::ReadRequest::TipPoolValues) .await @@ -428,24 +456,18 @@ proptest! { }); state - .expect_request(zebra_state::ReadRequest::BlockHeader(block_hash.into())) + .expect_request(zebra_state::ReadRequest::ChainInfo) .await .expect("getblockchaininfo should call mock state service with correct request") - .respond(zebra_state::ReadResponse::BlockHeader { - header: Arc::new(block::Header { - time: block_time, - version: Default::default(), - previous_block_hash: Default::default(), - merkle_root: Default::default(), - commitment_bytes: Default::default(), - difficulty_threshold: Default::default(), - nonce: Default::default(), - solution: Default::default() - }), - hash: block::Hash::from([0; 32]), - height: Height::MIN, - next_block_hash: None, - }); + .respond(zebra_state::ReadResponse::ChainInfo(GetBlockTemplateChainInfo { + tip_hash: block_hash, + tip_height: block_height, + history_tree: Default::default(), + expected_difficulty: Default::default(), + cur_time: DateTime32::now(), + min_time: DateTime32::now(), + max_time: DateTime32::now() + })); } }; @@ -457,6 +479,7 @@ proptest! 
{ prop_assert_eq!(info.chain, network.bip70_network_name()); prop_assert_eq!(info.blocks, block_height); prop_assert_eq!(info.best_block_hash, block_hash); + prop_assert_eq!(info.size_on_disk, expected_size_on_disk); prop_assert!(info.estimated_height < Height::MAX); prop_assert_eq!( @@ -480,8 +503,8 @@ proptest! { prop_assert_eq!(u.1.status, status); } } - Err(_) => { - unreachable!("Test should never error with the data we are feeding it") + Err(err) => { + unreachable!("Test should never error with the data we are feeding it: {err}") } }; @@ -512,53 +535,37 @@ proptest! { block }, Network::Testnet(_) => { - let block_bytes = &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES; + let block_bytes = &zebra_test::vectors::BLOCK_TESTNET_GENESIS_BYTES; let block: Arc = block_bytes.zcash_deserialize_into().expect("block is valid"); block }, }; // Genesis block fields - let block_time = genesis_block.header.time; - let block_version = genesis_block.header.version; - let block_prev_block_hash = genesis_block.header.previous_block_hash; - let block_merkle_root = genesis_block.header.merkle_root; - let block_commitment_bytes = genesis_block.header.commitment_bytes; - let block_difficulty_threshold = genesis_block.header.difficulty_threshold; - let block_nonce = genesis_block.header.nonce; - let block_solution = genesis_block.header.solution; - let block_hash = genesis_block.header.hash(); + let expected_size_on_disk = 1_000; runtime.block_on(async move { let response_fut = rpc.get_blockchain_info(); let mock_state_handler = { let mut state = state.clone(); async move { - state.expect_request(zebra_state::ReadRequest::TipPoolValues) + state + .expect_request(zebra_state::ReadRequest::UsageInfo) .await .expect("getblockchaininfo should call mock state service with correct request") - .respond(Err(BoxError::from("tip values not available"))); + .respond(zebra_state::ReadResponse::UsageInfo(expected_size_on_disk)); + state.expect_request(zebra_state::ReadRequest::TipPoolValues) 
+ .await + .expect("getblockchaininfo should call mock state service with correct request") + .respond(Err(BoxError::from("tip values not available"))); - state - .expect_request(zebra_state::ReadRequest::BlockHeader(HashOrHeight::Height(Height::MIN))) + state + .expect_request(zebra_state::ReadRequest::ChainInfo) .await .expect("getblockchaininfo should call mock state service with correct request") - .respond(zebra_state::ReadResponse::BlockHeader { - header: Arc::new(block::Header { - time: block_time, - version: block_version, - previous_block_hash: block_prev_block_hash, - merkle_root: block_merkle_root, - commitment_bytes: block_commitment_bytes, - difficulty_threshold: block_difficulty_threshold, - nonce: block_nonce, - solution: block_solution - }), - hash: block_hash, - height: Height::MIN, - next_block_hash: None, - }); + .respond(Err(BoxError::from("chain info not available"))); + } }; @@ -569,7 +576,7 @@ proptest! { prop_assert_eq!(response.best_block_hash, genesis_block.header.hash()); prop_assert_eq!(response.chain, network.bip70_network_name()); prop_assert_eq!(response.blocks, Height::MIN); - prop_assert_eq!(response.value_pools, ValuePoolBalance::from_value_balance(ValueBalance::zero())); + prop_assert_eq!(response.value_pools, Balance::value_pools(ValueBalance::zero())); let genesis_branch_id = NetworkUpgrade::current(&network, Height::MIN).branch_id().unwrap_or(ConsensusBranchId::RPC_MISSING_ID); let next_height = (Height::MIN + 1).expect("genesis height plus one is next height and valid"); diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 89ee464c70a..217b54f6510 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -613,6 +613,12 @@ fn snapshot_rpc_getblockchaininfo( // replace with: "[Height]" }), + ".verificationprogress" => dynamic_redaction(|value, _path| { + // assert that the value looks like a valid verification progress here + 
assert!(value.as_f64().unwrap() <= 1.0); + // replace with: + "[f64]" + }), }) }); } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@mainnet_10.snap index 9986da0ec95..c5a536205c3 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@mainnet_10.snap @@ -5,33 +5,50 @@ expression: info { "chain": "main", "blocks": 10, + "headers": 10, + "difficulty": 1.0, + "verificationprogress": "[f64]", + "chainwork": 0, + "pruned": false, + "size_on_disk": 0, + "commitments": 0, "bestblockhash": "00074c46a4aa8172df8ae2ad1848a2e084e1b6989b7d9e6132adc938bf835b36", "estimatedheight": "[Height]", + "chainSupply": { + "chainValue": 0.034375, + "chainValueZat": 3437500, + "monitored": true + }, "valuePools": [ { "id": "transparent", "chainValue": 0.034375, - "chainValueZat": 3437500 + "chainValueZat": 3437500, + "monitored": true }, { "id": "sprout", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "sapling", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "orchard", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "deferred", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false } ], "upgrades": { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@testnet_10.snap index 3bea6c01509..8a1747ac777 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info@testnet_10.snap @@ -5,33 +5,50 @@ expression: info { "chain": "test", "blocks": 10, + "headers": 10, + "difficulty": 1.0, + "verificationprogress": "[f64]", + "chainwork": 0, + "pruned": false, + 
"size_on_disk": 0, + "commitments": 0, "bestblockhash": "079f4c752729be63e6341ee9bce42fbbe37236aba22e3deb82405f3c2805c112", "estimatedheight": "[Height]", + "chainSupply": { + "chainValue": 0.034375, + "chainValueZat": 3437500, + "monitored": true + }, "valuePools": [ { "id": "transparent", "chainValue": 0.034375, - "chainValueZat": 3437500 + "chainValueZat": 3437500, + "monitored": true }, { "id": "sprout", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "sapling", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "orchard", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "deferred", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false } ], "upgrades": { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info_future_nu6_height@nu6testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info_future_nu6_height@nu6testnet_10.snap index 0f4f6fe26a6..a92cc10f678 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info_future_nu6_height@nu6testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_blockchain_info_future_nu6_height@nu6testnet_10.snap @@ -5,33 +5,50 @@ expression: info { "chain": "test", "blocks": 10, + "headers": 10, + "difficulty": 1.0, + "verificationprogress": "[f64]", + "chainwork": 0, + "pruned": false, + "size_on_disk": 0, + "commitments": 0, "bestblockhash": "079f4c752729be63e6341ee9bce42fbbe37236aba22e3deb82405f3c2805c112", "estimatedheight": "[Height]", + "chainSupply": { + "chainValue": 0.034375, + "chainValueZat": 3437500, + "monitored": true + }, "valuePools": [ { "id": "transparent", "chainValue": 0.034375, - "chainValueZat": 3437500 + "chainValueZat": 3437500, + "monitored": true }, { "id": "sprout", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "sapling", "chainValue": 0.0, - 
"chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "orchard", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false }, { "id": "deferred", "chainValue": 0.0, - "chainValueZat": 0 + "chainValueZat": 0, + "monitored": false } ], "upgrades": { diff --git a/zebra-rpc/src/methods/types.rs b/zebra-rpc/src/methods/types.rs index d29e697f065..db3e36c6554 100644 --- a/zebra-rpc/src/methods/types.rs +++ b/zebra-rpc/src/methods/types.rs @@ -3,5 +3,5 @@ mod get_blockchain_info; mod zec; -pub use get_blockchain_info::ValuePoolBalance; +pub use get_blockchain_info::Balance; pub use zec::Zec; diff --git a/zebra-rpc/src/methods/types/get_blockchain_info.rs b/zebra-rpc/src/methods/types/get_blockchain_info.rs index a2d1e781685..57eda10faec 100644 --- a/zebra-rpc/src/methods/types/get_blockchain_info.rs +++ b/zebra-rpc/src/methods/types/get_blockchain_info.rs @@ -10,57 +10,61 @@ use super::*; /// A value pool's balance in Zec and Zatoshis #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "camelCase")] -pub struct ValuePoolBalance { +pub struct Balance { /// Name of the pool + #[serde(skip_serializing_if = "String::is_empty")] id: String, /// Total amount in the pool, in ZEC chain_value: Zec, /// Total amount in the pool, in zatoshis chain_value_zat: Amount, + /// Whether the value pool balance is being monitored. + monitored: bool, } -impl ValuePoolBalance { - /// Returns a list of [`ValuePoolBalance`]s converted from the default [`ValueBalance`]. +impl Balance { + /// Returns a list of [`Balance`]s converted from the default [`ValueBalance`]. pub fn zero_pools() -> [Self; 5] { - Self::from_value_balance(Default::default()) + Self::value_pools(Default::default()) } - /// Creates a new [`ValuePoolBalance`] from a pool name and its value balance. + /// Creates a new [`Balance`] from a pool name and its value balance. 
pub fn new(id: impl ToString, amount: Amount) -> Self { Self { id: id.to_string(), chain_value: Zec::from(amount), chain_value_zat: amount, + monitored: amount.zatoshis() != 0, } } - /// Creates a [`ValuePoolBalance`] for the transparent pool. + /// Creates a [`Balance`] for the transparent pool. pub fn transparent(amount: Amount) -> Self { Self::new("transparent", amount) } - /// Creates a [`ValuePoolBalance`] for the Sprout pool. + /// Creates a [`Balance`] for the Sprout pool. pub fn sprout(amount: Amount) -> Self { Self::new("sprout", amount) } - /// Creates a [`ValuePoolBalance`] for the Sapling pool. + /// Creates a [`Balance`] for the Sapling pool. pub fn sapling(amount: Amount) -> Self { Self::new("sapling", amount) } - /// Creates a [`ValuePoolBalance`] for the Orchard pool. + /// Creates a [`Balance`] for the Orchard pool. pub fn orchard(amount: Amount) -> Self { Self::new("orchard", amount) } - /// Creates a [`ValuePoolBalance`] for the Deferred pool. + /// Creates a [`Balance`] for the Deferred pool. pub fn deferred(amount: Amount) -> Self { Self::new("deferred", amount) } - /// Converts a [`ValueBalance`] to a list of [`ValuePoolBalance`]s. - pub fn from_value_balance(value_balance: ValueBalance) -> [Self; 5] { + /// Converts a [`ValueBalance`] to a list of [`Balance`]s. + pub fn value_pools(value_balance: ValueBalance) -> [Self; 5] { [ Self::transparent(value_balance.transparent_amount()), Self::sprout(value_balance.sprout_amount()), @@ -69,4 +73,18 @@ impl ValuePoolBalance { Self::deferred(value_balance.deferred_amount()), ] } + + /// Converts a [`ValueBalance`] to a [`Balance`] representing the total chain supply. 
+ pub fn chain_supply(value_balance: ValueBalance) -> Self { + Self::value_pools(value_balance) + .into_iter() + .reduce(|a, b| { + Balance::new( + "", + (a.chain_value_zat + b.chain_value_zat) + .expect("sum of value balances should not overflow"), + ) + }) + .expect("at least one pool") + } } diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 848d30950ec..461b2d4b19f 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -48,7 +48,7 @@ pub use request::{ #[cfg(feature = "indexer")] pub use request::Spend; -pub use response::{KnownBlock, MinedTx, ReadResponse, Response}; +pub use response::{GetBlockTemplateChainInfo, KnownBlock, MinedTx, ReadResponse, Response}; pub use service::{ chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip, TipAction}, check, init, init_read_only, @@ -73,9 +73,6 @@ pub use service::finalized_state::{ pub use service::{finalized_state::ZebraDb, ReadStateService}; -#[cfg(feature = "getblocktemplate-rpcs")] -pub use response::GetBlockTemplateChainInfo; - // Allow use in external tests #[cfg(any(test, feature = "proptest-impl"))] pub use service::{ diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index ee7ddedc73f..d5eb92e2d90 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -866,6 +866,10 @@ impl Request { /// A read-only query about the chain state, via the /// [`ReadStateService`](crate::service::ReadStateService). pub enum ReadRequest { + /// Returns [`ReadResponse::UsageInfo(num_bytes: u64)`](ReadResponse::UsageInfo) + /// with the current disk space usage in bytes. + UsageInfo, + /// Returns [`ReadResponse::Tip(Option<(Height, block::Hash)>)`](ReadResponse::Tip) /// with the current best chain tip. Tip, @@ -1096,7 +1100,6 @@ pub enum ReadRequest { /// * [`ReadResponse::BlockHash(None)`](ReadResponse::BlockHash) otherwise. 
BestChainBlockHash(block::Height), - #[cfg(feature = "getblocktemplate-rpcs")] /// Get state information from the best block chain. /// /// Returns [`ReadResponse::ChainInfo(info)`](ReadResponse::ChainInfo) where `info` is a @@ -1135,6 +1138,7 @@ pub enum ReadRequest { impl ReadRequest { fn variant_name(&self) -> &'static str { match self { + ReadRequest::UsageInfo => "usage_info", ReadRequest::Tip => "tip", ReadRequest::TipPoolValues => "tip_pool_values", ReadRequest::Depth(_) => "depth", @@ -1161,7 +1165,6 @@ impl ReadRequest { ReadRequest::BestChainBlockHash(_) => "best_chain_block_hash", #[cfg(feature = "indexer")] ReadRequest::SpendingTransactionId(_) => "spending_transaction_id", - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::ChainInfo => "chain_info", #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::SolutionRate { .. } => "solution_rate", diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 73b2ab09a96..0e847e9d0cf 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -13,7 +13,6 @@ use zebra_chain::{ value_balance::ValueBalance, }; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::work::difficulty::CompactDifficulty; // Allow *only* these unused imports, so that rustdoc link resolution @@ -135,6 +134,9 @@ impl MinedTx { /// A response to a read-only /// [`ReadStateService`](crate::service::ReadStateService)'s [`ReadRequest`]. pub enum ReadResponse { + /// Response to [`ReadRequest::UsageInfo`] with the current best chain tip. + UsageInfo(u64), + /// Response to [`ReadRequest::Tip`] with the current best chain tip. Tip(Option<(block::Height, block::Hash)>), @@ -241,7 +243,6 @@ pub enum ReadResponse { /// Response to [`ReadRequest::BestChainBlockHash`] with the specified block hash. BlockHash(Option), - #[cfg(feature = "getblocktemplate-rpcs")] /// Response to [`ReadRequest::ChainInfo`] with the state /// information needed by the `getblocktemplate` RPC method. 
ChainInfo(GetBlockTemplateChainInfo), @@ -260,7 +261,6 @@ pub enum ReadResponse { } /// A structure with the information needed from the state to build a `getblocktemplate` RPC response. -#[cfg(feature = "getblocktemplate-rpcs")] #[derive(Clone, Debug, Eq, PartialEq)] pub struct GetBlockTemplateChainInfo { // Data fetched directly from the state tip. @@ -337,7 +337,8 @@ impl TryFrom for Response { ReadResponse::ValidBestChainTipNullifiersAndAnchors => Ok(Response::ValidBestChainTipNullifiersAndAnchors), - ReadResponse::TipPoolValues { .. } + ReadResponse::UsageInfo(_) + | ReadResponse::TipPoolValues { .. } | ReadResponse::TransactionIdsForBlock(_) | ReadResponse::SaplingTree(_) | ReadResponse::OrchardTree(_) @@ -345,7 +346,8 @@ impl TryFrom for Response { | ReadResponse::OrchardSubtrees(_) | ReadResponse::AddressBalance(_) | ReadResponse::AddressesTransactionIds(_) - | ReadResponse::AddressUtxos(_) => { + | ReadResponse::AddressUtxos(_) + | ReadResponse::ChainInfo(_) => { Err("there is no corresponding Response for this ReadResponse") } @@ -356,7 +358,7 @@ impl TryFrom for Response { ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal), #[cfg(feature = "getblocktemplate-rpcs")] - ReadResponse::ChainInfo(_) | ReadResponse::SolutionRate(_) | ReadResponse::TipBlockSize(_) => { + ReadResponse::SolutionRate(_) | ReadResponse::TipBlockSize(_) => { Err("there is no corresponding Response for this ReadResponse") } } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 487d152c62c..9e3fbfed2d4 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1173,6 +1173,24 @@ impl Service for ReadStateService { let span = Span::current(); match req { + // Used by the `getblockchaininfo` RPC. + ReadRequest::UsageInfo => { + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + span.in_scope(move || { + // The work is done in the future. 
+ + let db_size = db.size(); + + timer.finish(module_path!(), line!(), "ReadRequest::UsageInfo"); + + Ok(ReadResponse::UsageInfo(db_size)) + }) + }) + .wait_for_panics() + } + // Used by the StateService. ReadRequest::Tip => { let state = self.clone(); @@ -1813,8 +1831,7 @@ impl Service for ReadStateService { .wait_for_panics() } - // Used by get_block_template RPC. - #[cfg(feature = "getblocktemplate-rpcs")] + // Used by get_block_template and getblockchaininfo RPCs. ReadRequest::ChainInfo => { let state = self.clone(); let latest_non_finalized_state = self.latest_non_finalized_state(); diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 69be5a4585f..014213fcf83 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -570,6 +570,27 @@ impl DiskDb { ); } + /// Returns the estimated total disk space usage of the database. + pub fn size(&self) -> u64 { + let db: &Arc = &self.db; + let db_options = DiskDb::options(); + let mut total_size_on_disk = 0; + for cf_descriptor in DiskDb::construct_column_families(&db_options, db.path(), &[]).iter() { + let cf_name = &cf_descriptor.name(); + let cf_handle = db + .cf_handle(cf_name) + .expect("Column family handle must exist"); + + total_size_on_disk += db + .property_int_value_cf(cf_handle, "rocksdb.total-sst-files-size") + .ok() + .flatten() + .unwrap_or(0); + } + + total_size_on_disk + } + /// When called with a secondary DB instance, tries to catch up with the primary DB instance pub fn try_catch_up_with_primary(&self) -> Result<(), rocksdb::Error> { self.db.try_catch_up_with_primary() diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index 951c78ec93c..2c73b059f6a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -343,6 +343,11 @@ impl ZebraDb 
{ pub fn print_db_metrics(&self) { self.db.print_db_metrics(); } + + /// Returns the estimated total disk space usage of the database. + pub fn size(&self) -> u64 { + self.db.size() + } } impl Drop for ZebraDb { diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index 0b0ece3a358..2cc4c63e361 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -16,12 +16,10 @@ use crate::service; pub mod address; pub mod block; +pub mod difficulty; pub mod find; pub mod tree; -#[cfg(feature = "getblocktemplate-rpcs")] -pub mod difficulty; - #[cfg(test)] mod tests; diff --git a/zebra-state/src/service/read/difficulty.rs b/zebra-state/src/service/read/difficulty.rs index cbb0e519157..dd42c213656 100644 --- a/zebra-state/src/service/read/difficulty.rs +++ b/zebra-state/src/service/read/difficulty.rs @@ -82,6 +82,7 @@ pub fn get_block_template_chain_info( /// /// Returns the solution rate per second for the current best chain, or `None` if /// the `start_hash` and at least 1 block below it are not found in the chain. +#[allow(unused)] pub fn solution_rate( non_finalized_state: &NonFinalizedState, db: &ZebraDb, diff --git a/zebra-state/src/service/read/tree.rs b/zebra-state/src/service/read/tree.rs index ec610a32987..5a1239c53e7 100644 --- a/zebra-state/src/service/read/tree.rs +++ b/zebra-state/src/service/read/tree.rs @@ -197,7 +197,6 @@ where } } -#[cfg(feature = "getblocktemplate-rpcs")] /// Get the history tree of the provided chain. pub fn history_tree( chain: Option, From f8860a67f490fdd0c2c114db4b3ccfe4b9b6cef2 Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Wed, 12 Feb 2025 23:47:50 +0000 Subject: [PATCH 090/245] zebra-rpc: Correctly map JSON-RPC to/from 2.0 (#9216) * zebra-rpc: Correctly map JSON-RPC to/from 2.0 The existing code was only handling the `lightwalletd` client format which includes a `"jsonrpc": "1.0"` entry, and it was returning responses as JSON-RPC 2.0 regardless of what the client expected. 
* Update zebra-rpc/src/server/http_request_compatibility.rs --------- Co-authored-by: Arya --- .../src/server/http_request_compatibility.rs | 174 ++++++++++++++---- 1 file changed, 142 insertions(+), 32 deletions(-) diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 5eb03b1c4fc..4e7aa3f94cc 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -8,12 +8,13 @@ use std::pin::Pin; use futures::{future, FutureExt}; use http_body_util::BodyExt; -use hyper::{body::Bytes, header}; +use hyper::header; use jsonrpsee::{ core::BoxError, server::{HttpBody, HttpRequest, HttpResponse}, }; use jsonrpsee_types::ErrorObject; +use serde::{Deserialize, Serialize}; use tower::Service; use super::cookie::Cookie; @@ -118,24 +119,55 @@ impl HttpRequestMiddleware { } } - /// Remove any "jsonrpc: 1.0" fields in `data`, and return the resulting string. - pub fn remove_json_1_fields(data: String) -> String { - // Replace "jsonrpc = 1.0": - // - at the start or middle of a list, and - // - at the end of a list; - // with no spaces (lightwalletd format), and spaces after separators (example format). - // - // TODO: if we see errors from lightwalletd, make this replacement more accurate: - // - use a partial JSON fragment parser - // - combine the whole request into a single buffer, and use a JSON parser - // - use a regular expression - // - // We could also just handle the exact lightwalletd format, - // by replacing `{"jsonrpc":"1.0",` with `{"jsonrpc":"2.0`. - data.replace("\"jsonrpc\":\"1.0\",", "\"jsonrpc\":\"2.0\",") - .replace("\"jsonrpc\": \"1.0\",", "\"jsonrpc\": \"2.0\",") - .replace(",\"jsonrpc\":\"1.0\"", ",\"jsonrpc\":\"2.0\"") - .replace(", \"jsonrpc\": \"1.0\"", ", \"jsonrpc\": \"2.0\"") + /// Maps whatever JSON-RPC version the client is using to JSON-RPC 2.0. 
+ async fn request_to_json_rpc_2( + request: HttpRequest, + ) -> (JsonRpcVersion, HttpRequest) { + let (parts, body) = request.into_parts(); + let bytes = body + .collect() + .await + .expect("Failed to collect body data") + .to_bytes(); + let (version, bytes) = + if let Ok(request) = serde_json::from_slice::<'_, JsonRpcRequest>(bytes.as_ref()) { + let version = request.version(); + if matches!(version, JsonRpcVersion::Unknown) { + (version, bytes) + } else { + ( + version, + serde_json::to_vec(&request.into_2()).expect("valid").into(), + ) + } + } else { + (JsonRpcVersion::Unknown, bytes) + }; + ( + version, + HttpRequest::from_parts(parts, HttpBody::from(bytes.as_ref().to_vec())), + ) + } + /// Maps JSON-2.0 to whatever JSON-RPC version the client is using. + async fn response_from_json_rpc_2( + version: JsonRpcVersion, + response: HttpResponse, + ) -> HttpResponse { + let (parts, body) = response.into_parts(); + let bytes = body + .collect() + .await + .expect("Failed to collect body data") + .to_bytes(); + let bytes = + if let Ok(response) = serde_json::from_slice::<'_, JsonRpcResponse>(bytes.as_ref()) { + serde_json::to_vec(&response.into_version(version)) + .expect("valid") + .into() + } else { + bytes + }; + HttpResponse::from_parts(parts, HttpBody::from(bytes.as_ref().to_vec())) } } @@ -203,25 +235,103 @@ where Self::insert_or_replace_content_type_header(request.headers_mut()); let mut service = self.service.clone(); - let (parts, body) = request.into_parts(); async move { - let bytes = body - .collect() - .await - .expect("Failed to collect body data") - .to_bytes(); + let (version, request) = Self::request_to_json_rpc_2(request).await; + let response = service.call(request).await.map_err(Into::into)?; + Ok(Self::response_from_json_rpc_2(version, response).await) + } + .boxed() + } +} + +#[derive(Clone, Copy, Debug)] +enum JsonRpcVersion { + /// bitcoind used a mishmash of 1.0, 1.1, and 2.0 for its JSON-RPC. 
+ Bitcoind, + /// lightwalletd uses the above mishmash, but also breaks spec to include a + /// `"jsonrpc": "1.0"` key. + Lightwalletd, + /// The client is indicating strict 2.0 handling. + TwoPointZero, + /// On parse errors we don't modify anything, and let the `jsonrpsee` crate handle it. + Unknown, +} - let data = String::from_utf8_lossy(bytes.as_ref()).to_string(); +/// A version-agnostic JSON-RPC request. +#[derive(Debug, Deserialize, Serialize)] +struct JsonRpcRequest { + #[serde(skip_serializing_if = "Option::is_none")] + jsonrpc: Option, + method: String, + #[serde(skip_serializing_if = "Option::is_none")] + params: Option, + #[serde(skip_serializing_if = "Option::is_none")] + id: Option, +} - // Fix JSON-RPC 1.0 requests. - let data = Self::remove_json_1_fields(data); - let body = HttpBody::from(Bytes::from(data).as_ref().to_vec()); +impl JsonRpcRequest { + fn version(&self) -> JsonRpcVersion { + match (self.jsonrpc.as_deref(), &self.params, &self.id) { + ( + Some("2.0"), + _, + None + | Some( + serde_json::Value::Null + | serde_json::Value::String(_) + | serde_json::Value::Number(_), + ), + ) => JsonRpcVersion::TwoPointZero, + (Some("1.0"), Some(_), Some(_)) => JsonRpcVersion::Lightwalletd, + (None, Some(_), Some(_)) => JsonRpcVersion::Bitcoind, + _ => JsonRpcVersion::Unknown, + } + } - let request = HttpRequest::from_parts(parts, body); + fn into_2(mut self) -> Self { + self.jsonrpc = Some("2.0".into()); + self + } +} +/// A version-agnostic JSON-RPC response. 
+#[derive(Debug, Deserialize, Serialize)] +struct JsonRpcResponse { + #[serde(skip_serializing_if = "Option::is_none")] + jsonrpc: Option, + id: serde_json::Value, + #[serde(skip_serializing_if = "Option::is_none")] + result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, +} - service.call(request).await.map_err(Into::into) +impl JsonRpcResponse { + fn into_version(mut self, version: JsonRpcVersion) -> Self { + match version { + JsonRpcVersion::Bitcoind => { + self.jsonrpc = None; + self.result = self.result.or(Some(serde_json::Value::Null)); + self.error = self.error.or(Some(serde_json::Value::Null)); + } + JsonRpcVersion::Lightwalletd => { + self.jsonrpc = Some("1.0".into()); + self.result = self.result.or(Some(serde_json::Value::Null)); + self.error = self.error.or(Some(serde_json::Value::Null)); + } + JsonRpcVersion::TwoPointZero => { + // `jsonrpsee` should be returning valid JSON-RPC 2.0 responses. However, + // a valid result of `null` can be parsed into `None` by this parser, so + // we map the result explicitly to `Null` when there is no error. 
+ assert_eq!(self.jsonrpc.as_deref(), Some("2.0")); + if self.error.is_none() { + self.result = self.result.or(Some(serde_json::Value::Null)); + } else { + assert!(self.result.is_none()); + } + } + JsonRpcVersion::Unknown => (), } - .boxed() + self } } From 4132c0e4e9452281ce48fd8c62f9f5a83e469bb6 Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 13 Feb 2025 10:15:32 +0100 Subject: [PATCH 091/245] ci: Refactor Dockerfile & entrypoint (#8923) * Refactor formatting & docs * Refactor the `runtime` stage in Dockerfile * Remove unused code from `entrypoint.sh` * Simplify `entrypoint.sh` setup * Revise docs & formatting * Adjust default values for env vars * Bump Rust v from 1.79 to 1.81 in Dockerfile * Refactor `entrypoint.sh` * Refactor `Dockerfile` * Add TODOs for monitoring stage to Dockerfile * Refactor `Dockerfile` * Add TODOs for monitoring stage to Dockerfile * Fix a typo * Allow running `zebrad` in test mode * Allow custom config for `zebrad` in test mode * Remove `curl` from the `runtime` Docker image * Remove redundant echos * Remove a malfunctioning CD test The test was using a custom config file set in `test_variables`. However, the file was not included in the Docker image, and the entrypoint script created a new, default one under the original file's path. Zebra then loaded this new file, and the test passed because the pattern in `grep_patterns` matched Zebra's output containing the original path, even though the config file was different. 
* Remove a redundant CI test * Remove all packages from the `runtime` stage * Docs cosmetics * Clarify docs * Bump Rust version * Remove a security note * Explicitly specify network cache dir * Explicitly specify cookie dir * Set UID, GID and home dir for the `zebra` user * Set a working dir for the `zebra` user * Don't remove `FEATURES` * Try re-introducing the `testnet-conf` check * `ZEBRA_CACHED_STATE_DIR` -> `ZEBRA_CACHE_DIR` This dir doesn't hold only the state cache anymore, but also the cache for network peers, and the cookie file. * Refactor the dir structure * Check that `ZEBRA_CONF_PATH` exists in the image * Improve the check for `ZEBRA_CONF_PATH` * Use different flag in the `ZEBRA_CONF_PATH` check * Simplify the `ZEBRA_CONF_PATH` check * Fix spelling * Comment out the `testnet-conf` CI check * Add commented out `test-zebra-conf-path` CI check * Reintroduce `testnet-conf` CI check * Update the `custom-conf` CI check * Add `v2.1.0.toml` conf file * Refine the `v2.1.0.toml` conf file * Remove `ZEBRA_LISTEN_ADDR` from the entrypoint * Remove `ZEBRA_CHECKPOINT_SYNC` from the entrypoint * Stop supporting configuration of the RPC port * Add default conf file * Prepare Zebra's config in the entrypoint script * Remove unneeded packages from the `deps` target * Docs cosmetics * Use only `$FEATURES` in entrypoint * Simplify handling of Rust features * Add a TODO * Add CI debug statements * Don't require test vars in conf test * Reintroduce `protoc` * Remove `-e NETWORK` * Remove `ZEBRA_FORCE_USE_COLOR=1` * Remove `ZEBRA_CACHE_DIR=/var/cache/zebrad-cache` * Reintroduce the "custom-conf" test * Set up test env the same way as prod * Don't repeatedly check for conf file in entrypoint * Simplify file ownership in Dockerfile * Fix checkpoint tests in entrypoint * Fix Zebra config CI tests * `LIGHTWALLETD_DATA_DIR` -> `LWD_CACHE_DIR` * Add config for `LWD_CACHE_DIR` to Dockerfile * `/var/cache/zebrad-cache` -> `~/.cache/zebra` * `var/cache/lwd-cache` -> 
`/home/zebra/.cache/lwd` * Remove `LOG_COLOR=false` from GCP setup * Don't specify `LWD_CACHE_DIR` in CI tests * Don't switch to `zebra` user for tests in Docker * Join "experimental" and "all" tests in CI * Remove outdated docs * Refactor tests with fake activation heights * Fix tests for scanner --- .dockerignore | 1 + .github/workflows/README.md | 1 - .github/workflows/cd-deploy-nodes-gcp.yml | 146 ++--- .github/workflows/ci-tests.yml | 1 + .github/workflows/sub-build-docker-image.yml | 24 +- .../sub-ci-integration-tests-gcp.yml | 40 +- .../workflows/sub-ci-unit-tests-docker.yml | 83 ++- .../sub-deploy-integration-tests-gcp.yml | 6 +- .github/workflows/sub-test-zebra-config.yml | 9 +- book/src/user/docker.md | 12 +- book/src/user/mining-docker.md | 4 +- docker/Dockerfile | 204 ++++--- docker/default_zebra_config.toml | 59 ++ docker/docker-compose.test.yml | 6 +- docker/docker-compose.yml | 2 +- docker/entrypoint.sh | 538 ++++++++---------- zebra-scan/tests/scan_task_commands.rs | 6 +- zebra-scan/tests/scanner.rs | 8 +- zebrad/tests/acceptance.rs | 68 +-- zebrad/tests/common/cached_state.rs | 2 +- zebrad/tests/common/configs/v2.1.0.toml | 85 +++ zebrad/tests/common/launch.rs | 4 +- zebrad/tests/common/lightwalletd.rs | 4 +- zebrad/tests/common/sync.rs | 4 +- zebrad/tests/common/test_type.rs | 18 +- 25 files changed, 688 insertions(+), 647 deletions(-) create mode 100644 docker/default_zebra_config.toml create mode 100644 zebrad/tests/common/configs/v2.1.0.toml diff --git a/.dockerignore b/.dockerignore index 9d62f3c5c13..b26a7d4f413 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,3 +21,4 @@ !zebra-* !zebrad !docker/entrypoint.sh +!docker/default_zebra_config.toml diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 68d83f0fd1e..e45be336730 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -223,7 +223,6 @@ docker run --rm -e TEST_LWD_INTEGRATION=1 zebra-tests #### Test Categories - Full suite 
(`RUN_ALL_TESTS`) -- Experimental features (`RUN_ALL_EXPERIMENTAL_TESTS`) - Integration tests (`TEST_LWD_INTEGRATION`) - Network sync (`TEST_ZEBRA_EMPTY_SYNC`, `TEST_UPDATE_SYNC`) - State management (`TEST_DISK_REBUILD`) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 315a7dc4464..0f015429d62 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -1,6 +1,6 @@ # Google Cloud node deployments and tests that run when Rust code or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. -# (External PRs are tested/deployed by GitHub's Merge Queue.) +# (External PRs are tested/deployed by GitHub's Merge Queue.) # # 1. `versioning`: Extracts the major version from the release semver. Useful for segregating instances based on major versions. # 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git. @@ -28,13 +28,13 @@ concurrency: on: merge_group: - types: [ checks_requested ] + types: [checks_requested] workflow_dispatch: inputs: network: default: Mainnet - description: 'Network to deploy: Mainnet or Testnet' + description: "Network to deploy: Mainnet or Testnet" required: true type: choice options: @@ -42,7 +42,7 @@ on: - Testnet cached_disk_type: default: tip - description: 'Type of cached disk to use' + description: "Type of cached disk to use" required: true type: choice options: @@ -50,63 +50,63 @@ on: - checkpoint prefer_main_cached_state: default: false - description: 'Prefer cached state from the main branch' + description: "Prefer cached state from the main branch" required: false type: boolean need_cached_disk: default: true - description: 'Use a cached state disk' + description: "Use a cached state disk" required: false type: boolean no_cache: - description: 'Disable the Docker cache for this build' + description: "Disable the Docker cache for this build" required: false type: boolean 
default: false log_file: - default: '' - description: 'Log to a file path rather than standard output' + default: "" + description: "Log to a file path rather than standard output" push: - # Skip main branch updates where Rust code and dependencies aren't modified. - branches: - - main - paths: - # code and tests - - '**/*.rs' - # hard-coded checkpoints and proptest regressions - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/cd-deploy-nodes-gcp.yml' - - '.github/workflows/sub-build-docker-image.yml' + # Skip main branch updates where Rust code and dependencies aren't modified. + branches: + - main + paths: + # code and tests + - "**/*.rs" + # hard-coded checkpoints and proptest regressions + - "**/*.txt" + # dependencies + - "**/Cargo.toml" + - "**/Cargo.lock" + # configuration files + - ".cargo/config.toml" + - "**/clippy.toml" + # workflow definitions + - "docker/**" + - ".dockerignore" + - ".github/workflows/cd-deploy-nodes-gcp.yml" + - ".github/workflows/sub-build-docker-image.yml" # Only runs the Docker image tests, doesn't deploy any instances pull_request: # Skip PRs where Rust code and dependencies aren't modified. 
paths: # code and tests - - '**/*.rs' + - "**/*.rs" # hard-coded checkpoints and proptest regressions - - '**/*.txt' + - "**/*.txt" # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' + - "**/Cargo.toml" + - "**/Cargo.lock" # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' + - ".cargo/config.toml" + - "**/clippy.toml" # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/cd-deploy-nodes-gcp.yml' - - '.github/workflows/sub-build-docker-image.yml' + - "docker/**" + - ".dockerignore" + - ".github/workflows/cd-deploy-nodes-gcp.yml" + - ".github/workflows/sub-build-docker-image.yml" release: types: @@ -160,6 +160,17 @@ jobs: disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} + # Test that Zebra works using $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test CD custom Docker config file + needs: build + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: "custom-conf" + docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v2.1.0.toml"' + grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v2.1.0.toml"' + # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git # @@ -174,6 +185,7 @@ jobs: image_name: zebrad no_cache: ${{ inputs.no_cache || false }} rust_log: info + features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit @@ -183,11 +195,9 @@ jobs: needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'default-conf' + test_id: "default-conf" docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} 
grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK' - network: 'Mainnet' # Test reconfiguring the docker image for testnet. test-configuration-file-testnet: @@ -196,23 +206,10 @@ jobs: # Make sure Zebra can sync the genesis block on testnet uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'testnet-conf' + test_id: "testnet-conf" docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK' - network: 'Testnet' - - # Test that Zebra works using $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test CD custom Docker config file - needs: build - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'custom-conf' - docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: "-e NETWORK=Testnet" # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. 
@@ -234,15 +231,22 @@ jobs: matrix: network: [Mainnet, Testnet] name: Deploy ${{ matrix.network }} nodes - needs: [ build, versioning, test-configuration-file, test-zebra-conf-path, get-disk-name ] + needs: + [ + build, + versioning, + test-configuration-file, + test-zebra-conf-path, + get-disk-name, + ] runs-on: ubuntu-latest timeout-minutes: 60 env: CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} permissions: - contents: 'read' - id-token: 'write' + contents: "read" + id-token: "write" if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} steps: @@ -271,8 +275,8 @@ jobs: id: auth uses: google-github-actions/auth@v2.1.8 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.4 @@ -301,11 +305,11 @@ jobs: --image-family=cos-stable \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --create-disk="${DISK_PARAMS}" \ - --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=${DISK_NAME},mode=rw \ + --container-mount-disk=mount-path='/home/zebra/.cache/zebra',name=${DISK_NAME},mode=rw \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ + --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ --scopes cloud-platform \ --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ @@ -349,14 +353,14 
@@ jobs: # Note: this instances are not automatically replaced or deleted deploy-instance: name: Deploy single ${{ inputs.network }} instance - needs: [ build, test-configuration-file, test-zebra-conf-path, get-disk-name ] + needs: [build, test-configuration-file, test-zebra-conf-path, get-disk-name] runs-on: ubuntu-latest timeout-minutes: 30 env: CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} permissions: - contents: 'read' - id-token: 'write' + contents: "read" + id-token: "write" # Run even if we don't need a cached disk, but only when triggered by a workflow_dispatch if: ${{ !failure() && github.event_name == 'workflow_dispatch' }} @@ -386,8 +390,8 @@ jobs: id: auth uses: google-github-actions/auth@v2.1.8 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.4 @@ -413,11 +417,11 @@ jobs: --image-family=cos-stable \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --create-disk="${DISK_PARAMS}" \ - --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=${DISK_NAME},mode=rw \ + --container-mount-disk=mount-path='/home/zebra/.cache/zebra',name=${DISK_NAME},mode=rw \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ + --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ --scopes cloud-platform \ --metadata google-logging-enabled=true,google-monitoring-enabled=true \ @@ -428,7 +432,7 @@ jobs: failure-issue: name: Open or update issues for release failures # When a new job is added 
to this workflow, add it to this list. - needs: [ versioning, build, deploy-nodes, deploy-instance ] + needs: [versioning, build, deploy-nodes, deploy-instance] # Only open tickets for failed or cancelled jobs that are not coming from PRs. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml index 517ba4151f2..ffb886b4512 100644 --- a/.github/workflows/ci-tests.yml +++ b/.github/workflows/ci-tests.yml @@ -133,6 +133,7 @@ jobs: rust_backtrace: full rust_lib_backtrace: full rust_log: info + features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index c5142babe31..1ec1b88c0d2 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -32,14 +32,9 @@ on: rust_log: required: false type: string - # defaults to: vars.RUST_PROD_FEATURES features: required: false type: string - # defaults to: vars.RUST_TEST_FEATURES (and entrypoint.sh adds vars.RUST_PROD_FEATURES) - test_features: - required: false - type: string latest_tag: required: false type: boolean @@ -48,20 +43,18 @@ on: required: false type: string no_cache: - description: 'Disable the Docker cache for this build' + description: "Disable the Docker cache for this build" required: false type: boolean default: false outputs: image_digest: - description: 'The image digest to be used on a caller workflow' + description: "The image digest to be used on a caller workflow" value: ${{ jobs.build.outputs.image_digest }} - env: - FEATURES: ${{ inputs.features || vars.RUST_PROD_FEATURES }} - TEST_FEATURES: ${{ inputs.test_features || 
vars.RUST_TEST_FEATURES }} + FEATURES: ${{ inputs.features }} RUST_LOG: ${{ inputs.rust_log || vars.RUST_LOG }} CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} @@ -75,8 +68,8 @@ jobs: image_digest: ${{ steps.docker_build.outputs.digest }} image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} permissions: - contents: 'read' - id-token: 'write' + contents: "read" + id-token: "write" pull-requests: write # for `docker-scout` to be able to write the comment env: DOCKER_BUILD_SUMMARY: ${{ vars.DOCKER_BUILD_SUMMARY }} @@ -129,9 +122,9 @@ jobs: id: auth uses: google-github-actions/auth@v2.1.8 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_ARTIFACTS_SA }}' - token_format: 'access_token' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_ARTIFACTS_SA }}" + token_format: "access_token" # Some builds might take over an hour, and Google's default lifetime duration for # an access token is 1 hour (3600s). We increase this to 3 hours (10800s) # as some builds take over an hour. @@ -173,7 +166,6 @@ jobs: SHORT_SHA=${{ env.GITHUB_SHA_SHORT }} RUST_LOG=${{ env.RUST_LOG }} FEATURES=${{ env.FEATURES }} - TEST_FEATURES=${{ env.TEST_FEATURES }} push: true # It's recommended to build images with max-level provenance attestations # https://docs.docker.com/build/ci/github-actions/attestations/ diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index d4e4bd506d4..438b32ac235 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -32,7 +32,7 @@ on: #! The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and #! `ci-integration-tests-gcp.patch-external.yml` must be kept in sync. #! -#! The test variables ZEBRA_CACHED_STATE_DIR and LIGHTWALLETD_DATA_DIR used in some steps are set in the +#! 
The test variables ZEBRA_CACHE_DIR and LWD_CACHE_DIR used in some steps are set in the #! `sub-deploy-integration-tests-gcp.yml` workflow file as inputs. If modified in this file, they must #! also be updated in the `sub-deploy-integration-tests-gcp.yml` file. jobs: @@ -79,7 +79,7 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -108,7 +108,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -138,7 +138,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? 
- test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -178,7 +178,7 @@ jobs: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1" needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true @@ -209,7 +209,7 @@ jobs: test_id: checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1" needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false @@ -241,7 +241,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. 
@@ -285,7 +285,7 @@ jobs: app_name: zebrad test_id: checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1" network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed @@ -316,7 +316,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -351,7 +351,7 @@ jobs: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" needs_zebra_state: true needs_lwd_state: true saves_to_disk: true @@ -379,7 +379,7 @@ jobs: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + 
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1" needs_zebra_state: true saves_to_disk: false secrets: inherit @@ -401,7 +401,7 @@ jobs: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -424,7 +424,7 @@ jobs: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -451,7 +451,7 @@ jobs: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -474,7 +474,7 @@ jobs: app_name: zebrad test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server - test_variables: "-e 
NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -488,16 +488,16 @@ jobs: # # If the state version has changed, waits for the new cached states to be created. # Otherwise, if the state rebuild was skipped, runs immediately after the build job. - scan-task-commands-test: - name: scan task commands + test-scanner: + name: Scanner tests needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebra-scan - test_id: scan-task-commands - test_description: Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running. - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_TASK_COMMANDS=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + test_id: scanner-tests + test_description: Tests the scanner. 
+ test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCANNER=1" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index 0847806f610..ab4d9a97f1d 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -20,7 +20,7 @@ on: type: string network: type: string - default: 'Mainnet' + default: "Mainnet" no_cache: type: boolean default: false @@ -32,14 +32,7 @@ env: COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} -#! IMPORTANT -#! -#! The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and -#! `ci-unit-tests-docker.patch-external.yml` must be kept in sync. jobs: - # Run all the zebra tests, including tests that are ignored by default. - # - # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. test-all: name: Test all timeout-minutes: 180 @@ -52,25 +45,19 @@ jobs: with: short-length: 7 - # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. - # - # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. - # - - name: Run zebrad tests - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e RUN_ALL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - - # Run unit, basic acceptance tests, and ignored tests with experimental features. + # Run unit, basic acceptance tests, and ignored tests, only showing + # command output if the test fails. 
# - - name: Run zebrad tests with experimental features - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + # If some tests hang, add "-- --nocapture" for just that test, or for all + # the tests. + - name: Run all tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e RUN_ALL_EXPERIMENTAL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run -t \ + -e RUN_ALL_TESTS=1 \ + -e FEATURES="journald prometheus filter-reload" \ + -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Run state tests with fake activation heights. # @@ -98,7 +85,10 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run -t \ + -e TEST_FAKE_ACTIVATION_HEIGHTS=1 \ + -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. test-empty-sync: @@ -118,7 +108,7 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. 
test-lightwalletd-integration: @@ -138,42 +128,37 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 -e TEST_LWD_INTEGRATION=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty -e ZEBRA_TEST_LIGHTWALLETD=1 -e TEST_LWD_INTEGRATION=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: name: Test CI default Docker config file uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'default-conf' + test_id: "default-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK' - network: 'Mainnet' - # Test reconfiguring the docker image for tesnet. + # Test reconfiguring Zebra for Testnet in Docker. 
test-configuration-file-testnet: - name: Test CI testnet Docker config file + name: Test enabling Testnet in Docker # Make sure Zebra can sync the genesis block on testnet uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'testnet-conf' + test_id: "testnet-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - # TODO: improve the entrypoint to avoid using `ENTRYPOINT_FEATURES=""` - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml" -e ENTRYPOINT_FEATURES=""' - network: 'Testnet' + test_variables: "-e NETWORK=Testnet" # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test CI custom Docker config file uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'custom-conf' + test_id: "custom-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v2.1.0.toml"' + test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v2.1.0.toml"' failure-issue: name: Open or update issues for main branch failures @@ -181,7 +166,15 @@ jobs: # # This list is for reliable tests that are run on the `main` branch. # Testnet jobs are not in this list, because we expect testnet to fail occasionally. 
- needs: [ test-all, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path ] + needs: + [ + test-all, + test-fake-activation-heights, + test-empty-sync, + test-lightwalletd-integration, + test-configuration-file, + test-zebra-conf-path, + ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()? @@ -202,7 +195,7 @@ jobs: if: contains(github.event.pull_request.labels.*.name, 'A-release') runs-on: ubuntu-latest steps: - - name: Run check_no_git_refs_in_cargo_lock - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e NETWORK -e RUN_CHECK_NO_GIT_REFS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + - name: Run check_no_git_refs_in_cargo_lock + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty -e RUN_CHECK_NO_GIT_REFS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 1a8854febd0..b054814355f 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -38,12 +38,12 @@ on: zebra_state_dir: required: false type: string - default: '/var/cache/zebrad-cache' + default: '/home/zebra/.cache/zebra' description: 'Zebra cached state directory and input image prefix to search in GCP' lwd_state_dir: required: false type: string - default: '/var/cache/lwd-cache' + default: '/home/zebra/.cache/lwd' description: 'Lightwalletd cached state directory and input image prefix to search in GCP' disk_prefix: 
required: false @@ -250,7 +250,7 @@ jobs: # `ci-unit-tests-docker.yml` to be able to run this tests. # # Although we're mounting the disk root to both directories, Zebra and Lightwalletd, tests - # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, + # will only respect the values from $ZEBRA_CACHE_DIR and $LWD_CACHE_DIR, # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 69990bdb3e1..d72f35bf4e4 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -23,14 +23,9 @@ on: type: string description: 'Docker image to test' test_variables: - required: true - type: string - description: 'Environment variables used to select and configure the test' - network: required: false type: string - default: Mainnet - description: 'Zcash network to test against' + description: 'Environment variables used to select and configure the test' jobs: test-docker-config: @@ -87,5 +82,3 @@ jobs: # Handle other potential errors here echo "An error occurred while processing the logs."; exit 1; - env: - NETWORK: '${{ inputs.network }}' diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 1349ee6b322..7709bdca4e7 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -27,7 +27,7 @@ docker volume create zebrad-cache docker run -d --platform linux/amd64 \ --restart unless-stopped \ --env-file .env \ - --mount type=volume,source=zebrad-cache,target=/var/cache/zebrad-cache \ + --mount type=volume,source=zebrad-cache,target=/home/zebra/.cache/zebra \ -p 8233:8233 \ --memory 16G \ --cpus 4 \ @@ -78,7 +78,7 @@ Based on our actual `entrypoint.sh` script, the following configuration file wil network = "Mainnet" listen_addr = "0.0.0.0" [state] -cache_dir = 
"/var/cache/zebrad-cache" +cache_dir = "/home/zebra/.cache/zebra" [metrics] endpoint_addr = "127.0.0.1:9999" ``` @@ -129,7 +129,6 @@ This approach ensures you can run the same tests locally that are run in CI, pro #### Configuration - `FEATURES`: Specifies the features to build `zebrad` with. Example: `"default-release-binaries getblocktemplate-rpcs"` -- `TEST_FEATURES`: Specifies the features for tests. Example: `"lightwalletd-grpc-tests zebra-checkpoints"` #### Logging @@ -140,9 +139,7 @@ This approach ensures you can run the same tests locally that are run in CI, pro #### Tests -- `TEST_FEATURES`: Specifies the features for tests. Example: `"lightwalletd-grpc-tests zebra-checkpoints"` - `ZEBRA_SKIP_IPV6_TESTS`: Skips IPv6 tests. Example: `1` -- `ENTRYPOINT_FEATURES`: Overrides the specific features used to run tests in `entrypoint.sh`. Example: `"default-release-binaries lightwalletd-grpc-tests"` #### CI/CD @@ -154,14 +151,11 @@ This approach ensures you can run the same tests locally that are run in CI, pro #### Zebra Configuration -- `ZEBRA_CHECKPOINT_SYNC`: Enables or disables checkpoint sync. Example: `true` -- `ZEBRA_LISTEN_ADDR`: Address for Zebra to listen on. Example: `"0.0.0.0"` -- `ZEBRA_CACHED_STATE_DIR`: Directory for cached state. Example: `"/var/cache/zebrad-cache"` +- `ZEBRA_CACHE_DIR`: Directory for cached state. Example: `"/home/zebra/.cache/zebra"` #### Mining Configuration - `RPC_LISTEN_ADDR`: Address for RPC to listen on. Example: `"0.0.0.0"` -- `RPC_PORT`: Port for RPC. Example: `8232` - `MINER_ADDRESS`: Address for the miner. Example: `"t1XhG6pT9xRqRQn3BHP7heUou1RuYrbcrCc"` #### Other Configuration diff --git a/book/src/user/mining-docker.md b/book/src/user/mining-docker.md index 002848c0ca3..96f47918763 100644 --- a/book/src/user/mining-docker.md +++ b/book/src/user/mining-docker.md @@ -20,9 +20,7 @@ meaning it is a Mainnet P2PKH address. Please remember to set your own address for the rewards. 
The port we mapped between the container and the host with the `-p` flag in the -example above is Zebra's default Mainnet RPC port. If you want to use a -different one, you can specify it in the `RPC_PORT` environment variable, -similarly to `MINER_ADDRESS`, and then map it with the Docker's `-p` flag. +example above is Zebra's default Mainnet RPC port. Instead of listing the environment variables on the command line, you can use Docker's `--env-file` flag to specify a file containing the variables. You diff --git a/docker/Dockerfile b/docker/Dockerfile index c441ce44e22..320ff15aec4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -3,25 +3,18 @@ # If you want to include a file in the Docker image, add it to .dockerignore. # -# We are using 4 stages: -# - deps: install build dependencies and sets the needed variables +# We are using 4 (TODO: 5) stages: +# - deps: installs build dependencies and sets default values # - tests: builds tests binaries # - release: builds release binaries # - runtime: runs the release binaries +# - TODO: Add a `monitoring` stage # # We first set default values for build arguments used across the stages. # Each stage must define the build arguments (ARGs) it uses. -# -# Build zebrad with these features -# -# Keep these argument defaults in sync with GitHub vars.RUST_PROD_FEATURES and vars.RUST_TEST_FEATURES -# https://github.com/ZcashFoundation/zebra/settings/variables/actions -ARG FEATURES="default-release-binaries" -ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" -ARG EXPERIMENTAL_FEATURES="" - -ARG APP_HOME="/opt/zebrad" -ARG RUST_VERSION=1.82.0 + +ARG RUST_VERSION=1.84.0 + # In this stage we download all system requirements to build the project # # It also captures all the build arguments to be used as environment variables. 
@@ -29,25 +22,14 @@ ARG RUST_VERSION=1.82.0 FROM rust:${RUST_VERSION}-bookworm AS deps SHELL ["/bin/bash", "-xo", "pipefail", "-c"] -# Set the default path for the zebrad binary -ARG APP_HOME -ENV APP_HOME=${APP_HOME} -WORKDIR ${APP_HOME} - -# Install zebra build deps and Dockerfile deps +# Install zebra build deps RUN apt-get -qq update && \ apt-get -qq install -y --no-install-recommends \ - llvm \ libclang-dev \ - clang \ - ca-certificates \ protobuf-compiler \ - rocksdb-tools \ && rm -rf /var/lib/apt/lists/* /tmp/* # Build arguments and variables set for tracelog levels and debug information -# -# We set defaults to all variables. ARG RUST_LOG ENV RUST_LOG=${RUST_LOG:-info} @@ -65,38 +47,42 @@ ARG SHORT_SHA # https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 ENV SHORT_SHA=${SHORT_SHA:-} -ENV CARGO_HOME="${APP_HOME}/.cargo/" - -# Copy the entrypoint script to be used on both images -COPY ./docker/entrypoint.sh /etc/zebrad/entrypoint.sh - -# In this stage we build tests (without running then) +# This stage builds tests without running them. # # We also download needed dependencies for tests to work, from other images. # An entrypoint.sh is only available in this step for easier test handling with variables. FROM deps AS tests +ARG FEATURES +ENV FEATURES=${FEATURES} + # Skip IPv6 tests by default, as some CI environment don't have IPv6 available ARG ZEBRA_SKIP_IPV6_TESTS ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} -# Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, -# separately from the test and production image builds. -ARG FEATURES -ARG TEST_FEATURES -ARG EXPERIMENTAL_FEATURES -# TODO: add empty $EXPERIMENTAL_FEATURES when we can avoid adding an extra space to the end of the string -ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" +# Set up the test environment the same way the production environment is. 
This +# is not very DRY as the same code repeats for the `runtime` target below, but I +# didn't find a suitable way to share the setup between the two targets. + +ENV UID=101 +ENV GID=${UID} +ENV USER="zebra" +ENV HOME="/home/${USER}" +ENV CARGO_HOME="${HOME}/.cargo/" + +RUN adduser --system --gid ${GID} --uid ${UID} --home ${HOME} ${USER} + +WORKDIR ${HOME} # Build Zebra test binaries, but don't run them # Leverage a cache mount to /usr/local/cargo/registry/ # for downloaded dependencies, a cache mount to /usr/local/cargo/git/db -# for git repository dependencies, and a cache mount to ${APP_HOME}/target/ for +# for git repository dependencies, and a cache mount to ${HOME}/target/ for # compiled dependencies which will speed up subsequent builds. # Leverage a bind mount to each crate directory to avoid having to copy the # source code into the container. Once built, copy the executable to an -# output directory before the cache mounted ${APP_HOME}/target/ is unmounted. +# output directory before the cache mounted ${HOME}/target/ is unmounted. 
RUN --mount=type=bind,source=zebrad,target=zebrad \ --mount=type=bind,source=zebra-chain,target=zebra-chain \ --mount=type=bind,source=zebra-network,target=zebra-network \ @@ -113,25 +99,38 @@ RUN --mount=type=bind,source=zebrad,target=zebrad \ --mount=type=bind,source=tower-fallback,target=tower-fallback \ --mount=type=bind,source=Cargo.toml,target=Cargo.toml \ --mount=type=bind,source=Cargo.lock,target=Cargo.lock \ - --mount=type=cache,target=${APP_HOME}/target/ \ + --mount=type=cache,target=${HOME}/target/ \ --mount=type=cache,target=/usr/local/cargo/git/db \ --mount=type=cache,target=/usr/local/cargo/registry/ \ -cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run && \ -cp ${APP_HOME}/target/release/zebrad /usr/local/bin && \ -cp ${APP_HOME}/target/release/zebra-checkpoints /usr/local/bin + cargo test --locked --release --workspace --no-run \ + --features "${FEATURES} zebra-checkpoints" && \ + cp ${HOME}/target/release/zebrad /usr/local/bin && \ + cp ${HOME}/target/release/zebra-checkpoints /usr/local/bin # Copy the lightwalletd binary and source files to be able to run tests COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/ -COPY ./ ./ -# Entrypoint environment variables -ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} -# We repeat the ARGs here, so they are available in the entrypoint.sh script for $RUN_ALL_EXPERIMENTAL_TESTS -ARG EXPERIMENTAL_FEATURES="journald prometheus filter-reload" -ENV ENTRYPOINT_FEATURES_EXPERIMENTAL="${ENTRYPOINT_FEATURES} ${EXPERIMENTAL_FEATURES}" +# Use the same default config as in the production environment. +ENV ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" +COPY --chown=${UID}:${GID} ./docker/default_zebra_config.toml ${ZEBRA_CONF_PATH} + +ARG LWD_CACHE_DIR +ENV LWD_CACHE_DIR="${HOME}/.cache/lwd" +RUN mkdir -p ${LWD_CACHE_DIR} +RUN chown -R ${UID}:${GID} ${LWD_CACHE_DIR} + +# Use the same cache dir as in the production environment. 
+ARG ZEBRA_CACHE_DIR +ENV ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" +RUN mkdir -p ${ZEBRA_CACHE_DIR} +RUN chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} -# By default, runs the entrypoint tests specified by the environmental variables (if any are set) -ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ] +COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh +COPY ./ ${HOME} + +RUN chown -R ${UID}:${GID} ${HOME} + +ENTRYPOINT [ "entrypoint.sh", "test" ] # In this stage we build a release (generate the zebrad binary) # @@ -161,68 +160,67 @@ RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ --mount=type=cache,target=${APP_HOME}/target/ \ --mount=type=cache,target=/usr/local/cargo/git/db \ --mount=type=cache,target=/usr/local/cargo/registry/ \ -cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \ -cp ${APP_HOME}/target/release/zebrad /usr/local/bin + cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \ + cp ${APP_HOME}/target/release/zebrad /usr/local/bin -# This stage is only used when deploying nodes or when only the resulting zebrad binary is needed -# -# To save space, this step starts from scratch using debian, and only adds the resulting -# binary from the `release` stage +# This step starts from scratch using Debian and only adds the resulting binary +# from the `release` stage. FROM debian:bookworm-slim AS runtime -# Set the default path for the zebrad binary -ARG APP_HOME -ENV APP_HOME=${APP_HOME} -WORKDIR ${APP_HOME} - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - rocksdb-tools \ - gosu \ - && rm -rf /var/lib/apt/lists/* /tmp/* +COPY --from=release /usr/local/bin/zebrad /usr/local/bin/ +COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh -# Create a non-privileged user that the app will run under. 
-# Running as root inside the container is running as root in the Docker host -# If an attacker manages to break out of the container, they will have root access to the host -# See https://docs.docker.com/go/dockerfile-user-best-practices/ -ARG USER=zebra +ARG FEATURES +ENV FEATURES=${FEATURES} + +# Create a non-privileged system user for running `zebrad`. +ARG USER="zebra" ENV USER=${USER} -ARG UID=10001 + +# System users have no home dirs, but we set one for users' convenience. +ARG HOME="/home/zebra" +WORKDIR ${HOME} + +# System UIDs should be set according to +# https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/uidrange.html. +# +# In Debian, the default dynamic range for system UIDs is defined by +# [FIRST_SYSTEM_UID, LAST_SYSTEM_UID], which is set to [100, 999] in +# `etc/adduser.conf`: +# https://manpages.debian.org/bullseye/adduser/adduser.8.en.html +# +# Debian assigns GID 100 to group `users`, so we set UID = GID = 101 as the +# default value. +ARG UID=101 ENV UID=${UID} -ARG GID=10001 +ARG GID=${UID} ENV GID=${GID} -RUN addgroup --system --gid ${GID} ${USER} \ - && adduser \ - --system \ - --disabled-login \ - --shell /bin/bash \ - --home ${APP_HOME} \ - --uid "${UID}" \ - --gid "${GID}" \ - ${USER} - -# Config settings for zebrad -ARG FEATURES -ENV FEATURES=${FEATURES} +RUN addgroup --system --gid ${GID} ${USER} +RUN adduser --system --gid ${GID} --uid ${UID} --home ${HOME} ${USER} -# Path and name of the config file -# These are set to a default value when not defined in the environment -ENV ZEBRA_CONF_DIR=${ZEBRA_CONF_DIR:-/etc/zebrad} -ENV ZEBRA_CONF_FILE=${ZEBRA_CONF_FILE:-zebrad.toml} +# We set the default locations of the conf and cache dirs according to the XDG +# spec: https://specifications.freedesktop.org/basedir-spec/latest/ -RUN mkdir -p ${ZEBRA_CONF_DIR} && chown ${UID}:${UID} ${ZEBRA_CONF_DIR} \ - && chown ${UID}:${UID} ${APP_HOME} +ARG ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" +ENV 
ZEBRA_CONF_PATH=${ZEBRA_CONF_PATH} +COPY --chown=${UID}:${GID} ./docker/default_zebra_config.toml ${ZEBRA_CONF_PATH} -COPY --from=release /usr/local/bin/zebrad /usr/local/bin -COPY --from=release /etc/zebrad/entrypoint.sh /etc/zebrad +ARG ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" +ENV ZEBRA_CACHE_DIR=${ZEBRA_CACHE_DIR} +RUN mkdir -p ${ZEBRA_CACHE_DIR} && chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} -# Expose configured ports -EXPOSE 8233 18233 +RUN chown -R ${UID}:${GID} ${HOME} +USER $USER -# Update the config file based on the Docker run variables, -# and launch zebrad with it -ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ] +ENTRYPOINT [ "entrypoint.sh" ] CMD ["zebrad"] + +# TODO: Add a `monitoring` stage +# +# This stage will be based on `runtime`, and initially: +# +# - run `zebrad` on Testnet +# - with mining enabled using S-nomp and `nheqminer`. +# +# We can add further functionality to this stage for further purposes. diff --git a/docker/default_zebra_config.toml b/docker/default_zebra_config.toml new file mode 100644 index 00000000000..d31a702ade4 --- /dev/null +++ b/docker/default_zebra_config.toml @@ -0,0 +1,59 @@ +# Default configuration file for running Zebra in Docker. +# +# This file is tailored for Zebra running in Docker. Do not use it with Zebra +# running directly on your localhost as some fields are adjusted specifically +# for Docker. +# +# You can use this file as a starting point for custom configuration. If you +# don't specify a field, Zebra will use its default value. +# +# The config format, including a complete list of sections and fields, is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html + +[network] +network = "Mainnet" +listen_addr = "0.0.0.0" +cache_dir = "/home/zebra/.cache/zebra" + +[rpc] +# The RPC server is disabled by default. To enable it, uncomment one of the +# lines below and alternatively set your own port. 
+ +# listen_addr = "0.0.0.0:8232" # Mainnet +# listen_addr = "0.0.0.0:18232" # Testnet + +cookie_dir = "/home/zebra/.cache/zebra" + +[state] +cache_dir = "/home/zebra/.cache/zebra" + +[tracing] +# Zebra uses colored output if it is attached to a terminal. To disable colors, +# set `use_color` to false. To enable colors even for non-terminal outputs, set +# `use_color` to `true` and uncomment the line below. + +# force_use_color = true +use_color = true + +# Logging to a file is disabled by default. To enable it, uncomment the line +# below and alternatively set your own path. + +# log_file = "/home/zebra/.local/state/zebrad.log" + +# Sending tracing events to systemd-journald is disabled by default. To enable +# it, uncomment the line below. + +# use_journald = true + +[metrics] +# Metrics via Prometheus are disabled by default. To enable them, uncomment the +# line below and alternatively set your own port. + +# endpoint_addr = "0.0.0.0:9999" # Prometheus + +[mining] +# If you are going to use Zebra as a backend for a mining pool, set your mining +# address. 
+ +# miner_address = "your_mining_address" diff --git a/docker/docker-compose.test.yml b/docker/docker-compose.test.yml index fac94e3f4db..b5730359bcb 100644 --- a/docker/docker-compose.test.yml +++ b/docker/docker-compose.test.yml @@ -14,10 +14,10 @@ services: memory: 16G # Change this to the command you want to run, respecting the entrypoint.sh # For example, to run the tests, use the following command: - # command: ["cargo", "test", "--locked", "--release", "--features", "${TEST_FEATURES}", "--package", "zebrad", "--test", "acceptance", "--", "--nocapture", "--include-ignored", "sync_large_checkpoints_"] + # command: ["cargo", "test", "--locked", "--release", "--features", "${FEATURES}", "--package", "zebrad", "--test", "acceptance", "--", "--nocapture", "--include-ignored", "sync_large_checkpoints_"] volumes: - - zebrad-cache:/var/cache/zebrad-cache - - lwd-cache:/var/cache/lwd-cache + - zebrad-cache:/home/zebra/.cache/zebra + - lwd-cache:/home/zebra/.cache/lwd ports: # Zebra uses the following inbound and outbound TCP ports - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 22359488de1..f9c71d6e0d4 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -29,7 +29,7 @@ services: # gid: '2001' # Rust's container default group gid # mode: 0440 volumes: - - zebrad-cache:/var/cache/zebrad-cache + - zebrad-cache:/home/zebra/.cache/zebra ports: # Zebra uses the following default inbound and outbound TCP ports - "8233:8233" # Mainnet Network (for peer connections) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index ccd09f43c33..45376df5b47 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -1,186 +1,119 @@ #!/usr/bin/env bash -# This script serves as the entrypoint for the Zebra Docker container. +# Entrypoint for running Zebra in Docker. # -# Description: -# This script serves as the primary entrypoint for the Docker container. 
Its main responsibilities include: -# 1. Environment Setup: Prepares the environment by setting various flags and parameters. -# 2. Configuration Management: Dynamically generates the `zebrad.toml` configuration file based on environment variables, ensuring the node starts with the desired settings. -# 3. Test Execution: Can run a series of tests to validate functionality based on specified environment variables. -# 4. Node Startup: Starts the node, allowing it to begin its operations. +# The main script logic is at the bottom. # +# ## Notes +# +# - `$ZEBRA_CONF_PATH` must point to a Zebra conf file writable by `$USER`. -# Exit if a command fails -set -e -# Exit if any command in a pipeline fails -set -o pipefail - -#### -# General Variables -# These variables are used to run the Zebra node. -#### - -# Path and name of the config file. These two have defaults set in the Dockerfile. -: "${ZEBRA_CONF_DIR:=}" -: "${ZEBRA_CONF_FILE:=}" -# [network] -: "${NETWORK:=Mainnet}" -: "${ZEBRA_LISTEN_ADDR:=0.0.0.0}" -# [consensus] -: "${ZEBRA_CHECKPOINT_SYNC:=true}" -# [state] -# Set this to change the default cached state directory -: "${ZEBRA_CACHED_STATE_DIR:=/var/cache/zebrad-cache}" -: "${LIGHTWALLETD_DATA_DIR:=/var/cache/lwd-cache}" -# [metrics] -: "${METRICS_ENDPOINT_ADDR:=0.0.0.0}" -: "${METRICS_ENDPOINT_PORT:=9999}" -# [tracing] -: "${LOG_COLOR:=false}" -: "${TRACING_ENDPOINT_ADDR:=0.0.0.0}" -: "${TRACING_ENDPOINT_PORT:=3000}" -# [rpc] -: "${RPC_LISTEN_ADDR:=0.0.0.0}" -# if ${RPC_PORT} is not set, use the default value for the current network -if [[ -z "${RPC_PORT}" ]]; then - if [[ "${NETWORK}" = "Mainnet" ]]; then - : "${RPC_PORT:=8232}" - elif [[ "${NETWORK}" = "Testnet" ]]; then - : "${RPC_PORT:=18232}" - fi -fi +set -eo pipefail -#### -# Test Variables -# These variables are used to run tests in the Dockerfile. 
-#### - -: "${RUN_ALL_TESTS:=}" -: "${RUN_ALL_EXPERIMENTAL_TESTS:=}" -: "${TEST_FAKE_ACTIVATION_HEIGHTS:=}" -: "${TEST_ZEBRA_EMPTY_SYNC:=}" -: "${TEST_LWD_INTEGRATION:=}" -: "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES:=}" -: "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES:=}" -: "${TEST_DISK_REBUILD:=}" -: "${TEST_UPDATE_SYNC:=}" -: "${TEST_CHECKPOINT_SYNC:=}" -: "${GENERATE_CHECKPOINTS_MAINNET:=}" -: "${GENERATE_CHECKPOINTS_TESTNET:=}" -: "${TEST_LWD_RPC_CALL:=}" -: "${TEST_LWD_FULL_SYNC:=}" -: "${TEST_LWD_UPDATE_SYNC:=}" -: "${TEST_LWD_GRPC:=}" -: "${TEST_LWD_TRANSACTIONS:=}" -: "${TEST_GET_BLOCK_TEMPLATE:=}" -: "${TEST_SUBMIT_BLOCK:=}" -: "${ENTRYPOINT_FEATURES:=}" -: "${TEST_SCAN_TASK_COMMANDS:=}" - -# Configuration file path -if [[ -n "${ZEBRA_CONF_DIR}" ]] && [[ -n "${ZEBRA_CONF_FILE}" ]] && [[ -z "${ZEBRA_CONF_PATH}" ]]; then - ZEBRA_CONF_PATH="${ZEBRA_CONF_DIR}/${ZEBRA_CONF_FILE}" +# Exit early if `ZEBRA_CONF_PATH` does not point to a file. +if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then + echo "the ZEBRA_CONF_PATH env var does not point to a Zebra conf file" + exit 1 fi -# Populate `zebrad.toml` before starting zebrad, using the environmental -# variables set by the Dockerfile or the user. If the user has already created a config, don't replace it. +# Populates the config file for Zebra, using the env vars set by the Dockerfile +# or user. +# +# Also prints the content of the generated config file. +# +# ## Positional Parameters # -# We disable most ports by default, so the default config is secure. -# Users have to opt-in to additional functionality by setting environmental variables. -if [[ -n "${ZEBRA_CONF_PATH}" ]] && [[ ! 
-f "${ZEBRA_CONF_PATH}" ]] && [[ -z "${ENTRYPOINT_FEATURES}" ]]; then - # Create the conf path and file - (mkdir -p "$(dirname "${ZEBRA_CONF_PATH}")" && touch "${ZEBRA_CONF_PATH}") || { echo "Error creating file ${ZEBRA_CONF_PATH}"; exit 1; } - # Populate the conf file - cat < "${ZEBRA_CONF_PATH}" -[network] -network = "${NETWORK}" -listen_addr = "${ZEBRA_LISTEN_ADDR}" -[state] -cache_dir = "${ZEBRA_CACHED_STATE_DIR}" -EOF - - if [[ " ${FEATURES} " =~ " prometheus " ]]; then # spaces are important here to avoid partial matches - cat <> "${ZEBRA_CONF_PATH}" -[metrics] -endpoint_addr = "${METRICS_ENDPOINT_ADDR}:${METRICS_ENDPOINT_PORT}" -EOF +# - "$1": the file to write the config to +prepare_conf_file() { + # Set a custom `network`. + if [[ "${NETWORK}" ]]; then + sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${1}" fi - if [[ -n "${RPC_PORT}" ]]; then - cat <> "${ZEBRA_CONF_PATH}" -[rpc] -listen_addr = "${RPC_LISTEN_ADDR}:${RPC_PORT}" -EOF + # Enable the RPC server by setting its port. + if [[ "${ZEBRA_RPC_PORT}" ]]; then + sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${1}" + sed -i 's/ *# Mainnet$//' "${1}" + sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${1}" fi - if [[ -n "${LOG_FILE}" ]] || [[ -n "${LOG_COLOR}" ]] || [[ -n "${TRACING_ENDPOINT_ADDR}" ]]; then - cat <> "${ZEBRA_CONF_PATH}" -[tracing] -EOF - if [[ " ${FEATURES} " =~ " filter-reload " ]]; then # spaces are important here to avoid partial matches - cat <> "${ZEBRA_CONF_PATH}" -endpoint_addr = "${TRACING_ENDPOINT_ADDR}:${TRACING_ENDPOINT_PORT}" -EOF - fi - # Set this to log to a file, if not set, logs to standard output - if [[ -n "${LOG_FILE}" ]]; then - mkdir -p "$(dirname "${LOG_FILE}")" - cat <> "${ZEBRA_CONF_PATH}" -log_file = "${LOG_FILE}" -EOF - fi - # Zebra automatically detects if it is attached to a terminal, and uses colored output. - # Set this to 'true' to force using color even if the output is not a terminal. 
- # Set this to 'false' to disable using color even if the output is a terminal. - if [[ "${LOG_COLOR}" = "true" ]]; then - cat <> "${ZEBRA_CONF_PATH}" -force_use_color = true -EOF - elif [[ "${LOG_COLOR}" = "false" ]]; then - cat <> "${ZEBRA_CONF_PATH}" -use_color = false -EOF - fi + # Set a custom state, network and cookie cache dirs. + # + # We're pointing all three cache dirs at the same location, so users will find + # all cached data in that single location. We can introduce more env vars and + # use them to set the cache dirs separately if needed. + if [[ "${ZEBRA_CACHE_DIR}" ]]; then + mkdir -p "${ZEBRA_CACHE_DIR//\"/}" + sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${1}" fi - if [[ -n "${MINER_ADDRESS}" ]]; then - cat <> "${ZEBRA_CONF_PATH}" -[mining] -miner_address = "${MINER_ADDRESS}" -EOF + # Enable the Prometheus metrics endpoint. + if [[ "${FEATURES}" == *"prometheus"* ]]; then + sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${1}" fi -fi -if [[ -n "${ZEBRA_CONF_PATH}" ]] && [[ -z "${ENTRYPOINT_FEATURES}" ]]; then - # Print the config file - echo "Using zebrad.toml:" - cat "${ZEBRA_CONF_PATH}" -fi + # Enable logging to a file by setting a custom log file path. + if [[ "${LOG_FILE}" ]]; then + mkdir -p "$(dirname "${LOG_FILE//\"/}")" + sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${1}" + fi + + # Enable or disable colored logs. + if [[ "${LOG_COLOR}" ]]; then + sed -i '/# force_use_color = true/s/^# //' "${1}" + sed -i '/use_color = true/s/true/'"${LOG_COLOR//\"/}"'/' "${1}" + fi + + # Enable or disable logging to systemd-journald. + if [[ "${USE_JOURNALD}" ]]; then + sed -i '/# use_journald = true/s/^# //; s/true/'"${USE_JOURNALD//\"/}"'/' "${1}" + fi + + # Set a mining address. + if [[ "${MINER_ADDRESS}" ]]; then + sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${1}" + fi + + # Trim all comments and empty lines. 
+ sed -i '/^#/d; /^$/d' "${1}" + + echo "Prepared the following Zebra config:" + cat "$1" +} -# Function to list directory +# Checks if a directory contains subdirectories +# +# Exits with 0 if it does, and 1 otherwise. check_directory_files() { local dir="$1" # Check if the directory exists if [[ -d "${dir}" ]]; then # Check if there are any subdirectories if find "${dir}" -mindepth 1 -type d | read -r; then - # Subdirectories exist, so we continue : else - # No subdirectories, print message and exit with status 1 echo "No subdirectories found in ${dir}." exit 1 fi else - # Directory doesn't exist, print message and exit with status 1 echo "Directory ${dir} does not exist." exit 1 fi } -# Function to run cargo test with an arbitrary number of arguments +# Runs cargo test with an arbitrary number of arguments. +# +# ## Positional Parameters +# +# - '$1' must contain +# - either cargo FEATURES as described here: +# https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options, +# - or be empty. +# - The remaining params will be appended to a command starting with +# `exec cargo test ... -- ...` run_cargo_test() { - # Start constructing the command, ensuring that $1 is enclosed in single quotes as it's a feature list + # Start constructing the command, ensuring that $1 is enclosed in single + # quotes as it's a feature list local cmd="exec cargo test --locked --release --features '$1' --package zebrad --test acceptance -- --nocapture --include-ignored" # Shift the first argument, as it's already included in the cmd @@ -194,170 +127,161 @@ run_cargo_test() { fi done - # Run the command using eval, this will replace the current process with the cargo command - eval "${cmd}" || { echo "Cargo test failed"; exit 1; } + # Run the command using eval. This will replace the current process with the + # cargo command. 
+ echo "Running:" + echo "${cmd}" + eval "${cmd}" || { + echo "Cargo test failed" + exit 1 + } } -# Main Execution Logic: -# This script orchestrates the execution flow based on the provided arguments and environment variables. -# - If "$1" is '--', '-', or 'zebrad', the script processes the subsequent arguments for the 'zebrad' command. -# - If ENTRYPOINT_FEATURES is unset, it checks for ZEBRA_CONF_PATH. If set, 'zebrad' runs with this custom configuration; otherwise, it runs with the provided arguments. -# - If "$1" is an empty string and ENTRYPOINT_FEATURES is set, the script enters the testing phase, checking various environment variables to determine the specific tests to run. -# - Different tests or operations are triggered based on the respective conditions being met. -# - If "$1" doesn't match any of the above, it's assumed to be a command, which is executed directly. -# This structure ensures a flexible execution strategy, accommodating various scenarios such as custom configurations, different testing phases, or direct command execution. +# Runs tests depending on the env vars. +# +# Positional Parameters +# +# - $@: Arbitrary command that will be executed if no test env var is set. +run_tests() { + if [[ "${RUN_ALL_TESTS}" -eq "1" ]]; then + # Run unit, basic acceptance tests, and ignored tests, only showing command + # output if the test fails. If the lightwalletd environment variables are + # set, we will also run those tests. + exec cargo test --locked --release --workspace --features "${FEATURES}" \ + -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock + + elif [[ "${RUN_CHECK_NO_GIT_REFS}" -eq "1" ]]; then + # Run the check_no_git_refs_in_cargo_lock test. + exec cargo test --locked --release --workspace --features "${FEATURES}" \ + -- --nocapture --include-ignored check_no_git_refs_in_cargo_lock + + elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then + # Run state tests with fake activation heights. 
+ exec cargo test --locked --release --lib --features "zebra-test" \ + --package zebra-state \ + -- --nocapture --include-ignored with_fake_activation_heights + + elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then + # Test the scanner. + exec cargo test --locked --release --package zebra-scan \ + -- --nocapture --include-ignored scan_task_commands scan_start_where_left + + elif [[ "${TEST_ZEBRA_EMPTY_SYNC}" -eq "1" ]]; then + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty + # state. + run_cargo_test "${FEATURES}" "sync_large_checkpoints_" + + elif [[ -n "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES}" ]]; then + # Run a Zebra full sync test on mainnet. + run_cargo_test "${FEATURES}" "full_sync_mainnet" + + elif [[ -n "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES}" ]]; then + # Run a Zebra full sync test on testnet. + run_cargo_test "${FEATURES}" "full_sync_testnet" + + elif [[ "${TEST_DISK_REBUILD}" -eq "1" ]]; then + # Run a Zebra sync up to the mandatory checkpoint. + run_cargo_test "${FEATURES} test_sync_to_mandatory_checkpoint_${NETWORK,,}" \ + "sync_to_mandatory_checkpoint_${NETWORK,,}" + echo "ran test_disk_rebuild" + + elif [[ "${TEST_UPDATE_SYNC}" -eq "1" ]]; then + # Run a Zebra sync starting at the cached tip, and syncing to the latest + # tip. + run_cargo_test "${FEATURES}" "zebrad_update_sync" + + elif [[ "${TEST_CHECKPOINT_SYNC}" -eq "1" ]]; then + # Run a Zebra sync starting at the cached mandatory checkpoint, and syncing + # past it. + run_cargo_test "${FEATURES} test_sync_past_mandatory_checkpoint_${NETWORK,,}" \ + "sync_past_mandatory_checkpoint_${NETWORK,,}" + + elif [[ "${GENERATE_CHECKPOINTS_MAINNET}" -eq "1" ]]; then + # Generate checkpoints after syncing Zebra from a cached state on mainnet. 
+ # + # TODO: disable or filter out logs like: + # test generate_checkpoints_mainnet has been running for over 60 seconds + run_cargo_test "${FEATURES}" "generate_checkpoints_mainnet" + + elif [[ "${GENERATE_CHECKPOINTS_TESTNET}" -eq "1" ]]; then + # Generate checkpoints after syncing Zebra on testnet. + # + # This test might fail if testnet is unstable. + run_cargo_test "${FEATURES}" "generate_checkpoints_testnet" + + elif [[ "${TEST_LWD_RPC_CALL}" -eq "1" ]]; then + # Starting at a cached Zebra tip, test a JSON-RPC call to Zebra. + # Run both the fully synced RPC test and the subtree snapshot test, one test + # at a time. Since these tests use the same cached state, a state problem in + # the first test can fail the second test. + run_cargo_test "${FEATURES}" "--test-threads" "1" "fully_synced_rpc_" + + elif [[ "${TEST_LWD_INTEGRATION}" -eq "1" ]]; then + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + run_cargo_test "${FEATURES}" "lightwalletd_integration" + + elif [[ "${TEST_LWD_FULL_SYNC}" -eq "1" ]]; then + # Starting at a cached Zebra tip, run a lightwalletd sync to tip. + run_cargo_test "${FEATURES}" "lightwalletd_full_sync" + + elif [[ "${TEST_LWD_UPDATE_SYNC}" -eq "1" ]]; then + # Starting with a cached Zebra and lightwalletd tip, run a quick update sync. + run_cargo_test "${FEATURES}" "lightwalletd_update_sync" + + # These tests actually use gRPC. + elif [[ "${TEST_LWD_GRPC}" -eq "1" ]]; then + # Starting with a cached Zebra and lightwalletd tip, test all gRPC calls to + # lightwalletd, which calls Zebra. + run_cargo_test "${FEATURES}" "lightwalletd_wallet_grpc_tests" + + elif [[ "${TEST_LWD_TRANSACTIONS}" -eq "1" ]]; then + # Starting with a cached Zebra and lightwalletd tip, test sending + # transactions gRPC call to lightwalletd, which calls Zebra. + run_cargo_test "${FEATURES}" "sending_transactions_using_lightwalletd" + + # These tests use mining code, but don't use gRPC. 
+ elif [[ "${TEST_GET_BLOCK_TEMPLATE}" -eq "1" ]]; then + # Starting with a cached Zebra tip, test getting a block template from + # Zebra's RPC server. + run_cargo_test "${FEATURES}" "get_block_template" + + elif [[ "${TEST_SUBMIT_BLOCK}" -eq "1" ]]; then + # Starting with a cached Zebra tip, test sending a block to Zebra's RPC + # port. + run_cargo_test "${FEATURES}" "submit_block" -case "$1" in - --* | -* | zebrad) - shift - if [[ -n "${ZEBRA_CONF_PATH}" ]]; then - exec zebrad -c "${ZEBRA_CONF_PATH}" "$@" || { echo "Execution with custom configuration failed"; exit 1; } - else - exec zebrad "$@" || { echo "Execution failed"; exit 1; } - fi - ;; - "") - if [[ -n "${ENTRYPOINT_FEATURES}" ]]; then - # Validate the test variables - # For these tests, we activate the test features to avoid recompiling `zebrad`, - # but we don't actually run any gRPC tests. - if [[ "${RUN_ALL_TESTS}" -eq "1" ]]; then - # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. - # If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock - - elif [[ "${RUN_ALL_EXPERIMENTAL_TESTS}" -eq "1" ]]; then - # Run unit, basic acceptance tests, and ignored tests with experimental features. - # If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES_EXPERIMENTAL}" --workspace -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock - - elif [[ "${RUN_CHECK_NO_GIT_REFS}" -eq "1" ]]; then - # Run the check_no_git_refs_in_cargo_lock test. 
- exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored check_no_git_refs_in_cargo_lock - - elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then - # Run state tests with fake activation heights. - exec cargo test --locked --release --features "zebra-test" --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights - - elif [[ "${TEST_ZEBRA_EMPTY_SYNC}" -eq "1" ]]; then - # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. - run_cargo_test "${ENTRYPOINT_FEATURES}" "sync_large_checkpoints_" - - elif [[ -n "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES}" ]]; then - # Run a Zebra full sync test on mainnet. - run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_mainnet" - # List directory generated by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - - elif [[ -n "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES}" ]]; then - # Run a Zebra full sync test on testnet. - run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_testnet" - # List directory generated by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - - elif [[ "${TEST_DISK_REBUILD}" -eq "1" ]]; then - # Run a Zebra sync up to the mandatory checkpoint. - # - # TODO: use environmental variables instead of Rust features (part of #2995) - run_cargo_test "test_sync_to_mandatory_checkpoint_${NETWORK,,},${ENTRYPOINT_FEATURES}" "sync_to_mandatory_checkpoint_${NETWORK,,}" - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - - elif [[ "${TEST_UPDATE_SYNC}" -eq "1" ]]; then - # Run a Zebra sync starting at the cached tip, and syncing to the latest tip. - # - # List directory used by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "zebrad_update_sync" - - elif [[ "${TEST_CHECKPOINT_SYNC}" -eq "1" ]]; then - # Run a Zebra sync starting at the cached mandatory checkpoint, and syncing past it. 
- # - # List directory used by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - # TODO: use environmental variables instead of Rust features (part of #2995) - run_cargo_test "test_sync_past_mandatory_checkpoint_${NETWORK,,},${ENTRYPOINT_FEATURES}" "sync_past_mandatory_checkpoint_${NETWORK,,}" - - elif [[ "${GENERATE_CHECKPOINTS_MAINNET}" -eq "1" ]]; then - # Generate checkpoints after syncing Zebra from a cached state on mainnet. - # - # TODO: disable or filter out logs like: - # test generate_checkpoints_mainnet has been running for over 60 seconds - # - # List directory used by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "generate_checkpoints_mainnet" - - elif [[ "${GENERATE_CHECKPOINTS_TESTNET}" -eq "1" ]]; then - # Generate checkpoints after syncing Zebra on testnet. - # - # This test might fail if testnet is unstable. - # - # List directory used by test - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "generate_checkpoints_testnet" - - elif [[ "${TEST_LWD_RPC_CALL}" -eq "1" ]]; then - # Starting at a cached Zebra tip, test a JSON-RPC call to Zebra. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - # Run both the fully synced RPC test and the subtree snapshot test, one test at a time. - # Since these tests use the same cached state, a state problem in the first test can fail the second test. - run_cargo_test "${ENTRYPOINT_FEATURES}" "--test-threads" "1" "fully_synced_rpc_" - - elif [[ "${TEST_LWD_INTEGRATION}" -eq "1" ]]; then - # Test launching lightwalletd with an empty lightwalletd and Zebra state. - run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_integration" - - elif [[ "${TEST_LWD_FULL_SYNC}" -eq "1" ]]; then - # Starting at a cached Zebra tip, run a lightwalletd sync to tip. 
- check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_full_sync" - check_directory_files "${LIGHTWALLETD_DATA_DIR}/db" - - elif [[ "${TEST_LWD_UPDATE_SYNC}" -eq "1" ]]; then - # Starting with a cached Zebra and lightwalletd tip, run a quick update sync. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - check_directory_files "${LIGHTWALLETD_DATA_DIR}/db" - run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_update_sync" - - # These tests actually use gRPC. - elif [[ "${TEST_LWD_GRPC}" -eq "1" ]]; then - # Starting with a cached Zebra and lightwalletd tip, test all gRPC calls to lightwalletd, which calls Zebra. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - check_directory_files "${LIGHTWALLETD_DATA_DIR}/db" - run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_wallet_grpc_tests" - - elif [[ "${TEST_LWD_TRANSACTIONS}" -eq "1" ]]; then - # Starting with a cached Zebra and lightwalletd tip, test sending transactions gRPC call to lightwalletd, which calls Zebra. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - check_directory_files "${LIGHTWALLETD_DATA_DIR}/db" - run_cargo_test "${ENTRYPOINT_FEATURES}" "sending_transactions_using_lightwalletd" - - # These tests use mining code, but don't use gRPC. - elif [[ "${TEST_GET_BLOCK_TEMPLATE}" -eq "1" ]]; then - # Starting with a cached Zebra tip, test getting a block template from Zebra's RPC server. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "get_block_template" - - elif [[ "${TEST_SUBMIT_BLOCK}" -eq "1" ]]; then - # Starting with a cached Zebra tip, test sending a block to Zebra's RPC port. - check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "${ENTRYPOINT_FEATURES}" "submit_block" - - elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then - # Test that the scan task commands are working. 
- check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - exec cargo test --locked --release --features "zebra-test" --package zebra-scan -- --nocapture --include-ignored scan_task_commands - - else - exec "$@" - fi - fi - ;; - *) - if command -v gosu >/dev/null 2>&1; then - exec gosu "$USER" "$@" + else + if [[ "$1" == "zebrad" ]]; then + shift + exec zebrad -c "${ZEBRA_CONF_PATH}" "$@" else exec "$@" fi - ;; + fi +} + +# Main Script Logic + +prepare_conf_file "$ZEBRA_CONF_PATH" + +# - If "$1" is "--", "-", or "zebrad", run `zebrad` with the remaining params. +# - If "$1" is "tests", run tests. +# - TODO: If "$1" is "monitoring", start a monitoring node. +# - If "$1" doesn't match any of the above, run "$@" directly. +case "$1" in +--* | -* | zebrad) + shift + exec zebrad --config "${ZEBRA_CONF_PATH}" "$@" + ;; +test) + shift + run_tests "$@" + ;; +monitoring) + # TODO: Impl logic for starting a monitoring node. + : + ;; +*) + exec "$@" + ;; esac diff --git a/zebra-scan/tests/scan_task_commands.rs b/zebra-scan/tests/scan_task_commands.rs index 20c4edfe757..1f38a3302fe 100644 --- a/zebra-scan/tests/scan_task_commands.rs +++ b/zebra-scan/tests/scan_task_commands.rs @@ -3,7 +3,7 @@ //! This test requires a cached chain state that is partially synchronized past the //! Sapling activation height and [`REQUIRED_MIN_TIP_HEIGHT`] //! -//! export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +//! export ZEBRA_CACHE_DIR="/path/to/zebra/state" //! cargo test scan_task_commands --features="shielded-scan" -- --ignored --nocapture #![allow(dead_code, non_local_definitions)] @@ -59,9 +59,9 @@ pub(crate) async fn run() -> Result<()> { // This is currently needed for the 'Check startup logs' step in CI to pass. 
tracing::info!("Zcash network: {network}"); - let zebrad_state_path = match std::env::var_os("ZEBRA_CACHED_STATE_DIR") { + let zebrad_state_path = match std::env::var_os("ZEBRA_CACHE_DIR") { None => { - tracing::error!("ZEBRA_CACHED_STATE_DIR is not set"); + tracing::error!("ZEBRA_CACHE_DIR is not set"); return Ok(()); } Some(path) => std::path::PathBuf::from(path), diff --git a/zebra-scan/tests/scanner.rs b/zebra-scan/tests/scanner.rs index 3708caa07a9..9423119fb84 100644 --- a/zebra-scan/tests/scanner.rs +++ b/zebra-scan/tests/scanner.rs @@ -132,7 +132,7 @@ async fn scan_binary_starts() -> Result<()> { /// /// Needs a cache state close to the tip. A possible way to run it locally is: /// -/// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +/// export ZEBRA_CACHE_DIR="/path/to/zebra/state" /// cargo test scan_start_where_left -- --ignored --nocapture /// /// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. @@ -145,9 +145,9 @@ async fn scan_start_where_left() -> Result<()> { let _init_guard = zebra_test::init(); - let Ok(zebrad_cachedir) = std::env::var("ZEBRA_CACHED_STATE_DIR") else { + let Ok(zebrad_cachedir) = std::env::var("ZEBRA_CACHE_DIR") else { tracing::info!("skipping scan_start_where_left test due to missing cached state, \ - please set a ZEBRA_CACHED_STATE_DIR env var with a populated and valid path to run this test"); + please set a ZEBRA_CACHE_DIR env var with a populated and valid path to run this test"); return Ok(()); }; @@ -244,7 +244,7 @@ async fn scan_start_where_left() -> Result<()> { /// Example of how to run the scan_task_commands test locally: /// /// ```console -/// RUST_LOG=info ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test scan_task_commands -- --include-ignored --nocapture +/// RUST_LOG=info ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test scan_task_commands -- --include-ignored --nocapture /// ``` #[tokio::test] #[ignore] diff --git a/zebrad/tests/acceptance.rs 
b/zebrad/tests/acceptance.rs index 270a0e955c7..89afff9bbcc 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -30,7 +30,7 @@ //! will allow this test to run or give up. Value for the Mainnet full sync tests. //! - `FULL_SYNC_TESTNET_TIMEOUT_MINUTES` env variable: The total number of minutes we //! will allow this test to run or give up. Value for the Testnet full sync tests. -//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a Zebra cached state directory. +//! - `ZEBRA_CACHE_DIR` env variable: The path to a Zebra cached state directory. //! If not set, it defaults to `/zebrad-cache`. For some sync tests, this directory needs to be //! created in the file system with write permissions. //! @@ -41,15 +41,15 @@ //! //! $ cargo test sync_large_checkpoints_mempool_mainnet -- --ignored --nocapture //! -//! $ export ZEBRA_CACHED_STATE_DIR="/zebrad-cache" -//! $ sudo mkdir -p "$ZEBRA_CACHED_STATE_DIR" -//! $ sudo chmod 777 "$ZEBRA_CACHED_STATE_DIR" +//! $ export ZEBRA_CACHE_DIR="/zebrad-cache" +//! $ sudo mkdir -p "$ZEBRA_CACHE_DIR" +//! $ sudo chmod 777 "$ZEBRA_CACHE_DIR" //! $ export FULL_SYNC_MAINNET_TIMEOUT_MINUTES=600 //! $ cargo test full_sync_mainnet -- --ignored --nocapture //! -//! $ export ZEBRA_CACHED_STATE_DIR="/zebrad-cache" -//! $ sudo mkdir -p "$ZEBRA_CACHED_STATE_DIR" -//! $ sudo chmod 777 "$ZEBRA_CACHED_STATE_DIR" +//! $ export ZEBRA_CACHE_DIR="/zebrad-cache" +//! $ sudo mkdir -p "$ZEBRA_CACHE_DIR" +//! $ sudo chmod 777 "$ZEBRA_CACHE_DIR" //! $ export FULL_SYNC_TESTNET_TIMEOUT_MINUTES=600 //! $ cargo test full_sync_testnet -- --ignored --nocapture //! ``` @@ -70,9 +70,9 @@ //! at least the `ZEBRA_TEST_LIGHTWALLETD` environment variable is present: //! //! - `ZEBRA_TEST_LIGHTWALLETD` env variable: Needs to be present to run any of the lightwalletd tests. -//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a Zebra cached state directory. +//! - `ZEBRA_CACHE_DIR` env variable: The path to a Zebra cached state directory. 
//! If not set, it defaults to `/zebrad-cache`. -//! - `LIGHTWALLETD_DATA_DIR` env variable: The path to a lightwalletd database. +//! - `LWD_CACHE_DIR` env variable: The path to a lightwalletd database. //! - `--features lightwalletd-grpc-tests` cargo flag: The flag given to cargo to build the source code of the running test. //! //! Here are some examples of running each test: @@ -82,29 +82,29 @@ //! $ cargo test lightwalletd_integration -- --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true -//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" -//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database" +//! $ export ZEBRA_CACHE_DIR="/path/to/zebra/state" +//! $ export LWD_CACHE_DIR="/path/to/lightwalletd/database" //! $ cargo test lightwalletd_update_sync -- --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true -//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +//! $ export ZEBRA_CACHE_DIR="/path/to/zebra/state" //! $ cargo test lightwalletd_full_sync -- --ignored --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true //! $ cargo test lightwalletd_test_suite -- --ignored --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true -//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +//! $ export ZEBRA_CACHE_DIR="/path/to/zebra/state" //! $ cargo test fully_synced_rpc_test -- --ignored --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true -//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" -//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database" +//! $ export ZEBRA_CACHE_DIR="/path/to/zebra/state" +//! $ export LWD_CACHE_DIR="/path/to/lightwalletd/database" //! $ cargo test sending_transactions_using_lightwalletd --features lightwalletd-grpc-tests -- --ignored --nocapture //! //! $ export ZEBRA_TEST_LIGHTWALLETD=true -//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" -//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database" +//! 
$ export ZEBRA_CACHE_DIR="/path/to/zebra/state" +//! $ export LWD_CACHE_DIR="/path/to/lightwalletd/database" //! $ cargo test lightwalletd_wallet_grpc_tests --features lightwalletd-grpc-tests -- --ignored --nocapture //! ``` //! @@ -113,19 +113,19 @@ //! Example of how to run the get_block_template test: //! //! ```console -//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture //! ``` //! //! Example of how to run the submit_block test: //! //! ```console -//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture //! ``` //! //! Example of how to run the has_spending_transaction_ids test: //! //! ```console -//! RUST_LOG=info ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test has_spending_transaction_ids --features "indexer" --release -- --ignored --nocapture +//! RUST_LOG=info ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test has_spending_transaction_ids --features "indexer" --release -- --ignored --nocapture //! ``` //! //! Please refer to the documentation of each test for more information. @@ -134,8 +134,8 @@ //! //! Generate checkpoints on mainnet and testnet using a cached state: //! ```console -//! GENERATE_CHECKPOINTS_MAINNET=1 ENTRYPOINT_FEATURES=zebra-checkpoints ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state docker/entrypoint.sh -//! GENERATE_CHECKPOINTS_TESTNET=1 ENTRYPOINT_FEATURES=zebra-checkpoints ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state docker/entrypoint.sh +//! GENERATE_CHECKPOINTS_MAINNET=1 FEATURES=zebra-checkpoints ZEBRA_CACHE_DIR=/path/to/zebra/state docker/entrypoint.sh +//! 
GENERATE_CHECKPOINTS_TESTNET=1 FEATURES=zebra-checkpoints ZEBRA_CACHE_DIR=/path/to/zebra/state docker/entrypoint.sh //! ``` //! //! ## Disk Space for Testing @@ -1279,7 +1279,7 @@ fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> { // // Replace hard-coded values in create_cached_database_height with: // - the timeout in the environmental variable - // - the path from ZEBRA_CACHED_STATE_DIR + // - the path from ZEBRA_CACHE_DIR if let Some(_timeout_minutes) = timeout_argument { create_cached_database_height( &network, @@ -1792,7 +1792,7 @@ fn lightwalletd_integration() -> Result<()> { /// Make sure `zebrad` can sync from peers, but don't actually launch `lightwalletd`. /// -/// This test only runs when the `ZEBRA_CACHED_STATE_DIR` env var is set. +/// This test only runs when the `ZEBRA_CACHE_DIR` env var is set. /// /// This test might work on Windows. #[test] @@ -1803,8 +1803,8 @@ fn zebrad_update_sync() -> Result<()> { /// Make sure `lightwalletd` can sync from Zebra, in update sync mode. /// /// This test only runs when: -/// - the `ZEBRA_TEST_LIGHTWALLETD`, `ZEBRA_CACHED_STATE_DIR`, and -/// `LIGHTWALLETD_DATA_DIR` env vars are set, and +/// - the `ZEBRA_TEST_LIGHTWALLETD`, `ZEBRA_CACHE_DIR`, and +/// `LWD_CACHE_DIR` env vars are set, and /// - Zebra is compiled with `--features=lightwalletd-grpc-tests`. /// /// This test doesn't work on Windows, so it is always skipped on that platform. @@ -1818,7 +1818,7 @@ fn lightwalletd_update_sync() -> Result<()> { /// Make sure `lightwalletd` can fully sync from genesis using Zebra. /// /// This test only runs when: -/// - the `ZEBRA_TEST_LIGHTWALLETD` and `ZEBRA_CACHED_STATE_DIR` env vars are set, and +/// - the `ZEBRA_TEST_LIGHTWALLETD` and `ZEBRA_CACHE_DIR` env vars are set, and /// - Zebra is compiled with `--features=lightwalletd-grpc-tests`. 
/// /// @@ -1837,9 +1837,9 @@ fn lightwalletd_full_sync() -> Result<()> { /// /// Runs the tests in this order: /// - launch lightwalletd with empty states, -/// - if `ZEBRA_CACHED_STATE_DIR` is set: +/// - if `ZEBRA_CACHE_DIR` is set: /// - run a full sync -/// - if `ZEBRA_CACHED_STATE_DIR` and `LIGHTWALLETD_DATA_DIR` are set: +/// - if `ZEBRA_CACHE_DIR` and `LWD_CACHE_DIR` are set: /// - run a quick update sync, /// - run a send transaction gRPC test, /// - run read-only gRPC tests. @@ -1855,7 +1855,7 @@ async fn lightwalletd_test_suite() -> Result<()> { launches_lightwalletd: true, })?; - // Only runs when ZEBRA_CACHED_STATE_DIR is set. + // Only runs when ZEBRA_CACHE_DIR is set. lightwalletd_integration_test(UpdateZebraCachedStateNoRpc)?; // These tests need the compile-time gRPC feature @@ -1863,21 +1863,21 @@ async fn lightwalletd_test_suite() -> Result<()> { { // Do the quick tests first - // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set + // Only runs when LWD_CACHE_DIR and ZEBRA_CACHE_DIR are set lightwalletd_integration_test(UpdateCachedState)?; - // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set + // Only runs when LWD_CACHE_DIR and ZEBRA_CACHE_DIR are set common::lightwalletd::wallet_grpc_test::run().await?; // Then do the slow tests - // Only runs when ZEBRA_CACHED_STATE_DIR is set. + // Only runs when ZEBRA_CACHE_DIR is set. // When manually running the test suite, allow cached state in the full sync test. 
lightwalletd_integration_test(FullSyncFromGenesis { allow_lightwalletd_cached_state: true, })?; - // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set + // Only runs when LWD_CACHE_DIR and ZEBRA_CACHE_DIR are set common::lightwalletd::send_transaction_test::run().await?; } diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index c290cde2cd9..a03fafb6f1b 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -32,7 +32,7 @@ use crate::common::{ }; /// The environmental variable that holds the path to a directory containing a cached Zebra state. -pub const ZEBRA_CACHED_STATE_DIR: &str = "ZEBRA_CACHED_STATE_DIR"; +pub const ZEBRA_CACHE_DIR: &str = "ZEBRA_CACHE_DIR"; /// In integration tests, the interval between database format checks for newly added blocks. /// diff --git a/zebrad/tests/common/configs/v2.1.0.toml b/zebrad/tests/common/configs/v2.1.0.toml new file mode 100644 index 00000000000..54613ba842a --- /dev/null +++ b/zebrad/tests/common/configs/v2.1.0.toml @@ -0,0 +1,85 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. 
The default config. +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +cookie_dir = "cache_dir" +debug_force_finished_sync = false +enable_cookie_auth = true +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 0cbb52e7cb7..d4ee17f323e 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -210,7 +210,7 @@ where /// Spawns a zebrad instance on `network` to test lightwalletd with 
`test_type`. /// /// If `use_internet_connection` is `false` then spawn, but without any peers. -/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHED_STATE_DIR` +/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHE_DIR` /// environmental variable to provide an initial state to the zebrad instance. /// /// Returns: @@ -261,7 +261,7 @@ pub fn spawn_zebrad_for_rpc + Debug>( /// Otherwise, just create an empty state in this test's new temporary directory. /// /// If `use_internet_connection` is `false` then spawn, but without any peers. -/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHED_STATE_DIR` +/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHE_DIR` /// environmental variable to provide an initial state to the zebrad instance. /// /// Returns: diff --git a/zebrad/tests/common/lightwalletd.rs b/zebrad/tests/common/lightwalletd.rs index fc10242d80f..376ce096865 100644 --- a/zebrad/tests/common/lightwalletd.rs +++ b/zebrad/tests/common/lightwalletd.rs @@ -50,7 +50,7 @@ pub const ZEBRA_TEST_LIGHTWALLETD: &str = "ZEBRA_TEST_LIGHTWALLETD"; /// /// Can also be used to speed up the [`sending_transactions_using_lightwalletd`] test, /// by skipping the lightwalletd initial sync. -pub const LIGHTWALLETD_DATA_DIR: &str = "LIGHTWALLETD_DATA_DIR"; +pub const LWD_CACHE_DIR: &str = "LWD_CACHE_DIR"; /// Should we skip Zebra lightwalletd integration tests? #[allow(clippy::print_stderr)] @@ -76,7 +76,7 @@ pub fn zebra_skip_lightwalletd_tests() -> bool { /// Spawns a lightwalletd instance on `network`, connected to `zebrad_rpc_address`, /// with its gRPC server functionality enabled. /// -/// Expects cached state based on the `test_type`. Use the `LIGHTWALLETD_DATA_DIR` +/// Expects cached state based on the `test_type`. Use the `LWD_CACHE_DIR` /// environmental variable to provide an initial state to the lightwalletd instance. 
/// /// Returns: diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index bac394099f5..cc8fd06f7fd 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -328,10 +328,10 @@ pub fn check_sync_logs_until( /// Returns the cache directory for Zebra's state. /// -/// It checks the `ZEBRA_CACHED_STATE_DIR` environment variable and returns its value if set. +/// It checks the `ZEBRA_CACHE_DIR` environment variable and returns its value if set. /// Otherwise, it defaults to `"/zebrad-cache"`. fn get_zebra_cached_state_dir() -> PathBuf { - env::var("ZEBRA_CACHED_STATE_DIR") + env::var("ZEBRA_CACHE_DIR") .unwrap_or_else(|_| "/zebrad-cache".to_string()) .into() } diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs index 5621b12b690..cacfc55061d 100644 --- a/zebrad/tests/common/test_type.rs +++ b/zebrad/tests/common/test_type.rs @@ -14,14 +14,14 @@ use zebra_test::{command::NO_MATCHES_REGEX_ITER, prelude::*}; use zebrad::config::ZebradConfig; use super::{ - cached_state::ZEBRA_CACHED_STATE_DIR, + cached_state::ZEBRA_CACHE_DIR, config::{default_test_config, random_known_rpc_port_config}, failure_messages::{ LIGHTWALLETD_EMPTY_ZEBRA_STATE_IGNORE_MESSAGES, LIGHTWALLETD_FAILURE_MESSAGES, PROCESS_FAILURE_MESSAGES, ZEBRA_FAILURE_MESSAGES, }, launch::{LIGHTWALLETD_DELAY, LIGHTWALLETD_FULL_SYNC_TIP_DELAY, LIGHTWALLETD_UPDATE_TIP_DELAY}, - lightwalletd::LIGHTWALLETD_DATA_DIR, + lightwalletd::LWD_CACHE_DIR, sync::FINISH_PARTIAL_SYNC_TIMEOUT, }; @@ -140,7 +140,7 @@ impl TestType { } } - /// Can this test create a new `LIGHTWALLETD_DATA_DIR` cached state? + /// Can this test create a new `LWD_CACHE_DIR` cached state? pub fn can_create_lightwalletd_cached_state(&self) -> bool { match self { LaunchWithEmptyState { .. } | UseAnyState => false, @@ -152,13 +152,13 @@ impl TestType { /// Returns the Zebra state path for this test, if set. 
#[allow(clippy::print_stderr)] pub fn zebrad_state_path>(&self, test_name: S) -> Option { - match env::var_os(ZEBRA_CACHED_STATE_DIR) { + match env::var_os(ZEBRA_CACHE_DIR) { Some(path) => Some(path.into()), None => { let test_name = test_name.as_ref(); eprintln!( "skipped {test_name:?} {self:?} lightwalletd test, \ - set the {ZEBRA_CACHED_STATE_DIR:?} environment variable to run the test", + set the {ZEBRA_CACHE_DIR:?} environment variable to run the test", ); None @@ -235,24 +235,24 @@ impl TestType { if !self.launches_lightwalletd() || !use_or_create_lwd_cache { tracing::info!( "running {test_name:?} {self:?} lightwalletd test, \ - ignoring any cached state in the {LIGHTWALLETD_DATA_DIR:?} environment variable", + ignoring any cached state in the {LWD_CACHE_DIR:?} environment variable", ); return None; } - match env::var_os(LIGHTWALLETD_DATA_DIR) { + match env::var_os(LWD_CACHE_DIR) { Some(path) => Some(path.into()), None => { if self.needs_lightwalletd_cached_state() { tracing::info!( "skipped {test_name:?} {self:?} lightwalletd test, \ - set the {LIGHTWALLETD_DATA_DIR:?} environment variable to run the test", + set the {LWD_CACHE_DIR:?} environment variable to run the test", ); } else if self.allow_lightwalletd_cached_state() { tracing::info!( "running {test_name:?} {self:?} lightwalletd test without cached state, \ - set the {LIGHTWALLETD_DATA_DIR:?} environment variable to run with cached state", + set the {LWD_CACHE_DIR:?} environment variable to run with cached state", ); } From dd31d8f936756431d44cfec85eaf962b637fe805 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 13 Feb 2025 14:29:39 +0000 Subject: [PATCH 092/245] fix(ci): prevent duplicate draft releases in release-drafter (#9252) Release Drafter v6.1.0 has a regression that creates multiple duplicate draft releases instead of updating existing ones. 
This change: - Disables pre-release flag for draft releases - Excludes pre-releases from being included in drafts - Explicitly defines PR event types in workflow See: release-drafter/release-drafter#1425 --- .github/release-drafter.yml | 5 ++++- .github/workflows/release-drafter.yml | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index f291980b693..eacf6c3ba70 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -77,7 +77,10 @@ autolabeler: name-template: 'Zebra $RESOLVED_VERSION' tag-template: 'v$RESOLVED_VERSION' tag-prefix: 'v' -prerelease: true +# Do not mark the draft release as a pre-release +prerelease: false +# Do not include pre-releases in the draft release +include-pre-releases: false # Categories in rough order of importance to users. # Based on https://keepachangelog.com/en/1.0.0/ diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index b5025a4b463..3538c4da913 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -16,8 +16,8 @@ on: - main # pull_request event is required only for autolabeler pull_request: - # Only following types are handled by the action, but one can default to all as well - #types: [opened, reopened, synchronize] + # Only following types are handled by the action + types: [opened, reopened, synchronize] # pull_request_target event is required for autolabeler to support PRs from forks pull_request_target: #types: [opened, reopened, synchronize] From 39017c06d223f7ae1a75b7a56bdfeb9ba53707db Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 13 Feb 2025 15:51:04 +0000 Subject: [PATCH 093/245] feat(ci): add static IPs and improve node deployment workflows (#8891) This change enhances the GCP deployment workflows with several improvements: - Add static IP address support for long-running nodes - Implement dynamic network selection matrix - Add 
 manual deployment trigger support
- Update GCP configuration syntax for better compatibility
- Remove redundant deploy-instance job
- Improve environment labeling and log file handling

The static IP addresses will help maintain stable network connectivity
for long-running nodes, particularly important for testnet peer
discovery.

Fixes #8763
---
 .github/workflows/cd-deploy-nodes-gcp.yml  | 141 ++++++++-------------
 .github/workflows/manual-zcashd-deploy.yml |   2 +-
 2 files changed, 51 insertions(+), 92 deletions(-)

diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml
index 0f015429d62..1338ed5f7b3 100644
--- a/.github/workflows/cd-deploy-nodes-gcp.yml
+++ b/.github/workflows/cd-deploy-nodes-gcp.yml
@@ -226,13 +226,34 @@ jobs:
   # Runs:
   # - on every push to the `main` branch
   # - on every release, when it's published
+  # - on workflow_dispatch for manual deployments
+
+  # Determine which networks to deploy based on the trigger
+
+
+
+  set-matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      networks: ${{ steps.set-networks.outputs.matrix }}
+    steps:
+      - id: set-networks
+        run: |
+          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
+            # Manually triggered deployment: output a valid JSON array with the single chosen network.
+ echo "matrix=[\"${{ inputs.network }}\"]" >> $GITHUB_OUTPUT + else + echo 'matrix=["Mainnet","Testnet"]' >> $GITHUB_OUTPUT + fi + deploy-nodes: strategy: matrix: - network: [Mainnet, Testnet] + network: ${{ fromJSON(needs.set-matrix.outputs.networks) }} name: Deploy ${{ matrix.network }} nodes needs: [ + set-matrix, build, versioning, test-configuration-file, @@ -247,7 +268,7 @@ jobs: permissions: contents: "read" id-token: "write" - if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} + if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} steps: - uses: actions/checkout@v4.2.2 @@ -264,7 +285,7 @@ jobs: # Labels in GCP are required to be in lowercase, but the blockchain network # uses sentence case, so we need to downcase the network. # - # Passes the lowercase network to subsequent steps using $NETWORK env variable. + # Passes lowercase network to subsequent steps using $NETWORK env variable. - name: Downcase network name for labels run: | NETWORK_CAPS="${{ matrix.network }}" @@ -281,6 +302,14 @@ jobs: - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.4 + - name: Get static IP address for long-running nodes + # Now runs when triggered by a release or a manual workflow_dispatch event. + if: ${{ github.event_name == 'release' || github.event_name == 'workflow_dispatch' }} + run: | + set -e # Exit immediately if a command exits with a non-zero status. + # Attempt to retrieve the static IP address for the network. 
+ echo "IP_ADDRESS=$(gcloud compute addresses describe zebra-${NETWORK} --region ${{ vars.GCP_REGION }} --format='value(address)')" >> "$GITHUB_ENV" + - name: Create instance template for ${{ matrix.network }} run: | if [ "${{ github.event_name }}" == "release" ]; then @@ -288,6 +317,11 @@ jobs: else DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" fi + if [ -n "${{ env.IP_ADDRESS }}" ]; then + IP_FLAG="--address=${{ env.IP_ADDRESS }}" + else + IP_FLAG="" + fi DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" @@ -297,13 +331,22 @@ jobs: echo "No cached disk found for ${{ matrix.network }} in main branch" exit 1 fi + + # Set log file based on input or default + if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + LOG_FILE="${{ inputs.log_file }}" + else + LOG_FILE="${{ vars.CD_LOG_FILE }}" + fi + gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --boot-disk-size=10GB \ --boot-disk-type=pd-standard \ --image-project=cos-cloud \ --image-family=cos-stable \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --subnet=${{ vars.GCP_SUBNETWORK }} \ + ${IP_FLAG} \ --create-disk="${DISK_PARAMS}" \ --container-mount-disk=mount-path='/home/zebra/.cache/zebra',name=${DISK_NAME},mode=rw \ --container-stdin \ @@ -313,7 +356,7 @@ jobs: --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ --scopes cloud-platform \ --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ - --labels=app=zebrad,environment=staging,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ + --labels=app=zebrad,environment=${{ github.event_name == 
'workflow_dispatch' && 'qa' || 'staging' }},network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ --tags zebrad # Check if our destination instance group exists already @@ -344,95 +387,11 @@ jobs: --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --region "${{ vars.GCP_REGION }}" - # This jobs handles the deployment of a single node (1) in the configured GCP zone - # when an instance is required to test a specific commit - # - # Runs: - # - on request, using workflow_dispatch with regenerate-disks - # - # Note: this instances are not automatically replaced or deleted - deploy-instance: - name: Deploy single ${{ inputs.network }} instance - needs: [build, test-configuration-file, test-zebra-conf-path, get-disk-name] - runs-on: ubuntu-latest - timeout-minutes: 30 - env: - CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} - permissions: - contents: "read" - id-token: "write" - # Run even if we don't need a cached disk, but only when triggered by a workflow_dispatch - if: ${{ !failure() && github.event_name == 'workflow_dispatch' }} - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 - with: - short-length: 7 - - # Makes the Zcash network name lowercase. - # - # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase the network. - # - # Passes the lowercase network to subsequent steps using $NETWORK env variable. 
- - name: Downcase network name for labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v2.1.8 - with: - workload_identity_provider: "${{ vars.GCP_WIF }}" - service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 - - # Create instance template from container image - - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad - run: | - DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" - DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" - if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then - DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" - elif [ ${{ !inputs.need_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then - echo "No cached disk required" - else - echo "No cached disk found for ${{ matrix.network }} in main branch" - exit 1 - fi - gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ - --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --boot-disk-size=10GB \ - --boot-disk-type=pd-standard \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ - --create-disk="${DISK_PARAMS}" \ - --container-mount-disk=mount-path='/home/zebra/.cache/zebra',name=${DISK_NAME},mode=rw \ - --container-stdin \ - --container-tty \ - --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ - --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ - --scopes cloud-platform \ - --metadata 
google-logging-enabled=true,google-monitoring-enabled=true \ - --labels=app=zebrad,environment=qa,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ - --tags zebrad \ - --zone ${{ vars.GCP_ZONE }} - failure-issue: name: Open or update issues for release failures # When a new job is added to this workflow, add it to this list. - needs: [versioning, build, deploy-nodes, deploy-instance] + needs: [ versioning, build, deploy-nodes ] + # Only open tickets for failed or cancelled jobs that are not coming from PRs. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 8d6541ff370..e8021ab57de 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -73,7 +73,7 @@ jobs: --container-image electriccoinco/zcashd \ --container-env ZCASHD_NETWORK="${{ inputs.network }}" \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ --scopes cloud-platform \ --labels=app=zcashd,environment=prod,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ From b2fb1a7d56824b2722cc3b134231f55eb7a82123 Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 13 Feb 2025 19:15:32 +0100 Subject: [PATCH 094/245] fix(tests): Check if a cached state is present in scanner tests (#9251) * Fix `scan_task_commands` test * Fix `scan_start_where_left` test * fmt * fmt * Fix var for scanning tests --- docker/entrypoint.sh | 2 +- zebra-scan/tests/scan_task_commands.rs | 15 +++++++++------ zebra-scan/tests/scanner.rs | 18 ++++++++++-------- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 
45376df5b47..f58d7c57147 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -161,7 +161,7 @@ run_tests() { --package zebra-state \ -- --nocapture --include-ignored with_fake_activation_heights - elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then + elif [[ "${TEST_SCANNER}" -eq "1" ]]; then # Test the scanner. exec cargo test --locked --release --package zebra-scan \ -- --nocapture --include-ignored scan_task_commands scan_start_where_left diff --git a/zebra-scan/tests/scan_task_commands.rs b/zebra-scan/tests/scan_task_commands.rs index 1f38a3302fe..55a1bff57a6 100644 --- a/zebra-scan/tests/scan_task_commands.rs +++ b/zebra-scan/tests/scan_task_commands.rs @@ -9,7 +9,7 @@ use std::{fs, time::Duration}; -use color_eyre::{eyre::eyre, Result}; +use color_eyre::Result; use tokio::sync::mpsc::error::TryRecvError; use tower::{util::BoxService, Service}; @@ -61,7 +61,7 @@ pub(crate) async fn run() -> Result<()> { let zebrad_state_path = match std::env::var_os("ZEBRA_CACHE_DIR") { None => { - tracing::error!("ZEBRA_CACHE_DIR is not set"); + tracing::warn!("env var ZEBRA_CACHE_DIR is not set, skipping test"); return Ok(()); } Some(path) => std::path::PathBuf::from(path), @@ -87,9 +87,10 @@ pub(crate) async fn run() -> Result<()> { let (read_state, _, _) = zebra_state::init_read_only(state_config.clone(), &network); - let chain_tip_height = latest_chain_tip - .best_tip_height() - .ok_or_else(|| eyre!("State directory doesn't have a chain tip block"))?; + let Some(chain_tip_height) = latest_chain_tip.best_tip_height() else { + tracing::warn!("chain could not be loaded from cached state, skipping test"); + return Ok(()); + }; let sapling_activation_height = NetworkUpgrade::Sapling .activation_height(&network) @@ -111,7 +112,9 @@ pub(crate) async fn run() -> Result<()> { let storage = Storage::new(&scan_config, &network, false); let mut scan_task = ScanTask::spawn(storage, read_state, chain_tip_change); - tracing::info!("started scan task, sending 
register/subscribe keys messages with zecpages key to start scanning for a new key",); + tracing::info!( + "started scan task, sending register/subscribe keys messages with zecpages key to start scanning for a new key", + ); let keys = [ZECPAGES_SAPLING_VIEWING_KEY.to_string()]; scan_task.register_keys( diff --git a/zebra-scan/tests/scanner.rs b/zebra-scan/tests/scanner.rs index 9423119fb84..ea7736c5ced 100644 --- a/zebra-scan/tests/scanner.rs +++ b/zebra-scan/tests/scanner.rs @@ -141,20 +141,22 @@ async fn scan_binary_starts() -> Result<()> { #[tokio::test] #[cfg(not(target_os = "windows"))] async fn scan_start_where_left() -> Result<()> { - use ZECPAGES_SAPLING_VIEWING_KEY; - let _init_guard = zebra_test::init(); - let Ok(zebrad_cachedir) = std::env::var("ZEBRA_CACHE_DIR") else { - tracing::info!("skipping scan_start_where_left test due to missing cached state, \ - please set a ZEBRA_CACHE_DIR env var with a populated and valid path to run this test"); + let Ok(zebrad_cache_dir) = std::env::var("ZEBRA_CACHE_DIR") else { + tracing::warn!("env var ZEBRA_CACHE_DIR is not set, skipping test"); return Ok(()); }; + if !Path::new(&zebrad_cache_dir).join("state").is_dir() { + tracing::warn!("cache dir does not contain cached state, skipping test"); + return Ok(()); + } + // Logs the network as zebrad would as part of the metadata when starting up. // This is currently needed for the 'Check startup logs' step in CI to pass. 
- let network = zebra_chain::parameters::Network::Mainnet; - tracing::info!("Zcash network: {network}"); + let mainnet = zebra_chain::parameters::Network::Mainnet; + tracing::info!("Zcash network: {mainnet}"); let scanning_cache_dir = testdir()?.path().join("scanner").to_path_buf(); @@ -164,7 +166,7 @@ async fn scan_start_where_left() -> Result<()> { let rpc_listen_addr = "127.0.0.1:18232"; let args = args![ "--zebrad-cache-dir", - zebrad_cachedir, + zebrad_cache_dir, "--scanning-cache-dir", scanning_cache_dir.to_str().unwrap(), "--sapling-keys-to-scan", From 4613dcd259c8759ef3b8e695912abd9384ad507d Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Sat, 15 Feb 2025 09:45:33 -0300 Subject: [PATCH 095/245] feat(rpc): add verbose support to getrawmempool (#9249) * feat(rpc): add verbose support to getrawmempool * make 'if verbose' clearer * cargo fmt * fix unused warnings * Update zebra-rpc/src/methods/types/get_raw_mempool.rs Co-authored-by: Alfredo Garcia --------- Co-authored-by: Alfredo Garcia --- zebra-chain/src/transaction/arbitrary.rs | 8 +- zebra-chain/src/transaction/unmined.rs | 11 ++ zebra-rpc/src/methods.rs | 76 +++++++---- zebra-rpc/src/methods/tests/prop.rs | 38 +++++- zebra-rpc/src/methods/tests/snapshot.rs | 8 +- zebra-rpc/src/methods/tests/vectors.rs | 2 + zebra-rpc/src/methods/types.rs | 2 + .../src/methods/types/get_raw_mempool.rs | 118 ++++++++++++++++++ .../components/inbound/tests/fake_peer_set.rs | 2 +- zebrad/src/components/mempool.rs | 3 +- zebrad/src/components/mempool/storage.rs | 12 +- .../components/mempool/storage/tests/prop.rs | 20 +-- .../mempool/storage/tests/vectors.rs | 15 +-- .../mempool/storage/verified_set.rs | 6 +- zebrad/src/components/mempool/tests/prop.rs | 6 +- zebrad/src/components/mempool/tests/vector.rs | 8 +- 16 files changed, 274 insertions(+), 61 deletions(-) create mode 100644 zebra-rpc/src/methods/types/get_raw_mempool.rs diff --git a/zebra-chain/src/transaction/arbitrary.rs 
b/zebra-chain/src/transaction/arbitrary.rs index 150801cf305..f51b209fc02 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -14,7 +14,7 @@ use crate::{ parameters::{Network, NetworkUpgrade}, primitives::{Bctv14Proof, Groth16Proof, Halo2Proof, ZkSnarkProof}, sapling::{self, AnchorVariant, PerSpendAnchor, SharedAnchor}, - serialization::ZcashDeserializeInto, + serialization::{self, ZcashDeserializeInto}, sprout, transparent, value_balance::{ValueBalance, ValueBalanceError}, LedgerState, @@ -814,6 +814,8 @@ impl Arbitrary for VerifiedUnminedTx { ) }), any::(), + serialization::arbitrary::datetime_u32(), + any::(), ) .prop_map( |( @@ -822,6 +824,8 @@ impl Arbitrary for VerifiedUnminedTx { legacy_sigop_count, (conventional_actions, mut unpaid_actions), fee_weight_ratio, + time, + height, )| { if unpaid_actions > conventional_actions { unpaid_actions = conventional_actions; @@ -837,6 +841,8 @@ impl Arbitrary for VerifiedUnminedTx { conventional_actions, unpaid_actions, fee_weight_ratio, + time: Some(time), + height: Some(height), } }, ) diff --git a/zebra-chain/src/transaction/unmined.rs b/zebra-chain/src/transaction/unmined.rs index da716573e8b..8d808781384 100644 --- a/zebra-chain/src/transaction/unmined.rs +++ b/zebra-chain/src/transaction/unmined.rs @@ -19,6 +19,7 @@ use std::{fmt, sync::Arc}; use crate::{ amount::{Amount, NonNegative}, + block::Height, serialization::ZcashSerialize, transaction::{ AuthDigest, Hash, @@ -358,6 +359,14 @@ pub struct VerifiedUnminedTx { /// /// [ZIP-317]: https://zips.z.cash/zip-0317#block-production pub fee_weight_ratio: f32, + + /// The time the transaction was added to the mempool, or None if it has not + /// reached the mempool yet. + pub time: Option>, + + /// The tip height when the transaction was added to the mempool, or None if + /// it has not reached the mempool yet. 
+ pub height: Option, } impl fmt::Debug for VerifiedUnminedTx { @@ -399,6 +408,8 @@ impl VerifiedUnminedTx { fee_weight_ratio, conventional_actions, unpaid_actions, + time: None, + height: None, }) } diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 45abd973a72..55003872bd3 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,6 +6,8 @@ //! Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. +#[cfg(feature = "getblocktemplate-rpcs")] +use std::collections::HashMap; use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; @@ -56,6 +58,10 @@ pub mod trees; pub mod types; +use types::GetRawMempool; +#[cfg(feature = "getblocktemplate-rpcs")] +use types::MempoolObject; + #[cfg(feature = "getblocktemplate-rpcs")] pub mod get_block_template_rpcs; @@ -215,11 +221,15 @@ pub trait Rpc { /// Returns all transaction ids in the memory pool, as a JSON array. /// + /// # Parameters + /// + /// - `verbose`: (boolean, optional, default=false) true for a json object, false for array of transaction ids. + /// /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) /// method: post /// tags: blockchain #[method(name = "getrawmempool")] - async fn get_raw_mempool(&self) -> Result>; + async fn get_raw_mempool(&self, verbose: Option) -> Result; /// Returns information about the given block's Sapling & Orchard tree state. 
/// @@ -1063,7 +1073,10 @@ where .ok_or_misc_error("No blocks in state") } - async fn get_raw_mempool(&self) -> Result> { + async fn get_raw_mempool(&self, verbose: Option) -> Result { + #[allow(unused)] + let verbose = verbose.unwrap_or(false); + #[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::block::MAX_BLOCK_BYTES; @@ -1074,7 +1087,7 @@ where let mut mempool = self.mempool.clone(); #[cfg(feature = "getblocktemplate-rpcs")] - let request = if should_use_zcashd_order { + let request = if should_use_zcashd_order || verbose { mempool::Request::FullTransactions } else { mempool::Request::TransactionIds @@ -1094,27 +1107,46 @@ where #[cfg(feature = "getblocktemplate-rpcs")] mempool::Response::FullTransactions { mut transactions, - transaction_dependencies: _, + transaction_dependencies, last_seen_tip_hash: _, } => { - // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker - transactions.sort_by_cached_key(|tx| { - // zcashd uses modified fee here but Zebra doesn't currently - // support prioritizing transactions - std::cmp::Reverse(( - i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 - / tx.transaction.size as u128, - // transaction hashes are compared in their serialized byte-order. - tx.transaction.id.mined_id(), - )) - }); - - let tx_ids: Vec = transactions - .iter() - .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) - .collect(); + if verbose { + let map = transactions + .iter() + .map(|unmined_tx| { + ( + unmined_tx.transaction.id.mined_id().encode_hex(), + MempoolObject::from_verified_unmined_tx( + unmined_tx, + &transactions, + &transaction_dependencies, + ), + ) + }) + .collect::>(); + Ok(GetRawMempool::Verbose(map)) + } else { + // Sort transactions in descending order by fee/size, using + // hash in serialized byte order as a tie-breaker. Note that + // this is only done in not verbose because in verbose mode + // a dictionary is returned, where order does not matter. 
+ transactions.sort_by_cached_key(|tx| { + // zcashd uses modified fee here but Zebra doesn't currently + // support prioritizing transactions + std::cmp::Reverse(( + i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 + / tx.transaction.size as u128, + // transaction hashes are compared in their serialized byte-order. + tx.transaction.id.mined_id(), + )) + }); + let tx_ids: Vec = transactions + .iter() + .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) + .collect(); - Ok(tx_ids) + Ok(GetRawMempool::TxIds(tx_ids)) + } } mempool::Response::TransactionIds(unmined_transaction_ids) => { @@ -1126,7 +1158,7 @@ where // Sort returned transaction IDs in numeric/string order. tx_ids.sort(); - Ok(tx_ids) + Ok(GetRawMempool::TxIds(tx_ids)) } _ => unreachable!("unmatched response to a transactionids request"), diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index e2ff0bc68f5..0200f88a300 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -1,5 +1,7 @@ //! Randomised property tests for RPC methods. +#[cfg(feature = "getblocktemplate-rpcs")] +use std::collections::HashMap; use std::{collections::HashSet, fmt::Debug, sync::Arc}; use futures::{join, FutureExt, TryFutureExt}; @@ -27,7 +29,12 @@ use zebra_state::{BoxError, GetBlockTemplateChainInfo}; use zebra_test::mock_service::MockService; -use crate::methods::{self, types::Balance}; +#[cfg(feature = "getblocktemplate-rpcs")] +use crate::methods::types::MempoolObject; +use crate::methods::{ + self, + types::{Balance, GetRawMempool}, +}; use super::super::{ AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, @@ -228,7 +235,8 @@ proptest! { /// returns those IDs as hexadecimal strings. 
#[test] fn mempool_transactions_are_sent_to_caller(transactions in any::>(), - network in any::()) { + network in any::(), + verbose in any::>()) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); let (mut mempool, mut state, rpc, mempool_tx_queue) = mock_services(network, NoChainTip); @@ -254,7 +262,7 @@ proptest! { .expect_request(mempool::Request::TransactionIds) .map_ok(|r|r.respond(mempool::Response::TransactionIds(transaction_ids))); - (expected_response, mempool_query) + (GetRawMempool::TxIds(expected_response), mempool_query) }; // Note: this depends on `SHOULD_USE_ZCASHD_ORDER` being true. @@ -278,18 +286,38 @@ proptest! { .map(|tx| tx.transaction.id.mined_id().encode_hex::()) .collect::>(); + let transaction_dependencies = Default::default(); + let expected_response = if verbose.unwrap_or(false) { + let map = transactions + .iter() + .map(|unmined_tx| { + ( + unmined_tx.transaction.id.mined_id().encode_hex(), + MempoolObject::from_verified_unmined_tx( + unmined_tx, + &transactions, + &transaction_dependencies, + ), + ) + }) + .collect::>(); + GetRawMempool::Verbose(map) + } else { + GetRawMempool::TxIds(expected_response) + }; + let mempool_query = mempool .expect_request(mempool::Request::FullTransactions) .map_ok(|r| r.respond(mempool::Response::FullTransactions { transactions, - transaction_dependencies: Default::default(), + transaction_dependencies, last_seen_tip_hash: [0; 32].into(), })); (expected_response, mempool_query) }; - let (rpc_rsp, _) = tokio::join!(rpc.get_raw_mempool(), mempool_query); + let (rpc_rsp, _) = tokio::join!(rpc.get_raw_mempool(verbose), mempool_query); prop_assert_eq!(rpc_rsp?, expected_response); diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 217b54f6510..328d3d0f5a4 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -399,9 +399,13 @@ async fn test_rpc_response_data_for_network(network: 
&Network) { }); // make the api call - let get_raw_mempool = rpc.get_raw_mempool(); + let get_raw_mempool = rpc.get_raw_mempool(None); let (response, _) = futures::join!(get_raw_mempool, mempool_req); - let get_raw_mempool = response.expect("We should have a GetRawTransaction struct"); + let GetRawMempool::TxIds(get_raw_mempool) = + response.expect("We should have a GetRawTransaction struct") + else { + panic!("should return TxIds for non verbose"); + }; snapshot_rpc_getrawmempool(get_raw_mempool, &settings); diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index c2f3700b3bb..a25b399c697 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1826,6 +1826,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { conventional_actions, unpaid_actions: 0, fee_weight_ratio: 1.0, + time: None, + height: None, }; let next_fake_tip_hash = diff --git a/zebra-rpc/src/methods/types.rs b/zebra-rpc/src/methods/types.rs index db3e36c6554..dcb6d530a69 100644 --- a/zebra-rpc/src/methods/types.rs +++ b/zebra-rpc/src/methods/types.rs @@ -1,7 +1,9 @@ //! Types used in RPC methods. mod get_blockchain_info; +mod get_raw_mempool; mod zec; pub use get_blockchain_info::Balance; +pub use get_raw_mempool::{GetRawMempool, MempoolObject}; pub use zec::Zec; diff --git a/zebra-rpc/src/methods/types/get_raw_mempool.rs b/zebra-rpc/src/methods/types/get_raw_mempool.rs new file mode 100644 index 00000000000..882ac98b8fc --- /dev/null +++ b/zebra-rpc/src/methods/types/get_raw_mempool.rs @@ -0,0 +1,118 @@ +//! Types used in `getrawmempool` RPC method. 
+ +use std::collections::HashMap; +#[cfg(feature = "getblocktemplate-rpcs")] +use std::collections::HashSet; + +#[cfg(feature = "getblocktemplate-rpcs")] +use hex::ToHex as _; + +use super::Zec; +#[cfg(feature = "getblocktemplate-rpcs")] +use zebra_chain::transaction::VerifiedUnminedTx; +use zebra_chain::{amount::NonNegative, block::Height}; +#[cfg(feature = "getblocktemplate-rpcs")] +use zebra_node_services::mempool::TransactionDependencies; + +/// Response to a `getrawmempool` RPC request. +/// +/// See the notes for the [`Rpc::get_raw_mempool` method]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[serde(untagged)] +pub enum GetRawMempool { + /// The transaction IDs, as hex strings (verbose=0) + TxIds(Vec), + /// A map of transaction IDs to mempool transaction details objects + /// (verbose=1) + Verbose(HashMap), +} + +/// A mempool transaction details object as returned by `getrawmempool` in +/// verbose mode. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct MempoolObject { + /// Transaction size in bytes. + pub(crate) size: u64, + /// Transaction fee in zatoshi. + pub(crate) fee: Zec, + /// Transaction fee with fee deltas used for mining priority. + #[serde(rename = "modifiedfee")] + pub(crate) modified_fee: Zec, + /// Local time transaction entered pool in seconds since 1 Jan 1970 GMT + pub(crate) time: i64, + /// Block height when transaction entered pool. + pub(crate) height: Height, + /// Number of in-mempool descendant transactions (including this one). + pub(crate) descendantcount: u64, + /// Size of in-mempool descendants (including this one). + pub(crate) descendantsize: u64, + /// Modified fees (see "modifiedfee" above) of in-mempool descendants + /// (including this one). + pub(crate) descendantfees: u64, + /// Transaction IDs of unconfirmed transactions used as inputs for this + /// transaction. 
+ pub(crate) depends: Vec, +} + +impl MempoolObject { + #[cfg(feature = "getblocktemplate-rpcs")] + pub(crate) fn from_verified_unmined_tx( + unmined_tx: &VerifiedUnminedTx, + transactions: &[VerifiedUnminedTx], + transaction_dependencies: &TransactionDependencies, + ) -> Self { + // Map transactions by their txids to make lookups easier + let transactions_by_id = transactions + .iter() + .map(|unmined_tx| (unmined_tx.transaction.id.mined_id(), unmined_tx)) + .collect::>(); + + // Get txids of this transaction's descendants (dependents) + let empty_set = HashSet::new(); + let deps = transaction_dependencies + .dependents() + .get(&unmined_tx.transaction.id.mined_id()) + .unwrap_or(&empty_set); + let deps_len = deps.len(); + + // For each dependent: get the tx, then its size and fee; then sum them + // up + let (deps_size, deps_fees) = deps + .iter() + .filter_map(|id| transactions_by_id.get(id)) + .map(|unmined_tx| (unmined_tx.transaction.size, unmined_tx.miner_fee)) + .reduce(|(size1, fee1), (size2, fee2)| { + (size1 + size2, (fee1 + fee2).unwrap_or_default()) + }) + .unwrap_or((0, Default::default())); + + // Create the MempoolObject from the information we have gathered + let mempool_object = MempoolObject { + size: unmined_tx.transaction.size as u64, + fee: unmined_tx.miner_fee.into(), + // Change this if we ever support fee deltas (prioritisetransaction call) + modified_fee: unmined_tx.miner_fee.into(), + time: unmined_tx + .time + .map(|time| time.timestamp()) + .unwrap_or_default(), + height: unmined_tx.height.unwrap_or(Height(0)), + // Note that the following three count this transaction itself + descendantcount: deps_len as u64 + 1, + descendantsize: (deps_size + unmined_tx.transaction.size) as u64, + descendantfees: (deps_fees + unmined_tx.miner_fee) + .unwrap_or_default() + .into(), + // Get dependencies as a txid vector + depends: transaction_dependencies + .dependencies() + .get(&unmined_tx.transaction.id.mined_id()) + .cloned() + 
.unwrap_or_else(HashSet::new) + .iter() + .map(|id| id.encode_hex()) + .collect(), + }; + mempool_object + } +} diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index f3c05315258..08ee467d48a 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -1073,7 +1073,7 @@ fn add_some_stuff_to_mempool( // Insert the genesis block coinbase transaction into the mempool storage. mempool_service .storage() - .insert(genesis_transactions[0].clone(), Vec::new()) + .insert(genesis_transactions[0].clone(), Vec::new(), None) .unwrap(); genesis_transactions diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 0d76b778d87..4a9bb3b00e9 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -595,7 +595,8 @@ impl Service for Mempool { // the best chain changes (which is the only way to stay at the same height), and the // mempool re-verifies all pending tx_downloads when there's a `TipAction::Reset`. if best_tip_height == expected_tip_height { - let insert_result = storage.insert(tx.clone(), spent_mempool_outpoints); + let insert_result = + storage.insert(tx, spent_mempool_outpoints, best_tip_height); tracing::trace!( ?insert_result, diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index cee0845ba2b..06ddea4a55b 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -17,6 +17,7 @@ use std::{ use thiserror::Error; use zebra_chain::{ + block::Height, transaction::{self, Hash, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx}, transparent, }; @@ -203,6 +204,7 @@ impl Storage { &mut self, tx: VerifiedUnminedTx, spent_mempool_outpoints: Vec, + height: Option, ) -> Result { // # Security // @@ -238,10 +240,12 @@ impl Storage { // Then, we try to insert into the pool. 
If this fails the transaction is rejected. let mut result = Ok(unmined_tx_id); - if let Err(rejection_error) = - self.verified - .insert(tx, spent_mempool_outpoints, &mut self.pending_outputs) - { + if let Err(rejection_error) = self.verified.insert( + tx, + spent_mempool_outpoints, + &mut self.pending_outputs, + height, + ) { tracing::debug!( ?tx_id, ?rejection_error, diff --git a/zebrad/src/components/mempool/storage/tests/prop.rs b/zebrad/src/components/mempool/storage/tests/prop.rs index bac143349a5..3ddb67583b0 100644 --- a/zebrad/src/components/mempool/storage/tests/prop.rs +++ b/zebrad/src/components/mempool/storage/tests/prop.rs @@ -72,7 +72,7 @@ proptest! { for (transaction_to_accept, transaction_to_reject) in input_permutations { let id_to_accept = transaction_to_accept.transaction.id; - prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new()), Ok(id_to_accept)); + prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new(), None), Ok(id_to_accept)); // Make unique IDs by converting the index to bytes, and writing it to each ID let unique_ids = (0..MAX_EVICTION_MEMORY_ENTRIES as u32).map(move |index| { @@ -96,7 +96,7 @@ proptest! { // - transaction_to_accept, or // - a rejection from rejections prop_assert_eq!( - storage.insert(transaction_to_reject, Vec::new()), + storage.insert(transaction_to_reject, Vec::new(), None), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); @@ -147,13 +147,13 @@ proptest! 
{ if i < transactions.len() - 1 { // The initial transactions should be successful prop_assert_eq!( - storage.insert(transaction.clone(), Vec::new()), + storage.insert(transaction.clone(), Vec::new(), None), Ok(tx_id) ); } else { // The final transaction will cause a random eviction, // which might return an error if this transaction is chosen - let result = storage.insert(transaction.clone(), Vec::new()); + let result = storage.insert(transaction.clone(), Vec::new(), None); if result.is_ok() { prop_assert_eq!( @@ -281,10 +281,10 @@ proptest! { let id_to_accept = transaction_to_accept.transaction.id; let id_to_reject = transaction_to_reject.transaction.id; - prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new()), Ok(id_to_accept)); + prop_assert_eq!(storage.insert(transaction_to_accept, Vec::new(), None), Ok(id_to_accept)); prop_assert_eq!( - storage.insert(transaction_to_reject, Vec::new()), + storage.insert(transaction_to_reject, Vec::new(), None), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); @@ -332,19 +332,19 @@ proptest! { let id_to_reject = transaction_to_reject.transaction.id; prop_assert_eq!( - storage.insert(first_transaction_to_accept, Vec::new()), + storage.insert(first_transaction_to_accept, Vec::new(), None), Ok(first_id_to_accept) ); prop_assert_eq!( - storage.insert(transaction_to_reject, Vec::new()), + storage.insert(transaction_to_reject, Vec::new(), None), Err(MempoolError::StorageEffectsTip(SameEffectsTipRejectionError::SpendConflict)) ); prop_assert!(storage.contains_rejected(&id_to_reject)); prop_assert_eq!( - storage.insert(second_transaction_to_accept, Vec::new()), + storage.insert(second_transaction_to_accept, Vec::new(), None), Ok(second_id_to_accept) ); @@ -371,7 +371,7 @@ proptest! 
{ .filter_map(|transaction| { let id = transaction.transaction.id; - storage.insert(transaction.clone(), Vec::new()).ok().map(|_| id) + storage.insert(transaction.clone(), Vec::new(), None).ok().map(|_| id) }) .collect(); diff --git a/zebrad/src/components/mempool/storage/tests/vectors.rs b/zebrad/src/components/mempool/storage/tests/vectors.rs index e4c1cd471fc..202631b8bab 100644 --- a/zebrad/src/components/mempool/storage/tests/vectors.rs +++ b/zebrad/src/components/mempool/storage/tests/vectors.rs @@ -40,7 +40,7 @@ fn mempool_storage_crud_exact_mainnet() { .expect("at least one unmined transaction"); // Insert unmined tx into the mempool. - let _ = storage.insert(unmined_tx.clone(), Vec::new()); + let _ = storage.insert(unmined_tx.clone(), Vec::new(), None); // Check that it is in the mempool, and not rejected. assert!(storage.contains_transaction_exact(&unmined_tx.transaction.id.mined_id())); @@ -94,7 +94,7 @@ fn mempool_storage_basic_for_network(network: Network) -> Result<()> { let mut maybe_inserted_transactions = Vec::new(); let mut some_rejected_transactions = Vec::new(); for unmined_transaction in unmined_transactions.clone() { - let result = storage.insert(unmined_transaction.clone(), Vec::new()); + let result = storage.insert(unmined_transaction.clone(), Vec::new(), None); match result { Ok(_) => { // While the transaction was inserted here, it can be rejected later. @@ -168,7 +168,7 @@ fn mempool_storage_crud_same_effects_mainnet() { .expect("at least one unmined transaction"); // Insert unmined tx into the mempool. - let _ = storage.insert(unmined_tx_1.clone(), Vec::new()); + let _ = storage.insert(unmined_tx_1.clone(), Vec::new(), None); // Check that it is in the mempool, and not rejected. 
assert!(storage.contains_transaction_exact(&unmined_tx_1.transaction.id.mined_id())); @@ -189,7 +189,7 @@ fn mempool_storage_crud_same_effects_mainnet() { Some(SameEffectsChainRejectionError::Mined.into()) ); assert_eq!( - storage.insert(unmined_tx_1, Vec::new()), + storage.insert(unmined_tx_1, Vec::new(), None), Err(SameEffectsChainRejectionError::Mined.into()) ); @@ -207,7 +207,7 @@ fn mempool_storage_crud_same_effects_mainnet() { // Insert unmined tx into the mempool. assert_eq!( - storage.insert(unmined_tx_2.clone(), Vec::new()), + storage.insert(unmined_tx_2.clone(), Vec::new(), None), Ok(unmined_tx_2.transaction.id) ); @@ -230,7 +230,7 @@ fn mempool_storage_crud_same_effects_mainnet() { Some(SameEffectsChainRejectionError::DuplicateSpend.into()) ); assert_eq!( - storage.insert(unmined_tx_2, Vec::new()), + storage.insert(unmined_tx_2, Vec::new(), None), Err(SameEffectsChainRejectionError::DuplicateSpend.into()) ); } @@ -272,6 +272,7 @@ fn mempool_expired_basic_for_network(network: Network) -> Result<()> { ) .expect("verification should pass"), Vec::new(), + None, )?; assert_eq!(storage.transaction_count(), 1); @@ -329,7 +330,7 @@ fn mempool_removes_dependent_transactions() -> Result<()> { } storage - .insert(unmined_tx.clone(), fake_spent_outpoints) + .insert(unmined_tx.clone(), fake_spent_outpoints, None) .expect("should insert transaction"); // Add up to 5 of this transaction's outputs as fake spent outpoints for the next transaction diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index 7cd82fb0be4..50aedb58093 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -7,6 +7,7 @@ use std::{ }; use zebra_chain::{ + block::Height, orchard, sapling, sprout, transaction::{self, UnminedTx, VerifiedUnminedTx}, transparent, @@ -141,9 +142,10 @@ impl VerifiedSet { /// same nullifier. 
pub fn insert( &mut self, - transaction: VerifiedUnminedTx, + mut transaction: VerifiedUnminedTx, spent_mempool_outpoints: Vec, pending_outputs: &mut PendingOutputs, + height: Option, ) -> Result<(), SameEffectsTipRejectionError> { if self.has_spend_conflicts(&transaction.transaction) { return Err(SameEffectsTipRejectionError::SpendConflict); @@ -176,6 +178,8 @@ impl VerifiedSet { self.transactions_serialized_size += transaction.transaction.size; self.total_cost += transaction.cost(); + transaction.time = Some(chrono::Utc::now()); + transaction.height = height; self.transactions.insert(tx_id, transaction); self.update_metrics(); diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index e12b205e34c..6ad3613f3b4 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -74,7 +74,7 @@ proptest! { // Insert a dummy transaction. mempool .storage() - .insert(transaction.0, Vec::new()) + .insert(transaction.0, Vec::new(), None) .expect("Inserting a transaction should succeed"); // The first call to `poll_ready` shouldn't clear the storage yet. @@ -148,7 +148,7 @@ proptest! { // Insert the dummy transaction into the mempool. mempool .storage() - .insert(transaction.0.clone(), Vec::new()) + .insert(transaction.0.clone(), Vec::new(), None) .expect("Inserting a transaction should succeed"); // Set the new chain tip. @@ -205,7 +205,7 @@ proptest! { // Insert a dummy transaction. mempool .storage() - .insert(transaction, Vec::new()) + .insert(transaction, Vec::new(), None) .expect("Inserting a transaction should succeed"); // The first call to `poll_ready` shouldn't clear the storage yet. 
diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 9dd3557de9c..5af54dfa6e1 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -63,7 +63,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> { let mut inserted_ids = HashSet::new(); service .storage() - .insert(genesis_transaction.clone(), Vec::new())?; + .insert(genesis_transaction.clone(), Vec::new(), None)?; inserted_ids.insert(genesis_transaction.transaction.id); // Test `Request::TransactionIds` @@ -133,7 +133,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> { inserted_ids.insert(tx.transaction.id); // Error must be ignored because a insert can trigger an eviction and // an error is returned if the transaction being inserted in chosen. - let _ = service.storage().insert(tx.clone(), Vec::new()); + let _ = service.storage().insert(tx.clone(), Vec::new(), None); } // Test `Request::RejectedTransactionIds` @@ -214,7 +214,7 @@ async fn mempool_queue_single() -> Result<(), Report> { for tx in transactions.iter() { // Error must be ignored because a insert can trigger an eviction and // an error is returned if the transaction being inserted in chosen. - let _ = service.storage().insert(tx.clone(), Vec::new()); + let _ = service.storage().insert(tx.clone(), Vec::new(), None); } // Test `Request::Queue` for a new transaction @@ -297,7 +297,7 @@ async fn mempool_service_disabled() -> Result<(), Report> { // Insert the genesis block coinbase transaction into the mempool storage. service .storage() - .insert(genesis_transaction.clone(), Vec::new())?; + .insert(genesis_transaction.clone(), Vec::new(), None)?; // Test if the mempool answers correctly (i.e. 
is enabled) let response = service From b4211aa1cf7756234a834809b68e1359b4d4b4d6 Mon Sep 17 00:00:00 2001 From: Arya Date: Sat, 15 Feb 2025 17:02:17 -0500 Subject: [PATCH 096/245] feat(network): Track misbehaving peer connections and ban them past a threshold (#9201) * feat(network): Add misbehavior tracking for peers in the address book * - Add a `misbehavior_score` field to `MetaAddr` - Add a `bans_by_ip` field to `AddressBook` - Update the `AddressBook::update()` method to: - increment misbehavior scores in its entries, - add addr ips to bans_by_ip if the score is excessive, - remove any addrs at the banned ip - Avoid responding to `GetAddr` requests with addresses of misbehaving peers (return None from `sanitized()`), - Avoid new inbound or outbound connections to banned ips * Drops banned peer connections in peer set's `poll_ready()` method * Adds rudimentary misbehavior score tracking * fixes some proptests, moves and removes some TODOs * fixes lint * Removes outdated TODO * Adds stub for acceptance test * updates call to updated fn * Stores likely inbound peer connection addresses in address book and return their IPs with the default port instead of the transient port when responding to GetAddr requests * Avoids gossiping peer addrs from inbound connections * updates test to check that sanitize won't return inbound peer addrs or addrs with non-zero misbehaviour scores. updated misbehaviour score for potentially unavoidable errors. * Updates `generate` RPC to support any network where PoW is disabled. 
Updates acceptance test to check that zebrad instances disconnect once one of them advertises a block with an invalid PoW * minor tangential cleanup * Finishes acceptance test, sends misbehavior updates from syncer, and always updates address book entries if the update is to their misbehaviour score * skip test on windows (to minimize risk of port conflict) * Applies suggestions from code review --- zebra-chain/src/tests/vectors.rs | 9 + zebra-consensus/src/block.rs | 11 ++ zebra-consensus/src/checkpoint.rs | 15 ++ zebra-consensus/src/checkpoint/list.rs | 2 +- zebra-consensus/src/error.rs | 58 ++++++ zebra-consensus/src/router.rs | 9 + zebra-network/src/address_book.rs | 57 ++++++ zebra-network/src/address_book_updater.rs | 32 +++- zebra-network/src/config/cache_dir.rs | 12 ++ zebra-network/src/constants.rs | 7 + zebra-network/src/meta_addr.rs | 98 ++++++++-- zebra-network/src/meta_addr/tests/prop.rs | 2 + zebra-network/src/meta_addr/tests/vectors.rs | 4 + zebra-network/src/peer/connection.rs | 25 ++- .../src/peer/connection/tests/prop.rs | 2 +- zebra-network/src/peer/handshake.rs | 15 +- zebra-network/src/peer_set/initialize.rs | 75 +++++++- .../src/peer_set/initialize/tests/vectors.rs | 25 ++- zebra-network/src/peer_set/set.rs | 25 ++- zebra-network/src/peer_set/set/tests.rs | 2 + .../src/protocol/internal/response.rs | 8 +- .../src/methods/get_block_template_rpcs.rs | 4 +- zebrad/src/commands/start.rs | 5 +- zebrad/src/components/inbound.rs | 38 +++- zebrad/src/components/inbound/downloads.rs | 49 +++-- .../components/inbound/tests/fake_peer_set.rs | 17 +- .../components/inbound/tests/real_peer_set.rs | 16 +- zebrad/src/components/mempool.rs | 26 ++- zebrad/src/components/mempool/downloads.rs | 19 +- zebrad/src/components/mempool/storage.rs | 4 +- zebrad/src/components/mempool/tests/prop.rs | 2 + zebrad/src/components/mempool/tests/vector.rs | 7 +- zebrad/src/components/sync.rs | 28 ++- zebrad/src/components/sync/downloads.rs | 7 +- 
zebrad/src/components/sync/tests/timing.rs | 2 + zebrad/src/components/sync/tests/vectors.rs | 107 ++++++++--- zebrad/tests/acceptance.rs | 173 ++++++++++++++++++ .../get_block_template.rs | 2 +- .../get_block_template_rpcs/get_peer_info.rs | 2 +- 39 files changed, 871 insertions(+), 130 deletions(-) diff --git a/zebra-chain/src/tests/vectors.rs b/zebra-chain/src/tests/vectors.rs index 44fe6644fbe..08c518d4434 100644 --- a/zebra-chain/src/tests/vectors.rs +++ b/zebra-chain/src/tests/vectors.rs @@ -37,6 +37,15 @@ impl Network { } } + /// Returns iterator over deserialized blocks. + pub fn block_parsed_iter(&self) -> impl Iterator { + self.block_iter().map(|(_, block_bytes)| { + block_bytes + .zcash_deserialize_into::() + .expect("block is structurally valid") + }) + } + /// Returns iterator over verified unmined transactions in the provided block height range. pub fn unmined_transactions_in_blocks( &self, diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 1c959dd284e..c4c4ab336ad 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -100,6 +100,17 @@ impl VerifyBlockError { _ => false, } } + + /// Returns a suggested misbehaviour score increment for a certain error. + pub fn misbehavior_score(&self) -> u32 { + // TODO: Adjust these values based on zcashd (#9258). + use VerifyBlockError::*; + match self { + Block { source } => source.misbehavior_score(), + Equihash { .. } => 100, + _other => 0, + } + } } /// The maximum allowed number of legacy signature check operations in a block. diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index e9fc2b616cb..fd355f0b7ef 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -1037,6 +1037,21 @@ impl VerifyCheckpointError { _ => false, } } + + /// Returns a suggested misbehaviour score increment for a certain error. 
+ pub fn misbehavior_score(&self) -> u32 { + // TODO: Adjust these values based on zcashd (#9258). + match self { + VerifyCheckpointError::VerifyBlock(verify_block_error) => { + verify_block_error.misbehavior_score() + } + VerifyCheckpointError::SubsidyError(_) + | VerifyCheckpointError::CoinbaseHeight { .. } + | VerifyCheckpointError::DuplicateTransaction + | VerifyCheckpointError::AmountError(_) => 100, + _other => 0, + } + } } /// The CheckpointVerifier service implementation. diff --git a/zebra-consensus/src/checkpoint/list.rs b/zebra-consensus/src/checkpoint/list.rs index e7322f08df3..923f13004e7 100644 --- a/zebra-consensus/src/checkpoint/list.rs +++ b/zebra-consensus/src/checkpoint/list.rs @@ -67,7 +67,7 @@ impl ParameterCheckpoint for Network { let (checkpoints_for_network, should_fallback_to_genesis_hash_as_checkpoint) = match self { Network::Mainnet => (MAINNET_CHECKPOINTS, false), Network::Testnet(params) if params.is_default_testnet() => (TESTNET_CHECKPOINTS, false), - Network::Testnet(_params) => (TESTNET_CHECKPOINTS, true), + Network::Testnet(_params) => ("", true), }; // Check that the list starts with the correct genesis block and parses checkpoint list. diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index 9c7307d5ee1..80c111d949f 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -284,6 +284,50 @@ impl From for TransactionError { } } +impl TransactionError { + /// Returns a suggested misbehaviour score increment for a certain error when + /// verifying a mempool transaction. + pub fn mempool_misbehavior_score(&self) -> u32 { + use TransactionError::*; + + // TODO: Adjust these values based on zcashd (#9258). + match self { + ImmatureTransparentCoinbaseSpend { .. } + | UnshieldedTransparentCoinbaseSpend { .. 
} + | CoinbasePosition + | CoinbaseAfterFirst + | CoinbaseHasJoinSplit + | CoinbaseHasSpend + | CoinbaseHasOutputPreHeartwood + | CoinbaseHasEnableSpendsOrchard + | CoinbaseOutputsNotDecryptable + | CoinbaseInMempool + | NonCoinbaseHasCoinbaseInput + | CoinbaseExpiryBlockHeight { .. } + | IncorrectFee + | Subsidy(_) + | WrongVersion + | NoInputs + | NoOutputs + | BadBalance + | Script(_) + | SmallOrder + | Groth16(_) + | MalformedGroth16(_) + | Ed25519(_) + | RedJubjub(_) + | RedPallas(_) + | BothVPubsNonZero + | DisabledAddToSproutPool + | NotEnoughFlags + | WrongConsensusBranchId + | MissingConsensusBranchId => 100, + + _other => 0, + } + } +} + #[derive(Error, Clone, Debug, PartialEq, Eq)] #[allow(missing_docs)] pub enum BlockError { @@ -373,4 +417,18 @@ impl BlockError { pub fn is_duplicate_request(&self) -> bool { matches!(self, BlockError::AlreadyInChain(..)) } + + /// Returns a suggested misbehaviour score increment for a certain error. + pub(crate) fn misbehavior_score(&self) -> u32 { + use BlockError::*; + + match self { + MissingHeight(_) + | MaxHeight(_, _, _) + | InvalidDifficulty(_, _) + | TargetDifficultyLimit(_, _, _, _, _) + | DifficultyFilter(_, _, _, _) => 100, + _other => 0, + } + } } diff --git a/zebra-consensus/src/router.rs b/zebra-consensus/src/router.rs index 38819d0b245..fb56699ce6b 100644 --- a/zebra-consensus/src/router.rs +++ b/zebra-consensus/src/router.rs @@ -139,6 +139,15 @@ impl RouterError { RouterError::Block { source, .. } => source.is_duplicate_request(), } } + + /// Returns a suggested misbehaviour score increment for a certain error. + pub fn misbehavior_score(&self) -> u32 { + // TODO: Adjust these values based on zcashd (#9258). 
+ match self { + RouterError::Checkpoint { source } => source.misbehavior_score(), + RouterError::Block { source } => source.misbehavior_score(), + } + } } impl Service for BlockVerifierRouter diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index aee8629f3bc..c30ac30943c 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -10,6 +10,7 @@ use std::{ }; use chrono::Utc; +use indexmap::IndexMap; use ordered_map::OrderedMap; use tokio::sync::watch; use tracing::Span; @@ -80,6 +81,9 @@ pub struct AddressBook { // TODO: Replace with `by_ip: HashMap>` to support configured `max_connections_per_ip` greater than 1 most_recent_by_ip: Option>, + /// A list of banned addresses, with the time they were banned. + bans_by_ip: Arc>, + /// The local listener address. local_listener: SocketAddr, @@ -162,6 +166,7 @@ impl AddressBook { address_metrics_tx, last_address_log: None, most_recent_by_ip: should_limit_outbound_conns_per_ip.then(HashMap::new), + bans_by_ip: Default::default(), }; new_book.update_metrics(instant_now, chrono_now); @@ -409,6 +414,14 @@ impl AddressBook { /// peers. #[allow(clippy::unwrap_in_result)] pub fn update(&mut self, change: MetaAddrChange) -> Option { + if self.bans_by_ip.contains_key(&change.addr().ip()) { + tracing::warn!( + ?change, + "attempted to add a banned peer addr to address book" + ); + return None; + } + let previous = self.get(change.addr()); let _guard = self.span.enter(); @@ -428,6 +441,44 @@ impl AddressBook { ); if let Some(updated) = updated { + if updated.misbehavior() >= constants::MAX_PEER_MISBEHAVIOR_SCORE { + // Ban and skip outbound connections with excessively misbehaving peers. + let banned_ip = updated.addr.ip(); + let bans_by_ip = Arc::make_mut(&mut self.bans_by_ip); + + bans_by_ip.insert(banned_ip, Instant::now()); + if bans_by_ip.len() > constants::MAX_BANNED_IPS { + // Remove the oldest banned IP from the address book. 
+ bans_by_ip.shift_remove_index(0); + } + + self.most_recent_by_ip + .as_mut() + .expect("should be some when should_remove_most_recent_by_ip is true") + .remove(&banned_ip); + + let banned_addrs: Vec<_> = self + .by_addr + .descending_keys() + .skip_while(|addr| addr.ip() != banned_ip) + .take_while(|addr| addr.ip() == banned_ip) + .cloned() + .collect(); + + for addr in banned_addrs { + self.by_addr.remove(&addr); + } + + warn!( + ?updated, + total_peers = self.by_addr.len(), + recent_peers = self.recently_live_peers(chrono_now).len(), + "banned ip and removed banned peer addresses from address book", + ); + + return None; + } + // Ignore invalid outbound addresses. // (Inbound connections can be monitored via Zebra's metrics.) if !updated.address_is_valid_for_outbound(&self.network) { @@ -634,6 +685,11 @@ impl AddressBook { .cloned() } + /// Returns banned IP addresses. + pub fn bans(&self) -> Arc> { + self.bans_by_ip.clone() + } + /// Returns the number of entries in this address book. pub fn len(&self) -> usize { self.by_addr.len() @@ -805,6 +861,7 @@ impl Clone for AddressBook { address_metrics_tx, last_address_log: None, most_recent_by_ip: self.most_recent_by_ip.clone(), + bans_by_ip: self.bans_by_ip.clone(), } } } diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index fc3e8f93258..b4e68c0ba2d 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -1,7 +1,13 @@ //! The timestamp collector collects liveness information from peers. 
-use std::{cmp::max, net::SocketAddr, sync::Arc}; +use std::{ + cmp::max, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::Instant, +}; +use indexmap::IndexMap; use thiserror::Error; use tokio::{ sync::{mpsc, watch}, @@ -43,6 +49,7 @@ impl AddressBookUpdater { local_listener: SocketAddr, ) -> ( Arc>, + watch::Receiver>>, mpsc::Sender, watch::Receiver, JoinHandle>, @@ -74,6 +81,13 @@ impl AddressBookUpdater { }; let worker_address_book = address_book.clone(); + let (bans_sender, bans_receiver) = tokio::sync::watch::channel( + worker_address_book + .lock() + .expect("mutex should be unpoisoned") + .bans(), + ); + let worker = move || { info!("starting the address book updater"); @@ -84,11 +98,24 @@ impl AddressBookUpdater { // // Briefly hold the address book threaded mutex, to update the // state for a single address. - worker_address_book + let updated = worker_address_book .lock() .expect("mutex should be unpoisoned") .update(event); + // `UpdateMisbehavior` events should only be passed to `update()` here, + // so that this channel is always updated when new addresses are banned. 
+ if updated.is_none() { + let bans = worker_address_book + .lock() + .expect("mutex should be unpoisoned") + .bans(); + + if bans.contains_key(&event.addr().ip()) { + let _ = bans_sender.send(bans); + } + } + #[cfg(feature = "progress-bar")] if matches!(howudoin::cancelled(), Some(true)) { address_bar.close(); @@ -136,6 +163,7 @@ impl AddressBookUpdater { ( address_book, + bans_receiver, worker_tx, address_metrics, address_book_updater_task_handle, diff --git a/zebra-network/src/config/cache_dir.rs b/zebra-network/src/config/cache_dir.rs index 1e80b27bb9a..f389da13558 100644 --- a/zebra-network/src/config/cache_dir.rs +++ b/zebra-network/src/config/cache_dir.rs @@ -20,6 +20,18 @@ pub enum CacheDir { CustomPath(PathBuf), } +impl From for CacheDir { + fn from(value: bool) -> Self { + CacheDir::IsEnabled(value) + } +} + +impl From for CacheDir { + fn from(value: PathBuf) -> Self { + CacheDir::CustomPath(value) + } +} + impl CacheDir { /// Returns a `CacheDir` enabled with the default path. pub fn default_path() -> Self { diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index acab966f40c..8a27809ca17 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -383,6 +383,13 @@ pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.5; /// The minimum interval between logging peer set status updates. pub const MIN_PEER_SET_LOG_INTERVAL: Duration = Duration::from_secs(60); +/// The maximum number of peer misbehavior incidents before a peer is +/// disconnected and banned. +pub const MAX_PEER_MISBEHAVIOR_SCORE: u32 = 100; + +/// The maximum number of banned IP addresses to be stored in-memory at any time. +pub const MAX_BANNED_IPS: usize = 20_000; + lazy_static! { /// The minimum network protocol version accepted by this crate for each network, /// represented as a network upgrade. 
diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 1171ceee2cf..ec9eb6e848d 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -199,11 +199,19 @@ pub struct MetaAddr { /// See the [`MetaAddr::last_failure`] method for details. last_failure: Option, + /// The misbehavior score for this peer. + #[cfg_attr(any(test, feature = "proptest-impl"), proptest(value = 0))] + misbehavior_score: u32, + /// The outcome of our most recent communication attempt with this peer. // // TODO: move the time and services fields into PeerAddrState? // then some fields could be required in some states pub(crate) last_connection_state: PeerAddrState, + + /// Whether this peer address was added to the address book + /// when the peer made an inbound connection. + is_inbound: bool, } /// A change to an existing `MetaAddr`. @@ -260,6 +268,7 @@ pub enum MetaAddrChange { )] addr: PeerSocketAddr, services: PeerServices, + is_inbound: bool, }, /// Updates an existing `MetaAddr` when a peer responds with a message. @@ -280,6 +289,14 @@ pub enum MetaAddrChange { addr: PeerSocketAddr, services: Option, }, + + /// Updates an existing `MetaAddr` when a peer misbehaves such as by advertising + /// semantically invalid blocks or transactions. + #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))] + UpdateMisbehavior { + addr: PeerSocketAddr, + score_increment: u32, + }, } impl MetaAddr { @@ -306,6 +323,8 @@ impl MetaAddr { last_attempt: None, last_failure: None, last_connection_state: NeverAttemptedGossiped, + misbehavior_score: 0, + is_inbound: false, } } @@ -338,10 +357,15 @@ impl MetaAddr { /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) /// state, or /// - Zebra could advertise unreachable addresses to its own peers. 
- pub fn new_connected(addr: PeerSocketAddr, services: &PeerServices) -> MetaAddrChange { + pub fn new_connected( + addr: PeerSocketAddr, + services: &PeerServices, + is_inbound: bool, + ) -> MetaAddrChange { UpdateConnected { addr: canonical_peer_addr(*addr), services: *services, + is_inbound, } } @@ -421,6 +445,11 @@ impl MetaAddr { self.last_response.or(self.untrusted_last_seen) } + /// Returns whether the address is from an inbound peer connection + pub fn is_inbound(&self) -> bool { + self.is_inbound + } + /// Returns the unverified "last seen time" gossiped by the remote peer that /// sent us this address. /// @@ -623,6 +652,11 @@ impl MetaAddr { } } + /// Returns a score of misbehavior encountered in a peer at this address. + pub fn misbehavior(&self) -> u32 { + self.misbehavior_score + } + /// Return a sanitized version of this `MetaAddr`, for sending to a remote peer. /// /// Returns `None` if this `MetaAddr` should not be sent to remote peers. @@ -632,6 +666,11 @@ impl MetaAddr { return None; } + // Avoid responding to GetAddr requests with addresses of misbehaving peers. + if self.misbehavior_score != 0 || self.is_inbound { + return None; + } + // Sanitize time let last_seen = self.last_seen()?; let remainder = last_seen @@ -655,6 +694,8 @@ impl MetaAddr { last_attempt: None, last_failure: None, last_connection_state: NeverAttemptedGossiped, + misbehavior_score: 0, + is_inbound: false, }) } } @@ -679,7 +720,8 @@ impl MetaAddrChange { | UpdateAttempt { addr } | UpdateConnected { addr, .. } | UpdateResponded { addr, .. } - | UpdateFailed { addr, .. } => *addr, + | UpdateFailed { addr, .. } + | UpdateMisbehavior { addr, .. } => *addr, } } @@ -695,7 +737,8 @@ impl MetaAddrChange { | UpdateAttempt { addr } | UpdateConnected { addr, .. } | UpdateResponded { addr, .. } - | UpdateFailed { addr, .. } => *addr = new_addr, + | UpdateFailed { addr, .. } + | UpdateMisbehavior { addr, .. 
} => *addr = new_addr, } } @@ -713,6 +756,7 @@ impl MetaAddrChange { UpdateConnected { services, .. } => Some(*services), UpdateResponded { .. } => None, UpdateFailed { services, .. } => *services, + UpdateMisbehavior { .. } => None, } } @@ -729,7 +773,8 @@ impl MetaAddrChange { UpdateAttempt { .. } | UpdateConnected { .. } | UpdateResponded { .. } - | UpdateFailed { .. } => None, + | UpdateFailed { .. } + | UpdateMisbehavior { .. } => None, } } @@ -760,7 +805,10 @@ impl MetaAddrChange { // peer address. So the attempt time is a lower bound for the actual // handshake time. UpdateAttempt { .. } => Some(now), - UpdateConnected { .. } | UpdateResponded { .. } | UpdateFailed { .. } => None, + UpdateConnected { .. } + | UpdateResponded { .. } + | UpdateFailed { .. } + | UpdateMisbehavior { .. } => None, } } @@ -774,7 +822,7 @@ impl MetaAddrChange { // - the peer will appear to be live for longer, delaying future // reconnection attempts. UpdateConnected { .. } | UpdateResponded { .. } => Some(now), - UpdateFailed { .. } => None, + UpdateFailed { .. } | UpdateMisbehavior { .. } => None, } } @@ -792,7 +840,7 @@ impl MetaAddrChange { // states for longer, and // - the peer will appear to be used for longer, delaying future // reconnection attempts. - UpdateFailed { .. } => Some(now), + UpdateFailed { .. } | UpdateMisbehavior { .. } => Some(now), } } @@ -804,7 +852,7 @@ impl MetaAddrChange { // local listeners get sanitized, so the state doesn't matter here NewLocal { .. } => NeverAttemptedGossiped, UpdateAttempt { .. } => AttemptPending, - UpdateConnected { .. } | UpdateResponded { .. } => Responded, + UpdateConnected { .. } | UpdateResponded { .. } | UpdateMisbehavior { .. } => Responded, UpdateFailed { .. 
} => Failed, } } @@ -819,6 +867,27 @@ impl MetaAddrChange { last_attempt: self.last_attempt(instant_now), last_failure: self.last_failure(instant_now), last_connection_state: self.peer_addr_state(), + misbehavior_score: self.misbehavior_score(), + is_inbound: self.is_inbound(), + } + } + + /// Returns the misbehavior score increment for the current change. + pub fn misbehavior_score(&self) -> u32 { + match self { + MetaAddrChange::UpdateMisbehavior { + score_increment, .. + } => *score_increment, + _ => 0, + } + } + + /// Returns whether this change was created for a new inbound connection. + pub fn is_inbound(&self) -> bool { + if let MetaAddrChange::UpdateConnected { is_inbound, .. } = self { + *is_inbound + } else { + false } } @@ -841,6 +910,8 @@ impl MetaAddrChange { last_attempt: None, last_failure: None, last_connection_state: self.peer_addr_state(), + misbehavior_score: self.misbehavior_score(), + is_inbound: self.is_inbound(), } } @@ -902,10 +973,11 @@ impl MetaAddrChange { let previous_has_been_attempted = !previous.last_connection_state.is_never_attempted(); let change_to_never_attempted = self.peer_addr_state().is_never_attempted(); + let is_misbehavior_update = self.misbehavior_score() != 0; // Invalid changes - if change_to_never_attempted && previous_has_been_attempted { + if change_to_never_attempted && previous_has_been_attempted && !is_misbehavior_update { // Existing entry has been attempted, change is NeverAttempted // - ignore the change // @@ -916,7 +988,7 @@ impl MetaAddrChange { return None; } - if change_is_out_of_order && !change_is_concurrent { + if change_is_out_of_order && !change_is_concurrent && !is_misbehavior_update { // Change is significantly out of order: ignore it. 
// // # Security @@ -926,7 +998,7 @@ impl MetaAddrChange { return None; } - if change_is_concurrent && !connection_has_more_progress { + if change_is_concurrent && !connection_has_more_progress && !is_misbehavior_update { // Change is close together in time, and it would revert the connection to an earlier // state. // @@ -992,6 +1064,8 @@ impl MetaAddrChange { last_attempt: None, last_failure: None, last_connection_state: self.peer_addr_state(), + misbehavior_score: previous.misbehavior_score + self.misbehavior_score(), + is_inbound: previous.is_inbound || self.is_inbound(), }) } else { // Existing entry and change are both Attempt, Responded, Failed, @@ -1014,6 +1088,8 @@ impl MetaAddrChange { last_failure: self.last_failure(instant_now).or(previous.last_failure), // Replace the state with the updated state. last_connection_state: self.peer_addr_state(), + misbehavior_score: previous.misbehavior_score + self.misbehavior_score(), + is_inbound: previous.is_inbound || self.is_inbound(), }) } } diff --git a/zebra-network/src/meta_addr/tests/prop.rs b/zebra-network/src/meta_addr/tests/prop.rs index fc3859d7146..4dfdb134ccb 100644 --- a/zebra-network/src/meta_addr/tests/prop.rs +++ b/zebra-network/src/meta_addr/tests/prop.rs @@ -48,6 +48,8 @@ proptest! 
{ // also check the address, port, and services individually prop_assert!(!addr.addr.ip().is_unspecified()); prop_assert_ne!(addr.addr.port(), 0); + prop_assert_eq!(addr.misbehavior(), 0); + prop_assert!(!addr.is_inbound()); if let Some(services) = addr.services { prop_assert!(services.contains(PeerServices::NODE_NETWORK)); diff --git a/zebra-network/src/meta_addr/tests/vectors.rs b/zebra-network/src/meta_addr/tests/vectors.rs index c4775362ade..c4ade96f536 100644 --- a/zebra-network/src/meta_addr/tests/vectors.rs +++ b/zebra-network/src/meta_addr/tests/vectors.rs @@ -36,6 +36,8 @@ fn sanitize_extremes() { last_attempt: None, last_failure: None, last_connection_state: Default::default(), + misbehavior_score: Default::default(), + is_inbound: false, }; let max_time_entry = MetaAddr { @@ -46,6 +48,8 @@ fn sanitize_extremes() { last_attempt: None, last_failure: None, last_connection_state: Default::default(), + misbehavior_score: Default::default(), + is_inbound: false, }; if let Some(min_sanitized) = min_time_entry.sanitize(&Mainnet) { diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 2d99610206e..fa065f53920 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -37,7 +37,7 @@ use crate::{ external::{types::Nonce, InventoryHash, Message}, internal::{InventoryResponse, Request, Response}, }, - BoxError, MAX_TX_INV_IN_SENT_MESSAGE, + BoxError, PeerSocketAddr, MAX_TX_INV_IN_SENT_MESSAGE, }; use InventoryResponse::*; @@ -140,6 +140,7 @@ impl Handler { &mut self, msg: Message, cached_addrs: &mut Vec, + transient_addr: Option, ) -> Option { let mut ignored_msg = None; // TODO: can this be avoided? @@ -215,7 +216,9 @@ impl Handler { Handler::Finished(Err(PeerError::NotFoundResponse(missing_transaction_ids))) } else if pending_ids.is_empty() || ignored_msg.is_some() { // If we got some of what we wanted, let the internal client know. 
- let available = transactions.into_iter().map(InventoryResponse::Available); + let available = transactions + .into_iter() + .map(|t| InventoryResponse::Available((t, transient_addr))); let missing = pending_ids.into_iter().map(InventoryResponse::Missing); Handler::Finished(Ok(Response::Transactions( @@ -263,7 +266,9 @@ impl Handler { Handler::Finished(Err(PeerError::NotFoundResponse(missing_transaction_ids))) } else { // If we got some of what we wanted, let the internal client know. - let available = transactions.into_iter().map(InventoryResponse::Available); + let available = transactions + .into_iter() + .map(|t| InventoryResponse::Available((t, transient_addr))); let missing = pending_ids.into_iter().map(InventoryResponse::Missing); Handler::Finished(Ok(Response::Transactions( @@ -324,7 +329,9 @@ impl Handler { if pending_hashes.is_empty() { // If we got everything we wanted, let the internal client know. - let available = blocks.into_iter().map(InventoryResponse::Available); + let available = blocks + .into_iter() + .map(|block| InventoryResponse::Available((block, transient_addr))); Handler::Finished(Ok(Response::Blocks(available.collect()))) } else { // Keep on waiting for all the blocks we wanted, until we get them or time out. @@ -368,7 +375,9 @@ impl Handler { Handler::Finished(Err(PeerError::NotFoundResponse(missing_block_hashes))) } else { // If we got some of what we wanted, let the internal client know. - let available = blocks.into_iter().map(InventoryResponse::Available); + let available = blocks + .into_iter() + .map(|block| InventoryResponse::Available((block, transient_addr))); let missing = pending_hashes.into_iter().map(InventoryResponse::Missing); Handler::Finished(Ok(Response::Blocks(available.chain(missing).collect()))) @@ -854,7 +863,7 @@ where let request_msg = match self.state { State::AwaitingResponse { ref mut handler, .. 
- } => span.in_scope(|| handler.process_message(peer_msg, &mut self.cached_addrs)), + } => span.in_scope(|| handler.process_message(peer_msg, &mut self.cached_addrs, self.connection_info.connected_addr.get_transient_addr())), _ => unreachable!("unexpected state after AwaitingResponse: {:?}, peer_msg: {:?}, client_receiver: {:?}", self.state, peer_msg, @@ -1448,7 +1457,7 @@ where for transaction in transactions.into_iter() { match transaction { - Available(transaction) => { + Available((transaction, _)) => { if let Err(e) = self.peer_tx.send(Message::Tx(transaction)).await { self.fail_with(e).await; return; @@ -1472,7 +1481,7 @@ where for block in blocks.into_iter() { match block { - Available(block) => { + Available((block, _)) => { if let Err(e) = self.peer_tx.send(Message::Block(block)).await { self.fail_with(e).await; return; diff --git a/zebra-network/src/peer/connection/tests/prop.rs b/zebra-network/src/peer/connection/tests/prop.rs index f633aea585a..2a104080c40 100644 --- a/zebra-network/src/peer/connection/tests/prop.rs +++ b/zebra-network/src/peer/connection/tests/prop.rs @@ -113,7 +113,7 @@ proptest! 
{ ); let response = response_result.unwrap(); - prop_assert_eq!(response, Response::Blocks(vec![Available(second_block.0)])); + prop_assert_eq!(response, Response::Blocks(vec![Available((second_block.0, None))])); // Check the state after the response let error = shared_error_slot.try_get_error(); diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index c071cae9877..70d42b095cb 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -262,10 +262,10 @@ impl ConnectedAddr { /// TODO: remove the `get_` from these methods (Rust style avoids `get` prefixes) pub fn get_address_book_addr(&self) -> Option { match self { - OutboundDirect { addr } => Some(*addr), + OutboundDirect { addr } | InboundDirect { addr } => Some(*addr), // TODO: consider using the canonical address of the peer to track // outbound proxy connections - InboundDirect { .. } | OutboundProxy { .. } | InboundProxy { .. } | Isolated => None, + OutboundProxy { .. } | InboundProxy { .. } | Isolated => None, } } @@ -370,6 +370,11 @@ impl ConnectedAddr { addrs.into_iter() } + + /// Returns true if the [`ConnectedAddr`] was created for an inbound connection. + pub fn is_inbound(&self) -> bool { + matches!(self, InboundDirect { .. } | InboundProxy { .. }) + } } impl fmt::Debug for ConnectedAddr { @@ -933,7 +938,11 @@ where // the collector doesn't depend on network activity, // so this await should not hang let _ = address_book_updater - .send(MetaAddr::new_connected(book_addr, &remote_services)) + .send(MetaAddr::new_connected( + book_addr, + &remote_services, + connected_addr.is_inbound(), + )) .await; } diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 7268b21057c..18ef5411f63 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -5,9 +5,9 @@ //! 
[tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, convert::Infallible, - net::SocketAddr, + net::{IpAddr, SocketAddr}, pin::Pin, sync::Arc, time::Duration, @@ -19,10 +19,11 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, Future, TryFutureExt, }; +use indexmap::IndexMap; use rand::seq::SliceRandom; use tokio::{ net::{TcpListener, TcpStream}, - sync::broadcast, + sync::{broadcast, mpsc, watch}, time::{sleep, Instant}, }; use tokio_stream::wrappers::IntervalStream; @@ -34,7 +35,7 @@ use tracing_futures::Instrument; use zebra_chain::{chain_tip::ChainTip, diagnostic::task::WaitForPanics}; use crate::{ - address_book_updater::AddressBookUpdater, + address_book_updater::{AddressBookUpdater, MIN_CHANNEL_SIZE}, constants, meta_addr::{MetaAddr, MetaAddrChange}, peer::{ @@ -101,6 +102,7 @@ pub async fn init( ) -> ( Buffer, Request>, Arc>, + mpsc::Sender<(PeerSocketAddr, u32)>, ) where S: Service + Clone + Send + Sync + 'static, @@ -109,8 +111,58 @@ where { let (tcp_listener, listen_addr) = open_listener(&config.clone()).await; - let (address_book, address_book_updater, address_metrics, address_book_updater_guard) = - AddressBookUpdater::spawn(&config, listen_addr); + let ( + address_book, + bans_receiver, + address_book_updater, + address_metrics, + address_book_updater_guard, + ) = AddressBookUpdater::spawn(&config, listen_addr); + + let (misbehavior_tx, mut misbehavior_rx) = mpsc::channel( + // Leave enough room for a misbehaviour update on every peer connection + // before the channel is drained. 
+ config + .peerset_total_connection_limit() + .max(MIN_CHANNEL_SIZE), + ); + + let misbehaviour_updater = address_book_updater.clone(); + tokio::spawn( + async move { + let mut misbehaviors: HashMap = HashMap::new(); + // Batch misbehaviour updates so peers can't keep the address book mutex locked + // by repeatedly sending invalid blocks or transactions. + let mut flush_timer = + IntervalStream::new(tokio::time::interval(Duration::from_secs(30))); + + loop { + tokio::select! { + msg = misbehavior_rx.recv() => match msg { + Some((peer_addr, score_increment)) => *misbehaviors + .entry(peer_addr) + .or_default() + += score_increment, + None => break, + }, + + _ = flush_timer.next() => { + for (addr, score_increment) in misbehaviors.drain() { + let _ = misbehaviour_updater + .send(MetaAddrChange::UpdateMisbehavior { + addr, + score_increment + }) + .await; + } + }, + }; + } + + tracing::warn!("exiting misbehavior update batch task"); + } + .in_current_span(), + ); // Create a broadcast channel for peer inventory advertisements. // If it reaches capacity, this channel drops older inventory advertisements. 
@@ -173,6 +225,7 @@ where demand_tx.clone(), handle_rx, inv_receiver, + bans_receiver.clone(), address_metrics, MinimumPeerVersion::new(latest_chain_tip, &config.network), None, @@ -188,6 +241,7 @@ where constants::MIN_INBOUND_PEER_CONNECTION_INTERVAL, listen_handshaker, peerset_tx.clone(), + bans_receiver, ); let listen_guard = tokio::spawn(listen_fut.in_current_span()); @@ -258,7 +312,7 @@ where ]) .unwrap(); - (peer_set, address_book) + (peer_set, address_book, misbehavior_tx) } /// Use the provided `outbound_connector` to connect to the configured DNS seeder and @@ -550,6 +604,7 @@ async fn accept_inbound_connections( min_inbound_peer_connection_interval: Duration, handshaker: S, peerset_tx: futures::channel::mpsc::Sender, + bans_receiver: watch::Receiver>>, ) -> Result<(), BoxError> where S: Service, Response = peer::Client, Error = BoxError> @@ -586,6 +641,12 @@ where if let Ok((tcp_stream, addr)) = inbound_result { let addr: PeerSocketAddr = addr.into(); + if bans_receiver.borrow().clone().contains_key(&addr.ip()) { + debug!(?addr, "banned inbound connection attempt"); + std::mem::drop(tcp_stream); + continue; + } + if active_inbound_connections.update_count() >= config.peerset_inbound_connection_limit() || recent_inbound_connections.is_past_limit_or_add(addr.ip()) diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 6a53b2ed0ab..c101bd81e36 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1444,7 +1444,7 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: Network) { let inbound_service = service_fn(|_| async { unreachable!("inbound service should never be called") }); - let (_peer_service, address_book) = init( + let (_peer_service, address_book, _) = init( config, inbound_service, NoChainTip, @@ -1510,7 +1510,7 @@ where ..default_config }; - let (_peer_service, address_book) = 
init( + let (_peer_service, address_book, _) = init( config, inbound_service, NoChainTip, @@ -1547,8 +1547,13 @@ where config.peerset_initial_target_size = peerset_initial_target_size; } - let (address_book, address_book_updater, _address_metrics, _address_book_updater_guard) = - AddressBookUpdater::spawn(&config, config.listen_addr); + let ( + address_book, + _bans_receiver, + address_book_updater, + _address_metrics, + _address_book_updater_guard, + ) = AddressBookUpdater::spawn(&config, config.listen_addr); // Add enough fake peers to go over the limit, even if the limit is zero. let over_limit_peers = config.peerset_outbound_connection_limit() * 2 + 1; @@ -1678,6 +1683,8 @@ where let over_limit_connections = config.peerset_inbound_connection_limit() * 2 + 1; let (peerset_tx, peerset_rx) = mpsc::channel::(over_limit_connections); + let (_bans_tx, bans_rx) = tokio::sync::watch::channel(Default::default()); + // Start listening for connections. let listen_fut = accept_inbound_connections( config.clone(), @@ -1685,6 +1692,7 @@ where MIN_INBOUND_PEER_CONNECTION_INTERVAL_FOR_TESTS, listen_handshaker, peerset_tx.clone(), + bans_rx, ); let listen_task_handle = tokio::spawn(listen_fut); @@ -1789,8 +1797,13 @@ where let (peerset_tx, peerset_rx) = mpsc::channel::(peer_count + 1); - let (_address_book, address_book_updater, _address_metrics, address_book_updater_guard) = - AddressBookUpdater::spawn(&config, unused_v4); + let ( + _address_book, + _bans_receiver, + address_book_updater, + _address_metrics, + address_book_updater_guard, + ) = AddressBookUpdater::spawn(&config, unused_v4); let add_fut = add_initial_peers(config, outbound_connector, peerset_tx, address_book_updater); let add_task_handle = tokio::spawn(add_fut); diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 1cac476ff53..c66493d4f9a 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -100,6 +100,7 @@ use std::{ marker::PhantomData, 
net::IpAddr, pin::Pin, + sync::Arc, task::{Context, Poll}, time::Instant, }; @@ -111,6 +112,7 @@ use futures::{ stream::FuturesUnordered, task::noop_waker, }; +use indexmap::IndexMap; use itertools::Itertools; use num_integer::div_ceil; use tokio::{ @@ -183,6 +185,9 @@ where /// A channel that asks the peer crawler task to connect to more peers. demand_signal: mpsc::Sender, + /// A watch channel receiver with a copy of banned IP addresses. + bans_receiver: watch::Receiver>>, + // Peer Tracking: Ready Peers // /// Connected peers that are ready to receive requests from Zebra, @@ -280,6 +285,7 @@ where /// and shuts down all the tasks as soon as one task exits; /// - `inv_stream`: receives inventory changes from peers, /// allowing the peer set to direct inventory requests; + /// - `bans_receiver`: receives a map of banned IP addresses that should be dropped; /// - `address_book`: when peer set is busy, it logs address book diagnostics. /// - `minimum_peer_version`: endpoint to see the minimum peer protocol version in real time. /// - `max_conns_per_ip`: configured maximum number of peers that can be in the @@ -291,6 +297,7 @@ where demand_signal: mpsc::Sender, handle_rx: tokio::sync::oneshot::Receiver>>>, inv_stream: broadcast::Receiver, + bans_receiver: watch::Receiver>>, address_metrics: watch::Receiver, minimum_peer_version: MinimumPeerVersion, max_conns_per_ip: Option, @@ -299,6 +306,8 @@ where // New peers discover, demand_signal, + // Banned peers + bans_receiver, // Ready peers ready_services: HashMap::new(), @@ -475,6 +484,12 @@ where Some(Ok((key, svc))) => { trace!(?key, "service became ready"); + if self.bans_receiver.borrow().contains_key(&key.ip()) { + warn!(?key, "service is banned, dropping service"); + std::mem::drop(svc); + continue; + } + self.push_ready(true, key, svc); // Return Ok if at least one peer became ready. @@ -544,7 +559,15 @@ where match peer_readiness { // Still ready, add it back to the list. 
- Ok(()) => self.push_ready(false, key, svc), + Ok(()) => { + if self.bans_receiver.borrow().contains_key(&key.ip()) { + debug!(?key, "service ip is banned, dropping service"); + std::mem::drop(svc); + continue; + } + + self.push_ready(false, key, svc) + } // Ready -> Errored Err(error) => { diff --git a/zebra-network/src/peer_set/set/tests.rs b/zebra-network/src/peer_set/set/tests.rs index b2b19770583..78c60fbf5d0 100644 --- a/zebra-network/src/peer_set/set/tests.rs +++ b/zebra-network/src/peer_set/set/tests.rs @@ -211,6 +211,7 @@ where .unwrap_or_else(|| guard.create_inventory_receiver()); let address_metrics = guard.prepare_address_book(self.address_book); + let (_bans_sender, bans_receiver) = tokio::sync::watch::channel(Default::default()); let peer_set = PeerSet::new( &config, @@ -218,6 +219,7 @@ where demand_signal, handle_rx, inv_stream, + bans_receiver, address_metrics, minimum_peer_version, max_conns_per_ip, diff --git a/zebra-network/src/protocol/internal/response.rs b/zebra-network/src/protocol/internal/response.rs index 6e1cd3291ff..689542a7f29 100644 --- a/zebra-network/src/protocol/internal/response.rs +++ b/zebra-network/src/protocol/internal/response.rs @@ -7,7 +7,7 @@ use zebra_chain::{ transaction::{UnminedTx, UnminedTxId}, }; -use crate::{meta_addr::MetaAddr, protocol::internal::InventoryResponse}; +use crate::{meta_addr::MetaAddr, protocol::internal::InventoryResponse, PeerSocketAddr}; #[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; @@ -67,14 +67,14 @@ pub enum Response { /// `zcashd` sometimes sends no response, and sometimes sends `notfound`. // // TODO: make this into a HashMap, ()>> - a unique list (#2244) - Blocks(Vec, block::Hash>>), + Blocks(Vec, Option), block::Hash>>), /// A list of found unmined transactions, and missing unmined transaction IDs. /// /// Each list contains zero or more entries. 
// // TODO: make this into a HashMap> - a unique list (#2244) - Transactions(Vec>), + Transactions(Vec), UnminedTxId>>), } impl fmt::Display for Response { @@ -94,7 +94,7 @@ impl fmt::Display for Response { // Display heights for single-block responses (which Zebra requests and expects) Response::Blocks(blocks) if blocks.len() == 1 => { match blocks.first().expect("len is 1") { - Available(block) => format!( + Available((block, _)) => format!( "Block {{ height: {}, hash: {} }}", block .coinbase_height() diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 2405be36a0a..809fafac69e 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1319,10 +1319,10 @@ where > = self.clone(); let network = self.network.clone(); - if !network.is_regtest() { + if !network.disable_pow() { return Err(ErrorObject::borrowed( 0, - "generate is only supported on regtest", + "generate is only supported on networks where PoW is disabled", None, )); } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index ba6ddce82c9..e2d522a9cd4 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -173,7 +173,7 @@ impl StartCmd { setup_rx, )); - let (peer_set, address_book) = zebra_network::init( + let (peer_set, address_book, misbehavior_sender) = zebra_network::init( config.network.clone(), inbound, latest_chain_tip.clone(), @@ -200,6 +200,7 @@ impl StartCmd { block_verifier_router.clone(), state.clone(), latest_chain_tip.clone(), + misbehavior_sender.clone(), ); info!("initializing mempool"); @@ -211,6 +212,7 @@ impl StartCmd { sync_status.clone(), latest_chain_tip.clone(), chain_tip_change.clone(), + misbehavior_sender.clone(), ); let mempool = BoxService::new(mempool); let mempool = ServiceBuilder::new() @@ -230,6 +232,7 @@ impl StartCmd { mempool: mempool.clone(), state: state.clone(), latest_chain_tip: 
latest_chain_tip.clone(), + misbehavior_sender, }; setup_tx .send(setup_data) diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index cfc7008e0bd..7af7791e144 100644 --- a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -21,15 +21,15 @@ use futures::{ use tokio::sync::oneshot::{self, error::TryRecvError}; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service, ServiceExt}; -use zebra_network as zn; -use zebra_state as zs; +use zebra_network::{self as zn, PeerSocketAddr}; +use zebra_state::{self as zs}; use zebra_chain::{ block::{self, Block}, serialization::ZcashSerialize, transaction::UnminedTxId, }; -use zebra_consensus::router::RouterError; +use zebra_consensus::{router::RouterError, VerifyBlockError}; use zebra_network::{AddressBook, InventoryResponse}; use zebra_node_services::mempool; @@ -107,6 +107,9 @@ pub struct InboundSetupData { /// Allows efficient access to the best tip of the blockchain. pub latest_chain_tip: zs::LatestChainTip, + + /// A channel to send misbehavior reports to the [`AddressBook`]. + pub misbehavior_sender: tokio::sync::mpsc::Sender<(PeerSocketAddr, u32)>, } /// Tracks the internal state of the [`Inbound`] service during setup. @@ -148,6 +151,9 @@ pub enum Setup { /// A service that manages cached blockchain state. state: State, + + /// A channel to send misbehavior reports to the [`AddressBook`]. + misbehavior_sender: tokio::sync::mpsc::Sender<(PeerSocketAddr, u32)>, }, /// Temporary state used in the inbound service's internal initialization code. 
@@ -261,6 +267,7 @@ impl Service for Inbound { mempool, state, latest_chain_tip, + misbehavior_sender, } = setup_data; let cached_peer_addr_response = CachedPeerAddrResponse::new(address_book); @@ -279,6 +286,7 @@ impl Service for Inbound { block_downloads, mempool, state, + misbehavior_sender, } } Err(TryRecvError::Empty) => { @@ -314,13 +322,27 @@ impl Service for Inbound { mut block_downloads, mempool, state, + misbehavior_sender, } => { // # Correctness // // Clear the stream but ignore the final Pending return value. // If we returned Pending here, and there were no waiting block downloads, // then inbound requests would wait for the next block download, and hang forever. - while let Poll::Ready(Some(_)) = block_downloads.as_mut().poll_next(cx) {} + while let Poll::Ready(Some(result)) = block_downloads.as_mut().poll_next(cx) { + let Err((err, Some(advertiser_addr))) = result else { + continue; + }; + + let Ok(err) = err.downcast::() else { + continue; + }; + + if err.misbehavior_score() != 0 { + let _ = + misbehavior_sender.try_send((advertiser_addr, err.misbehavior_score())); + } + } result = Ok(()); @@ -329,6 +351,7 @@ impl Service for Inbound { block_downloads, mempool, state, + misbehavior_sender, } } }; @@ -362,6 +385,7 @@ impl Service for Inbound { block_downloads, mempool, state, + misbehavior_sender: _, } => (cached_peer_addr_response, block_downloads, mempool, state), _ => { debug!("ignoring request from remote peer during setup"); @@ -398,7 +422,7 @@ impl Service for Inbound { let state = state.clone(); async move { - let mut blocks: Vec, block::Hash>> = Vec::new(); + let mut blocks: Vec, Option), block::Hash>> = Vec::new(); let mut total_size = 0; // Ignore any block hashes past the response limit. @@ -422,7 +446,7 @@ impl Service for Inbound { // return the size from the state using a wrapper type. 
total_size += block.zcash_serialized_size(); - blocks.push(Available(block)) + blocks.push(Available((block, None))) }, // We don't need to limit the size of the missing block IDs list, // because it is already limited to the size of the getdata request @@ -473,7 +497,7 @@ impl Service for Inbound { total_size += tx.size; within_limit - }).map(Available); + }).map(|tx| Available((tx, None))); // The network layer handles splitting this response into multiple `tx` // messages, and a `notfound` message if needed. diff --git a/zebrad/src/components/inbound/downloads.rs b/zebrad/src/components/inbound/downloads.rs index a1a36ed26d5..42214048c4c 100644 --- a/zebrad/src/components/inbound/downloads.rs +++ b/zebrad/src/components/inbound/downloads.rs @@ -20,7 +20,7 @@ use zebra_chain::{ block::{self, HeightDiff}, chain_tip::ChainTip, }; -use zebra_network as zn; +use zebra_network::{self as zn, PeerSocketAddr}; use zebra_state as zs; use crate::components::sync::MIN_CONCURRENCY_LIMIT; @@ -107,7 +107,9 @@ where // /// A list of pending block download and verify tasks. #[pin] - pending: FuturesUnordered>>, + pending: FuturesUnordered< + JoinHandle)>>, + >, /// A list of channels that can be used to cancel pending block download and /// verify tasks. 
@@ -126,7 +128,7 @@ where ZS: Service + Send + Clone + 'static, ZS::Future: Send, { - type Item = Result; + type Item = Result)>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = self.project(); @@ -145,9 +147,9 @@ where this.cancel_handles.remove(&hash); Poll::Ready(Some(Ok(hash))) } - Err((e, hash)) => { + Err((e, hash, advertiser_addr)) => { this.cancel_handles.remove(&hash); - Poll::Ready(Some(Err(e))) + Poll::Ready(Some(Err((e, advertiser_addr)))) } } } else { @@ -249,11 +251,13 @@ where Ok(zs::Response::KnownBlock(Some(_))) => Err("already present".into()), Ok(_) => unreachable!("wrong response"), Err(e) => Err(e), - }?; + } + .map_err(|e| (e, None))?; - let block = if let zn::Response::Blocks(blocks) = network + let (block, advertiser_addr) = if let zn::Response::Blocks(blocks) = network .oneshot(zn::Request::BlocksByHash(std::iter::once(hash).collect())) - .await? + .await + .map_err(|e| (e, None))? { assert_eq!( blocks.len(), @@ -307,15 +311,18 @@ where }) .unwrap_or(block::Height(0)); - let block_height = block.coinbase_height().ok_or_else(|| { - debug!( - ?hash, - "gossiped block with no height: dropped downloaded block" - ); - metrics::counter!("gossip.no.height.dropped.block.count").increment(1); + let block_height = block + .coinbase_height() + .ok_or_else(|| { + debug!( + ?hash, + "gossiped block with no height: dropped downloaded block" + ); + metrics::counter!("gossip.no.height.dropped.block.count").increment(1); - BoxError::from("gossiped block with no height") - })?; + BoxError::from("gossiped block with no height") + }) + .map_err(|e| (e, None))?; if block_height > max_lookahead_height { debug!( @@ -328,7 +335,7 @@ where ); metrics::counter!("gossip.max.height.limit.dropped.block.count").increment(1); - Err("gossiped block height too far ahead")?; + Err("gossiped block height too far ahead").map_err(|e| (e.into(), None))?; } else if block_height < min_accepted_height { debug!( ?hash, @@ -340,13 +347,15 @@ where ); 
metrics::counter!("gossip.min.height.limit.dropped.block.count").increment(1); - Err("gossiped block height behind the finalized tip")?; + Err("gossiped block height behind the finalized tip") + .map_err(|e| (e.into(), None))?; } verifier .oneshot(zebra_consensus::Request::Commit(block)) .await .map(|hash| (hash, block_height)) + .map_err(|e| (e, advertiser_addr)) } .map_ok(|(hash, height)| { info!(?height, "downloaded and verified gossiped block"); @@ -355,7 +364,7 @@ where }) // Tack the hash onto the error so we can remove the cancel handle // on failure as well as on success. - .map_err(move |e| (e, hash)) + .map_err(move |(e, advertiser_addr)| (e, hash, advertiser_addr)) .in_current_span(); let task = tokio::spawn(async move { @@ -365,7 +374,7 @@ where _ = &mut cancel_rx => { trace!("task cancelled prior to completion"); metrics::counter!("gossip.cancelled.count").increment(1); - Err(("canceled".into(), hash)) + Err(("canceled".into(), hash, None)) } verification = fut => verification, } diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 08ee467d48a..ab0e36f48a3 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -106,7 +106,7 @@ async fn mempool_requests_for_transactions() { response, added_transactions .into_iter() - .map(Available) + .map(|tx| Available((tx, None))) .collect::>(), ) } @@ -259,7 +259,10 @@ async fn mempool_advertise_transaction_ids() -> Result<(), crate::BoxError> { .expect_request(Request::TransactionsById(txs)) .map(|responder| { let unmined_transaction = UnminedTx::from(test_transaction.clone()); - responder.respond(Response::Transactions(vec![Available(unmined_transaction)])) + responder.respond(Response::Transactions(vec![Available(( + unmined_transaction, + None, + ))])) }); // Simulate a successful transaction verification let verification = tx_verifier.expect_request_that(|_| 
true).map(|responder| { @@ -676,7 +679,7 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> { peer_set .expect_request(Request::BlocksByHash(iter::once(block_hash).collect())) .await - .respond(Response::Blocks(vec![Available(block)])); + .respond(Response::Blocks(vec![Available((block, None))])); // Wait for the chain tip update if let Err(timeout_error) = timeout( @@ -712,7 +715,7 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> { peer_set .expect_request(Request::BlocksByHash(iter::once(block_hash).collect())) .await - .respond(Response::Blocks(vec![Available(block)])); + .respond(Response::Blocks(vec![Available((block, None))])); let response = state_service .clone() @@ -808,6 +811,7 @@ async fn caches_getaddr_response() { .service(Inbound::new(MAX_INBOUND_CONCURRENCY, setup_rx)); let inbound_service = BoxService::new(inbound_service); let inbound_service = ServiceBuilder::new().buffer(1).service(inbound_service); + let (misbehavior_sender, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let setup_data = InboundSetupData { address_book: address_book.clone(), @@ -816,6 +820,7 @@ async fn caches_getaddr_response() { mempool: buffered_mempool_service.clone(), state: state_service.clone(), latest_chain_tip, + misbehavior_sender, }; let r = setup_tx.send(setup_data); // We can't expect or unwrap because the returned Result does not implement Debug @@ -963,6 +968,7 @@ async fn setup( // Don't wait for the chain tip update here, we wait for expect_request(AdvertiseBlock) below, // which is called by the gossip_best_tip_block_hashes task once the chain tip changes. 
+ let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let (mut mempool_service, transaction_receiver) = Mempool::new( &MempoolConfig::default(), buffered_peer_set.clone(), @@ -971,6 +977,7 @@ async fn setup( sync_status.clone(), latest_chain_tip.clone(), chain_tip_change.clone(), + misbehavior_tx, ); // Pretend we're close to tip @@ -1031,6 +1038,7 @@ async fn setup( let inbound_service = BoxService::new(inbound_service); let inbound_service = ServiceBuilder::new().buffer(1).service(inbound_service); + let (misbehavior_sender, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let setup_data = InboundSetupData { address_book, block_download_peer_set: buffered_peer_set, @@ -1038,6 +1046,7 @@ async fn setup( mempool: mempool_service.clone(), state: state_service.clone(), latest_chain_tip, + misbehavior_sender, }; let r = setup_tx.send(setup_data); // We can't expect or unwrap because the returned Result does not implement Debug diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 11211ccabed..82e337dc8c5 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -340,7 +340,8 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError // We respond with an unrelated transaction, so the peer gives up on the request. 
let unrelated_response: Transaction = zebra_test::vectors::DUMMY_TX1.zcash_deserialize_into()?; - let unrelated_response = Response::Transactions(vec![Available(unrelated_response.into())]); + let unrelated_response = + Response::Transactions(vec![Available((unrelated_response.into(), None))]); let ( // real services @@ -487,8 +488,8 @@ async fn outbound_tx_partial_response_notfound() -> Result<(), crate::BoxError> let repeated_tx: Transaction = zebra_test::vectors::DUMMY_TX1.zcash_deserialize_into()?; let repeated_tx: UnminedTx = repeated_tx.into(); let repeated_response = Response::Transactions(vec![ - Available(repeated_tx.clone()), - Available(repeated_tx.clone()), + Available((repeated_tx.clone(), None)), + Available((repeated_tx.clone(), None)), ]); let ( @@ -523,6 +524,7 @@ async fn outbound_tx_partial_response_notfound() -> Result<(), crate::BoxError> let available: Vec = tx_response .iter() .filter_map(InventoryResponse::available) + .map(|(tx, _)| tx) .collect(); let missing: Vec = tx_response .iter() @@ -658,7 +660,7 @@ async fn setup( ..NetworkConfig::default() }; - let (mut peer_set, address_book) = zebra_network::init( + let (mut peer_set, address_book, _) = zebra_network::init( network_config, inbound_service.clone(), latest_chain_tip.clone(), @@ -694,6 +696,7 @@ async fn setup( .service(BoxService::new(mock_tx_verifier.clone())); // Mempool + let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let mempool_config = MempoolConfig::default(); let (mut mempool_service, transaction_receiver) = Mempool::new( &mempool_config, @@ -703,6 +706,7 @@ async fn setup( sync_status.clone(), latest_chain_tip.clone(), chain_tip_change.clone(), + misbehavior_tx, ); // Enable the mempool @@ -715,6 +719,7 @@ async fn setup( .service(mempool_service); // Initialize the inbound service + let (misbehavior_sender, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let setup_data = InboundSetupData { address_book, block_download_peer_set: peer_set.clone(), 
@@ -722,6 +727,7 @@ async fn setup( mempool: mempool_service.clone(), state: state_service.clone(), latest_chain_tip, + misbehavior_sender, }; let r = setup_tx.send(setup_data); // We can't expect or unwrap because the returned Result does not implement Debug @@ -862,7 +868,7 @@ mod submitblock_test { .buffer(10) .service(BoxService::new(inbound_service)); - let (peer_set, _address_book) = zebra_network::init( + let (peer_set, _address_book, _misbehavior_tx) = zebra_network::init( network_config, inbound_service.clone(), latest_chain_tip.clone(), diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 4a9bb3b00e9..e022bb464be 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -27,7 +27,7 @@ use std::{ }; use futures::{future::FutureExt, stream::Stream}; -use tokio::sync::{broadcast, oneshot}; +use tokio::sync::{broadcast, mpsc, oneshot}; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service}; use zebra_chain::{ @@ -37,7 +37,7 @@ use zebra_chain::{ transaction::UnminedTxId, }; use zebra_consensus::{error::TransactionError, transaction}; -use zebra_network as zn; +use zebra_network::{self as zn, PeerSocketAddr}; use zebra_node_services::mempool::{Gossip, Request, Response}; use zebra_state as zs; use zebra_state::{ChainTipChange, TipAction}; @@ -71,7 +71,8 @@ pub use storage::{ pub use self::tests::UnboxMempoolError; use downloads::{ - Downloads as TxDownloads, TRANSACTION_DOWNLOAD_TIMEOUT, TRANSACTION_VERIFY_TIMEOUT, + Downloads as TxDownloads, TransactionDownloadVerifyError, TRANSACTION_DOWNLOAD_TIMEOUT, + TRANSACTION_VERIFY_TIMEOUT, }; type Outbound = Buffer, zn::Request>; @@ -239,6 +240,9 @@ pub struct Mempool { /// Used to broadcast transaction ids to peers. transaction_sender: broadcast::Sender>, + /// Sender for reporting peer addresses that advertised unexpectedly invalid transactions. 
+ misbehavior_sender: mpsc::Sender<(PeerSocketAddr, u32)>, + // Diagnostics // /// Queued transactions pending download or verification transmitter. @@ -263,6 +267,7 @@ pub struct Mempool { } impl Mempool { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( config: &Config, outbound: Outbound, @@ -271,6 +276,7 @@ impl Mempool { sync_status: SyncStatus, latest_chain_tip: zs::LatestChainTip, chain_tip_change: ChainTipChange, + misbehavior_sender: mpsc::Sender<(PeerSocketAddr, u32)>, ) -> (Self, broadcast::Receiver>) { let (transaction_sender, transaction_receiver) = tokio::sync::broadcast::channel(gossip::MAX_CHANGES_BEFORE_SEND * 2); @@ -286,6 +292,7 @@ impl Mempool { state, tx_verifier, transaction_sender, + misbehavior_sender, #[cfg(feature = "progress-bar")] queued_count_bar: None, #[cfg(feature = "progress-bar")] @@ -622,6 +629,19 @@ impl Service for Mempool { } } Ok(Err((tx_id, error))) => { + if let TransactionDownloadVerifyError::Invalid { + error, + advertiser_addr: Some(advertiser_addr), + } = &error + { + if error.mempool_misbehavior_score() != 0 { + let _ = self.misbehavior_sender.try_send(( + *advertiser_addr, + error.mempool_misbehavior_score(), + )); + } + }; + tracing::debug!(?tx_id, ?error, "mempool transaction failed to verify"); metrics::counter!("mempool.failed.verify.tasks.total", "reason" => error.to_string()).increment(1); diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index 68d29aadcaf..79243933a14 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -50,7 +50,7 @@ use zebra_chain::{ transparent, }; use zebra_consensus::transaction as tx; -use zebra_network as zn; +use zebra_network::{self as zn, PeerSocketAddr}; use zebra_node_services::mempool::Gossip; use zebra_state::{self as zs, CloneError}; @@ -124,8 +124,11 @@ pub enum TransactionDownloadVerifyError { #[error("transaction download / verification was cancelled")] Cancelled, 
- #[error("transaction did not pass consensus validation: {0}")] - Invalid(#[from] zebra_consensus::error::TransactionError), + #[error("transaction did not pass consensus validation: {error}")] + Invalid { + error: zebra_consensus::error::TransactionError, + advertiser_addr: Option, + }, } /// Represents a [`Stream`] of download and verification tasks. @@ -330,7 +333,7 @@ where trace!(?txid, ?next_height, "got next height"); - let tx = match gossiped_tx { + let (tx, advertiser_addr) = match gossiped_tx { Gossip::Id(txid) => { let req = zn::Request::TransactionsById(std::iter::once(txid).collect()); @@ -348,7 +351,7 @@ where _ => unreachable!("wrong response to transaction request"), }; - let tx = tx.available().expect( + let (tx, advertiser_addr) = tx.available().expect( "unexpected missing tx status: single tx failures should be errors", ); @@ -356,14 +359,14 @@ where "mempool.downloaded.transactions.total", "version" => format!("{}",tx.transaction.version()), ).increment(1); - tx + (tx, advertiser_addr) } Gossip::Tx(tx) => { metrics::counter!( "mempool.pushed.transactions.total", "version" => format!("{}",tx.transaction.version()), ).increment(1); - tx + (tx, None) } }; @@ -386,7 +389,7 @@ where // Hide the transaction data to avoid filling the logs trace!(?txid, result = ?result.as_ref().map(|_tx| ()), "verified transaction for the mempool"); - result.map_err(|e| TransactionDownloadVerifyError::Invalid(e.into())) + result.map_err(|e| TransactionDownloadVerifyError::Invalid { error: e.into(), advertiser_addr } ) } .map_ok(|(tx, spent_mempool_outpoints, tip_height)| { metrics::counter!( diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index 06ddea4a55b..5b2de766409 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -647,8 +647,8 @@ impl Storage { // Consensus verification failed. 
Reject transaction to avoid // having to download and verify it again just for it to fail again. - TransactionDownloadVerifyError::Invalid(e) => { - self.reject(tx_id, ExactTipRejectionError::FailedVerification(e).into()) + TransactionDownloadVerifyError::Invalid { error, .. } => { + self.reject(tx_id, ExactTipRejectionError::FailedVerification(error).into()) } } } diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index 6ad3613f3b4..c2c8f5d4adc 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -271,6 +271,7 @@ fn setup( let (mut chain_tip_sender, latest_chain_tip, chain_tip_change) = ChainTipSender::new(None, network); + let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let (mempool, _transaction_receiver) = Mempool::new( &Config { tx_cost_limit: 160_000_000, @@ -282,6 +283,7 @@ fn setup( sync_status, latest_chain_tip, chain_tip_change, + misbehavior_tx, ); // sends a fake chain tip so that the mempool can be enabled diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 5af54dfa6e1..ec5726f0240 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -805,7 +805,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .expect_request_that(|req| matches!(req, zn::Request::TransactionsById(_))) .map(|responder| { responder.respond(zn::Response::Transactions(vec![ - zn::InventoryResponse::Available(tx.clone().into()), + zn::InventoryResponse::Available((tx.clone().into(), None)), ])); }) .await; @@ -864,7 +864,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .expect_request_that(|req| matches!(req, zn::Request::TransactionsById(_))) .map(|responder| { responder.respond(zn::Response::Transactions(vec![ - zn::InventoryResponse::Available(tx.into()), + 
zn::InventoryResponse::Available((tx.into(), None)), ])); }) .await; @@ -1069,7 +1069,7 @@ async fn setup( let tx_verifier = MockService::build().for_unit_tests(); let (sync_status, recent_syncs) = SyncStatus::new(); - + let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let (mempool, mut mempool_transaction_receiver) = Mempool::new( &mempool::Config { tx_cost_limit, @@ -1081,6 +1081,7 @@ async fn setup( sync_status, latest_chain_tip, chain_tip_change.clone(), + misbehavior_tx, ); tokio::spawn(async move { while mempool_transaction_receiver.recv().await.is_ok() {} }); diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index bc35656dcc3..f55705e5122 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -9,7 +9,7 @@ use futures::stream::{FuturesUnordered, StreamExt}; use indexmap::IndexSet; use serde::{Deserialize, Serialize}; use tokio::{ - sync::watch, + sync::{mpsc, watch}, task::JoinError, time::{sleep, timeout}, }; @@ -23,7 +23,7 @@ use zebra_chain::{ chain_tip::ChainTip, }; use zebra_consensus::ParameterCheckpoint as _; -use zebra_network as zn; +use zebra_network::{self as zn, PeerSocketAddr}; use zebra_state as zs; use crate::{ @@ -380,6 +380,9 @@ where /// Receiver that is `true` when the downloader is past the lookahead limit. /// This is based on the downloaded block height and the state tip height. past_lookahead_limit_receiver: zs::WatchReceiver, + + /// Sender for reporting peer addresses that advertised unexpectedly invalid transactions. 
+ misbehavior_sender: mpsc::Sender<(PeerSocketAddr, u32)>, } /// Polls the network to determine whether further blocks are available and @@ -425,6 +428,7 @@ where verifier: ZV, state: ZS, latest_chain_tip: ZSTip, + misbehavior_sender: mpsc::Sender<(PeerSocketAddr, u32)>, ) -> (Self, SyncStatus) { let mut download_concurrency_limit = config.sync.download_concurrency_limit; let mut checkpoint_verify_concurrency_limit = @@ -513,6 +517,7 @@ where prospective_tips: HashSet::new(), recent_syncs, past_lookahead_limit_receiver, + misbehavior_sender, }; (new_syncer, sync_status) @@ -1094,10 +1099,23 @@ where Ok((height, hash)) => { trace!(?height, ?hash, "verified and committed block to state"); - Ok(()) + return Ok(()); } - Err(_) => Self::handle_response(response), - } + + Err(BlockDownloadVerifyError::Invalid { + ref error, + advertiser_addr: Some(advertiser_addr), + .. + }) if error.misbehavior_score() != 0 => { + let _ = self + .misbehavior_sender + .try_send((advertiser_addr, error.misbehavior_score())); + } + + Err(_) => {} + }; + + Self::handle_response(response) } /// Handles a response to block hash submission, passing through any extra hashes. 
diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs index 7ee352ae473..91ce7f34e9a 100644 --- a/zebrad/src/components/sync/downloads.rs +++ b/zebrad/src/components/sync/downloads.rs @@ -27,7 +27,7 @@ use zebra_chain::{ block::{self, Height, HeightDiff}, chain_tip::ChainTip, }; -use zebra_network as zn; +use zebra_network::{self as zn, PeerSocketAddr}; use zebra_state as zs; use crate::components::sync::{ @@ -125,6 +125,7 @@ pub enum BlockDownloadVerifyError { error: zebra_consensus::router::RouterError, height: block::Height, hash: block::Hash, + advertiser_addr: Option, }, #[error("block validation request failed: {error:?} {height:?} {hash:?}")] @@ -374,7 +375,7 @@ where rsp = block_req => rsp.map_err(|error| BlockDownloadVerifyError::DownloadFailed { error, hash})?, }; - let block = if let zn::Response::Blocks(blocks) = rsp { + let (block, advertiser_addr) = if let zn::Response::Blocks(blocks) = rsp { assert_eq!( blocks.len(), 1, @@ -550,7 +551,7 @@ where .map(|hash| (block_height, hash)) .map_err(|err| { match err.downcast::() { - Ok(error) => BlockDownloadVerifyError::Invalid { error: *error, height: block_height, hash }, + Ok(error) => BlockDownloadVerifyError::Invalid { error: *error, height: block_height, hash, advertiser_addr }, Err(error) => BlockDownloadVerifyError::ValidationRequestError { error, height: block_height, hash }, } }) diff --git a/zebrad/src/components/sync/tests/timing.rs b/zebrad/src/components/sync/tests/timing.rs index be3d79e698f..e6c30ca4bd4 100644 --- a/zebrad/src/components/sync/tests/timing.rs +++ b/zebrad/src/components/sync/tests/timing.rs @@ -158,6 +158,7 @@ fn request_genesis_is_rate_limited() { ); // start the sync + let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let (mut chain_sync, _) = ChainSync::new( &ZebradConfig::default(), Height(0), @@ -165,6 +166,7 @@ fn request_genesis_is_rate_limited() { verifier_service, state_service, latest_chain_tip, + 
misbehavior_tx, ); // run `request_genesis()` with a timeout of 13 seconds diff --git a/zebrad/src/components/sync/tests/vectors.rs b/zebrad/src/components/sync/tests/vectors.rs index 3a656904ef0..a808d02bdb3 100644 --- a/zebrad/src/components/sync/tests/vectors.rs +++ b/zebrad/src/components/sync/tests/vectors.rs @@ -86,7 +86,10 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block0_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block0.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block0.clone(), + None, + ))])); block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) @@ -160,11 +163,17 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block1_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block1.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block1.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block2_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block2.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block2.clone(), + None, + ))])); // We can't guarantee the verification request order let mut remaining_blocks: HashMap> = @@ -224,11 +233,17 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block3_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block3.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block3.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block4_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block4.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block4.clone(), + None, + ))])); // We can't guarantee the verification request 
order let mut remaining_blocks: HashMap> = @@ -313,7 +328,10 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block0_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block0.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block0.clone(), + None, + ))])); block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) @@ -389,11 +407,17 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block1_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block1.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block1.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block2_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block2.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block2.clone(), + None, + ))])); // We can't guarantee the verification request order let mut remaining_blocks: HashMap> = @@ -455,11 +479,17 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block3_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block3.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block3.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block4_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block4.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block4.clone(), + None, + ))])); // We can't guarantee the verification request order let mut remaining_blocks: HashMap> = @@ -530,7 +560,10 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block0_hash).collect())) 
.await - .respond(zn::Response::Blocks(vec![Available(block982k.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block982k.clone(), + None, + ))])); // Block is dropped because it is too far ahead of the tip. // We expect more requests to the state service, because the syncer keeps on running. @@ -595,7 +628,10 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block0_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block0.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block0.clone(), + None, + ))])); block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) @@ -677,15 +713,24 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { iter::once(block982k_hash).collect(), )) .await - .respond(zn::Response::Blocks(vec![Available(block982k.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block982k.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block1_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block1.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block1.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block2_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block2.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block2.clone(), + None, + ))])); // At this point, the following tasks race: // - The valid chain verifier requests @@ -756,7 +801,10 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block0_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block0.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block0.clone(), + None, + ))])); block_verifier_router 
.expect_request(zebra_consensus::Request::Commit(block0)) @@ -830,11 +878,17 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block1_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block1.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block1.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block2_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block2.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block2.clone(), + None, + ))])); // We can't guarantee the verification request order let mut remaining_blocks: HashMap> = @@ -896,17 +950,26 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block3_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block3.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block3.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash(iter::once(block4_hash).collect())) .await - .respond(zn::Response::Blocks(vec![Available(block4.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block4.clone(), + None, + ))])); peer_set .expect_request(zn::Request::BlocksByHash( iter::once(block982k_hash).collect(), )) .await - .respond(zn::Response::Blocks(vec![Available(block982k.clone())])); + .respond(zn::Response::Blocks(vec![Available(( + block982k.clone(), + None, + ))])); // At this point, the following tasks race: // - The valid chain verifier requests @@ -961,6 +1024,7 @@ fn setup() -> ( let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new(); + let (misbehavior_tx, _misbehavior_rx) = tokio::sync::mpsc::channel(1); let (chain_sync, sync_status) = ChainSync::new( &config, Height(0), @@ -968,6 +1032,7 @@ fn setup() -> ( block_verifier_router.clone(), state_service.clone(), 
mock_chain_tip, + misbehavior_tx, ); let chain_sync_future = chain_sync.sync(); diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 89afff9bbcc..757a61059cf 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3715,3 +3715,176 @@ fn check_no_git_refs_in_cargo_lock() { panic!("Cargo.lock includes git sources") } } + +/// Check that Zebra will disconnect from misbehaving peers. +#[tokio::test] +#[cfg(all(feature = "getblocktemplate-rpcs", not(target_os = "windows")))] +async fn disconnects_from_misbehaving_peers() -> Result<()> { + use std::sync::{atomic::AtomicBool, Arc}; + + use common::regtest::MiningRpcMethods; + use zebra_chain::parameters::testnet::{self, ConfiguredActivationHeights}; + use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; + + let _init_guard = zebra_test::init(); + let network = testnet::Parameters::build() + .with_activation_heights(ConfiguredActivationHeights { + canopy: Some(1), + nu5: Some(2), + nu6: Some(3), + ..Default::default() + }) + .with_slow_start_interval(Height::MIN) + .with_disable_pow(true) + .to_network(); + + let test_type = LaunchWithEmptyState { + launches_lightwalletd: false, + }; + let test_name = "disconnects_from_misbehaving_peers_test"; + + if !common::launch::can_spawn_zebrad_for_test_type(test_name, test_type, false) { + tracing::warn!("skipping disconnects_from_misbehaving_peers test"); + return Ok(()); + } + + // Get the zebrad config + let mut config = test_type + .zebrad_config(test_name, false, None, &network) + .expect("already checked config")?; + + config.network.cache_dir = false.into(); + config.network.listen_addr = format!("127.0.0.1:{}", random_known_port()).parse()?; + + let rpc_listen_addr = config.rpc.listen_addr.unwrap(); + let rpc_client_1 = RpcRequestClient::new(rpc_listen_addr); + + tracing::info!( + ?rpc_listen_addr, + network_listen_addr = ?config.network.listen_addr, + "starting a zebrad child on incompatible custom Testnet" 
+ ); + + let is_finished = Arc::new(AtomicBool::new(false)); + + { + let is_finished = Arc::clone(&is_finished); + let config = config.clone(); + let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); + tokio::task::spawn_blocking(move || -> Result<()> { + let mut zebrad_child = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .bypass_test_capture(true) + .with_timeout(test_type.zebrad_timeout()) + .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); + + while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { + zebrad_child.wait_for_stdout_line(Some("zebraA1".to_string())); + } + + Ok(()) + }); + } + + config.network.initial_testnet_peers = [config.network.listen_addr.to_string()].into(); + config.network.network = Network::new_default_testnet(); + config.network.listen_addr = "127.0.0.1:0".parse()?; + config.rpc.listen_addr = Some(format!("127.0.0.1:{}", random_known_port()).parse()?); + + let rpc_listen_addr = config.rpc.listen_addr.unwrap(); + let rpc_client_2 = RpcRequestClient::new(rpc_listen_addr); + + tracing::info!( + ?rpc_listen_addr, + network_listen_addr = ?config.network.listen_addr, + "starting a zebrad child on the default Testnet" + ); + + { + let is_finished = Arc::clone(&is_finished); + tokio::task::spawn_blocking(move || -> Result<()> { + let (zebrad_failure_messages, zebrad_ignore_messages) = + test_type.zebrad_failure_messages(); + let mut zebrad_child = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? 
+ .bypass_test_capture(true) + .with_timeout(test_type.zebrad_timeout()) + .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); + + while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { + zebrad_child.wait_for_stdout_line(Some("zebraB2".to_string())); + } + + Ok(()) + }); + } + + tracing::info!("waiting for zebrad nodes to connect"); + + // Wait a few seconds for Zebra to start up and make outbound peer connections + tokio::time::sleep(LAUNCH_DELAY).await; + + tracing::info!("checking for peers"); + + // Call `getpeerinfo` to check that the zebrad instances have connected + let peer_info: Vec = rpc_client_2 + .json_result_from_call("getpeerinfo", "[]") + .await + .map_err(|err| eyre!(err))?; + + assert!(!peer_info.is_empty(), "should have outbound peer"); + + tracing::info!( + ?peer_info, + "found peer connection, committing genesis block" + ); + + let genesis_block = network.block_parsed_iter().next().unwrap(); + rpc_client_1.submit_block(genesis_block.clone()).await?; + rpc_client_2.submit_block(genesis_block).await?; + + // Call the `generate` method to mine blocks in the zebrad instance where PoW is disabled + tracing::info!("committed genesis block, mining blocks with invalid PoW"); + tokio::time::sleep(Duration::from_secs(2)).await; + + rpc_client_1.call("generate", "[500]").await?; + + tracing::info!("wait for misbehavior messages to flush into address updater channel"); + + tokio::time::sleep(Duration::from_secs(30)).await; + + tracing::info!("calling getpeerinfo to confirm Zebra has dropped the peer connection"); + + // Call `getpeerinfo` to check that the zebrad instances have disconnected + for i in 0..600 { + let peer_info: Vec = rpc_client_2 + .json_result_from_call("getpeerinfo", "[]") + .await + .map_err(|err| eyre!(err))?; + + if peer_info.is_empty() { + break; + } else if i % 10 == 0 { + tracing::info!(?peer_info, "has not yet disconnected from misbehaving peer"); + } + + rpc_client_1.call("generate", "[1]").await?; + 
+ tokio::time::sleep(Duration::from_secs(1)).await; + } + + let peer_info: Vec = rpc_client_2 + .json_result_from_call("getpeerinfo", "[]") + .await + .map_err(|err| eyre!(err))?; + + tracing::info!(?peer_info, "called getpeerinfo"); + + assert!(peer_info.is_empty(), "should have no peers"); + + is_finished.store(true, std::sync::atomic::Ordering::SeqCst); + + Ok(()) +} diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index a91c500834f..b6c2ed61619 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -162,7 +162,7 @@ pub(crate) async fn run() -> Result<()> { /// or `ProposalResponse` in 'proposal' mode. async fn try_validate_block_template(client: &RpcRequestClient) -> Result<()> { let mut response_json_result: GetBlockTemplate = client - .json_result_from_call("getblocktemplate", "[]".to_string()) + .json_result_from_call("getblocktemplate", "[]") .await .expect("response should be success output with a serialized `GetBlockTemplate`"); diff --git a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs index dd30954948c..53494b95f38 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs @@ -40,7 +40,7 @@ pub(crate) async fn run() -> Result<()> { // call `getpeerinfo` RPC method let peer_info_result: Vec = RpcRequestClient::new(rpc_address) - .json_result_from_call("getpeerinfo", "[]".to_string()) + .json_result_from_call("getpeerinfo", "[]") .await .map_err(|err| eyre!(err))?; From 5cf5178d50a0db2d1fefea9cfd7422a65541d33a Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 18 Feb 2025 00:23:55 -0300 Subject: [PATCH 097/245] feature(rpc): Extend `getinfo` (#9261) * introduce new fields to `GetInfo` * add address 
book to methods for getinfo connections field * add the version field to getinfo * track last node error or warning and display it in getinfo rpc method * add the rest of the fields, minor cleanup * fix the tests * clippy --- Cargo.lock | 1 + zebra-rpc/Cargo.toml | 1 + zebra-rpc/src/methods.rs | 213 +++++++++++++++++- zebra-rpc/src/methods/tests/prop.rs | 6 +- zebra-rpc/src/methods/tests/snapshot.rs | 19 +- .../tests/snapshots/get_info@mainnet_10.snap | 15 +- .../tests/snapshots/get_info@testnet_10.snap | 15 +- zebra-rpc/src/methods/tests/vectors.rs | 72 ++++-- zebra-rpc/src/server.rs | 7 +- zebra-rpc/src/server/tests/vectors.rs | 4 + zebrad/src/application.rs | 5 + zebrad/src/commands/start.rs | 3 +- zebrad/src/components/tracing/component.rs | 56 ++++- zebrad/tests/acceptance.rs | 3 + 14 files changed, 380 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d4b5a3719b..9095e4f1ee5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6174,6 +6174,7 @@ dependencies = [ "proptest", "prost", "rand 0.8.5", + "semver", "serde", "serde_json", "thiserror 2.0.11", diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 463b9288c69..4518051a2fe 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -62,6 +62,7 @@ jsonrpsee-types = { workspace = true } jsonrpsee-proc-macros = { workspace = true } hyper = { workspace = true } http-body-util = { workspace = true } +semver = { workspace = true } serde_json = { workspace = true } indexmap = { workspace = true, features = ["serde"] } diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 55003872bd3..fea1cad0d92 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -37,6 +37,7 @@ use zebra_chain::{ }, }; use zebra_consensus::ParameterCheckpoint; +use zebra_network::address_book_peers::AddressBookPeers; use zebra_node_services::mempool; use zebra_state::{ HashOrHeight, OutputIndex, OutputLocation, ReadRequest, ReadResponse, TransactionLocation, @@ -89,7 +90,7 @@ pub 
trait Rpc { /// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95) #[method(name = "getinfo")] - fn get_info(&self) -> Result; + async fn get_info(&self) -> Result; /// Returns blockchain state information, as a [`GetBlockChainInfo`] JSON struct. /// @@ -358,7 +359,7 @@ pub trait Rpc { /// RPC method implementations. #[derive(Clone)] -pub struct RpcImpl +pub struct RpcImpl where Mempool: Service< mempool::Request, @@ -379,6 +380,7 @@ where + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { // Configuration // @@ -414,9 +416,19 @@ where // /// A sender component of a channel used to send transactions to the mempool queue. queue_sender: broadcast::Sender, + + /// Peer address book. + address_book: AddressBook, + + /// The last warning or error event logged by the server. + last_event: LoggedLastEvent, } -impl Debug for RpcImpl +/// A type alias for the last event logged by the server. +pub type LoggedLastEvent = + Arc)>>>; + +impl Debug for RpcImpl where Mempool: Service< mempool::Request, @@ -437,6 +449,7 @@ where + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Skip fields without Debug impls, and skip channels @@ -450,7 +463,7 @@ where } } -impl RpcImpl +impl RpcImpl where Mempool: Service< mempool::Request, @@ -471,6 +484,7 @@ where + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { /// Create a new instance of the RPC handler. 
// @@ -486,6 +500,8 @@ where mempool: Mempool, state: State, latest_chain_tip: Tip, + address_book: AddressBook, + last_event: LoggedLastEvent, ) -> (Self, JoinHandle<()>) where VersionString: ToString + Clone + Send + 'static, @@ -511,6 +527,8 @@ where state: state.clone(), latest_chain_tip: latest_chain_tip.clone(), queue_sender, + address_book, + last_event, }; // run the process queue @@ -525,7 +543,7 @@ where } #[async_trait] -impl RpcServer for RpcImpl +impl RpcServer for RpcImpl where Mempool: Service< mempool::Request, @@ -546,11 +564,56 @@ where + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { - fn get_info(&self) -> Result { + async fn get_info(&self) -> Result { + let version = GetInfo::version(&self.build_version).ok_or(ErrorObject::owned( + server::error::LegacyCode::Misc.into(), + "invalid version string", + None::<()>, + ))?; + + // TODO: Change to use `currently_live_peers()` after #9214 + let connections = self.address_book.recently_live_peers(Utc::now()).len(); + + let last_error_recorded = self.last_event.lock().expect("mutex poisoned").clone(); + let (last_event, _last_event_level, last_event_time) = last_error_recorded.unwrap_or(( + GetInfo::default().errors, + tracing::Level::INFO, + Utc::now(), + )); + + let tip_height = self + .latest_chain_tip + .best_tip_height() + .unwrap_or(Height::MIN); + let testnet = self.network.is_a_test_network(); + + // This field is behind the `ENABLE_WALLET` feature flag in zcashd: + // https://github.com/zcash/zcash/blob/v6.1.0/src/rpc/misc.cpp#L113 + // However it is not documented as optional: + // https://github.com/zcash/zcash/blob/v6.1.0/src/rpc/misc.cpp#L70 + // For compatibility, we keep the field in the response, but always return 0. 
+ let pay_tx_fee = 0.0; + + let relay_fee = zebra_chain::transaction::zip317::MIN_MEMPOOL_TX_FEE_RATE as f64 + / (zebra_chain::amount::COIN as f64); + let difficulty = chain_tip_difficulty(self.network.clone(), self.state.clone()).await?; + let response = GetInfo { + version, build: self.build_version.clone(), subversion: self.user_agent.clone(), + protocol_version: zebra_network::constants::CURRENT_NETWORK_PROTOCOL_VERSION.0, + blocks: tip_height.0, + connections, + proxy: None, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors: last_event, + errors_timestamp: last_event_time.to_string(), }; Ok(response) @@ -1542,33 +1605,161 @@ where /// Response to a `getinfo` RPC request. /// /// See the notes for the [`Rpc::get_info` method]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetInfo { + /// The node version + version: u64, + /// The node version build number build: String, /// The server sub-version identifier, used as the network protocol user-agent subversion: String, + + /// The protocol version + #[serde(rename = "protocolversion")] + protocol_version: u32, + + /// The current number of blocks processed in the server + blocks: u32, + + /// The total (inbound and outbound) number of connections the node has + connections: usize, + + /// The proxy (if any) used by the server. Currently always `None` in Zebra. 
+ #[serde(skip_serializing_if = "Option::is_none")] + proxy: Option, + + /// The current network difficulty + difficulty: f64, + + /// True if the server is running in testnet mode, false otherwise + testnet: bool, + + /// The minimum transaction fee in ZEC/kB + #[serde(rename = "paytxfee")] + pay_tx_fee: f64, + + /// The minimum relay fee for non-free transactions in ZEC/kB + #[serde(rename = "relayfee")] + relay_fee: f64, + + /// The last error or warning message, or "no errors" if there are no errors + errors: String, + + /// The time of the last error or warning message, or "no errors timestamp" if there are no errors + #[serde(rename = "errorstimestamp")] + errors_timestamp: String, } impl Default for GetInfo { fn default() -> Self { GetInfo { + version: 0, build: "some build version".to_string(), subversion: "some subversion".to_string(), + protocol_version: 0, + blocks: 0, + connections: 0, + proxy: None, + difficulty: 0.0, + testnet: false, + pay_tx_fee: 0.0, + relay_fee: 0.0, + errors: "no errors".to_string(), + errors_timestamp: "no errors timestamp".to_string(), } } } impl GetInfo { /// Constructs [`GetInfo`] from its constituent parts. - pub fn from_parts(build: String, subversion: String) -> Self { - Self { build, subversion } + #[allow(clippy::too_many_arguments)] + pub fn from_parts( + version: u64, + build: String, + subversion: String, + protocol_version: u32, + blocks: u32, + connections: usize, + proxy: Option, + difficulty: f64, + testnet: bool, + pay_tx_fee: f64, + relay_fee: f64, + errors: String, + errors_timestamp: String, + ) -> Self { + Self { + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + errors_timestamp, + } } /// Returns the contents of ['GetInfo']. 
- pub fn into_parts(self) -> (String, String) { - (self.build, self.subversion) + pub fn into_parts( + self, + ) -> ( + u64, + String, + String, + u32, + u32, + usize, + Option, + f64, + bool, + f64, + f64, + String, + String, + ) { + ( + self.version, + self.build, + self.subversion, + self.protocol_version, + self.blocks, + self.connections, + self.proxy, + self.difficulty, + self.testnet, + self.pay_tx_fee, + self.relay_fee, + self.errors, + self.errors_timestamp, + ) + } + + /// Create the node version number. + pub fn version(build_string: &str) -> Option { + let semver_version = semver::Version::parse(build_string.strip_prefix('v')?).ok()?; + let build_number = semver_version + .build + .as_str() + .split('.') + .next() + .and_then(|num_str| num_str.parse::().ok()) + .unwrap_or_default(); + + // https://github.com/zcash/zcash/blob/v6.1.0/src/clientversion.h#L55-L59 + let version_number = 1_000_000 * semver_version.major + + 10_000 * semver_version.minor + + 100 * semver_version.patch + + build_number; + + Some(version_number) } } diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 0200f88a300..b7df3b30127 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -24,6 +24,7 @@ use zebra_chain::{ }; use zebra_consensus::ParameterCheckpoint; +use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::mempool; use zebra_state::{BoxError, GetBlockTemplateChainInfo}; @@ -968,6 +969,7 @@ fn mock_services( zebra_state::ReadRequest, >, Tip, + MockAddressBookPeers, >, tokio::task::JoinHandle<()>, ) @@ -978,7 +980,7 @@ where let state = MockService::build().for_prop_tests(); let (rpc, mempool_tx_queue) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", network, false, @@ -986,6 +988,8 @@ where mempool.clone(), Buffer::new(state.clone(), 1), chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); (mempool, state, rpc, 
mempool_tx_queue) diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 328d3d0f5a4..5e2754b2382 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -25,6 +25,7 @@ use zebra_chain::{ serialization::ZcashDeserializeInto, subtree::NoteCommitmentSubtreeData, }; +use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::BoxError; use zebra_state::{ReadRequest, ReadResponse, MAX_ON_DISK_HEIGHT}; use zebra_test::mock_service::MockService; @@ -116,6 +117,8 @@ async fn test_z_get_treestate() { Buffer::new(MockService::build().for_unit_tests::<_, _, BoxError>(), 1), state, tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Request the treestate by a hash. @@ -198,7 +201,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { // Init RPC let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "/Zebra:RPC test/", network.clone(), false, @@ -206,6 +209,8 @@ async fn test_rpc_response_data_for_network(network: &Network) { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // We only want a snapshot of the `getblocksubsidy` and `getblockchaininfo` methods for the non-default Testnet (with an NU6 activation height). 
@@ -220,7 +225,10 @@ async fn test_rpc_response_data_for_network(network: &Network) { } // `getinfo` - let get_info = rpc.get_info().expect("We should have a GetInfo struct"); + let get_info = rpc + .get_info() + .await + .expect("We should have a GetInfo struct"); snapshot_rpc_getinfo(get_info, &settings); // `getblockchaininfo` @@ -522,7 +530,7 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { let mempool = MockService::build().for_unit_tests(); let (rpc, _) = RpcImpl::new( - "RPC test", + "0.0.1", "/Zebra:RPC test/", network.clone(), false, @@ -530,6 +538,8 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { mempool, state.clone(), latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Test the response format from `z_getsubtreesbyindex` for Sapling. @@ -599,6 +609,9 @@ fn snapshot_rpc_getinfo(info: GetInfo, settings: &insta::Settings) { // replace with: "[SubVersion]" }), + ".errorstimestamp" => dynamic_redaction(|_value, _path| { + "[LastErrorTimestamp]" + }), }) }); } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_info@mainnet_10.snap index 884da201ee4..85f77bdc486 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_info@mainnet_10.snap @@ -1,9 +1,18 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs -assertion_line: 161 expression: info --- { - "build": "vRPC test", - "subversion": "[SubVersion]" + "version": 100, + "build": "v0.0.1", + "subversion": "[SubVersion]", + "protocolversion": 170120, + "blocks": 10, + "connections": 0, + "difficulty": 1.0, + "testnet": false, + "paytxfee": 0.0, + "relayfee": 0.000001, + "errors": "no errors", + "errorstimestamp": "[LastErrorTimestamp]" } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_info@testnet_10.snap 
b/zebra-rpc/src/methods/tests/snapshots/get_info@testnet_10.snap index 884da201ee4..c1197ff0190 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_info@testnet_10.snap @@ -1,9 +1,18 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs -assertion_line: 161 expression: info --- { - "build": "vRPC test", - "subversion": "[SubVersion]" + "version": 100, + "build": "v0.0.1", + "subversion": "[SubVersion]", + "protocolversion": 170120, + "blocks": 10, + "connections": 0, + "difficulty": 1.0, + "testnet": true, + "paytxfee": 0.0, + "relayfee": 0.000001, + "errors": "no errors", + "errorstimestamp": "[LastErrorTimestamp]" } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index a25b399c697..4ae9cfb1aef 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -14,9 +14,10 @@ use zebra_chain::{ serialization::{ZcashDeserializeInto, ZcashSerialize}, transaction::UnminedTxId, }; +use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::BoxError; -use zebra_state::{LatestChainTip, ReadStateService}; +use zebra_state::{GetBlockTemplateChainInfo, LatestChainTip, ReadStateService}; use zebra_test::mock_service::MockService; use super::super::*; @@ -29,7 +30,7 @@ async fn rpc_getinfo() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "/Zebra:RPC test/", Mainnet, false, @@ -37,13 +38,36 @@ async fn rpc_getinfo() { Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), NoChainTip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); - let get_info = rpc.get_info().expect("We should have a GetInfo struct"); + let getinfo_future = tokio::spawn(async move { rpc.get_info().await }); + + // Make the mock service respond with + let 
response_handler = state + .expect_request(zebra_state::ReadRequest::ChainInfo) + .await; + response_handler.respond(zebra_state::ReadResponse::ChainInfo( + GetBlockTemplateChainInfo { + tip_hash: Mainnet.genesis_hash(), + tip_height: Height::MIN, + history_tree: Default::default(), + expected_difficulty: Default::default(), + cur_time: zebra_chain::serialization::DateTime32::now(), + min_time: zebra_chain::serialization::DateTime32::now(), + max_time: zebra_chain::serialization::DateTime32::now(), + }, + )); + + let get_info = getinfo_future + .await + .expect("getinfo future should not panic") + .expect("getinfo future should not return an error"); // make sure there is a `build` field in the response, // and that is equal to the provided string, with an added 'v' version prefix. - assert_eq!(get_info.build, "vRPC test"); + assert_eq!(get_info.build, "v0.0.1"); // make sure there is a `subversion` field, // and that is equal to the Zebra user agent. @@ -120,7 +144,7 @@ async fn rpc_getblock() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -128,6 +152,8 @@ async fn rpc_getblock() { Buffer::new(mempool.clone(), 1), read_state.clone(), latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Make height calls with verbosity=0 and check response @@ -461,7 +487,7 @@ async fn rpc_getblock_parse_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -469,6 +495,8 @@ async fn rpc_getblock_parse_error() { Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), NoChainTip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Make sure we get an error if Zebra can't parse the block height. 
@@ -504,7 +532,7 @@ async fn rpc_getblock_missing_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -512,6 +540,8 @@ async fn rpc_getblock_missing_error() { Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), NoChainTip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Make sure Zebra returns the correct error code `-8` for missing blocks @@ -566,7 +596,7 @@ async fn rpc_getblockheader() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -574,6 +604,8 @@ async fn rpc_getblockheader() { Buffer::new(mempool.clone(), 1), read_state.clone(), latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Make height calls with verbose=false and check response @@ -677,7 +709,7 @@ async fn rpc_getbestblockhash() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -685,6 +717,8 @@ async fn rpc_getbestblockhash() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Get the tip hash using RPC method `get_best_block_hash` @@ -723,7 +757,7 @@ async fn rpc_getrawtransaction() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -731,6 +765,8 @@ async fn rpc_getrawtransaction() { Buffer::new(mempool.clone(), 1), read_state.clone(), latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // Test case where transaction is in mempool. 
@@ -899,7 +935,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { zebra_state::populated_state(blocks.clone(), &Mainnet).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -907,6 +943,8 @@ async fn rpc_getaddresstxids_invalid_arguments() { Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), latest_chain_tip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // call the method with an invalid address string @@ -1048,7 +1086,7 @@ async fn rpc_getaddresstxids_response_with( let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", network.clone(), false, @@ -1056,6 +1094,8 @@ async fn rpc_getaddresstxids_response_with( Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), latest_chain_tip.clone(), + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // call the method with valid arguments @@ -1100,7 +1140,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let rpc = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -1108,6 +1148,8 @@ async fn rpc_getaddressutxos_invalid_arguments() { Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), NoChainTip, + MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // call the method with an invalid address string @@ -1145,7 +1187,7 @@ async fn rpc_getaddressutxos_response() { zebra_state::populated_state(blocks.clone(), &Mainnet).await; let rpc = RpcImpl::new( - "RPC test", + "0.0.1", "RPC test", Mainnet, false, @@ -1153,6 +1195,8 @@ async fn rpc_getaddressutxos_response() { Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), latest_chain_tip, + 
MockAddressBookPeers::new(vec![]), + crate::methods::LoggedLastEvent::new(None.into()), ); // call the method with a valid address diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 3d54ab8ea54..b1f7e2e8faf 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -24,7 +24,7 @@ use zebra_node_services::mempool; use crate::{ config::Config, - methods::{RpcImpl, RpcServer as _}, + methods::{LoggedLastEvent, RpcImpl, RpcServer as _}, server::{ http_request_compatibility::HttpRequestMiddlewareLayer, rpc_call_compatibility::FixRpcResponseMiddleware, @@ -122,6 +122,7 @@ impl RpcServer { network: Network, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] mined_block_sender: Option>, + last_event: LoggedLastEvent, ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> where VersionString: ToString + Clone + Send + 'static, @@ -171,7 +172,7 @@ impl RpcServer { latest_chain_tip.clone(), block_verifier_router, sync_status, - address_book, + address_book.clone(), mined_block_sender, ); @@ -188,6 +189,8 @@ impl RpcServer { mempool, state, latest_chain_tip, + address_book, + last_event, ); let http_middleware_layer = if config.enable_cookie_auth { diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 6cb83f98326..b65f9a2ca0f 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -57,6 +57,7 @@ async fn rpc_server_spawn() { NoChainTip, Mainnet, None, + crate::methods::LoggedLastEvent::new(None.into()), ); info!("spawned RPC server, checking services..."); @@ -117,6 +118,7 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { NoChainTip, Mainnet, None, + crate::methods::LoggedLastEvent::new(None.into()), ) .await .expect(""); @@ -173,6 +175,7 @@ async fn rpc_server_spawn_port_conflict() { NoChainTip, Mainnet, None, + crate::methods::LoggedLastEvent::new(None.into()), ) .await; @@ -193,6 +196,7 @@ async fn 
rpc_server_spawn_port_conflict() { NoChainTip, Mainnet, None, + crate::methods::LoggedLastEvent::new(None.into()), ) .await; diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index b26734f943a..02fb808239c 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -36,6 +36,11 @@ fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { /// Application state pub static APPLICATION: AppCell = AppCell::new(); +lazy_static::lazy_static! { + /// The last log event that occurred in the application. + pub static ref LAST_LOG_EVENT: Arc)>>> = Arc::new(std::sync::Mutex::new(None)); +} + /// Returns the `zebrad` version for this build, in SemVer 2.0 format. /// /// Includes `git describe` build metatata if available: diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index e2d522a9cd4..388dc443cea 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -90,7 +90,7 @@ use zebra_rpc::server::RpcServer; use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use crate::{ - application::{build_version, user_agent}, + application::{build_version, user_agent, LAST_LOG_EVENT}, components::{ inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, mempool::{self, Mempool}, @@ -273,6 +273,7 @@ impl StartCmd { Some(submit_block_channel.sender()), #[cfg(not(feature = "getblocktemplate-rpcs"))] None, + LAST_LOG_EVENT.clone(), ); rpc_task_handle.await.unwrap() } else { diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index b1f9f8c8c82..e79a9754332 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -3,9 +3,12 @@ use std::{ fs::{self, File}, io::Write, + sync::{Arc, Mutex}, }; use abscissa_core::{Component, FrameworkError, Shutdown}; + +use tracing::{field::Visit, Level}; use tracing_appender::non_blocking::{NonBlocking, NonBlockingBuilder, WorkerGuard}; 
use tracing_error::ErrorLayer; use tracing_subscriber::{ @@ -13,7 +16,7 @@ use tracing_subscriber::{ layer::SubscriberExt, reload::Handle, util::SubscriberInitExt, - EnvFilter, + EnvFilter, Layer, }; use zebra_chain::parameters::Network; @@ -184,7 +187,13 @@ impl Tracing { #[cfg(not(feature = "filter-reload"))] let filter_handle = None; - let subscriber = logger.finish().with(ErrorLayer::default()); + let warn_error_layer = LastWarnErrorLayer { + last_event: crate::application::LAST_LOG_EVENT.clone(), + }; + let subscriber = logger + .finish() + .with(warn_error_layer) + .with(ErrorLayer::default()); (subscriber, filter_handle) }; @@ -426,3 +435,46 @@ impl Drop for Tracing { howudoin::disable(); } } + +// Visitor to extract only the "message" field from a log event. +struct MessageVisitor { + message: Option, +} + +impl Visit for MessageVisitor { + fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { + if field.name() == "message" { + self.message = Some(format!("{:?}", value)); + } + } +} + +// Layer to store the last WARN or ERROR log event. 
+#[derive(Debug, Clone)] +struct LastWarnErrorLayer { + last_event: Arc)>>>, +} + +impl Layer for LastWarnErrorLayer +where + S: tracing::Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>, +{ + fn on_event( + &self, + event: &tracing::Event<'_>, + _ctx: tracing_subscriber::layer::Context<'_, S>, + ) { + let level = *event.metadata().level(); + let timestamp = chrono::Utc::now(); + + if level == Level::WARN || level == Level::ERROR { + let mut visitor = MessageVisitor { message: None }; + event.record(&mut visitor); + + if let Some(message) = visitor.message { + let mut last_event = self.last_event.lock().expect("lock poisoned"); + *last_event = Some((message, level, timestamp)); + } + } + } +} diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 757a61059cf..1201bcd6e04 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1600,6 +1600,9 @@ async fn rpc_endpoint(parallel_cpu_threads: bool) -> Result<()> { // Create an http client let client = RpcRequestClient::new(rpc_address); + // Run `zebrad` for a few seconds before testing the endpoint + std::thread::sleep(LAUNCH_DELAY); + // Make the call to the `getinfo` RPC method let res = client.call("getinfo", "[]".to_string()).await?; From 9591c003eed5a85250f3e8b09a28b3e9cf039188 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 18 Feb 2025 06:03:38 -0500 Subject: [PATCH 098/245] Replaces mutex with watch channel (#9262) --- zebra-rpc/src/methods.rs | 29 +++++++++---------- zebra-rpc/src/methods/tests/prop.rs | 3 +- zebra-rpc/src/methods/tests/snapshot.rs | 9 ++++-- zebra-rpc/src/methods/tests/vectors.rs | 33 ++++++++++++++-------- zebra-rpc/src/server/tests/vectors.rs | 11 +++++--- zebrad/src/application.rs | 3 +- zebrad/src/commands/start.rs | 4 +-- zebrad/src/components/tracing/component.rs | 11 ++++---- 8 files changed, 60 insertions(+), 43 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index fea1cad0d92..a55897a16b7 
100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -18,7 +18,10 @@ use indexmap::IndexMap; use jsonrpsee::core::{async_trait, RpcResult as Result}; use jsonrpsee_proc_macros::rpc; use jsonrpsee_types::{ErrorCode, ErrorObject}; -use tokio::{sync::broadcast, task::JoinHandle}; +use tokio::{ + sync::{broadcast, watch}, + task::JoinHandle, +}; use tower::{Service, ServiceExt}; use tracing::Instrument; @@ -421,12 +424,11 @@ where address_book: AddressBook, /// The last warning or error event logged by the server. - last_event: LoggedLastEvent, + last_warn_error_log_rx: LoggedLastEvent, } /// A type alias for the last event logged by the server. -pub type LoggedLastEvent = - Arc)>>>; +pub type LoggedLastEvent = watch::Receiver)>>; impl Debug for RpcImpl where @@ -501,7 +503,7 @@ where state: State, latest_chain_tip: Tip, address_book: AddressBook, - last_event: LoggedLastEvent, + last_warn_error_log_rx: LoggedLastEvent, ) -> (Self, JoinHandle<()>) where VersionString: ToString + Clone + Send + 'static, @@ -528,7 +530,7 @@ where latest_chain_tip: latest_chain_tip.clone(), queue_sender, address_book, - last_event, + last_warn_error_log_rx, }; // run the process queue @@ -567,17 +569,12 @@ where AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { async fn get_info(&self) -> Result { - let version = GetInfo::version(&self.build_version).ok_or(ErrorObject::owned( - server::error::LegacyCode::Misc.into(), - "invalid version string", - None::<()>, - ))?; + let version = GetInfo::version(&self.build_version).expect("invalid version string"); - // TODO: Change to use `currently_live_peers()` after #9214 let connections = self.address_book.recently_live_peers(Utc::now()).len(); - let last_error_recorded = self.last_event.lock().expect("mutex poisoned").clone(); - let (last_event, _last_event_level, last_event_time) = last_error_recorded.unwrap_or(( + let last_error_recorded = self.last_warn_error_log_rx.borrow().clone(); + let (last_error_log, 
_level, last_error_log_time) = last_error_recorded.unwrap_or(( GetInfo::default().errors, tracing::Level::INFO, Utc::now(), @@ -612,8 +609,8 @@ where testnet, pay_tx_fee, relay_fee, - errors: last_event, - errors_timestamp: last_event_time.to_string(), + errors: last_error_log, + errors_timestamp: last_error_log_time.to_string(), }; Ok(response) diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index b7df3b30127..0e8db0f36e6 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -979,6 +979,7 @@ where let mempool = MockService::build().for_prop_tests(); let state = MockService::build().for_prop_tests(); + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, mempool_tx_queue) = RpcImpl::new( "0.0.1", "RPC test", @@ -989,7 +990,7 @@ where Buffer::new(state.clone(), 1), chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); (mempool, state, rpc, mempool_tx_queue) diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 5e2754b2382..dd3a49f2c57 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -108,6 +108,7 @@ async fn test_z_get_treestate() { let (_, state, tip, _) = zebra_state::populated_state(blocks.clone(), &testnet).await; + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, _) = RpcImpl::new( "", "", @@ -118,7 +119,7 @@ async fn test_z_get_treestate() { state, tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Request the treestate by a hash. 
@@ -200,6 +201,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { .await; // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "/Zebra:RPC test/", @@ -210,7 +212,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { read_state, latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // We only want a snapshot of the `getblocksubsidy` and `getblockchaininfo` methods for the non-default Testnet (with an NU6 activation height). @@ -529,6 +531,7 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { let mut state = MockService::build().for_unit_tests(); let mempool = MockService::build().for_unit_tests(); + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, _) = RpcImpl::new( "0.0.1", "/Zebra:RPC test/", @@ -539,7 +542,7 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { state.clone(), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Test the response format from `z_getsubtreesbyindex` for Sapling. 
diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 4ae9cfb1aef..315c2385d94 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -29,6 +29,7 @@ async fn rpc_getinfo() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "/Zebra:RPC test/", @@ -39,7 +40,7 @@ async fn rpc_getinfo() { Buffer::new(state.clone(), 1), NoChainTip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); let getinfo_future = tokio::spawn(async move { rpc.get_info().await }); @@ -143,6 +144,7 @@ async fn rpc_getblock() { zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -153,7 +155,7 @@ async fn rpc_getblock() { read_state.clone(), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Make height calls with verbosity=0 and check response @@ -486,6 +488,7 @@ async fn rpc_getblock_parse_error() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -496,7 +499,7 @@ async fn rpc_getblock_parse_error() { Buffer::new(state.clone(), 1), NoChainTip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Make sure we get an error if Zebra can't parse the block height. 
@@ -531,6 +534,7 @@ async fn rpc_getblock_missing_error() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -541,7 +545,7 @@ async fn rpc_getblock_missing_error() { Buffer::new(state.clone(), 1), NoChainTip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Make sure Zebra returns the correct error code `-8` for missing blocks @@ -595,6 +599,7 @@ async fn rpc_getblockheader() { zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -605,7 +610,7 @@ async fn rpc_getblockheader() { read_state.clone(), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Make height calls with verbose=false and check response @@ -708,6 +713,7 @@ async fn rpc_getbestblockhash() { zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -718,7 +724,7 @@ async fn rpc_getbestblockhash() { read_state, latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Get the tip hash using RPC method `get_best_block_hash` @@ -756,6 +762,7 @@ async fn rpc_getrawtransaction() { latest_chain_tip_sender.send_best_tip_height(Height(10)); // Init RPC + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -766,7 +773,7 @@ async fn rpc_getrawtransaction() { read_state.clone(), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // Test 
case where transaction is in mempool. @@ -934,6 +941,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { let (_state, read_state, latest_chain_tip, _chain_tip_change) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -944,7 +952,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { Buffer::new(read_state.clone(), 1), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // call the method with an invalid address string @@ -1085,6 +1093,7 @@ async fn rpc_getaddresstxids_response_with( ) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "0.0.1", "RPC test", @@ -1095,7 +1104,7 @@ async fn rpc_getaddresstxids_response_with( Buffer::new(read_state.clone(), 1), latest_chain_tip.clone(), MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // call the method with valid arguments @@ -1139,6 +1148,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let (_tx, rx) = tokio::sync::watch::channel(None); let rpc = RpcImpl::new( "0.0.1", "RPC test", @@ -1149,7 +1159,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { Buffer::new(state.clone(), 1), NoChainTip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // call the method with an invalid address string @@ -1186,6 +1196,7 @@ async fn rpc_getaddressutxos_response() { let (_state, read_state, latest_chain_tip, _chain_tip_change) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_tx, rx) 
= tokio::sync::watch::channel(None); let rpc = RpcImpl::new( "0.0.1", "RPC test", @@ -1196,7 +1207,7 @@ async fn rpc_getaddressutxos_response() { Buffer::new(read_state.clone(), 1), latest_chain_tip, MockAddressBookPeers::new(vec![]), - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); // call the method with a valid address diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index b65f9a2ca0f..183f54d23b9 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -44,6 +44,7 @@ async fn rpc_server_spawn() { info!("spawning RPC server..."); + let (_tx, rx) = watch::channel(None); let _rpc_server_task_handle = RpcServer::spawn( config, Default::default(), @@ -57,7 +58,7 @@ async fn rpc_server_spawn() { NoChainTip, Mainnet, None, - crate::methods::LoggedLastEvent::new(None.into()), + rx, ); info!("spawned RPC server, checking services..."); @@ -105,6 +106,7 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { info!("spawning RPC server..."); + let (_tx, rx) = watch::channel(None); let rpc_server_task_handle = RpcServer::spawn( config, Default::default(), @@ -118,7 +120,7 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { NoChainTip, Mainnet, None, - crate::methods::LoggedLastEvent::new(None.into()), + rx, ) .await .expect(""); @@ -162,6 +164,7 @@ async fn rpc_server_spawn_port_conflict() { info!("spawning RPC server 1..."); + let (_tx, rx) = watch::channel(None); let _rpc_server_1_task_handle = RpcServer::spawn( config.clone(), Default::default(), @@ -175,7 +178,7 @@ async fn rpc_server_spawn_port_conflict() { NoChainTip, Mainnet, None, - crate::methods::LoggedLastEvent::new(None.into()), + rx.clone(), ) .await; @@ -196,7 +199,7 @@ async fn rpc_server_spawn_port_conflict() { NoChainTip, Mainnet, None, - crate::methods::LoggedLastEvent::new(None.into()), + rx, ) .await; diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 02fb808239c..ae946a6e0dc 
100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -14,6 +14,7 @@ use abscissa_core::{ }; use semver::{BuildMetadata, Version}; +use tokio::sync::watch; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_state::{ constants::LOCK_FILE_ERROR, state_database_format_version_in_code, @@ -38,7 +39,7 @@ pub static APPLICATION: AppCell = AppCell::new(); lazy_static::lazy_static! { /// The last log event that occurred in the application. - pub static ref LAST_LOG_EVENT: Arc)>>> = Arc::new(std::sync::Mutex::new(None)); + pub static ref LAST_WARN_ERROR_LOG_SENDER: watch::Sender)>> = watch::Sender::new(None); } /// Returns the `zebrad` version for this build, in SemVer 2.0 format. diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 388dc443cea..45e4554a08e 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -90,7 +90,7 @@ use zebra_rpc::server::RpcServer; use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use crate::{ - application::{build_version, user_agent, LAST_LOG_EVENT}, + application::{build_version, user_agent, LAST_WARN_ERROR_LOG_SENDER}, components::{ inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, mempool::{self, Mempool}, @@ -273,7 +273,7 @@ impl StartCmd { Some(submit_block_channel.sender()), #[cfg(not(feature = "getblocktemplate-rpcs"))] None, - LAST_LOG_EVENT.clone(), + LAST_WARN_ERROR_LOG_SENDER.subscribe(), ); rpc_task_handle.await.unwrap() } else { diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index e79a9754332..0f8a6655ae1 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -3,11 +3,11 @@ use std::{ fs::{self, File}, io::Write, - sync::{Arc, Mutex}, }; use abscissa_core::{Component, FrameworkError, Shutdown}; +use tokio::sync::watch; use tracing::{field::Visit, Level}; use 
tracing_appender::non_blocking::{NonBlocking, NonBlockingBuilder, WorkerGuard}; use tracing_error::ErrorLayer; @@ -188,7 +188,7 @@ impl Tracing { let filter_handle = None; let warn_error_layer = LastWarnErrorLayer { - last_event: crate::application::LAST_LOG_EVENT.clone(), + last_warn_error_sender: crate::application::LAST_WARN_ERROR_LOG_SENDER.clone(), }; let subscriber = logger .finish() @@ -452,7 +452,7 @@ impl Visit for MessageVisitor { // Layer to store the last WARN or ERROR log event. #[derive(Debug, Clone)] struct LastWarnErrorLayer { - last_event: Arc)>>>, + last_warn_error_sender: watch::Sender)>>, } impl Layer for LastWarnErrorLayer @@ -472,8 +472,9 @@ where event.record(&mut visitor); if let Some(message) = visitor.message { - let mut last_event = self.last_event.lock().expect("lock poisoned"); - *last_event = Some((message, level, timestamp)); + let _ = self + .last_warn_error_sender + .send(Some((message, level, timestamp))); } } } From 5953f8a2c7761ffe0df65f68ffefb04def0765d6 Mon Sep 17 00:00:00 2001 From: Greg Pfeil Date: Tue, 18 Feb 2025 04:21:49 -0700 Subject: [PATCH 099/245] refactor(ci): do not run workflows tied to `ZcashFoundation` infra in forks (#9257) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix GitHub workflows There are two workflows that have bugs in them: - cd-deploy-nodes-gcp is missing a job name, which is then depended-on later - sub-ci-integration-tests-gcp changed the name of a job without changing its dependents * Stop certain CI jobs from running on forks There are various GitHub CI jobs that won’t work on forks. E.g., some need credentials for ZF’s DockerHub or GCP accounts. Unfortunately, you can’t prevent entire workflows from running this way, but this disables a minimal number of jobs to keep forks from failing whenever `main` is pushed to. 
--- .github/workflows/cd-deploy-nodes-gcp.yml | 9 +++------ .github/workflows/chore-delete-gcp-resources.yml | 2 ++ .github/workflows/ci-unit-tests-os.yml | 2 +- .github/workflows/docs-deploy-firebase.yml | 6 ++++++ .github/workflows/docs-dockerhub-description.yml | 1 + .github/workflows/sub-build-docker-image.yml | 3 ++- .github/workflows/sub-ci-integration-tests-gcp.yml | 2 +- 7 files changed, 16 insertions(+), 9 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 1338ed5f7b3..f7d15886293 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -227,12 +227,9 @@ jobs: # - on every push to the `main` branch # - on every release, when it's published # - on workflow_dispatch for manual deployments - + # Determine which networks to deploy based on the trigger - - - - : + set-matrix: runs-on: ubuntu-latest outputs: networks: ${{ steps.set-networks.outputs.matrix }} @@ -268,7 +265,7 @@ jobs: permissions: contents: "read" id-token: "write" - if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} + if: ${{ !cancelled() && !failure() && github.repository_owner == 'ZcashFoundation' && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} steps: - uses: actions/checkout@v4.2.2 diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 661c8c05093..92d9103480b 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -34,6 +34,7 @@ env: jobs: delete-resources: name: Delete old GCP resources + if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest permissions: contents: 'read' @@ -105,6 +106,7 @@ jobs: # The same 
artifacts are used for both mainnet and testnet. clean-registries: name: Delete unused artifacts in registry + if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest permissions: contents: 'read' diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 6e9dc77d91d..d37328a5c12 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -309,7 +309,7 @@ jobs: needs: [ test, install-from-lockfile-no-cache, check-cargo-lock, cargo-deny, unused-deps ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) - if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) + if: (failure() || cancelled()) && github.repository_owner == 'ZcashFoundation' && github.event.pull_request == null runs-on: ubuntu-latest steps: - uses: jayqi/failed-build-issue-action@v1 diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index eca70c4d98b..eecb9ffacf5 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -105,6 +105,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud + if: github.repository_owner == 'ZcashFoundation' id: auth uses: google-github-actions/auth@v2.1.8 with: @@ -114,11 +115,13 @@ jobs: # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - name: Add $GCP_FIREBASE_SA_PATH to env + if: github.repository_owner == 'ZcashFoundation' run: | # shellcheck disable=SC2002 echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - name: Deploy Zebra book to firebase + if: github.repository_owner == 'ZcashFoundation' uses: FirebaseExtended/action-hosting-deploy@v0.9.0 with: 
firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} @@ -163,6 +166,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud + if: github.repository_owner == 'ZcashFoundation' id: auth uses: google-github-actions/auth@v2.1.8 with: @@ -171,11 +175,13 @@ jobs: # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - name: Add $GCP_FIREBASE_SA_PATH to env + if: github.repository_owner == 'ZcashFoundation' run: | # shellcheck disable=SC2002 echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - name: Deploy internal docs to firebase + if: github.repository_owner == 'ZcashFoundation' uses: FirebaseExtended/action-hosting-deploy@v0.9.0 with: firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index 754208d6651..8efdaca1d54 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -15,6 +15,7 @@ on: jobs: dockerHubDescription: + if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4.2.2 diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 1ec1b88c0d2..7a64abaa22b 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -61,6 +61,7 @@ env: jobs: build: name: Build images + if: github.repository_owner == 'ZcashFoundation' timeout-minutes: 210 runs-on: ubuntu-latest environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} @@ -111,7 +112,7 @@ jobs: # DockerHub release and CI tags. # This tag makes sure tests are using exactly the right image, even when multiple PRs run at the same time. type=sha,event=push - # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs. 
+ # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs. # These tags do not appear on DockerHub, because DockerHub images are only published on the release event. type=ref,event=pr type=ref,event=branch diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 438b32ac235..fde8ef835ca 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -523,7 +523,7 @@ jobs: lightwalletd-grpc-test, get-block-template-test, submit-block-test, - scan-task-commands-test, + test-scanner, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) From 1e9f021784ea9e0f5df17b4e778b2df93fb5e68f Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Tue, 18 Feb 2025 15:49:45 -0400 Subject: [PATCH 100/245] make chain_tip_difficulty pub to reuse in zaino (#9271) --- zebra-rpc/src/methods.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index a55897a16b7..0a5e6e351f5 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -2733,7 +2733,7 @@ mod opthex { } } /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. -pub(crate) async fn chain_tip_difficulty(network: Network, mut state: State) -> Result +pub async fn chain_tip_difficulty(network: Network, mut state: State) -> Result where State: Service< zebra_state::ReadRequest, From ac25192afcb1c3280ff32d2bb2e75f91f1d9ceb0 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 19 Feb 2025 18:44:06 +0100 Subject: [PATCH 101/245] docs(docker): update examples for running Zebra in Docker (#9269) * Rm `.env` files * Update `mining-docker.md` * Revert "Rm `.env` files" This reverts commit caaa4559c3eac58173126eddd9f37b9a8642ad26. 
* Add `enable_cookie_auth` to default Zebra conf * Rename `default_zebra_config.toml` * fmt `prometheus.yaml` * Update `docker/test.env` * Update `docker/.env` * Refactor `docker compose` for lwd * Enable disabling cookie authentication * Update `docker compose` for tests * Update general `docker compose` * Update docs for running Zebra in Docker * Add example `docker compose` file for Grafana * Fix a bug in an example command * Refactor test execution logic in entrypoint * Rename `v2.1.0.toml` conf to `custom-conf.toml` * Fix CI tests for loading of custom conf files * Use the new conf file name in CI checks * Use an extended regexp for custom conf CI check --- .dockerignore | 2 +- .github/workflows/cd-deploy-nodes-gcp.yml | 4 +- .../workflows/sub-ci-unit-tests-docker.yml | 4 +- book/src/user/docker.md | 227 ++++++++---------- book/src/user/mining-docker.md | 47 +++- docker/.env | 85 ++++--- docker/Dockerfile | 4 +- ..._config.toml => default-zebra-config.toml} | 12 +- docker/docker-compose.grafana.yml | 52 ++++ docker/docker-compose.lwd.yml | 44 ++-- docker/docker-compose.test.yml | 19 +- docker/docker-compose.yml | 53 ++-- docker/entrypoint.sh | 73 +++--- docker/test.env | 110 ++++++--- docker/zcash.conf | 2 + prometheus.yaml | 7 +- zebrad/tests/common/configs/custom-conf.toml | 54 +++++ zebrad/tests/common/configs/v2.1.0.toml | 85 ------- 18 files changed, 483 insertions(+), 401 deletions(-) rename docker/{default_zebra_config.toml => default-zebra-config.toml} (81%) create mode 100644 docker/docker-compose.grafana.yml create mode 100644 docker/zcash.conf create mode 100644 zebrad/tests/common/configs/custom-conf.toml delete mode 100644 zebrad/tests/common/configs/v2.1.0.toml diff --git a/.dockerignore b/.dockerignore index b26a7d4f413..137c05f2931 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,4 +21,4 @@ !zebra-* !zebrad !docker/entrypoint.sh -!docker/default_zebra_config.toml +!docker/default-zebra-config.toml diff --git 
a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index f7d15886293..3577a70711e 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -168,8 +168,8 @@ jobs: with: test_id: "custom-conf" docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v2.1.0.toml"' - grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v2.1.0.toml"' + test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' + grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index ab4d9a97f1d..c9553e11bca 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -157,8 +157,8 @@ jobs: with: test_id: "custom-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v2.1.0.toml"' - test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v2.1.0.toml"' + test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' + grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' failure-issue: name: Open or update issues for main branch failures diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 7709bdca4e7..1dbe9425bee 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -1,176 +1,159 @@ # Zebra with Docker -The easiest way to run Zebra is using [Docker](https://docs.docker.com/get-docker/). +The foundation maintains a Docker infrastructure for deploying and testing Zebra. 
-We've embraced Docker in Zebra for most of the solution lifecycle, from development environments to CI (in our pipelines), and deployment to end users. +## Quick Start -> [!TIP] -> We recommend using `docker compose` sub-command over the plain `docker` CLI, especially for more advanced use-cases like running CI locally, as it provides a more convenient and powerful way to manage multi-container Docker applications. See [CI/CD Local Testing](#cicd-local-testing) for more information, and other compose files available in the [docker](https://github.com/ZcashFoundation/zebra/tree/main/docker) folder. - -## Quick usage - -You can deploy Zebra for daily use with the images available in [Docker Hub](https://hub.docker.com/r/zfnd/zebra) or build it locally for testing. - -### Ready to use image - -Using `docker compose`: +To get Zebra quickly up and running, you can use an off-the-rack image from +[Docker Hub](https://hub.docker.com/r/zfnd/zebra/tags): ```shell -docker compose -f docker/docker-compose.yml up +docker run --name zebra zfnd/zebra ``` -With plain `docker` CLI: +If you want to preserve Zebra's state, you can create a Docker volume: ```shell docker volume create zebrad-cache - -docker run -d --platform linux/amd64 \ - --restart unless-stopped \ - --env-file .env \ - --mount type=volume,source=zebrad-cache,target=/home/zebra/.cache/zebra \ - -p 8233:8233 \ - --memory 16G \ - --cpus 4 \ - zfnd/zebra ``` -### Build it locally +And mount it before you start the container: ```shell -git clone --depth 1 --branch v2.2.0 https://github.com/ZcashFoundation/zebra.git -docker build --file docker/Dockerfile --target runtime --tag zebra:local . -docker run --detach zebra:local -``` - -### Alternatives - -See [Building Zebra](https://github.com/ZcashFoundation/zebra#building-zebra) for more information. - -## Advanced usage - -You're able to specify various parameters when building or launching the Docker image, which are meant to be used by developers and CI pipelines. 
For example, specifying the Network where Zebra will run (Mainnet, Testnet, etc), or enabling features like metrics with Prometheus. - -For example, if we'd like to enable metrics on the image, we'd build it using the following `build-arg`: - -> [!IMPORTANT] -> To fully use and display the metrics, you'll need to run a Prometheus and Grafana server, and configure it to scrape and visualize the metrics endpoint. This is explained in more detailed in the [Metrics](https://zebra.zfnd.org/user/metrics.html#zebra-metrics) section of the User Guide. - -```shell -docker build -f ./docker/Dockerfile --target runtime --build-arg FEATURES='default-release-binaries prometheus' --tag local/zebra.mining:latest . +docker run \ + --mount type=volume,source=zebrad-cache,target=/home/zebra/.cache/zebra \ + --name zebra \ + zfnd/zebra ``` -To increase the log output we can optionally add these `build-arg`s: +You can also use `docker compose`, which we recommend. First get the repo: ```shell ---build-arg RUST_BACKTRACE=full --build-arg RUST_LOG=debug --build-arg COLORBT_SHOW_HIDDEN=1 +git clone --depth 1 --branch v2.2.0 https://github.com/ZcashFoundation/zebra.git +cd zebra ``` -And after our image has been built, we can run it on `Mainnet` with the following command, which will expose the metrics endpoint on port `9999` and force the logs to be colored: +Then run: ```shell -docker run --env LOG_COLOR="true" -p 9999:9999 local/zebra.mining -``` - -Based on our actual `entrypoint.sh` script, the following configuration file will be generated (on the fly, at startup) and used by Zebra: - -```toml -[network] -network = "Mainnet" -listen_addr = "0.0.0.0" -[state] -cache_dir = "/home/zebra/.cache/zebra" -[metrics] -endpoint_addr = "127.0.0.1:9999" +docker compose -f docker/docker-compose.yml up ``` -### Running Zebra with Lightwalletd +## Custom Images -To run Zebra with Lightwalletd, we recommend using the provided `docker compose` files for Zebra and Lightwalletd, which will start both 
services and connect them together, while exposing ports, mounting volumes, and setting environment variables. +If you want to use your own images with, for example, some opt-in compilation +features enabled, add the desired features to the `FEATURES` variable in the +`docker/.env` file and build the image: ```shell -docker compose -f docker/docker-compose.yml -f docker/docker-compose.lwd.yml up +docker build \ + --file docker/Dockerfile \ + --env-file docker/.env \ + --target runtime \ + --tag zebra:local \ + . ``` -### CI/CD Local Testing - -To run CI tests locally, which mimics the testing done in our CI pipelines on GitHub Actions, use the `docker-compose.test.yml` file. This setup allows for a consistent testing environment both locally and in CI. - -#### Running Tests Locally - -1. **Setting Environment Variables**: - - Modify the `test.env` file to set the desired test configurations. - - For running all tests, set `RUN_ALL_TESTS=1` in `test.env`. - -2. **Starting the Test Environment**: - - Use Docker Compose to start the testing environment: - - ```shell - docker-compose -f docker/docker-compose.test.yml up - ``` - - - This will start the Docker container and run the tests based on `test.env` settings. - -3. **Viewing Test Output**: - - The test results and logs will be displayed in the terminal. +All available Cargo features are listed at +. -4. **Stopping the Environment**: - - Once testing is complete, stop the environment using: +## Configuring Zebra - ```shell - docker-compose -f docker/docker-compose.test.yml down - ``` +To configure Zebra, edit the `docker/default-zebra-config.toml` config file and +uncomment the `configs` mapping in `docker/docker-compose.yml` so your config +takes effect. You can see if your config works as intended by looking at Zebra's +logs. -This approach ensures you can run the same tests locally that are run in CI, providing a robust way to validate changes before pushing to the repository. 
+Alternatively, you can configure Zebra by setting the environment variables in +the `docker/.env` file. Note that the config options of this file are limited to +the variables already present in the commented out blocks in it and adding new +ones will not be effective. Also note that the values of the variables take +precedence over the values set in the `docker/default-zebra-config.toml` config +file. The `docker/.env` file serves as a quick way to override the most commonly +used settings for Zebra, whereas the `docker/default-zebra-config.toml` file +provides full config capabilities. -### Build and Run Time Configuration +### RPC -#### Build Time Arguments +Zebra's RPC server is disabled by default. To enable it, you need to set its RPC +port. You can do that either in the `docker/default-zebra-config.toml` file or +`docker/.env` file, as described in the two paragraphs above. -#### Configuration +When connecting to Zebra's RPC server, your RPC clients need to provide an +authentication cookie to the server or you need to turn the authentication off +in Zebra's config. By default, the cookie file is stored at +`/home/zebra/.cache/zebra/.cookie` in the container. You can print its contents +by running -- `FEATURES`: Specifies the features to build `zebrad` with. Example: `"default-release-binaries getblocktemplate-rpcs"` - -#### Logging +```bash +docker exec zebra cat /home/zebra/.cache/zebra/.cookie +``` -- `RUST_LOG`: Sets the trace log level. Example: `"debug"` -- `RUST_BACKTRACE`: Enables or disables backtraces. Example: `"full"` -- `RUST_LIB_BACKTRACE`: Enables or disables library backtraces. Example: `1` -- `COLORBT_SHOW_HIDDEN`: Enables or disables showing hidden backtraces. Example: `1` +when the `zebra` container is running. Note that Zebra generates the cookie file +only if the RPC server is enabled, and each Zebra instance generates a unique +one. 
To turn the authentication off, either set `ENABLE_COOKIE_AUTH=false` in +`docker/.env` or set -#### Tests +```toml +[rpc] +enable_cookie_auth = false +``` -- `ZEBRA_SKIP_IPV6_TESTS`: Skips IPv6 tests. Example: `1` +in `docker/default-zebra-config.toml` and mount this config file into the +container's filesystem in `docker/docker-compose.yml` as described at the +beginning of this section. -#### CI/CD +## Examples -- `SHORT_SHA`: Represents the short SHA of the commit. Example: `"a1b2c3d"` +To make the initial setup of Zebra with other services easier, we provide some +example files for `docker compose`. The following subsections will walk you +through those examples. -#### Run Time Variables +### Running Zebra with Lightwalletd -- `NETWORK`: Specifies the network type. Example: `"Mainnet"` +The following command will run Zebra with Lightwalletd: -#### Zebra Configuration +```shell +docker compose -f docker/docker-compose.lwd.yml up +``` -- `ZEBRA_CACHE_DIR`: Directory for cached state. Example: `"/home/zebra/.cache/zebra"` +Note that Docker will run Zebra with the RPC server enabled and the cookie +authentication mechanism disabled since Lightwalletd doesn't support it. Instead +of configuring Zebra via the recommended config file or `docker/.env` file, we +configured the RPC server by setting environment variables directly in the +`docker/docker-compose.lwd.yml` file. This is a shortcut we can take when we are +familiar with the `docker/.env` file. -#### Mining Configuration +### Running Zebra with Prometheus and Grafana -- `RPC_LISTEN_ADDR`: Address for RPC to listen on. Example: `"0.0.0.0"` -- `MINER_ADDRESS`: Address for the miner. 
Example: `"t1XhG6pT9xRqRQn3BHP7heUou1RuYrbcrCc"` +The following commands will run Zebra with Prometheus and Grafana: -#### Other Configuration +```shell +docker compose -f docker/docker-compose.grafana.yml build --no-cache +docker compose -f docker/docker-compose.grafana.yml up +``` -- `METRICS_ENDPOINT_ADDR`: Address for metrics endpoint. Example: `"0.0.0.0"` -- `METRICS_ENDPOINT_PORT`: Port for metrics endpoint. Example: `9999` -- `LOG_FILE`: Path to the log file. Example: `"/path/to/log/file.log"` -- `LOG_COLOR`: Enables or disables log color. Example: `false` -- `TRACING_ENDPOINT_ADDR`: Address for tracing endpoint. Example: `"0.0.0.0"` -- `TRACING_ENDPOINT_PORT`: Port for tracing endpoint. Example: `3000` +In this example, we build a local Zebra image with the `prometheus` Cargo +compilation feature. Note that we enable this feature by specifying its name in +the build arguments. Having this Cargo feature specified at build time makes +`cargo` compile Zebra with the metrics support for Prometheus enabled. Note that +we also specify this feature as an environment variable at run time. Having this +feature specified at run time makes Docker's entrypoint script configure Zebra +to open a scraping endpoint on `localhost:9999` for Prometheus. -Specific tests are defined in `docker/test.env` file and can be enabled by setting the corresponding environment variable to `1`. +Once all services are up, the Grafana web UI should be available at +`localhost:3000`, the Prometheus web UI should be at `localhost:9090`, and +Zebra's scraping page should be at `localhost:9999`. The default login and +password for Grafana are both `admin`. To make Grafana use Prometheus, you need +to add Prometheus as a data source with the URL `http://localhost:9090` in +Grafana's UI. You can then import various Grafana dashboards from the `grafana` +directory in the Zebra repo. -## Registries +### Running CI Tests Locally -The images built by the Zebra team are all publicly hosted. 
Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml) (`zebrad-test`, `lightwalletd`) might be deleted on a scheduled basis.
+To run CI tests locally, first set the variables in the `test.env` file to
+configure the tests, then run:
 
-We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zfnd-dev-zebra/us/zebra) to build external tools and test images.
+```shell
+docker compose -f docker/docker-compose.test.yml up
+```
diff --git a/book/src/user/mining-docker.md b/book/src/user/mining-docker.md
index 96f47918763..2550ba23610 100644
--- a/book/src/user/mining-docker.md
+++ b/book/src/user/mining-docker.md
@@ -1,40 +1,43 @@
 # Mining with Zebra in Docker
 
-Zebra's [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) can be used for your mining
-operations. If you don't have Docker, see the
-[manual configuration instructions](https://zebra.zfnd.org/user/mining.html).
+Zebra's [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) can be used
+for your mining operations. If you don't have Docker, see the [manual
+configuration instructions](https://zebra.zfnd.org/user/mining.html).
 
 Using docker, you can start mining by running:
 
 ```bash
-docker run -e MINER_ADDRESS="t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" -p 8232:8232 zfnd/zebra:latest
+docker run --name zebra_local -e MINER_ADDRESS="t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" -e ZEBRA_RPC_PORT=8232 -p 8232:8232 zfnd/zebra:latest
 ```
 
-This command starts a container on Mainnet and binds port 8232 on your Docker host. If you
-want to start generating blocks, you need to let Zebra sync first.
+This command starts a container on Mainnet and binds port 8232 on your Docker
+host. If you want to start generating blocks, you need to let Zebra sync first.
Note that you must pass the address for your mining rewards via the `MINER_ADDRESS` environment variable when you are starting the container, as we -did with the ZF funding stream address above. The address we used starts with the prefix `t1`, -meaning it is a Mainnet P2PKH address. Please remember to set your own address -for the rewards. +did with the ZF funding stream address above. The address we used starts with +the prefix `t1`, meaning it is a Mainnet P2PKH address. Please remember to set +your own address for the rewards. The port we mapped between the container and the host with the `-p` flag in the example above is Zebra's default Mainnet RPC port. Instead of listing the environment variables on the command line, you can use -Docker's `--env-file` flag to specify a file containing the variables. You -can find more info here +Docker's `--env-file` flag to specify a file containing the variables. You can +find more info here https://docs.docker.com/engine/reference/commandline/run/#env. -## Mining on Testnet +If you don't want to set any environment variables, you can edit the +`docker/default-zebra-config.toml` file, and pass it to Zebra before starting +the container. There's an example in `docker/docker-compose.yml` of how to do +that. If you want to mine on Testnet, you need to set the `NETWORK` environment variable to `Testnet` and use a Testnet address for the rewards. For example, running ```bash -docker run -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -p 18232:18232 zfnd/zebra:latest +docker run --name zebra_local -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -e ZEBRA_RPC_PORT=18232 -p 18232:18232 zfnd/zebra:latest ``` will start a container on Testnet and bind port 18232 on your Docker host, which @@ -42,3 +45,21 @@ is the standard Testnet RPC port. Notice that we also used a different rewards address. It starts with the prefix `t2`, indicating that it is a Testnet address. 
A Mainnet address would prevent Zebra from starting on Testnet, and conversely, a Testnet address would prevent Zebra from starting on Mainnet.
+
+To connect to the RPC port, you will need the contents of the [cookie
+file](https://zebra.zfnd.org/user/mining.html?highlight=cookie#testing-the-setup)
+Zebra uses for authentication. By default, it is stored at
+`/home/zebra/.cache/zebra/.cookie`. You can print its contents by running
+
+```bash
+docker exec -it zebra_local cat /home/zebra/.cache/zebra/.cookie
+```
+
+If you want to avoid authentication, you can turn it off by setting
+
+```toml
+[rpc]
+enable_cookie_auth = false
+```
+
+in Zebra's config file before you start the container.
diff --git a/docker/.env b/docker/.env
index 2d96240f23e..b4e8bade406 100644
--- a/docker/.env
+++ b/docker/.env
@@ -1,33 +1,54 @@
-RUST_LOG=info
-# This variable forces the use of color in the logs
-ZEBRA_FORCE_USE_COLOR=1
-LOG_COLOR=true
-
-###
-# Configuration Variables
-# These variables are used to configure the zebra node
-# Check the entrypoint.sh script for more details
-###
-
-# The config file full path used in the Dockerfile.
-ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml
-# [network]
-NETWORK=Mainnet
-ZEBRA_LISTEN_ADDR=0.0.0.0
-# [consensus]
-ZEBRA_CHECKPOINT_SYNC=true
-# [state]
-# Set this to change the default cached state directory
-ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache
-LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache
-# [metrics]
-METRICS_ENDPOINT_ADDR=0.0.0.0
-METRICS_ENDPOINT_PORT=9999
-# [tracing]
-TRACING_ENDPOINT_ADDR=0.0.0.0
-TRACING_ENDPOINT_PORT=3000
-# [rpc]
-RPC_LISTEN_ADDR=0.0.0.0
-# if ${RPC_PORT} is not set, it will use the default value for the current network
-RPC_PORT=8232
+# Configuration variables for running Zebra in Docker
+
+# Sets the network Zebra will run on.
+#
+# NETWORK=Mainnet
+
+# Zebra's RPC server is disabled by default. To enable it, set its port number.
+#
+# ZEBRA_RPC_PORT=8232 # Default RPC port number on Mainnet.
+# ZEBRA_RPC_PORT=18232 # Default RPC port number on Testnet.
+
+# To disable cookie authentication, set the value below to false.
+#
+# ENABLE_COOKIE_AUTH=true
+
+# Sets a custom directory for the state and network caches. Zebra will also
+# store its cookie authentication file in this directory.
+#
+# ZEBRA_CACHE_DIR="/home/zebra/.cache/zebra"
+
+# Sets custom Cargo features. Available features are listed at
+# <https://docs.rs/zebrad/latest/zebrad/index.html#zebra-feature-flags>.
+#
+# Must be set at build time.
+#
+# FEATURES=""
+
+# Logging to a file is disabled by default. To enable it, uncomment the line
+# below and optionally set your own path.
+#
+# LOG_FILE="/home/zebra/.local/state/zebrad.log"
+
+# Zebra recognizes whether its logs are being written to a terminal or a file,
+# and uses colored logs for terminals and uncolored logs for files. Setting the
+# variable below to true will force colored logs even for files and setting it
+# to false will disable colors even for terminals.
+#
+# LOG_COLOR=true
+
+# To disable logging to journald, set the value to false.
+#
+# USE_JOURNALD=true
+
+# If you are going to use Zebra as a backend for a mining pool, set your mining
+# address.
+#
+# MINER_ADDRESS="your_mining_address"
+
+# Controls the output of `env_logger`:
+# https://docs.rs/env_logger/latest/env_logger/
+#
+# Must be set at build time.
+#
+# RUST_LOG=info
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 320ff15aec4..0868ef98917 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -112,7 +112,7 @@ COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/
 # Use the same default config as in the production environment.
ENV ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" -COPY --chown=${UID}:${GID} ./docker/default_zebra_config.toml ${ZEBRA_CONF_PATH} +COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} ARG LWD_CACHE_DIR ENV LWD_CACHE_DIR="${HOME}/.cache/lwd" @@ -204,7 +204,7 @@ RUN adduser --system --gid ${GID} --uid ${UID} --home ${HOME} ${USER} ARG ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" ENV ZEBRA_CONF_PATH=${ZEBRA_CONF_PATH} -COPY --chown=${UID}:${GID} ./docker/default_zebra_config.toml ${ZEBRA_CONF_PATH} +COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} ARG ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" ENV ZEBRA_CACHE_DIR=${ZEBRA_CACHE_DIR} diff --git a/docker/default_zebra_config.toml b/docker/default-zebra-config.toml similarity index 81% rename from docker/default_zebra_config.toml rename to docker/default-zebra-config.toml index d31a702ade4..5b5e5b6ebd1 100644 --- a/docker/default_zebra_config.toml +++ b/docker/default-zebra-config.toml @@ -25,13 +25,19 @@ cache_dir = "/home/zebra/.cache/zebra" cookie_dir = "/home/zebra/.cache/zebra" +# To disable cookie authentication, uncomment the line below and set the value +# to false. + +# enable_cookie_auth = true + [state] cache_dir = "/home/zebra/.cache/zebra" [tracing] -# Zebra uses colored output if it is attached to a terminal. To disable colors, -# set `use_color` to false. To enable colors even for non-terminal outputs, set -# `use_color` to `true` and uncomment the line below. +# Zebra recognizes whether its logs are being written to a terminal or a file, +# and uses colored logs for terminals and uncolored logs for files. To force +# colors even for files, uncomment the line below. To disable colors, set +# `use_color` to false. 
# force_use_color = true use_color = true diff --git a/docker/docker-compose.grafana.yml b/docker/docker-compose.grafana.yml new file mode 100644 index 00000000000..2c7b6b4d7ab --- /dev/null +++ b/docker/docker-compose.grafana.yml @@ -0,0 +1,52 @@ +services: + zebra: + container_name: zebra + build: + context: ../ + dockerfile: docker/Dockerfile + target: runtime + args: + - FEATURES=prometheus + volumes: + - zebrad-cache:/home/zebra/.cache/zebra + tty: true + environment: + - FEATURES=prometheus + network_mode: "host" + ports: + - 9999:9999 + + prometheus: + container_name: prometheus + image: prom/prometheus + volumes: + - prometheus-cache:/prometheus + configs: + - source: prometheus-config + target: /etc/prometheus/prometheus.yml + network_mode: "host" + ports: + - 9090:9090 + + grafana: + container_name: grafana + image: grafana/grafana + volumes: + - grafana-cache:/var/lib/grafana + network_mode: "host" + ports: + - 3000:3000 + +volumes: + zebrad-cache: + driver: local + + grafana-cache: + driver: local + + prometheus-cache: + driver: local + +configs: + prometheus-config: + file: ../prometheus.yaml diff --git a/docker/docker-compose.lwd.yml b/docker/docker-compose.lwd.yml index 7d8c56b1855..456e7602d97 100644 --- a/docker/docker-compose.lwd.yml +++ b/docker/docker-compose.lwd.yml @@ -1,15 +1,22 @@ -version: "3.8" - services: zebra: + container_name: zebra + image: zfnd/zebra + platform: linux/amd64 + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + volumes: + - zebrad-cache:/home/zebra/.cache/zebra + tty: true + environment: + - ZEBRA_RPC_PORT=8232 + - ENABLE_COOKIE_AUTH=false ports: - - "8232:8232" # Opens an RPC endpoint (for lightwalletd and mining) - healthcheck: - start_period: 1m - interval: 15s - timeout: 10s - retries: 3 - test: ["CMD-SHELL", "curl --data-binary '{\"id\":\"curltest\", \"method\": \"getinfo\"}' -H 'content-type: application/json' 127.0.0.1:8232 || exit 1"] + - "8232:8232" lightwalletd: image: 
electriccoinco/lightwalletd @@ -29,13 +36,11 @@ services: configs: - source: lwd_config target: /etc/lightwalletd/zcash.conf - uid: '2002' # Golang's container default user uid - gid: '2002' # Golang's container default group gid - mode: 0440 volumes: - - litewalletd-data:/var/lib/lightwalletd/db - #! This setup with --no-tls-very-insecure is only for testing purposes - #! For production environments follow the guidelines here: https://github.com/zcash/lightwalletd#production-usage + - lwd-cache:/var/lib/lightwalletd/db + #! This setup with `--no-tls-very-insecure` is only for testing purposes. + #! For production environments, follow the guidelines here: + #! https://github.com/zcash/lightwalletd#production-usage command: > --no-tls-very-insecure --grpc-bind-addr=0.0.0.0:9067 @@ -50,10 +55,11 @@ services: configs: lwd_config: - # Change the following line to point to a zcash.conf on your host machine - # to allow for easy configuration changes without rebuilding the image - file: ./zcash-lightwalletd/zcash.conf + file: ./zcash.conf volumes: - litewalletd-data: + zebrad-cache: + driver: local + + lwd-cache: driver: local diff --git a/docker/docker-compose.test.yml b/docker/docker-compose.test.yml index b5730359bcb..d3659612a82 100644 --- a/docker/docker-compose.test.yml +++ b/docker/docker-compose.test.yml @@ -1,30 +1,13 @@ -version: "3.8" - services: zebra: + container_name: zebra build: context: ../ dockerfile: docker/Dockerfile target: tests - restart: unless-stopped - deploy: - resources: - reservations: - cpus: "4" - memory: 16G - # Change this to the command you want to run, respecting the entrypoint.sh - # For example, to run the tests, use the following command: - # command: ["cargo", "test", "--locked", "--release", "--features", "${FEATURES}", "--package", "zebrad", "--test", "acceptance", "--", "--nocapture", "--include-ignored", "sync_large_checkpoints_"] volumes: - zebrad-cache:/home/zebra/.cache/zebra - lwd-cache:/home/zebra/.cache/lwd - ports: - # 
Zebra uses the following inbound and outbound TCP ports - - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) - - "8233:8233" # Mainnet Network (for peer connections) - - "18233:18233" # Testnet Network - # - "9999:9999" # Metrics - # - "3000:3000" # Tracing env_file: - test.env diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index f9c71d6e0d4..a30e248b020 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,13 +1,8 @@ -version: "3.8" - services: zebra: + container_name: zebra image: zfnd/zebra platform: linux/amd64 - build: - context: ../ - dockerfile: docker/Dockerfile - target: runtime restart: unless-stopped deploy: resources: @@ -16,33 +11,33 @@ services: memory: 16G env_file: - .env - logging: - options: - max-size: "10m" - max-file: "5" - #! Uncomment the `configs` mapping below to use the `zebrad.toml` config file from the host machine - #! NOTE: This will override the zebrad.toml in the image and make some variables irrelevant - # configs: - # - source: zebra_config - # target: /etc/zebrad/zebrad.toml - # uid: '2001' # Rust's container default user uid - # gid: '2001' # Rust's container default group gid - # mode: 0440 volumes: - zebrad-cache:/home/zebra/.cache/zebra - ports: - # Zebra uses the following default inbound and outbound TCP ports - - "8233:8233" # Mainnet Network (for peer connections) - # - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) - # - "18233:18233" # Testnet Network - # - "9999:9999" # Metrics - # - "3000:3000" # Tracing + # Having `tty` set to true makes Zebra use colored logs. + tty: true + #! Uncomment the `configs` mapping below to make your custom configuration + #! take effect. + # + # configs: + # - source: zebra-config + # target: /home/zebra/.config/zebrad.toml + # + # Uncomment the `ports` mapping below to map ports between the container and + # host. 
+ # + # ports: + # - "8232:8232" # RPC endpoint on Mainnet + # - "18232:18232" # RPC endpoint on Testnet + # - "8233:8233" # peer connections on Mainnet + # - "18233:18233" # peer connections on Testnet + # - "9999:9999" # Metrics + # - "3000:3000" # Tracing configs: - zebra_config: - # Change the following line to point to a zebrad.toml on your host machine - # to allow for easy configuration changes without rebuilding the image - file: ../zebrad/tests/common/configs/v1.0.0-rc.2.toml + zebra-config: + #! To customize the default configuration, edit this file before starting + #! the container. + file: ./default-zebra-config.toml volumes: zebrad-cache: diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index f58d7c57147..eca56196418 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -6,7 +6,7 @@ # # ## Notes # -# - `$ZEBRA_CONF_PATH` must point to a Zebra conf file writable by `$USER`. +# - `$ZEBRA_CONF_PATH` must point to a Zebra conf file. set -eo pipefail @@ -16,25 +16,32 @@ if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then exit 1 fi -# Populates the config file for Zebra, using the env vars set by the Dockerfile -# or user. -# -# Also prints the content of the generated config file. +# Generates a config file for Zebra using env vars set in "docker/.env" and +# prints the location of the generated config file. # # ## Positional Parameters # -# - "$1": the file to write the config to +# - "$1": the file to read the default config from prepare_conf_file() { - # Set a custom `network`. + # Copy the default config to a new location for writing. + CONF=~/zebrad.toml + cp "${1}" "${CONF}" + + # Set a custom network. if [[ "${NETWORK}" ]]; then - sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${1}" + sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${CONF}" fi # Enable the RPC server by setting its port. 
if [[ "${ZEBRA_RPC_PORT}" ]]; then - sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${1}" - sed -i 's/ *# Mainnet$//' "${1}" - sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${1}" + sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${CONF}" + sed -i 's/ *# Mainnet$//' "${CONF}" + sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${CONF}" + fi + + # Disable or enable cookie authentication. + if [[ "${ENABLE_COOKIE_AUTH}" ]]; then + sed -i '/# enable_cookie_auth = true/s/^# //; s/true/'"${ENABLE_COOKIE_AUTH//\"/}"'/' "${CONF}" fi # Set a custom state, network and cookie cache dirs. @@ -44,41 +51,40 @@ prepare_conf_file() { # use them to set the cache dirs separately if needed. if [[ "${ZEBRA_CACHE_DIR}" ]]; then mkdir -p "${ZEBRA_CACHE_DIR//\"/}" - sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${1}" + sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${CONF}" fi # Enable the Prometheus metrics endpoint. if [[ "${FEATURES}" == *"prometheus"* ]]; then - sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${1}" + sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${CONF}" fi # Enable logging to a file by setting a custom log file path. if [[ "${LOG_FILE}" ]]; then mkdir -p "$(dirname "${LOG_FILE//\"/}")" - sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${1}" + sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${CONF}" fi # Enable or disable colored logs. if [[ "${LOG_COLOR}" ]]; then - sed -i '/# force_use_color = true/s/^# //' "${1}" - sed -i '/use_color = true/s/true/'"${LOG_COLOR//\"/}"'/' "${1}" + sed -i '/# force_use_color = true/s/^# //' "${CONF}" + sed -i '/use_color = true/s/true/'"${LOG_COLOR//\"/}"'/' "${CONF}" fi # Enable or disable logging to systemd-journald. 
if [[ "${USE_JOURNALD}" ]]; then - sed -i '/# use_journald = true/s/^# //; s/true/'"${USE_JOURNALD//\"/}"'/' "${1}" + sed -i '/# use_journald = true/s/^# //; s/true/'"${USE_JOURNALD//\"/}"'/' "${CONF}" fi # Set a mining address. if [[ "${MINER_ADDRESS}" ]]; then - sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${1}" + sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${CONF}" fi # Trim all comments and empty lines. - sed -i '/^#/d; /^$/d' "${1}" + sed -i '/^#/d; /^$/d' "${CONF}" - echo "Prepared the following Zebra config:" - cat "$1" + echo "${CONF}" } # Checks if a directory contains subdirectories @@ -251,31 +257,36 @@ run_tests() { run_cargo_test "${FEATURES}" "submit_block" else - if [[ "$1" == "zebrad" ]]; then - shift - exec zebrad -c "${ZEBRA_CONF_PATH}" "$@" - else - exec "$@" - fi + exec "$@" fi } # Main Script Logic -prepare_conf_file "$ZEBRA_CONF_PATH" +echo "Prepared the following Zebra config:" + +CONF_PATH=$(prepare_conf_file "${ZEBRA_CONF_PATH}") +cat "${CONF_PATH}" # - If "$1" is "--", "-", or "zebrad", run `zebrad` with the remaining params. -# - If "$1" is "tests", run tests. +# - If "$1" is "tests": +# - and "$2" is "zebrad", run `zebrad` with the remaining params, +# - else run tests with the remaining params. # - TODO: If "$1" is "monitoring", start a monitoring node. # - If "$1" doesn't match any of the above, run "$@" directly. case "$1" in --* | -* | zebrad) shift - exec zebrad --config "${ZEBRA_CONF_PATH}" "$@" + exec zebrad --config "${CONF_PATH}" "$@" ;; test) shift - run_tests "$@" + if [[ "$1" == "zebrad" ]]; then + shift + exec zebrad --config "${CONF_PATH}" "$@" + else + run_tests "$@" + fi ;; monitoring) # TODO: Impl logic for starting a monitoring node. 
diff --git a/docker/test.env b/docker/test.env
index fd2a7c876b7..c129a72a979 100644
--- a/docker/test.env
+++ b/docker/test.env
@@ -1,60 +1,94 @@
-###
-# Configuration Variables
-# These variables are used to configure the zebra node
-# Check the entrypoint.sh script for more details
-###
-
-# Set this to change the default log level (must be set at build time)
-RUST_LOG=info
-# This variable forces the use of color in the logs
-ZEBRA_FORCE_USE_COLOR=1
-LOG_COLOR=true
-# Path to the config file. This variable has a default set in entrypoint.sh
-# ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml
-# [network]
-NETWORK=Mainnet
-# [state]
-# Set this to change the default cached state directory
-ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache
-LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache
-# [tracing]
-LOG_COLOR=false
-TRACING_ENDPOINT_ADDR=0.0.0.0
-TRACING_ENDPOINT_PORT=3000
-
-####
-# Test Variables
-# These variables are used to run tests in the Dockerfile
-# Check the entrypoint.sh script for more details
-####
+# Configuration variables for running Zebra in Docker
+
+# Sets the network Zebra will run on.
+#
+# NETWORK=Mainnet
+
+# Zebra's RPC server is disabled by default. To enable it, set its port number.
+#
+# ZEBRA_RPC_PORT=8232 # Default RPC port number on Mainnet.
+# ZEBRA_RPC_PORT=18232 # Default RPC port number on Testnet.
+
+# To disable cookie authentication, set the value below to false.
+#
+# ENABLE_COOKIE_AUTH=true
+
+# Sets a custom directory for the state and network caches. Zebra will also
+# store its cookie authentication file in this directory.
+#
+# ZEBRA_CACHE_DIR="/home/zebra/.cache/zebra"
+
+# Sets custom Cargo features. Available features are listed at
+# <https://docs.rs/zebrad/latest/zebrad/index.html#zebra-feature-flags>.
+#
+# Must be set at build time.
+#
+# FEATURES=""
+
+# Logging to a file is disabled by default. To enable it, uncomment the line
+# below and optionally set your own path.
+# +# LOG_FILE="/home/zebra/.local/state/zebrad.log" + +# Zebra recognizes whether its logs are being written to a terminal or a file, +# and uses colored logs for terminals and uncolored logs for files. Setting the +# variable below to true will force colored logs even for files and setting it +# to false will disable colors even for terminals. +# +# LOG_COLOR=true + +# To disable logging to journald, set the value to false. +# +# USE_JOURNALD=true + +# If you are going to use Zebra as a backend for a mining pool, set your mining +# address. +# +# MINER_ADDRESS="your_mining_address" + +# Controls the output of `env_logger`: +# https://docs.rs/env_logger/latest/env_logger/ +# +# Must be set at build time. +# +# RUST_LOG=info # Unit tests -# TODO: These variables are evaluated to any value, even setting a NULL value will evaluate to true + +# TODO: These variables are evaluated to any value, even setting a NULL value +# will evaluate to true. +# # TEST_FAKE_ACTIVATION_HEIGHTS= -# ZEBRA_SKIP_NETWORK_TESTS -# ZEBRA_SKIP_IPV6_TESTS +# ZEBRA_SKIP_NETWORK_TESTS= +# ZEBRA_SKIP_IPV6_TESTS= RUN_ALL_TESTS= -RUN_ALL_EXPERIMENTAL_TESTS= TEST_ZEBRA_EMPTY_SYNC= ZEBRA_TEST_LIGHTWALLETD= + # Integration Tests -# Most of these tests require a cached state directory to save the network state + +# Most of these tests require a cached state directory to save the network state. TEST_DISK_REBUILD= -# These tests needs a Zebra cached state TEST_CHECKPOINT_SYNC= GENERATE_CHECKPOINTS_MAINNET= GENERATE_CHECKPOINTS_TESTNET= TEST_UPDATE_SYNC= -# These tests need a Lightwalletd binary + a Zebra cached state +TEST_SCANNER= + +# These tests need a Lightwalletd binary + a Zebra cached state. TEST_LWD_RPC_CALL= TEST_GET_BLOCK_TEMPLATE= TEST_SUBMIT_BLOCK= -# These tests need a Lightwalletd binary + Lightwalletd cached state + a Zebra cached state + +# These tests need a Lightwalletd binary + Lightwalletd cached state + a Zebra +# cached state. 
TEST_LWD_UPDATE_SYNC= TEST_LWD_GRPC= TEST_LWD_TRANSACTIONS= + # Full sync tests -# These tests could take a long time to run, depending on the network + +# These tests take 3 days on Mainnet and one day on Testnet. FULL_SYNC_MAINNET_TIMEOUT_MINUTES= FULL_SYNC_TESTNET_TIMEOUT_MINUTES= TEST_LWD_FULL_SYNC= diff --git a/docker/zcash.conf b/docker/zcash.conf new file mode 100644 index 00000000000..22f9ab8495d --- /dev/null +++ b/docker/zcash.conf @@ -0,0 +1,2 @@ +rpcpassword=none +rpcbind=zebra diff --git a/prometheus.yaml b/prometheus.yaml index 5501da2b3f1..86ca1d5e7e4 100644 --- a/prometheus.yaml +++ b/prometheus.yaml @@ -1,7 +1,6 @@ scrape_configs: - - job_name: 'zebrad' + - job_name: "zebrad" scrape_interval: 500ms - metrics_path: '/' + metrics_path: "/" static_configs: - - targets: ['localhost:9999'] - + - targets: ["localhost:9999"] diff --git a/zebrad/tests/common/configs/custom-conf.toml b/zebrad/tests/common/configs/custom-conf.toml new file mode 100644 index 00000000000..98b22ecdc0f --- /dev/null +++ b/zebrad/tests/common/configs/custom-conf.toml @@ -0,0 +1,54 @@ +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true +extra_coinbase_data = "do you even shield?" 
+ +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +cookie_dir = "cache_dir" +debug_force_finished_sync = false +enable_cookie_auth = true +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/configs/v2.1.0.toml b/zebrad/tests/common/configs/v2.1.0.toml deleted file mode 100644 index 54613ba842a..00000000000 --- a/zebrad/tests/common/configs/v2.1.0.toml +++ /dev/null @@ -1,85 +0,0 @@ -# Default configuration for zebrad. -# -# This file can be used as a skeleton for custom configs. -# -# Unspecified fields use default values. Optional fields are Some(field) if the -# field is present and None if it is absent. -# -# This file is generated as an example using zebrad's current defaults. -# You should set only the config options you want to keep, and delete the rest. -# Only a subset of fields are present in the skeleton, since optional values -# whose default is None are omitted. -# -# The config format (including a complete list of sections and fields) is -# documented here: -# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html -# -# zebrad attempts to load configs in the following order: -# -# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; -# 2. 
The file `zebrad.toml` in the users's preference directory (platform-dependent); -# 3. The default config. -# -# The user's preference directory and the default path to the `zebrad` config are platform dependent, -# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : -# -# | Platform | Value | Example | -# | -------- | ------------------------------------- | ---------------------------------------------- | -# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | -# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | -# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | - -[consensus] -checkpoint_sync = true - -[mempool] -eviction_memory_time = "1h" -tx_cost_limit = 80000000 - -[metrics] - -[mining] -debug_like_zcashd = true - -[network] -cache_dir = true -crawl_new_peer_interval = "1m 1s" -initial_mainnet_peers = [ - "dnsseed.z.cash:8233", - "dnsseed.str4d.xyz:8233", - "mainnet.seeder.zfnd.org:8233", - "mainnet.is.yolo.money:8233", -] -initial_testnet_peers = [ - "dnsseed.testnet.z.cash:18233", - "testnet.seeder.zfnd.org:18233", - "testnet.is.yolo.money:18233", -] -listen_addr = "0.0.0.0:8233" -max_connections_per_ip = 1 -network = "Mainnet" -peerset_initial_target_size = 25 - -[rpc] -cookie_dir = "cache_dir" -debug_force_finished_sync = false -enable_cookie_auth = true -parallel_cpu_threads = 0 - -[state] -cache_dir = "cache_dir" -delete_old_database = true -ephemeral = false - -[sync] -checkpoint_verify_concurrency_limit = 1000 -download_concurrency_limit = 50 -full_verify_concurrency_limit = 20 -parallel_cpu_threads = 0 - -[tracing] -buffer_limit = 128000 -force_use_color = false -use_color = true -use_journald = false - From 14459e7cee33694eb78b94046b00f0c2eed18405 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 19 Feb 2025 19:31:20 +0000 Subject: [PATCH 102/245] fix(ci): improve workflow conditions for 
cached disk jobs (#9274) * fix(ci): Improve workflow conditions for cached disk jobs Refactor GitHub workflow conditions to: - Handle workflow dispatch events more precisely - Prevent running cached disk jobs on forked PRs - Ensure consistent behavior across different deployment workflows - Avoid skipping main branch deployments - Updated the if condition for the deploy-nodes job to ensure it only runs when the build job runs successfully and is not skipped. * fix(ci): disable custom Zebra config test and fix registry condition - Comment out the `test-zebra-conf-path` job in the deployment workflow - Fix a syntax error in the GCP resource deletion workflow's condition - Remove an extra single quote in the `if` condition for the `clean-registries` job --- .../cd-deploy-nodes-gcp.patch-external.yml | 2 +- .../workflows/cd-deploy-nodes-gcp.patch.yml | 2 +- .github/workflows/cd-deploy-nodes-gcp.yml | 30 ++++++++++--------- .../workflows/chore-delete-gcp-resources.yml | 2 +- 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 4cb146d4029..05ae0c1127f 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -15,7 +15,7 @@ jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) get-disk-name: name: Get disk name / Get Mainnet cached disk - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} + if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} runs-on: ubuntu-latest steps: - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml index b5ea40a0e2e..2d9a4b80373 100644 --- 
a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -31,7 +31,7 @@ jobs: get-disk-name: name: Get disk name / Get Mainnet cached disk runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} + if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} steps: - run: 'echo "No build required"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 3577a70711e..cd6349a5264 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -148,28 +148,30 @@ jobs: # # Passes the disk name to subsequent jobs using `cached_disk_name` output # + # For push events, this job always runs. + # For workflow_dispatch events, it runs only if inputs.need_cached_disk is true. + # PRs from forked repositories are skipped. get-disk-name: name: Get disk name uses: ./.github/workflows/sub-find-cached-disks.yml - # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. - # This workflow also runs on release tags, the event name check will run it on releases. 
- if: ${{ inputs.need_cached_disk && github.event_name != 'release' && !(github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork) }} + if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} - disk_prefix: zebrad-cache + disk_prefix: zebrad-cache disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} + # TODO: Re-enable this test once we have a way to mount a custom config file just for this test # Test that Zebra works using $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test CD custom Docker config file - needs: build - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: "custom-conf" - docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' - grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' + # test-zebra-conf-path: + # name: Test CD custom Docker config file + # needs: build + # uses: ./.github/workflows/sub-test-zebra-config.yml + # with: + # test_id: "custom-conf" + # docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + # test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' + # grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git @@ -265,7 +267,7 @@ jobs: permissions: contents: "read" id-token: "write" - if: ${{ !cancelled() && !failure() && github.repository_owner == 'ZcashFoundation' && ((github.event_name == 'push' && github.ref_name == 
'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} + if: ${{ !cancelled() && !failure() && needs.build.result == 'success' && github.repository_owner == 'ZcashFoundation' && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} steps: - uses: actions/checkout@v4.2.2 diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 92d9103480b..e611498fdb2 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -106,7 +106,7 @@ jobs: # The same artifacts are used for both mainnet and testnet. clean-registries: name: Delete unused artifacts in registry - if: github.repository_owner == 'ZcashFoundation'' + if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest permissions: contents: 'read' From 0b8bef37acb1e435a244b7e705102baba600b885 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Feb 2025 19:53:46 +0000 Subject: [PATCH 103/245] build(deps): bump the devops group across 1 directory with 3 updates (#9275) Bumps the devops group with 3 updates in the / directory: [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action), [docker/build-push-action](https://github.com/docker/build-push-action) and [docker/scout-action](https://github.com/docker/scout-action). 
Updates `docker/setup-buildx-action` from 3.8.0 to 3.9.0 - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v3.8.0...v3.9.0) Updates `docker/build-push-action` from 6.13.0 to 6.14.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.13.0...v6.14.0) Updates `docker/scout-action` from 1.16.1 to 1.16.3 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.16.1...v1.16.3) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/scout-action dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/sub-build-docker-image.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 7a64abaa22b..4bfdaefb107 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -147,7 +147,7 @@ jobs: # Setup Docker Buildx to use Docker Build Cloud - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.8.0 + uses: docker/setup-buildx-action@v3.9.0 with: version: "lab:latest" driver: cloud @@ -156,7 +156,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.13.0 + uses: docker/build-push-action@v6.14.0 with: target: ${{ inputs.dockerfile_target }} context: . @@ -187,7 +187,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.16.1 + uses: docker/scout-action@v1.16.3 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. 
From c4e8e60c215f6a1d382256cc2463a795e5d7d7f6 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 20 Feb 2025 09:00:03 +0000 Subject: [PATCH 104/245] fix(ci): temporarily disable `test-zebra-conf-path` (#9279) --- .github/workflows/cd-deploy-nodes-gcp.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index cd6349a5264..edce86135b9 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -256,7 +256,8 @@ jobs: build, versioning, test-configuration-file, - test-zebra-conf-path, + # TODO: Re-enable this test once we have a way to mount a custom config file just for this test + # test-zebra-conf-path, get-disk-name, ] runs-on: ubuntu-latest From 361fa65039cfea344136e3d7152d1d918e37f4e1 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 21 Feb 2025 16:03:51 +0100 Subject: [PATCH 105/245] chore: Update the PR template (#9277) * Update PR template * Don't automatically assign the `C-trivial` label * Use `C-exclude-from-changelog` label * Simplify the PR template * Simplify the PR template * Simplify the PR template * Update .github/pull_request_template.md Co-authored-by: Pili Guerra <1311133+mpguerra@users.noreply.github.com> * grammar --------- Co-authored-by: Pili Guerra <1311133+mpguerra@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/release.md | 2 +- .../release-checklist.md | 2 +- .github/dependabot.yml | 4 +- .github/pull_request_template.md | 48 ++--- .github/release-drafter.yml | 174 +++++++----------- 5 files changed, 93 insertions(+), 137 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 080bc385c20..dc42b4aa05e 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -2,7 +2,7 @@ name: "🚀 Zebra Release" about: 'Zebra team use only' title: 'Publish next Zebra release: (version)' -labels: 'A-release, C-trivial, 
P-Medium :zap:' +labels: 'A-release, C-exclude-from-changelog, P-Medium :zap:' assignees: '' --- diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index d71683b04ba..107f3f413fe 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -2,7 +2,7 @@ name: 'Release Checklist Template' about: 'Checklist to create and publish a Zebra release' title: 'Release Zebra (version)' -labels: 'A-release, C-trivial, P-Critical :ambulance:' +labels: 'A-release, C-exclude-from-changelog, P-Critical :ambulance:' assignees: '' --- diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 6fa1f4ef67e..7b0b210e1e1 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,7 +11,7 @@ updates: # Limit dependabot to 1 PR per reviewer open-pull-requests-limit: 6 labels: - - 'C-trivial' + - 'C-exclude-from-changelog' - 'A-rust' - 'A-dependencies' - 'P-Low :snowflake:' @@ -46,7 +46,7 @@ updates: timezone: America/New_York open-pull-requests-limit: 4 labels: - - 'C-trivial' + - 'C-exclude-from-changelog' - 'A-devops' - 'A-dependencies' - 'P-Low :snowflake:' diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 2c96aae4c07..57eb5bc62f1 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,54 +1,46 @@ -## Motivation - -### Specifications & References +## Motivation ## Solution - + ### Tests +### Specifications & References + + + ### Follow-up Work -### PR Author's Checklist +### PR Checklist - + -- [ ] The PR name will make sense to users. -- [ ] The PR provides a CHANGELOG summary. +- [ ] The PR name is suitable for the release notes. - [ ] The solution is tested. - [ ] The documentation is up to date. - [ ] The PR has a priority label. - -### PR Reviewer's Checklist - - - -- [ ] The PR Author's checklist is complete. -- [ ] The PR resolves the issue. 
- +- [ ] If the PR shouldn't be in the release notes, it has the + `C-exclude-from-changelog` label. diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index eacf6c3ba70..1c9c002609b 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -6,77 +6,44 @@ # Automatically label PRs based on their branch, title, or changed files. # This helps categorise PRs in the CHANGELOG. autolabeler: - - label: 'C-security' + - label: "C-security" branch: - - '/secur/i' + - "/secur/i" title: - - '/secur/i' - - '/crash/i' - - '/destr/i' - - '/unsafe/i' - - label: 'C-deprecated' + - "/secur/i" + - "/crash/i" + - "/destr/i" + - "/unsafe/i" + - label: "C-deprecated" branch: - - '/deprecat/i' + - "/deprecat/i" title: - - '/deprecat/i' - - label: 'extra-reviews' + - "/deprecat/i" + - label: "extra-reviews" branch: - - '/remov/i' - - '/deprecat/i' + - "/remov/i" + - "/deprecat/i" title: - - '/remov/i' - - '/deprecat/i' - - '/crash/i' - - '/destr/i' - - '/unsafe/i' - - label: 'C-feature' + - "/remov/i" + - "/deprecat/i" + - "/crash/i" + - "/destr/i" + - "/unsafe/i" + - label: "C-feature" branch: - - '/feat/i' + - "/feat/i" title: - - '/feat/i' - - label: 'C-bug' + - "/feat/i" + - label: "C-bug" branch: - - '/bug/i' + - "/bug/i" title: - - '/bug/i' - # Changes that are almost always trivial for users - - label: 'C-trivial' - branch: - - '/clean/i' - - '/chore/i' - - '/clippy/i' - - '/test/i' - title: - - '/clean/i' - - '/chore/i' - - '/clippy/i' - - '/test/i' - - '/(ci)/i' - - '/(cd)/i' - - '/job/i' - - '/patch/i' - - '/actions/i' - files: - # Regular changes that don't need to go in the CHANGELOG - - 'CHANGELOG.md' - - 'zebra-consensus/src/checkpoint/*-checkpoints.txt' - # Developer-only changes - - '.gitignore' - - '.dockerignore' - # Test-only changes - - 'zebra-test' - - '.cargo/config.toml' - - 'clippy.toml' - # CI-only changes - - '.github' - - '.codespellrc' - - 'codecov.yml' - - 'deny.toml' + - "/bug/i" # The release name, tag, and settings 
for the draft CHANGELOG. -name-template: 'Zebra $RESOLVED_VERSION' -tag-template: 'v$RESOLVED_VERSION' -tag-prefix: 'v' +name-template: "Zebra $RESOLVED_VERSION" +tag-template: "v$RESOLVED_VERSION" +tag-prefix: "v" # Do not mark the draft release as a pre-release prerelease: false # Do not include pre-releases in the draft release @@ -84,48 +51,46 @@ include-pre-releases: false # Categories in rough order of importance to users. # Based on https://keepachangelog.com/en/1.0.0/ -category-template: '### $TITLE' +category-template: "### $TITLE" categories: - - title: 'Security' + - title: "Security" labels: - - 'C-security' + - "C-security" # Other labels that are usually security issues - - 'I-invalid-data' - - 'I-consensus' - - 'I-crash' - - 'I-destructive' - - 'I-hang' - - 'I-lose-funds' - - 'I-privacy' - - 'I-remote-node-overload' - - 'I-unbounded-growth' - - 'I-memory-safety' - - title: 'Removed' + - "I-invalid-data" + - "I-consensus" + - "I-crash" + - "I-destructive" + - "I-hang" + - "I-lose-funds" + - "I-privacy" + - "I-remote-node-overload" + - "I-unbounded-growth" + - "I-memory-safety" + - title: "Removed" labels: - - 'C-removal' - - title: 'Deprecated' + - "C-removal" + - title: "Deprecated" labels: - - 'C-deprecation' + - "C-deprecation" # TODO: when release drafter has per-category templates, add this to the Deprecated category template: # 'These features might be removed in Zebra $NEXT_MINOR_VERSION' - - title: 'Added' + - title: "Added" labels: - - 'C-feature' - - title: 'Changed' + - "C-feature" + - title: "Changed" labels: - - 'C-enhancement' - - title: 'Fixed' + - "C-enhancement" + - title: "Fixed" labels: - - 'C-bug' + - "C-bug" # Other labels that are usually bugs - - 'I-build-fail' - - 'I-integration-fail' - - 'I-panic' - # TODO: if we're happy with the trivial PRs, use "exclude-labels:" instead - - title: 'Trivial *TODO:* put this in a PR comment, not the CHANGELOG' + - "I-build-fail" + - "I-integration-fail" + - "I-panic" + - title: "Excluded 
*TODO:* put this in a PR comment, not the CHANGELOG" labels: - - 'C-trivial' - - 'C-cleanup' + - "C-exclude-from-changelog" # The next release's $RESOLVED_VERSION, based on the labels of the PRs in the release. # @@ -139,15 +104,15 @@ version-resolver: # - # network upgrade release PRs minor: labels: - - 'C-feature' - - 'C-breaking' - - 'C-removal' - - 'C-deprecation' + - "C-feature" + - "C-breaking" + - "C-removal" + - "C-deprecation" # We increment the patch version for every release default: patch # How PR names get turned into CHANGELOG entries. -change-template: '- $TITLE ([#$NUMBER]($URL))' +change-template: "- $TITLE ([#$NUMBER]($URL))" sort-by: title sort-direction: ascending # Characters escaped when converting PR titles to CHANGELOG entries. @@ -156,31 +121,30 @@ change-title-escapes: '\<*_&#@' # Strip PR series numbers, leading spaces, and conventional commit prefixes from PR titles. replacers: - search: '/- [0-9\. ]*([a-zA-Z0-9\(\)!]+:)?/' - replace: '- ' + replace: "- " # The list of contributors to each release. exclude-contributors: - - 'dependabot' # 'dependabot[bot]' - - 'mergifyio' # 'mergify[bot]' + - "dependabot" # 'dependabot[bot]' + - "mergifyio" # 'mergify[bot]' # The template for the draft CHANGELOG. 
template: | ## [Zebra $RESOLVED_VERSION](https://github.com/ZcashFoundation/zebra/releases/tag/v$RESOLVED_VERSION) - *TODO*: date - + This release *TODO*: a summary of the significant user-visible changes in the release - + ### Breaking Changes - + This release has the following breaking changes: - *TODO*: Check the `Removed` and `Deprecated` sections for any breaking changes - *TODO*: Add a short description of the user impact of each breaking change, and any actions users need to take - + $CHANGES - + ### Contributors - + Thank you to everyone who contributed to this release, we couldn't make Zebra without you: $CONTRIBUTORS - - + # the trailing newlines in the template are deliberate From 2a184e74b1e42107cfef7ebd1a7228e0741f4320 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 24 Feb 2025 10:17:34 +0100 Subject: [PATCH 106/245] change(ci): Remove check for custom configs from GCP deployments (#9280) --- .../cd-deploy-nodes-gcp.patch-external.yml | 8 -------- .../workflows/cd-deploy-nodes-gcp.patch.yml | 7 ------- .github/workflows/cd-deploy-nodes-gcp.yml | 19 ++----------------- 3 files changed, 2 insertions(+), 32 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 05ae0c1127f..7eade3ad5d2 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -45,11 +45,3 @@ jobs: if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' - - test-zebra-conf-path: - name: Test CD custom Docker config file / Test custom-conf in Docker - needs: build - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml index 
2d9a4b80373..922749a2901 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -55,10 +55,3 @@ jobs: if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' - - test-zebra-conf-path: - name: Test CD custom Docker config file / Test custom-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index edce86135b9..bce32c5db23 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -6,7 +6,6 @@ # 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git. # 3. `test-configuration-file`: Validates Zebra using the default config with the latest version. # 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration. -# 5. `test-zebra-conf-path`: Verifies Zebra with a custom Docker config file. # 6. `deploy-nodes`: Deploys Managed Instance Groups (MiGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MiG. For releases, MiGs are replaced only if deploying the same major version; otherwise, a new major version is deployed. # 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted. 
# @@ -157,22 +156,10 @@ jobs: if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} - disk_prefix: zebrad-cache + disk_prefix: zebrad-cache disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} - # TODO: Re-enable this test once we have a way to mount a custom config file just for this test - # Test that Zebra works using $ZEBRA_CONF_PATH config - # test-zebra-conf-path: - # name: Test CD custom Docker config file - # needs: build - # uses: ./.github/workflows/sub-test-zebra-config.yml - # with: - # test_id: "custom-conf" - # docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - # test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' - # grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' - # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git # @@ -256,8 +243,6 @@ jobs: build, versioning, test-configuration-file, - # TODO: Re-enable this test once we have a way to mount a custom config file just for this test - # test-zebra-conf-path, get-disk-name, ] runs-on: ubuntu-latest @@ -390,7 +375,7 @@ jobs: failure-issue: name: Open or update issues for release failures # When a new job is added to this workflow, add it to this list. - needs: [ versioning, build, deploy-nodes ] + needs: [versioning, build, deploy-nodes] # Only open tickets for failed or cancelled jobs that are not coming from PRs. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) 
From 80a25c1c4c20c2c677cff0240b5dcbbad8a673ae Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 24 Feb 2025 15:34:19 +0100 Subject: [PATCH 107/245] Fix Clippy lints for Rust 1.85 (#9289) --- zebra-consensus/src/checkpoint.rs | 1 - zebra-rpc/src/server/http_request_compatibility.rs | 2 +- zebra-state/src/service.rs | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index fd355f0b7ef..70f36d9c2da 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -1164,7 +1164,6 @@ where let tip = match state_service .oneshot(zs::Request::Tip) .await - .map_err(Into::into) .map_err(VerifyCheckpointError::Tip)? { zs::Response::Tip(tip) => tip, diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 4e7aa3f94cc..a5e0c41f57b 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -66,7 +66,7 @@ impl HttpRequestMiddleware { /// Check if the request is authenticated. 
pub fn check_credentials(&self, headers: &header::HeaderMap) -> bool { - self.cookie.as_ref().map_or(true, |internal_cookie| { + self.cookie.as_ref().is_none_or(|internal_cookie| { headers .get(header::AUTHORIZATION) .and_then(|auth_header| auth_header.to_str().ok()) diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 9e3fbfed2d4..50514d9859e 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -935,7 +935,6 @@ impl Service for StateService { // https://github.com/rust-lang/rust/issues/70142 .and_then(convert::identity) .map(Response::Committed) - .map_err(Into::into) } .instrument(span) .boxed() @@ -983,7 +982,6 @@ impl Service for StateService { // https://github.com/rust-lang/rust/issues/70142 .and_then(convert::identity) .map(Response::Committed) - .map_err(Into::into) } .instrument(span) .boxed() From 38b7313ea1df6aa800ca81dbe08399f6ee60ccff Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 24 Feb 2025 15:35:39 +0100 Subject: [PATCH 108/245] chore: bump Rust from 1.84.0 to 1.85.0 in Docker (#9290) --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 0868ef98917..2a36bc8db3f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ # We first set default values for build arguments used across the stages. # Each stage must define the build arguments (ARGs) it uses. 
-ARG RUST_VERSION=1.84.0 +ARG RUST_VERSION=1.85.0 # In this stage we download all system requirements to build the project # From 79e18e045ce8ca11c4078495e2425cc204cf03c2 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 24 Feb 2025 15:35:59 +0100 Subject: [PATCH 109/245] chore(CI): Update codespell's config (#9288) --- .codespellrc | 2 +- zebra-state/src/request.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.codespellrc b/.codespellrc index d14b397da25..8c06f3917e1 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,4 +1,4 @@ [codespell] -ignore-words-list = crate,Sur,inout,Groth,groth,re-use, +ignore-words-list = crate,Sur,inout,Groth,groth,re-use,abl, exclude-file = book/mermaid.min.js skip = ./zebra-rpc/qa/rpc-tests,./supply-chain diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index d5eb92e2d90..1535fa81c1f 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -734,7 +734,7 @@ pub enum Request { /// This request is purely informational, and there are no guarantees about /// whether the UTXO remains unspent or is on the best chain, or any chain. /// Its purpose is to allow asynchronous script verification or to wait until - /// the UTXO arrives in the state before validating dependant transactions. + /// the UTXO arrives in the state before validating dependent transactions. 
/// /// # Correctness /// From 29ed501f11bf00c11214ee151a6f892619e72183 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 26 Feb 2025 08:08:38 -0400 Subject: [PATCH 110/245] allow deserialization of Balance with skipped ID field (#9299) --- zebra-rpc/src/methods/types/get_blockchain_info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-rpc/src/methods/types/get_blockchain_info.rs b/zebra-rpc/src/methods/types/get_blockchain_info.rs index 57eda10faec..82b5644a1ea 100644 --- a/zebra-rpc/src/methods/types/get_blockchain_info.rs +++ b/zebra-rpc/src/methods/types/get_blockchain_info.rs @@ -12,7 +12,7 @@ use super::*; #[serde(rename_all = "camelCase")] pub struct Balance { /// Name of the pool - #[serde(skip_serializing_if = "String::is_empty")] + #[serde(skip_serializing_if = "String::is_empty", default)] id: String, /// Total amount in the pool, in ZEC chain_value: Zec, From 797ba629771f5932aa36338662e63d2019bc26d0 Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Fri, 28 Feb 2025 19:03:37 +0300 Subject: [PATCH 111/245] fix: fix redundant #[non_exhaustive] attributes on enum variants (#9309) --- zebra-script/src/lib.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index 096a36be9e8..71ebab835fc 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -31,22 +31,16 @@ use zebra_chain::{ #[non_exhaustive] pub enum Error { /// script verification failed - #[non_exhaustive] ScriptInvalid, /// could not deserialize tx - #[non_exhaustive] TxDeserialize, /// input index out of bounds - #[non_exhaustive] TxIndex, /// tx has an invalid size - #[non_exhaustive] TxSizeMismatch, /// tx is a coinbase transaction and should not be verified - #[non_exhaustive] TxCoinbase, /// unknown error from zcash_script: {0} - #[non_exhaustive] Unknown(zcash_script_error_t), } From de7e5b547f0d5e0b16e86ceb2a5a8ad5a231b21b Mon Sep 17 00:00:00 2001 From: Marek 
Date: Mon, 3 Mar 2025 19:21:03 +0100 Subject: [PATCH 112/245] refactor(docker): allow r/w access in mounted volumes (#9281) * Switch to a non-privileged user in tests * Change test env setup * Remove unneeded ARGs * Simplify UID & GID handling in `runtime` target * Simplify docs * refactor(docker): Improve user and permission handling in Dockerfiles - Add gosu for flexible non-root user execution - Enhance user and group creation with configurable UID/GID - Modify entrypoint script to support dynamic user switching - Improve cache and log directory permission management - Update comments to clarify user and permission strategies * refactor(docker): Improve Zebra config file handling in entrypoint script - Enhance error handling for missing config file (now exits with error) - Simplify config preparation logic by removing redundant file copying - Update comments to reflect new config file handling approach - Ensure consistent use of ZEBRA_CONF_PATH throughout the script * refactor(docker): Enhance container user security and configuration - Increase UID/GID to 10001 to minimize host system user conflicts - Remove `--system` flag from user and group creation to prevent potential issues - Add detailed comments explaining UID/GID selection rationale - Improve security by using high UID/GID values to reduce namespace collision risks - Remove redundant `chmod` for entrypoint script Co-authored-by: Marek --------- Co-authored-by: Gustavo Valverde Co-authored-by: Gustavo Valverde --- book/src/user/docker.md | 2 +- docker/Dockerfile | 111 +++++++++++++++++++++++++--------------- docker/entrypoint.sh | 101 ++++++++++++++++++++---------------- 3 files changed, 129 insertions(+), 85 deletions(-) diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 1dbe9425bee..90dad34a2d7 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -21,7 +21,7 @@ And mount it before you start the container: ```shell docker run \ - --mount 
type=volume,source=zebrad-cache,target=/home/zebra/.cache/zebra \ + --mount source=zebrad-cache,target=/home/zebra/.cache/zebra \ --name zebra \ zfnd/zebra ``` diff --git a/docker/Dockerfile b/docker/Dockerfile index 2a36bc8db3f..064c57212ec 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -# check=skip=UndefinedVar +# check=skip=UndefinedVar,UserExist # We use gosu in the entrypoint instead of USER directive # If you want to include a file in the Docker image, add it to .dockerignore. # @@ -13,8 +13,18 @@ # We first set default values for build arguments used across the stages. # Each stage must define the build arguments (ARGs) it uses. -ARG RUST_VERSION=1.85.0 +# Build zebrad with these features +# +# Keep these argument defaults in sync with GitHub vars.RUST_PROD_FEATURES +# https://github.com/ZcashFoundation/zebra/settings/variables/actions +ARG FEATURES="default-release-binaries" + +ARG USER="zebra" +ARG UID=10001 +ARG GID=10001 +ARG HOME="/home/${USER}" +ARG RUST_VERSION=1.85.0 # In this stage we download all system requirements to build the project # # It also captures all the build arguments to be used as environment variables. @@ -47,6 +57,14 @@ ARG SHORT_SHA # https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 ENV SHORT_SHA=${SHORT_SHA:-} +# Set the working directory for the build. +ARG HOME +WORKDIR ${HOME} +ENV HOME=${HOME} +ENV CARGO_HOME="${HOME}/.cargo/" + +ENV USER=${USER} + # This stage builds tests without running them. # # We also download needed dependencies for tests to work, from other images. @@ -60,19 +78,17 @@ ENV FEATURES=${FEATURES} ARG ZEBRA_SKIP_IPV6_TESTS ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} -# Set up the test environment the same way the production environment is. 
This -# is not very DRY as the same code repeats for the `runtime` target below, but I -# didn't find a suitable way to share the setup between the two targets. +# This environment setup is almost identical to the `runtime` target so that the +# `tests` target differs minimally. In fact, a subset of this setup is used for +# the `runtime` target. -ENV UID=101 -ENV GID=${UID} -ENV USER="zebra" -ENV HOME="/home/${USER}" -ENV CARGO_HOME="${HOME}/.cargo/" +ARG UID +ARG GID +ARG HOME +ARG USER -RUN adduser --system --gid ${GID} --uid ${UID} --home ${HOME} ${USER} - -WORKDIR ${HOME} +RUN addgroup --gid ${GID} ${USER} && \ + adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} # Build Zebra test binaries, but don't run them @@ -110,26 +126,28 @@ RUN --mount=type=bind,source=zebrad,target=zebrad \ # Copy the lightwalletd binary and source files to be able to run tests COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/ +# Copy the gosu binary to be able to run the entrypoint as non-root user +# and allow to change permissions for mounted cache directories +COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ + # Use the same default config as in the production environment. ENV ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} -ARG LWD_CACHE_DIR ENV LWD_CACHE_DIR="${HOME}/.cache/lwd" -RUN mkdir -p ${LWD_CACHE_DIR} -RUN chown -R ${UID}:${GID} ${LWD_CACHE_DIR} +RUN mkdir -p ${LWD_CACHE_DIR} && \ + chown -R ${UID}:${GID} ${LWD_CACHE_DIR} # Use the same cache dir as in the production environment. 
-ARG ZEBRA_CACHE_DIR ENV ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" -RUN mkdir -p ${ZEBRA_CACHE_DIR} -RUN chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} +RUN mkdir -p ${ZEBRA_CACHE_DIR} && \ + chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} -COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh COPY ./ ${HOME} - RUN chown -R ${UID}:${GID} ${HOME} +COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh + ENTRYPOINT [ "entrypoint.sh", "test" ] # In this stage we build a release (generate the zebrad binary) @@ -140,6 +158,7 @@ ENTRYPOINT [ "entrypoint.sh", "test" ] FROM deps AS release ARG FEATURES +ARG HOME RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ --mount=type=bind,source=tower-fallback,target=tower-fallback \ @@ -157,47 +176,46 @@ RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ --mount=type=bind,source=zebrad,target=zebrad \ --mount=type=bind,source=Cargo.toml,target=Cargo.toml \ --mount=type=bind,source=Cargo.lock,target=Cargo.lock \ - --mount=type=cache,target=${APP_HOME}/target/ \ + --mount=type=cache,target=${HOME}/target/ \ --mount=type=cache,target=/usr/local/cargo/git/db \ --mount=type=cache,target=/usr/local/cargo/registry/ \ cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \ - cp ${APP_HOME}/target/release/zebrad /usr/local/bin + cp ${HOME}/target/release/zebrad /usr/local/bin # This step starts from scratch using Debian and only adds the resulting binary # from the `release` stage. FROM debian:bookworm-slim AS runtime -COPY --from=release /usr/local/bin/zebrad /usr/local/bin/ -COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh - ARG FEATURES ENV FEATURES=${FEATURES} # Create a non-privileged system user for running `zebrad`. -ARG USER="zebra" +ARG USER ENV USER=${USER} # System users have no home dirs, but we set one for users' convenience. 
-ARG HOME="/home/zebra" +ARG HOME WORKDIR ${HOME} -# System UIDs should be set according to -# https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/uidrange.html. +# We use a high UID/GID (10001) to avoid overlap with host system users. +# This reduces the risk of container user namespace conflicts with host accounts, +# which could potentially lead to privilege escalation if a container escape occurs. # -# In Debian, the default dynamic range for system UIDs is defined by -# [FIRST_SYSTEM_UID, LAST_SYSTEM_UID], which is set to [100, 999] in -# `etc/adduser.conf`: -# https://manpages.debian.org/bullseye/adduser/adduser.8.en.html +# We do not use the `--system` flag for user creation since: +# 1. System user ranges (100-999) can collide with host system users +# (see: https://github.com/nginxinc/docker-nginx/issues/490) +# 2. There's no value added and warning messages can be raised at build time +# (see: https://github.com/dotnet/dotnet-docker/issues/4624) # -# Debian assigns GID 100 to group `users`, so we set UID = GID = 101 as the -# default value. -ARG UID=101 +# The high UID/GID values provide an additional security boundary in containers +# where user namespaces are shared with the host. +ARG UID ENV UID=${UID} -ARG GID=${UID} +ARG GID ENV GID=${GID} -RUN addgroup --system --gid ${GID} ${USER} -RUN adduser --system --gid ${GID} --uid ${UID} --home ${HOME} ${USER} +RUN addgroup --gid ${GID} ${USER} && \ + adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} # We set the default locations of the conf and cache dirs according to the XDG # spec: https://specifications.freedesktop.org/basedir-spec/latest/ @@ -211,7 +229,18 @@ ENV ZEBRA_CACHE_DIR=${ZEBRA_CACHE_DIR} RUN mkdir -p ${ZEBRA_CACHE_DIR} && chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} RUN chown -R ${UID}:${GID} ${HOME} -USER $USER + +# We're explicitly NOT using the USER directive here. 
+# Instead, we run as root initially and use gosu in the entrypoint.sh +# to step down to the non-privileged user. This allows us to change permissions +# on mounted volumes before running the application as a non-root user. +# User with UID=${UID} is created above and used via gosu in entrypoint.sh. + +# Copy the gosu binary to be able to run the entrypoint as non-root user +COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ + +COPY --from=release /usr/local/bin/zebrad /usr/local/bin/ +COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh ENTRYPOINT [ "entrypoint.sh" ] CMD ["zebrad"] diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index eca56196418..9e70675e1f8 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -12,36 +12,42 @@ set -eo pipefail # Exit early if `ZEBRA_CONF_PATH` does not point to a file. if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then - echo "the ZEBRA_CONF_PATH env var does not point to a Zebra conf file" + echo "ERROR: No Zebra config file found at ZEBRA_CONF_PATH (${ZEBRA_CONF_PATH})." + echo "Please ensure the file exists or mount your custom config file and set ZEBRA_CONF_PATH accordingly." exit 1 fi -# Generates a config file for Zebra using env vars set in "docker/.env" and -# prints the location of the generated config file. +# Define function to execute commands as the specified user +exec_as_user() { + if [[ "$(id -u)" = '0' ]]; then + exec gosu "${USER}" "$@" + else + exec "$@" + fi +} + +# Modifies the existing Zebra config file at ZEBRA_CONF_PATH using environment variables. # -# ## Positional Parameters +# The config options this function supports are also listed in the "docker/.env" file. # -# - "$1": the file to read the default config from +# This function modifies the existing file in-place and prints its location. prepare_conf_file() { - # Copy the default config to a new location for writing. - CONF=~/zebrad.toml - cp "${1}" "${CONF}" # Set a custom network. 
- if [[ "${NETWORK}" ]]; then - sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${CONF}" + if [[ -n "${NETWORK}" ]]; then + sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${ZEBRA_CONF_PATH}" fi # Enable the RPC server by setting its port. - if [[ "${ZEBRA_RPC_PORT}" ]]; then - sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${CONF}" - sed -i 's/ *# Mainnet$//' "${CONF}" - sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${CONF}" + if [[ -n "${ZEBRA_RPC_PORT}" ]]; then + sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${ZEBRA_CONF_PATH}" + sed -i 's/ *# Mainnet$//' "${ZEBRA_CONF_PATH}" + sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${ZEBRA_CONF_PATH}" fi # Disable or enable cookie authentication. - if [[ "${ENABLE_COOKIE_AUTH}" ]]; then - sed -i '/# enable_cookie_auth = true/s/^# //; s/true/'"${ENABLE_COOKIE_AUTH//\"/}"'/' "${CONF}" + if [[ -n "${ENABLE_COOKIE_AUTH}" ]]; then + sed -i '/# enable_cookie_auth = true/s/^# //; s/true/'"${ENABLE_COOKIE_AUTH//\"/}"'/' "${ZEBRA_CONF_PATH}" fi # Set a custom state, network and cookie cache dirs. @@ -49,42 +55,52 @@ prepare_conf_file() { # We're pointing all three cache dirs at the same location, so users will find # all cached data in that single location. We can introduce more env vars and # use them to set the cache dirs separately if needed. - if [[ "${ZEBRA_CACHE_DIR}" ]]; then + if [[ -n "${ZEBRA_CACHE_DIR}" ]]; then mkdir -p "${ZEBRA_CACHE_DIR//\"/}" - sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${CONF}" + sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${ZEBRA_CONF_PATH}" + # Fix permissions right after creating/configuring the directory + if [[ "$(id -u)" = '0' ]]; then + # Setting permissions for the cache directory + chown -R "${USER}:${USER}" "${ZEBRA_CACHE_DIR//\"/}" + fi fi # Enable the Prometheus metrics endpoint. 
if [[ "${FEATURES}" == *"prometheus"* ]]; then - sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${CONF}" + sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${ZEBRA_CONF_PATH}" fi # Enable logging to a file by setting a custom log file path. - if [[ "${LOG_FILE}" ]]; then + if [[ -n "${LOG_FILE}" ]]; then mkdir -p "$(dirname "${LOG_FILE//\"/}")" - sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${CONF}" + sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${ZEBRA_CONF_PATH}" + # Fix permissions right after creating/configuring the log directory + if [[ "$(id -u)" = '0' ]]; then + # Setting permissions for the log directory + chown -R "${USER}:${USER}" "$(dirname "${LOG_FILE//\"/}")" + fi fi # Enable or disable colored logs. - if [[ "${LOG_COLOR}" ]]; then - sed -i '/# force_use_color = true/s/^# //' "${CONF}" - sed -i '/use_color = true/s/true/'"${LOG_COLOR//\"/}"'/' "${CONF}" + if [[ -n "${LOG_COLOR}" ]]; then + sed -i '/# force_use_color = true/s/^# //' "${ZEBRA_CONF_PATH}" + sed -i '/use_color = true/s/true/'"${LOG_COLOR//\"/}"'/' "${ZEBRA_CONF_PATH}" fi # Enable or disable logging to systemd-journald. - if [[ "${USE_JOURNALD}" ]]; then - sed -i '/# use_journald = true/s/^# //; s/true/'"${USE_JOURNALD//\"/}"'/' "${CONF}" + if [[ -n "${USE_JOURNALD}" ]]; then + sed -i '/# use_journald = true/s/^# //; s/true/'"${USE_JOURNALD//\"/}"'/' "${ZEBRA_CONF_PATH}" fi # Set a mining address. - if [[ "${MINER_ADDRESS}" ]]; then - sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${CONF}" + if [[ -n "${MINER_ADDRESS}" ]]; then + sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${ZEBRA_CONF_PATH}" fi # Trim all comments and empty lines. 
- sed -i '/^#/d; /^$/d' "${CONF}" + sed -i '/^#/d; /^$/d' "${ZEBRA_CONF_PATH}" - echo "${CONF}" + echo "${ZEBRA_CONF_PATH}" } # Checks if a directory contains subdirectories @@ -116,11 +132,11 @@ check_directory_files() { # https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options, # - or be empty. # - The remaining params will be appended to a command starting with -# `exec cargo test ... -- ...` +# `exec_as_user cargo test ... -- ...` run_cargo_test() { # Start constructing the command, ensuring that $1 is enclosed in single # quotes as it's a feature list - local cmd="exec cargo test --locked --release --features '$1' --package zebrad --test acceptance -- --nocapture --include-ignored" + local cmd="exec_as_user cargo test --locked --release --features '$1' --package zebrad --test acceptance -- --nocapture --include-ignored" # Shift the first argument, as it's already included in the cmd shift @@ -153,23 +169,23 @@ run_tests() { # Run unit, basic acceptance tests, and ignored tests, only showing command # output if the test fails. If the lightwalletd environment variables are # set, we will also run those tests. - exec cargo test --locked --release --workspace --features "${FEATURES}" \ + exec_as_user cargo test --locked --release --workspace --features "${FEATURES}" \ -- --nocapture --include-ignored --skip check_no_git_refs_in_cargo_lock elif [[ "${RUN_CHECK_NO_GIT_REFS}" -eq "1" ]]; then # Run the check_no_git_refs_in_cargo_lock test. - exec cargo test --locked --release --workspace --features "${FEATURES}" \ + exec_as_user cargo test --locked --release --workspace --features "${FEATURES}" \ -- --nocapture --include-ignored check_no_git_refs_in_cargo_lock elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then # Run state tests with fake activation heights. 
- exec cargo test --locked --release --lib --features "zebra-test" \ + exec_as_user cargo test --locked --release --lib --features "zebra-test" \ --package zebra-state \ -- --nocapture --include-ignored with_fake_activation_heights elif [[ "${TEST_SCANNER}" -eq "1" ]]; then # Test the scanner. - exec cargo test --locked --release --package zebra-scan \ + exec_as_user cargo test --locked --release --package zebra-scan \ -- --nocapture --include-ignored scan_task_commands scan_start_where_left elif [[ "${TEST_ZEBRA_EMPTY_SYNC}" -eq "1" ]]; then @@ -257,16 +273,15 @@ run_tests() { run_cargo_test "${FEATURES}" "submit_block" else - exec "$@" + exec_as_user "$@" fi } # Main Script Logic +prepare_conf_file "${ZEBRA_CONF_PATH}" echo "Prepared the following Zebra config:" - -CONF_PATH=$(prepare_conf_file "${ZEBRA_CONF_PATH}") -cat "${CONF_PATH}" +cat "${ZEBRA_CONF_PATH}" # - If "$1" is "--", "-", or "zebrad", run `zebrad` with the remaining params. # - If "$1" is "tests": @@ -277,13 +292,13 @@ cat "${CONF_PATH}" case "$1" in --* | -* | zebrad) shift - exec zebrad --config "${CONF_PATH}" "$@" + exec_as_user zebrad --config "${ZEBRA_CONF_PATH}" "$@" ;; test) shift if [[ "$1" == "zebrad" ]]; then shift - exec zebrad --config "${CONF_PATH}" "$@" + exec_as_user zebrad --config "${ZEBRA_CONF_PATH}" "$@" else run_tests "$@" fi @@ -293,6 +308,6 @@ monitoring) : ;; *) - exec "$@" + exec_as_user "$@" ;; esac From f873aa12a6a0617d80ae5056389a485c2f430a9a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 10 Mar 2025 14:17:26 +0000 Subject: [PATCH 113/245] docs(decisions): add architectural decision records structure (#9310) * docs(decisions): add architectural decision records structure Create a structured decision records system to document important technical choices across multiple domains (DevOps, Network, Consensus, etc.). This implements a modified MADR template approach for preserving context, trade-offs, and reasoning behind significant architectural decisions. 
* fix(docs): suggestions from code review Co-authored-by: Marek --------- Co-authored-by: Marek --- docs/decisions/README.md | 22 ++++ docs/decisions/devops/0001-docker-high-uid.md | 51 ++++++++ docs/decisions/devops/0002-docker-use-gosu.md | 51 ++++++++ .../devops/0003-filesystem-hierarchy.md | 115 ++++++++++++++++++ docs/decisions/template.md | 49 ++++++++ 5 files changed, 288 insertions(+) create mode 100644 docs/decisions/README.md create mode 100644 docs/decisions/devops/0001-docker-high-uid.md create mode 100644 docs/decisions/devops/0002-docker-use-gosu.md create mode 100644 docs/decisions/devops/0003-filesystem-hierarchy.md create mode 100644 docs/decisions/template.md diff --git a/docs/decisions/README.md b/docs/decisions/README.md new file mode 100644 index 00000000000..91d5bd9188f --- /dev/null +++ b/docs/decisions/README.md @@ -0,0 +1,22 @@ +# Decision Log + +We capture important decisions with [architectural decision records](https://adr.github.io/). + +These records provide context, trade-offs, and reasoning taken at our community & technical cross-roads. Our goal is to preserve the understanding of the project growth, and capture enough insight to effectively revisit previous decisions. + +To get started, create a new decision record using the template: + +```sh +cp template.md NNNN-title-with-dashes.md +``` + +For more rationale for this approach, see [Michael Nygard's article](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions). + +We've inherited MADR [ADR template](https://adr.github.io/madr/), which is a bit more verbose than Nygard's original template. We may simplify it in the future. + +## Evolving Decisions + +Many decisions build on each other, a driver of iterative change and messiness +in software. By laying out the "story arc" of a particular system within the +application, we hope future maintainers will be able to identify how to rewind +decisions when refactoring the application becomes necessary. 
diff --git a/docs/decisions/devops/0001-docker-high-uid.md b/docs/decisions/devops/0001-docker-high-uid.md new file mode 100644 index 00000000000..bef88c1980a --- /dev/null +++ b/docs/decisions/devops/0001-docker-high-uid.md @@ -0,0 +1,51 @@ +--- +status: accepted +date: 2025-02-28 +story: Appropriate UID/GID values for container users +--- + +# Use High UID/GID Values for Container Users + +## Context & Problem Statement + +Docker containers share the host's user namespace by default. If container UIDs/GIDs overlap with privileged host accounts, this could lead to privilege escalation if a container escape vulnerability is exploited. Low UIDs (especially in the system user range of 100-999) are particularly risky as they often map to privileged system users on the host. + +Our previous approach used UID/GID 101 with the `--system` flag for user creation, which falls within the system user range and could potentially overlap with critical system users on the host. + +## Priorities & Constraints + +* Enhance security by reducing the risk of container user namespace overlaps +* Avoid warnings during container build related to system user ranges +* Maintain compatibility with common Docker practices +* Prevent potential privilege escalation in case of container escape + +## Considered Options + +* Option 1: Keep using low UID/GID (101) with `--system` flag +* Option 2: Use UID/GID (1000+) without `--system` flag +* Option 3: Use high UID/GID (10000+) without `--system` flag + +## Decision Outcome + +Chosen option: [Option 3: Use high UID/GID (10000+) without `--system` flag] + +We decided to: + +1. Change the default UID/GID from 101 to 10001 +2. Remove the `--system` flag from user/group creation commands +3. Document the security rationale for these changes + +This approach significantly reduces the risk of UID/GID collision with host system users while avoiding build-time warnings related to system user ranges. 
Using a very high UID/GID (10001) provides an additional security boundary in containers where user namespaces are shared with the host. + +### Expected Consequences + +* Improved security posture by reducing the risk of container escapes leading to privilege escalation +* Elimination of build-time warnings related to system user UID/GID ranges +* Consistency with industry best practices for container security +* No functional impact on container operation, as the internal user permissions remain the same + +## More Information + +* [NGINX Docker User ID Issue](https://github.com/nginxinc/docker-nginx/issues/490) - Demonstrates the risks of using UID 101 which overlaps with `systemd-network` user on Debian systems +* [.NET Docker Issue on System Users](https://github.com/dotnet/dotnet-docker/issues/4624) - Details the problems with using `--system` flag and the SYS_UID_MAX warnings +* [Docker Security Best Practices](https://docs.docker.com/develop/security-best-practices/) - General security recommendations for Docker containers diff --git a/docs/decisions/devops/0002-docker-use-gosu.md b/docs/decisions/devops/0002-docker-use-gosu.md new file mode 100644 index 00000000000..0bdd2931f89 --- /dev/null +++ b/docs/decisions/devops/0002-docker-use-gosu.md @@ -0,0 +1,51 @@ +--- +status: accepted +date: 2025-02-28 +story: Volumes permissions and privilege management in container entrypoint +--- + +# Use gosu for Privilege Dropping in Entrypoint + +## Context & Problem Statement + +Running containerized applications as the root user is a security risk. If an attacker compromises the application, they gain root access within the container, potentially facilitating a container escape. However, some operations during container startup, such as creating directories or modifying file permissions in locations not owned by the application user, require root privileges. 
We need a way to perform these initial setup tasks as root, but then switch to a non-privileged user *before* executing the main application (`zebrad`). Using `USER` in the Dockerfile is insufficient because it applies to the entire runtime, and we need to change permissions *after* volumes are mounted. + +## Priorities & Constraints + +* Minimize the security risk by running the main application (`zebrad`) as a non-privileged user. +* Allow initial setup tasks (file/directory creation, permission changes) that require root privileges. +* Maintain a clean and efficient entrypoint script. +* Avoid complex signal handling and TTY issues associated with `su` and `sudo`. +* Ensure 1:1 parity with Docker's `--user` flag behavior. + +## Considered Options + +* Option 1: Use `USER` directive in Dockerfile. +* Option 2: Use `su` within the entrypoint script. +* Option 3: Use `sudo` within the entrypoint script. +* Option 4: Use `gosu` within the entrypoint script. +* Option 5: Use `chroot --userspec` +* Option 6: Use `setpriv` + +## Decision Outcome + +Chosen option: [Option 4: Use `gosu` within the entrypoint script] + +We chose to use `gosu` because it provides a simple and secure way to drop privileges from root to a non-privileged user *after* performing necessary setup tasks. `gosu` avoids the TTY and signal-handling complexities of `su` and `sudo`. It's designed specifically for this use case (dropping privileges in container entrypoints) and leverages the same underlying mechanisms as Docker itself for user/group handling, ensuring consistent behavior. + +### Expected Consequences + +* Improved security by running `zebrad` as a non-privileged user. +* Simplified entrypoint script compared to using `su` or `sudo`. +* Avoidance of TTY and signal-handling issues. +* Consistent behavior with Docker's `--user` flag. +* No negative impact on functionality, as initial setup tasks can still be performed. 
+ +## More Information + +* [gosu GitHub repository](https://github.com/tianon/gosu#why) - Explains the rationale behind `gosu` and its advantages over `su` and `sudo`. +* [gosu usage warning](https://github.com/tianon/gosu#warning) - Highlights the core use case (stepping down from root) and potential vulnerabilities in other scenarios. +* Alternatives considered: + * `chroot --userspec`: While functional, it's less common and less directly suited to this specific task than `gosu`. + * `setpriv`: A viable alternative, but `gosu` is already well-established in our workflow and offers the desired functionality with a smaller footprint than a full `util-linux` installation. + * `su-exec`: Another minimal alternative, but it has known parser bugs that could lead to unexpected root execution. diff --git a/docs/decisions/devops/0003-filesystem-hierarchy.md b/docs/decisions/devops/0003-filesystem-hierarchy.md new file mode 100644 index 00000000000..13c626dec5e --- /dev/null +++ b/docs/decisions/devops/0003-filesystem-hierarchy.md @@ -0,0 +1,115 @@ +--- +status: proposed +date: 2025-02-28 +story: Standardize filesystem hierarchy for Zebra deployments +--- + +# Standardize Filesystem Hierarchy: FHS vs. XDG + +## Context & Problem Statement + +Zebra currently has inconsistencies in its filesystem layout, particularly regarding where configuration, data, cache files, and binaries are stored. We need a standardized approach compatible with: + +1. Traditional Linux systems. +2. Containerized deployments (Docker). +3. Cloud environments with stricter filesystem restrictions (e.g., Google's Container-Optimized OS). + +We previously considered using the Filesystem Hierarchy Standard (FHS) exclusively ([Issue #3432](https://github.com/ZcashFoundation/zebra/issues/3432)). However, recent changes introduced the XDG Base Directory Specification, which offers a user-centric approach. We need to decide whether to: + +* Adhere to FHS. +* Adopt XDG Base Directory Specification. 
+* Use a hybrid approach, leveraging the strengths of both. + +The choice impacts how we structure our Docker images, where configuration files are located, and how users interact with Zebra in different environments. + +## Priorities & Constraints + +* **Security:** Minimize the risk of privilege escalation by adhering to least-privilege principles. +* **Maintainability:** Ensure a clear and consistent filesystem layout that is easy to understand and maintain. +* **Compatibility:** Work seamlessly across various Linux distributions, Docker, and cloud environments (particularly those with restricted filesystems like Google's Container-Optimized OS). +* **User Experience:** Provide a predictable and user-friendly experience for locating configuration and data files. +* **Flexibility:** Allow users to override default locations via environment variables where appropriate. +* **Avoid Breaking Changes:** Minimize disruption to existing users and deployments, if possible. + +## Considered Options + +### Option 1: FHS + +* Configuration: `/etc/zebrad/` +* Data: `/var/lib/zebrad/` +* Cache: `/var/cache/zebrad/` +* Logs: `/var/log/zebrad/` +* Binary: `/opt/zebra/bin/zebrad` or `/usr/local/bin/zebrad` + +### Option 2: XDG Base Directory Specification + +* Configuration: `$HOME/.config/zebrad/` +* Data: `$HOME/.local/share/zebrad/` +* Cache: `$HOME/.cache/zebrad/` +* State: `$HOME/.local/state/zebrad/` +* Binary: `$HOME/.local/bin/zebrad` or `/usr/local/bin/zebrad` + +### Option 3: Hybrid Approach (FHS for System-Wide, XDG for User-Specific) + +* System-wide configuration: `/etc/zebrad/` +* User-specific configuration: `$XDG_CONFIG_HOME/zebrad/` +* System-wide data (read-only, shared): `/usr/share/zebrad/` (e.g., checkpoints) +* User-specific data: `$XDG_DATA_HOME/zebrad/` +* Cache: `$XDG_CACHE_HOME/zebrad/` +* State: `$XDG_STATE_HOME/zebrad/` +* Runtime: `$XDG_RUNTIME_DIR/zebrad/` +* Binary: `/opt/zebra/bin/zebrad` (system-wide) or `$HOME/.local/bin/zebrad` (user-specific) 
+ +## Pros and Cons of the Options + +### FHS + +* **Pros:** + * Traditional and well-understood by system administrators. + * Clear separation of configuration, data, cache, and binaries. + * Suitable for packaged software installations. + +* **Cons:** + * Less user-friendly; requires root access to modify configuration. + * Can conflict with stricter cloud environments restricting writes to `/etc` and `/var`. + * Doesn't handle multi-user scenarios as gracefully as XDG. + +### XDG Base Directory Specification + +* **Pros:** + * User-centric: configuration and data stored in user-writable locations. + * Better suited for containerized and cloud environments. + * Handles multi-user scenarios gracefully. + * Clear separation of configuration, data, cache, and state. + +* **Cons:** + * Less traditional; might be unfamiliar to some system administrators. + * Requires environment variables to be set correctly. + * Binary placement less standardized. + +### Hybrid Approach (FHS for System-Wide, XDG for User-Specific) + +* **Pros:** + * Combines strengths of FHS and XDG. + * Allows system-wide defaults while prioritizing user-specific configurations. + * Flexible and adaptable to different deployment scenarios. + * Clear binary placement in `/opt`. + +* **Cons:** + * More complex than either FHS or XDG alone. + * Requires careful consideration of precedence rules. 
+ +## Decision Outcome + +Pending + +## Expected Consequences + +Pending + +## More Information + +* [Filesystem Hierarchy Standard (FHS) v3.0](https://refspecs.linuxfoundation.org/FHS_3.0/fhs-3.0.html) +* [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/latest/) +* [Zebra Issue #3432: Use the Filesystem Hierarchy Standard (FHS) for deployments and artifacts](https://github.com/ZcashFoundation/zebra/issues/3432) +* [Google Container-Optimized OS: Working with the File System](https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem#working_with_the_file_system) diff --git a/docs/decisions/template.md b/docs/decisions/template.md new file mode 100644 index 00000000000..8b1b61f2e09 --- /dev/null +++ b/docs/decisions/template.md @@ -0,0 +1,49 @@ +--- +# status and date are the only required elements. Feel free to remove the rest. +status: {[proposed | rejected | accepted | deprecated | … | superseded by [ADR-NAME](adr-file-name.md)]} +date: {YYYY-MM-DD when the decision was last updated} +builds-on: {[Short Title](2021-05-15-short-title.md)} +story: {description or link to contextual issue} +--- + +# {short title of solved problem and solution} + +## Context and Problem Statement + +{2-3 sentences explaining the problem and the forces influencing the decision.} + + +## Priorities & Constraints + +* {List of concerns or constraints} +* {Factors influencing the decision} + +## Considered Options + +* Option 1: Thing +* Option 2: Another + +### Pros and Cons of the Options + +#### Option 1: {Brief description} + +* Good, because {reason} +* Bad, because {reason} + +## Decision Outcome + +Chosen option [Option 1: Thing] + +{Clearly state the chosen option and provide justification. 
Reference the "Pros and Cons of the Options" section below if applicable.} + +### Expected Consequences + +* List of outcomes resulting from this decision + + +## More Information + + + + + From 49741e8b476aff1d1dd2f6a2f57ec9d24404d345 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 12 Mar 2025 10:43:31 +0000 Subject: [PATCH 114/245] fix(ci): Add Rust environment variables to Docker test workflows (#9318) * feat(ci): Add Rust environment variables to Docker test workflows Enhance test workflows by adding Rust-specific environment variables: - Include RUST_LOG for logging configuration - Add RUST_BACKTRACE and RUST_LIB_BACKTRACE for improved error tracing - Include COLORBT_SHOW_HIDDEN for detailed backtraces - Add CARGO_INCREMENTAL for build performance optimization These changes improve debugging capabilities and provide more flexibility in test environments across dockerized CI jobs. * feat(ci): Add CARGO_INCREMENTAL build argument to Docker workflow Enable configurable Rust incremental compilation in Docker builds by: - Adding CARGO_INCREMENTAL as a build argument in GitHub Actions workflow - Setting a default value of 0 in Dockerfile to control build performance - Integrating with existing Rust environment variable configuration This change provides more granular control over Rust compilation strategies in containerized builds. 
* fix(docker): resolve user creation issues in Dockerfile - Move WORKDIR after user creation to prevent home directory ownership issues - Properly set environment variables for UID, GID, HOME, and USER in each stage - Reorganize Dockerfile to ensure home directory is created after user setup - Fix interactive prompts during adduser by ensuring proper directory ownership * fix(docs): Apply suggestions from code review Co-authored-by: Marek --------- Co-authored-by: Marek --- .github/workflows/sub-build-docker-image.yml | 1 + .../workflows/sub-ci-unit-tests-docker.yml | 43 ++++++++++++++++--- .../sub-deploy-integration-tests-gcp.yml | 11 ++++- docker/Dockerfile | 36 ++++++++-------- 4 files changed, 67 insertions(+), 24 deletions(-) diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 4bfdaefb107..c0f271b9472 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -166,6 +166,7 @@ jobs: build-args: | SHORT_SHA=${{ env.GITHUB_SHA_SHORT }} RUST_LOG=${{ env.RUST_LOG }} + CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} FEATURES=${{ env.FEATURES }} push: true # It's recommended to build images with max-level provenance attestations diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index c9553e11bca..f82f24ea6f1 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -53,10 +53,15 @@ jobs: - name: Run all tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run -t \ + docker run --tty \ -e RUN_ALL_TESTS=1 \ -e FEATURES="journald prometheus filter-reload" \ -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ + -e COLORBT_SHOW_HIDDEN=${{ 
env.COLORBT_SHOW_HIDDEN }} \ + -e CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Run state tests with fake activation heights. @@ -85,9 +90,14 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run -t \ + docker run --tty \ -e TEST_FAKE_ACTIVATION_HEIGHTS=1 \ -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ + -e COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }} \ + -e CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. @@ -108,7 +118,15 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty \ + -e TEST_ZEBRA_EMPTY_SYNC=1 \ + -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ + -e COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }} \ + -e CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. 
test-lightwalletd-integration: @@ -128,7 +146,13 @@ jobs: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e ZEBRA_TEST_LIGHTWALLETD=1 -e TEST_LWD_INTEGRATION=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty \ + -e ZEBRA_TEST_LIGHTWALLETD=1 \ + -e TEST_LWD_INTEGRATION=1 \ + -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: @@ -147,8 +171,8 @@ jobs: with: test_id: "testnet-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' test_variables: "-e NETWORK=Testnet" + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: @@ -198,4 +222,11 @@ jobs: - name: Run check_no_git_refs_in_cargo_lock run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - docker run --tty -e RUN_CHECK_NO_GIT_REFS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} + docker run --tty \ + -e RUN_CHECK_NO_GIT_REFS=1 \ + -e NETWORK="${{ inputs.network || vars.ZCASH_NETWORK }}" \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ + -e COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} diff --git 
a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index b054814355f..6533996757c 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -85,6 +85,11 @@ on: description: 'Application name, used to work out when a job is an update job' env: + RUST_LOG: ${{ vars.RUST_LOG }} + RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} + RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} + COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} + CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} # How many previous log lines we show at the start of each new log job. # Increase this number if some log lines are skipped between jobs # @@ -95,7 +100,6 @@ env: # How many blocks to wait before creating an updated cached state image. # 1 day is approximately 1152 blocks. CACHED_STATE_UPDATE_LIMIT: 576 - jobs: # Find a cached state disk for ${{ inputs.test_id }}, matching all of: # - disk cached state prefix -> zebrad-cache or lwd-cache @@ -279,6 +283,11 @@ jobs: --tty \ --detach \ ${{ inputs.test_variables }} \ + -e RUST_LOG=${{ env.RUST_LOG }} \ + -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ + -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ + -e COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }} \ + -e CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} \ ${MOUNT_FLAGS} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ ' diff --git a/docker/Dockerfile b/docker/Dockerfile index 064c57212ec..d120d40d8b9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -52,19 +52,14 @@ ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-1} ARG COLORBT_SHOW_HIDDEN ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} +ARG CARGO_INCREMENTAL +ENV CARGO_INCREMENTAL=${CARGO_INCREMENTAL:-0} + ARG SHORT_SHA # If this is not set, it must be an empty string, so Zebra can try an alternative git commit source: # 
https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 ENV SHORT_SHA=${SHORT_SHA:-} -# Set the working directory for the build. -ARG HOME -WORKDIR ${HOME} -ENV HOME=${HOME} -ENV CARGO_HOME="${HOME}/.cargo/" - -ENV USER=${USER} - # This stage builds tests without running them. # # We also download needed dependencies for tests to work, from other images. @@ -81,15 +76,22 @@ ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} # This environment setup is almost identical to the `runtime` target so that the # `tests` target differs minimally. In fact, a subset of this setup is used for # the `runtime` target. - ARG UID +ENV UID=${UID} ARG GID +ENV GID=${GID} ARG HOME +ENV HOME=${HOME} ARG USER +ENV USER=${USER} RUN addgroup --gid ${GID} ${USER} && \ adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} +# Set the working directory for the build. +WORKDIR ${HOME} +ENV CARGO_HOME="${HOME}/.cargo/" + # Build Zebra test binaries, but don't run them # Leverage a cache mount to /usr/local/cargo/registry/ @@ -189,14 +191,8 @@ FROM debian:bookworm-slim AS runtime ARG FEATURES ENV FEATURES=${FEATURES} -# Create a non-privileged system user for running `zebrad`. -ARG USER -ENV USER=${USER} - -# System users have no home dirs, but we set one for users' convenience. -ARG HOME -WORKDIR ${HOME} - +# Create a non-privileged user for running `zebrad`. +# # We use a high UID/GID (10001) to avoid overlap with host system users. # This reduces the risk of container user namespace conflicts with host accounts, # which could potentially lead to privilege escalation if a container escape occurs. 
@@ -213,10 +209,16 @@ ARG UID ENV UID=${UID} ARG GID ENV GID=${GID} +ARG HOME +ENV HOME=${HOME} +ARG USER +ENV USER=${USER} RUN addgroup --gid ${GID} ${USER} && \ adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} +WORKDIR ${HOME} + # We set the default locations of the conf and cache dirs according to the XDG # spec: https://specifications.freedesktop.org/basedir-spec/latest/ From f0140a4cd8bbc7b1eb8b5d3be7be60e6248b0802 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 18 Mar 2025 09:58:19 +0100 Subject: [PATCH 115/245] fix(ci): Better permission and cache dirs handling in Docker (#9323) * Use gosu only once * Remove `COLORBT_SHOW_HIDDEN` * Simplify Dockerfile * Remove `check_directory_files` from entrypoint * Remove check for `ZEBRA_CONF_PATH` in entrypoint * Simplify ownership setting for `ZEBRA_CACHE_DIR` * Simplify ownership setting for `LOG_FILE` * Refactor Dockerfile & entrypoint * Refactor vars in Dockerfile * fmt * Use `chown` for `ZEBRA_CONF_PATH` * `run_cargo_test` -> `run_test` * Make `run_test` runnable with gosu * Cosmetics * Don't pre-compile Zebra * Revert: "Don't pre-compile Zebra" * Fix the custom conf test * Reintroduce `CARGO_HOME` in Dockerfile * Pass `FEATURES` as env var to entrypoint * Fix ARGs in Dockerfile * Revert "Remove `COLORBT_SHOW_HIDDEN`" This reverts commit 960d5ca3088907d3fdb20b311687cbdc9834a0fe. 
* Specify cache state dir in CI * Specify lwd cache dir in CI * refactor: reorganize variables and avoid running entrypoint commands in subshell (#9326) * refactor(docker): improve container configuration and security - Optimize Dockerfile build stages and environment variables - Improve file operations with proper ownership - Streamline entrypoint script privilege management * refactor(docker): enhance user management and directory ownership - Add HOME argument back to ensure proper user home directory setup - Implement ownership change for the user's home directory * refactor(docker): remove redundant cache directory setup - Eliminate explicit creation and ownership setting for LWD and Zebra cache directories in Dockerfile. - Introduce default values for cache directories in entrypoint script, allowing for environment variable overrides. * fix: run all cargo commands as user * chore: reduce diff * fix: revert to more robust command array --------- Co-authored-by: Gustavo Valverde Co-authored-by: Gustavo Valverde --- .../sub-ci-integration-tests-gcp.yml | 24 ++-- .../workflows/sub-ci-unit-tests-docker.yml | 2 +- docker/Dockerfile | 111 +++++++----------- docker/entrypoint.sh | 86 ++++++-------- 4 files changed, 88 insertions(+), 135 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index fde8ef835ca..071c46f0b74 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -108,7 +108,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -178,7 
+178,7 @@ jobs: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true @@ -209,7 +209,7 @@ jobs: test_id: checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1" + test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false @@ -285,7 +285,7 @@ jobs: app_name: zebrad test_id: checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1" + test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed @@ -316,7 +316,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -351,7 +351,7 @@ jobs: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update 
sync with both states - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: true @@ -379,7 +379,7 @@ jobs: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true saves_to_disk: false secrets: inherit @@ -401,7 +401,7 @@ jobs: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -424,7 +424,7 @@ jobs: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: 
false @@ -451,7 +451,7 @@ jobs: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -474,7 +474,7 @@ jobs: app_name: zebrad test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -497,7 +497,7 @@ jobs: app_name: zebra-scan test_id: scanner-tests test_description: Tests the scanner. 
- test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCANNER=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCANNER=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index f82f24ea6f1..852d26503b0 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -181,7 +181,7 @@ jobs: with: test_id: "custom-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - test_variables: '-e ZEBRA_CONF_PATH="zebrad/tests/common/configs/custom-conf.toml"' + test_variables: '-e ZEBRA_CONF_PATH="/home/zebra/zebrad/tests/common/configs/custom-conf.toml"' grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' failure-issue: diff --git a/docker/Dockerfile b/docker/Dockerfile index d120d40d8b9..8da67cb20e2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -3,32 +3,29 @@ # If you want to include a file in the Docker image, add it to .dockerignore. # -# We are using 4 (TODO: 5) stages: +# We use 4 (TODO: 5) stages: # - deps: installs build dependencies and sets default values -# - tests: builds tests binaries +# - tests: prepares a test image # - release: builds release binaries -# - runtime: runs the release binaries +# - runtime: prepares the release image # - TODO: Add a `monitoring` stage # # We first set default values for build arguments used across the stages. # Each stage must define the build arguments (ARGs) it uses. 
-# Build zebrad with these features -# -# Keep these argument defaults in sync with GitHub vars.RUST_PROD_FEATURES +ARG RUST_VERSION=1.85.0 + +# Keep in sync with vars.RUST_PROD_FEATURES in GitHub # https://github.com/ZcashFoundation/zebra/settings/variables/actions ARG FEATURES="default-release-binaries" -ARG USER="zebra" ARG UID=10001 -ARG GID=10001 +ARG GID=${UID} +ARG USER="zebra" ARG HOME="/home/${USER}" +ARG CARGO_HOME="${HOME}/.cargo" -ARG RUST_VERSION=1.85.0 -# In this stage we download all system requirements to build the project -# -# It also captures all the build arguments to be used as environment variables. -# We set defaults for the arguments, in case the build does not include this information. +# This stage prepares Zebra's build deps and captures build args as env vars. FROM rust:${RUST_VERSION}-bookworm AS deps SHELL ["/bin/bash", "-xo", "pipefail", "-c"] @@ -39,25 +36,20 @@ RUN apt-get -qq update && \ protobuf-compiler \ && rm -rf /var/lib/apt/lists/* /tmp/* -# Build arguments and variables set for tracelog levels and debug information -ARG RUST_LOG -ENV RUST_LOG=${RUST_LOG:-info} - -ARG RUST_BACKTRACE -ENV RUST_BACKTRACE=${RUST_BACKTRACE:-1} - -ARG RUST_LIB_BACKTRACE -ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-1} - -ARG COLORBT_SHOW_HIDDEN -ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} - +# Build arguments and variables ARG CARGO_INCREMENTAL ENV CARGO_INCREMENTAL=${CARGO_INCREMENTAL:-0} -ARG SHORT_SHA -# If this is not set, it must be an empty string, so Zebra can try an alternative git commit source: +ARG CARGO_HOME +ENV CARGO_HOME=${CARGO_HOME} + +ARG FEATURES +ENV FEATURES=${FEATURES} + +# If this is not set, it must be an empty string, so Zebra can try an +# alternative git commit source: # https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 +ARG SHORT_SHA ENV SHORT_SHA=${SHORT_SHA:-} # This stage builds tests without running them. 
@@ -66,9 +58,6 @@ ENV SHORT_SHA=${SHORT_SHA:-} # An entrypoint.sh is only available in this step for easier test handling with variables. FROM deps AS tests -ARG FEATURES -ENV FEATURES=${FEATURES} - # Skip IPv6 tests by default, as some CI environment don't have IPv6 available ARG ZEBRA_SKIP_IPV6_TESTS ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} @@ -80,20 +69,18 @@ ARG UID ENV UID=${UID} ARG GID ENV GID=${GID} -ARG HOME -ENV HOME=${HOME} ARG USER ENV USER=${USER} +ARG HOME -RUN addgroup --gid ${GID} ${USER} && \ - adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} +RUN addgroup --quiet --gid ${GID} ${USER} && \ + adduser --quiet --gid ${GID} --uid ${UID} --home ${HOME} ${USER} --disabled-password --gecos "" # Set the working directory for the build. WORKDIR ${HOME} -ENV CARGO_HOME="${HOME}/.cargo/" # Build Zebra test binaries, but don't run them - +# # Leverage a cache mount to /usr/local/cargo/registry/ # for downloaded dependencies, a cache mount to /usr/local/cargo/git/db # for git repository dependencies, and a cache mount to ${HOME}/target/ for @@ -136,30 +123,22 @@ COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ ENV ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} -ENV LWD_CACHE_DIR="${HOME}/.cache/lwd" -RUN mkdir -p ${LWD_CACHE_DIR} && \ - chown -R ${UID}:${GID} ${LWD_CACHE_DIR} +# As the build has already run with the root user, +# we need to set the correct permissions for the home and cargo home dirs owned by it. +RUN chown -R ${UID}:${GID} "${HOME}" && \ + chown -R ${UID}:${GID} "${CARGO_HOME}" -# Use the same cache dir as in the production environment. 
-ENV ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" -RUN mkdir -p ${ZEBRA_CACHE_DIR} && \ - chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} - -COPY ./ ${HOME} -RUN chown -R ${UID}:${GID} ${HOME} - -COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh +COPY --chown=${UID}:${GID} ./ ${HOME} +COPY --chown=${UID}:${GID} ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh ENTRYPOINT [ "entrypoint.sh", "test" ] -# In this stage we build a release (generate the zebrad binary) +# This stage builds the zebrad release binary. # -# This step also adds `cache mounts` as this stage is completely independent from the -# `test` stage. This step is a dependency for the `runtime` stage, which uses the resulting -# zebrad binary from this step. +# It also adds `cache mounts` as this stage is completely independent from the +# `test` stage. The resulting zebrad binary is used in the `runtime` stage. FROM deps AS release -ARG FEATURES ARG HOME RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ @@ -184,8 +163,8 @@ RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \ cp ${HOME}/target/release/zebrad /usr/local/bin -# This step starts from scratch using Debian and only adds the resulting binary -# from the `release` stage. +# This stage starts from scratch using Debian and copies the built zebrad binary +# from the `release` stage along with other binaries and files. 
FROM debian:bookworm-slim AS runtime ARG FEATURES @@ -209,28 +188,22 @@ ARG UID ENV UID=${UID} ARG GID ENV GID=${GID} -ARG HOME -ENV HOME=${HOME} ARG USER ENV USER=${USER} +ARG HOME -RUN addgroup --gid ${GID} ${USER} && \ - adduser --gid ${GID} --uid ${UID} --home ${HOME} ${USER} +RUN addgroup --quiet --gid ${GID} ${USER} && \ + adduser --quiet --gid ${GID} --uid ${UID} --home ${HOME} ${USER} --disabled-password --gecos "" WORKDIR ${HOME} # We set the default locations of the conf and cache dirs according to the XDG # spec: https://specifications.freedesktop.org/basedir-spec/latest/ +RUN chown -R ${UID}:${GID} ${HOME} + ARG ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" ENV ZEBRA_CONF_PATH=${ZEBRA_CONF_PATH} -COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} - -ARG ZEBRA_CACHE_DIR="${HOME}/.cache/zebra" -ENV ZEBRA_CACHE_DIR=${ZEBRA_CACHE_DIR} -RUN mkdir -p ${ZEBRA_CACHE_DIR} && chown -R ${UID}:${GID} ${ZEBRA_CACHE_DIR} - -RUN chown -R ${UID}:${GID} ${HOME} # We're explicitly NOT using the USER directive here. # Instead, we run as root initially and use gosu in the entrypoint.sh @@ -240,9 +213,9 @@ RUN chown -R ${UID}:${GID} ${HOME} # Copy the gosu binary to be able to run the entrypoint as non-root user COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ - COPY --from=release /usr/local/bin/zebrad /usr/local/bin/ -COPY ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh +COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} +COPY --chown=${UID}:${GID} ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh ENTRYPOINT [ "entrypoint.sh" ] CMD ["zebrad"] diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 9e70675e1f8..1f4f868c1be 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -10,6 +10,15 @@ set -eo pipefail + +# These are the default cached state directories for Zebra and lightwalletd. 
+# +# They are set to `${HOME}/.cache/zebra` and `${HOME}/.cache/lwd` +# respectively, but can be overridden by setting the +# `ZEBRA_CACHE_DIR` and `LWD_CACHE_DIR` environment variables. +: "${ZEBRA_CACHE_DIR:=${HOME}/.cache/zebra}" +: "${LWD_CACHE_DIR:=${HOME}/.cache/lwd}" + # Exit early if `ZEBRA_CONF_PATH` does not point to a file. if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then echo "ERROR: No Zebra config file found at ZEBRA_CONF_PATH (${ZEBRA_CONF_PATH})." @@ -17,13 +26,9 @@ if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then exit 1 fi -# Define function to execute commands as the specified user +# Use gosu to drop privileges and execute the given command as the specified UID:GID exec_as_user() { - if [[ "$(id -u)" = '0' ]]; then - exec gosu "${USER}" "$@" - else - exec "$@" - fi + exec gosu "${UID}:${GID}" "$@" } # Modifies the existing Zebra config file at ZEBRA_CONF_PATH using environment variables. @@ -32,7 +37,6 @@ exec_as_user() { # # This function modifies the existing file in-place and prints its location. prepare_conf_file() { - # Set a custom network. if [[ -n "${NETWORK}" ]]; then sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${ZEBRA_CONF_PATH}" @@ -57,12 +61,14 @@ prepare_conf_file() { # use them to set the cache dirs separately if needed. if [[ -n "${ZEBRA_CACHE_DIR}" ]]; then mkdir -p "${ZEBRA_CACHE_DIR//\"/}" + chown -R "${UID}:${GID}" "${ZEBRA_CACHE_DIR//\"/}" sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${ZEBRA_CONF_PATH}" - # Fix permissions right after creating/configuring the directory - if [[ "$(id -u)" = '0' ]]; then - # "Setting permissions for the cache directory - chown -R "${USER}:${USER}" "${ZEBRA_CACHE_DIR//\"/}" - fi + fi + + # Set a custom lightwalletd cache dir. + if [[ -n "${LWD_CACHE_DIR}" ]]; then + mkdir -p "${LWD_CACHE_DIR//\"/}" + chown -R "${UID}:${GID}" "${LWD_CACHE_DIR//\"/}" fi # Enable the Prometheus metrics endpoint. 
@@ -73,12 +79,8 @@ prepare_conf_file() { # Enable logging to a file by setting a custom log file path. if [[ -n "${LOG_FILE}" ]]; then mkdir -p "$(dirname "${LOG_FILE//\"/}")" + chown -R "${UID}:${GID}" "$(dirname "${LOG_FILE//\"/}")" sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${ZEBRA_CONF_PATH}" - # Fix permissions right after creating/configuring the log directory - if [[ "$(id -u)" = '0' ]]; then - # "Setting permissions for the log directory - chown -R "${USER}:${USER}" "$(dirname "${LOG_FILE//\"/}")" - fi fi # Enable or disable colored logs. @@ -103,29 +105,9 @@ prepare_conf_file() { echo "${ZEBRA_CONF_PATH}" } -# Checks if a directory contains subdirectories -# -# Exits with 0 if it does, and 1 otherwise. -check_directory_files() { - local dir="$1" - # Check if the directory exists - if [[ -d "${dir}" ]]; then - # Check if there are any subdirectories - if find "${dir}" -mindepth 1 -type d | read -r; then - : - else - echo "No subdirectories found in ${dir}." - exit 1 - fi - else - echo "Directory ${dir} does not exist." - exit 1 - fi -} - # Runs cargo test with an arbitrary number of arguments. # -# ## Positional Parameters +# Positional Parameters # # - '$1' must contain # - either cargo FEATURES as described here: @@ -134,34 +116,29 @@ check_directory_files() { # - The remaining params will be appended to a command starting with # `exec_as_user cargo test ... 
-- ...` run_cargo_test() { - # Start constructing the command, ensuring that $1 is enclosed in single - # quotes as it's a feature list - local cmd="exec_as_user cargo test --locked --release --features '$1' --package zebrad --test acceptance -- --nocapture --include-ignored" - # Shift the first argument, as it's already included in the cmd + local features="$1" shift + # Start constructing the command array + local cmd=(cargo test --locked --release --features "${features}" --package zebrad --test acceptance -- --nocapture --include-ignored) + # Loop through the remaining arguments for arg in "$@"; do if [[ -n ${arg} ]]; then # If the argument is non-empty, add it to the command - cmd+=" ${arg}" + cmd+=("${arg}") fi done - # Run the command using eval. This will replace the current process with the - # cargo command. - echo "Running:" - echo "${cmd}" - eval "${cmd}" || { - echo "Cargo test failed" - exit 1 - } + echo "Running: ${cmd[*]}" + # Execute directly to become PID 1 + exec_as_user "${cmd[@]}" } # Runs tests depending on the env vars. # -# Positional Parameters +# ## Positional Parameters # # - $@: Arbitrary command that will be executed if no test env var is set. run_tests() { @@ -280,11 +257,14 @@ run_tests() { # Main Script Logic prepare_conf_file "${ZEBRA_CONF_PATH}" +echo "INFO: Using the following environment variables:" +printenv + echo "Prepared the following Zebra config:" cat "${ZEBRA_CONF_PATH}" # - If "$1" is "--", "-", or "zebrad", run `zebrad` with the remaining params. -# - If "$1" is "tests": +# - If "$1" is "test": # - and "$2" is "zebrad", run `zebrad` with the remaining params, # - else run tests with the remaining params. # - TODO: If "$1" is "monitoring", start a monitoring node. 
From 32de8ef5fdbf534ce8819a1744b28875b99294d2 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Sat, 22 Mar 2025 17:25:30 +0200 Subject: [PATCH 116/245] fix(docker): set `HOME` environment variable explicitly in Dockerfile (#9333) The HOME environment variable was defaulting to /root when the container started, causing cache directories to be incorrectly set up under /root/.cache/zebra instead of /home/zebra/.cache/zebra. This explicit setting ensures the HOME environment variable is correctly set to the zebra user's home directory. --- docker/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8da67cb20e2..564965ff38d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -72,6 +72,7 @@ ENV GID=${GID} ARG USER ENV USER=${USER} ARG HOME +ENV HOME=${HOME} RUN addgroup --quiet --gid ${GID} ${USER} && \ adduser --quiet --gid ${GID} --uid ${UID} --home ${HOME} ${USER} --disabled-password --gecos "" @@ -191,6 +192,7 @@ ENV GID=${GID} ARG USER ENV USER=${USER} ARG HOME +ENV HOME=${HOME} RUN addgroup --quiet --gid ${GID} ${USER} && \ adduser --quiet --gid ${GID} --uid ${UID} --home ${HOME} ${USER} --disabled-password --gecos "" From 431a76c04aed0973edd287ea757cc715bfc27a99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 12:14:34 +0000 Subject: [PATCH 117/245] build(deps): bump the devops group across 1 directory with 8 updates (#9335) Bumps the devops group with 8 updates in the / directory: | Package | From | To | | --- | --- | --- | | [docker/login-action](https://github.com/docker/login-action) | `3.3.0` | `3.4.0` | | [codecov/codecov-action](https://github.com/codecov/codecov-action) | `5.3.1` | `5.4.0` | | [tj-actions/changed-files](https://github.com/tj-actions/changed-files) | `45.0.7` | `46.0.1` | | [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) | `2.7.7` | `2.7.8` | | 
[docker/metadata-action](https://github.com/docker/metadata-action) | `5.6.1` | `5.7.0` | | [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) | `3.9.0` | `3.10.0` | | [docker/build-push-action](https://github.com/docker/build-push-action) | `6.14.0` | `6.15.0` | | [docker/scout-action](https://github.com/docker/scout-action) | `1.16.3` | `1.17.0` | Updates `docker/login-action` from 3.3.0 to 3.4.0 - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v3.3.0...v3.4.0) Updates `codecov/codecov-action` from 5.3.1 to 5.4.0 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.3.1...v5.4.0) Updates `tj-actions/changed-files` from 45.0.7 to 46.0.1 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v45.0.7...v46.0.1) Updates `Swatinem/rust-cache` from 2.7.7 to 2.7.8 - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/v2.7.7...v2.7.8) Updates `docker/metadata-action` from 5.6.1 to 5.7.0 - [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v5.6.1...v5.7.0) Updates `docker/setup-buildx-action` from 3.9.0 to 3.10.0 - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v3.9.0...v3.10.0) Updates `docker/build-push-action` from 6.14.0 to 6.15.0 - [Release notes](https://github.com/docker/build-push-action/releases) - 
[Commits](https://github.com/docker/build-push-action/compare/v6.14.0...v6.15.0) Updates `docker/scout-action` from 1.16.3 to 1.17.0 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.16.3...v1.17.0) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major dependency-group: devops - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: docker/metadata-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/scout-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/chore-delete-gcp-resources.yml | 2 +- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 8 ++++---- .github/workflows/ci-unit-tests-os.yml | 4 ++-- .github/workflows/docs-deploy-firebase.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 12 ++++++------ 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index e611498fdb2..c80c6c21f0e 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -130,7 +130,7 @@ jobs: token_format: 'access_token' - name: Login to Google Artifact Registry - uses: docker/login-action@v3.3.0 + uses: docker/login-action@v3.4.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 3314bf0959c..9ce9ddce4fd 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.3.1 + uses: codecov/codecov-action@v5.4.0 diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index df13ae1b1f7..b2917c17cb6 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.7 + uses: tj-actions/changed-files@v46.0.1 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.7 + uses: tj-actions/changed-files@v46.0.1 with: files: | .github/workflows/*.yml @@ -93,7 +93,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf 
https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.7.7 + - uses: Swatinem/rust-cache@v2.7.8 with: shared-key: "clippy-cargo-lock" @@ -138,7 +138,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. - #- uses: Swatinem/rust-cache@v2.7.7 + #- uses: Swatinem/rust-cache@v2.7.8 - run: | cargo fmt --all -- --check diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index d37328a5c12..71468bd9348 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -115,7 +115,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.7.7 + - uses: Swatinem/rust-cache@v2.7.8 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -224,7 +224,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.7.7 + - uses: Swatinem/rust-cache@v2.7.8 with: shared-key: "clippy-cargo-lock" diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index eecb9ffacf5..7e2c3d6f025 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -158,7 +158,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.7.7 + - uses: Swatinem/rust-cache@v2.7.8 - name: Build internal docs run: | diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index c0f271b9472..2728c7ad4ec 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -88,7 +88,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v5.6.1 + uses: docker/metadata-action@v5.7.0 with: # list of Docker images to use as base name for tags # We only publish images to DockerHub if a release is not a pre-release @@ -132,14 +132,14 @@ jobs: access_token_lifetime: 10800s - name: Login to Google Artifact Registry - uses: docker/login-action@v3.3.0 + uses: docker/login-action@v3.4.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken password: ${{ steps.auth.outputs.access_token }} - name: Login to DockerHub - uses: docker/login-action@v3.3.0 + uses: docker/login-action@v3.4.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} @@ -147,7 +147,7 @@ jobs: # Setup Docker Buildx to use Docker Build Cloud - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.9.0 + uses: 
docker/setup-buildx-action@v3.10.0 with: version: "lab:latest" driver: cloud @@ -156,7 +156,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.14.0 + uses: docker/build-push-action@v6.15.0 with: target: ${{ inputs.dockerfile_target }} context: . @@ -188,7 +188,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.16.3 + uses: docker/scout-action@v1.17.0 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. From 979cdaf963280486b1557c3ffee0589f4396eaba Mon Sep 17 00:00:00 2001 From: Elijah Hampton Date: Tue, 25 Mar 2025 15:10:33 -0400 Subject: [PATCH 118/245] feat(state): Implements reconsider_block method (#9260) * Adds reconsider_block method and appropriate test cases * Refactors invalidated_blocks state to use IndexMap. Returns error from validate_and_commit if a candidate block's hash is in the map of invalidated blocks. Stores invalidated_blocks by height and clears when finalizing. Checks against non finalized tip hash to create a new chain if parnt_chain doesn't exist. Renames ReconsiderError variant NonPreviouslyInvalidatedBlock to MissingInvalidatedBlock. * Moves MAX_INVALIDATED_BLOCKS constant to constants.rs * Maintains invalidate_blocks cleanup in reconsider_block and finalize(). Removes unused ReconsiderError variant. Opts to refuse block consideration if parent_chain does not exist. Adds db handle to reconsider_block function. Edits max blocks constant documentation * Checks the finalized state first to create a new chain from non finalized blocks only before checking parent_chain. 
--- zebra-state/src/constants.rs | 6 + zebra-state/src/error.rs | 21 ++++ .../src/service/non_finalized_state.rs | 114 ++++++++++++++++-- .../src/service/non_finalized_state/chain.rs | 4 +- .../non_finalized_state/tests/vectors.rs | 113 +++++++++++++++-- 5 files changed, 239 insertions(+), 19 deletions(-) diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 2c3671d838f..d4ebb5f4000 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -117,6 +117,12 @@ pub const MAX_FIND_BLOCK_HEADERS_RESULTS: u32 = 160; /// These database versions can be recreated from their directly preceding versions. pub const RESTORABLE_DB_VERSIONS: [u64; 1] = [26]; +/// The maximum number of invalidated block records. +/// +/// This limits the memory use to around: +/// `100 entries * up to 99 blocks * 2 MB per block = 20 GB` +pub const MAX_INVALIDATED_BLOCKS: usize = 100; + lazy_static! { /// Regex that matches the RocksDB error when its lock file is already open. pub static ref LOCK_FILE_ERROR: Regex = Regex::new("(lock file).*(temporarily unavailable)|(in use)|(being used by another process)").expect("regex is valid"); diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs index 632591f4cb3..0e96c232da9 100644 --- a/zebra-state/src/error.rs +++ b/zebra-state/src/error.rs @@ -46,11 +46,32 @@ pub type BoxError = Box; #[error("block is not contextually valid: {}", .0)] pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError); +/// An error describing the reason a block or its descendants could not be reconsidered after +/// potentially being invalidated from the chain_set. 
+#[derive(Debug, Error)] +pub enum ReconsiderError { + #[error("Block with hash {0} was not previously invalidated")] + MissingInvalidatedBlock(block::Hash), + + #[error("Parent chain not found for block {0}")] + ParentChainNotFound(block::Hash), + + #[error("Invalidated blocks list is empty when it should contain at least one block")] + InvalidatedBlocksEmpty, + + #[error("{0}")] + ValidationError(#[from] ValidateContextError), +} + /// An error describing why a block failed contextual validation. #[derive(Debug, Error, Clone, PartialEq, Eq)] #[non_exhaustive] #[allow(missing_docs)] pub enum ValidateContextError { + #[error("block hash {block_hash} was previously invalidated")] + #[non_exhaustive] + BlockPreviouslyInvalidated { block_hash: block::Hash }, + #[error("block parent not found in any chain, or not enough blocks in chain")] #[non_exhaustive] NotReadyToBeCommitted, diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 91ae30ae23d..35027a64a60 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -8,14 +8,17 @@ use std::{ sync::Arc, }; +use indexmap::IndexMap; use zebra_chain::{ - block::{self, Block, Hash}, + block::{self, Block, Hash, Height}, parameters::Network, - sprout, transparent, + sprout::{self}, + transparent, }; use crate::{ - constants::MAX_NON_FINALIZED_CHAIN_FORKS, + constants::{MAX_INVALIDATED_BLOCKS, MAX_NON_FINALIZED_CHAIN_FORKS}, + error::ReconsiderError, request::{ContextuallyVerifiedBlock, FinalizableBlock}, service::{check, finalized_state::ZebraDb}, SemanticallyVerifiedBlock, ValidateContextError, @@ -47,7 +50,7 @@ pub struct NonFinalizedState { /// Blocks that have been invalidated in, and removed from, the non finalized /// state. 
- invalidated_blocks: HashMap>>, + invalidated_blocks: IndexMap>>, // Configuration // @@ -233,6 +236,10 @@ impl NonFinalizedState { self.insert(side_chain); } + // Remove all invalidated_blocks at or below the finalized height + self.invalidated_blocks + .retain(|height, _blocks| *height >= best_chain_root.height); + self.update_metrics_for_chains(); // Add the treestate to the finalized block. @@ -294,13 +301,100 @@ impl NonFinalizedState { invalidated_blocks }; - self.invalidated_blocks - .insert(block_hash, Arc::new(invalidated_blocks)); + self.invalidated_blocks.insert( + invalidated_blocks.first().unwrap().clone().height, + Arc::new(invalidated_blocks), + ); + + while self.invalidated_blocks.len() > MAX_INVALIDATED_BLOCKS { + self.invalidated_blocks.shift_remove_index(0); + } self.update_metrics_for_chains(); self.update_metrics_bars(); } + /// Reconsiders a previously invalidated block and its descendants into the non-finalized state + /// based on a block_hash. Reconsidered blocks are inserted into the previous chain and re-inserted + /// into the chain_set. + pub fn reconsider_block( + &mut self, + block_hash: block::Hash, + finalized_state: &ZebraDb, + ) -> Result<(), ReconsiderError> { + // Get the invalidated blocks that were invalidated by the given block_hash + let height = self + .invalidated_blocks + .iter() + .find_map(|(height, blocks)| { + if blocks.first()?.hash == block_hash { + Some(height) + } else { + None + } + }) + .ok_or(ReconsiderError::MissingInvalidatedBlock(block_hash))?; + + let mut invalidated_blocks = self + .invalidated_blocks + .clone() + .shift_remove(height) + .ok_or(ReconsiderError::MissingInvalidatedBlock(block_hash))?; + let mut_blocks = Arc::make_mut(&mut invalidated_blocks); + + // Find and fork the parent chain of the invalidated_root. 
Update the parent chain + // with the invalidated_descendants + let invalidated_root = mut_blocks + .first() + .ok_or(ReconsiderError::InvalidatedBlocksEmpty)?; + + let root_parent_hash = invalidated_root.block.header.previous_block_hash; + + // If the parent is the tip of the finalized_state we create a new chain and insert it + // into the non finalized state + let chain_result = if root_parent_hash == finalized_state.finalized_tip_hash() { + let chain = Chain::new( + &self.network, + finalized_state + .finalized_tip_height() + .ok_or(ReconsiderError::ParentChainNotFound(block_hash))?, + finalized_state.sprout_tree_for_tip(), + finalized_state.sapling_tree_for_tip(), + finalized_state.orchard_tree_for_tip(), + finalized_state.history_tree(), + finalized_state.finalized_value_pool(), + ); + Arc::new(chain) + } else { + // The parent is not the finalized_tip and still exist in the NonFinalizedState + // or else we return an error due to the parent not existing in the NonFinalizedState + self.parent_chain(root_parent_hash) + .map_err(|_| ReconsiderError::ParentChainNotFound(block_hash))? + }; + + let mut modified_chain = Arc::unwrap_or_clone(chain_result); + for block in Arc::unwrap_or_clone(invalidated_blocks) { + modified_chain = modified_chain.push(block)?; + } + + let (height, hash) = modified_chain.non_finalized_tip(); + + // Only track invalidated_blocks that are not yet finalized. Once blocks are finalized (below the best_chain_root_height) + // we can discard the block. 
+ if let Some(best_chain_root_height) = finalized_state.finalized_tip_height() { + self.invalidated_blocks + .retain(|height, _blocks| *height >= best_chain_root_height); + } + + self.insert_with(Arc::new(modified_chain), |chain_set| { + chain_set.retain(|chain| chain.non_finalized_tip_hash() != root_parent_hash) + }); + + self.update_metrics_for_committed_block(height, hash); + + Ok(()) + } + /// Commit block to the non-finalized state as a new chain where its parent /// is the finalized tip. #[tracing::instrument(level = "debug", skip(self, finalized_state, prepared))] @@ -352,6 +446,12 @@ impl NonFinalizedState { prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { + if self.invalidated_blocks.contains_key(&prepared.height) { + return Err(ValidateContextError::BlockPreviouslyInvalidated { + block_hash: prepared.hash, + }); + } + // Reads from disk // // TODO: if these disk reads show up in profiles, run them in parallel, using std::thread::spawn() @@ -624,7 +724,7 @@ impl NonFinalizedState { } /// Return the invalidated blocks. - pub fn invalidated_blocks(&self) -> HashMap>> { + pub fn invalidated_blocks(&self) -> IndexMap>> { self.invalidated_blocks.clone() } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index c7d0d2877c6..e017a742c0d 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -359,7 +359,7 @@ impl Chain { (block, treestate) } - // Returns the block at the provided height and all of its descendant blocks. + /// Returns the block at the provided height and all of its descendant blocks. pub fn child_blocks(&self, block_height: &block::Height) -> Vec { self.blocks .range(block_height..) @@ -367,7 +367,7 @@ impl Chain { .collect() } - // Returns a new chain without the invalidated block or its descendants. 
+ /// Returns a new chain without the invalidated block or its descendants. pub fn invalidate_block( &self, block_hash: block::Hash, diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index 5b392e4a0b9..c95f5cf7070 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -216,6 +216,17 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { Ok(()) } +#[test] +fn invalidate_block_removes_block_and_descendants_from_chain() -> Result<()> { + let _init_guard = zebra_test::init(); + + for network in Network::iter() { + invalidate_block_removes_block_and_descendants_from_chain_for_network(network)?; + } + + Ok(()) +} + fn invalidate_block_removes_block_and_descendants_from_chain_for_network( network: Network, ) -> Result<()> { @@ -267,26 +278,36 @@ fn invalidate_block_removes_block_and_descendants_from_chain_for_network( ); let invalidated_blocks_state = &state.invalidated_blocks; - assert!( - invalidated_blocks_state.contains_key(&block2.hash()), - "invalidated blocks map should reference the hash of block2" - ); - let invalidated_blocks_state_descendants = - invalidated_blocks_state.get(&block2.hash()).unwrap(); + // Find an entry in the IndexMap that contains block2 hash + let (_, invalidated_blocks_state_descendants) = invalidated_blocks_state + .iter() + .find_map(|(height, blocks)| { + assert!( + blocks.iter().any(|block| block.hash == block2.hash()), + "invalidated_blocks should reference the hash of block2" + ); + + if blocks.iter().any(|block| block.hash == block2.hash()) { + Some((height, blocks)) + } else { + None + } + }) + .unwrap(); match network { Network::Mainnet => assert!( invalidated_blocks_state_descendants .iter() .any(|block| block.height == block::Height(653601)), - "invalidated descendants vec should contain block3" + "invalidated descendants should 
contain block3" ), Network::Testnet(_parameters) => assert!( invalidated_blocks_state_descendants .iter() .any(|block| block.height == block::Height(584001)), - "invalidated descendants vec should contain block3" + "invalidated descendants should contain block3" ), } @@ -294,16 +315,88 @@ fn invalidate_block_removes_block_and_descendants_from_chain_for_network( } #[test] -fn invalidate_block_removes_block_and_descendants_from_chain() -> Result<()> { +fn reconsider_block_and_reconsider_chain_correctly_reconsiders_blocks_and_descendants() -> Result<()> +{ let _init_guard = zebra_test::init(); for network in Network::iter() { - invalidate_block_removes_block_and_descendants_from_chain_for_network(network)?; + reconsider_block_inserts_block_and_descendants_into_chain_for_network(network.clone())?; } Ok(()) } +fn reconsider_block_inserts_block_and_descendants_into_chain_for_network( + network: Network, +) -> Result<()> { + let block1: Arc = Arc::new(network.test_block(653599, 583999).unwrap()); + let block2 = block1.make_fake_child().set_work(10); + let block3 = block2.make_fake_child().set_work(1); + + let mut state = NonFinalizedState::new(&network); + let finalized_state = FinalizedState::new( + &Config::ephemeral(), + &network, + #[cfg(feature = "elasticsearch")] + false, + ); + + let fake_value_pool = ValueBalance::::fake_populated_pool(); + finalized_state.set_finalized_value_pool(fake_value_pool); + + state.commit_new_chain(block1.clone().prepare(), &finalized_state)?; + state.commit_block(block2.clone().prepare(), &finalized_state)?; + state.commit_block(block3.clone().prepare(), &finalized_state)?; + + assert_eq!( + state + .best_chain() + .unwrap_or(&Arc::new(Chain::default())) + .blocks + .len(), + 3 + ); + + // Invalidate block2 to update the invalidated_blocks NonFinalizedState + state.invalidate_block(block2.hash()); + + // Perform checks to ensure the invalidated_block and descendants were added to the invalidated_block + // state + let 
post_invalidated_chain = state.best_chain().unwrap(); + + assert_eq!(post_invalidated_chain.blocks.len(), 1); + assert!( + post_invalidated_chain.contains_block_hash(block1.hash()), + "the new modified chain should contain block1" + ); + + assert!( + !post_invalidated_chain.contains_block_hash(block2.hash()), + "the new modified chain should not contain block2" + ); + assert!( + !post_invalidated_chain.contains_block_hash(block3.hash()), + "the new modified chain should not contain block3" + ); + + // Reconsider block2 and check that both block2 and block3 were `reconsidered` into the + // best chain + state.reconsider_block(block2.hash(), &finalized_state.db)?; + + let best_chain = state.best_chain().unwrap(); + + assert!( + best_chain.contains_block_hash(block2.hash()), + "the best chain should again contain block2" + ); + assert!( + best_chain.contains_block_hash(block3.hash()), + "the best chain should again contain block3" + ); + + Ok(()) +} + #[test] // This test gives full coverage for `take_chain_if` fn commit_block_extending_best_chain_doesnt_drop_worst_chains() -> Result<()> { From 9dfdddeb8841bd9c3b456a28a47e361d43f315d5 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 25 Mar 2025 21:10:38 +0200 Subject: [PATCH 119/245] fix(rpc): getblocktemplate coinbase outputs order (#9272) * change the output order in getblocktemplate zcashd mode * add a test for coinbase output order * update snapshots --- .../get_block_template.rs | 32 ++++++++++++------- ...template_basic.coinbase_tx@mainnet_10.snap | 8 ++--- ...template_basic.coinbase_tx@testnet_10.snap | 8 ++--- .../get_block_template_basic@mainnet_10.snap | 6 ++-- .../get_block_template_basic@testnet_10.snap | 6 ++-- ...late_long_poll.coinbase_tx@mainnet_10.snap | 8 ++--- ...late_long_poll.coinbase_tx@testnet_10.snap | 8 ++--- ...t_block_template_long_poll@mainnet_10.snap | 6 ++-- ...t_block_template_long_poll@testnet_10.snap | 6 ++-- zebra-rpc/src/methods/tests/vectors.rs | 28 +++++++++++++--- 10 files 
changed, 72 insertions(+), 44 deletions(-) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 2a8a9f60109..34495fbf4bb 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -394,24 +394,34 @@ fn combine_coinbase_outputs( miner_reward: Amount, like_zcashd: bool, ) -> Vec<(Amount, transparent::Script)> { - // Combine all the funding streams with the miner reward. - let mut coinbase_outputs: Vec<(Amount, &transparent::Address)> = funding_streams - .into_iter() - .map(|(_receiver, (amount, address))| (amount, address)) - .collect(); - coinbase_outputs.push((miner_reward, miner_address)); - - let mut coinbase_outputs: Vec<(Amount, transparent::Script)> = coinbase_outputs - .iter() - .map(|(amount, address)| (*amount, address.create_script_from_address())) - .collect(); + // Collect all the funding streams and convert them to outputs. + let funding_streams_outputs: Vec<(Amount, &transparent::Address)> = + funding_streams + .into_iter() + .map(|(_receiver, (amount, address))| (amount, address)) + .collect(); + + let mut coinbase_outputs: Vec<(Amount, transparent::Script)> = + funding_streams_outputs + .iter() + .map(|(amount, address)| (*amount, address.create_script_from_address())) + .collect(); // The HashMap returns funding streams in an arbitrary order, // but Zebra's snapshot tests expect the same order every time. 
if like_zcashd { // zcashd sorts outputs in serialized data order, excluding the length field coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); + + // The miner reward is always the first output independent of the sort order + coinbase_outputs.insert( + 0, + (miner_reward, miner_address.create_script_from_address()), + ); } else { + // Unlike zcashd, in Zebra the miner reward is part of the sorting + coinbase_outputs.push((miner_reward, miner_address.create_script_from_address())); + // Zebra sorts by amount then script. // // Since the sort is stable, equal amounts will remain sorted by script. diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap index 09651b869b7..633e24938fa 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap @@ -13,6 +13,10 @@ V4( ), ], outputs: [ + Output( + value: 250000000, + lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), + ), Output( value: 21875000, lock_script: Script("a91469a9f95a98fe581b6eb52841ef4806dc4402eb9087"), @@ -21,10 +25,6 @@ V4( value: 25000000, lock_script: Script("a914931fec54c1fea86e574462cc32013f5400b8912987"), ), - Output( - value: 250000000, - lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), - ), Output( value: 15625000, lock_script: Script("a914d45cb1adffb5215a42720532a076f02c7c778c9087"), diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap index 1c46a9f0147..25ccde85fea 100644 --- 
a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap @@ -13,6 +13,10 @@ V4( ), ], outputs: [ + Output( + value: 250000000, + lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), + ), Output( value: 15625000, lock_script: Script("a9140c0bcca02f3cba01a5d7423ac3903d40586399eb87"), @@ -25,10 +29,6 @@ V4( value: 25000000, lock_script: Script("a91471e1df05024288a00802de81e08c437859586c8787"), ), - Output( - value: 250000000, - lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), - ), ], lock_time: Height(Height(0)), expiry_height: Height(1842421), diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap index 4ae7a20bbcd..7f5399d40f2 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap @@ -12,15 +12,15 @@ expression: block_template "lightclientroothash": "02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a", "finalsaplingroothash": "02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a", "defaultroots": { - "merkleroot": "e049ed10466f566a045702ad712bbb596c6863cd08cdb4646da749b2287bc219", + "merkleroot": "7df8ce149b9beafc09b649c2ccfd866ce96d1b94331d9f7f3728cbefa36431f6", "chainhistoryroot": "94470fa66ebd1a5fdb109a5aa3f3204f14de3a42135e71aa7f4c44055847e0b5", "authdataroot": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "blockcommitmentshash": "02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a" }, "transactions": [], "coinbasetxn": { - "data": 
"0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff050341be1900ffffffff0438c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b891298780b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad87286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000041be19000000000000000000000000", - "hash": "e049ed10466f566a045702ad712bbb596c6863cd08cdb4646da749b2287bc219", + "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff050341be1900ffffffff0480b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad8738c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000041be19000000000000000000000000", + "hash": "7df8ce149b9beafc09b649c2ccfd866ce96d1b94331d9f7f3728cbefa36431f6", "authdigest": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "depends": [], "fee": 0, diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap index ad2f5b99bf1..aebd8e85443 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap @@ -12,15 +12,15 @@ expression: block_template "lightclientroothash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792", "finalsaplingroothash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792", "defaultroots": { - "merkleroot": "f1f2db76c33c4a81f799d0c5cfba83c99c20cc8c51958a615d410fe7fbf92b34", + "merkleroot": "e919e7a3325b82b571b3658408118c14f2688ea254f9cb54d354a8ff17bd5b81", "chainhistoryroot": 
"03bc75f00c307a05aed2023819e18c2672cbe15fbd3200944997def141967387", "authdataroot": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "blockcommitmentshash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792" }, "transactions": [], "coinbasetxn": { - "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503f51c1c00ffffffff04286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c878780b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad8700000000f51c1c000000000000000000000000", - "hash": "f1f2db76c33c4a81f799d0c5cfba83c99c20cc8c51958a615d410fe7fbf92b34", + "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503f51c1c00ffffffff0480b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad87286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c878700000000f51c1c000000000000000000000000", + "hash": "e919e7a3325b82b571b3658408118c14f2688ea254f9cb54d354a8ff17bd5b81", "authdigest": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "depends": [], "fee": 0, diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap index 09651b869b7..633e24938fa 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap @@ -13,6 +13,10 @@ V4( ), ], outputs: [ + Output( + value: 250000000, + lock_script: 
Script("a914adadadadadadadadadadadadadadadadadadadad87"), + ), Output( value: 21875000, lock_script: Script("a91469a9f95a98fe581b6eb52841ef4806dc4402eb9087"), @@ -21,10 +25,6 @@ V4( value: 25000000, lock_script: Script("a914931fec54c1fea86e574462cc32013f5400b8912987"), ), - Output( - value: 250000000, - lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), - ), Output( value: 15625000, lock_script: Script("a914d45cb1adffb5215a42720532a076f02c7c778c9087"), diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap index 1c46a9f0147..25ccde85fea 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap @@ -13,6 +13,10 @@ V4( ), ], outputs: [ + Output( + value: 250000000, + lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), + ), Output( value: 15625000, lock_script: Script("a9140c0bcca02f3cba01a5d7423ac3903d40586399eb87"), @@ -25,10 +29,6 @@ V4( value: 25000000, lock_script: Script("a91471e1df05024288a00802de81e08c437859586c8787"), ), - Output( - value: 250000000, - lock_script: Script("a914adadadadadadadadadadadadadadadadadadadad87"), - ), ], lock_time: Height(Height(0)), expiry_height: Height(1842421), diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap index 0aeec89e761..29df204734b 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap @@ -12,15 +12,15 @@ expression: block_template "lightclientroothash": 
"02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a", "finalsaplingroothash": "02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a", "defaultroots": { - "merkleroot": "e049ed10466f566a045702ad712bbb596c6863cd08cdb4646da749b2287bc219", + "merkleroot": "7df8ce149b9beafc09b649c2ccfd866ce96d1b94331d9f7f3728cbefa36431f6", "chainhistoryroot": "94470fa66ebd1a5fdb109a5aa3f3204f14de3a42135e71aa7f4c44055847e0b5", "authdataroot": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "blockcommitmentshash": "02990723c6b62a724651322d141b4a72a4ffd66518167d809badbd5117d5518a" }, "transactions": [], "coinbasetxn": { - "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff050341be1900ffffffff0438c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b891298780b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad87286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000041be19000000000000000000000000", - "hash": "e049ed10466f566a045702ad712bbb596c6863cd08cdb4646da749b2287bc219", + "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff050341be1900ffffffff0480b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad8738c94d010000000017a91469a9f95a98fe581b6eb52841ef4806dc4402eb908740787d010000000017a914931fec54c1fea86e574462cc32013f5400b8912987286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c90870000000041be19000000000000000000000000", + "hash": "7df8ce149b9beafc09b649c2ccfd866ce96d1b94331d9f7f3728cbefa36431f6", "authdigest": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "depends": [], "fee": 0, diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap index b970563ad13..e8670f39828 100644 --- 
a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap @@ -12,15 +12,15 @@ expression: block_template "lightclientroothash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792", "finalsaplingroothash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792", "defaultroots": { - "merkleroot": "f1f2db76c33c4a81f799d0c5cfba83c99c20cc8c51958a615d410fe7fbf92b34", + "merkleroot": "e919e7a3325b82b571b3658408118c14f2688ea254f9cb54d354a8ff17bd5b81", "chainhistoryroot": "03bc75f00c307a05aed2023819e18c2672cbe15fbd3200944997def141967387", "authdataroot": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "blockcommitmentshash": "3b25791957f9383b6ce851d728a78309664d5d7a82ca87b6a9125a2f2c529792" }, "transactions": [], "coinbasetxn": { - "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503f51c1c00ffffffff04286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c878780b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad8700000000f51c1c000000000000000000000000", - "hash": "f1f2db76c33c4a81f799d0c5cfba83c99c20cc8c51958a615d410fe7fbf92b34", + "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503f51c1c00ffffffff0480b2e60e0000000017a914adadadadadadadadadadadadadadadadadadadad87286bee000000000017a9140c0bcca02f3cba01a5d7423ac3903d40586399eb8738c94d010000000017a9144e3f0d9a33a2721604cbae2de8d9171e21f8fbe48740787d010000000017a91471e1df05024288a00802de81e08c437859586c878700000000f51c1c000000000000000000000000", + "hash": "e919e7a3325b82b571b3658408118c14f2688ea254f9cb54d354a8ff17bd5b81", "authdigest": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "depends": 
[], "fee": 0, diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 315c2385d94..b21d7388b08 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1601,7 +1601,6 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { amount::NonNegative, block::{Hash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, chain_sync_status::MockSyncStatus, - parameters::NetworkKind, serialization::DateTime32, transaction::{zip317, VerifiedUnminedTx}, work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256}, @@ -1633,15 +1632,21 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); - let network = NetworkKind::Mainnet; + let network = Network::Mainnet; let miner_address = match use_p2pkh { - false => Some(transparent::Address::from_script_hash(network, [0x7e; 20])), - true => Some(transparent::Address::from_pub_key_hash(network, [0x7e; 20])), + false => Some(transparent::Address::from_script_hash( + network.kind(), + [0x7e; 20], + )), + true => Some(transparent::Address::from_pub_key_hash( + network.kind(), + [0x7e; 20], + )), }; #[allow(clippy::unnecessary_struct_initialization)] let mining_config = crate::config::mining::Config { - miner_address, + miner_address: miner_address.clone(), extra_coinbase_data: None, debug_like_zcashd: true, // TODO: Use default field values when optional features are enabled in tests #8183 @@ -1727,6 +1732,19 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") }; + let coinbase_transaction = + Transaction::zcash_deserialize(get_block_template.coinbase_txn.data.as_ref()) + .expect("coinbase transaction data should be deserializable"); + + assert_eq!( + coinbase_transaction + .outputs() + .first() + .unwrap() + 
.address(&network), + miner_address + ); + assert_eq!( get_block_template.capabilities, GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD.to_vec() From ffb37bf362edc12f7f19a42b207cf5ab611a7d67 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 26 Mar 2025 11:41:19 -0400 Subject: [PATCH 120/245] Comments out `disconnects_from_misbehaving_peers` tess (#9306) Co-authored-by: Gustavo Valverde --- zebrad/tests/acceptance.rs | 322 ++++++++++++++++++------------------- 1 file changed, 161 insertions(+), 161 deletions(-) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 1201bcd6e04..797d5440204 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3719,175 +3719,175 @@ fn check_no_git_refs_in_cargo_lock() { } } -/// Check that Zebra will disconnect from misbehaving peers. -#[tokio::test] -#[cfg(all(feature = "getblocktemplate-rpcs", not(target_os = "windows")))] -async fn disconnects_from_misbehaving_peers() -> Result<()> { - use std::sync::{atomic::AtomicBool, Arc}; +// /// Check that Zebra will disconnect from misbehaving peers. 
+// #[tokio::test] +// #[cfg(all(feature = "getblocktemplate-rpcs", not(target_os = "windows")))] +// async fn disconnects_from_misbehaving_peers() -> Result<()> { +// use std::sync::{atomic::AtomicBool, Arc}; + +// use common::regtest::MiningRpcMethods; +// use zebra_chain::parameters::testnet::{self, ConfiguredActivationHeights}; +// use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; + +// let _init_guard = zebra_test::init(); +// let network = testnet::Parameters::build() +// .with_activation_heights(ConfiguredActivationHeights { +// canopy: Some(1), +// nu5: Some(2), +// nu6: Some(3), +// ..Default::default() +// }) +// .with_slow_start_interval(Height::MIN) +// .with_disable_pow(true) +// .to_network(); + +// let test_type = LaunchWithEmptyState { +// launches_lightwalletd: false, +// }; +// let test_name = "disconnects_from_misbehaving_peers_test"; + +// if !common::launch::can_spawn_zebrad_for_test_type(test_name, test_type, false) { +// tracing::warn!("skipping disconnects_from_misbehaving_peers test"); +// return Ok(()); +// } + +// // Get the zebrad config +// let mut config = test_type +// .zebrad_config(test_name, false, None, &network) +// .expect("already checked config")?; + +// config.network.cache_dir = false.into(); +// config.network.listen_addr = format!("127.0.0.1:{}", random_known_port()).parse()?; + +// let rpc_listen_addr = config.rpc.listen_addr.unwrap(); +// let rpc_client_1 = RpcRequestClient::new(rpc_listen_addr); + +// tracing::info!( +// ?rpc_listen_addr, +// network_listen_addr = ?config.network.listen_addr, +// "starting a zebrad child on incompatible custom Testnet" +// ); + +// let is_finished = Arc::new(AtomicBool::new(false)); + +// { +// let is_finished = Arc::clone(&is_finished); +// let config = config.clone(); +// let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); +// tokio::task::spawn_blocking(move || -> Result<()> { +// let mut zebrad_child = testdir()? 
+// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .bypass_test_capture(true) +// .with_timeout(test_type.zebrad_timeout()) +// .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); + +// while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { +// zebrad_child.wait_for_stdout_line(Some("zebraA1".to_string())); +// } + +// Ok(()) +// }); +// } + +// config.network.initial_testnet_peers = [config.network.listen_addr.to_string()].into(); +// config.network.network = Network::new_default_testnet(); +// config.network.listen_addr = "127.0.0.1:0".parse()?; +// config.rpc.listen_addr = Some(format!("127.0.0.1:{}", random_known_port()).parse()?); + +// let rpc_listen_addr = config.rpc.listen_addr.unwrap(); +// let rpc_client_2 = RpcRequestClient::new(rpc_listen_addr); + +// tracing::info!( +// ?rpc_listen_addr, +// network_listen_addr = ?config.network.listen_addr, +// "starting a zebrad child on the default Testnet" +// ); + +// { +// let is_finished = Arc::clone(&is_finished); +// tokio::task::spawn_blocking(move || -> Result<()> { +// let (zebrad_failure_messages, zebrad_ignore_messages) = +// test_type.zebrad_failure_messages(); +// let mut zebrad_child = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? 
+// .bypass_test_capture(true) +// .with_timeout(test_type.zebrad_timeout()) +// .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); + +// while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { +// zebrad_child.wait_for_stdout_line(Some("zebraB2".to_string())); +// } + +// Ok(()) +// }); +// } + +// tracing::info!("waiting for zebrad nodes to connect"); + +// // Wait a few seconds for Zebra to start up and make outbound peer connections +// tokio::time::sleep(LAUNCH_DELAY).await; + +// tracing::info!("checking for peers"); + +// // Call `getpeerinfo` to check that the zebrad instances have connected +// let peer_info: Vec = rpc_client_2 +// .json_result_from_call("getpeerinfo", "[]") +// .await +// .map_err(|err| eyre!(err))?; + +// assert!(!peer_info.is_empty(), "should have outbound peer"); + +// tracing::info!( +// ?peer_info, +// "found peer connection, committing genesis block" +// ); + +// let genesis_block = network.block_parsed_iter().next().unwrap(); +// rpc_client_1.submit_block(genesis_block.clone()).await?; +// rpc_client_2.submit_block(genesis_block).await?; + +// // Call the `generate` method to mine blocks in the zebrad instance where PoW is disabled +// tracing::info!("committed genesis block, mining blocks with invalid PoW"); +// tokio::time::sleep(Duration::from_secs(2)).await; + +// rpc_client_1.call("generate", "[500]").await?; - use common::regtest::MiningRpcMethods; - use zebra_chain::parameters::testnet::{self, ConfiguredActivationHeights}; - use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; +// tracing::info!("wait for misbehavior messages to flush into address updater channel"); - let _init_guard = zebra_test::init(); - let network = testnet::Parameters::build() - .with_activation_heights(ConfiguredActivationHeights { - canopy: Some(1), - nu5: Some(2), - nu6: Some(3), - ..Default::default() - }) - .with_slow_start_interval(Height::MIN) - .with_disable_pow(true) - .to_network(); +// 
tokio::time::sleep(Duration::from_secs(30)).await; - let test_type = LaunchWithEmptyState { - launches_lightwalletd: false, - }; - let test_name = "disconnects_from_misbehaving_peers_test"; - - if !common::launch::can_spawn_zebrad_for_test_type(test_name, test_type, false) { - tracing::warn!("skipping disconnects_from_misbehaving_peers test"); - return Ok(()); - } +// tracing::info!("calling getpeerinfo to confirm Zebra has dropped the peer connection"); + +// // Call `getpeerinfo` to check that the zebrad instances have disconnected +// for i in 0..600 { +// let peer_info: Vec = rpc_client_2 +// .json_result_from_call("getpeerinfo", "[]") +// .await +// .map_err(|err| eyre!(err))?; - // Get the zebrad config - let mut config = test_type - .zebrad_config(test_name, false, None, &network) - .expect("already checked config")?; +// if peer_info.is_empty() { +// break; +// } else if i % 10 == 0 { +// tracing::info!(?peer_info, "has not yet disconnected from misbehaving peer"); +// } - config.network.cache_dir = false.into(); - config.network.listen_addr = format!("127.0.0.1:{}", random_known_port()).parse()?; +// rpc_client_1.call("generate", "[1]").await?; - let rpc_listen_addr = config.rpc.listen_addr.unwrap(); - let rpc_client_1 = RpcRequestClient::new(rpc_listen_addr); +// tokio::time::sleep(Duration::from_secs(1)).await; +// } - tracing::info!( - ?rpc_listen_addr, - network_listen_addr = ?config.network.listen_addr, - "starting a zebrad child on incompatible custom Testnet" - ); +// let peer_info: Vec = rpc_client_2 +// .json_result_from_call("getpeerinfo", "[]") +// .await +// .map_err(|err| eyre!(err))?; - let is_finished = Arc::new(AtomicBool::new(false)); +// tracing::info!(?peer_info, "called getpeerinfo"); - { - let is_finished = Arc::clone(&is_finished); - let config = config.clone(); - let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); - tokio::task::spawn_blocking(move || -> Result<()> { - let mut zebrad_child = 
testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .bypass_test_capture(true) - .with_timeout(test_type.zebrad_timeout()) - .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); - - while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { - zebrad_child.wait_for_stdout_line(Some("zebraA1".to_string())); - } +// assert!(peer_info.is_empty(), "should have no peers"); - Ok(()) - }); - } - - config.network.initial_testnet_peers = [config.network.listen_addr.to_string()].into(); - config.network.network = Network::new_default_testnet(); - config.network.listen_addr = "127.0.0.1:0".parse()?; - config.rpc.listen_addr = Some(format!("127.0.0.1:{}", random_known_port()).parse()?); - - let rpc_listen_addr = config.rpc.listen_addr.unwrap(); - let rpc_client_2 = RpcRequestClient::new(rpc_listen_addr); - - tracing::info!( - ?rpc_listen_addr, - network_listen_addr = ?config.network.listen_addr, - "starting a zebrad child on the default Testnet" - ); - - { - let is_finished = Arc::clone(&is_finished); - tokio::task::spawn_blocking(move || -> Result<()> { - let (zebrad_failure_messages, zebrad_ignore_messages) = - test_type.zebrad_failure_messages(); - let mut zebrad_child = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? 
- .bypass_test_capture(true) - .with_timeout(test_type.zebrad_timeout()) - .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); - - while !is_finished.load(std::sync::atomic::Ordering::SeqCst) { - zebrad_child.wait_for_stdout_line(Some("zebraB2".to_string())); - } - - Ok(()) - }); - } - - tracing::info!("waiting for zebrad nodes to connect"); - - // Wait a few seconds for Zebra to start up and make outbound peer connections - tokio::time::sleep(LAUNCH_DELAY).await; +// is_finished.store(true, std::sync::atomic::Ordering::SeqCst); - tracing::info!("checking for peers"); - - // Call `getpeerinfo` to check that the zebrad instances have connected - let peer_info: Vec = rpc_client_2 - .json_result_from_call("getpeerinfo", "[]") - .await - .map_err(|err| eyre!(err))?; - - assert!(!peer_info.is_empty(), "should have outbound peer"); - - tracing::info!( - ?peer_info, - "found peer connection, committing genesis block" - ); - - let genesis_block = network.block_parsed_iter().next().unwrap(); - rpc_client_1.submit_block(genesis_block.clone()).await?; - rpc_client_2.submit_block(genesis_block).await?; - - // Call the `generate` method to mine blocks in the zebrad instance where PoW is disabled - tracing::info!("committed genesis block, mining blocks with invalid PoW"); - tokio::time::sleep(Duration::from_secs(2)).await; - - rpc_client_1.call("generate", "[500]").await?; - - tracing::info!("wait for misbehavior messages to flush into address updater channel"); - - tokio::time::sleep(Duration::from_secs(30)).await; - - tracing::info!("calling getpeerinfo to confirm Zebra has dropped the peer connection"); - - // Call `getpeerinfo` to check that the zebrad instances have disconnected - for i in 0..600 { - let peer_info: Vec = rpc_client_2 - .json_result_from_call("getpeerinfo", "[]") - .await - .map_err(|err| eyre!(err))?; - - if peer_info.is_empty() { - break; - } else if i % 10 == 0 { - tracing::info!(?peer_info, "has not yet disconnected from 
misbehaving peer"); - } - - rpc_client_1.call("generate", "[1]").await?; - - tokio::time::sleep(Duration::from_secs(1)).await; - } - - let peer_info: Vec = rpc_client_2 - .json_result_from_call("getpeerinfo", "[]") - .await - .map_err(|err| eyre!(err))?; - - tracing::info!(?peer_info, "called getpeerinfo"); - - assert!(peer_info.is_empty(), "should have no peers"); - - is_finished.store(true, std::sync::atomic::Ordering::SeqCst); - - Ok(()) -} +// Ok(()) +// } From 26c569e29fcc3792191f9770c3018d6d63e028f7 Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Fri, 28 Mar 2025 07:41:01 +0000 Subject: [PATCH 121/245] rpc: Permit JSON-RPC IDs to be non-strings (#9341) Fixes ZcashFoundation/zebra#9314. --- zebra-rpc/src/server/rpc_call_compatibility.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs index 2bd22b72924..654fe710122 100644 --- a/zebra-rpc/src/server/rpc_call_compatibility.rs +++ b/zebra-rpc/src/server/rpc_call_compatibility.rs @@ -51,13 +51,18 @@ impl<'a> RpcServiceT<'a> for FixRpcResponseMiddleware { let json: serde_json::Value = serde_json::from_str(response.into_parts().0.as_str()) .expect("response string should be valid json"); - let id = json["id"] - .as_str() - .expect("response json should have an id") - .to_string(); + let id = match &json["id"] { + serde_json::Value::Null => Some(jsonrpsee::types::Id::Null), + serde_json::Value::Number(n) => { + n.as_u64().map(jsonrpsee::types::Id::Number) + } + serde_json::Value::String(s) => Some(jsonrpsee::types::Id::Str(s.into())), + _ => None, + } + .expect("response json should have an id"); return MethodResponse::error( - jsonrpsee_types::Id::Str(id.into()), + id, ErrorObject::borrowed(new_error_code, "Invalid params", None), ); } From 49011f84602e4ebb0e33b632e8f37e75c47d3252 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 28 Mar 2025 06:52:05 -0400 Subject: [PATCH 122/245] 
change(state): Refactor format upgrades into trait (#9263) * Adds a new trait for disk format upgrades, implements in on a new struct, `PruneTrees`, and moves the logic for tree deduplication to the trait impl * refactors add subtrees format upgrade to use new trait * refactors fix tree keys, cache genesis roots, and value balance upgrades to use new trait * Applies suggestions from code review: - Avoids duplicate validation of format upgrades at startup when db is already upgraded, - Minor refactors - Doc fixes and cleanups * Applies suggestions from code review --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-state/src/constants.rs | 7 - zebra-state/src/lib.rs | 3 - .../finalized_state/disk_format/upgrade.rs | 307 ++++++------------ .../disk_format/upgrade/add_subtrees.rs | 221 +++++++------ .../disk_format/upgrade/no_migration.rs | 49 +++ .../disk_format/upgrade/prune_trees.rs | 145 +++++++++ .../upgrade/tree_keys_and_caches_upgrade.rs | 57 ++++ 7 files changed, 472 insertions(+), 317 deletions(-) create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/prune_trees.rs create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/tree_keys_and_caches_upgrade.rs diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index d4ebb5f4000..b7e9fd2859c 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -78,13 +78,6 @@ pub fn state_database_format_version_in_code() -> Version { } } -/// Returns the highest database version that modifies the subtree index format. -/// -/// This version is used by tests to wait for the subtree upgrade to finish. 
-pub fn latest_version_for_adding_subtrees() -> Version { - Version::parse("25.2.2").expect("Hardcoded version string should be valid.") -} - /// The name of the file containing the minor and patch database versions. /// /// Use [`Config::version_file_path()`] to get the path to this file. diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 461b2d4b19f..bd8f4d9b26c 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -81,9 +81,6 @@ pub use service::{ init_test, init_test_services, }; -#[cfg(any(test, feature = "proptest-impl"))] -pub use constants::latest_version_for_adding_subtrees; - #[cfg(any(test, feature = "proptest-impl"))] pub use config::hidden::{ write_database_format_version_to_disk, write_state_database_format_version_to_disk, diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index 93625a848dc..146c5bc1b7c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -6,7 +6,7 @@ use std::{ thread::{self, JoinHandle}, }; -use crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender, TryRecvError}; +use crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender}; use semver::Version; use tracing::Span; @@ -20,14 +20,14 @@ use zebra_chain::{ use DbFormatChange::*; -use crate::{ - constants::latest_version_for_adding_subtrees, - service::finalized_state::{DiskWriteBatch, ZebraDb}, -}; +use crate::service::finalized_state::ZebraDb; pub(crate) mod add_subtrees; pub(crate) mod cache_genesis_roots; pub(crate) mod fix_tree_key_type; +pub(crate) mod no_migration; +pub(crate) mod prune_trees; +pub(crate) mod tree_keys_and_caches_upgrade; #[cfg(not(feature = "indexer"))] pub(crate) mod drop_tx_locs_by_spends; @@ -35,6 +35,68 @@ pub(crate) mod drop_tx_locs_by_spends; #[cfg(feature = "indexer")] pub(crate) mod track_tx_locs_by_spends; +/// Defines 
method signature for running disk format upgrades. +pub trait DiskFormatUpgrade { + /// Returns the version at which this upgrade is applied. + fn version(&self) -> Version; + + /// Returns the description of this upgrade. + fn description(&self) -> &'static str; + + /// Runs disk format upgrade. + fn run( + &self, + initial_tip_height: Height, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result<(), CancelFormatChange>; + + /// Check that state has been upgraded to this format correctly. + /// + /// The outer `Result` indicates whether the validation was cancelled (due to e.g. node shutdown). + /// The inner `Result` indicates whether the validation itself failed or not. + fn validate( + &self, + _db: &ZebraDb, + _cancel_receiver: &Receiver, + ) -> Result, CancelFormatChange> { + Ok(Ok(())) + } + + /// Prepare for disk format upgrade. + fn prepare( + &self, + _initial_tip_height: Height, + _upgrade_db: &ZebraDb, + _cancel_receiver: &Receiver, + _older_disk_version: &Version, + ) -> Result<(), CancelFormatChange> { + Ok(()) + } + + /// Returns true if the [`DiskFormatUpgrade`] needs to run a migration on existing data in the db. + fn needs_migration(&self) -> bool { + true + } +} + +fn format_upgrades( + min_version: Option, +) -> impl Iterator> { + let min_version = move || min_version.clone().unwrap_or(Version::new(0, 0, 0)); + + // Note: Disk format upgrades must be run in order of database version. + ([ + Box::new(prune_trees::PruneTrees), + Box::new(add_subtrees::AddSubtrees), + Box::new(tree_keys_and_caches_upgrade::FixTreeKeyTypeAndCacheGenesisRoots), + // Value balance upgrade + Box::new(no_migration::NoMigration::new(26, 0, 0)), + ] as [Box; 4]) + .into_iter() + .filter(move |upgrade| upgrade.version() > min_version()) +} + /// The kind of database format change or validity check we're performing. 
#[derive(Clone, Debug, Eq, PartialEq)] pub enum DbFormatChange { @@ -474,155 +536,31 @@ impl DbFormatChange { return Ok(()); }; - // Note commitment tree de-duplication database upgrade task. - - let version_for_pruning_trees = - Version::parse("25.1.1").expect("Hardcoded version string should be valid."); - - // Check if we need to prune the note commitment trees in the database. - if older_disk_version < &version_for_pruning_trees { - let timer = CodeTimer::start(); - - // Prune duplicate Sapling note commitment trees. - - // The last tree we checked. - let mut last_tree = db - .sapling_tree_by_height(&Height(0)) - .expect("Checked above that the genesis block is in the database."); - - // Run through all the possible duplicate trees in the finalized chain. - // The block after genesis is the first possible duplicate. - for (height, tree) in db.sapling_tree_by_height_range(Height(1)..=initial_tip_height) { - // Return early if there is a cancel signal. - if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); - } - - // Delete any duplicate trees. - if tree == last_tree { - let mut batch = DiskWriteBatch::new(); - batch.delete_sapling_tree(db, &height); - db.write_batch(batch) - .expect("Deleting Sapling note commitment trees should always succeed."); - } - - // Compare against the last tree to find unique trees. - last_tree = tree; - } - - // Prune duplicate Orchard note commitment trees. - - // The last tree we checked. - let mut last_tree = db - .orchard_tree_by_height(&Height(0)) - .expect("Checked above that the genesis block is in the database."); + // Apply or validate format upgrades + for upgrade in format_upgrades(Some(older_disk_version.clone())) { + if upgrade.needs_migration() { + let timer = CodeTimer::start(); - // Run through all the possible duplicate trees in the finalized chain. - // The block after genesis is the first possible duplicate. 
- for (height, tree) in db.orchard_tree_by_height_range(Height(1)..=initial_tip_height) { - // Return early if there is a cancel signal. - if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); - } + upgrade.prepare(initial_tip_height, db, cancel_receiver, older_disk_version)?; + upgrade.run(initial_tip_height, db, cancel_receiver)?; - // Delete any duplicate trees. - if tree == last_tree { - let mut batch = DiskWriteBatch::new(); - batch.delete_orchard_tree(db, &height); - db.write_batch(batch) - .expect("Deleting Orchard note commitment trees should always succeed."); - } + // Before marking the state as upgraded, check that the upgrade completed successfully. + upgrade + .validate(db, cancel_receiver)? + .expect("db should be valid after upgrade"); - // Compare against the last tree to find unique trees. - last_tree = tree; + timer.finish(module_path!(), line!(), upgrade.description()); } - // Before marking the state as upgraded, check that the upgrade completed successfully. - Self::check_for_duplicate_trees(db, cancel_receiver)? - .expect("database format is valid after upgrade"); - // Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the // database is marked, so the upgrade MUST be complete at this point. - Self::mark_as_upgraded_to(db, &version_for_pruning_trees); - - timer.finish(module_path!(), line!(), "deduplicate trees upgrade"); - } - - // Note commitment subtree creation database upgrade task. - - let latest_version_for_adding_subtrees = latest_version_for_adding_subtrees(); - let first_version_for_adding_subtrees = - Version::parse("25.2.0").expect("Hardcoded version string should be valid."); - - // Check if we need to add or fix note commitment subtrees in the database. 
- if older_disk_version < &latest_version_for_adding_subtrees { - let timer = CodeTimer::start(); - - if older_disk_version >= &first_version_for_adding_subtrees { - // Clear previous upgrade data, because it was incorrect. - add_subtrees::reset(initial_tip_height, db, cancel_receiver)?; - } - - add_subtrees::run(initial_tip_height, db, cancel_receiver)?; - - // Before marking the state as upgraded, check that the upgrade completed successfully. - add_subtrees::subtree_format_validity_checks_detailed(db, cancel_receiver)? - .expect("database format is valid after upgrade"); - - // Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the - // database is marked, so the upgrade MUST be complete at this point. - Self::mark_as_upgraded_to(db, &latest_version_for_adding_subtrees); - - timer.finish(module_path!(), line!(), "add subtrees upgrade"); - } - - // Sprout & history tree key formats, and cached genesis tree roots database upgrades. - - let version_for_tree_keys_and_caches = - Version::parse("25.3.0").expect("Hardcoded version string should be valid."); - - // Check if we need to do the upgrade. - if older_disk_version < &version_for_tree_keys_and_caches { - let timer = CodeTimer::start(); - - // It shouldn't matter what order these are run in. - cache_genesis_roots::run(initial_tip_height, db, cancel_receiver)?; - fix_tree_key_type::run(initial_tip_height, db, cancel_receiver)?; - - // Before marking the state as upgraded, check that the upgrade completed successfully. - cache_genesis_roots::detailed_check(db, cancel_receiver)? - .expect("database format is valid after upgrade"); - fix_tree_key_type::detailed_check(db, cancel_receiver)? - .expect("database format is valid after upgrade"); - - // Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the - // database is marked, so the upgrade MUST be complete at this point. 
- Self::mark_as_upgraded_to(db, &version_for_tree_keys_and_caches); - - timer.finish(module_path!(), line!(), "tree keys and caches upgrade"); - } - - let version_for_upgrading_value_balance_format = - Version::parse("26.0.0").expect("hard-coded version string should be valid"); - - // Check if we need to do the upgrade. - if older_disk_version < &version_for_upgrading_value_balance_format { - Self::mark_as_upgraded_to(db, &version_for_upgrading_value_balance_format) + info!( + newer_running_version = ?upgrade.version(), + "Zebra automatically upgraded the database format" + ); + Self::mark_as_upgraded_to(db, &upgrade.version()); } - // # New Upgrades Usually Go Here - // - // New code goes above this comment! - // - // Run the latest format upgrade code after the other upgrades are complete, - // then mark the format as upgraded. The code should check `cancel_receiver` - // every time it runs its inner update loop. - - info!( - %newer_running_version, - "Zebra automatically upgraded the database format to:" - ); - Ok(()) } @@ -669,13 +607,9 @@ impl DbFormatChange { // Do the quick checks first, so we don't have to do this in every detailed check. results.push(Self::format_validity_checks_quick(db)); - results.push(Self::check_for_duplicate_trees(db, cancel_receiver)?); - results.push(add_subtrees::subtree_format_validity_checks_detailed( - db, - cancel_receiver, - )?); - results.push(cache_genesis_roots::detailed_check(db, cancel_receiver)?); - results.push(fix_tree_key_type::detailed_check(db, cancel_receiver)?); + for upgrade in format_upgrades(None) { + results.push(upgrade.validate(db, cancel_receiver)?); + } // The work is done in the functions we just called. timer.finish(module_path!(), line!(), "format_validity_checks_detailed()"); @@ -689,66 +623,6 @@ impl DbFormatChange { Ok(Ok(())) } - /// Check that note commitment trees were correctly de-duplicated. 
- // - // TODO: move this method into an deduplication upgrade module file, - // along with the upgrade code above. - #[allow(clippy::unwrap_in_result)] - fn check_for_duplicate_trees( - db: &ZebraDb, - cancel_receiver: &Receiver, - ) -> Result, CancelFormatChange> { - // Runtime test: make sure we removed all duplicates. - // We always run this test, even if the state has supposedly been upgraded. - let mut result = Ok(()); - - let mut prev_height = None; - let mut prev_tree = None; - for (height, tree) in db.sapling_tree_by_height_range(..) { - // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); - } - - if prev_tree == Some(tree.clone()) { - result = Err(format!( - "found duplicate sapling trees after running de-duplicate tree upgrade:\ - height: {height:?}, previous height: {:?}, tree root: {:?}", - prev_height.unwrap(), - tree.root() - )); - error!(?result); - } - - prev_height = Some(height); - prev_tree = Some(tree); - } - - let mut prev_height = None; - let mut prev_tree = None; - for (height, tree) in db.orchard_tree_by_height_range(..) { - // Return early if the format check is cancelled. - if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); - } - - if prev_tree == Some(tree.clone()) { - result = Err(format!( - "found duplicate orchard trees after running de-duplicate tree upgrade:\ - height: {height:?}, previous height: {:?}, tree root: {:?}", - prev_height.unwrap(), - tree.root() - )); - error!(?result); - } - - prev_height = Some(height); - prev_tree = Some(tree); - } - - Ok(result) - } - /// Mark a newly created database with the current format version. /// /// This should be called when a newly created database is opened. 
@@ -950,3 +824,12 @@ impl Drop for DbFormatChangeThreadHandle { } } } + +#[test] +fn format_upgrades_are_in_version_order() { + let mut last_version = Version::new(0, 0, 0); + for upgrade in format_upgrades(None) { + assert!(upgrade.version() > last_version); + last_version = upgrade.version(); + } +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs index 8f47e4f28d7..55ce1218139 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use crossbeam_channel::{Receiver, TryRecvError}; use hex_literal::hex; use itertools::Itertools; +use semver::Version; use tracing::instrument; use zebra_chain::{ @@ -17,90 +18,144 @@ use zebra_chain::{ }; use crate::service::finalized_state::{ - disk_format::upgrade::CancelFormatChange, DiskWriteBatch, ZebraDb, + disk_format::upgrade::{CancelFormatChange, DiskFormatUpgrade}, + DiskWriteBatch, ZebraDb, }; -/// Runs disk format upgrade for adding Sapling and Orchard note commitment subtrees to database. -/// -/// Trees are added to the database in reverse height order, so that wallets can sync correctly -/// while the upgrade is running. -/// -/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. -#[allow(clippy::unwrap_in_result)] -#[instrument(skip(upgrade_db, cancel_receiver))] -pub fn run( - initial_tip_height: Height, - upgrade_db: &ZebraDb, - cancel_receiver: &Receiver, -) -> Result<(), CancelFormatChange> { - // # Consensus - // - // Zebra stores exactly one note commitment tree for every block with sapling notes. - // (It also stores the empty note commitment tree for the genesis block, but we skip that.) - // - // The consensus rules limit blocks to less than 2^16 sapling and 2^16 orchard outputs. 
So a - // block can't complete multiple level 16 subtrees (or complete an entire subtree by itself). - // Currently, with 2MB blocks and v4/v5 sapling and orchard output sizes, the subtree index can - // increase by at most 1 every ~20 blocks. - // - // # Compatibility - // - // Because wallets search backwards from the chain tip, subtrees need to be added to the - // database in reverse height order. (Tip first, genesis last.) - // - // Otherwise, wallets that sync during the upgrade will be missing some notes. +/// Implements [`DiskFormatUpgrade`] for populating Sapling and Orchard note commitment subtrees. +pub struct AddSubtrees; - // Generate a list of sapling subtree inputs: previous and current trees, and their end heights. - let subtrees = upgrade_db - .sapling_tree_by_reversed_height_range(..=initial_tip_height) - // We need both the tree and its previous tree for each shielded block. - .tuple_windows() - // Because the iterator is reversed, the larger tree is first. - .map(|((end_height, tree), (prev_end_height, prev_tree))| { - (prev_end_height, prev_tree, end_height, tree) - }) - // Find new subtrees. - .filter(|(_prev_end_height, prev_tree, _end_height, tree)| { - tree.contains_new_subtree(prev_tree) - }); +impl DiskFormatUpgrade for AddSubtrees { + fn version(&self) -> Version { + Version::new(25, 2, 2) + } - for (prev_end_height, prev_tree, end_height, tree) in subtrees { - // Return early if the upgrade is cancelled. 
- if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); + fn description(&self) -> &'static str { + "add subtrees upgrade" + } + + fn prepare( + &self, + initial_tip_height: Height, + upgrade_db: &ZebraDb, + cancel_receiver: &Receiver, + older_disk_version: &Version, + ) -> Result<(), CancelFormatChange> { + let first_version_for_adding_subtrees = Version::new(25, 2, 0); + if older_disk_version >= &first_version_for_adding_subtrees { + // Clear previous upgrade data, because it was incorrect. + reset(initial_tip_height, upgrade_db, cancel_receiver)?; } - let subtree = - calculate_sapling_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree); - write_sapling_subtree(upgrade_db, subtree); + Ok(()) } - // Generate a list of orchard subtree inputs: previous and current trees, and their end heights. - let subtrees = upgrade_db - .orchard_tree_by_reversed_height_range(..=initial_tip_height) - // We need both the tree and its previous tree for each shielded block. - .tuple_windows() - // Because the iterator is reversed, the larger tree is first. - .map(|((end_height, tree), (prev_end_height, prev_tree))| { - (prev_end_height, prev_tree, end_height, tree) - }) - // Find new subtrees. - .filter(|(_prev_end_height, prev_tree, _end_height, tree)| { - tree.contains_new_subtree(prev_tree) - }); + /// Runs disk format upgrade for adding Sapling and Orchard note commitment subtrees to database. + /// + /// Trees are added to the database in reverse height order, so that wallets can sync correctly + /// while the upgrade is running. + /// + /// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. + fn run( + &self, + initial_tip_height: Height, + upgrade_db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result<(), CancelFormatChange> { + // # Consensus + // + // Zebra stores exactly one note commitment tree for every block with sapling notes. 
+ // (It also stores the empty note commitment tree for the genesis block, but we skip that.) + // + // The consensus rules limit blocks to less than 2^16 sapling and 2^16 orchard outputs. So a + // block can't complete multiple level 16 subtrees (or complete an entire subtree by itself). + // Currently, with 2MB blocks and v4/v5 sapling and orchard output sizes, the subtree index can + // increase by at most 1 every ~20 blocks. + // + // # Compatibility + // + // Because wallets search backwards from the chain tip, subtrees need to be added to the + // database in reverse height order. (Tip first, genesis last.) + // + // Otherwise, wallets that sync during the upgrade will be missing some notes. + + // Generate a list of sapling subtree inputs: previous and current trees, and their end heights. + let subtrees = upgrade_db + .sapling_tree_by_reversed_height_range(..=initial_tip_height) + // We need both the tree and its previous tree for each shielded block. + .tuple_windows() + // Because the iterator is reversed, the larger tree is first. + .map(|((end_height, tree), (prev_end_height, prev_tree))| { + (prev_end_height, prev_tree, end_height, tree) + }) + // Find new subtrees. + .filter(|(_prev_end_height, prev_tree, _end_height, tree)| { + tree.contains_new_subtree(prev_tree) + }); + + for (prev_end_height, prev_tree, end_height, tree) in subtrees { + // Return early if the upgrade is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } - for (prev_end_height, prev_tree, end_height, tree) in subtrees { - // Return early if the upgrade is cancelled. 
- if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { - return Err(CancelFormatChange); + let subtree = + calculate_sapling_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree); + write_sapling_subtree(upgrade_db, subtree); } - let subtree = - calculate_orchard_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree); - write_orchard_subtree(upgrade_db, subtree); + // Generate a list of orchard subtree inputs: previous and current trees, and their end heights. + let subtrees = upgrade_db + .orchard_tree_by_reversed_height_range(..=initial_tip_height) + // We need both the tree and its previous tree for each shielded block. + .tuple_windows() + // Because the iterator is reversed, the larger tree is first. + .map(|((end_height, tree), (prev_end_height, prev_tree))| { + (prev_end_height, prev_tree, end_height, tree) + }) + // Find new subtrees. + .filter(|(_prev_end_height, prev_tree, _end_height, tree)| { + tree.contains_new_subtree(prev_tree) + }); + + for (prev_end_height, prev_tree, end_height, tree) in subtrees { + // Return early if the upgrade is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + let subtree = + calculate_orchard_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree); + write_orchard_subtree(upgrade_db, subtree); + } + + Ok(()) } - Ok(()) + #[allow(clippy::unwrap_in_result)] + fn validate( + &self, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result, CancelFormatChange> { + // This is redundant in some code paths, but not in others. But it's quick anyway. + let quick_result = subtree_format_calculation_pre_checks(db); + + // Check the entire format before returning any errors. 
+ let sapling_result = check_sapling_subtrees(db, cancel_receiver)?; + let orchard_result = check_orchard_subtrees(db, cancel_receiver)?; + + if quick_result.is_err() || sapling_result.is_err() || orchard_result.is_err() { + let err = Err(format!( + "missing or invalid subtree(s): \ + quick: {quick_result:?}, sapling: {sapling_result:?}, orchard: {orchard_result:?}" + )); + warn!(?err); + return Ok(err); + } + + Ok(Ok(())) + } } /// Reset data from previous upgrades. This data can be complete or incomplete. @@ -304,30 +359,6 @@ fn quick_check_orchard_subtrees(db: &ZebraDb) -> Result<(), &'static str> { Ok(()) } -/// Check that note commitment subtrees were correctly added. -pub fn subtree_format_validity_checks_detailed( - db: &ZebraDb, - cancel_receiver: &Receiver, -) -> Result, CancelFormatChange> { - // This is redundant in some code paths, but not in others. But it's quick anyway. - let quick_result = subtree_format_calculation_pre_checks(db); - - // Check the entire format before returning any errors. - let sapling_result = check_sapling_subtrees(db, cancel_receiver)?; - let orchard_result = check_orchard_subtrees(db, cancel_receiver)?; - - if quick_result.is_err() || sapling_result.is_err() || orchard_result.is_err() { - let err = Err(format!( - "missing or invalid subtree(s): \ - quick: {quick_result:?}, sapling: {sapling_result:?}, orchard: {orchard_result:?}" - )); - warn!(?err); - return Ok(err); - } - - Ok(Ok(())) -} - /// Check that Sapling note commitment subtrees were correctly added. /// /// Returns an error if a note commitment subtree is missing or incorrect. diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs new file mode 100644 index 00000000000..a312f176a73 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs @@ -0,0 +1,49 @@ +//! 
An implementation of [`DiskFormatUpgrade`] for marking the database as upgraded to a new format version. + +use crossbeam_channel::Receiver; + +use semver::Version; +use zebra_chain::block::Height; + +use crate::service::finalized_state::ZebraDb; + +use super::{CancelFormatChange, DiskFormatUpgrade}; + +/// Implements [`DiskFormatUpgrade`] for in-place upgrades that do not involve any migration +/// of existing data into the new format. +pub struct NoMigration { + version: Version, +} + +impl NoMigration { + /// Creates a new instance of the [`NoMigration`] upgrade. + pub fn new(major: u64, minor: u64, patch: u64) -> Self { + Self { + version: Version::new(major, minor, patch), + } + } +} + +impl DiskFormatUpgrade for NoMigration { + fn version(&self) -> Version { + self.version.clone() + } + + fn description(&self) -> &'static str { + "no migration" + } + + #[allow(clippy::unwrap_in_result)] + fn run( + &self, + _initial_tip_height: Height, + _db: &ZebraDb, + _cancel_receiver: &Receiver, + ) -> Result<(), CancelFormatChange> { + Ok(()) + } + + fn needs_migration(&self) -> bool { + false + } +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/prune_trees.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/prune_trees.rs new file mode 100644 index 00000000000..3817de42eed --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/prune_trees.rs @@ -0,0 +1,145 @@ +//! 
Prunes duplicate Sapling and Orchard note commitment trees from database + +use crossbeam_channel::{Receiver, TryRecvError}; + +use semver::Version; +use zebra_chain::block::Height; + +use crate::service::finalized_state::{DiskWriteBatch, ZebraDb}; + +use super::{CancelFormatChange, DiskFormatUpgrade}; + +/// Implements [`DiskFormatUpgrade`] for pruning duplicate Sapling and Orchard note commitment trees from database +pub struct PruneTrees; + +impl DiskFormatUpgrade for PruneTrees { + fn version(&self) -> Version { + Version::new(25, 1, 1) + } + + fn description(&self) -> &'static str { + "deduplicate trees upgrade" + } + + #[allow(clippy::unwrap_in_result)] + fn run( + &self, + initial_tip_height: Height, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result<(), CancelFormatChange> { + // Prune duplicate Sapling note commitment trees. + + // The last tree we checked. + let mut last_tree = db + .sapling_tree_by_height(&Height(0)) + .expect("Checked above that the genesis block is in the database."); + + // Run through all the possible duplicate trees in the finalized chain. + // The block after genesis is the first possible duplicate. + for (height, tree) in db.sapling_tree_by_height_range(Height(1)..=initial_tip_height) { + // Return early if there is a cancel signal. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + // Delete any duplicate trees. + if tree == last_tree { + let mut batch = DiskWriteBatch::new(); + batch.delete_sapling_tree(db, &height); + db.write_batch(batch) + .expect("Deleting Sapling note commitment trees should always succeed."); + } + + // Compare against the last tree to find unique trees. + last_tree = tree; + } + + // Prune duplicate Orchard note commitment trees. + + // The last tree we checked. 
+ let mut last_tree = db + .orchard_tree_by_height(&Height(0)) + .expect("Checked above that the genesis block is in the database."); + + // Run through all the possible duplicate trees in the finalized chain. + // The block after genesis is the first possible duplicate. + for (height, tree) in db.orchard_tree_by_height_range(Height(1)..=initial_tip_height) { + // Return early if there is a cancel signal. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + // Delete any duplicate trees. + if tree == last_tree { + let mut batch = DiskWriteBatch::new(); + batch.delete_orchard_tree(db, &height); + db.write_batch(batch) + .expect("Deleting Orchard note commitment trees should always succeed."); + } + + // Compare against the last tree to find unique trees. + last_tree = tree; + } + + Ok(()) + } + + /// Check that note commitment trees were correctly de-duplicated. + #[allow(clippy::unwrap_in_result)] + fn validate( + &self, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result, CancelFormatChange> { + // Runtime test: make sure we removed all duplicates. + // We always run this test, even if the state has supposedly been upgraded. + let mut result = Ok(()); + + let mut prev_height = None; + let mut prev_tree = None; + for (height, tree) in db.sapling_tree_by_height_range(..) { + // Return early if the format check is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + if prev_tree == Some(tree.clone()) { + result = Err(format!( + "found duplicate sapling trees after running de-duplicate tree upgrade:\ + height: {height:?}, previous height: {:?}, tree root: {:?}", + prev_height.unwrap(), + tree.root() + )); + error!(?result); + } + + prev_height = Some(height); + prev_tree = Some(tree); + } + + let mut prev_height = None; + let mut prev_tree = None; + for (height, tree) in db.orchard_tree_by_height_range(..) 
{ + // Return early if the format check is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + if prev_tree == Some(tree.clone()) { + result = Err(format!( + "found duplicate orchard trees after running de-duplicate tree upgrade:\ + height: {height:?}, previous height: {:?}, tree root: {:?}", + prev_height.unwrap(), + tree.root() + )); + error!(?result); + } + + prev_height = Some(height); + prev_tree = Some(tree); + } + + Ok(result) + } +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/tree_keys_and_caches_upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/tree_keys_and_caches_upgrade.rs new file mode 100644 index 00000000000..53fd189c20e --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/tree_keys_and_caches_upgrade.rs @@ -0,0 +1,57 @@ +//! Applies the [`fix_tree_key_type`] and [`cache_genesis_roots`] upgrades to the database. + +use crossbeam_channel::Receiver; + +use semver::Version; +use zebra_chain::block::Height; + +use crate::service::finalized_state::ZebraDb; + +use super::{cache_genesis_roots, fix_tree_key_type, CancelFormatChange, DiskFormatUpgrade}; + +/// Implements [`DiskFormatUpgrade`] for updating the sprout and history tree key type from +/// `Height` to the empty key `()` and the genesis note commitment trees to cache their roots +pub struct FixTreeKeyTypeAndCacheGenesisRoots; + +impl DiskFormatUpgrade for FixTreeKeyTypeAndCacheGenesisRoots { + fn version(&self) -> Version { + Version::new(25, 3, 0) + } + + fn description(&self) -> &'static str { + "tree keys and caches upgrade" + } + + #[allow(clippy::unwrap_in_result)] + fn run( + &self, + initial_tip_height: Height, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result<(), CancelFormatChange> { + // It shouldn't matter what order these are run in. 
+ cache_genesis_roots::run(initial_tip_height, db, cancel_receiver)?; + fix_tree_key_type::run(initial_tip_height, db, cancel_receiver)?; + Ok(()) + } + + #[allow(clippy::unwrap_in_result)] + fn validate( + &self, + db: &ZebraDb, + cancel_receiver: &Receiver, + ) -> Result, CancelFormatChange> { + let results = [ + cache_genesis_roots::detailed_check(db, cancel_receiver)?, + fix_tree_key_type::detailed_check(db, cancel_receiver)?, + ]; + + let result = if results.iter().any(Result::is_err) { + Err(format!("{results:?}")) + } else { + Ok(()) + }; + + Ok(result) + } +} From 949326258a2a31e70a6592c7bd02abf9072c00cd Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Fri, 28 Mar 2025 15:45:06 +0200 Subject: [PATCH 123/245] feat(rpc): fill size field in getblock with verbosity=2 (#9327) * feat(rpc): fill size field in getblock with verbosity=2 * replace unwrap with expect --- zebra-rpc/src/methods.rs | 61 ++++++------ ...k_verbose_hash_verbosity_2@mainnet_10.snap | 1 + ...k_verbose_hash_verbosity_2@testnet_10.snap | 1 + ...verbose_height_verbosity_2@mainnet_10.snap | 1 + ...verbose_height_verbosity_2@testnet_10.snap | 1 + zebra-rpc/src/methods/tests/vectors.rs | 4 +- zebra-state/src/lib.rs | 4 +- zebra-state/src/request.rs | 19 ++++ zebra-state/src/response.rs | 8 ++ zebra-state/src/service.rs | 26 +++++ .../service/finalized_state/zebra_db/block.rs | 96 ++++++++++++++++++- zebra-state/src/service/read.rs | 3 +- zebra-state/src/service/read/block.rs | 26 +++++ 13 files changed, 216 insertions(+), 35 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 0a5e6e351f5..cd162435755 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -172,12 +172,7 @@ pub trait Rpc { /// /// # Notes /// - /// Zebra previously partially supported verbosity=1 by returning only the - /// fields required by lightwalletd ([`lightwalletd` only reads the `tx` - /// field of the 
result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152)). - /// That verbosity level was migrated to "3"; so while lightwalletd will - /// still work by using verbosity=1, it will sync faster if it is changed to - /// use verbosity=3. + /// The `size` field is only returned with verbosity=2. /// /// The undocumented `chainwork` field is not returned. #[method(name = "getblock")] @@ -887,7 +882,7 @@ where let transactions_request = match verbosity { 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), - 2 => zebra_state::ReadRequest::Block(hash_or_height), + 2 => zebra_state::ReadRequest::BlockAndSize(hash_or_height), _other => panic!("get_block_header_fut should be none"), }; @@ -916,28 +911,34 @@ where } let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); - let tx: Vec<_> = match tx_ids_response.map_misc_error()? { - zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids - .ok_or_misc_error("block not found")? - .iter() - .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) - .collect(), - zebra_state::ReadResponse::Block(block) => block - .ok_or_misc_error("Block not found")? - .transactions - .iter() - .map(|tx| { - GetBlockTransaction::Object(TransactionObject::from_transaction( - tx.clone(), - Some(height), - Some( - confirmations - .try_into() - .expect("should be less than max block height, i32::MAX"), - ), - )) - }) - .collect(), + let (tx, size): (Vec<_>, Option) = match tx_ids_response.map_misc_error()? { + zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => ( + tx_ids + .ok_or_misc_error("block not found")? 
+ .iter() + .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) + .collect(), + None, + ), + zebra_state::ReadResponse::BlockAndSize(block_and_size) => { + let (block, size) = block_and_size.ok_or_misc_error("Block not found")?; + let transactions = block + .transactions + .iter() + .map(|tx| { + GetBlockTransaction::Object(TransactionObject::from_transaction( + tx.clone(), + Some(height), + Some( + confirmations + .try_into() + .expect("should be less than max block height, i32::MAX"), + ), + )) + }) + .collect(); + (transactions, Some(size)) + } _ => unreachable!("unmatched response to a transaction_ids_for_block request"), }; @@ -984,7 +985,7 @@ where difficulty: Some(difficulty), tx, trees, - size: None, + size: size.map(|size| size as i64), block_commitments: Some(block_commitments), final_sapling_root: Some(final_sapling_root), final_orchard_root, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index a974f13a67b..16890f1845e 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -5,6 +5,7 @@ expression: block { "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, + "size": 1617, "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index c0da399be69..088a2cc297c 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -5,6 +5,7 @@ expression: block { 
"hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, + "size": 1618, "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index a974f13a67b..16890f1845e 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -5,6 +5,7 @@ expression: block { "hash": "0007bc227e1c57a4a70e237cad00e7b7ce565155ab49166bc57397a26d339283", "confirmations": 10, + "size": 1617, "height": 1, "version": 4, "merkleroot": "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index c0da399be69..088a2cc297c 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -5,6 +5,7 @@ expression: block { "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "confirmations": 10, + "size": 1618, "height": 1, "version": 4, "merkleroot": "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index b21d7388b08..94f54c65d47 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -316,7 +316,7 @@ async fn rpc_getblock() { })) .collect(), trees, - size: None, + size: Some(block.zcash_serialize_to_vec().unwrap().len() as i64), version: 
Some(block.header.version), merkle_root: Some(block.header.merkle_root), block_commitments: Some(expected_block_commitments), @@ -364,7 +364,7 @@ async fn rpc_getblock() { })) .collect(), trees, - size: None, + size: Some(block.zcash_serialize_to_vec().unwrap().len() as i64), version: Some(block.header.version), merkle_root: Some(block.header.merkle_root), block_commitments: Some(expected_block_commitments), diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index bd8f4d9b26c..3cf59bbc1ab 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -51,7 +51,9 @@ pub use request::Spend; pub use response::{GetBlockTemplateChainInfo, KnownBlock, MinedTx, ReadResponse, Response}; pub use service::{ chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip, TipAction}, - check, init, init_read_only, + check, + finalized_state::FinalizedState, + init, init_read_only, non_finalized_state::NonFinalizedState, spawn_init, spawn_init_read_only, watch_receiver::WatchReceiver, diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 1535fa81c1f..03a43626945 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -715,6 +715,14 @@ pub enum Request { /// [`block::Height`] using `.into()`. Block(HashOrHeight), + //// Same as Block, but also returns serialized block size. + //// + /// Returns + /// + /// * [`ReadResponse::BlockAndSize(Some((Arc, usize)))`](ReadResponse::BlockAndSize) if the block is in the best chain; + /// * [`ReadResponse::BlockAndSize(None)`](ReadResponse::BlockAndSize) otherwise. + BlockAndSize(HashOrHeight), + /// Looks up a block header by hash or height in the current best chain. /// /// Returns @@ -837,6 +845,7 @@ impl Request { Request::Transaction(_) => "transaction", Request::UnspentBestChainUtxo { .. } => "unspent_best_chain_utxo", Request::Block(_) => "block", + Request::BlockAndSize(_) => "block_and_size", Request::BlockHeader(_) => "block_header", Request::FindBlockHashes { .. 
} => "find_block_hashes", Request::FindBlockHeaders { .. } => "find_block_headers", @@ -897,6 +906,14 @@ pub enum ReadRequest { /// [`block::Height`] using `.into()`. Block(HashOrHeight), + //// Same as Block, but also returns serialized block size. + //// + /// Returns + /// + /// * [`ReadResponse::BlockAndSize(Some((Arc, usize)))`](ReadResponse::BlockAndSize) if the block is in the best chain; + /// * [`ReadResponse::BlockAndSize(None)`](ReadResponse::BlockAndSize) otherwise. + BlockAndSize(HashOrHeight), + /// Looks up a block header by hash or height in the current best chain. /// /// Returns @@ -1143,6 +1160,7 @@ impl ReadRequest { ReadRequest::TipPoolValues => "tip_pool_values", ReadRequest::Depth(_) => "depth", ReadRequest::Block(_) => "block", + ReadRequest::BlockAndSize(_) => "block_and_size", ReadRequest::BlockHeader(_) => "block_header", ReadRequest::Transaction(_) => "transaction", ReadRequest::TransactionIdsForBlock(_) => "transaction_ids_for_block", @@ -1200,6 +1218,7 @@ impl TryFrom for ReadRequest { Request::BestChainBlockHash(hash) => Ok(ReadRequest::BestChainBlockHash(hash)), Request::Block(hash_or_height) => Ok(ReadRequest::Block(hash_or_height)), + Request::BlockAndSize(hash_or_height) => Ok(ReadRequest::BlockAndSize(hash_or_height)), Request::BlockHeader(hash_or_height) => Ok(ReadRequest::BlockHeader(hash_or_height)), Request::Transaction(tx_hash) => Ok(ReadRequest::Transaction(tx_hash)), Request::UnspentBestChainUtxo(outpoint) => { diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 0e847e9d0cf..321b5d8935f 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -50,6 +50,9 @@ pub enum Response { /// Response to [`Request::Block`] with the specified block. Block(Option>), + /// Response to [`Request::BlockAndSize`] with the specified block and size. + BlockAndSize(Option<(Arc, usize)>), + /// The response to a `BlockHeader` request. 
BlockHeader { /// The header of the requested block @@ -157,6 +160,10 @@ pub enum ReadResponse { /// Response to [`ReadRequest::Block`] with the specified block. Block(Option>), + /// Response to [`ReadRequest::BlockAndSize`] with the specified block and + /// serialized size. + BlockAndSize(Option<(Arc, usize)>), + /// The response to a `BlockHeader` request. BlockHeader { /// The header of the requested block @@ -311,6 +318,7 @@ impl TryFrom for Response { ReadResponse::BlockHash(hash) => Ok(Response::BlockHash(hash)), ReadResponse::Block(block) => Ok(Response::Block(block)), + ReadResponse::BlockAndSize(block) => Ok(Response::BlockAndSize(block)), ReadResponse::BlockHeader { header, hash, diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 50514d9859e..bef0af00019 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1092,6 +1092,7 @@ impl Service for StateService { | Request::Transaction(_) | Request::UnspentBestChainUtxo(_) | Request::Block(_) + | Request::BlockAndSize(_) | Request::BlockHeader(_) | Request::FindBlockHashes { .. } | Request::FindBlockHeaders { .. } @@ -1311,6 +1312,31 @@ impl Service for ReadStateService { .wait_for_panics() } + // Used by the get_block (raw) RPC and the StateService. + ReadRequest::BlockAndSize(hash_or_height) => { + let state = self.clone(); + + tokio::task::spawn_blocking(move || { + span.in_scope(move || { + let block_and_size = state.non_finalized_state_receiver.with_watch_data( + |non_finalized_state| { + read::block_and_size( + non_finalized_state.best_chain(), + &state.db, + hash_or_height, + ) + }, + ); + + // The work is done in the future. + timer.finish(module_path!(), line!(), "ReadRequest::BlockAndSize"); + + Ok(ReadResponse::BlockAndSize(block_and_size)) + }) + }) + .wait_for_panics() + } + // Used by the get_block (verbose) RPC and the StateService. 
ReadRequest::BlockHeader(hash_or_height) => { let state = self.clone(); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 6ad4cd93a60..8c515f0c27e 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -24,7 +24,7 @@ use zebra_chain::{ parallel::tree::NoteCommitmentTrees, parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, sapling, - serialization::TrustedPreallocate, + serialization::{CompactSizeMessage, TrustedPreallocate, ZcashSerialize as _}, transaction::{self, Transaction}, transparent, value_balance::ValueBalance, @@ -39,6 +39,7 @@ use crate::{ transparent::{AddressBalanceLocation, OutputLocation}, }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, + FromDisk, RawBytes, }, BoxError, HashOrHeight, }; @@ -132,6 +133,19 @@ impl ZebraDb { Some(header) } + /// Returns the raw [`block::Header`] with [`block::Hash`] or [`Height`], if + /// it exists in the finalized chain. + #[allow(clippy::unwrap_in_result)] + fn raw_block_header(&self, hash_or_height: HashOrHeight) -> Option { + // Block Header + let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap(); + + let height = hash_or_height.height_or_else(|hash| self.height(hash))?; + let header: RawBytes = self.db.zs_get(&block_header_by_height, &height)?; + + Some(header) + } + /// Returns the [`Block`] with [`block::Hash`] or /// [`Height`], if it exists in the finalized chain. // @@ -161,6 +175,59 @@ impl ZebraDb { })) } + /// Returns the [`Block`] with [`block::Hash`] or [`Height`], if it exists + /// in the finalized chain, and its serialized size. 
+ #[allow(clippy::unwrap_in_result)] + pub fn block_and_size(&self, hash_or_height: HashOrHeight) -> Option<(Arc<Block>, usize)> { + let (raw_header, raw_txs) = self.raw_block(hash_or_height)?; + + let header = Arc::<block::Header>::from_bytes(raw_header.raw_bytes()); + let txs: Vec<_> = raw_txs + .iter() + .map(|raw_tx| Arc::<Transaction>::from_bytes(raw_tx.raw_bytes())) + .collect(); + + // Compute the size of the block from the size of header and size of + // transactions. This requires summing them all and also adding the + // size of the CompactSize-encoded transaction count. + // See https://developer.bitcoin.org/reference/block_chain.html#serialized-blocks + let tx_count = CompactSizeMessage::try_from(txs.len()) + .expect("must work for a previously serialized block"); + let tx_raw = tx_count + .zcash_serialize_to_vec() + .expect("must work for a previously serialized block"); + let size = raw_header.raw_bytes().len() + + raw_txs + .iter() + .map(|raw_tx| raw_tx.raw_bytes().len()) + .sum::<usize>() + + tx_raw.len(); + + let block = Block { + header, + transactions: txs, + }; + Some((Arc::new(block), size)) + } + + /// Returns the raw [`Block`] with [`block::Hash`] or + /// [`Height`], if it exists in the finalized chain. + #[allow(clippy::unwrap_in_result)] + fn raw_block(&self, hash_or_height: HashOrHeight) -> Option<(RawBytes, Vec<RawBytes>)> { + // Block + let height = hash_or_height.height_or_else(|hash| self.height(hash))?; + let header = self.raw_block_header(height.into())?; + + // Transactions + + let transactions = self + .raw_transactions_by_height(height) + .map(|(_, tx)| tx) + .collect(); + + Some((header, transactions)) + } + /// Returns the Sapling [`note commitment tree`](sapling::tree::NoteCommitmentTree) specified by /// a hash or height, if it exists in the finalized state. #[allow(clippy::unwrap_in_result)] @@ -233,6 +300,19 @@ impl ZebraDb { ) } + /// Returns an iterator of all raw [`Transaction`]s for a provided block + /// height in finalized state.
+ #[allow(clippy::unwrap_in_result)] + fn raw_transactions_by_height( + &self, + height: Height, + ) -> impl Iterator<Item = (TransactionLocation, RawBytes)> + '_ { + self.raw_transactions_by_location_range( + TransactionLocation::min_for_height(height) + ..=TransactionLocation::max_for_height(height), + ) + } + /// Returns an iterator of all [`Transaction`]s in the provided range + /// of [`TransactionLocation`]s in finalized state. #[allow(clippy::unwrap_in_result)] @@ -247,6 +327,20 @@ impl ZebraDb { self.db.zs_forward_range_iter(tx_by_loc, range) } + /// Returns an iterator of all raw [`Transaction`]s in the provided range + /// of [`TransactionLocation`]s in finalized state. + #[allow(clippy::unwrap_in_result)] + fn raw_transactions_by_location_range<R>( + &self, + range: R, + ) -> impl Iterator<Item = (TransactionLocation, RawBytes)> + '_ + where + R: RangeBounds<TransactionLocation>, + { + let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); + self.db.zs_forward_range_iter(tx_by_loc, range) + } + /// Returns the [`TransactionLocation`] for [`transaction::Hash`], /// if it exists in the finalized chain.
#[allow(clippy::unwrap_in_result)] diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index 2cc4c63e361..8a015881b8f 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -29,7 +29,8 @@ pub use address::{ utxo::{address_utxos, AddressUtxos}, }; pub use block::{ - any_utxo, block, block_header, mined_transaction, transaction_hashes_for_block, unspent_utxo, + any_utxo, block, block_and_size, block_header, mined_transaction, transaction_hashes_for_block, + unspent_utxo, }; #[cfg(feature = "indexer")] diff --git a/zebra-state/src/service/read/block.rs b/zebra-state/src/service/read/block.rs index 99d4189a3e4..f00d69091b9 100644 --- a/zebra-state/src/service/read/block.rs +++ b/zebra-state/src/service/read/block.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use zebra_chain::{ block::{self, Block, Height}, + serialization::ZcashSerialize as _, transaction::{self, Transaction}, transparent::{self, Utxo}, }; @@ -51,6 +52,31 @@ where .or_else(|| db.block(hash_or_height)) } +/// Returns the [`Block`] with [`block::Hash`] or +/// [`Height`], if it exists in the non-finalized `chain` or finalized `db`. +pub fn block_and_size<C>( + chain: Option<C>, + db: &ZebraDb, + hash_or_height: HashOrHeight, +) -> Option<(Arc<Block>, usize)> +where + C: AsRef<Chain>, +{ + // # Correctness + // + // Since blocks are the same in the finalized and non-finalized state, we + // check the most efficient alternative first. (`chain` is always in memory, + // but `db` stores blocks on disk, with a memory cache.) + chain + .as_ref() + .and_then(|chain| chain.as_ref().block(hash_or_height)) + .map(|contextual| { + let size = contextual.block.zcash_serialize_to_vec().unwrap().len(); + (contextual.block.clone(), size) + }) + .or_else(|| db.block_and_size(hash_or_height)) +} + /// Returns the [`block::Header`] with [`block::Hash`] or /// [`Height`], if it exists in the non-finalized `chain` or finalized `db`.
pub fn block_header( From da6c218afb1010c2b66bedfb3ab60079537f82d8 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Sun, 30 Mar 2025 19:53:30 +0300 Subject: [PATCH 124/245] ensure secondary rocksdb instance has caught up to the primary instance (#9346) Co-authored-by: Arya --- zebra-rpc/src/sync.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/zebra-rpc/src/sync.rs b/zebra-rpc/src/sync.rs index c678f580b4a..aa32de217b7 100644 --- a/zebra-rpc/src/sync.rs +++ b/zebra-rpc/src/sync.rs @@ -150,6 +150,14 @@ impl TrustedChainSync { } }; + // # Correctness + // + // Ensure that the secondary rocksdb instance has caught up to the primary instance + // before attempting to commit the new block to the non-finalized state. It is sufficient + // to call this once here, as a new chain tip block has already been retrieved and so + // we know that the primary rocksdb instance has already been updated. + self.try_catch_up_with_primary().await; + let block_hash = block.hash; let commit_result = if self.non_finalized_state.chain_count() == 0 { self.non_finalized_state From 81c2cc4fbc36ba603e6ebc835b779f67968580ae Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Mon, 31 Mar 2025 14:04:54 +0100 Subject: [PATCH 125/245] fix(ci): Add workflow that runs `zizmor` for GHA static analysis (#9110 ) Source: woodruffw/zizmor@c6fef48587365328a3769766f7f4babf1c510213 --- .github/workflows/zizmor.yml | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/zizmor.yml diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 00000000000..25f14de877a --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,31 @@ +name: GitHub Actions Security Analysis with zizmor 🌈 + +on: + push: + branches: ["main"] + pull_request: + branches: ["*"] + +jobs: + zizmor: + name: zizmor latest via Cargo + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + steps: + - name: Checkout 
repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + persist-credentials: false + - name: Install the latest version of uv + uses: astral-sh/setup-uv@887a942a15af3a7626099df99e897a18d9e5ab3a # v4 + - name: Run zizmor 🌈 + run: uvx zizmor --format sarif . > results.sarif + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + with: + sarif_file: results.sarif + category: zizmor From db48daea05cd439f27d27acc95b7e7ffb4000e15 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 31 Mar 2025 22:28:25 +0100 Subject: [PATCH 126/245] fix(docker): add default CMD to test stage in Dockerfile (#9334) * fix(docker): set `HOME` environment variable explicitly in Dockerfile The HOME environment variable was defaulting to /root when the container started, causing cache directories to be incorrectly set up under /root/.cache/zebra instead of /home/zebra/.cache/zebra. This explicit setting ensures the HOME environment variable is correctly set to the zebra user's home directory. * fix(docker): add default `CMD` to test stage in Dockerfile When running the container using docker-compose without explicitly providing a command, the entrypoint.sh script was attempting to execute `exec_as_user` with no arguments, resulting in a gosu error: ``` exec_as_user exec gosu 10001:10001 Usage: gosu user-spec command [args] ``` By adding `CMD ["cargo", "test"]` to the test stage in the Dockerfile, we ensure a default command is available for the entrypoint script to execute, preventing the gosu error when no command is explicitly provided. This fix allows `docker-compose.test.yml` to run successfully without needing to specify a command in the service definition. 
--- docker/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 564965ff38d..fc61f525914 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -133,6 +133,7 @@ COPY --chown=${UID}:${GID} ./ ${HOME} COPY --chown=${UID}:${GID} ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh ENTRYPOINT [ "entrypoint.sh", "test" ] +CMD [ "cargo", "test" ] # This stage builds the zebrad release binary. # From b0bbca65e9960627ccaadba131224e2de86c1190 Mon Sep 17 00:00:00 2001 From: crStiv Date: Tue, 1 Apr 2025 22:32:42 +0300 Subject: [PATCH 127/245] feat: Add Mempool Specification to Zebra Book (#9336) * Create mempool-specification.md * Create mempool-architecture.md * Update SUMMARY.md * Update book/src/dev/mempool-specification.md Co-authored-by: Alfredo Garcia * Update mempool-architecture.md * Update mempool-architecture.md --------- Co-authored-by: Alfredo Garcia --- book/src/SUMMARY.md | 2 + book/src/dev/diagrams/mempool-architecture.md | 88 ++++++++ book/src/dev/mempool-specification.md | 206 ++++++++++++++++++ 3 files changed, 296 insertions(+) create mode 100644 book/src/dev/diagrams/mempool-architecture.md create mode 100644 book/src/dev/mempool-specification.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 9f8715a8806..06f04e658ce 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -27,8 +27,10 @@ - [Developer Documentation](dev.md) - [Contribution Guide](CONTRIBUTING.md) - [Design Overview](dev/overview.md) + - [Mempool Specification](dev/mempool-specification.md) - [Diagrams](dev/diagrams.md) - [Network Architecture](dev/diagrams/zebra-network.md) + - [Mempool Architecture](dev/diagrams/mempool-architecture.md) - [Upgrading the State Database](dev/state-db-upgrades.md) - [Zebra versioning and releases](dev/release-process.md) - [Continuous Integration](dev/continuous-integration.md) diff --git a/book/src/dev/diagrams/mempool-architecture.md b/book/src/dev/diagrams/mempool-architecture.md new 
file mode 100644 index 00000000000..f0e11184048 --- /dev/null +++ b/book/src/dev/diagrams/mempool-architecture.md @@ -0,0 +1,88 @@ +# Mempool Architecture Diagram + +This diagram illustrates the architecture of the Zebra mempool, showing its main components and the flow of transactions through the system. + +```mermaid +graph TD + %% External Components + Net[Network Service] + State[State Service] + TxVerifier[Transaction Verifier] + RPC[RPC Service] + + %% Mempool Main Components + Mempool{{Mempool Service}} + Storage{{Storage}} + Downloads{{Transaction Downloads}} + Crawler{{Crawler}} + QueueChecker{{Queue Checker}} + + %% Transaction Flow + Net -->|1- Poll peers| Mempool + RPC -->|1- Direct submit| Mempool + Crawler -->|1- Poll peers| Net + Crawler -->|2- Queue transactions| Mempool + + Mempool -->|3- Queue for download| Downloads + Downloads -->|4a- Download request| Net + Net -->|4b- Transaction data| Downloads + + Downloads -->|5a- Verify request| TxVerifier + TxVerifier -->|5b- Verification result| Downloads + + Downloads -->|6a- Check UTXO| State + State -->|6b- UTXO data| Downloads + + Downloads -->|7- Store verified tx| Storage + + QueueChecker -->|8a- Check for verified| Mempool + Mempool -->|8b- Process verified| QueueChecker + + Storage -->|9- Query responses| Mempool + Mempool -->|10- Gossip new tx| Net + + %% State Management + State -->|Chain tip changes| Mempool + Mempool -->|Updates verification context| Downloads + + %% Mempool responds to service requests + RPC -->|Query mempool| Mempool + Mempool -->|Mempool data| RPC + + %% Styling + classDef external fill:#444,stroke:#888,stroke-width:1px,color:white; + classDef component fill:#333,stroke:#888,stroke-width:1px,color:white; + + class Net,State,TxVerifier,RPC external; + class Mempool,Storage,Downloads,Crawler,QueueChecker component; +``` + +## Component Descriptions + +1. **Mempool Service**: The central coordinator that handles requests and manages the mempool state. + +2. 
**Storage**: In-memory storage for verified transactions and rejection lists. + +3. **Transaction Downloads**: Handles downloading and verifying transactions from peers. + +4. **Crawler**: Periodically polls peers for new transactions. + +5. **Queue Checker**: Regularly polls for newly verified transactions. + +## Transaction Flow + +1. Transactions arrive via network gossiping, direct RPC submission, or crawler polling. + +2. The mempool checks if transactions are already known or rejected. If not, it queues them for download. + +3. The download service retrieves transaction data from peers. + +4. Transactions are verified against consensus rules using the transaction verifier. + +5. Verified transactions are stored in memory and gossiped to peers. + +6. The queue checker regularly checks for newly verified transactions. + +7. Transactions remain in the mempool until they are mined or evicted due to size limits. + +8. When the chain tip changes, the mempool updates its verification context and potentially evicts invalid transactions. diff --git a/book/src/dev/mempool-specification.md b/book/src/dev/mempool-specification.md new file mode 100644 index 00000000000..1142ab0e827 --- /dev/null +++ b/book/src/dev/mempool-specification.md @@ -0,0 +1,206 @@ +# Mempool Specification + +The Zebra mempool handles unmined Zcash transactions: collecting them from peers, verifying them, storing them in memory, providing APIs for other components to access them, and gossiping transactions to peers. This document specifies the architecture, behavior, and interfaces of the mempool. + +## Overview + +The mempool is a fundamental component of the Zebra node, responsible for managing the lifecycle of unmined transactions. It provides an in-memory storage for valid transactions that haven't yet been included in a block, and offers interfaces for other components to interact with these transactions. 
+ +Key responsibilities of the mempool include: +- Accepting new transactions from the network +- Verifying transactions against a subset of consensus rules +- Storing verified transactions in memory +- Managing memory usage and transaction eviction +- Providing transaction queries to other components +- Gossiping transactions to peers + +## Architecture + +The mempool is comprised of several subcomponents: + +1. **Mempool Service** (`Mempool`): The main service that handles requests from other components, manages the active state of the mempool, and coordinates the other subcomponents. + +2. **Transaction Storage** (`Storage`): Manages the in-memory storage of verified transactions and rejected transactions, along with their rejection reasons. + +3. **Transaction Downloads** (`Downloads`): Handles downloading and verifying transactions, coordinating with the network and verification services. + +4. **Crawler** (`Crawler`): Periodically polls peers for new transactions to add to the mempool. + +5. **Queue Checker** (`QueueChecker`): Regularly checks the transaction verification queue to process newly verified transactions. + +6. **Transaction Gossip** (`gossip`): Broadcasts newly added transactions to peers. + +7. **Pending Outputs** (`PendingOutputs`): Tracks requests for transaction outputs that haven't yet been seen. + +For a visual representation of the architecture and transaction flow, see the [Mempool Architecture Diagram](diagrams/mempool-architecture.md). + +## Activation + +The mempool is activated when: +- The node is near the blockchain tip (determined by `SyncStatus`) +- OR when the current chain height reaches a configured debug height (`debug_enable_at_height`) + +When activated, the mempool creates transaction download and verify services, initializes storage, and starts background tasks for crawling and queue checking. + +## Configuration + +The mempool has the following configurable parameters: + +1. 
**Transaction Cost Limit** (`tx_cost_limit`): The maximum total serialized byte size of all transactions in the mempool, defaulting to 80,000,000 bytes as required by [ZIP-401](https://zips.z.cash/zip-0401). + +2. **Eviction Memory Time** (`eviction_memory_time`): The maximum time to remember evicted transaction IDs in the rejection list, defaulting to 60 minutes. + +3. **Debug Enable At Height** (`debug_enable_at_height`): An optional height at which to enable the mempool for debugging, regardless of sync status. + +## State Management + +The mempool maintains an `ActiveState` which can be either: +- `Disabled`: Mempool is not active +- `Enabled`: Mempool is active and contains: + - `storage`: The Storage instance for transactions + - `tx_downloads`: Transaction download and verification stream + - `last_seen_tip_hash`: Hash of the last chain tip the mempool has seen + +The mempool responds to chain tip changes: +- On new blocks: Updates verification context, removes mined transactions +- On reorgs: Clears tip-specific rejections, retries all transactions + +## Transaction Processing Flow + +1. **Transaction Arrival**: + - From peer gossip (inv messages) + - From direct submission (RPC) + - From periodic peer polling (crawler) + +2. **Transaction Download**: + - Checks if transaction exists in mempool or rejection lists + - Queues transaction for download if needed + - Downloads transaction data from peers + +3. **Transaction Verification**: + - Checks transaction against consensus rules + - Verifies transaction against the current chain state + - Manages dependencies between transactions + +4. **Transaction Storage**: + - Stores verified transactions in memory + - Tracks transaction dependencies + - Enforces size limits and eviction policies + +5. **Transaction Gossip**: + - Broadcasts newly verified transactions to peers + +## Transaction Rejection + +Transactions can be rejected for multiple reasons, categorized into: + +1. 
**Exact Tip Rejections** (`ExactTipRejectionError`): + - Failures in consensus validation + - Only applies to exactly matching transactions at the current tip + +2. **Same Effects Tip Rejections** (`SameEffectsTipRejectionError`): + - Spending conflicts with other mempool transactions + - Missing outputs from mempool transactions + - Applies to any transaction with the same effects at the current tip + +3. **Same Effects Chain Rejections** (`SameEffectsChainRejectionError`): + - Expired transactions + - Duplicate spends already in the blockchain + - Transactions already mined + - Transactions evicted due to memory limits + - Applies until a rollback or network upgrade + +Rejection reasons are stored alongside rejected transaction IDs to prevent repeated verification of invalid transactions. + +## Memory Management + +The mempool employs several strategies for memory management: + +1. **Transaction Cost Limit**: Enforces a maximum total size for all mempool transactions. + +2. **Random Eviction**: When the mempool exceeds the cost limit, transactions are randomly evicted following the [ZIP-401](https://zips.z.cash/zip-0401) specification. + +3. **Eviction Memory**: Remembers evicted transaction IDs for a configurable period to prevent re-verification. + +4. **Rejection List Size Limit**: Caps rejection lists at 40,000 entries per [ZIP-401](https://zips.z.cash/zip-0401). + +5. **Automatic Cleanup**: Removes expired transactions and rejections that are no longer relevant. + +## Service Interface + +The mempool exposes a service interface with the following request types: + +1. 
**Query Requests**: + - `TransactionIds`: Get all transaction IDs in the mempool + - `TransactionsById`: Get transactions by their unmined IDs + - `TransactionsByMinedId`: Get transactions by their mined hashes + - `FullTransactions`: Get all verified transactions with fee information + - `RejectedTransactionIds`: Query rejected transaction IDs + - `TransactionWithDepsByMinedId`: Get a transaction and its dependencies + +2. **Action Requests**: + - `Queue`: Queue transactions or transaction IDs for download and verification + - `CheckForVerifiedTransactions`: Check for newly verified transactions + - `AwaitOutput`: Wait for a specific transparent output to become available + +## Interaction with Other Components + +The mempool interacts with several other Zebra components: + +1. **Network Service**: For downloading transactions and gossiping to peers. + +2. **State Service**: For checking transaction validity against the current chain state. + +3. **Transaction Verifier**: For consensus validation of transactions. + +4. **Chain Tip Service**: For tracking the current blockchain tip. + +5. **RPC Services**: To provide transaction data for RPC methods. + +## Implementation Constraints + +1. **Correctness**: + - All transactions in the mempool must be verified + - Transactions must be re-verified when the chain tip changes + - Rejected transactions must be properly tracked to prevent DoS attacks + +2. **Performance**: + - Transaction processing should be asynchronous + - Memory usage should be bounded + - Critical paths should be optimized for throughput + +3. **Reliability**: + - The mempool should recover from crashes and chain reorganizations + - Background tasks should be resilient to temporary failures + +## ZIP-401 Compliance + +The mempool implements the requirements specified in [ZIP-401](https://zips.z.cash/zip-0401): + +1. Implements `mempooltxcostlimit` configuration (default: 80,000,000) +2. 
Implements `mempoolevictionmemoryminutes` configuration (default: 60 minutes) +3. Uses random eviction when the mempool exceeds the cost limit +4. Caps eviction memory lists at 40,000 entries +5. Uses transaction IDs (txid) for version 5 transactions in eviction lists + +## Error Handling + +The mempool employs a comprehensive error handling strategy: + +1. **Temporary Failures**: For network or resource issues, transactions remain in the queue for retry. + +2. **Permanent Rejections**: For consensus or semantic failures, transactions are rejected with specific error reasons. + +3. **Dependency Failures**: For missing inputs or dependencies, transactions may wait for dependencies to be resolved. + +4. **Recovery**: On startup or after a crash, the mempool is rebuilt from scratch. + +## Metrics and Diagnostics + +The mempool provides metrics for monitoring: + +1. **Transaction Count**: Number of transactions in the mempool +2. **Total Cost**: Total size of all mempool transactions +3. **Queued Count**: Number of transactions pending download or verification +4. **Rejected Count**: Number of rejected transactions in memory +5. **Background Task Status**: Health of crawler and queue checker tasks From af689ac2f76ac7c42aa227c69c2a76f384d9fa18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 12:50:26 +0000 Subject: [PATCH 128/245] build(deps): bump ring from 0.17.8 to 0.17.13 in the cargo group (#9317) Bumps the cargo group with 1 update: [ring](https://github.com/briansmith/ring). Updates `ring` from 0.17.8 to 0.17.13 - [Changelog](https://github.com/briansmith/ring/blob/main/RELEASES.md) - [Commits](https://github.com/briansmith/ring/commits) --- updated-dependencies: - dependency-name: ring dependency-type: indirect dependency-group: cargo ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9095e4f1ee5..c86e6f330e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3764,15 +3764,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] From 3b2af49dc7257f8f677f649bac840db981b021ed Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 3 Apr 2025 15:57:15 +0300 Subject: [PATCH 129/245] chore(deps): Updates ECC dependencies (#9300) * Bumps ECC dep versions (using git sources) and updates their usage in Zebra * removes dependency on bridgetree and imports types from incrementalmerkletree directly instead * Removes unused patches * bumps ECC dep versions and replaces Zebra's usage of the now-deprecated API. 
* Adds conversion impl from `HashType` for `SighashType` * fixes lints * updates deny.toml * updates edition, adds redjubjub to cargo deny exceptions * reverts Rust edition bump * fixes new usage of `add_output()` --------- Co-authored-by: Alfredo Garcia --- Cargo.lock | 301 +++++++++++++----- Cargo.toml | 26 +- deny.toml | 7 +- zebra-chain/Cargo.toml | 2 +- zebra-chain/src/orchard/tree.rs | 11 +- zebra-chain/src/parameters/network.rs | 2 +- .../src/parameters/network/tests/vectors.rs | 3 +- zebra-chain/src/primitives/address.rs | 29 +- .../src/primitives/viewing_key/sapling.rs | 2 +- .../src/primitives/zcash_primitives.rs | 38 +-- zebra-chain/src/sapling/tree.rs | 10 +- zebra-chain/src/sprout/tree.rs | 6 +- zebra-chain/src/transaction/sighash.rs | 25 ++ zebra-consensus/src/primitives/halo2/tests.rs | 4 +- zebra-rpc/Cargo.toml | 1 + .../src/methods/get_block_template_rpcs.rs | 8 +- zebra-scan/src/service/scan_task/scan.rs | 7 +- zebra-scan/src/tests.rs | 4 +- zebrad/Cargo.toml | 2 +- 19 files changed, 331 insertions(+), 157 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c86e6f330e8..68e042adfce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -68,7 +68,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common", + "crypto-common 0.1.6", "generic-array", ] @@ -372,9 +372,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bech32" -version = "0.9.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" [[package]] name = "bellman" @@ -448,16 +448,16 @@ dependencies = [ [[package]] name = "bip32" -version = "0.5.3" +version = "0.6.0-pre.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "db40d3dfbeab4e031d78c844642fa0caa0b0db11ce1607ac9d2986dff1405c69" +checksum = "143f5327f23168716be068f8e1014ba2ea16a6c91e8777bc8927da7b51e1df1f" dependencies = [ "bs58", "hmac", "rand_core 0.6.4", - "ripemd", - "secp256k1", - "sha2", + "ripemd 0.2.0-pre.4", + "secp256k1 0.29.1", + "sha2 0.11.0-pre.4", "subtle", "zeroize", ] @@ -513,9 +513,9 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" dependencies = [ "arrayref", "arrayvec", @@ -524,9 +524,9 @@ dependencies = [ [[package]] name = "blake2s_simd" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" +checksum = "e90f7deecfac93095eb874a40febd69427776e24e1bd7f87f33ac62d6f0174df" dependencies = [ "arrayref", "arrayvec", @@ -542,6 +542,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.11.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a229bfd78e4827c91b9b95784f69492c1b77c1ab75a45a8a037b139215086f94" +dependencies = [ + "hybrid-array", +] + [[package]] name = "bls12_381" version = "0.8.0" @@ -555,22 +564,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "bridgetree" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef977c7f8e75aa81fc589064c121ab8d32448b7939d34d58df479aa93e65ea5" -dependencies = [ - "incrementalmerkletree", -] - [[package]] name = "bs58" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - 
"sha2", + "sha2 0.10.8", "tinyvec", ] @@ -770,7 +770,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common", + "crypto-common 0.1.6", "inout", "zeroize", ] @@ -1058,6 +1058,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-common" +version = "0.2.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "170d71b5b14dec99db7739f6fc7d6ec2db80b78c3acb77db48392ccc3d8a9ea0" +dependencies = [ + "hybrid-array", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -1067,7 +1076,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest", + "digest 0.10.7", "fiat-crypto", "rustc_version", "serde", @@ -1157,8 +1166,18 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer", - "crypto-common", + "block-buffer 0.10.4", + "crypto-common 0.1.6", +] + +[[package]] +name = "digest" +version = "0.11.0-pre.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf2e3d6615d99707295a9673e889bf363a04b2a466bd320c65a72536f7577379" +dependencies = [ + "block-buffer 0.11.0-rc.4", + "crypto-common 0.2.0-rc.2", "subtle", ] @@ -1232,7 +1251,7 @@ dependencies = [ "hex", "rand_core 0.6.4", "serde", - "sha2", + "sha2 0.10.8", "zeroize", ] @@ -1797,11 +1816,11 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hmac" -version = "0.12.1" +version = "0.13.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +checksum = "e4b1fb14e4df79f9406b434b60acef9f45c26c50062cccf1346c6103b8c47d58" dependencies = [ - "digest", + "digest 0.11.0-pre.9", ] [[package]] @@ -1903,6 
+1922,15 @@ dependencies = [ "serde", ] +[[package]] +name = "hybrid-array" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dab50e193aebe510fe0e40230145820e02f48dae0cf339ea4204e6e708ff7bd" +dependencies = [ + "typenum", +] + [[package]] name = "hyper" version = "1.6.0" @@ -2164,11 +2192,24 @@ dependencies = [ [[package]] name = "incrementalmerkletree" -version = "0.7.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216c71634ac6f6ed13c2102d64354c0a04dcbdc30e31692c5972d3974d8b6d97" +checksum = "30821f91f0fa8660edca547918dc59812893b497d07c1144f326f07fdd94aba9" dependencies = [ "either", + "proptest", + "rand 0.8.5", + "rand_core 0.6.4", +] + +[[package]] +name = "incrementalmerkletree-testing" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad20fb6cf815e76ce9b9eca74f347740ab99059fe4b5e4a002403d0441a02983" +dependencies = [ + "incrementalmerkletree", + "proptest", ] [[package]] @@ -2774,9 +2815,9 @@ dependencies = [ [[package]] name = "nonempty" -version = "0.7.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +checksum = "549e471b99ccaf2f89101bec68f4d244457d5a95a9c3d0672e9564124397741d" [[package]] name = "nu-ansi-term" @@ -2892,9 +2933,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f7152474406422f572de163e0bc63b2126cdbfe17bc849efbbde36fcfe647e" +checksum = "b1ef66fcf99348242a20d582d7434da381a867df8dc155b3a980eca767c56137" dependencies = [ "aes", "bitvec", @@ -3085,7 +3126,7 @@ checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" dependencies = [ "once_cell", "pest", - "sha2", + "sha2 0.10.8", ] 
[[package]] @@ -3643,6 +3684,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "redjubjub" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b0ac1bc6bb3696d2c6f52cff8fba57238b81da8c0214ee6cd146eb8fde364e" +dependencies = [ + "rand_core 0.6.4", + "reddsa", + "thiserror 1.0.69", + "zeroize", +] + [[package]] name = "redox_syscall" version = "0.5.8" @@ -3782,7 +3835,16 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest", + "digest 0.10.7", +] + +[[package]] +name = "ripemd" +version = "0.2.0-pre.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48cf93482ea998ad1302c42739bc73ab3adc574890c373ec89710e219357579" +dependencies = [ + "digest 0.11.0-pre.9", ] [[package]] @@ -3946,9 +4008,9 @@ dependencies = [ [[package]] name = "sapling-crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfff8cfce16aeb38da50b8e2ed33c9018f30552beff2210c266662a021b17f38" +checksum = "f9d3c081c83f1dc87403d9d71a06f52301c0aa9ea4c17da2a3435bbf493ffba4" dependencies = [ "aes", "bellman", @@ -3956,10 +4018,11 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "bls12_381", - "byteorder", + "core2", "document-features", "ff", "fpe", + "getset", "group", "hex", "incrementalmerkletree", @@ -3968,7 +4031,7 @@ dependencies = [ "memuse", "rand 0.8.5", "rand_core 0.6.4", - "redjubjub", + "redjubjub 0.8.0", "subtle", "tracing", "zcash_note_encryption", @@ -3988,10 +4051,19 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" dependencies = [ - "secp256k1-sys", + "secp256k1-sys 0.8.1", "serde", ] +[[package]] +name = "secp256k1" +version = "0.29.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" +dependencies = [ + "secp256k1-sys 0.10.1", +] + [[package]] name = "secp256k1-sys" version = "0.8.1" @@ -4001,6 +4073,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -4222,7 +4303,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -4233,7 +4314,18 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.11.0-pre.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "540c0893cce56cdbcfebcec191ec8e0f470dd1889b6e7a0b503e310a94a168f5" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.11.0-pre.9", ] [[package]] @@ -4247,9 +4339,9 @@ dependencies = [ [[package]] name = "shardtree" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5f2390975ebfe8838f9e861f7a588123d49a7a7a0a08568ea831d8ad53fc9b4" +checksum = "637e95dcd06bc1bb3f86ed9db1e1832a70125f32daae071ef37dcb7701b7d4fe" dependencies = [ "bitflags 2.8.0", "either", @@ -5187,7 +5279,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "crypto-common", + "crypto-common 0.1.6", "subtle", ] @@ -5509,14 +5601,14 @@ dependencies = [ [[package]] name = "which" -version = "4.4.2" +version = "6.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f" dependencies = [ "either", "home", - "once_cell", "rustix", + "winsafe", ] [[package]] @@ -5690,6 +5782,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + [[package]] name = "wit-bindgen-rt" version = "0.33.0" @@ -5764,12 +5862,13 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff95eac82f71286a79c750e674550d64fb2b7aadaef7b89286b2917f645457d" +checksum = "6a21f218c86b350d706c22489af999b098e19bf92ed6dd71770660ea29ee707d" dependencies = [ "bech32", "bs58", + "core2", "f4jumble", "zcash_encoding", "zcash_protocol", @@ -5777,9 +5876,9 @@ dependencies = [ [[package]] name = "zcash_client_backend" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbeeede366fdb642710d3c59fc2090489affd075f66db53ed11bb7138d2d0258" +checksum = "cd14f1ef34cacef42dd2149783dad3d1f46949cb72da786f6ab13d6aa142020b" dependencies = [ "base64 0.22.1", "bech32", @@ -5811,15 +5910,16 @@ dependencies = [ "zcash_note_encryption", "zcash_primitives", "zcash_protocol", + "zcash_transparent", "zip32", "zip321", ] [[package]] name = "zcash_encoding" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3654116ae23ab67dd1f849b01f8821a8a156f884807ff665eac109bf28306c4d" +checksum = "bca38087e6524e5f51a5b0fb3fc18f36d7b84bf67b2056f494ca0c281590953d" dependencies = [ "core2", "nonempty", @@ -5838,14 +5938,15 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.4.0" 
+version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8162c94957f1e379b8e2fb30f97b95cfa93ac9c6bc02895946ca6392d1abb81" +checksum = "2af2839a7bb0489ccf0db9fb12c67234dd83e4a3b81ef50a10beecf1e852a18e" dependencies = [ "bech32", "blake2b_simd", "bls12_381", "bs58", + "core2", "document-features", "group", "memuse", @@ -5857,8 +5958,8 @@ dependencies = [ "tracing", "zcash_address", "zcash_encoding", - "zcash_primitives", "zcash_protocol", + "zcash_transparent", "zip32", ] @@ -5877,19 +5978,19 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab47d526d7fd6f88b3a2854ad81b54757a80c2aeadd1d8b06f690556af9743c" +checksum = "7574550ec5eba75f4e9d447b186de1541c40251d7f3ae2693ddaa5a477760190" dependencies = [ - "aes", "bip32", "blake2b_simd", "bs58", - "byteorder", + "core2", "document-features", "equihash", "ff", "fpe", + "getset", "group", "hex", "incrementalmerkletree", @@ -5899,11 +6000,11 @@ dependencies = [ "orchard", "rand 0.8.5", "rand_core 0.6.4", - "redjubjub", - "ripemd", + "redjubjub 0.8.0", + "ripemd 0.1.3", "sapling-crypto", - "secp256k1", - "sha2", + "secp256k1 0.29.1", + "sha2 0.10.8", "subtle", "tracing", "zcash_address", @@ -5911,14 +6012,15 @@ dependencies = [ "zcash_note_encryption", "zcash_protocol", "zcash_spec", + "zcash_transparent", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daba607872e60d91a09248d8e1ea3d6801c819fb80d67016d9de02d81323c10d" +checksum = "d6bd0b0fe6a98a8b07e30c58457a2c2085f90e57ffac18eec9f72a566b471bde" dependencies = [ "bellman", "blake2b_simd", @@ -5930,7 +6032,7 @@ dependencies = [ "known-folders", "lazy_static", "rand_core 0.6.4", - "redjubjub", + "redjubjub 0.8.0", "sapling-crypto", "tracing", "xdg", @@ -5939,12 +6041,17 @@ dependencies = [ 
[[package]] name = "zcash_protocol" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bc22b9155b2c7eb20105cd06de170d188c1bc86489b92aa3fda7b8da8d96acf" +checksum = "72df627873d103e973b536d34d16cc802d06a3d1494dc010781449789a156dc5" dependencies = [ + "core2", "document-features", + "hex", + "incrementalmerkletree", + "incrementalmerkletree-testing", "memuse", + "proptest", ] [[package]] @@ -5959,13 +6066,38 @@ dependencies = [ [[package]] name = "zcash_spec" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cede95491c2191d3e278cab76e097a44b17fde8d6ca0d4e3a22cf4807b2d857" +checksum = "ded3f58b93486aa79b85acba1001f5298f27a46489859934954d262533ee2915" dependencies = [ "blake2b_simd", ] +[[package]] +name = "zcash_transparent" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b0c4ea6d9b94b5159106b65b57c4a9ea46859e7f7f8fb1be3e18e2d25bc372" +dependencies = [ + "bip32", + "blake2b_simd", + "bs58", + "core2", + "document-features", + "getset", + "hex", + "proptest", + "ripemd 0.1.3", + "secp256k1 0.29.1", + "sha2 0.10.8", + "subtle", + "zcash_address", + "zcash_encoding", + "zcash_protocol", + "zcash_spec", + "zip32", +] + [[package]] name = "zebra-chain" version = "1.0.0-beta.45" @@ -5975,7 +6107,6 @@ dependencies = [ "bitvec", "blake2b_simd", "blake2s_simd", - "bridgetree", "bs58", "byteorder", "chrono", @@ -6003,15 +6134,15 @@ dependencies = [ "rand_core 0.6.4", "rayon", "reddsa", - "redjubjub", - "ripemd", + "redjubjub 0.7.0", + "ripemd 0.1.3", "sapling-crypto", - "secp256k1", + "secp256k1 0.27.0", "serde", "serde-big-array", "serde_json", "serde_with", - "sha2", + "sha2 0.10.8", "spandoc", "static_assertions", "tempfile", @@ -6028,6 +6159,7 @@ dependencies = [ "zcash_note_encryption", "zcash_primitives", "zcash_protocol", + "zcash_transparent", "zebra-test", ] @@ -6186,6 +6318,7 @@ 
dependencies = [ "tracing", "zcash_address", "zcash_primitives", + "zcash_protocol", "zebra-chain", "zebra-consensus", "zebra-network", @@ -6516,9 +6649,9 @@ dependencies = [ [[package]] name = "zip32" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9943793abf9060b68e1889012dafbd5523ab5b125c0fcc24802d69182f2ac9" +checksum = "13ff9ea444cdbce820211f91e6aa3d3a56bde7202d3c0961b7c38f793abf5637" dependencies = [ "blake2b_simd", "memuse", @@ -6528,9 +6661,9 @@ dependencies = [ [[package]] name = "zip321" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3e613defb0940acef1f54774b51c7f48f2fa705613dd800870dc69f35cd2ea" +checksum = "91b5156b2f2e06d7819c2a5fcd4d515e745f5ac97a06cfb3721205d965de8f13" dependencies = [ "base64 0.22.1", "nom", diff --git a/Cargo.toml b/Cargo.toml index 6aef936bc39..d1344eef6bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,17 +22,18 @@ resolver = "2" # `cargo release` settings [workspace.dependencies] -incrementalmerkletree = { version = "0.7.1", features = ["legacy-api"] } -orchard = "0.10.0" -sapling-crypto = "0.3.0" -zcash_address = "0.6.0" -zcash_client_backend = "0.14.0" -zcash_encoding = "0.2.2" +incrementalmerkletree = { version = "0.8.2", features = ["legacy-api"] } +orchard = "0.11.0" +sapling-crypto = "0.5.0" +zcash_address = "0.7.0" +zcash_client_backend = "0.17.0" +zcash_encoding = "0.3.0" zcash_history = "0.4.0" -zcash_keys = "0.4.0" -zcash_primitives = "0.19.0" -zcash_proofs = "0.19.0" -zcash_protocol = "0.4.0" +zcash_keys = "0.7.0" +zcash_primitives = "0.22.0" +zcash_proofs = "0.22.0" +zcash_transparent = { version = "0.2.0", features = ["test-dependencies"] } +zcash_protocol = "0.5.0" abscissa_core = "0.7.0" atty = "0.2.14" base64 = "0.22.1" @@ -41,10 +42,9 @@ bincode = "1.3.3" bitflags = "2.8.0" bitflags-serde-legacy = "0.1.1" bitvec = "1.0.1" -blake2b_simd = "1.0.2" -blake2s_simd = "1.0.2" 
+blake2b_simd = "1.0.3" +blake2s_simd = "1.0.3" bls12_381 = "0.8.0" -bridgetree = "0.6.0" bs58 = "0.5.1" byteorder = "1.5.0" bytes = "1.9.0" diff --git a/deny.toml b/deny.toml index 7b73f984f69..3387d59b632 100644 --- a/deny.toml +++ b/deny.toml @@ -89,8 +89,11 @@ skip-tree = [ # Remove after release candicate period is over and the ECC crates are not patched anymore { name = "equihash", version = "=0.2.0" }, - # wait for zcash_client_backend to update bech32 - { name = "bech32", version = "=0.9.1" }, + # wait for all librustzcash crates to update sha2, secp256k1, and ripemd + { name = "sha2", version = "=0.10.8" }, + { name = "secp256k1", version = "=0.27.0" }, + { name = "redjubjub", version = "=0.7.0" }, + { name = "ripemd", version = "=0.1.3" }, # wait for zcash_script to update itertools { name = "itertools", version = "=0.13.0" }, diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index fb4833027d3..579b3304b45 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -68,7 +68,6 @@ bitflags = { workspace = true } bitflags-serde-legacy = { workspace = true } blake2b_simd = { workspace = true } blake2s_simd = { workspace = true } -bridgetree = { workspace = true } bs58 = { workspace = true, features = ["check"] } byteorder = { workspace = true } @@ -103,6 +102,7 @@ zcash_primitives = { workspace = true, features = ["transparent-inputs"] } sapling-crypto.workspace = true zcash_protocol.workspace = true zcash_address.workspace = true +zcash_transparent.workspace = true # Time chrono = { workspace = true, features = ["clock", "std", "serde"] } diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index b3433a99c6d..81dd3f8e089 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -18,10 +18,9 @@ use std::{ }; use bitvec::prelude::*; -use bridgetree::NonEmptyFrontier; use halo2::pasta::{group::ff::PrimeField, pallas}; use hex::ToHex; -use incrementalmerkletree::Hashable; +use 
incrementalmerkletree::{frontier::NonEmptyFrontier, Hashable}; use lazy_static::lazy_static; use thiserror::Error; use zcash_primitives::merkle_tree::HashSer; @@ -248,7 +247,7 @@ impl ToHex for Node { /// [`z_gettreestate`][2] RPC requires [`CommitmentTree`][3]s. Implementing /// [`HashSer`] for [`Node`]s allows the conversion. /// -/// [1]: bridgetree::Frontier +/// [1]: incrementalmerkletree::frontier::Frontier /// [2]: https://zcash.github.io/rpc/z_gettreestate.html /// [3]: incrementalmerkletree::frontier::CommitmentTree impl HashSer for Node { @@ -348,7 +347,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Orchard = MERKLE_DEPTH = 32. - inner: bridgetree::Frontier, + inner: incrementalmerkletree::frontier::Frontier, /// A cached root of the tree. /// @@ -637,7 +636,7 @@ impl NoteCommitmentTree { /// Serializes [`Self`] to a format compatible with `zcashd`'s RPCs. pub fn to_rpc_bytes(&self) -> Vec { - // Convert the tree from [`Frontier`](bridgetree::Frontier) to + // Convert the tree from [`Frontier`](incrementalmerkletree::frontier::Frontier) to // [`CommitmentTree`](merkle_tree::CommitmentTree). 
let tree = incrementalmerkletree::frontier::CommitmentTree::from_frontier(&self.inner); @@ -665,7 +664,7 @@ impl Clone for NoteCommitmentTree { impl Default for NoteCommitmentTree { fn default() -> Self { Self { - inner: bridgetree::Frontier::empty(), + inner: incrementalmerkletree::frontier::Frontier::empty(), cached_root: Default::default(), } } diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 71ede1ba7f6..90341bc9d41 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -286,7 +286,7 @@ impl FromStr for Network { pub struct InvalidNetworkError(String); impl zcash_protocol::consensus::Parameters for Network { - fn network_type(&self) -> zcash_address::Network { + fn network_type(&self) -> zcash_protocol::consensus::NetworkType { self.kind().into() } diff --git a/zebra-chain/src/parameters/network/tests/vectors.rs b/zebra-chain/src/parameters/network/tests/vectors.rs index 8845090b50c..061fc9cc24f 100644 --- a/zebra-chain/src/parameters/network/tests/vectors.rs +++ b/zebra-chain/src/parameters/network/tests/vectors.rs @@ -1,7 +1,6 @@ //! Fixed test vectors for the network consensus parameters. -use zcash_primitives::consensus::{self as zp_consensus, Parameters}; -use zcash_protocol::consensus::NetworkConstants as _; +use zcash_protocol::consensus::{self as zp_consensus, NetworkConstants as _, Parameters}; use crate::{ block::Height, diff --git a/zebra-chain/src/primitives/address.rs b/zebra-chain/src/primitives/address.rs index 32ffa69b94a..57745a39e86 100644 --- a/zebra-chain/src/primitives/address.rs +++ b/zebra-chain/src/primitives/address.rs @@ -3,6 +3,7 @@ //! 
Usage: use zcash_address::unified::{self, Container}; +use zcash_protocol::consensus::NetworkType; use crate::{parameters::NetworkKind, transparent, BoxError}; @@ -44,7 +45,7 @@ impl zcash_address::TryFromAddress for Address { type Error = BoxError; fn try_from_transparent_p2pkh( - network: zcash_address::Network, + network: NetworkType, data: [u8; 20], ) -> Result> { Ok(Self::Transparent(transparent::Address::from_pub_key_hash( @@ -54,7 +55,7 @@ impl zcash_address::TryFromAddress for Address { } fn try_from_transparent_p2sh( - network: zcash_address::Network, + network: NetworkType, data: [u8; 20], ) -> Result> { Ok(Self::Transparent(transparent::Address::from_script_hash( @@ -64,7 +65,7 @@ impl zcash_address::TryFromAddress for Address { } fn try_from_sapling( - network: zcash_address::Network, + network: NetworkType, data: [u8; 43], ) -> Result> { let network = network.into(); @@ -74,7 +75,7 @@ impl zcash_address::TryFromAddress for Address { } fn try_from_unified( - network: zcash_address::Network, + network: NetworkType, unified_address: zcash_address::unified::Address, ) -> Result> { let network = network.into(); @@ -170,27 +171,27 @@ impl Address { } } -impl From for NetworkKind { - fn from(network: zcash_address::Network) -> Self { +impl From for NetworkKind { + fn from(network: NetworkType) -> Self { match network { - zcash_address::Network::Main => NetworkKind::Mainnet, - zcash_address::Network::Test => NetworkKind::Testnet, - zcash_address::Network::Regtest => NetworkKind::Regtest, + NetworkType::Main => NetworkKind::Mainnet, + NetworkType::Test => NetworkKind::Testnet, + NetworkType::Regtest => NetworkKind::Regtest, } } } -impl From for zcash_address::Network { +impl From for NetworkType { fn from(network: NetworkKind) -> Self { match network { - NetworkKind::Mainnet => zcash_address::Network::Main, - NetworkKind::Testnet => zcash_address::Network::Test, - NetworkKind::Regtest => zcash_address::Network::Regtest, + NetworkKind::Mainnet => 
NetworkType::Main, + NetworkKind::Testnet => NetworkType::Test, + NetworkKind::Regtest => NetworkType::Regtest, } } } -impl From<&NetworkKind> for zcash_address::Network { +impl From<&NetworkKind> for NetworkType { fn from(network: &NetworkKind) -> Self { (*network).into() } diff --git a/zebra-chain/src/primitives/viewing_key/sapling.rs b/zebra-chain/src/primitives/viewing_key/sapling.rs index 846b9835fa9..85c0d0d2da9 100644 --- a/zebra-chain/src/primitives/viewing_key/sapling.rs +++ b/zebra-chain/src/primitives/viewing_key/sapling.rs @@ -5,7 +5,7 @@ use zcash_client_backend::{ encoding::decode_extended_full_viewing_key, keys::sapling::DiversifiableFullViewingKey as SaplingDfvk, }; -use zcash_primitives::constants::*; +use zcash_protocol::constants::*; use crate::parameters::Network; diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 6dbff2df09c..b5b83466d73 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -29,8 +29,8 @@ impl zp_tx::components::transparent::Authorization for TransparentAuth<'_> { // In this block we convert our Output to a librustzcash to TxOut. // (We could do the serialize/deserialize route but it's simple enough to convert manually) -impl zp_tx::sighash::TransparentAuthorizingContext for TransparentAuth<'_> { - fn input_amounts(&self) -> Vec { +impl zcash_transparent::sighash::TransparentAuthorizingContext for TransparentAuth<'_> { + fn input_amounts(&self) -> Vec { self.all_prev_outputs .iter() .map(|prevout| { @@ -151,7 +151,7 @@ impl<'a> zp_tx::Authorization for PrecomputedAuth<'a> { // End of (mostly) copied code /// Convert a Zebra transparent::Output into a librustzcash one. 
-impl TryFrom<&transparent::Output> for zp_tx::components::TxOut { +impl TryFrom<&transparent::Output> for zcash_transparent::bundle::TxOut { type Error = io::Error; #[allow(clippy::unwrap_in_result)] @@ -160,12 +160,12 @@ impl TryFrom<&transparent::Output> for zp_tx::components::TxOut { .zcash_serialize_to_vec() .expect("zcash_primitives and Zebra transparent output formats must be compatible"); - zp_tx::components::TxOut::read(&mut serialized_output_bytes.as_slice()) + zcash_transparent::bundle::TxOut::read(&mut serialized_output_bytes.as_slice()) } } /// Convert a Zebra transparent::Output into a librustzcash one. -impl TryFrom for zp_tx::components::TxOut { +impl TryFrom for zcash_transparent::bundle::TxOut { type Error = io::Error; // The borrow is actually needed to use TryFrom<&transparent::Output> @@ -176,11 +176,11 @@ impl TryFrom for zp_tx::components::TxOut { } /// Convert a Zebra non-negative Amount into a librustzcash one. -impl TryFrom> for zp_tx::components::amount::NonNegativeAmount { +impl TryFrom> for zcash_protocol::value::Zatoshis { type Error = BalanceError; fn try_from(amount: Amount) -> Result { - zp_tx::components::amount::NonNegativeAmount::from_nonnegative_i64(amount.into()) + zcash_protocol::value::Zatoshis::from_nonnegative_i64(amount.into()) } } @@ -293,16 +293,18 @@ pub(crate) fn sighash( let output = &precomputed_tx_data.all_previous_outputs[input_index]; lock_script = output.lock_script.clone().into(); unlock_script = zcash_primitives::legacy::Script(script_code); - zp_tx::sighash::SignableInput::Transparent { - hash_type: hash_type.bits() as _, - index: input_index, - script_code: &unlock_script, - script_pubkey: &lock_script, - value: output - .value - .try_into() - .expect("amount was previously validated"), - } + zp_tx::sighash::SignableInput::Transparent( + zcash_transparent::sighash::SignableInput::from_parts( + hash_type.try_into().expect("hash type should be ALL"), + input_index, + &unlock_script, + &lock_script, + output + 
.value + .try_into() + .expect("amount was previously validated"), + ), + ) } None => zp_tx::sighash::SignableInput::Shielded, }; @@ -344,7 +346,7 @@ pub(crate) fn transparent_output_address( output: &transparent::Output, network: &Network, ) -> Option { - let tx_out = zp_tx::components::TxOut::try_from(output) + let tx_out = zcash_transparent::bundle::TxOut::try_from(output) .expect("zcash_primitives and Zebra transparent output formats must be compatible"); let alt_addr = tx_out.recipient_address(); diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index a532f7dfcda..f1320be32f8 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -18,9 +18,11 @@ use std::{ }; use bitvec::prelude::*; -use bridgetree::NonEmptyFrontier; use hex::ToHex; -use incrementalmerkletree::{frontier::Frontier, Hashable}; +use incrementalmerkletree::{ + frontier::{Frontier, NonEmptyFrontier}, + Hashable, +}; use lazy_static::lazy_static; use thiserror::Error; @@ -637,7 +639,7 @@ impl NoteCommitmentTree { /// Serializes [`Self`] to a format matching `zcashd`'s RPCs. pub fn to_rpc_bytes(&self) -> Vec { - // Convert the tree from [`Frontier`](bridgetree::Frontier) to + // Convert the tree from [`Frontier`](incrementalmerkletree::frontier::Frontier) to // [`CommitmentTree`](merkle_tree::CommitmentTree). 
let tree = incrementalmerkletree::frontier::CommitmentTree::from_frontier(&self.inner); @@ -666,7 +668,7 @@ impl Clone for NoteCommitmentTree { impl Default for NoteCommitmentTree { fn default() -> Self { Self { - inner: bridgetree::Frontier::empty(), + inner: incrementalmerkletree::frontier::Frontier::empty(), cached_root: Default::default(), } } diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index c5ad74a7d6e..d6bec56d134 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -197,7 +197,7 @@ pub enum NoteCommitmentTreeError { /// job of this tree to protect against double-spending, as it is append-only; double-spending /// is prevented by maintaining the [nullifier set] for each shielded pool. /// -/// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment +/// Internally this wraps [`incrementalmerkletree::frontier::Frontier`], so that we can maintain and increment /// the full tree with only the minimal amount of non-empty nodes/leaves required. /// /// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs @@ -210,9 +210,9 @@ pub enum NoteCommitmentTreeError { #[serde(into = "LegacyNoteCommitmentTree")] #[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { - /// The tree represented as a [`bridgetree::Frontier`]. + /// The tree represented as a [`incrementalmerkletree::frontier::Frontier`]. /// - /// A [`bridgetree::Frontier`] is a subset of the tree that allows to fully specify it. It + /// A [`incrementalmerkletree::frontier::Frontier`] is a subset of the tree that allows to fully specify it. It /// consists of nodes along the rightmost (newer) branch of the tree that /// has non-empty nodes. Upper (near root) empty nodes of the branch are not /// stored. 
diff --git a/zebra-chain/src/transaction/sighash.rs b/zebra-chain/src/transaction/sighash.rs index c2460a7101c..c0f0fc406a3 100644 --- a/zebra-chain/src/transaction/sighash.rs +++ b/zebra-chain/src/transaction/sighash.rs @@ -1,5 +1,7 @@ //! Signature hashes for Zcash transactions +use zcash_transparent::sighash::SighashType; + use super::Transaction; use crate::parameters::NetworkUpgrade; @@ -19,6 +21,29 @@ bitflags::bitflags! { const SINGLE = Self::ALL.bits() | Self::NONE.bits(); /// Anyone can add inputs to this transaction const ANYONECANPAY = 0b1000_0000; + + /// Sign all the outputs and Anyone can add inputs to this transaction + const ALL_ANYONECANPAY = Self::ALL.bits() | Self::ANYONECANPAY.bits(); + /// Sign none of the outputs and Anyone can add inputs to this transaction + const NONE_ANYONECANPAY = Self::NONE.bits() | Self::ANYONECANPAY.bits(); + /// Sign one of the outputs and Anyone can add inputs to this transaction + const SINGLE_ANYONECANPAY = Self::SINGLE.bits() | Self::ANYONECANPAY.bits(); + } +} + +impl TryFrom for SighashType { + type Error = (); + + fn try_from(hash_type: HashType) -> Result { + Ok(match hash_type { + HashType::ALL => Self::ALL, + HashType::NONE => Self::NONE, + HashType::SINGLE => Self::SINGLE, + HashType::ALL_ANYONECANPAY => Self::ALL_ANYONECANPAY, + HashType::NONE_ANYONECANPAY => Self::NONE_ANYONECANPAY, + HashType::SINGLE_ANYONECANPAY => Self::SINGLE_ANYONECANPAY, + _other => return Err(()), + }) } } diff --git a/zebra-consensus/src/primitives/halo2/tests.rs b/zebra-consensus/src/primitives/halo2/tests.rs index e654adcc546..75877d7b2b9 100644 --- a/zebra-consensus/src/primitives/halo2/tests.rs +++ b/zebra-consensus/src/primitives/halo2/tests.rs @@ -49,8 +49,10 @@ fn generate_test_vectors() { ); for _ in 0..num_recipients { + let mut memo: [u8; 512] = [0; 512]; + memo[0] = 0xF6; builder - .add_output(None, recipient, NoteValue::from_raw(note_value), None) + .add_output(None, recipient, NoteValue::from_raw(note_value), memo) 
.unwrap(); } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 4518051a2fe..9012504fe2d 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -91,6 +91,7 @@ serde = { workspace = true, features = ["serde_derive"] } nix = { workspace = true, features = ["signal"] } zcash_primitives = { workspace = true, features = ["transparent-inputs"] } +zcash_protocol.workspace = true # ECC deps used by getblocktemplate-rpcs feature zcash_address = { workspace = true, optional = true} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 809fafac69e..df6d686c749 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1266,9 +1266,11 @@ where async fn z_list_unified_receivers(&self, address: String) -> Result { use zcash_address::unified::Container; - let (network, unified_address): (zcash_address::Network, zcash_address::unified::Address) = - zcash_address::unified::Encoding::decode(address.clone().as_str()) - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + let (network, unified_address): ( + zcash_protocol::consensus::NetworkType, + zcash_address::unified::Address, + ) = zcash_address::unified::Encoding::decode(address.clone().as_str()) + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; let mut p2pkh = String::new(); let mut p2sh = String::new(); diff --git a/zebra-scan/src/service/scan_task/scan.rs b/zebra-scan/src/service/scan_task/scan.rs index fd662f55560..53e151d21c4 100644 --- a/zebra-scan/src/service/scan_task/scan.rs +++ b/zebra-scan/src/service/scan_task/scan.rs @@ -542,7 +542,12 @@ pub fn scanning_keys<'a>( dfvks .into_iter() .enumerate() - .map(|(i, dfvk)| Ok((AccountId::try_from(u32::try_from(i)?)?, dfvk_to_ufvk(dfvk)?))) + .map(|(i, dfvk)| { + Ok(( + AccountId::try_from(u32::try_from(i)?).map_err(|err| eyre!(err))?, + dfvk_to_ufvk(dfvk)?, + )) + }) 
.try_collect::<(_, _), Vec<(_, _)>, _>() .map(ScanningKeys::from_account_ufvks) } diff --git a/zebra-scan/src/tests.rs b/zebra-scan/src/tests.rs index ab2e9ea7ea9..499f9d6f048 100644 --- a/zebra-scan/src/tests.rs +++ b/zebra-scan/src/tests.rs @@ -208,7 +208,7 @@ pub fn fake_compact_block( let cout = CompactSaplingOutput { cmu, ephemeral_key, - ciphertext: enc_ciphertext.as_ref()[..52].to_vec(), + ciphertext: enc_ciphertext[..52].to_vec(), }; let mut ctx = CompactTx::default(); let mut txid = vec![0; 32]; @@ -250,7 +250,7 @@ pub fn random_compact_tx(mut rng: impl RngCore) -> CompactTx { }; let fake_cmu = { let fake_cmu = bls12_381::Scalar::random(&mut rng); - fake_cmu.to_repr().as_ref().to_owned() + fake_cmu.to_repr().to_vec() }; let fake_epk = { let mut buffer = [0; 64]; diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 832a3a4cfb5..355601deecd 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -19,7 +19,7 @@ edition = "2021" # Zebra is only supported on the latest stable Rust version. See the README for details. # Any Zebra release can break compatibility with older Rust versions. 
-rust-version = "1.81.0" +rust-version = "1.85.0" # Settings that impact runtime behaviour From de51063957b2e5edce4b95586b2ed3d6a619bf36 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 3 Apr 2025 14:13:29 -0300 Subject: [PATCH 130/245] fix(clippy): last lints (#9371) * fix clippy lints * another lint missing --- zebra-chain/src/amount/tests/vectors.rs | 6 ++---- zebra-chain/src/block/merkle.rs | 4 ++-- zebra-chain/src/block/tests/generate.rs | 13 ++++--------- zebra-chain/src/primitives/zcash_primitives.rs | 4 ++-- zebra-chain/src/transaction/tests/vectors.rs | 6 ++---- zebra-consensus/src/checkpoint.rs | 2 +- zebra-consensus/src/transaction/check.rs | 4 ++-- zebra-network/src/isolated.rs | 2 +- zebra-network/src/meta_addr.rs | 6 +++--- zebra-network/src/peer/connection.rs | 10 ++++------ zebra-network/src/peer_set/set.rs | 10 +++++----- zebra-network/src/protocol/external/codec.rs | 8 ++++---- zebra-network/src/protocol/external/message.rs | 2 +- zebra-node-services/src/scan_service/request.rs | 4 +--- zebra-state/src/service.rs | 8 ++++---- zebra-state/src/tests/setup.rs | 2 +- zebrad/src/components/mempool.rs | 3 +-- zebrad/tests/common/lightwalletd/sync.rs | 2 +- .../tests/common/lightwalletd/wallet_grpc_test.rs | 4 ++-- 19 files changed, 43 insertions(+), 57 deletions(-) diff --git a/zebra-chain/src/amount/tests/vectors.rs b/zebra-chain/src/amount/tests/vectors.rs index e3b1fb78d47..13ed0748d59 100644 --- a/zebra-chain/src/amount/tests/vectors.rs +++ b/zebra-chain/src/amount/tests/vectors.rs @@ -335,9 +335,7 @@ fn test_sum() -> Result<()> { let times: usize = (i64::MAX / MAX_MONEY) .try_into() .expect("4392 can always be converted to usize"); - let amounts: Vec = std::iter::repeat(MAX_MONEY.try_into()?) 
- .take(times + 1) - .collect(); + let amounts: Vec = std::iter::repeat_n(MAX_MONEY.try_into()?, times + 1).collect(); let sum_ref = amounts.iter().sum::>(); let sum_value = amounts.into_iter().sum::>(); @@ -357,7 +355,7 @@ fn test_sum() -> Result<()> { .expect("4392 can always be converted to usize"); let neg_max_money: Amount = (-MAX_MONEY).try_into()?; let amounts: Vec> = - std::iter::repeat(neg_max_money).take(times + 1).collect(); + std::iter::repeat_n(neg_max_money, times + 1).collect(); let sum_ref = amounts.iter().sum::>(); let sum_value = amounts.into_iter().sum::>(); diff --git a/zebra-chain/src/block/merkle.rs b/zebra-chain/src/block/merkle.rs index 639324f9d82..9d707489803 100644 --- a/zebra-chain/src/block/merkle.rs +++ b/zebra-chain/src/block/merkle.rs @@ -1,6 +1,6 @@ //! The Bitcoin-inherited Merkle tree of transactions. -use std::{fmt, io::Write, iter}; +use std::{fmt, io::Write}; use hex::{FromHex, ToHex}; @@ -404,7 +404,7 @@ impl std::iter::FromIterator for AuthDataRoot { // https://zips.z.cash/zip-0244#block-header-changes // Pad with enough leaves to make the tree full (a power of 2). 
let pad_count = hashes.len().next_power_of_two() - hashes.len(); - hashes.extend(iter::repeat([0u8; 32]).take(pad_count)); + hashes.extend(std::iter::repeat_n([0u8; 32], pad_count)); assert!(hashes.len().is_power_of_two()); while hashes.len() > 1 { diff --git a/zebra-chain/src/block/tests/generate.rs b/zebra-chain/src/block/tests/generate.rs index b908b6f9747..4fae246f082 100644 --- a/zebra-chain/src/block/tests/generate.rs +++ b/zebra-chain/src/block/tests/generate.rs @@ -131,9 +131,8 @@ fn multi_transaction_block(oversized: bool) -> Block { } // Create transactions to be just below or just above the limit - let transactions = std::iter::repeat(Arc::new(transaction)) - .take(max_transactions_in_block) - .collect::>(); + let transactions = + std::iter::repeat_n(Arc::new(transaction), max_transactions_in_block).collect::>(); // Add the transactions into a block let block = Block { @@ -193,9 +192,7 @@ fn single_transaction_block_many_inputs(oversized: bool) -> Block { let mut outputs = Vec::new(); // Create inputs to be just below the limit - let inputs = std::iter::repeat(input) - .take(max_inputs_in_tx) - .collect::>(); + let inputs = std::iter::repeat_n(input, max_inputs_in_tx).collect::>(); // 1 single output outputs.push(output); @@ -268,9 +265,7 @@ fn single_transaction_block_many_outputs(oversized: bool) -> Block { let inputs = vec![input]; // Create outputs to be just below the limit - let outputs = std::iter::repeat(output) - .take(max_outputs_in_tx) - .collect::>(); + let outputs = std::iter::repeat_n(output, max_outputs_in_tx).collect::>(); // Create a big transaction let big_transaction = Transaction::V1 { diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index b5b83466d73..926f6008479 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -279,8 +279,8 @@ impl<'a> PrecomputedTxData<'a> { /// signature hash is being computed. 
/// - `hash_type`: the type of hash (SIGHASH) being used. /// - `input_index_script_code`: a tuple with the index of the transparent Input -/// for which we are producing a sighash and the respective script code being -/// validated, or None if it's a shielded input. +/// for which we are producing a sighash and the respective script code being +/// validated, or None if it's a shielded input. pub(crate) fn sighash( precomputed_tx_data: &PrecomputedTxData, hash_type: HashType, diff --git a/zebra-chain/src/transaction/tests/vectors.rs b/zebra-chain/src/transaction/tests/vectors.rs index d8dd2e54097..43f2292f598 100644 --- a/zebra-chain/src/transaction/tests/vectors.rs +++ b/zebra-chain/src/transaction/tests/vectors.rs @@ -41,7 +41,7 @@ lazy_static! { /// given index is read. Therefore, we just need a list where `array[index]` /// is the given `output`. fn mock_pre_v5_output_list(output: transparent::Output, index: usize) -> Vec { - iter::repeat(output).take(index + 1).collect() + std::iter::repeat_n(output, index + 1).collect() } #[test] @@ -233,9 +233,7 @@ fn deserialize_large_transaction() { let tx_inputs_num = MAX_BLOCK_BYTES as usize / input_data.len(); // Set the precalculated amount of inputs and a single output. - let inputs = std::iter::repeat(input) - .take(tx_inputs_num) - .collect::>(); + let inputs = std::iter::repeat_n(input, tx_inputs_num).collect::>(); // Create an oversized transaction. Adding the output and lock time causes // the transaction to overflow the threshold. 
diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 70f36d9c2da..54f36bd460f 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -86,7 +86,7 @@ type QueuedBlockList = Vec; /// /// This value is a tradeoff between: /// - rejecting bad blocks: if we queue more blocks, we need fewer network -/// retries, but use a bit more CPU when verifying, +/// retries, but use a bit more CPU when verifying, /// - avoiding a memory DoS: if we queue fewer blocks, we use less memory. /// /// Memory usage is controlled by the sync service, because it controls block diff --git a/zebra-consensus/src/transaction/check.rs b/zebra-consensus/src/transaction/check.rs index 3e12578cfe6..9d075843c93 100644 --- a/zebra-consensus/src/transaction/check.rs +++ b/zebra-consensus/src/transaction/check.rs @@ -27,9 +27,9 @@ use crate::error::TransactionError; /// /// Arguments: /// - `block_height`: the height of the mined block, or the height of the next block for mempool -/// transactions +/// transactions /// - `block_time`: the time in the mined block header, or the median-time-past of the next block -/// for the mempool. Optional if the lock time is a height. +/// for the mempool. Optional if the lock time is a height. /// /// # Panics /// diff --git a/zebra-network/src/isolated.rs b/zebra-network/src/isolated.rs index c6ca66d53b2..0a1ebd1bee3 100644 --- a/zebra-network/src/isolated.rs +++ b/zebra-network/src/isolated.rs @@ -42,7 +42,7 @@ mod tests; /// - `network`: the Zcash [`Network`] used for this connection. /// /// - `data_stream`: an existing data stream. This can be a non-anonymised TCP connection, -/// or a Tor client `arti_client::DataStream`. +/// or a Tor client `arti_client::DataStream`. /// /// - `user_agent`: a valid BIP14 user-agent, e.g., the empty string. 
pub fn connect_isolated( diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index ec9eb6e848d..ac032ecb10f 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -163,11 +163,11 @@ pub struct MetaAddr { /// /// The exact meaning depends on `last_connection_state`: /// - `Responded`: the services advertised by this peer, the last time we - /// performed a handshake with it + /// performed a handshake with it /// - `NeverAttempted`: the unverified services advertised by another peer, - /// then gossiped by the peer that sent us this address + /// then gossiped by the peer that sent us this address /// - `Failed` or `AttemptPending`: unverified services via another peer, - /// or services advertised in a previous handshake + /// or services advertised in a previous handshake /// /// ## Security /// diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index fa065f53920..8417a37bf39 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -110,8 +110,8 @@ impl Handler { Handler::Ping(_) => "Ping".into(), Handler::Peers => "Peers".into(), - Handler::FindBlocks { .. } => "FindBlocks".into(), - Handler::FindHeaders { .. } => "FindHeaders".into(), + Handler::FindBlocks => "FindBlocks".into(), + Handler::FindHeaders => "FindHeaders".into(), Handler::BlocksByHash { .. } => "BlocksByHash".into(), Handler::TransactionsById { .. } => "TransactionsById".into(), @@ -1189,7 +1189,7 @@ where self.fail_with(PeerError::DuplicateHandshake).await; Consumed } - Message::Verack { .. } => { + Message::Verack => { self.fail_with(PeerError::DuplicateHandshake).await; Consumed } @@ -1218,9 +1218,7 @@ where Unused } // These messages should never be sent by peers. - Message::FilterLoad { .. } - | Message::FilterAdd { .. } - | Message::FilterClear { .. } => { + Message::FilterLoad { .. } | Message::FilterAdd { .. 
} | Message::FilterClear => { // # Security // // Zcash connections are not authenticated, so malicious nodes can send fake messages, diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index c66493d4f9a..11e41a01aa9 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -281,16 +281,16 @@ where /// - `discover`: handles peer connects and disconnects; /// - `demand_signal`: requests more peers when all peers are busy (unready); /// - `handle_rx`: receives background task handles, - /// monitors them to make sure they're still running, - /// and shuts down all the tasks as soon as one task exits; + /// monitors them to make sure they're still running, + /// and shuts down all the tasks as soon as one task exits; /// - `inv_stream`: receives inventory changes from peers, - /// allowing the peer set to direct inventory requests; + /// allowing the peer set to direct inventory requests; /// - `bans_receiver`: receives a map of banned IP addresses that should be dropped; /// - `address_book`: when peer set is busy, it logs address book diagnostics. /// - `minimum_peer_version`: endpoint to see the minimum peer protocol version in real time. /// - `max_conns_per_ip`: configured maximum number of peers that can be in the - /// peer set per IP, defaults to the config value or to - /// [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`]. + /// peer set per IP, defaults to the config value or to + /// [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`]. pub fn new( config: &Config, discover: D, diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index 2ed3673107e..df7017e3903 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -138,12 +138,12 @@ impl Encoder for Codec { // of length 12, as they must be &[u8; 12]. let command = match item { Version { .. } => b"version\0\0\0\0\0", - Verack { .. 
} => b"verack\0\0\0\0\0\0", + Verack => b"verack\0\0\0\0\0\0", Ping { .. } => b"ping\0\0\0\0\0\0\0\0", Pong { .. } => b"pong\0\0\0\0\0\0\0\0", Reject { .. } => b"reject\0\0\0\0\0\0", Addr { .. } => b"addr\0\0\0\0\0\0\0\0", - GetAddr { .. } => b"getaddr\0\0\0\0\0", + GetAddr => b"getaddr\0\0\0\0\0", Block { .. } => b"block\0\0\0\0\0\0\0", GetBlocks { .. } => b"getblocks\0\0\0", Headers { .. } => b"headers\0\0\0\0\0", @@ -152,10 +152,10 @@ impl Encoder for Codec { GetData { .. } => b"getdata\0\0\0\0\0", NotFound { .. } => b"notfound\0\0\0\0", Tx { .. } => b"tx\0\0\0\0\0\0\0\0\0\0", - Mempool { .. } => b"mempool\0\0\0\0\0", + Mempool => b"mempool\0\0\0\0\0", FilterLoad { .. } => b"filterload\0\0", FilterAdd { .. } => b"filteradd\0\0\0", - FilterClear { .. } => b"filterclear\0", + FilterClear => b"filterclear\0", }; trace!(?item, len = body_length); diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index f8ee8cbc9b8..025f909d420 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -218,7 +218,7 @@ pub enum Message { /// When a peer requests a list of transaction hashes, `zcashd` returns: /// - a batch of messages containing found transactions, then /// - a `notfound` message containing a list of transaction hashes that - /// aren't available in its mempool or state. + /// aren't available in its mempool or state. /// /// But when a peer requests blocks or headers, any missing items are /// silently skipped, without any `notfound` messages. 
diff --git a/zebra-node-services/src/scan_service/request.rs b/zebra-node-services/src/scan_service/request.rs index e6a8e9c492b..3da449b60f4 100644 --- a/zebra-node-services/src/scan_service/request.rs +++ b/zebra-node-services/src/scan_service/request.rs @@ -53,9 +53,7 @@ impl Request { #[test] fn test_check_num_keys() { - let fake_keys: Vec<_> = std::iter::repeat(String::new()) - .take(MAX_REQUEST_KEYS + 1) - .collect(); + let fake_keys: Vec<_> = std::iter::repeat_n(String::new(), MAX_REQUEST_KEYS + 1).collect(); let bad_requests = [ Request::DeleteKeys(vec![]), diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index bef0af00019..1c2d36b1911 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -86,11 +86,11 @@ use self::queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified, /// /// This service modifies and provides access to: /// - the non-finalized state: the ~100 most recent blocks. -/// Zebra allows chain forks in the non-finalized state, -/// stores it in memory, and re-downloads it when restarted. +/// Zebra allows chain forks in the non-finalized state, +/// stores it in memory, and re-downloads it when restarted. /// - the finalized state: older blocks that have many confirmations. -/// Zebra stores the single best chain in the finalized state, -/// and re-loads it from disk when restarted. +/// Zebra stores the single best chain in the finalized state, +/// and re-loads it from disk when restarted. /// /// Read requests to this service are buffered, then processed concurrently. /// Block write requests are buffered, then queued, then processed in order by a separate task. diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index b5f3fe1bed2..7c4c4a0bd6c 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -26,7 +26,7 @@ use crate::{ /// Arguments: /// - `transaction_version_override`: See `LedgerState::height_strategy` for details. 
/// - `transaction_has_valid_network_upgrade`: See `LedgerState::height_strategy` for details. -/// Note: `false` allows zero or more invalid network upgrades. +/// Note: `false` allows zero or more invalid network upgrades. /// - `blocks_after_nu_activation`: The number of blocks the strategy will generate /// after the provided `network_upgrade`. /// - `network_upgrade` - The network upgrade that we are using to simulate from where the diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index e022bb464be..22292c530c3 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -894,8 +894,7 @@ impl Service for Mempool { Request::Queue(gossiped_txs) => Response::Queued( // Special case; we can signal the error inside the response, // because the inbound service ignores inner errors. - iter::repeat(MempoolError::Disabled) - .take(gossiped_txs.len()) + iter::repeat_n(MempoolError::Disabled, gossiped_txs.len()) .map(BoxError::from) .map(Err) .collect(), diff --git a/zebrad/tests/common/lightwalletd/sync.rs b/zebrad/tests/common/lightwalletd/sync.rs index 3a55aedb5c2..f5e1534db8c 100644 --- a/zebrad/tests/common/lightwalletd/sync.rs +++ b/zebrad/tests/common/lightwalletd/sync.rs @@ -193,7 +193,7 @@ pub fn are_zebrad_and_lightwalletd_tips_synced( // Block number is the last word of the message. We rely on that specific for this to work. let last = msg .split(' ') - .last() + .next_back() .expect("always possible to get the last word of a separated by space string"); lightwalletd_next_height = last .parse() diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs index a26ada3f9c3..08a8c5bfa8b 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs @@ -20,9 +20,9 @@ //! - `GetTaddressBalanceStream`: Covered. //! //! - `GetMempoolTx`: Covered by the send_transaction_test, -//! 
currently disabled by `lightwalletd`. +//! currently disabled by `lightwalletd`. //! - `GetMempoolStream`: Covered by the send_transaction_test, -//! currently disabled by `lightwalletd`. +//! currently disabled by `lightwalletd`. //! //! - `GetTreeState`: Covered. //! From ddcfeacddfbeedc45100e127bc370dc84e5d52c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 09:37:29 +0000 Subject: [PATCH 131/245] build(deps): bump the devops group across 1 directory with 4 updates (#9368) Bumps the devops group with 4 updates in the / directory: [tj-actions/changed-files](https://github.com/tj-actions/changed-files), [peter-evans/dockerhub-description](https://github.com/peter-evans/dockerhub-description), [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) and [github/codeql-action](https://github.com/github/codeql-action). Updates `tj-actions/changed-files` from 46.0.1 to 46.0.3 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v46.0.1...v46.0.3) Updates `peter-evans/dockerhub-description` from 4.0.0 to 4.0.1 - [Release notes](https://github.com/peter-evans/dockerhub-description/releases) - [Commits](https://github.com/peter-evans/dockerhub-description/compare/v4.0.0...v4.0.1) Updates `astral-sh/setup-uv` from 5.1.0 to 5.4.1 - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/887a942a15af3a7626099df99e897a18d9e5ab3a...0c5e2b8115b80b4c7c5ddf6ffdd634974642d182) Updates `github/codeql-action` from 3.28.0 to 3.28.13 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - 
[Commits](https://github.com/github/codeql-action/compare/48ab28a6f5dbc2a99bf1e0131198dd8f1df78169...1b549b9259bda1cb5ddde3b41741a82a2d15a841) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: peter-evans/dockerhub-description dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: astral-sh/setup-uv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-lint.yml | 4 ++-- .github/workflows/docs-dockerhub-description.yml | 2 +- .github/workflows/zizmor.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index b2917c17cb6..a62ac760af5 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v46.0.1 + uses: tj-actions/changed-files@v46.0.3 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v46.0.1 + uses: tj-actions/changed-files@v46.0.3 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index 8efdaca1d54..8ef51be9cd4 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -23,7 +23,7 @@ jobs: persist-credentials: false - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4.0.0 + 
uses: peter-evans/dockerhub-description@v4.0.1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 25f14de877a..b9bb0ed866a 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -19,13 +19,13 @@ jobs: with: persist-credentials: false - name: Install the latest version of uv - uses: astral-sh/setup-uv@887a942a15af3a7626099df99e897a18d9e5ab3a # v4 + uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v4 - name: Run zizmor 🌈 run: uvx zizmor --format sarif . > results.sarif env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 with: sarif_file: results.sarif category: zizmor From 8e04edb24fe3e02d9a90a15c282e720af86ff58f Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 4 Apr 2025 14:13:11 +0200 Subject: [PATCH 132/245] feat(rpc): Support negative heights in `HashOrHeight` (#9316) * Support negative heights in `HashOrHeight` * Change ownership of cached state * Revert "Change ownership of cached state" This reverts commit 1fe9ae393faec167f81e5f0ebb43326885824686. 
* add solution to other methods, add tests * cargo clippy fix * fmt --------- Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-rpc/src/methods.rs | 28 +++++++++--------- zebra-rpc/src/methods/tests/vectors.rs | 32 ++++++++++++++++++++- zebra-state/src/request.rs | 40 ++++++++++++++++++++++++-- 3 files changed, 84 insertions(+), 16 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index cd162435755..6a5ddf5f755 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -834,11 +834,11 @@ where None }; - let hash_or_height: HashOrHeight = hash_or_height - .parse() - // Reference for the legacy error code: - // - .map_error(server::error::LegacyCode::InvalidParameter)?; + let hash_or_height = + HashOrHeight::new(&hash_or_height, self.latest_chain_tip.best_tip_height()) + // Reference for the legacy error code: + // + .map_error(server::error::LegacyCode::InvalidParameter)?; if verbosity == 0 { let request = zebra_state::ReadRequest::Block(hash_or_height); @@ -1006,9 +1006,11 @@ where let verbose = verbose.unwrap_or(true); let network = self.network.clone(); - let hash_or_height: HashOrHeight = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; + let hash_or_height = + HashOrHeight::new(&hash_or_height, self.latest_chain_tip.best_tip_height()) + // Reference for the legacy error code: + // + .map_error(server::error::LegacyCode::InvalidParameter)?; let zebra_state::ReadResponse::BlockHeader { header, hash, @@ -1301,11 +1303,11 @@ where let mut state = self.state.clone(); let network = self.network.clone(); - // Reference for the legacy error code: - // - let hash_or_height = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidParameter)?; + let hash_or_height = + HashOrHeight::new(&hash_or_height, self.latest_chain_tip.best_tip_height()) + // Reference for the legacy error code: + // + 
.map_error(server::error::LegacyCode::InvalidParameter)?; // Fetch the block referenced by [`hash_or_height`] from the state. // diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 94f54c65d47..1648f77875d 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -17,7 +17,7 @@ use zebra_chain::{ use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::BoxError; -use zebra_state::{GetBlockTemplateChainInfo, LatestChainTip, ReadStateService}; +use zebra_state::{GetBlockTemplateChainInfo, IntoDisk, LatestChainTip, ReadStateService}; use zebra_test::mock_service::MockService; use super::super::*; @@ -196,6 +196,21 @@ async fn rpc_getblock() { assert_eq!(get_block, expected_result); } + // Test negative heights: -1 should return block 10, -2 block 9, etc. + for neg_height in (-10..=-1).rev() { + // Convert negative height to corresponding index + let index = (neg_height + (blocks.len() as i32)) as usize; + + let expected_result = GetBlock::Raw(blocks[index].clone().into()); + + let get_block = rpc + .get_block(neg_height.to_string(), Some(0u8)) + .await + .expect("We should have a GetBlock struct"); + + assert_eq!(get_block, expected_result); + } + // Create empty note commitment tree information. let sapling = SaplingTrees { size: 0 }; let orchard = OrchardTrees { size: 0 }; @@ -684,6 +699,21 @@ async fn rpc_getblockheader() { } } + // Test negative heights: -1 should return a header for block 10, -2 block header 9, etc. 
+ for neg_height in (-10..=-1).rev() { + // Convert negative height to corresponding index + let index = (neg_height + (blocks.len() as i32)) as usize; + + let expected_result = GetBlockHeader::Raw(HexData(blocks[index].header.clone().as_bytes())); + + let get_block = rpc + .get_block_header(neg_height.to_string(), Some(false)) + .await + .expect("We should have a GetBlock struct"); + + assert_eq!(get_block, expected_result); + } + mempool.expect_no_requests().await; // The queue task should continue without errors or panics diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 03a43626945..125021afb7f 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -2,13 +2,13 @@ use std::{ collections::{HashMap, HashSet}, - ops::{Deref, DerefMut, RangeInclusive}, + ops::{Add, Deref, DerefMut, RangeInclusive}, sync::Arc, }; use zebra_chain::{ amount::{Amount, NegativeAllowed, NonNegative}, - block::{self, Block}, + block::{self, Block, HeightDiff}, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees, @@ -133,6 +133,42 @@ impl HashOrHeight { None } } + + /// Constructs a new [`HashOrHeight`] from a string containing a hash or a positive or negative + /// height. + /// + /// When the provided `hash_or_height` contains a negative height, the `tip_height` parameter + /// needs to be `Some` since height `-1` points to the tip. + pub fn new(hash_or_height: &str, tip_height: Option) -> Result { + hash_or_height + .parse() + .map(Self::Hash) + .or_else(|_| hash_or_height.parse().map(Self::Height)) + .or_else(|_| { + hash_or_height + .parse() + .map_err(|_| "could not parse negative height") + .and_then(|d: HeightDiff| { + if d.is_negative() { + { + Ok(HashOrHeight::Height( + tip_height + .ok_or("missing tip height")? + .add(d) + .ok_or("underflow when adding negative height to tip")? 
+ .next() + .map_err(|_| "height -1 needs to point to tip")?, + )) + } + } else { + Err("height was not negative") + } + }) + }) + .map_err(|_| { + "parse error: could not convert the input string to a hash or height".to_string() + }) + } } impl std::fmt::Display for HashOrHeight { From ae37e77958bff2ae8c3edd963b0196c003ce1fe5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 13:35:16 +0000 Subject: [PATCH 133/245] build(deps): bump the ecc group across 1 directory with 2 updates (#9372) Bumps the ecc group with 2 updates in the / directory: [zcash_protocol](https://github.com/zcash/librustzcash) and [equihash](https://github.com/zcash/librustzcash). Updates `zcash_protocol` from 0.5.0 to 0.5.1 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_protocol-0.5.0...zcash_protocol-0.5.1) Updates `equihash` from 0.2.0 to 0.2.2 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/0.2.0...zcash_encoding-0.2.2) --- updated-dependencies: - dependency-name: zcash_protocol dependency-version: 0.5.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc - dependency-name: equihash dependency-version: 0.2.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 11 ++++++----- Cargo.toml | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68e042adfce..0cd12b3609f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1301,12 +1301,13 @@ dependencies = [ [[package]] name = "equihash" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab579d7cf78477773b03e80bc2f89702ef02d7112c711d54ca93dcdce68533d5" +checksum = "ca4f333d4ccc9d23c06593733673026efa71a332e028b00f12cf427b9677dce9" dependencies = [ "blake2b_simd", - "byteorder", + "core2", + "document-features", ] [[package]] @@ -6041,9 +6042,9 @@ dependencies = [ [[package]] name = "zcash_protocol" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72df627873d103e973b536d34d16cc802d06a3d1494dc010781449789a156dc5" +checksum = "de0acf60e235c5ba42c83f1e7e3763cf90a436583e6de71557fed26bab2d65dc" dependencies = [ "core2", "document-features", diff --git a/Cargo.toml b/Cargo.toml index d1344eef6bd..feb2b596fbb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ zcash_keys = "0.7.0" zcash_primitives = "0.22.0" zcash_proofs = "0.22.0" zcash_transparent = { version = "0.2.0", features = ["test-dependencies"] } -zcash_protocol = "0.5.0" +zcash_protocol = "0.5.1" abscissa_core = "0.7.0" atty = "0.2.14" base64 = "0.22.1" @@ -57,7 +57,7 @@ crossbeam-channel = "0.5.14" dirs = "6.0.0" ed25519-zebra = "4.0.3" elasticsearch = { version = "8.17.0-alpha.1", default-features = false } -equihash = "0.2.0" +equihash = "0.2.2" ff = "0.13.0" futures = "0.3.31" futures-core = "0.3.28" From 6f7a3b928432eb7dcb7b2ba5a2984acfc2a652bc Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Fri, 4 Apr 2025 16:33:20 +0100 Subject: [PATCH 
134/245] Deduplicate dependencies (#9382) * Fix duplicate `secp256k1` dependency * Fix duplicate `redjubjub` dependency --- Cargo.lock | 52 +++++++++++----------------------------------------- Cargo.toml | 4 ++-- 2 files changed, 13 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0cd12b3609f..2ee29ff79b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -456,7 +456,7 @@ dependencies = [ "hmac", "rand_core 0.6.4", "ripemd 0.2.0-pre.4", - "secp256k1 0.29.1", + "secp256k1", "sha2 0.11.0-pre.4", "subtle", "zeroize", @@ -3672,19 +3672,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "redjubjub" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" -dependencies = [ - "rand_core 0.6.4", - "reddsa", - "serde", - "thiserror 1.0.69", - "zeroize", -] - [[package]] name = "redjubjub" version = "0.8.0" @@ -3693,6 +3680,7 @@ checksum = "89b0ac1bc6bb3696d2c6f52cff8fba57238b81da8c0214ee6cd146eb8fde364e" dependencies = [ "rand_core 0.6.4", "reddsa", + "serde", "thiserror 1.0.69", "zeroize", ] @@ -4032,7 +4020,7 @@ dependencies = [ "memuse", "rand 0.8.5", "rand_core 0.6.4", - "redjubjub 0.8.0", + "redjubjub", "subtle", "tracing", "zcash_note_encryption", @@ -4046,32 +4034,14 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "secp256k1-sys 0.8.1", - "serde", -] - [[package]] name = "secp256k1" version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "secp256k1-sys 0.10.1", -] - -[[package]] -name = 
"secp256k1-sys" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] @@ -6001,10 +5971,10 @@ dependencies = [ "orchard", "rand 0.8.5", "rand_core 0.6.4", - "redjubjub 0.8.0", + "redjubjub", "ripemd 0.1.3", "sapling-crypto", - "secp256k1 0.29.1", + "secp256k1", "sha2 0.10.8", "subtle", "tracing", @@ -6033,7 +6003,7 @@ dependencies = [ "known-folders", "lazy_static", "rand_core 0.6.4", - "redjubjub 0.8.0", + "redjubjub", "sapling-crypto", "tracing", "xdg", @@ -6089,7 +6059,7 @@ dependencies = [ "hex", "proptest", "ripemd 0.1.3", - "secp256k1 0.29.1", + "secp256k1", "sha2 0.10.8", "subtle", "zcash_address", @@ -6135,10 +6105,10 @@ dependencies = [ "rand_core 0.6.4", "rayon", "reddsa", - "redjubjub 0.7.0", + "redjubjub", "ripemd 0.1.3", "sapling-crypto", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde-big-array", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index feb2b596fbb..2aacbe8d6f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,13 +104,13 @@ rand_chacha = "0.3.1" rand_core = "0.6.4" rayon = "1.10.0" reddsa = "0.5.1" -redjubjub = "0.7.0" +redjubjub = "0.8" regex = "1.11.0" reqwest = { version = "0.12.9", default-features = false } ripemd = "0.1.3" rlimit = "0.10.2" rocksdb = { version = "0.22.0", default-features = false } -secp256k1 = "0.27.0" +secp256k1 = "0.29" semver = "1.0.25" sentry = { version = "0.36.0", default-features = false } serde = "1.0.217" From 0d201a59f812d7312239f488f9c8beea73cd64d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 5 Apr 2025 11:07:45 +0000 Subject: [PATCH 135/245] build(deps): bump the prod group across 1 directory with 32 updates (#9385) * build(deps): bump the prod group across 1 directory with 32 updates Bumps the prod group with 32 updates in the / directory: | Package | 
From | To | | --- | --- | --- | | [bitflags](https://github.com/bitflags/bitflags) | `2.8.0` | `2.9.0` | | [bytes](https://github.com/tokio-rs/bytes) | `1.9.0` | `1.10.1` | | [chrono](https://github.com/chronotope/chrono) | `0.4.39` | `0.4.40` | | [clap](https://github.com/clap-rs/clap) | `4.5.27` | `4.5.35` | | [ff](https://github.com/zkcrypto/ff) | `0.13.0` | `0.13.1` | | [http-body-util](https://github.com/hyperium/http-body) | `0.1.2` | `0.1.3` | | [humantime](https://github.com/chronotope/humantime) | `2.1.0` | `2.2.0` | | [hyper-util](https://github.com/hyperium/hyper-util) | `0.1.10` | `0.1.11` | | [indexmap](https://github.com/indexmap-rs/indexmap) | `2.7.1` | `2.8.0` | | [inferno](https://github.com/jonhoo/inferno) | `0.12.1` | `0.12.2` | | [insta](https://github.com/mitsuhiko/insta) | `1.42.1` | `1.42.2` | | [jsonrpsee-proc-macros](https://github.com/paritytech/jsonrpsee) | `0.24.8` | `0.24.9` | | [jsonrpsee-types](https://github.com/paritytech/jsonrpsee) | `0.24.8` | `0.24.9` | | [log](https://github.com/rust-lang/log) | `0.4.25` | `0.4.27` | | [metrics-exporter-prometheus](https://github.com/metrics-rs/metrics) | `0.16.1` | `0.16.2` | | [once_cell](https://github.com/matklad/once_cell) | `1.20.2` | `1.21.3` | | [owo-colors](https://github.com/owo-colors/owo-colors) | `4.1.0` | `4.2.0` | | [pin-project](https://github.com/taiki-e/pin-project) | `1.1.8` | `1.1.10` | | [prost](https://github.com/tokio-rs/prost) | `0.13.4` | `0.13.5` | | [quote](https://github.com/dtolnay/quote) | `1.0.38` | `1.0.40` | | [reqwest](https://github.com/seanmonstar/reqwest) | `0.12.12` | `0.12.15` | | [semver](https://github.com/dtolnay/semver) | `1.0.25` | `1.0.26` | | [serde](https://github.com/serde-rs/serde) | `1.0.217` | `1.0.219` | | [serde_json](https://github.com/serde-rs/json) | `1.0.138` | `1.0.140` | | [syn](https://github.com/dtolnay/syn) | `2.0.96` | `2.0.100` | | [tempfile](https://github.com/Stebalien/tempfile) | `3.16.0` | `3.19.1` | | 
[thiserror](https://github.com/dtolnay/thiserror) | `2.0.11` | `2.0.12` | | [tinyvec](https://github.com/Lokathor/tinyvec) | `1.8.1` | `1.9.0` | | [tokio](https://github.com/tokio-rs/tokio) | `1.43.0` | `1.44.1` | | [tokio-util](https://github.com/tokio-rs/tokio) | `0.7.13` | `0.7.14` | | [toml](https://github.com/toml-rs/toml) | `0.8.19` | `0.8.20` | | [tower](https://github.com/tower-rs/tower) | `0.4.13` | `0.5.2` | Updates `bitflags` from 2.8.0 to 2.9.0 - [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/2.8.0...2.9.0) Updates `bytes` from 1.9.0 to 1.10.1 - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.9.0...v1.10.1) Updates `chrono` from 0.4.39 to 0.4.40 - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.39...v0.4.40) Updates `clap` from 4.5.27 to 4.5.35 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.27...clap_complete-v4.5.35) Updates `ff` from 0.13.0 to 0.13.1 - [Changelog](https://github.com/zkcrypto/ff/blob/main/CHANGELOG.md) - [Commits](https://github.com/zkcrypto/ff/commits) Updates `http-body-util` from 0.1.2 to 0.1.3 - [Release notes](https://github.com/hyperium/http-body/releases) - [Commits](https://github.com/hyperium/http-body/compare/http-body-util-v0.1.2...http-body-util-v0.1.3) Updates `humantime` from 2.1.0 to 2.2.0 - [Commits](https://github.com/chronotope/humantime/commits) Updates `hyper-util` from 0.1.10 to 0.1.11 - [Release 
notes](https://github.com/hyperium/hyper-util/releases) - [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.10...v0.1.11) Updates `indexmap` from 2.7.1 to 2.8.0 - [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.7.1...2.8.0) Updates `inferno` from 0.12.1 to 0.12.2 - [Changelog](https://github.com/jonhoo/inferno/blob/main/CHANGELOG.md) - [Commits](https://github.com/jonhoo/inferno/compare/v0.12.1...v0.12.2) Updates `insta` from 1.42.1 to 1.42.2 - [Release notes](https://github.com/mitsuhiko/insta/releases) - [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md) - [Commits](https://github.com/mitsuhiko/insta/compare/1.42.1...1.42.2) Updates `jsonrpsee-proc-macros` from 0.24.8 to 0.24.9 - [Release notes](https://github.com/paritytech/jsonrpsee/releases) - [Changelog](https://github.com/paritytech/jsonrpsee/blob/v0.24.9/CHANGELOG.md) - [Commits](https://github.com/paritytech/jsonrpsee/compare/v0.24.8...v0.24.9) Updates `jsonrpsee-types` from 0.24.8 to 0.24.9 - [Release notes](https://github.com/paritytech/jsonrpsee/releases) - [Changelog](https://github.com/paritytech/jsonrpsee/blob/v0.24.9/CHANGELOG.md) - [Commits](https://github.com/paritytech/jsonrpsee/compare/v0.24.8...v0.24.9) Updates `log` from 0.4.25 to 0.4.27 - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.25...0.4.27) Updates `metrics-exporter-prometheus` from 0.16.1 to 0.16.2 - [Changelog](https://github.com/metrics-rs/metrics/blob/main/release.toml) - [Commits](https://github.com/metrics-rs/metrics/compare/metrics-exporter-prometheus-v0.16.1...metrics-exporter-prometheus-v0.16.2) Updates `once_cell` from 1.20.2 to 1.21.3 - 
[Changelog](https://github.com/matklad/once_cell/blob/master/CHANGELOG.md) - [Commits](https://github.com/matklad/once_cell/compare/v1.20.2...v1.21.3) Updates `owo-colors` from 4.1.0 to 4.2.0 - [Release notes](https://github.com/owo-colors/owo-colors/releases) - [Changelog](https://github.com/owo-colors/owo-colors/blob/main/CHANGELOG.md) - [Commits](https://github.com/owo-colors/owo-colors/compare/v4.1.0...v4.2.0) Updates `pin-project` from 1.1.8 to 1.1.10 - [Release notes](https://github.com/taiki-e/pin-project/releases) - [Changelog](https://github.com/taiki-e/pin-project/blob/main/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project/compare/v1.1.8...v1.1.10) Updates `prost` from 0.13.4 to 0.13.5 - [Release notes](https://github.com/tokio-rs/prost/releases) - [Changelog](https://github.com/tokio-rs/prost/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/prost/compare/v0.13.4...v0.13.5) Updates `quote` from 1.0.38 to 1.0.40 - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.38...1.0.40) Updates `reqwest` from 0.12.12 to 0.12.15 - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.12.12...v0.12.15) Updates `semver` from 1.0.25 to 1.0.26 - [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.25...1.0.26) Updates `serde` from 1.0.217 to 1.0.219 - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.217...v1.0.219) Updates `serde_json` from 1.0.138 to 1.0.140 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.138...v1.0.140) Updates `syn` from 2.0.96 to 2.0.100 - [Release notes](https://github.com/dtolnay/syn/releases) - 
[Commits](https://github.com/dtolnay/syn/compare/2.0.96...2.0.100) Updates `tempfile` from 3.16.0 to 3.19.1 - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.16.0...v3.19.1) Updates `thiserror` from 2.0.11 to 2.0.12 - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/2.0.11...2.0.12) Updates `tinyvec` from 1.8.1 to 1.9.0 - [Changelog](https://github.com/Lokathor/tinyvec/blob/main/CHANGELOG.md) - [Commits](https://github.com/Lokathor/tinyvec/compare/v1.8.1...v1.9.0) Updates `tokio` from 1.43.0 to 1.44.1 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.43.0...tokio-1.44.1) Updates `tokio-util` from 0.7.13 to 0.7.14 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.13...tokio-util-0.7.14) Updates `toml` from 0.8.19 to 0.8.20 - [Commits](https://github.com/toml-rs/toml/compare/toml-v0.8.19...toml-v0.8.20) Updates `tower` from 0.4.13 to 0.5.2 - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/tower-0.4.13...tower-0.5.2) --- updated-dependencies: - dependency-name: bitflags dependency-version: 2.9.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: bytes dependency-version: 1.10.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: chrono dependency-version: 0.4.40 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: clap dependency-version: 4.5.35 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: ff dependency-version: 0.13.1 
dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: http-body-util dependency-version: 0.1.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: humantime dependency-version: 2.2.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: hyper-util dependency-version: 0.1.11 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: indexmap dependency-version: 2.8.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: inferno dependency-version: 0.12.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: insta dependency-version: 1.42.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: jsonrpsee-proc-macros dependency-version: 0.24.9 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: jsonrpsee-types dependency-version: 0.24.9 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: log dependency-version: 0.4.27 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: metrics-exporter-prometheus dependency-version: 0.16.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: once_cell dependency-version: 1.21.3 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: owo-colors dependency-version: 4.2.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - 
dependency-name: pin-project dependency-version: 1.1.10 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: prost dependency-version: 0.13.5 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: quote dependency-version: 1.0.40 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: reqwest dependency-version: 0.12.15 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: semver dependency-version: 1.0.26 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: serde dependency-version: 1.0.219 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: serde_json dependency-version: 1.0.140 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: syn dependency-version: 2.0.100 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tempfile dependency-version: 3.19.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: thiserror dependency-version: 2.0.12 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tinyvec dependency-version: 1.9.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: tokio dependency-version: 1.44.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod - dependency-name: tokio-util dependency-version: 0.7.14 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - 
dependency-name: toml dependency-version: 0.8.20 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: prod - dependency-name: tower dependency-version: 0.5.2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: prod ... Signed-off-by: dependabot[bot] * downgrade tower * update denies * add comment --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 486 +++++++++++++++++++++++++++++++---------------------- Cargo.toml | 62 +++---- deny.toml | 8 +- 3 files changed, 321 insertions(+), 235 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ee29ff79b7..e7df1bd8c41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.5.27", + "clap 4.5.35", "color-eyre", "fs-err", "once_cell", @@ -247,7 +247,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -258,7 +258,7 @@ checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -412,7 +412,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -423,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -432,7 +432,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", 
"itertools 0.13.0", @@ -443,7 +443,7 @@ dependencies = [ "regex", "rustc-hash 2.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -485,9 +485,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bitflags-serde-legacy" @@ -495,7 +495,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "serde", ] @@ -600,9 +600,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2-sys" @@ -726,15 +726,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets", + "windows-link", ] [[package]] @@ -803,9 +803,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.27" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" +checksum = 
"d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -813,9 +813,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstream", "anstyle", @@ -825,14 +825,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -981,7 +981,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.27", + "clap 4.5.35", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1092,7 +1092,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1116,7 +1116,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1127,7 +1127,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1210,7 +1210,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1353,9 +1353,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" dependencies = [ "bitvec", "rand_core 0.6.4", @@ -1515,7 +1515,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1591,7 +1591,7 @@ dependencies = [ "cfg-if", "libc", "wasi 0.13.3+wasi-0.2.2", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1603,7 +1603,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -1618,7 +1618,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "libc", "libgit2-sys", "log", @@ -1655,7 +1655,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -1878,12 +1878,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1909,9 +1909,9 @@ checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "humantime-serde" @@ -1986,9 +1986,9 @@ dependencies = [ 
[[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -1996,6 +1996,7 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", @@ -2141,7 +2142,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -2188,7 +2189,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -2232,9 +2233,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2256,9 +2257,9 @@ dependencies = [ [[package]] name = "inferno" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692eda1cc790750b9f5a5e3921ef9c117fd5498b97cfacbc910693e5b29002dc" +checksum = "2094aecddc672e902cd773bad7071542f63641e01e9187c3bba4b43005e837e9" dependencies = [ "ahash", "itoa", @@ -2281,9 +2282,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.42.1" +version = "1.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c1b125e30d93896b365e156c33dadfffab45ee8400afcbba4752f59de08a86" +checksum = "50259abbaa67d11d2bcafc7ba1d094ed7a0c70e3ce893f0d0997f73558cb3084" dependencies = [ "console", "linked-hash-map", @@ -2428,15 +2429,15 @@ dependencies = [ 
[[package]] name = "jsonrpsee-proc-macros" -version = "0.24.8" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcae0c6c159e11541080f1f829873d8f374f81eda0abc67695a13fc8dc1a580" +checksum = "5e65763c942dfc9358146571911b0cd1c361c2d63e2d2305622d40d36376ca80" dependencies = [ "heck 0.5.0", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -2468,9 +2469,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.8" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddb81adb1a5ae9182df379e374a79e24e992334e7346af4d065ae5b2acb8d4c6" +checksum = "08a8e70baf945b6b5752fc8eb38c918a48f1234daf11355e07106d963f860089" dependencies = [ "http", "serde", @@ -2518,9 +2519,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libgit2-sys" @@ -2541,7 +2542,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -2556,7 +2557,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "libc", ] @@ -2609,6 +2610,12 @@ version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +[[package]] +name = "linux-raw-sys" +version = "0.9.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413" + [[package]] name = "litemap" version = "0.7.4" @@ -2633,9 +2640,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "lz4-sys" @@ -2696,15 +2703,15 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" +checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", "http-body-util", "hyper", "hyper-util", - "indexmap 2.7.1", + "indexmap 2.8.0", "ipnet", "metrics", "metrics-util", @@ -2798,7 +2805,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases", "libc", @@ -2910,9 +2917,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" @@ -3002,9 +3009,9 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "owo-colors" -version = "4.1.0" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb37767f6569cd834a413442455e0f066d0d522de8630436e2a1761d9726ba56" 
+checksum = "1036865bb9422d3300cf723f657c2851d0e9ab12567854b1f4eba3d77decf564" [[package]] name = "pairing" @@ -3061,7 +3068,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3092,7 +3099,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.11", + "thiserror 2.0.12", "ucd-trie", ] @@ -3116,7 +3123,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -3137,27 +3144,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.7.1", + "indexmap 2.8.0", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -3255,7 +3262,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -3321,7 +3328,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -3341,7 +3348,7 @@ 
checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.8.0", + "bitflags 2.9.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -3361,14 +3368,14 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] name = "prost" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -3390,21 +3397,21 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.96", + "syn 2.0.100", "tempfile", ] [[package]] name = "prost-derive" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -3482,7 +3489,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -3501,7 +3508,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time", @@ -3523,9 +3530,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] @@ -3631,7 +3638,7 @@ version = "11.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3691,7 +3698,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3702,7 +3709,7 @@ checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3751,9 +3758,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.12" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "async-compression", "base64 0.22.1", @@ -3911,10 +3918,23 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" +dependencies = [ + "bitflags 2.9.0", + "errno", + "libc", + "linux-raw-sys 0.9.3", "windows-sys 0.59.0", ] @@ -4065,9 +4085,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" dependencies = [ "serde", ] @@ -4160,9 +4180,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] @@ -4178,22 +4198,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "memchr", "ryu", @@ -4231,7 +4251,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_derive", "serde_json", @@ -4248,7 +4268,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -4257,7 +4277,7 @@ version = "0.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "libyml", "memchr", @@ -4314,7 +4334,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"637e95dcd06bc1bb3f86ed9db1e1832a70125f32daae071ef37dcb7701b7d4fe" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "either", "incrementalmerkletree", "tracing", @@ -4384,9 +4404,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4522,9 +4542,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -4560,7 +4580,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -4571,15 +4591,14 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.16.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" +checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" dependencies = [ - "cfg-if", "fastrand", "getrandom 0.3.1", "once_cell", - "rustix", + "rustix 1.0.5", "windows-sys 0.59.0", ] @@ -4612,11 +4631,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = 
"567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4627,18 +4646,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -4647,7 +4666,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "libc", "log", @@ -4720,9 +4739,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -4735,9 +4754,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -4760,7 +4779,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -4800,9 +4819,9 @@ dependencies 
= [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4823,9 +4842,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -4848,7 +4867,7 @@ version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4896,7 +4915,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -5041,7 +5060,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -5154,7 +5173,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -5362,7 +5381,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -5492,7 +5511,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -5527,7 +5546,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.96", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5578,7 +5597,7 @@ checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f" dependencies = [ "either", "home", - "rustix", + "rustix 0.38.44", "winsafe", ] @@ -5620,7 +5639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -5629,37 +5648,42 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + [[package]] name = "windows-registry" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ "windows-result", "windows-strings", - "windows-targets", + "windows-targets 0.53.0", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" dependencies = [ - "windows-targets", + "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = 
"87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ - "windows-result", - "windows-targets", + "windows-link", ] [[package]] @@ -5668,7 +5692,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -5677,7 +5701,7 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -5686,14 +5710,30 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -5702,48 +5742,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.7.0" @@ -5765,7 +5853,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -5827,7 +5915,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", "synstructure 0.13.1", ] @@ -6073,7 +6161,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.45" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -6117,7 +6205,7 @@ dependencies = [ "spandoc", "static_assertions", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", 
"tinyvec", "tokio", "tracing", @@ -6161,7 +6249,7 @@ dependencies = [ "sapling-crypto", "serde", "spandoc", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tokio", "tower 0.4.13", @@ -6206,7 +6294,7 @@ dependencies = [ name = "zebra-network" version = "1.0.0-beta.45" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "byteorder", "bytes", "chrono", @@ -6215,7 +6303,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.7.1", + "indexmap 2.8.0", "itertools 0.14.0", "lazy_static", "metrics", @@ -6230,11 +6318,11 @@ dependencies = [ "serde", "static_assertions", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-stream", "tokio-util", - "toml 0.8.19", + "toml 0.8.20", "tower 0.4.13", "tracing", "tracing-error", @@ -6267,7 +6355,7 @@ dependencies = [ "hex", "http-body-util", "hyper", - "indexmap 2.7.1", + "indexmap 2.8.0", "insta", "jsonrpsee", "jsonrpsee-proc-macros", @@ -6279,7 +6367,7 @@ dependencies = [ "semver", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-stream", "tonic", @@ -6310,7 +6398,7 @@ dependencies = [ "futures", "group", "hex", - "indexmap 2.7.1", + "indexmap 2.8.0", "insta", "itertools 0.14.0", "jsonrpc", @@ -6326,7 +6414,7 @@ dependencies = [ "structopt", "tempfile", "tokio", - "toml 0.8.19", + "toml 0.8.20", "tonic", "tower 0.4.13", "tracing", @@ -6351,7 +6439,7 @@ version = "1.0.0-beta.45" dependencies = [ "hex", "lazy_static", - "thiserror 2.0.11", + "thiserror 2.0.12", "zcash_script", "zebra-chain", "zebra-test", @@ -6374,7 +6462,7 @@ dependencies = [ "howudoin", "human_bytes", "humantime-serde", - "indexmap 2.7.1", + "indexmap 2.8.0", "insta", "itertools 0.14.0", "jubjub", @@ -6394,7 +6482,7 @@ dependencies = [ "serde_json", "spandoc", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tokio", "tower 0.4.13", @@ -6411,18 +6499,18 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.7.1", + "indexmap 2.8.0", "insta", "itertools 
0.14.0", "lazy_static", "once_cell", - "owo-colors 4.1.0", + "owo-colors 4.2.0", "proptest", "rand 0.8.5", "regex", "spandoc", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tokio", "tower 0.4.13", @@ -6437,7 +6525,7 @@ version = "1.0.0-beta.45" dependencies = [ "color-eyre", "hex", - "indexmap 2.7.1", + "indexmap 2.8.0", "itertools 0.14.0", "jsonrpc", "quote", @@ -6448,8 +6536,8 @@ dependencies = [ "serde_json", "serde_yml", "structopt", - "syn 2.0.96", - "thiserror 2.0.11", + "syn 2.0.100", + "thiserror 2.0.12", "tinyvec", "tokio", "tracing-error", @@ -6470,7 +6558,7 @@ dependencies = [ "atty", "bytes", "chrono", - "clap 4.5.27", + "clap 4.5.35", "color-eyre", "console-subscriber", "dirs", @@ -6482,7 +6570,7 @@ dependencies = [ "humantime-serde", "hyper", "hyper-util", - "indexmap 2.7.1", + "indexmap 2.8.0", "indicatif", "inferno", "insta", @@ -6505,12 +6593,12 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", "thread-priority", "tinyvec", "tokio", "tokio-stream", - "toml 0.8.19", + "toml 0.8.20", "tonic", "tonic-build", "tower 0.4.13", @@ -6552,7 +6640,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -6572,7 +6660,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", "synstructure 0.13.1", ] @@ -6593,7 +6681,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] @@ -6615,7 +6703,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.100", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2aacbe8d6f3..e390d7d172e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-39,7 +39,7 @@ atty = "0.2.14" base64 = "0.22.1" bellman = "0.14.0" bincode = "1.3.3" -bitflags = "2.8.0" +bitflags = "2.9.0" bitflags-serde-legacy = "0.1.1" bitvec = "1.0.1" blake2b_simd = "1.0.3" @@ -47,9 +47,9 @@ blake2s_simd = "1.0.3" bls12_381 = "0.8.0" bs58 = "0.5.1" byteorder = "1.5.0" -bytes = "1.9.0" -chrono = { version = "0.4.39", default-features = false } -clap = "4.5.27" +bytes = "1.10.1" +chrono = { version = "0.4.40", default-features = false } +clap = "4.5.35" color-eyre = { version = "0.6.3", default-features = false } console-subscriber = "0.4.0" criterion = "0.5.1" @@ -58,7 +58,7 @@ dirs = "6.0.0" ed25519-zebra = "4.0.3" elasticsearch = { version = "8.17.0-alpha.1", default-features = false } equihash = "0.2.2" -ff = "0.13.0" +ff = "0.13.1" futures = "0.3.31" futures-core = "0.3.28" futures-util = "0.3.28" @@ -67,38 +67,38 @@ halo2 = "0.3.0" hex = "0.4.3" hex-literal = "0.4.1" howudoin = "0.1.2" -http-body-util = "0.1.2" +http-body-util = "0.1.3" human_bytes = { version = "0.4.3", default-features = false } -humantime = "2.1.0" +humantime = "2.2.0" humantime-serde = "1.1.1" hyper = "1.6.0" -hyper-util = "0.1.9" -indexmap = "2.7.1" +hyper-util = "0.1.11" +indexmap = "2.8.0" indicatif = "0.17.11" -inferno = { version = "0.12.1", default-features = false } -insta = "1.42.1" +inferno = { version = "0.12.2", default-features = false } +insta = "1.42.2" itertools = "0.14.0" jsonrpc = "0.18.0" jsonrpsee = "0.24.8" -jsonrpsee-proc-macros = "0.24.8" -jsonrpsee-types = "0.24.8" +jsonrpsee-proc-macros = "0.24.9" +jsonrpsee-types = "0.24.9" jubjub = "0.10.0" lazy_static = "1.4.0" -log = "0.4.25" +log = "0.4.27" metrics = "0.24.1" -metrics-exporter-prometheus = { version = "0.16.1", default-features = false } +metrics-exporter-prometheus = { version = "0.16.2", default-features = false } mset = "0.1.1" nix = "0.29.0" num-integer = "0.1.46" -once_cell = "1.20.2" +once_cell = "1.21.3" ordered-map = "0.4.2" -owo-colors = "4.1.0" -pin-project = "1.1.8" 
+owo-colors = "4.2.0" +pin-project = "1.1.10" primitive-types = "0.12.2" proptest = "1.6.0" proptest-derive = "0.5.1" -prost = "0.13.4" -quote = "1.0.38" +prost = "0.13.5" +quote = "1.0.40" rand = "0.8.5" rand_chacha = "0.3.1" rand_core = "0.6.4" @@ -106,32 +106,32 @@ rayon = "1.10.0" reddsa = "0.5.1" redjubjub = "0.8" regex = "1.11.0" -reqwest = { version = "0.12.9", default-features = false } +reqwest = { version = "0.12.15", default-features = false } ripemd = "0.1.3" rlimit = "0.10.2" rocksdb = { version = "0.22.0", default-features = false } secp256k1 = "0.29" -semver = "1.0.25" +semver = "1.0.26" sentry = { version = "0.36.0", default-features = false } -serde = "1.0.217" +serde = "1.0.219" serde-big-array = "0.5.1" -serde_json = "1.0.138" +serde_json = "1.0.140" serde_with = "3.12.0" serde_yml = "0.0.12" sha2 = "0.10.7" spandoc = "0.2.2" static_assertions = "1.1.0" structopt = "0.3.26" -syn = "2.0.96" -tempfile = "3.16.0" -thiserror = "2.0.11" +syn = "2.0.100" +tempfile = "3.19.1" +thiserror = "2.0.12" thread-priority = "1.2.0" -tinyvec = "1.8.1" -tokio = "1.43.0" +tinyvec = "1.9.0" +tokio = "1.44.1" tokio-stream = "0.1.17" tokio-test = "0.4.4" -tokio-util = "0.7.13" -toml = "0.8.19" +tokio-util = "0.7.14" +toml = "0.8.20" tonic = "0.12.3" tonic-build = "0.12.3" tonic-reflection = "0.12.3" diff --git a/deny.toml b/deny.toml index 3387d59b632..eccc230af95 100644 --- a/deny.toml +++ b/deny.toml @@ -86,13 +86,8 @@ skip-tree = [ { name = "thiserror", version = "=1.0.69" }, { name = "thiserror-impl", version = "=1.0.69" }, - # Remove after release candicate period is over and the ECC crates are not patched anymore - { name = "equihash", version = "=0.2.0" }, - # wait for all librustzcash crates to update sha2, secp256k1, and ripemd { name = "sha2", version = "=0.10.8" }, - { name = "secp256k1", version = "=0.27.0" }, - { name = "redjubjub", version = "=0.7.0" }, { name = "ripemd", version = "=0.1.3" }, # wait for zcash_script to update itertools @@ -100,6 +95,9 
@@ skip-tree = [ # wait for abscissa_core to update synstructure { name = "synstructure", version = "=0.12.6" }, + + # wait until zcash_client_backend update rustix + { name = "rustix", version = "=0.38.44" }, ] # This section is considered when running `cargo deny check sources`. From 65df2003093ce23277520d9c7fd54b5eb3b1fa69 Mon Sep 17 00:00:00 2001 From: Kris Nuttycombe Date: Mon, 7 Apr 2025 03:25:30 -0600 Subject: [PATCH 136/245] Update `zcash_transparent` to version 0.2.3 (#9387) This also updates to the latest `zcash_client_backend` dependency, and fixes a few imports to use base types instead of reexported versions that may be deprecated in the future. --- Cargo.lock | 23 ++++--------------- Cargo.toml | 3 ++- .../src/parameters/network/tests/vectors.rs | 2 +- .../src/primitives/zcash_primitives.rs | 17 +++++--------- zebra-scan/Cargo.toml | 2 ++ .../src/bin/scanning-results-reader/main.rs | 2 +- zebra-scan/src/service/scan_task/scan.rs | 10 ++++---- 7 files changed, 21 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7df1bd8c41..e4adcde86be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2199,19 +2199,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30821f91f0fa8660edca547918dc59812893b497d07c1144f326f07fdd94aba9" dependencies = [ "either", - "proptest", - "rand 0.8.5", - "rand_core 0.6.4", -] - -[[package]] -name = "incrementalmerkletree-testing" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad20fb6cf815e76ce9b9eca74f347740ab99059fe4b5e4a002403d0441a02983" -dependencies = [ - "incrementalmerkletree", - "proptest", ] [[package]] @@ -6107,10 +6094,7 @@ dependencies = [ "core2", "document-features", "hex", - "incrementalmerkletree", - "incrementalmerkletree-testing", "memuse", - "proptest", ] [[package]] @@ -6134,9 +6118,9 @@ dependencies = [ [[package]] name = "zcash_transparent" -version = "0.2.0" +version = "0.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b0c4ea6d9b94b5159106b65b57c4a9ea46859e7f7f8fb1be3e18e2d25bc372" +checksum = "2cd8c2d138ec893d3d384d97304da9ff879424056087c8ac811780a0e8d96a99" dependencies = [ "bip32", "blake2b_simd", @@ -6145,7 +6129,6 @@ dependencies = [ "document-features", "getset", "hex", - "proptest", "ripemd 0.1.3", "secp256k1", "sha2 0.10.8", @@ -6424,6 +6407,7 @@ dependencies = [ "zcash_keys", "zcash_note_encryption", "zcash_primitives", + "zcash_protocol", "zebra-chain", "zebra-grpc", "zebra-node-services", @@ -6431,6 +6415,7 @@ dependencies = [ "zebra-state", "zebra-test", "zebrad", + "zip32", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e390d7d172e..1a9b67bee94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,8 +32,9 @@ zcash_history = "0.4.0" zcash_keys = "0.7.0" zcash_primitives = "0.22.0" zcash_proofs = "0.22.0" -zcash_transparent = { version = "0.2.0", features = ["test-dependencies"] } +zcash_transparent = "0.2.3" zcash_protocol = "0.5.1" +zip32 = "0.2" abscissa_core = "0.7.0" atty = "0.2.14" base64 = "0.22.1" diff --git a/zebra-chain/src/parameters/network/tests/vectors.rs b/zebra-chain/src/parameters/network/tests/vectors.rs index 061fc9cc24f..b7263e0087e 100644 --- a/zebra-chain/src/parameters/network/tests/vectors.rs +++ b/zebra-chain/src/parameters/network/tests/vectors.rs @@ -19,7 +19,7 @@ use crate::{ }; /// Checks that every method in the `Parameters` impl for `zebra_chain::Network` has the same output -/// as the Parameters impl for `zcash_primitives::consensus::Network` on Mainnet and the default Testnet. +/// as the Parameters impl for `zcash_protocol::consensus::NetworkType` on Mainnet and the default Testnet. 
#[test] fn check_parameters_impl() { let zp_network_upgrades = [ diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 926f6008479..5ef8bd4c6b8 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -23,7 +23,7 @@ struct TransparentAuth<'a> { all_prev_outputs: &'a [transparent::Output], } -impl zp_tx::components::transparent::Authorization for TransparentAuth<'_> { +impl zcash_transparent::bundle::Authorization for TransparentAuth<'_> { type ScriptSig = zcash_primitives::legacy::Script; } @@ -61,22 +61,17 @@ struct MapTransparent<'a> { } impl<'a> - zp_tx::components::transparent::MapAuth< - zp_tx::components::transparent::Authorized, - TransparentAuth<'a>, - > for MapTransparent<'a> + zcash_transparent::bundle::MapAuth> + for MapTransparent<'a> { fn map_script_sig( &self, - s: ::ScriptSig, - ) -> ::ScriptSig { + s: ::ScriptSig, + ) -> ::ScriptSig { s } - fn map_authorization( - &self, - _: zp_tx::components::transparent::Authorized, - ) -> TransparentAuth<'a> { + fn map_authorization(&self, _: zcash_transparent::bundle::Authorized) -> TransparentAuth<'a> { // TODO: This map should consume self, so we can move self.auth self.auth.clone() } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index bdc1f40f870..ef928498b2a 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -74,8 +74,10 @@ futures = { workspace = true } zcash_client_backend.workspace = true zcash_keys = { workspace = true, features = ["sapling"] } zcash_primitives.workspace = true +zcash_protocol.workspace = true zcash_address.workspace = true sapling-crypto.workspace = true +zip32 = { workspace = true, features = ["std"] } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["shielded-scan"] } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["shielded-scan"] } diff --git 
a/zebra-scan/src/bin/scanning-results-reader/main.rs b/zebra-scan/src/bin/scanning-results-reader/main.rs index 88ae9549b54..d6361022087 100644 --- a/zebra-scan/src/bin/scanning-results-reader/main.rs +++ b/zebra-scan/src/bin/scanning-results-reader/main.rs @@ -14,7 +14,7 @@ use jsonrpc::Client; use zcash_client_backend::decrypt_transaction; use zcash_primitives::consensus::{BlockHeight, BranchId}; use zcash_primitives::transaction::Transaction; -use zcash_primitives::zip32::AccountId; +use zip32::AccountId; use zebra_scan::scan::{dfvk_to_ufvk, sapling_key_to_dfvk}; use zebra_scan::{storage::Storage, Config}; diff --git a/zebra-scan/src/service/scan_task/scan.rs b/zebra-scan/src/service/scan_task/scan.rs index 53e151d21c4..f70a9778df3 100644 --- a/zebra-scan/src/service/scan_task/scan.rs +++ b/zebra-scan/src/service/scan_task/scan.rs @@ -25,7 +25,7 @@ use zcash_client_backend::{ }, scanning::{Nullifiers, ScanError, ScanningKeys}, }; -use zcash_primitives::zip32::{AccountId, Scope}; +use zip32::{AccountId, Scope}; use sapling_crypto::zip32::DiversifiableFullViewingKey; @@ -561,10 +561,10 @@ pub fn dfvk_to_ufvk(dfvk: &DiversifiableFullViewingKey) -> Result zcash_primitives::consensus::Network { +/// Returns the [`zcash_protocol::consensus::Network`] for this network. 
+pub fn zp_network(network: &Network) -> zcash_protocol::consensus::Network { match network { - Network::Mainnet => zcash_primitives::consensus::Network::MainNetwork, - Network::Testnet(_) => zcash_primitives::consensus::Network::TestNetwork, + Network::Mainnet => zcash_protocol::consensus::Network::MainNetwork, + Network::Testnet(_) => zcash_protocol::consensus::Network::TestNetwork, } } From b1f4cd3533da29183713db8147a0783525276b38 Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Mon, 7 Apr 2025 20:59:19 +0300 Subject: [PATCH 137/245] docs: complete the Treestate RFC documentation (#9340) * docs: complete the Treestate RFC documentation * small changes * line wrapping * move from draft --------- Co-authored-by: Alfredo Garcia --- book/src/SUMMARY.md | 1 + book/src/dev/rfcs/0007-treestate.md | 346 +++++++++++++++++++++ book/src/dev/rfcs/drafts/0005-treestate.md | 227 -------------- 3 files changed, 347 insertions(+), 227 deletions(-) create mode 100644 book/src/dev/rfcs/0007-treestate.md delete mode 100644 book/src/dev/rfcs/drafts/0005-treestate.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 06f04e658ce..833c04e802c 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -47,6 +47,7 @@ - [Asynchronous Script Verification](dev/rfcs/0004-asynchronous-script-verification.md) - [State Updates](dev/rfcs/0005-state-updates.md) - [Contextual Difficulty Validation](dev/rfcs/0006-contextual-difficulty.md) + - [Tree States](dev/rfcs/0007-treestate.md) - [Zebra Client](dev/rfcs/0009-zebra-client.md) - [V5 Transaction](dev/rfcs/0010-v5-transaction.md) - [Async Rust in Zebra](dev/rfcs/0011-async-rust-in-zebra.md) diff --git a/book/src/dev/rfcs/0007-treestate.md b/book/src/dev/rfcs/0007-treestate.md new file mode 100644 index 00000000000..1de547c6ead --- /dev/null +++ b/book/src/dev/rfcs/0007-treestate.md @@ -0,0 +1,346 @@ +# Treestate + +- Feature Name: treestate +- Start Date: 2020-08-31 +- Design PR: 
[ZcashFoundation/zebra#983](https://github.com/ZcashFoundation/zebra/issues/983) +- Zebra Issue: [ZcashFoundation/zebra#958](https://github.com/ZcashFoundation/zebra/issues/958) + +# Summary +[summary]: #summary + +To validate blocks involving shielded transactions, we have to check the +computed treestate from the included transactions against the block header +metadata (for Sapling and Orchard) or previously finalized state (for Sprout). +This document describes how we compute and manage that data, assuming a finalized +state service as described in the [State Updates RFC](https://zebra.zfnd.org/dev/rfcs/0005-state-updates.md). + + +# Motivation +[motivation]: #motivation + +Block validation requires checking that the treestate of the block (consisting +of the note commitment tree and nullifier set) is consistent with the metadata +we have in the block header (the root of the note commitment tree) or previously +finalized state (for Sprout). + + +# Definitions +[definitions]: #definitions + +## Common Definitions + +Many terms used here are defined in the [Zcash Protocol Specification](https://zips.z.cash/protocol/protocol.pdf) + +**notes**: Represents a value bound to a shielded payment address (public key) +which is spendable by the recipient who holds the spending key corresponding to +a given shielded payment address. + +**nullifiers**: A value that prevents double-spending of a shielded payment. +Revealed by `Spend` descriptions when its associated `Note` is spent. + +**nullifier set**: The set of unique `Nullifier`s revealed by any `Transaction`s +within a `Block`. `Nullifier`s are enforced to be unique within a valid block chain +by committing to previous treestates in `Spend` descriptions, in order to prevent +double-spends. + +**note commitments**: Pedersen commitment to the values consisting a `Note`. One +should not be able to construct a `Note` from its commitment. 
+ +**note commitment tree**: An incremental Merkle tree of fixed depth used to +store `NoteCommitment`s that `JoinSplit` transfers or `Spend` transfers produce. It +is used to express the existence of value and the capability to spend it. It is +not the job of this tree to protect against double-spending, as it is +append-only: that's what the `Nullifier` set is for. + +**note position**: The index of a `NoteCommitment` at the leafmost layer, +counting leftmost to rightmost. The [position in the tree is determined by the +order of transactions in the block](https://zips.z.cash/protocol/protocol.pdf#transactions). + +**root**: The layer 0 node of a Merkle tree. + +**anchor**: A Merkle tree root of a `NoteCommitment` tree. It uniquely +identifies a `NoteCommitment` tree state given the assumed security properties +of the Merkle tree’s hash function. Since the `Nullifier` set is always updated +together with the `NoteCommitment` tree, this also identifies a particular state +of the associated `Nullifier` set. + +## Sprout Definitions + +**joinsplit**: A shielded transfer that can spend Sprout `Note`s and transparent +value, and create new Sprout `Note`s and transparent value, in one Groth16 proof +statement. + +## Sapling Definitions + +**spend descriptions**: A shielded Sapling transfer that spends a `Note`. Includes +an anchor of some previous `Block`'s `NoteCommitment` tree. + +**output descriptions**: A shielded Sapling transfer that creates a +`Note`. Includes the u-coordinate of the `NoteCommitment` itself. + +## Orchard Definitions + +**action descriptions**: A shielded Orchard transfer that spends and/or creates a +`Note`. Does not include an anchor, because that is encoded once in the +`anchorOrchard` field of a V5 `Transaction`. 
+ + +# Guide-level explanation +[guide-level-explanation]: #guide-level-explanation + +## Common Processing for All Protocols + +As `Block`s are validated, the `NoteCommitment`s revealed by all the transactions +within that block are used to construct `NoteCommitmentTree`s, with the +`NoteCommitment`s aligned in their note positions in the bottom layer of the +Sprout or Sapling tree from the left-most leaf to the right-most in +`Transaction` order in the `Block`. So the Sprout `NoteCommitment`s revealed by +the first `JoinSplit` in a block would take note position 0 in the Sprout +note commitment tree, for example. Once all the transactions in a block are +parsed and the notes for each tree collected in their appropriate positions, the +root of each tree is computed. While the trees are being built, the respective +block nullifier sets are updated in memory as note nullifiers are revealed. If +the rest of the block is validated according to consensus rules, that root is +committed to its own data structure via our state service (Sprout anchors, +Sapling anchors). Sapling block validation includes comparing the specified +FinalSaplingRoot in its block header to the root of the Sapling `NoteCommitment` +tree that we have just computed to make sure they match. + +## Sprout Processing + +For Sprout, we must compute/update interstitial `NoteCommitmentTree`s between +`JoinSplit`s that may reference an earlier one's root as its anchor. If we do +this at the transaction layer, we can iterate through all the `JoinSplit`s and +compute the Sprout `NoteCommitmentTree` and nullifier set similar to how we do +the Sapling ones as described below, but at each state change (ie, +per-`JoinSplit`) we note the root and cache it for lookup later. 
As the
+`JoinSplit`s are validated without context, we check for its specified anchor
+amongst the interstitial roots we've already calculated (according to the spec,
+these interstitial roots don't have to be finalized or the result of an
+independently validated `JoinSplit`, they just must refer to any prior `JoinSplit`
+root in the same transaction). So we only have to wait for our previous root to
+be computed via any of our candidates, which in the worst case is waiting for
+all of them to be computed for the last `JoinSplit`. If our `JoinSplit`'s defined
+root pops out, that `JoinSplit` passes that check.
+
+## Sapling Processing
+
+As the transactions within a block are parsed, Sapling shielded transactions
+including `Spend` descriptions and `Output` descriptions describe the spending and
+creation of Zcash Sapling notes. `Spend` descriptions specify an anchor, which
+references a previous `NoteCommitment` tree root. This is a previous block's anchor
+as defined in their block header. This is convenient because we can query our state
+service for previously finalized Sapling block anchors, and if they are found, then
+that [consensus check](https://zips.z.cash/protocol/canopy.pdf#spendsandoutputs)
+has been satisfied and the `Spend` description can be validated independently.
+
+For Sapling, at the block layer, we can iterate over all the transactions in
+order and if they have `Spend`s and/or `Output`s, we update our Nullifier set for
+the block as nullifiers are revealed in `Spend` descriptions, and update our note
+commitment tree as `NoteCommitment`s are revealed in `Output` descriptions, adding
+them as leaves in positions according to their order as they appear transaction
+to transaction, output to output, in the block. This can be done independent of
+the transaction validations. When the Sapling transactions are all validated,
+the `NoteCommitmentTree` root should be computed: this is the anchor for this
+block. 
+
+### Anchor Validation Across Network Upgrades
+
+For Sapling and Blossom blocks, we need to check that this root matches
+the `RootHash` bytes in this block's header, as the `FinalSaplingRoot`. Once all
+other consensus and validation checks are done, this will be saved down to our
+finalized state to our `sapling_anchors` set, making it available for lookup by
+other Sapling descriptions in future transactions.
+
+In Heartwood and Canopy, the rules for final Sapling roots are modified to support
+empty blocks by allowing an empty subtree hash instead of requiring the root to
+match the previous block's final Sapling root when there are no Sapling transactions.
+
+In NU5, the rules are further extended to include Orchard note commitment trees,
+with similar logic applied to the `anchorOrchard` field in V5 transactions.
+
+## Orchard Processing
+
+For Orchard, similar to Sapling, action descriptions can spend and create notes.
+The anchor is specified at the transaction level in the `anchorOrchard` field of
+a V5 transaction. The process follows similar steps to Sapling for validation and
+inclusion in blocks.
+
+## Block Finalization
+
+To finalize the block, the Sprout, Sapling, and Orchard treestates are the ones
+resulting from the last transaction in the block, and determine the respective
+anchors that will be associated with this block as we commit it to our finalized
+state. The nullifiers revealed in the block will be merged with the existing ones
+in our finalized state (ie, it should strictly grow over time).
+
+## State Management
+
+### Orchard
+
+- There is a single copy of the latest Orchard Note Commitment Tree for the
+finalized tip.
+- When finalizing a block, the finalized tip is updated with a serialization of
+the latest Orchard Note Commitment Tree. (The previous tree should be deleted as
+part of the same database transaction.) 
+- Each non-finalized chain gets its own copy of the Orchard note commitment tree, +cloned from the note commitment tree of the finalized tip or fork root. +- When a block is added to a non-finalized chain tip, the Orchard note commitment +tree is updated with the note commitments from that block. +- When a block is rolled back from a non-finalized chain tip, the Orchard tree +state is restored to its previous state before the block was added. This involves +either keeping a reference to the previous state or recalculating from the fork +point. + +### Sapling + +- There is a single copy of the latest Sapling Note Commitment Tree for the +finalized tip. +- When finalizing a block, the finalized tip is updated with a serialization of +the Sapling Note Commitment Tree. (The previous tree should be deleted as part +of the same database transaction.) +- Each non-finalized chain gets its own copy of the Sapling note commitment tree, +cloned from the note commitment tree of the finalized tip or fork root. +- When a block is added to a non-finalized chain tip, the Sapling note commitment +tree is updated with the note commitments from that block. +- When a block is rolled back from a non-finalized chain tip, the Sapling tree +state is restored to its previous state, similar to the Orchard process. This +involves either maintaining a history of tree states or recalculating from the +fork point. + +### Sprout + +- Every finalized block stores a separate copy of the Sprout note commitment +tree (😿), as of that block. +- When finalizing a block, the Sprout note commitment tree for that block is stored +in the state. (The trees for previous blocks also remain in the state.) +- Every block in each non-finalized chain gets its own copy of the Sprout note +commitment tree. The initial tree is cloned from the note commitment tree of the +finalized tip or fork root. 
+- When a block is added to a non-finalized chain tip, the Sprout note commitment +tree is cloned, then updated with the note commitments from that block. +- When a block is rolled back from a non-finalized chain tip, the trees for each +block are deleted, along with that block. + +We can't just compute a fresh tree with just the note commitments within a block, +we are adding them to the tree referenced by the anchor, but we cannot update that +tree with just the anchor, we need the 'frontier' nodes and leaves of the +incremental merkle tree. + + +# Reference-level explanation +[reference-level-explanation]: #reference-level-explanation + +The implementation involves several key components: + +1. **Incremental Merkle Trees**: We use the `incrementalmerkletree` crate to +implement the note commitment trees for each shielded pool. + +2. **Nullifier Storage**: We maintain nullifier sets in RocksDB to efficiently +check for duplicates. + +3. **Tree State Management**: + - For finalized blocks, we store the tree states in RocksDB. + - For non-finalized chains, we keep tree states in memory. + +4. **Anchor Verification**: + - For Sprout: we check anchors against our stored Sprout tree roots. + - For Sapling: we compare the computed root against the block header's +`FinalSaplingRoot`. + - For Orchard: we validate the `anchorOrchard` field in V5 transactions. + +5. **Re-insertion Prevention**: Our implementation should prevent re-inserts +of keys that have been deleted from the database, as this could lead to +inconsistencies. The state service tracks deletion events and validates insertion +operations accordingly. + + +# Drawbacks +[drawbacks]: #drawbacks + +1. **Storage Requirements**: Storing separate tree states (especially for Sprout) +requires significant disk space. + +2. **Performance Impact**: Computing and verifying tree states can be +computationally expensive, potentially affecting sync performance. + +3. 
**Implementation Complexity**: Managing multiple tree states across different +protocols adds complexity to the codebase. + +4. **Fork Handling**: Maintaining correct tree states during chain reorganizations +requires careful handling. + + +# Rationale and alternatives +[rationale-and-alternatives]: #rationale-and-alternatives + +We chose this approach because: + +1. **Protocol Compatibility**: Our implementation follows the Zcash protocol +specification requirements for handling note commitment trees and anchors. + +2. **Performance Optimization**: By caching tree states, we avoid recomputing +them for every validation operation. + +3. **Memory Efficiency**: For non-finalized chains, we only keep necessary tree +states in memory. + +4. **Scalability**: The design scales with chain growth by efficiently managing +storage requirements. + +Alternative approaches considered: + +1. **Recompute Trees On-Demand**: Instead of storing tree states, recompute them +when needed. This would save storage but significantly impact performance. + +2. **Single Tree State**: Maintain only the latest tree state and recompute for +historical blocks. This would simplify implementation but make historical validation harder. + +3. **Full History Storage**: Store complete tree states for all blocks. This would optimize +validation speed but require excessive storage. + + +# Prior art +[prior-art]: #prior-art + +1. **Zcashd**: Uses similar concepts but with differences in implementation details, +particularly around storage and concurrency. + +2. **Lightwalletd**: Provides a simplified approach to tree state management focused +on scanning rather than full validation. + +3. **Incrementalmerkletree Crate**: Our implementation leverages this existing Rust +crate for efficient tree management. + + +# Unresolved questions +[unresolved-questions]: #unresolved-questions + +1. 
**Optimization Opportunities**: Are there further optimizations we can make to reduce +storage requirements while maintaining performance? + +2. **Root Storage**: Should we store the `Root` hash in `sprout_note_commitment_tree`, +and use it to look up the complete tree state when needed? + +3. **Re-insertion Prevention**: What's the most efficient approach to prevent re-inserts +of deleted keys? + +4. **Concurrency Model**: How do we best handle concurrent access to tree states during +parallel validation? + + +# Future possibilities +[future-possibilities]: #future-possibilities + +1. **Pruning Strategies**: Implement advanced pruning strategies for historical tree states +to reduce storage requirements. + +2. **Parallelization**: Further optimize tree state updates for parallel processing. + +3. **Checkpoint Verification**: Use tree states for efficient checkpoint-based verification. + +4. **Light Client Support**: Leverage tree states to support Zebra-based light clients with +efficient proof verification. + +5. **State Storage Optimization**: Investigate more efficient serialization formats and storage +mechanisms for tree states. diff --git a/book/src/dev/rfcs/drafts/0005-treestate.md b/book/src/dev/rfcs/drafts/0005-treestate.md deleted file mode 100644 index d18654375dc..00000000000 --- a/book/src/dev/rfcs/drafts/0005-treestate.md +++ /dev/null @@ -1,227 +0,0 @@ -# Treestate - -- Feature Name: treestate -- Start Date: 2020-08-31 -- Design PR: [ZcashFoundation/zebra#983](https://github.com/ZcashFoundation/zebra/issues/983) -- Zebra Issue: [ZcashFoundation/zebra#958](https://github.com/ZcashFoundation/zebra/issues/958) - -# Summary -[summary]: #summary - -To validate blocks involving shielded transactions, we have to check the -computed treestate from the included transactions against the block header -metadata (for Sapling and Orchard) or previously finalized state (for Sprout). 
This document -describes how we compute and manage that data, assuming a finalized state -service as described in the [State Updates RFC](https://zebra.zfnd.org/dev/rfcs/0005-state-updates.md). - - -# Motivation -[motivation]: #motivation - -Block validation requires checking that the treestate of the block (consisting -of the note commitment tree and nullifier set) is consistent with the metadata -we have in the block header (the root of the note commitment tree) or previously -finalized state (for Sprout). - - -# Definitions -[definitions]: #definitions - -TODO: split up these definitions into common, Sprout, Sapling, and possibly Orchard sections - -Many terms used here are defined in the [Zcash Protocol Specification](https://zips.z.cash/protocol/protocol.pdf) - -**notes**: Represents a value bound to a shielded payment address (public key) -which is spendable by the recipient who holds the spending key corresponding to -a given shielded payment address. - -**nullifiers**: Revealed by `Spend` descriptions when its associated `Note` is spent. - -**nullifier set**: The set of unique `Nullifier`s revealed by any `Transaction`s -within a `Block`. `Nullifier`s are enforced to be unique within a valid block chain -by committing to previous treestates in `Spend` descriptions, in order to prevent -double-spends. - -**note commitments**: Pedersen commitment to the values consisting a `Note`. One -should not be able to construct a `Note` from its commitment. - -**note commitment tree**: An incremental Merkle tree of fixed depth used to -store `NoteCommitment`s that `JoinSplit` transfers or `Spend` transfers produce. It -is used to express the existence of value and the capability to spend it. It is -not the job of this tree to protect against double-spending, as it is -append-only: that's what the `Nullifier` set is for. - -**note position**: The index of a `NoteCommitment` at the leafmost layer, -counting leftmost to rightmost. 
The [position in the tree is determined by the -order of transactions in the block](https://zips.z.cash/protocol/canopy.pdf#transactions). - -**root**: The layer 0 node of a Merkle tree. - -**anchor**: A Merkle tree root of a `NoteCommitment` tree. It uniquely -identifies a `NoteCommitment` tree state given the assumed security properties -of the Merkle tree’s hash function. Since the `Nullifier` set is always updated -together with the `NoteCommitment` tree, this also identifies a particular state -of the associated `Nullifier` set. - -**spend descriptions**: A shielded Sapling transfer that spends a `Note`. Includes -an anchor of some previous `Block`'s `NoteCommitment` tree. - -**output descriptions**: A shielded Sapling transfer that creates a -`Note`. Includes the u-coordinate of the `NoteCommitment` itself. - -**action descriptions**: A shielded Orchard transfer that spends and/or creates a `Note`. -Does not include an anchor, because that is encoded once in the `anchorOrchard` -field of a V5 `Transaction`. - - - -**joinsplit**: A shielded transfer that can spend Sprout `Note`s and transparent -value, and create new Sprout `Note`s and transparent value, in one Groth16 proof -statement. - - -# Guide-level explanation -[guide-level-explanation]: #guide-level-explanation - -TODO: split into common, Sprout, Sapling, and probably Orchard sections - -As `Block`s are validated, the `NoteCommitment`s revealed by all the transactions -within that block are used to construct `NoteCommitmentTree`s, with the -`NoteCommitment`s aligned in their note positions in the bottom layer of the -Sprout or Sapling tree from the left-most leaf to the right-most in -`Transaction` order in the `Block`. So the Sprout `NoteCommitment`s revealed by -the first `JoinSplit` in a block would take note position 0 in the Sprout -note commitment tree, for example. 
Once all the transactions in a block are -parsed and the notes for each tree collected in their appropriate positions, the -root of each tree is computed. While the trees are being built, the respective -block nullifier sets are updated in memory as note nullifiers are revealed. If -the rest of the block is validated according to consensus rules, that root is -committed to its own data structure via our state service (Sprout anchors, -Sapling anchors). Sapling block validation includes comparing the specified -FinalSaplingRoot in its block header to the root of the Sapling `NoteCommitment` -tree that we have just computed to make sure they match. - -As the transactions within a block are parsed, Sapling shielded transactions -including `Spend` descriptions and `Output` descriptions describe the spending and -creation of Zcash Sapling notes, and JoinSplit-on-Groth16 descriptions to -transfer/spend/create Sprout notes and transparent value. `JoinSplit` and `Spend` -descriptions specify an anchor, which references a previous `NoteCommitment` tree -root: for `Spend`s, this is a previous block's anchor as defined in their block -header, for `JoinSplit`s, it may be a previous block's anchor or the root -produced by a strictly previous `JoinSplit` description in its transaction. For -`Spend`s, this is convenient because we can query our state service for -previously finalized Sapling block anchors, and if they are found, then that -[consensus check](https://zips.z.cash/protocol/canopy.pdf#spendsandoutputs) has -been satisfied and the `Spend` description can be validated independently. For -`JoinSplit`s, if it's not a previously finalized block anchor, it must be the -treestate anchor of previous `JoinSplit` in this transaction, and we have to wait -for that one to be parsed and its root computed to check that ours is -valid. 
Luckily, it can only be a previous `JoinSplit` in this transaction, and is -[usually the immediately previous one](zcashd), so the set of candidate anchors -is smaller for earlier `JoinSplit`s in a transaction, but larger for the later -ones. For these `JoinSplit`s, they can be validated independently of their -anchor's finalization status as long as the final check of the anchor is done, -when available, such as at the Transaction level after all the `JoinSplit`s have -finished validating everything that can be validated without the context of -their anchor's finalization state. - -So for each transaction, for both `Spend` descriptions and `JoinSplit`s, we can -preemptively try to do our consensus check by looking up the anchors in our -finalized set first. For `Spend`s, we then trigger the remaining validation and -when that finishes we are full done with those. For `JoinSplit`s, the anchor -state check may pass early if it's a previous block Sprout `NoteCommitment` tree -root, but it may fail because it's an earlier `JoinSplit`s root instead, so once -the `JoinSplit` validates independently of the anchor, we wait for all candidate -previous `JoinSplit`s in that transaction finish validating before doing the -anchor consensus check again, but against the output treestate roots of earlier -`JoinSplit`s. - -Both Sprout and Sapling `NoteCommitment` trees must be computed for the whole -block to validate. For Sprout, we need to compute interstitial treestates in -between `JoinSplit`s in order to do the final consensus check for each/all -`JoinSplit`s, not just for the whole block, as in Sapling. 
- -For Sapling, at the block layer, we can iterate over all the transactions in -order and if they have `Spend`s and/or `Output`s, we update our Nullifer set for -the block as nullifiers are revealed in `Spend` descriptions, and update our note -commitment tree as `NoteCommitment`s are revealed in `Output` descriptions, adding -them as leaves in positions according to their order as they appear transaction -to transaction, output to output, in the block. This can be done independent of -the transaction validations. When the Sapling transactions are all validated, -the `NoteCommitmentTree` root should be computed: this is the anchor for this -block. For Sapling and Blossom blocks, we need to check that this root matches -the `RootHash` bytes in this block's header, as the `FinalSaplingRoot`. Once all -other consensus and validation checks are done, this will be saved down to our -finalized state to our `sapling_anchors` set, making it available for lookup by -other Sapling descriptions in future transactions. -TODO: explain Heartwood, Canopy, NU5 rule variants around anchors. -For Sprout, we must compute/update interstitial `NoteCommitmentTree`s between -`JoinSplit`s that may reference an earlier one's root as its anchor. If we do -this at the transaction layer, we can iterate through all the `JoinSplit`s and -compute the Sprout `NoteCommitmentTree` and nullifier set similar to how we do -the Sapling ones as described above, but at each state change (ie, -per-`JoinSplit`) we note the root and cache it for lookup later. As the -`JoinSplit`s are validated without context, we check for its specified anchor -amongst the interstitial roots we've already calculated (according to the spec, -these interstitial roots don't have to be finalized or the result of an -independently validated `JoinSplit`, they just must refer to any prior `JoinSplit` -root in the same transaction). 
So we only have to wait for our previous root to -be computed via any of our candidates, which in the worst case is waiting for -all of them to be computed for the last `JoinSplit`. If our `JoinSplit`s defined -root pops out, that `JoinSplit` passes that check. - -To finalize the block, the Sprout and Sapling treestates are the ones resulting -from the last transaction in the block, and determines the Sprout and Sapling -anchors that will be associated with this block as we commit it to our finalized -state. The Sprout and Sapling nullifiers revealed in the block will be merged -with the existing ones in our finalized state (ie, it should strictly grow over -time). - -## State Management - -### Orchard -- There is a single copy of the latest Orchard Note Commitment Tree for the finalized tip. -- When finalizing a block, the finalized tip is updated with a serialization of the latest Orchard Note Commitment Tree. (The previous tree should be deleted as part of the same database transaction.) -- Each non-finalized chain gets its own copy of the Orchard note commitment tree, cloned from the note commitment tree of the finalized tip or fork root. -- When a block is added to a non-finalized chain tip, the Orchard note commitment tree is updated with the note commitments from that block. -- When a block is rolled back from a non-finalized chain tip... (TODO) - -### Sapling -- There is a single copy of the latest Sapling Note Commitment Tree for the finalized tip. -- When finalizing a block, the finalized tip is updated with a serialization of the Sapling Note Commitment Tree. (The previous tree should be deleted as part of the same database transaction.) -- Each non-finalized chain gets its own copy of the Sapling note commitment tree, cloned from the note commitment tree of the finalized tip or fork root. -- When a block is added to a non-finalized chain tip, the Sapling note commitment tree is updated with the note commitments from that block. 
-- When a block is rolled back from a non-finalized chain tip... (TODO) - -### Sprout -- Every finalized block stores a separate copy of the Sprout note commitment tree (😿), as of that block. -- When finalizing a block, the Sprout note commitment tree for that block is stored in the state. (The trees for previous blocks also remain in the state.) -- Every block in each non-finalized chain gets its own copy of the Sprout note commitment tree. The initial tree is cloned from the note commitment tree of the finalized tip or fork root. -- When a block is added to a non-finalized chain tip, the Sprout note commitment tree is cloned, then updated with the note commitments from that block. -- When a block is rolled back from a non-finalized chain tip, the trees for each block are deleted, along with that block. - -We can't just compute a fresh tree with just the note commitments within a block, we are adding them to the tree referenced by the anchor, but we cannot update that tree with just the anchor, we need the 'frontier' nodes and leaves of the incremental merkle tree. 
- -# Reference-level explanation -[reference-level-explanation]: #reference-level-explanation - - -# Drawbacks -[drawbacks]: #drawbacks - - - -# Rationale and alternatives -[rationale-and-alternatives]: #rationale-and-alternatives - - -# Prior art -[prior-art]: #prior-art - - -# Unresolved questions -[unresolved-questions]: #unresolved-questions - - -# Future possibilities -[future-possibilities]: #future-possibilities From 6febe41f01479d4a9a5bbc9815f4eb584a073858 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 8 Apr 2025 10:14:10 -0300 Subject: [PATCH 138/245] rpc(getpeerinfo): Add inbound peers to method response (#9214) * extend getpeerinfo * add and use `currently_live_peers()` * remove `currently_live_peers` and just use filtered `recently_live_peers` instead * fix codespell * simplify Co-authored-by: Arya * remove non needed filter Co-authored-by: Arya * fix mock --------- Co-authored-by: Arya Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-network/src/address_book_peers/mock.rs | 8 ++- .../src/methods/get_block_template_rpcs.rs | 3 +- .../types/peer_info.rs | 12 ++++ .../tests/snapshot/get_block_template_rpcs.rs | 9 ++- .../snapshots/get_peer_info@mainnet_10.snap | 3 +- .../snapshots/get_peer_info@testnet_10.snap | 3 +- zebra-rpc/src/methods/tests/vectors.rs | 59 +++++++++++++++++-- 7 files changed, 84 insertions(+), 13 deletions(-) diff --git a/zebra-network/src/address_book_peers/mock.rs b/zebra-network/src/address_book_peers/mock.rs index 58253530f76..6de29152271 100644 --- a/zebra-network/src/address_book_peers/mock.rs +++ b/zebra-network/src/address_book_peers/mock.rs @@ -19,7 +19,11 @@ impl MockAddressBookPeers { } impl AddressBookPeers for MockAddressBookPeers { - fn recently_live_peers(&self, _now: chrono::DateTime) -> Vec { - self.recently_live_peers.clone() + fn recently_live_peers(&self, now: chrono::DateTime) -> Vec { + self.recently_live_peers + .iter() + .filter(|peer| 
peer.was_recently_live(now)) + .cloned() + .collect() } } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index df6d686c749..b8dbb810a75 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1101,10 +1101,11 @@ where async fn get_peer_info(&self) -> Result> { let address_book = self.address_book.clone(); + Ok(address_book .recently_live_peers(chrono::Utc::now()) .into_iter() - .map(PeerInfo::from) + .map(PeerInfo::new) .collect()) } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs index fe59df482fc..1eb75051300 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs @@ -7,12 +7,23 @@ use zebra_network::{types::MetaAddr, PeerSocketAddr}; pub struct PeerInfo { /// The IP address and port of the peer pub addr: PeerSocketAddr, + + /// Inbound (true) or Outbound (false) + pub inbound: bool, +} + +impl PeerInfo { + /// Create a new `PeerInfo` from a `MetaAddr` + pub fn new(meta_addr: MetaAddr) -> Self { + Self::from(meta_addr) + } } impl From for PeerInfo { fn from(meta_addr: MetaAddr) -> Self { Self { addr: meta_addr.addr(), + inbound: meta_addr.is_inbound(), } } } @@ -21,6 +32,7 @@ impl Default for PeerInfo { fn default() -> Self { Self { addr: PeerSocketAddr::unspecified(), + inbound: false, } } } diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index ec6d2fe4870..df6cfbacea9 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -25,7 +25,10 @@ use zebra_chain::{ transparent, work::difficulty::{CompactDifficulty, ParameterDifficulty as _}, }; -use 
zebra_network::{address_book_peers::MockAddressBookPeers, types::MetaAddr}; +use zebra_network::{ + address_book_peers::MockAddressBookPeers, + types::{MetaAddr, PeerServices}, +}; use zebra_node_services::mempool; use zebra_state::{GetBlockTemplateChainInfo, ReadRequest, ReadResponse}; @@ -132,12 +135,14 @@ pub async fn test_responses( mock_chain_tip_sender.send_best_tip_hash(fake_tip_hash); mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); - let mock_address_book = MockAddressBookPeers::new(vec![MetaAddr::new_initial_peer( + let mock_address_book = MockAddressBookPeers::new(vec![MetaAddr::new_connected( SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), network.default_port(), ) .into(), + &PeerServices::NODE_NETWORK, + false, ) .into_new_meta_addr(Instant::now(), DateTime32::now())]); diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap index 71d1375eb0e..651e1f005ee 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap @@ -4,6 +4,7 @@ expression: get_peer_info --- [ { - "addr": "127.0.0.1:8233" + "addr": "127.0.0.1:8233", + "inbound": false } ] diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap index e9ad3a7f930..62a290aebed 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap @@ -4,6 +4,7 @@ expression: get_peer_info --- [ { - "addr": "127.0.0.1:18233" + "addr": "127.0.0.1:18233", + "inbound": false } ] diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 1648f77875d..d18aef9e6ee 100644 --- 
a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1373,7 +1373,7 @@ async fn rpc_getblockcount_empty_state() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getpeerinfo() { use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; + use zebra_network::{address_book_peers::MockAddressBookPeers, types::PeerServices}; let _init_guard = zebra_test::init(); let network = Mainnet; @@ -1396,19 +1396,54 @@ async fn rpc_getpeerinfo() { ) .await; - let mock_peer_address = zebra_network::types::MetaAddr::new_initial_peer( + // Add a connected outbound peer + let outbound_mock_peer_address = zebra_network::types::MetaAddr::new_connected( std::net::SocketAddr::new( std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)), network.default_port(), ) .into(), + &PeerServices::NODE_NETWORK, + false, ) .into_new_meta_addr( std::time::Instant::now(), zebra_chain::serialization::DateTime32::now(), ); - let mock_address_book = MockAddressBookPeers::new(vec![mock_peer_address]); + // Add a connected inbound peer + let inbound_mock_peer_address = zebra_network::types::MetaAddr::new_connected( + std::net::SocketAddr::new( + std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)), + 44444, + ) + .into(), + &PeerServices::NODE_NETWORK, + true, + ) + .into_new_meta_addr( + std::time::Instant::now(), + zebra_chain::serialization::DateTime32::now(), + ); + + // Add a peer that is not connected and will not be displayed in the RPC output + let not_connected_mock_peer_adderess = zebra_network::types::MetaAddr::new_initial_peer( + std::net::SocketAddr::new( + std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)), + 55555, + ) + .into(), + ) + .into_new_meta_addr( + std::time::Instant::now(), + zebra_chain::serialization::DateTime32::now(), + ); + + let mock_address_book = MockAddressBookPeers::new(vec![ + outbound_mock_peer_address, + inbound_mock_peer_address, + 
not_connected_mock_peer_adderess, + ]); // Init RPC let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( @@ -1429,12 +1464,24 @@ async fn rpc_getpeerinfo() { .await .expect("We should have an array of addresses"); + // Response of length should be 2. We have 2 connected peers and 1 unconnected peer in the address book. + assert_eq!(get_peer_info.len(), 2); + + let mut res_iter = get_peer_info.into_iter(); + // Check for the outbound peer + assert_eq!( + res_iter + .next() + .expect("there should be a mock peer address"), + outbound_mock_peer_address.into() + ); + + // Check for the inbound peer assert_eq!( - get_peer_info - .into_iter() + res_iter .next() .expect("there should be a mock peer address"), - mock_peer_address.into() + inbound_mock_peer_address.into() ); mempool.expect_no_requests().await; From c9a4ae41f9f02dcab3ee529c9ab9c796ecffe0ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 15:39:30 +0000 Subject: [PATCH 139/245] build(deps): bump tokio from 1.44.1 to 1.44.2 in the cargo group (#9391) Bumps the cargo group with 1 update: [tokio](https://github.com/tokio-rs/tokio). Updates `tokio` from 1.44.1 to 1.44.2 - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.44.1...tokio-1.44.2) --- updated-dependencies: - dependency-name: tokio dependency-version: 1.44.2 dependency-type: direct:production dependency-group: cargo ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4adcde86be..6655e8a84ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4741,9 +4741,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.1" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 1a9b67bee94..f4166c1e7d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -128,7 +128,7 @@ tempfile = "3.19.1" thiserror = "2.0.12" thread-priority = "1.2.0" tinyvec = "1.9.0" -tokio = "1.44.1" +tokio = "1.44.2" tokio-stream = "0.1.17" tokio-test = "0.4.4" tokio-util = "0.7.14" From a9166b673620a9d6427fd44d9dfa7511f1342bbc Mon Sep 17 00:00:00 2001 From: Kris Nuttycombe Date: Tue, 8 Apr 2025 15:31:28 -0600 Subject: [PATCH 140/245] Update to `zcash_keys 0.8`, `zcash_client_backend 0.18` (#9395) --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- zebra-scan/src/bin/scanning-results-reader/main.rs | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6655e8a84ab..a621f278b8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5922,9 +5922,9 @@ dependencies = [ [[package]] name = "zcash_client_backend" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd14f1ef34cacef42dd2149783dad3d1f46949cb72da786f6ab13d6aa142020b" +checksum = "17d9eb503faa9f72a1b6575f884e524ac43e816b275a28445e9ecb9c59e46771" dependencies = [ "base64 0.22.1", "bech32", @@ -5984,9 +5984,9 @@ dependencies = [ [[package]] name = 
"zcash_keys" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af2839a7bb0489ccf0db9fb12c67234dd83e4a3b81ef50a10beecf1e852a18e" +checksum = "cb50fbc9d2d5e5997eefa934297be78312552f393149aa042ab12ac42031070c" dependencies = [ "bech32", "blake2b_simd", diff --git a/Cargo.toml b/Cargo.toml index f4166c1e7d7..06680d00137 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,10 +26,10 @@ incrementalmerkletree = { version = "0.8.2", features = ["legacy-api"] } orchard = "0.11.0" sapling-crypto = "0.5.0" zcash_address = "0.7.0" -zcash_client_backend = "0.17.0" +zcash_client_backend = "0.18" zcash_encoding = "0.3.0" zcash_history = "0.4.0" -zcash_keys = "0.7.0" +zcash_keys = "0.8" zcash_primitives = "0.22.0" zcash_proofs = "0.22.0" zcash_transparent = "0.2.3" diff --git a/zebra-scan/src/bin/scanning-results-reader/main.rs b/zebra-scan/src/bin/scanning-results-reader/main.rs index d6361022087..4a2d5eb7776 100644 --- a/zebra-scan/src/bin/scanning-results-reader/main.rs +++ b/zebra-scan/src/bin/scanning-results-reader/main.rs @@ -62,8 +62,8 @@ pub fn main() { ) .expect("TX fetched via RPC should be deserializable from raw bytes"); - for output in - decrypt_transaction(&zp_network, height, &tx, &ufvks).sapling_outputs() + for output in decrypt_transaction(&zp_network, Some(height), None, &tx, &ufvks) + .sapling_outputs() { let memo = memo_bytes_to_string(output.memo().as_array()); From 4956c22cdfe0d470f5bba0c3a26671fa5fcba282 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 9 Apr 2025 07:11:09 -0300 Subject: [PATCH 141/245] exit early in `try_reusing_previous_db_after_major_upgrade` if `!exist(old_path)` (#9397) --- zebra-state/src/service/finalized_state/disk_db.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 014213fcf83..01c0f0bd7ff 100644 --- 
a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -984,6 +984,11 @@ impl DiskDb { let db_kind = db_kind.as_ref(); let old_path = config.db_path(db_kind, major_db_ver - 1, network); + // Exit early if the path doesn't exist or there's an error checking it. + if !fs::exists(&old_path).unwrap_or(false) { + return; + } + let new_path = config.db_path(db_kind, major_db_ver, network); let old_path = match fs::canonicalize(&old_path) { From 7fc37651d310199517e340d7864016e91be969c9 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 9 Apr 2025 16:43:07 +0100 Subject: [PATCH 142/245] refactor!: remove experimental features from release builds (#9222) * refactor!: remove experimental features from release builds BREAKING CHANGE: Experimental features are no longer built into release images. Users must now build custom images with specific features if needed. - Remove experimental build job from release workflow - Remove experimental features from CI matrix - Update documentation to reflect new feature strategy - Simplify Dockerfile by removing experimental feature args - Keep RUN_ALL_EXPERIMENTAL_TESTS for testing purposes only Migration: Users who need specific features should: 1. Build custom images using --build-arg FEATURES="feature1 feature2" 2. Use the documented feature flags in their builds 3. 
Refer to the new documentation for building with custom features Closes #9210 Closes #9204 Partially #7415 * revert(ci): Add support for experimental features in test runs Enable passing experimental features to Docker test runs by: - Updating workflow configuration to pass EXPERIMENTAL_FEATURES - Modifying entrypoint script to include experimental features during test execution - Documenting new experimental test category in workflow README * refactor(docs): streamline Docker documentation and feature testing strategy - Reorganized and clarified advanced usage instructions for building Docker images with custom features, including metrics support. - Enhanced the presentation of alternatives and configuration details for Docker usage. * Apply suggestions from code review Co-authored-by: Marek * chore: revert deleted sentence * Update book/src/user/docker.md Co-authored-by: Marek --------- Co-authored-by: Marek --- .github/workflows/ci-unit-tests-os.yml | 13 +++++-------- .github/workflows/release-binaries.yml | 23 +---------------------- book/src/user/docker.md | 21 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 71468bd9348..4afbe55b361 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -73,7 +73,7 @@ jobs: ### Build and test Zebra on all OSes ### ######################################## test: - name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }} + name: Test ${{ matrix.rust }} on ${{ matrix.os }} # The large timeout is to accommodate: # - macOS and Windows builds (typically 50-90 minutes), and timeout-minutes: 120 @@ -83,11 +83,9 @@ jobs: matrix: os: [ubuntu-latest, macos-latest, windows-latest] rust: [stable, beta] - # TODO: When vars.EXPERIMENTAL_FEATURES has features in it, add it here. 
- # Or work out a way to trim the space from the variable: GitHub doesn't allow empty variables. - # Or use `default` for the empty feature set and EXPERIMENTAL_FEATURES, and update the branch protection rules. - #features: ${{ fromJSON(format('["", "{0}"]', vars.EXPERIMENTAL_FEATURES)) }} - features: [""] + # We only test with default features in this workflow + # Other feature combinations are tested in specific workflows + features: ["default-release-binaries"] exclude: # We're excluding macOS beta for the following reasons: # - the concurrent macOS runner limit is much lower than the Linux limit @@ -121,8 +119,7 @@ jobs: #with: # workspaces: ". -> C:\\zebra-target" with: - # Split the experimental features cache from the regular cache, to avoid linker errors. - # (These might be "disk full" errors, or they might be dependency resolution issues.) + # Split the cache by feature set to avoid linker errors key: ${{ matrix.features }} - name: Change target output directory on Windows diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 1cf4e6b5da7..21032d00881 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -15,30 +15,9 @@ on: - released jobs: - # Each time this workflow is executed, a build will be triggered to create a new image - # with the corresponding tags using information from git - - # The image will be named `zebra:-experimental` - build-experimental: - name: Build Experimental Features Release Docker - uses: ./.github/workflows/sub-build-docker-image.yml - with: - dockerfile_path: ./docker/Dockerfile - dockerfile_target: runtime - image_name: zebra - tag_suffix: -experimental - features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} - rust_log: ${{ vars.RUST_LOG }} - # This step needs access to Docker Hub secrets to run successfully - secrets: inherit - # The image will be named `zebra:` - # It should be built last, so 
tags with the same name point to the production build, not the experimental build. build: name: Build Release Docker - # Run this build last, regardless of whether experimental worked - needs: build-experimental - if: always() uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile @@ -53,7 +32,7 @@ jobs: failure-issue: name: Open or update issues for release binaries failures # When a new job is added to this workflow, add it to this list. - needs: [ build, build-experimental ] + needs: [ build ] # Open tickets for any failed build in this workflow. if: failure() || cancelled() runs-on: ubuntu-latest diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 90dad34a2d7..90ba102579d 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -54,6 +54,27 @@ docker build \ . ``` +### Alternatives + +See [Building Zebra](https://github.com/ZcashFoundation/zebra#manual-build) for more information. + + +### Building with Custom Features + +Zebra supports various features that can be enabled during build time using the `FEATURES` build argument: + +For example, if we'd like to enable metrics on the image, we'd build it using the following `build-arg`: + +> [!IMPORTANT] +> To fully use and display the metrics, you'll need to run a Prometheus and Grafana server, and configure it to scrape and visualize the metrics endpoint. This is explained in more detailed in the [Metrics](https://zebra.zfnd.org/user/metrics.html#zebra-metrics) section of the User Guide. + +```shell +# Build with specific features +docker build -f ./docker/Dockerfile --target runtime \ + --build-arg FEATURES="default-release-binaries prometheus" \ + --tag zebra:metrics . +``` + All available Cargo features are listed at . 
From cc5c5edd357b5aae013a7d1cbec394d09bfe9765 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 17:01:26 +0000 Subject: [PATCH 143/245] build(deps): bump the devops group with 3 updates (#9404) Bumps the devops group with 3 updates: [tj-actions/changed-files](https://github.com/tj-actions/changed-files), [peter-evans/dockerhub-description](https://github.com/peter-evans/dockerhub-description) and [github/codeql-action](https://github.com/github/codeql-action). Updates `tj-actions/changed-files` from 46.0.3 to 46.0.5 - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v46.0.3...v46.0.5) Updates `peter-evans/dockerhub-description` from 4.0.1 to 4.0.2 - [Release notes](https://github.com/peter-evans/dockerhub-description/releases) - [Commits](https://github.com/peter-evans/dockerhub-description/compare/v4.0.1...v4.0.2) Updates `github/codeql-action` from 3.28.13 to 3.28.15 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b549b9259bda1cb5ddde3b41741a82a2d15a841...45775bd8235c68ba998cffa5171334d58593da47) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-version: 46.0.5 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: peter-evans/dockerhub-description dependency-version: 4.0.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: github/codeql-action dependency-version: 3.28.15 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-lint.yml | 4 ++-- .github/workflows/docs-dockerhub-description.yml | 2 +- .github/workflows/zizmor.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index a62ac760af5..ff5e8489244 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v46.0.3 + uses: tj-actions/changed-files@v46.0.5 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v46.0.3 + uses: tj-actions/changed-files@v46.0.5 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index 8ef51be9cd4..5d0367b6037 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -23,7 +23,7 @@ jobs: persist-credentials: false - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4.0.1 + uses: peter-evans/dockerhub-description@v4.0.2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index b9bb0ed866a..d7258e20f23 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -25,7 +25,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 with: sarif_file: results.sarif category: zizmor From d0612323125ec1496feac2b47d433943044508df Mon Sep 17 00:00:00 2001 From: Mark Henderson Date: Wed, 
9 Apr 2025 14:52:13 -0400 Subject: [PATCH 144/245] Experiment: `Transaction::V6` variants only (#9339) * Update main.yml * feat: auto-sync upstream * fix: merge errors * Merge pull request #6 from ShieldedLabs/aphelionz/v6-transactions Add Transaction::V6 Variants * fix: enable tx_v6 on zebra-chain when it's enabled on zebra-state * fix: more feature flag dependencies * cleanup: remove prop.txt * Update zebra-chain/src/transaction.rs Co-authored-by: Alfredo Garcia * cleanup: removing SL-specific workflow * fix: skip some windows-related denies * Update deny.toml Co-authored-by: Alfredo Garcia * fix: better deny.toml entry for windows-core * Update zebra-chain/src/transaction/serialize.rs Co-authored-by: Alfredo Garcia * Update zebra-chain/src/transaction/serialize.rs Co-authored-by: Alfredo Garcia * Update zebra-chain/src/transaction/tests/vectors.rs Co-authored-by: Alfredo Garcia * Update zebra-chain/src/transaction.rs Co-authored-by: Alfredo Garcia * feat: passthrough functions for v6 -> v5 * fix: rust fmt --------- Co-authored-by: Mark Henderson Co-authored-by: Alfredo Garcia --- Cargo.lock | 544 +++++++++++------- deny.toml | 6 + zebra-chain/Cargo.toml | 2 + zebra-chain/src/transaction.rs | 181 ++++++ zebra-chain/src/transaction/arbitrary.rs | 4 + zebra-chain/src/transaction/serialize.rs | 49 ++ zebra-chain/src/transaction/tests/vectors.rs | 30 + zebra-chain/src/transaction/txid.rs | 8 + zebra-chain/src/transaction/unmined.rs | 2 + zebra-consensus/Cargo.toml | 2 + zebra-consensus/src/transaction.rs | 33 ++ zebra-state/Cargo.toml | 2 + .../src/service/non_finalized_state/chain.rs | 32 ++ zebra-state/src/tests.rs | 2 + zebrad/Cargo.toml | 2 + .../components/mempool/storage/tests/prop.rs | 16 + 16 files changed, 706 insertions(+), 209 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a621f278b8c..300156372ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,7 +93,7 @@ dependencies = [ "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 
0.7.35", ] [[package]] @@ -193,9 +193,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arc-swap" @@ -217,9 +217,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "flate2", "futures-core", @@ -252,9 +252,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", @@ -366,9 +366,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bech32" @@ -441,7 +441,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "shlex", "syn 2.0.100", ] @@ -582,15 +582,15 @@ checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byte-slice-cast" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -606,12 +606,11 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] @@ -670,9 +669,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "525046617d8376e3db1deffb079e91cef90a89fc3ca5c185bbf8c9ecdd15cd5c" dependencies = [ "jobserver", "libc", @@ -877,9 +876,9 @@ checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "console" -version = "0.15.10" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" dependencies = [ "encode_unicode", "libc", @@ -933,6 +932,26 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const_format" +version = "0.2.34" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "constant_time_eq" version = "0.3.1" @@ -1097,9 +1116,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ "darling_core", "darling_macro", @@ -1107,9 +1126,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ "fnv", "ident_case", @@ -1121,9 +1140,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", @@ -1152,9 +1171,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" 
dependencies = [ "powerfmt", "serde", @@ -1215,18 +1234,18 @@ dependencies = [ [[package]] name = "document-features" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" dependencies = [ "litrs", ] [[package]] name = "dyn-clone" -version = "1.0.17" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" [[package]] name = "ed25519" @@ -1257,9 +1276,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "elasticsearch" @@ -1312,15 +1331,15 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" dependencies = [ "libc", "windows-sys 0.59.0", @@ -1382,18 +1401,18 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", - "miniz_oxide 0.8.3", + "miniz_oxide 0.8.7", ] [[package]] @@ -1417,9 +1436,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "form_urlencoded" @@ -1584,21 +1603,23 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] name = "getset" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded738faa0e88d3abc9d1a13cb11adc2073c400969eeb8793cf7132589959fc" +checksum = "f3586f256131df87204eb733da72e3d3eb4f343c639f4b7be279ac7c48baeafe" dependencies = [ "proc-macro-error2", "proc-macro2", @@ -1645,9 +1666,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", @@ -1655,7 +1676,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.8.0", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -1664,9 +1685,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" dependencies = [ "cfg-if", "crunchy", @@ -1796,9 +1817,9 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" [[package]] name = "hex" @@ -1857,9 +1878,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1891,9 +1912,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -2006,16 +2027,17 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.63" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", - "windows-core", + "windows-core 0.61.0", ] [[package]] @@ -2068,9 +2090,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -2092,9 +2114,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -2113,9 +2135,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -2220,9 +2242,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2260,9 +2282,9 @@ dependencies = [ [[package]] name = "inout" -version = "0.1.3" 
+version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "generic-array", ] @@ -2292,11 +2314,11 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.4.0", + "hermit-abi 0.5.0", "libc", "windows-sys 0.59.0", ] @@ -2345,16 +2367,17 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -2381,9 +2404,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.8" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "834af00800e962dee8f7bfc0f60601de215e73e78e5497d733a2919da837d3c8" +checksum = "37b26c20e2178756451cfeb0661fb74c47dd5988cb7e3939de7e9241fd604d42" dependencies = [ "jsonrpsee-core", "jsonrpsee-server", @@ -2393,9 +2416,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.8" +version = "0.24.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "76637f6294b04e747d68e69336ef839a3493ca62b35bf488ead525f7da75c5bb" +checksum = "456196007ca3a14db478346f58c7238028d55ee15c1df15115596e411ff27925" dependencies = [ "async-trait", "bytes", @@ -2406,7 +2429,7 @@ dependencies = [ "jsonrpsee-types", "parking_lot", "rand 0.8.5", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", "serde_json", "thiserror 1.0.69", @@ -2429,9 +2452,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.24.8" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66b7a3df90a1a60c3ed68e7ca63916b53e9afa928e33531e87f61a9c8e9ae87b" +checksum = "55e363146da18e50ad2b51a0a7925fc423137a0b1371af8235b1c231a0647328" dependencies = [ "futures-util", "http", @@ -2575,9 +2598,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "libc", @@ -2605,9 +2628,9 @@ checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "litrs" @@ -2698,7 +2721,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", - "indexmap 2.8.0", + "indexmap 2.9.0", "ipnet", "metrics", "metrics-util", @@ -2747,9 +2770,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" 
+checksum = "ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430" dependencies = [ "adler2", ] @@ -2910,9 +2933,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" -version = "11.1.4" +version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "opaque-debug" @@ -2973,9 +2996,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.9.2" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" dependencies = [ "log", "serde", @@ -3011,28 +3034,30 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" dependencies = [ "arrayvec", "bitvec", "byte-slice-cast", + "const_format", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.100", ] [[package]] @@ -3081,9 +3106,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.15" +version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" dependencies = [ "memchr", "thiserror 2.0.12", @@ -3092,9 +3117,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" +checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" dependencies = [ "pest", "pest_generator", @@ -3102,9 +3127,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" +checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" dependencies = [ "pest", "pest_meta", @@ -3115,9 +3140,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" +checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" dependencies = [ "once_cell", "pest", @@ -3126,12 +3151,12 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.8.0", + "indexmap 2.9.0", ] [[package]] @@ -3178,9 +3203,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "plotters" @@ -3223,9 +3248,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" [[package]] name = "powerfmt" @@ -3235,18 +3260,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy", + "zerocopy 0.8.24", ] [[package]] name = "prettyplease" -version = "0.2.29" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" dependencies = [ "proc-macro2", "syn 2.0.100", @@ -3265,9 +3290,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] @@ -3320,9 +3345,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = 
"a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -3370,12 +3395,12 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f3e5beed80eb580c68e2c600937ac2c4eedabdfd5ef1e5b7ea4f3fba84497b" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck 0.5.0", - "itertools 0.13.0", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -3403,9 +3428,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] @@ -3433,9 +3458,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.37.2" +version = "0.37.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165859e9e55f79d67b96c5d96f4e88b6f2695a1972849c15a6a3f5c59fc2c003" +checksum = "a4ce8c88de324ff838700f36fb6ab86c96df0e3c4ab6ef3a9b2044465cce1369" dependencies = [ "memchr", ] @@ -3465,33 +3490,35 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror 2.0.12", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.2", + "rand 0.9.0", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", @@ -3503,9 +3530,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -3524,6 +3551,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "radium" version = "0.7.0" @@ -3554,6 +3587,17 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy 0.8.24", +] + [[package]] name = "rand_chacha" version = "0.2.2" @@ -3574,6 +3618,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + [[package]] name = "rand_core" version = "0.5.1" @@ -3592,6 +3646,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -3621,9 +3684,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.3.0" +version = "11.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" +checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" dependencies = [ "bitflags 2.9.0", ] @@ -3681,9 +3744,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" dependencies = [ "bitflags 2.9.0", ] @@ -3800,9 +3863,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.13" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3880,9 +3943,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc-hex" @@ -3927,9 +3990,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.22" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "log", "once_cell", @@ -3960,9 +4023,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "ring", "rustls-pki-types", @@ -3971,9 +4034,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rusty-fork" @@ -3989,9 +4052,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "same-file" @@ -4200,7 +4263,7 @@ version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "memchr", "ryu", @@ -4238,7 +4301,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.8.0", + "indexmap 2.9.0", "serde", "serde_derive", "serde_json", @@ -4264,7 +4327,7 @@ version = "0.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "libyml", "memchr", @@ -4385,9 +4448,9 
@@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" @@ -4583,7 +4646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" dependencies = [ "fastrand", - "getrandom 0.3.1", + "getrandom 0.3.2", "once_cell", "rustix 1.0.5", "windows-sys 0.59.0", @@ -4673,9 +4736,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4690,15 +4753,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4771,9 +4834,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = 
"8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4850,11 +4913,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.23" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", @@ -5171,9 +5234,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "ucd-trie" @@ -5222,9 +5285,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -5313,9 +5376,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.12.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ "serde", ] @@ -5429,9 +5492,9 @@ checksum = "a7b6d5a78adc3e8f198e9cd730f219a695431467f7ec29dcfc63ade885feebe1" [[package]] name = "wait-timeout" -version = "0.2.0" +version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" dependencies = [ "libc", ] @@ -5469,9 +5532,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] @@ -5625,7 +5688,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", "windows-targets 0.52.6", ] @@ -5638,6 +5701,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings 0.4.0", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "windows-link" version = "0.1.1" @@ -5651,7 +5749,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ "windows-result", - "windows-strings", + "windows-strings 0.3.1", "windows-targets 0.53.0", ] @@ -5673,6 +5771,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-strings" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -5821,9 +5928,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e49d2d35d3fad69b39b94139037ecfb4f359f08958b9c11e7315ce770462419" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -5836,9 +5943,9 @@ checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.0", ] @@ -6286,7 +6393,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.8.0", + "indexmap 2.9.0", "itertools 0.14.0", "lazy_static", "metrics", @@ -6338,7 +6445,7 @@ dependencies = [ "hex", "http-body-util", "hyper", - "indexmap 2.8.0", + "indexmap 2.9.0", "insta", "jsonrpsee", "jsonrpsee-proc-macros", @@ -6381,7 +6488,7 @@ dependencies = [ "futures", "group", "hex", - "indexmap 2.8.0", + "indexmap 2.9.0", "insta", "itertools 0.14.0", "jsonrpc", @@ -6447,7 +6554,7 @@ dependencies = [ "howudoin", "human_bytes", 
"humantime-serde", - "indexmap 2.8.0", + "indexmap 2.9.0", "insta", "itertools 0.14.0", "jubjub", @@ -6484,7 +6591,7 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.8.0", + "indexmap 2.9.0", "insta", "itertools 0.14.0", "lazy_static", @@ -6510,7 +6617,7 @@ version = "1.0.0-beta.45" dependencies = [ "color-eyre", "hex", - "indexmap 2.8.0", + "indexmap 2.9.0", "itertools 0.14.0", "jsonrpc", "quote", @@ -6555,7 +6662,7 @@ dependencies = [ "humantime-serde", "hyper", "hyper-util", - "indexmap 2.8.0", + "indexmap 2.9.0", "indicatif", "inferno", "insta", @@ -6613,8 +6720,16 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +dependencies = [ + "zerocopy-derive 0.8.24", ] [[package]] @@ -6628,20 +6743,31 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", diff --git a/deny.toml b/deny.toml index eccc230af95..8e348c31870 100644 --- a/deny.toml +++ b/deny.toml @@ -98,6 +98,12 @@ skip-tree = [ # wait until zcash_client_backend update rustix { name = "rustix", version = "=0.38.44" }, + + # wait for reqwest to update windows-registry + { name = "windows-strings", version = "=0.3.1" }, + + # wait for sentry to update windows-core + { name = "windows-core", version = "=0.52.0" } ] # This section is considered when running `cargo deny check sources`. diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 579b3304b45..609e5e7cc4d 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -60,6 +60,8 @@ proptest-impl = [ bench = ["zebra-test"] +tx_v6 = [] + [dependencies] # Cryptography diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 025bd22881d..d7d9a59e325 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -143,6 +143,28 @@ pub enum Transaction { /// The orchard data for this transaction, if any. orchard_shielded_data: Option, }, + /// A `version = 6` transaction, which is reserved for current development. + #[cfg(feature = "tx_v6")] + V6 { + /// The Network Upgrade for this transaction. + /// + /// Derived from the ConsensusBranchId field. + network_upgrade: NetworkUpgrade, + /// The earliest time or block height that this transaction can be added to the + /// chain. + lock_time: LockTime, + /// The latest block height that this transaction can be added to the chain. + expiry_height: block::Height, + /// The transparent inputs to the transaction. + inputs: Vec, + /// The transparent outputs from the transaction. + outputs: Vec, + /// The sapling shielded data for this transaction, if any. + sapling_shielded_data: Option>, + /// The orchard data for this transaction, if any. 
+ orchard_shielded_data: Option, + // TODO: Add the rest of the v6 fields. + }, } impl fmt::Display for Transaction { @@ -253,6 +275,8 @@ impl Transaction { | Transaction::V3 { .. } | Transaction::V4 { .. } => None, Transaction::V5 { .. } => Some(AuthDigest::from(self)), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Some(AuthDigest::from(self)), } } @@ -342,6 +366,8 @@ impl Transaction { match self { Transaction::V1 { .. } | Transaction::V2 { .. } => false, Transaction::V3 { .. } | Transaction::V4 { .. } | Transaction::V5 { .. } => true, + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => true, } } @@ -363,6 +389,8 @@ impl Transaction { Transaction::V3 { .. } => 3, Transaction::V4 { .. } => 4, Transaction::V5 { .. } => 5, + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => 6, } } @@ -374,6 +402,8 @@ impl Transaction { | Transaction::V3 { lock_time, .. } | Transaction::V4 { lock_time, .. } | Transaction::V5 { lock_time, .. } => *lock_time, + #[cfg(feature = "tx_v6")] + Transaction::V6 { lock_time, .. } => *lock_time, }; // `zcashd` checks that the block height is greater than the lock height. @@ -421,6 +451,8 @@ impl Transaction { | Transaction::V3 { lock_time, .. } | Transaction::V4 { lock_time, .. } | Transaction::V5 { lock_time, .. } => *lock_time, + #[cfg(feature = "tx_v6")] + Transaction::V6 { lock_time, .. } => *lock_time, }; let mut lock_time_bytes = Vec::new(); lock_time @@ -457,6 +489,15 @@ impl Transaction { block::Height(0) => None, block::Height(expiry_height) => Some(block::Height(*expiry_height)), }, + #[cfg(feature = "tx_v6")] + Transaction::V6 { expiry_height, .. } => match expiry_height { + // # Consensus + // + // > No limit: To set no limit on transactions (so that they do not expire), nExpiryHeight should be set to 0. 
+ // https://zips.z.cash/zip-0203#specification + block::Height(0) => None, + block::Height(expiry_height) => Some(block::Height(*expiry_height)), + }, } } @@ -473,6 +514,10 @@ impl Transaction { Transaction::V5 { network_upgrade, .. } => Some(*network_upgrade), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + network_upgrade, .. + } => Some(*network_upgrade), } } @@ -486,6 +531,8 @@ impl Transaction { Transaction::V3 { ref inputs, .. } => inputs, Transaction::V4 { ref inputs, .. } => inputs, Transaction::V5 { ref inputs, .. } => inputs, + #[cfg(feature = "tx_v6")] + Transaction::V6 { ref inputs, .. } => inputs, } } @@ -504,6 +551,8 @@ impl Transaction { Transaction::V3 { ref outputs, .. } => outputs, Transaction::V4 { ref outputs, .. } => outputs, Transaction::V5 { ref outputs, .. } => outputs, + #[cfg(feature = "tx_v6")] + Transaction::V6 { ref outputs, .. } => outputs, } } @@ -552,6 +601,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -587,6 +638,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => 0, + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => 0, } } @@ -626,6 +679,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -662,6 +717,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => None, + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => None, } } @@ -670,6 +727,8 @@ impl Transaction { match self { // No JoinSplits Transaction::V1 { .. } | Transaction::V5 { .. } => false, + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => false, // JoinSplits-on-BCTV14 Transaction::V2 { joinsplit_data, .. } | Transaction::V3 { joinsplit_data, .. } => { @@ -717,6 +776,8 @@ impl Transaction { } | Transaction::V1 { .. } | Transaction::V5 { .. 
} => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -738,6 +799,12 @@ impl Transaction { .. } => Box::new(sapling_shielded_data.anchors()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Box::new(sapling_shielded_data.anchors()), + // No Spends Transaction::V1 { .. } | Transaction::V2 { .. } @@ -750,6 +817,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Box::new(std::iter::empty()), } } @@ -775,6 +847,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. } => Box::new(sapling_shielded_data.spends_per_anchor()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Box::new(sapling_shielded_data.spends_per_anchor()), // No Spends Transaction::V1 { .. } @@ -788,6 +865,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Box::new(std::iter::empty()), } } @@ -803,6 +885,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. } => Box::new(sapling_shielded_data.outputs()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Box::new(sapling_shielded_data.outputs()), // No Outputs Transaction::V1 { .. } @@ -816,6 +903,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Box::new(std::iter::empty()), } } @@ -833,6 +925,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. 
} => Box::new(sapling_shielded_data.nullifiers()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Box::new(sapling_shielded_data.nullifiers()), // No Spends Transaction::V1 { .. } @@ -846,6 +943,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Box::new(std::iter::empty()), } } @@ -863,6 +965,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. } => Box::new(sapling_shielded_data.note_commitments()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Box::new(sapling_shielded_data.note_commitments()), // No Spends Transaction::V1 { .. } @@ -876,6 +983,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Box::new(std::iter::empty()), } } @@ -891,6 +1003,11 @@ impl Transaction { sapling_shielded_data, .. } => sapling_shielded_data.is_some(), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data, + .. + } => sapling_shielded_data.is_some(), } } @@ -905,6 +1022,11 @@ impl Transaction { orchard_shielded_data, .. } => orchard_shielded_data.as_ref(), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + orchard_shielded_data, + .. + } => orchard_shielded_data.as_ref(), // No Orchard shielded data Transaction::V1 { .. } @@ -1029,6 +1151,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -1076,6 +1200,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -1117,6 +1243,8 @@ impl Transaction { .. 
} | Transaction::V5 { .. } => Box::new(iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(iter::empty()), }; joinsplit_value_balances.map(ValueBalance::from_sprout_amount) @@ -1158,6 +1286,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. } => sapling_shielded_data.value_balance, + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => sapling_shielded_data.value_balance, Transaction::V1 { .. } | Transaction::V2 { .. } @@ -1170,6 +1303,11 @@ impl Transaction { sapling_shielded_data: None, .. } => Amount::zero(), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => Amount::zero(), }; ValueBalance::from_sapling_amount(sapling_value_balance) @@ -1290,6 +1428,14 @@ impl Transaction { *network_upgrade = nu; Ok(()) } + #[cfg(feature = "tx_v6")] + Transaction::V6 { + ref mut network_upgrade, + .. + } => { + *network_upgrade = nu; + Ok(()) + } } } @@ -1315,6 +1461,11 @@ impl Transaction { ref mut expiry_height, .. } => expiry_height, + #[cfg(feature = "tx_v6")] + Transaction::V6 { + ref mut expiry_height, + .. + } => expiry_height, } } @@ -1326,6 +1477,8 @@ impl Transaction { Transaction::V3 { ref mut inputs, .. } => inputs, Transaction::V4 { ref mut inputs, .. } => inputs, Transaction::V5 { ref mut inputs, .. } => inputs, + #[cfg(feature = "tx_v6")] + Transaction::V6 { ref mut inputs, .. } => inputs, } } @@ -1352,6 +1505,11 @@ impl Transaction { sapling_shielded_data: Some(sapling_shielded_data), .. } => Some(&mut sapling_shielded_data.value_balance), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: Some(sapling_shielded_data), + .. + } => Some(&mut sapling_shielded_data.value_balance), Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } @@ -1363,6 +1521,11 @@ impl Transaction { sapling_shielded_data: None, .. 
} => None, + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data: None, + .. + } => None, } } @@ -1411,6 +1574,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -1459,6 +1624,8 @@ impl Transaction { .. } | Transaction::V5 { .. } => Box::new(std::iter::empty()), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => Box::new(std::iter::empty()), } } @@ -1477,6 +1644,11 @@ impl Transaction { orchard_shielded_data: Some(orchard_shielded_data), .. } => Some(orchard_shielded_data), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + orchard_shielded_data: Some(orchard_shielded_data), + .. + } => Some(orchard_shielded_data), Transaction::V1 { .. } | Transaction::V2 { .. } @@ -1486,6 +1658,11 @@ impl Transaction { orchard_shielded_data: None, .. } => None, + #[cfg(feature = "tx_v6")] + Transaction::V6 { + orchard_shielded_data: None, + .. + } => None, } } @@ -1507,6 +1684,10 @@ impl Transaction { Transaction::V5 { ref mut outputs, .. } => outputs, + #[cfg(feature = "tx_v6")] + Transaction::V6 { + ref mut outputs, .. + } => outputs, } } } diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index f51b209fc02..2438039ea1c 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -924,6 +924,8 @@ pub fn transaction_to_fake_v5( orchard_shielded_data: None, }, v5 @ V5 { .. } => v5.clone(), + #[cfg(feature = "tx_v6")] + v6 @ V6 { .. } => v6.clone(), } } @@ -1008,6 +1010,8 @@ pub fn v5_transactions<'b>( | Transaction::V3 { .. } | Transaction::V4 { .. } => None, ref tx @ Transaction::V5 { .. } => Some(tx.clone()), + #[cfg(feature = "tx_v6")] + ref tx @ Transaction::V6 { .. 
} => Some(tx.clone()), }) } diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index a8a6b8ed8e5..bfd614b22df 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -672,6 +672,55 @@ impl ZcashSerialize for Transaction { // `proofsOrchard`, `vSpendAuthSigsOrchard`, and `bindingSigOrchard`. orchard_shielded_data.zcash_serialize(&mut writer)?; } + + #[cfg(feature = "tx_v6")] + Transaction::V6 { + network_upgrade, + lock_time, + expiry_height, + inputs, + outputs, + sapling_shielded_data, + orchard_shielded_data, + } => { + // Transaction V6 spec: + // https://zips.z.cash/zip-0230#specification + + // Denoted as `nVersionGroupId` in the spec. + writer.write_u32::(TX_V5_VERSION_GROUP_ID)?; + + // Denoted as `nConsensusBranchId` in the spec. + writer.write_u32::(u32::from( + network_upgrade + .branch_id() + .expect("valid transactions must have a network upgrade with a branch id"), + ))?; + + // Denoted as `lock_time` in the spec. + lock_time.zcash_serialize(&mut writer)?; + + // Denoted as `nExpiryHeight` in the spec. + writer.write_u32::(expiry_height.0)?; + + // Denoted as `tx_in_count` and `tx_in` in the spec. + inputs.zcash_serialize(&mut writer)?; + + // Denoted as `tx_out_count` and `tx_out` in the spec. + outputs.zcash_serialize(&mut writer)?; + + // A bundle of fields denoted in the spec as `nSpendsSapling`, `vSpendsSapling`, + // `nOutputsSapling`,`vOutputsSapling`, `valueBalanceSapling`, `anchorSapling`, + // `vSpendProofsSapling`, `vSpendAuthSigsSapling`, `vOutputProofsSapling` and + // `bindingSigSapling`. + sapling_shielded_data.zcash_serialize(&mut writer)?; + + // A bundle of fields denoted in the spec as `nActionsOrchard`, `vActionsOrchard`, + // `flagsOrchard`,`valueBalanceOrchard`, `anchorOrchard`, `sizeProofsOrchard`, + // `proofsOrchard`, `vSpendAuthSigsOrchard`, and `bindingSigOrchard`. 
+ orchard_shielded_data.zcash_serialize(&mut writer)?; + + // TODO: Add the rest of v6 transaction fields. + } } Ok(()) } diff --git a/zebra-chain/src/transaction/tests/vectors.rs b/zebra-chain/src/transaction/tests/vectors.rs index 43f2292f598..673125bc016 100644 --- a/zebra-chain/src/transaction/tests/vectors.rs +++ b/zebra-chain/src/transaction/tests/vectors.rs @@ -908,6 +908,36 @@ fn binding_signatures() { ) .expect("a valid redjubjub::VerificationKey"); + bvk.verify(sighash.as_ref(), &sapling_shielded_data.binding_sig) + .expect("verification passes"); + + at_least_one_v5_checked = true; + } + } + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data, + .. + } => { + if let Some(sapling_shielded_data) = sapling_shielded_data { + // V6 txs have the outputs spent by their transparent inputs hashed into + // their SIGHASH, so we need to exclude txs with transparent inputs. + // + // References: + // + // + // + if tx.has_transparent_inputs() { + continue; + } + + let sighash = tx.sighash(nu, HashType::ALL, &[], None); + + let bvk = redjubjub::VerificationKey::try_from( + sapling_shielded_data.binding_verification_key(), + ) + .expect("a valid redjubjub::VerificationKey"); + bvk.verify(sighash.as_ref(), &sapling_shielded_data.binding_sig) .expect("verification passes"); diff --git a/zebra-chain/src/transaction/txid.rs b/zebra-chain/src/transaction/txid.rs index abaffdd4d45..9b08cf55edc 100644 --- a/zebra-chain/src/transaction/txid.rs +++ b/zebra-chain/src/transaction/txid.rs @@ -28,6 +28,8 @@ impl<'a> TxIdBuilder<'a> { | Transaction::V3 { .. } | Transaction::V4 { .. } => self.txid_v1_to_v4(), Transaction::V5 { .. } => self.txid_v5(), + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => self.txid_v6(), } } @@ -48,4 +50,10 @@ impl<'a> TxIdBuilder<'a> { // We compute v5 txid (from ZIP-244) using librustzcash. Some(Hash(*self.trans.to_librustzcash(nu).ok()?.txid().as_ref())) } + + /// Passthrough to txid_v5 for V6 transactions. 
+ #[cfg(feature = "tx_v6")] + fn txid_v6(self) -> Option { + self.txid_v5() + } } diff --git a/zebra-chain/src/transaction/unmined.rs b/zebra-chain/src/transaction/unmined.rs index 8d808781384..26f208cdd5b 100644 --- a/zebra-chain/src/transaction/unmined.rs +++ b/zebra-chain/src/transaction/unmined.rs @@ -142,6 +142,8 @@ impl From<&Transaction> for UnminedTxId { match transaction { V1 { .. } | V2 { .. } | V3 { .. } | V4 { .. } => Legacy(transaction.into()), V5 { .. } => Witnessed(transaction.into()), + #[cfg(feature = "tx_v6")] + V6 { .. } => Witnessed(transaction.into()), } } } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 7fe94c1b380..e29249b12aa 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -31,6 +31,8 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +tx_v6 = ["zebra-chain/tx_v6", "zebra-state/tx_v6"] + # Test-only features proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "zebra-state/proptest-impl"] diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 661b0be14d2..dcf3ff0d8f0 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -523,6 +523,19 @@ where sapling_shielded_data, orchard_shielded_data, )?, + #[cfg(feature="tx_v6")] + Transaction::V6 { + sapling_shielded_data, + orchard_shielded_data, + .. + } => Self::verify_v6_transaction( + &req, + &network, + script_verifier, + cached_ffi_transaction.clone(), + sapling_shielded_data, + orchard_shielded_data, + )?, }; if let Some(unmined_tx) = req.mempool_transaction() { @@ -1027,6 +1040,26 @@ where } } + /// Passthrough to verify_v5_transaction, but for V6 transactions. 
+ #[cfg(feature = "tx_v6")] + fn verify_v6_transaction( + request: &Request, + network: &Network, + script_verifier: script::Verifier, + cached_ffi_transaction: Arc, + sapling_shielded_data: &Option>, + orchard_shielded_data: &Option, + ) -> Result { + Self::verify_v5_transaction( + request, + network, + script_verifier, + cached_ffi_transaction, + sapling_shielded_data, + orchard_shielded_data, + ) + } + /// Verifies if a transaction's transparent inputs are valid using the provided /// `script_verifier` and `cached_ffi_transaction`. /// diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 2b62be81751..f7695cf8e84 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -48,6 +48,8 @@ elasticsearch = [ "zebra-chain/elasticsearch", ] +tx_v6 = ["zebra-chain/tx_v6"] + [dependencies] bincode = { workspace = true } chrono = { workspace = true, features = ["clock", "std"] } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index e017a742c0d..771ecc1affd 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -1563,6 +1563,22 @@ impl Chain { sapling_shielded_data, orchard_shielded_data, ), + #[cfg(feature="tx_v6")] + V6 { + inputs, + outputs, + sapling_shielded_data, + orchard_shielded_data, + .. + } => ( + inputs, + outputs, + &None, + &None, + sapling_shielded_data, + orchard_shielded_data, + ), + V1 { .. } | V2 { .. } | V3 { .. } => unreachable!( "older transaction versions only exist in finalized blocks, because of the mandatory canopy checkpoint", ), @@ -1731,6 +1747,22 @@ impl UpdateWith for Chain { sapling_shielded_data, orchard_shielded_data, ), + #[cfg(feature="tx_v6")] + V6 { + inputs, + outputs, + sapling_shielded_data, + orchard_shielded_data, + .. + } => ( + inputs, + outputs, + &None, + &None, + sapling_shielded_data, + orchard_shielded_data, + ), + V1 { .. } | V2 { .. } | V3 { .. 
} => unreachable!( "older transaction versions only exist in finalized blocks, because of the mandatory canopy checkpoint", ), diff --git a/zebra-state/src/tests.rs b/zebra-state/src/tests.rs index 488ab4227bd..2a229c0ebb0 100644 --- a/zebra-state/src/tests.rs +++ b/zebra-state/src/tests.rs @@ -34,6 +34,8 @@ impl FakeChainHelper for Arc { Transaction::V3 { inputs, .. } => &mut inputs[0], Transaction::V4 { inputs, .. } => &mut inputs[0], Transaction::V5 { inputs, .. } => &mut inputs[0], + #[cfg(feature = "tx_v6")] + Transaction::V6 { inputs, .. } => &mut inputs[0], }; match input { diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 355601deecd..cf37db69612 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -158,6 +158,8 @@ test_sync_to_mandatory_checkpoint_testnet = [] test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] +tx_v6 = ["zebra-chain/tx_v6", "zebra-state/tx_v6", "zebra-consensus/tx_v6"] + [dependencies] zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45" } diff --git a/zebrad/src/components/mempool/storage/tests/prop.rs b/zebrad/src/components/mempool/storage/tests/prop.rs index 3ddb67583b0..f180795bac7 100644 --- a/zebrad/src/components/mempool/storage/tests/prop.rs +++ b/zebrad/src/components/mempool/storage/tests/prop.rs @@ -568,6 +568,8 @@ impl SpendConflictTestInput { // No JoinSplits Transaction::V1 { .. } | Transaction::V5 { .. } => {} + #[cfg(feature = "tx_v6")] + Transaction::V6 { .. } => {} } } } @@ -638,6 +640,14 @@ impl SpendConflictTestInput { Self::remove_sapling_transfers_with_conflicts(sapling_shielded_data, &conflicts) } + #[cfg(feature = "tx_v6")] + Transaction::V6 { + sapling_shielded_data, + .. + } => { + Self::remove_sapling_transfers_with_conflicts(sapling_shielded_data, &conflicts) + } + // No Spends Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. 
} => {} } @@ -709,6 +719,12 @@ impl SpendConflictTestInput { .. } => Self::remove_orchard_actions_with_conflicts(orchard_shielded_data, &conflicts), + #[cfg(feature = "tx_v6")] + Transaction::V6 { + orchard_shielded_data, + .. + } => Self::remove_orchard_actions_with_conflicts(orchard_shielded_data, &conflicts), + // No Spends Transaction::V1 { .. } | Transaction::V2 { .. } From 0ea5fe076fe82d645e7fea129820accbbbe2b6b7 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 10 Apr 2025 15:27:50 +0100 Subject: [PATCH 145/245] fix(test): wait for sync to start before launching lightwalletd (#9409) * fix(test): wait for genesis block before launching lightwalletd The `lightwalletd_integration` test would fail when running with an empty `zebrad` state (`LaunchWithEmptyState` test type). `lightwalletd` expects the connected `zebrad` instance to have processed at least the genesis block upon startup and treats a completely empty state as a fatal error. This adds a wait step specifically for the empty state scenario. The test now waits for `zebrad` to log that it has committed the genesis block (Height 0) before launching the `lightwalletd` process. This ensures `zebrad` is ready and prevents `lightwalletd` from exiting prematurely. * fix(test): use syncer state tip log for lightwalletd launch condition * fix(test): lint --- zebrad/tests/acceptance.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 797d5440204..73fb03eced8 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1974,6 +1974,21 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { )?; } + // Wait for zebrad to sync the genesis block before launching lightwalletd, + // if lightwalletd is launched and zebrad starts with an empty state. + // This prevents lightwalletd from exiting early due to an empty state. 
+ if test_type.launches_lightwalletd() && !test_type.needs_zebra_cached_state() { + tracing::info!( + ?test_type, + "waiting for zebrad to sync genesis block before launching lightwalletd...", + ); + // Wait for zebrad to commit the genesis block to the state. + // Use the syncer's state tip log message, as the specific commit log might not appear reliably. + zebrad.expect_stdout_line_matches( + "starting sync, obtaining new tips state_tip=Some\\(Height\\(0\\)\\)", + )?; + } + // Launch lightwalletd, if needed let lightwalletd_and_port = if test_type.launches_lightwalletd() { tracing::info!( From 4e29b097af1cb3186b3f6c57ebe81bdf1a1c21a0 Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 10 Apr 2025 20:29:48 +0200 Subject: [PATCH 146/245] Remove the `getblocktemplate-rpcs` Cargo feature (#9401) * Remove the `getblocktemplate-rpcs` Cargo feature * Keep the feature in `zebrad/Cargo.toml` --- .../sub-ci-integration-tests-gcp.yml | 2 +- book/src/dev/overview.md | 4 +- zebra-chain/Cargo.toml | 4 -- zebra-chain/src/transaction.rs | 1 - zebra-chain/src/transparent.rs | 2 - zebra-consensus/Cargo.toml | 7 ---- zebra-consensus/src/block.rs | 4 +- zebra-consensus/src/block/request.rs | 6 --- zebra-consensus/src/router.rs | 1 - zebra-node-services/Cargo.toml | 5 --- zebra-node-services/src/mempool.rs | 2 - zebra-rpc/Cargo.toml | 15 ++----- zebra-rpc/src/config.rs | 5 --- zebra-rpc/src/config/mining.rs | 8 ---- zebra-rpc/src/methods.rs | 11 ----- .../src/methods/get_block_template_rpcs.rs | 13 +----- zebra-rpc/src/methods/tests.rs | 1 - zebra-rpc/src/methods/tests/prop.rs | 23 ----------- zebra-rpc/src/methods/tests/snapshot.rs | 16 +------- .../tests/snapshot/get_block_template_rpcs.rs | 2 +- zebra-rpc/src/methods/tests/vectors.rs | 13 ------ .../src/methods/types/get_raw_mempool.rs | 5 --- zebra-rpc/src/server.rs | 14 ------- zebra-rpc/src/server/error.rs | 2 - zebra-state/Cargo.toml | 5 --- zebra-state/src/request.rs | 9 ---- zebra-state/src/response.rs | 6 --- 
zebra-state/src/service.rs | 5 --- .../src/service/non_finalized_state.rs | 19 +-------- zebra-utils/Cargo.toml | 11 +---- zebrad/Cargo.toml | 14 ++----- zebrad/src/commands/start.rs | 16 -------- .../components/inbound/tests/fake_peer_set.rs | 5 --- .../components/inbound/tests/real_peer_set.rs | 6 --- zebrad/src/components/mempool.rs | 5 --- zebrad/src/config.rs | 1 - zebrad/src/lib.rs | 4 -- zebrad/tests/acceptance.rs | 41 ++----------------- zebrad/tests/common/config.rs | 19 ++++----- zebrad/tests/common/mod.rs | 13 ++---- 40 files changed, 33 insertions(+), 312 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 071c46f0b74..9f045d7dcf3 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -430,7 +430,7 @@ jobs: saves_to_disk: false secrets: inherit - ## getblocktemplate-rpcs using cached Zebra state on mainnet + ## getblocktemplate RPC tests using cached Zebra state on mainnet # # TODO: move these below the rest of the mainnet jobs that just use Zebra cached state diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index afefbd33403..642f472fd42 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -41,8 +41,6 @@ The following are general desiderata for Zebra: ## Service Dependencies -Note: dotted lines are for "getblocktemplate-rpcs" feature -
{{#include diagrams/service-dependencies.svg}}
@@ -74,6 +72,8 @@ digraph services { Render here: https://dreampuf.github.io/GraphvizOnline --> +The dotted lines are for the `getblocktemplate` RPC. + ## Architecture Unlike `zcashd`, which originated as a Bitcoin Core fork and inherited its diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 609e5e7cc4d..e09ca94fdb0 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -29,10 +29,6 @@ async-error = [ "tokio", ] -# Mining RPC support -getblocktemplate-rpcs = [ -] - # Experimental shielded scanning support shielded-scan = [ "zcash_client_backend" diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index d7d9a59e325..c242492f061 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -14,7 +14,6 @@ mod sighash; mod txid; mod unmined; -#[cfg(feature = "getblocktemplate-rpcs")] pub mod builder; #[cfg(any(test, feature = "proptest-impl"))] diff --git a/zebra-chain/src/transparent.rs b/zebra-chain/src/transparent.rs index 55f45beb92d..47d7bfd42c1 100644 --- a/zebra-chain/src/transparent.rs +++ b/zebra-chain/src/transparent.rs @@ -218,7 +218,6 @@ impl Input { /// # Panics /// /// If the coinbase data is greater than [`MAX_COINBASE_DATA_LEN`]. - #[cfg(feature = "getblocktemplate-rpcs")] pub fn new_coinbase( height: block::Height, data: Option>, @@ -409,7 +408,6 @@ pub struct Output { impl Output { /// Returns a new coinbase output that pays `amount` using `lock_script`. 
- #[cfg(feature = "getblocktemplate-rpcs")] pub fn new_coinbase(amount: Amount, lock_script: Script) -> Output { Output { value: amount, diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index e29249b12aa..07a1d09bffc 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -24,13 +24,6 @@ progress-bar = [ "zebra-state/progress-bar", ] -# Mining RPC support -getblocktemplate-rpcs = [ - "zebra-state/getblocktemplate-rpcs", - "zebra-node-services/getblocktemplate-rpcs", - "zebra-chain/getblocktemplate-rpcs", -] - tx_v6 = ["zebra-chain/tx_v6", "zebra-state/tx_v6"] # Test-only features diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index c4c4ab336ad..d053074a531 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -79,7 +79,6 @@ pub enum VerifyBlockError { // TODO: make this into a concrete type, and add it to is_duplicate_request() (#2908) Commit(#[source] BoxError), - #[cfg(feature = "getblocktemplate-rpcs")] #[error("unable to validate block proposal: failed semantic verification (proof of work is not checked for proposals): {0}")] // TODO: make this into a concrete type (see #5732) ValidateProposal(#[source] BoxError), @@ -343,8 +342,7 @@ where deferred_balance: Some(expected_deferred_amount), }; - // Return early for proposal requests when getblocktemplate-rpcs feature is enabled - #[cfg(feature = "getblocktemplate-rpcs")] + // Return early for proposal requests. 
if request.is_proposal() { return match state_service .ready() diff --git a/zebra-consensus/src/block/request.rs b/zebra-consensus/src/block/request.rs index 9a0dbcc9cf9..534f6c599b8 100644 --- a/zebra-consensus/src/block/request.rs +++ b/zebra-consensus/src/block/request.rs @@ -9,8 +9,6 @@ use zebra_chain::block::Block; pub enum Request { /// Performs semantic validation, then asks the state to perform contextual validation and commit the block Commit(Arc), - - #[cfg(feature = "getblocktemplate-rpcs")] /// Performs semantic validation but skips checking proof of work, /// then asks the state to perform contextual validation. /// Does not commit the block to the state. @@ -22,8 +20,6 @@ impl Request { pub fn block(&self) -> Arc { Arc::clone(match self { Request::Commit(block) => block, - - #[cfg(feature = "getblocktemplate-rpcs")] Request::CheckProposal(block) => block, }) } @@ -32,8 +28,6 @@ impl Request { pub fn is_proposal(&self) -> bool { match self { Request::Commit(_) => false, - - #[cfg(feature = "getblocktemplate-rpcs")] Request::CheckProposal(_) => true, } } diff --git a/zebra-consensus/src/router.rs b/zebra-consensus/src/router.rs index fb56699ce6b..83caa83a712 100644 --- a/zebra-consensus/src/router.rs +++ b/zebra-consensus/src/router.rs @@ -192,7 +192,6 @@ where let block = request.block(); match block.coinbase_height() { - #[cfg(feature = "getblocktemplate-rpcs")] // There's currently no known use case for block proposals below the checkpoint height, // so it's okay to immediately return an error here. 
Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => { diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index ce9e48100f3..7f28671e75d 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -19,11 +19,6 @@ default = [] # Production features that activate extra dependencies, or extra features in dependencies -# Mining RPC support -getblocktemplate-rpcs = [ - "zebra-chain/getblocktemplate-rpcs", -] - # Tool and test features rpc-client = [ diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index 6c035e6dc44..feaadf65095 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -65,7 +65,6 @@ pub enum Request { // // TODO: make the Transactions response return VerifiedUnminedTx, // and remove the FullTransactions variant - #[cfg(feature = "getblocktemplate-rpcs")] FullTransactions, /// Query matching cached rejected transaction IDs in the mempool, @@ -133,7 +132,6 @@ pub enum Response { // // TODO: make the Transactions response return VerifiedUnminedTx, // and remove the FullTransactions variant - #[cfg(feature = "getblocktemplate-rpcs")] FullTransactions { /// All [`VerifiedUnminedTx`]s in the mempool transactions: Vec, diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 9012504fe2d..cdc1f8d8262 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -30,16 +30,8 @@ indexer-rpcs = [ "zebra-state/indexer" ] -# Production features that activate extra dependencies, or extra features in dependencies - -# Mining RPC support -getblocktemplate-rpcs = [ - "zcash_address", - "zebra-consensus/getblocktemplate-rpcs", - "zebra-state/getblocktemplate-rpcs", - "zebra-node-services/getblocktemplate-rpcs", - "zebra-chain/getblocktemplate-rpcs", -] +# Production features that activate extra dependencies, or extra features in +# dependencies # Experimental internal miner support internal-miner = [] @@ -93,8 +85,7 
@@ nix = { workspace = true, features = ["signal"] } zcash_primitives = { workspace = true, features = ["transparent-inputs"] } zcash_protocol.workspace = true -# ECC deps used by getblocktemplate-rpcs feature -zcash_address = { workspace = true, optional = true} +zcash_address = { workspace = true } # Test-only feature proptest-impl proptest = { workspace = true, optional = true } diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index 2a91d14334b..a6711c3f4cc 100644 --- a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -80,12 +80,7 @@ impl Default for Config { // Disable indexer RPCs by default. indexer_listen_addr: None, - // Use a single thread, so we can detect RPC port conflicts. - #[cfg(not(feature = "getblocktemplate-rpcs"))] - parallel_cpu_threads: 1, - // Use multiple threads, because we pause requests during getblocktemplate long polling - #[cfg(feature = "getblocktemplate-rpcs")] parallel_cpu_threads: 0, // Debug options are always off by default. diff --git a/zebra-rpc/src/config/mining.rs b/zebra-rpc/src/config/mining.rs index 0594d96b822..224d5b8fa1e 100644 --- a/zebra-rpc/src/config/mining.rs +++ b/zebra-rpc/src/config/mining.rs @@ -60,14 +60,6 @@ impl Default for Config { } impl Config { - /// Return true if `getblocktemplate-rpcs` rust feature is not turned on, false otherwise. - /// - /// This is used to ignore the mining section of the configuration if the feature is not - /// enabled, allowing us to log a warning when the config found is different from the default. - pub fn skip_getblocktemplate(&self) -> bool { - !cfg!(feature = "getblocktemplate-rpcs") - } - /// Is the internal miner enabled using at least one thread? #[cfg(feature = "internal-miner")] pub fn is_internal_miner_enabled(&self) -> bool { diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 6a5ddf5f755..51bc33e0bc9 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,7 +6,6 @@ //! 
Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. -#[cfg(feature = "getblocktemplate-rpcs")] use std::collections::HashMap; use std::{collections::HashSet, fmt::Debug, sync::Arc}; @@ -63,13 +62,10 @@ pub mod trees; pub mod types; use types::GetRawMempool; -#[cfg(feature = "getblocktemplate-rpcs")] use types::MempoolObject; -#[cfg(feature = "getblocktemplate-rpcs")] pub mod get_block_template_rpcs; -#[cfg(feature = "getblocktemplate-rpcs")] pub use get_block_template_rpcs::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; #[cfg(test)] @@ -1140,25 +1136,19 @@ where #[allow(unused)] let verbose = verbose.unwrap_or(false); - #[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::block::MAX_BLOCK_BYTES; - #[cfg(feature = "getblocktemplate-rpcs")] // Determines whether the output of this RPC is sorted like zcashd let should_use_zcashd_order = self.debug_like_zcashd; let mut mempool = self.mempool.clone(); - #[cfg(feature = "getblocktemplate-rpcs")] let request = if should_use_zcashd_order || verbose { mempool::Request::FullTransactions } else { mempool::Request::TransactionIds }; - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let request = mempool::Request::TransactionIds; - // `zcashd` doesn't check if it is synced to the tip here, so we don't either. let response = mempool .ready() @@ -1167,7 +1157,6 @@ where .map_misc_error()?; match response { - #[cfg(feature = "getblocktemplate-rpcs")] mempool::Response::FullTransactions { mut transactions, transaction_dependencies, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index b8dbb810a75..d808575f0ea 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1,4 +1,4 @@ -//! RPC methods related to mining only available with `getblocktemplate-rpcs` rust feature. +//! 
Mining-related RPCs. use std::{fmt::Debug, sync::Arc, time::Duration}; @@ -83,10 +83,6 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) /// method: post /// tags: blockchain - /// - /// # Notes - /// - /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. #[method(name = "getblockcount")] fn get_block_count(&self) -> Result; @@ -105,7 +101,6 @@ pub trait GetBlockTemplateRpc { /// /// - If `index` is positive then index = block height. /// - If `index` is negative then -1 is the last known valid block. - /// - This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. #[method(name = "getblockhash")] async fn get_block_hash(&self, index: i32) -> Result; @@ -130,8 +125,6 @@ pub trait GetBlockTemplateRpc { /// /// Zebra verifies blocks in parallel, and keeps recent chains in parallel, /// so moving between chains and forking chains is very cheap. - /// - /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. #[method(name = "getblocktemplate")] async fn get_block_template( &self, @@ -218,10 +211,6 @@ pub trait GetBlockTemplateRpc { /// # Parameters /// /// - `address`: (string, required) The zcash address to validate. - /// - /// # Notes - /// - /// - No notes #[method(name = "validateaddress")] async fn validate_address(&self, address: String) -> Result; diff --git a/zebra-rpc/src/methods/tests.rs b/zebra-rpc/src/methods/tests.rs index f98d41d6fb3..4b2710ea8fd 100644 --- a/zebra-rpc/src/methods/tests.rs +++ b/zebra-rpc/src/methods/tests.rs @@ -2,6 +2,5 @@ mod prop; mod snapshot; -#[cfg(feature = "getblocktemplate-rpcs")] pub mod utils; mod vectors; diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 0e8db0f36e6..bfb2d7003a4 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -1,6 +1,5 @@ //! 
Randomised property tests for RPC methods. -#[cfg(feature = "getblocktemplate-rpcs")] use std::collections::HashMap; use std::{collections::HashSet, fmt::Debug, sync::Arc}; @@ -30,7 +29,6 @@ use zebra_state::{BoxError, GetBlockTemplateChainInfo}; use zebra_test::mock_service::MockService; -#[cfg(feature = "getblocktemplate-rpcs")] use crate::methods::types::MempoolObject; use crate::methods::{ self, @@ -246,28 +244,7 @@ proptest! { tokio::time::pause(); runtime.block_on(async move { - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let (expected_response, mempool_query) = { - let transaction_ids: HashSet<_> = transactions - .iter() - .map(|tx| tx.transaction.id) - .collect(); - - let mut expected_response: Vec = transaction_ids - .iter() - .map(|id| id.mined_id().encode_hex()) - .collect(); - expected_response.sort(); - - let mempool_query = mempool - .expect_request(mempool::Request::TransactionIds) - .map_ok(|r|r.respond(mempool::Response::TransactionIds(transaction_ids))); - - (GetRawMempool::TxIds(expected_response), mempool_query) - }; - // Note: this depends on `SHOULD_USE_ZCASHD_ORDER` being true. 
- #[cfg(feature = "getblocktemplate-rpcs")] let (expected_response, mempool_query) = { let mut expected_response = transactions.clone(); diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index dd3a49f2c57..bad3b85b478 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -32,7 +32,6 @@ use zebra_test::mock_service::MockService; use super::super::*; -#[cfg(feature = "getblocktemplate-rpcs")] mod get_block_template_rpcs; /// The first block height in the state that can never be stored in the database, @@ -180,8 +179,8 @@ async fn test_rpc_response_data_for_network(network: &Network) { let mut mempool: MockService<_, _, _, zebra_node_services::BoxError> = MockService::build().for_unit_tests(); + // Create a populated state service - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] let (state, read_state, latest_chain_tip, _chain_tip_change) = zebra_state::populated_state(blocks.clone(), network).await; @@ -189,8 +188,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { let mut settings = insta::Settings::clone_current(); settings.set_snapshot_suffix(format!("{}_{}", network_string(network), blocks.len() - 1)); - // Test getblocktemplate-rpcs snapshots - #[cfg(feature = "getblocktemplate-rpcs")] + // Test the `getblocktemplate` RPC snapshots. get_block_template_rpcs::test_responses( network, mempool.clone(), @@ -388,7 +386,6 @@ async fn test_rpc_response_data_for_network(network: &Network) { // - as we have the mempool mocked we need to expect a request and wait for a response, // which will be an empty mempool in this case. // Note: this depends on `SHOULD_USE_ZCASHD_ORDER` being true. 
- #[cfg(feature = "getblocktemplate-rpcs")] let mempool_req = mempool .expect_request_that(|request| matches!(request, mempool::Request::FullTransactions)) .map(|responder| { @@ -399,15 +396,6 @@ async fn test_rpc_response_data_for_network(network: &Network) { }); }); - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let mempool_req = mempool - .expect_request_that(|request| matches!(request, mempool::Request::TransactionIds)) - .map(|responder| { - responder.respond(mempool::Response::TransactionIds( - std::collections::HashSet::new(), - )); - }); - // make the api call let get_raw_mempool = rpc.get_raw_mempool(None); let (response, _) = futures::join!(get_raw_mempool, mempool_req); diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index df6cfbacea9..46dc26ce780 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -2,7 +2,7 @@ //! //! To update these snapshots, run: //! ```sh -//! cargo insta test --review --features getblocktemplate-rpcs --delete-unreferenced-snapshots +//! cargo insta test --review --delete-unreferenced-snapshots //! 
``` use std::{ diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index d18aef9e6ee..5ee5819f521 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1255,7 +1255,6 @@ async fn rpc_getaddressutxos_response() { } #[tokio::test(flavor = "multi_thread")] -#[cfg(feature = "getblocktemplate-rpcs")] async fn rpc_getblockcount() { use zebra_chain::chain_sync_status::MockSyncStatus; use zebra_network::address_book_peers::MockAddressBookPeers; @@ -1315,7 +1314,6 @@ async fn rpc_getblockcount() { mempool.expect_no_requests().await; } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getblockcount_empty_state() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -1369,7 +1367,6 @@ async fn rpc_getblockcount_empty_state() { mempool.expect_no_requests().await; } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getpeerinfo() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -1487,7 +1484,6 @@ async fn rpc_getpeerinfo() { mempool.expect_no_requests().await; } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getblockhash() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -1557,7 +1553,6 @@ async fn rpc_getblockhash() { mempool.expect_no_requests().await; } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getmininginfo() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -1594,7 +1589,6 @@ async fn rpc_getmininginfo() { .expect("get_mining_info call should succeed"); } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getnetworksolps() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -1663,7 +1657,6 @@ async fn rpc_getnetworksolps() { } } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn 
rpc_getblocktemplate() { // test getblocktemplate with a miner P2SH address @@ -1672,7 +1665,6 @@ async fn rpc_getblocktemplate() { rpc_getblocktemplate_mining_address(false).await; } -#[cfg(feature = "getblocktemplate-rpcs")] async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { use zebra_chain::{ amount::NonNegative, @@ -2006,7 +1998,6 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { mempool.expect_no_requests().await; } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_submitblock_errors() { use zebra_chain::chain_sync_status::MockSyncStatus; @@ -2082,7 +2073,6 @@ async fn rpc_submitblock_errors() { // See zebrad::tests::acceptance::submit_block for success case. } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_validateaddress() { use get_block_template_rpcs::types::validate_address; @@ -2128,7 +2118,6 @@ async fn rpc_validateaddress() { ); } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_z_validateaddress() { use get_block_template_rpcs::types::z_validate_address; @@ -2174,7 +2163,6 @@ async fn rpc_z_validateaddress() { ); } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_getdifficulty() { use zebra_chain::{ @@ -2338,7 +2326,6 @@ async fn rpc_getdifficulty() { assert_eq!(format!("{:.2}", get_difficulty.unwrap()), "4096.00"); } -#[cfg(feature = "getblocktemplate-rpcs")] #[tokio::test(flavor = "multi_thread")] async fn rpc_z_listunifiedreceivers() { let _init_guard = zebra_test::init(); diff --git a/zebra-rpc/src/methods/types/get_raw_mempool.rs b/zebra-rpc/src/methods/types/get_raw_mempool.rs index 882ac98b8fc..4fefcea5516 100644 --- a/zebra-rpc/src/methods/types/get_raw_mempool.rs +++ b/zebra-rpc/src/methods/types/get_raw_mempool.rs @@ -1,17 +1,13 @@ //! Types used in `getrawmempool` RPC method. 
use std::collections::HashMap; -#[cfg(feature = "getblocktemplate-rpcs")] use std::collections::HashSet; -#[cfg(feature = "getblocktemplate-rpcs")] use hex::ToHex as _; use super::Zec; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::transaction::VerifiedUnminedTx; use zebra_chain::{amount::NonNegative, block::Height}; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_node_services::mempool::TransactionDependencies; /// Response to a `getrawmempool` RPC request. @@ -55,7 +51,6 @@ pub struct MempoolObject { } impl MempoolObject { - #[cfg(feature = "getblocktemplate-rpcs")] pub(crate) fn from_verified_unmined_tx( unmined_tx: &VerifiedUnminedTx, transactions: &[VerifiedUnminedTx], diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index b1f7e2e8faf..15245ed7c49 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -31,7 +31,6 @@ use crate::{ }, }; -#[cfg(feature = "getblocktemplate-rpcs")] use crate::methods::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; pub mod cookie; @@ -106,21 +105,16 @@ impl RpcServer { AddressBook, >( config: Config, - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] mining_config: crate::config::mining::Config, build_version: VersionString, user_agent: UserAgentString, mempool: Mempool, state: State, - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] block_verifier_router: BlockVerifierRouter, - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] sync_status: SyncStatus, - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] address_book: AddressBook, latest_chain_tip: Tip, network: Network, - #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] mined_block_sender: Option>, last_event: LoggedLastEvent, ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> @@ -162,7 +156,6 @@ impl RpcServer { .listen_addr .expect("caller should make sure listen_addr is set"); - 
#[cfg(feature = "getblocktemplate-rpcs")] // Initialize the getblocktemplate rpc method handler let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( &network, @@ -182,10 +175,7 @@ impl RpcServer { user_agent, network.clone(), config.debug_force_finished_sync, - #[cfg(feature = "getblocktemplate-rpcs")] mining_config.debug_like_zcashd, - #[cfg(not(feature = "getblocktemplate-rpcs"))] - true, mempool, state, latest_chain_tip, @@ -220,11 +210,7 @@ impl RpcServer { .expect("Unable to get local address"); info!("{OPENED_RPC_ENDPOINT_MSG}{}", addr); - #[cfg(feature = "getblocktemplate-rpcs")] let mut rpc_module = rpc_impl.into_rpc(); - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let rpc_module = rpc_impl.into_rpc(); - #[cfg(feature = "getblocktemplate-rpcs")] rpc_module .merge(get_block_template_rpc_impl.into_rpc()) .unwrap(); diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs index cf54de4e8b2..20bdf27aa7b 100644 --- a/zebra-rpc/src/server/error.rs +++ b/zebra-rpc/src/server/error.rs @@ -70,7 +70,6 @@ pub(crate) trait MapError: Sized { fn map_error(self, code: impl Into) -> std::result::Result; /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a prefixed message and a specific error code. 
- #[cfg(feature = "getblocktemplate-rpcs")] fn map_error_with_prefix( self, code: impl Into, @@ -107,7 +106,6 @@ where self.map_err(|error| ErrorObject::owned(code.into().code(), error.to_string(), None::<()>)) } - #[cfg(feature = "getblocktemplate-rpcs")] fn map_error_with_prefix( self, code: impl Into, diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index f7695cf8e84..1b49b1f6b87 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -22,11 +22,6 @@ progress-bar = [ "howudoin", ] -# Mining RPC support -getblocktemplate-rpcs = [ - "zebra-chain/getblocktemplate-rpcs", -] - # Indexes spending transaction ids by spent outpoints and revealed nullifiers indexer = [] diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 125021afb7f..25f865dff69 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -860,7 +860,6 @@ pub enum Request { /// Returns [`Response::KnownBlock(None)`](Response::KnownBlock) otherwise. KnownBlock(block::Hash), - #[cfg(feature = "getblocktemplate-rpcs")] /// Performs contextual validation of the given block, but does not commit it to the state. /// /// Returns [`Response::ValidBlockProposal`] when successful. @@ -891,7 +890,6 @@ impl Request { Request::BestChainNextMedianTimePast => "best_chain_next_median_time_past", Request::BestChainBlockHash(_) => "best_chain_block_hash", Request::KnownBlock(_) => "known_block", - #[cfg(feature = "getblocktemplate-rpcs")] Request::CheckBlockProposalValidity(_) => "check_block_proposal_validity", } } @@ -1160,7 +1158,6 @@ pub enum ReadRequest { /// best chain state information. ChainInfo, - #[cfg(feature = "getblocktemplate-rpcs")] /// Get the average solution rate in the best chain. /// /// Returns [`ReadResponse::SolutionRate`] @@ -1172,7 +1169,6 @@ pub enum ReadRequest { height: Option, }, - #[cfg(feature = "getblocktemplate-rpcs")] /// Performs contextual validation of the given block, but does not commit it to the state. 
/// /// It is the caller's responsibility to perform semantic validation. @@ -1182,7 +1178,6 @@ pub enum ReadRequest { /// the block fails contextual validation. CheckBlockProposalValidity(SemanticallyVerifiedBlock), - #[cfg(feature = "getblocktemplate-rpcs")] /// Returns [`ReadResponse::TipBlockSize(usize)`](ReadResponse::TipBlockSize) /// with the current best chain tip block size in bytes. TipBlockSize, @@ -1220,11 +1215,8 @@ impl ReadRequest { #[cfg(feature = "indexer")] ReadRequest::SpendingTransactionId(_) => "spending_transaction_id", ReadRequest::ChainInfo => "chain_info", - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::SolutionRate { .. } => "solution_rate", - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::CheckBlockProposalValidity(_) => "check_block_proposal_validity", - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::TipBlockSize => "tip_block_size", } } @@ -1282,7 +1274,6 @@ impl TryFrom for ReadRequest { Request::KnownBlock(_) => Err("ReadService does not track queued blocks"), - #[cfg(feature = "getblocktemplate-rpcs")] Request::CheckBlockProposalValidity(semantically_verified) => Ok( ReadRequest::CheckBlockProposalValidity(semantically_verified), ), diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 321b5d8935f..916efdd5adc 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -90,7 +90,6 @@ pub enum Response { /// Response to [`Request::KnownBlock`]. KnownBlock(Option), - #[cfg(feature = "getblocktemplate-rpcs")] /// Response to [`Request::CheckBlockProposalValidity`] ValidBlockProposal, } @@ -254,15 +253,12 @@ pub enum ReadResponse { /// information needed by the `getblocktemplate` RPC method. 
ChainInfo(GetBlockTemplateChainInfo), - #[cfg(feature = "getblocktemplate-rpcs")] /// Response to [`ReadRequest::SolutionRate`] SolutionRate(Option), - #[cfg(feature = "getblocktemplate-rpcs")] /// Response to [`ReadRequest::CheckBlockProposalValidity`] ValidBlockProposal, - #[cfg(feature = "getblocktemplate-rpcs")] /// Response to [`ReadRequest::TipBlockSize`] TipBlockSize(Option), } @@ -362,10 +358,8 @@ impl TryFrom for Response { #[cfg(feature = "indexer")] ReadResponse::TransactionId(_) => Err("there is no corresponding Response for this ReadResponse"), - #[cfg(feature = "getblocktemplate-rpcs")] ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal), - #[cfg(feature = "getblocktemplate-rpcs")] ReadResponse::SolutionRate(_) | ReadResponse::TipBlockSize(_) => { Err("there is no corresponding Response for this ReadResponse") } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 1c2d36b1911..c4399c3d701 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -39,7 +39,6 @@ use zebra_chain::{ subtree::NoteCommitmentSubtreeIndex, }; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::{block::Height, serialization::ZcashSerialize}; use crate::{ @@ -1113,7 +1112,6 @@ impl Service for StateService { .boxed() } - #[cfg(feature = "getblocktemplate-rpcs")] Request::CheckBlockProposalValidity(_) => { // Redirect the request to the concurrent ReadStateService let read_service = self.read_service.clone(); @@ -1894,7 +1892,6 @@ impl Service for ReadStateService { } // Used by getmininginfo, getnetworksolps, and getnetworkhashps RPCs. 
- #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::SolutionRate { num_blocks, height } => { let state = self.clone(); @@ -1946,7 +1943,6 @@ impl Service for ReadStateService { .wait_for_panics() } - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::CheckBlockProposalValidity(semantically_verified) => { let state = self.clone(); @@ -1994,7 +1990,6 @@ impl Service for ReadStateService { .wait_for_panics() } - #[cfg(feature = "getblocktemplate-rpcs")] ReadRequest::TipBlockSize => { let state = self.clone(); diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 35027a64a60..4f18b54ddac 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -65,7 +65,6 @@ pub struct NonFinalizedState { /// with a commit to a cloned non-finalized state. // // TODO: make this field private and set it via an argument to NonFinalizedState::new() - #[cfg(feature = "getblocktemplate-rpcs")] should_count_metrics: bool, /// Number of chain forks transmitter. @@ -87,7 +86,6 @@ impl std::fmt::Debug for NonFinalizedState { f.field("chain_set", &self.chain_set) .field("network", &self.network); - #[cfg(feature = "getblocktemplate-rpcs")] f.field("should_count_metrics", &self.should_count_metrics); f.finish() @@ -100,14 +98,10 @@ impl Clone for NonFinalizedState { chain_set: self.chain_set.clone(), network: self.network.clone(), invalidated_blocks: self.invalidated_blocks.clone(), - - #[cfg(feature = "getblocktemplate-rpcs")] should_count_metrics: self.should_count_metrics, - // Don't track progress in clones. 
#[cfg(feature = "progress-bar")] chain_count_bar: None, - #[cfg(feature = "progress-bar")] chain_fork_length_bars: Vec::new(), } @@ -121,7 +115,6 @@ impl NonFinalizedState { chain_set: Default::default(), network: network.clone(), invalidated_blocks: Default::default(), - #[cfg(feature = "getblocktemplate-rpcs")] should_count_metrics: true, #[cfg(feature = "progress-bar")] chain_count_bar: None, @@ -753,13 +746,8 @@ impl NonFinalizedState { } /// Should this `NonFinalizedState` instance track metrics and progress bars? - #[allow(dead_code)] fn should_count_metrics(&self) -> bool { - #[cfg(feature = "getblocktemplate-rpcs")] - return self.should_count_metrics; - - #[cfg(not(feature = "getblocktemplate-rpcs"))] - return true; + self.should_count_metrics } /// Update the metrics after `block` is committed @@ -901,10 +889,7 @@ impl NonFinalizedState { /// Stop tracking metrics for this non-finalized state and all its chains. pub fn disable_metrics(&mut self) { - #[cfg(feature = "getblocktemplate-rpcs")] - { - self.should_count_metrics = false; - } + self.should_count_metrics = false; #[cfg(feature = "progress-bar")] { diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index eef1f4397a3..782319e3762 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -34,7 +34,6 @@ required-features = ["search-issue-refs"] name = "block-template-to-proposal" # this setting is required for Zebra's Docker build caches path = "src/bin/block-template-to-proposal/main.rs" -required-features = ["getblocktemplate-rpcs"] [[bin]] name = "openapi-generator" @@ -59,13 +58,6 @@ search-issue-refs = [ "tokio" ] -# block-template-to-proposal uses the experimental mining RPC support feature name -getblocktemplate-rpcs = [ - "zebra-rpc/getblocktemplate-rpcs", - "zebra-node-services/getblocktemplate-rpcs", - "zebra-chain/getblocktemplate-rpcs", -] - shielded-scan = [ "itertools", "jsonrpc", @@ -74,7 +66,6 @@ shielded-scan = [ ] openapi-generator = [ - "zebra-rpc", "syn", 
"quote", "serde_yml", @@ -98,7 +89,7 @@ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.4 zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } # These crates are needed for the zebra-checkpoints binary itertools = { workspace = true, optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index cf37db69612..5f702b0488b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -49,12 +49,11 @@ features = [ "prometheus", "sentry", "indexer", - "getblocktemplate-rpcs" ] [features] # In release builds, don't compile debug logging code, to improve performance. -default = ["release_max_level_info", "progress-bar", "getblocktemplate-rpcs"] +default = ["release_max_level_info", "progress-bar"] # Default features for official ZF binary release builds default-release-binaries = ["default", "sentry"] @@ -64,14 +63,8 @@ default-release-binaries = ["default", "sentry"] # Indexer support indexer = ["zebra-rpc/indexer-rpcs", "zebra-state/indexer"] -# Mining RPC support -getblocktemplate-rpcs = [ - "zebra-rpc/getblocktemplate-rpcs", - "zebra-consensus/getblocktemplate-rpcs", - "zebra-state/getblocktemplate-rpcs", - "zebra-node-services/getblocktemplate-rpcs", - "zebra-chain/getblocktemplate-rpcs", -] +# TODO: Remove this feature when releasing Zebra 3.0 (#9412). 
+getblocktemplate-rpcs = [] # Experimental internal miner support internal-miner = [ @@ -79,7 +72,6 @@ internal-miner = [ "zebra-chain/internal-miner", # TODO: move common code into zebra-chain or zebra-node-services and remove the RPC dependency "zebra-rpc/internal-miner", - "zebra-rpc/getblocktemplate-rpcs", ] # Experimental elasticsearch indexing diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 45e4554a08e..532c00e4778 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -86,7 +86,6 @@ use zebra_chain::block::genesis::regtest_genesis_block; use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; use zebra_rpc::server::RpcServer; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use crate::{ @@ -240,15 +239,6 @@ impl StartCmd { // And give it time to clear its queue tokio::task::yield_now().await; - #[cfg(not(feature = "getblocktemplate-rpcs"))] - if config.mining != zebra_rpc::config::mining::Config::default() { - warn!( - "Unused mining section in config,\ - compile with 'getblocktemplate-rpcs' feature to use mining RPCs" - ); - } - - #[cfg(feature = "getblocktemplate-rpcs")] // Create a channel to send mined blocks to the gossip task let submit_block_channel = SubmitBlockChannel::new(); @@ -269,10 +259,7 @@ impl StartCmd { address_book.clone(), latest_chain_tip.clone(), config.network.network.clone(), - #[cfg(feature = "getblocktemplate-rpcs")] Some(submit_block_channel.sender()), - #[cfg(not(feature = "getblocktemplate-rpcs"))] - None, LAST_WARN_ERROR_LOG_SENDER.subscribe(), ); rpc_task_handle.await.unwrap() @@ -316,10 +303,7 @@ impl StartCmd { sync_status.clone(), chain_tip_change.clone(), peer_set.clone(), - #[cfg(feature = "getblocktemplate-rpcs")] Some(submit_block_channel.receiver()), - #[cfg(not(feature = "getblocktemplate-rpcs"))] - None, ) .in_current_span(), ); diff --git 
a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index ab0e36f48a3..965e8481ad3 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -24,7 +24,6 @@ use zebra_network::{ AddressBook, InventoryResponse, Request, Response, }; use zebra_node_services::mempool; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -983,17 +982,13 @@ async fn setup( // Pretend we're close to tip SyncStatus::sync_close_to_tip(&mut recent_syncs); - #[cfg(feature = "getblocktemplate-rpcs")] let submitblock_channel = SubmitBlockChannel::new(); let sync_gossip_task_handle = tokio::spawn( sync::gossip_best_tip_block_hashes( sync_status.clone(), chain_tip_change.clone(), peer_set.clone(), - #[cfg(feature = "getblocktemplate-rpcs")] Some(submitblock_channel.receiver()), - #[cfg(not(feature = "getblocktemplate-rpcs"))] - None, ) .in_current_span(), ); diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 82e337dc8c5..4195ede43ad 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -21,7 +21,6 @@ use zebra_network::{ Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, }; use zebra_node_services::mempool; -#[cfg(feature = "getblocktemplate-rpcs")] use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; use zebra_state::Config as StateConfig; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -733,17 +732,13 @@ async fn setup( // We can't expect or unwrap because the returned Result does not implement Debug 
assert!(r.is_ok(), "unexpected setup channel send failure"); - #[cfg(feature = "getblocktemplate-rpcs")] let submitblock_channel = SubmitBlockChannel::new(); let block_gossip_task_handle = tokio::spawn(sync::gossip_best_tip_block_hashes( sync_status.clone(), chain_tip_change, peer_set.clone(), - #[cfg(feature = "getblocktemplate-rpcs")] Some(submitblock_channel.receiver()), - #[cfg(not(feature = "getblocktemplate-rpcs"))] - None, )); let tx_gossip_task_handle = tokio::spawn(gossip_mempool_transaction_id( @@ -798,7 +793,6 @@ async fn setup( ) } -#[cfg(feature = "getblocktemplate-rpcs")] mod submitblock_test { use std::io; use std::sync::{Arc, Mutex}; diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 22292c530c3..8141df3c31c 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -717,10 +717,7 @@ impl Service for Mempool { ActiveState::Enabled { storage, tx_downloads, - #[cfg(feature = "getblocktemplate-rpcs")] last_seen_tip_hash, - #[cfg(not(feature = "getblocktemplate-rpcs"))] - last_seen_tip_hash: _, } => match req { // Queries Request::TransactionIds => { @@ -791,7 +788,6 @@ impl Service for Mempool { response_fut.boxed() } - #[cfg(feature = "getblocktemplate-rpcs")] Request::FullTransactions => { trace!(?req, "got mempool request"); @@ -878,7 +874,6 @@ impl Service for Mempool { .boxed() } - #[cfg(feature = "getblocktemplate-rpcs")] Request::FullTransactions => { return async move { Err("mempool is not active: wait for Zebra to sync to the tip".into()) diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs index a6174599ef8..ae36c100bbc 100644 --- a/zebrad/src/config.rs +++ b/zebrad/src/config.rs @@ -51,7 +51,6 @@ pub struct ZebradConfig { /// RPC configuration pub rpc: zebra_rpc::config::Config, - #[serde(skip_serializing_if = "zebra_rpc::config::mining::Config::skip_getblocktemplate")] /// Mining configuration pub mining: zebra_rpc::config::mining::Config, } diff --git a/zebrad/src/lib.rs 
b/zebrad/src/lib.rs index 66338891559..d0319b17b45 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -61,10 +61,6 @@ //! features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options) //! are available at compile time: //! -//! ### JSON-RPC -//! -//! * `getblocktemplate-rpcs`: Mining pool RPC support, enabled by default in production builds. -//! //! ### Metrics //! //! * configuring a `tracing.progress_bar`: shows key metrics in the terminal using progress bars, diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 73fb03eced8..9f9624bd401 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -113,13 +113,13 @@ //! Example of how to run the get_block_template test: //! //! ```console -//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test get_block_template --release -- --ignored --nocapture //! ``` //! //! Example of how to run the submit_block test: //! //! ```console -//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHE_DIR=/path/to/zebra/state cargo test submit_block --release -- --ignored --nocapture //! ``` //! //! Example of how to run the has_spending_transaction_ids test: @@ -219,9 +219,6 @@ use crate::common::cached_state::{ /// This limit only applies to some tests. pub const MAX_ASYNC_BLOCKING_TIME: Duration = zebra_test::mock_service::DEFAULT_MAX_REQUEST_DELAY; -/// The test config file prefix for `--feature getblocktemplate-rpcs` configs. -pub const GET_BLOCK_TEMPLATE_CONFIG_PREFIX: &str = "getblocktemplate-"; - /// The test config file prefix for `--feature shielded-scan` configs. 
pub const SHIELDED_SCAN_CONFIG_PREFIX: &str = "shieldedscan-"; @@ -949,17 +946,6 @@ fn stored_configs_parsed_correctly() -> Result<()> { continue; } - // ignore files starting with getblocktemplate prefix - // if we were not built with the getblocktemplate-rpcs feature. - #[cfg(not(feature = "getblocktemplate-rpcs"))] - if config_file_name.starts_with(GET_BLOCK_TEMPLATE_CONFIG_PREFIX) { - tracing::info!( - ?config_file_path, - "skipping getblocktemplate-rpcs config file path" - ); - continue; - } - // ignore files starting with shieldedscan prefix // if we were not built with the shielded-scan feature. if config_file_name.starts_with(SHIELDED_SCAN_CONFIG_PREFIX) { @@ -1008,17 +994,6 @@ fn stored_configs_work() -> Result<()> { continue; } - // ignore files starting with getblocktemplate prefix - // if we were not built with the getblocktemplate-rpcs feature. - #[cfg(not(feature = "getblocktemplate-rpcs"))] - if config_file_name.starts_with(GET_BLOCK_TEMPLATE_CONFIG_PREFIX) { - tracing::info!( - ?config_file_path, - "skipping getblocktemplate-rpcs config file path" - ); - continue; - } - let run_dir = testdir()?; let stored_config_path = config_file_full_path(config_file.path()); @@ -2552,7 +2527,6 @@ async fn lightwalletd_wallet_grpc_tests() -> Result<()> { /// /// See [`common::get_block_template_rpcs::get_peer_info`] for more information. #[tokio::test] -#[cfg(feature = "getblocktemplate-rpcs")] async fn get_peer_info() -> Result<()> { common::get_block_template_rpcs::get_peer_info::run().await } @@ -2561,8 +2535,6 @@ async fn get_peer_info() -> Result<()> { /// /// See [`common::get_block_template_rpcs::get_block_template`] for more information. 
#[tokio::test] -#[ignore] -#[cfg(feature = "getblocktemplate-rpcs")] async fn get_block_template() -> Result<()> { common::get_block_template_rpcs::get_block_template::run().await } @@ -2571,8 +2543,6 @@ async fn get_block_template() -> Result<()> { /// /// See [`common::get_block_template_rpcs::submit_block`] for more information. #[tokio::test] -#[ignore] -#[cfg(feature = "getblocktemplate-rpcs")] async fn submit_block() -> Result<()> { common::get_block_template_rpcs::submit_block::run().await } @@ -2983,14 +2953,12 @@ fn external_address() -> Result<()> { /// See [`common::regtest::submit_blocks`] for more information. // TODO: Test this with an NU5 activation height too once config can be serialized. #[tokio::test] -#[cfg(feature = "getblocktemplate-rpcs")] async fn regtest_block_templates_are_valid_block_submissions() -> Result<()> { common::regtest::submit_blocks_test().await?; Ok(()) } #[tokio::test(flavor = "multi_thread")] -#[cfg(feature = "getblocktemplate-rpcs")] async fn trusted_chain_sync_handles_forks_correctly() -> Result<()> { use std::sync::Arc; @@ -3263,9 +3231,8 @@ async fn trusted_chain_sync_handles_forks_correctly() -> Result<()> { /// Test successful block template submission as a block proposal or submission on a custom Testnet. /// /// This test can be run locally with: -/// `cargo test --package zebrad --test acceptance --features getblocktemplate-rpcs -- nu6_funding_streams_and_coinbase_balance --exact --show-output` +/// `cargo test --package zebrad --test acceptance -- nu6_funding_streams_and_coinbase_balance --exact --show-output` #[tokio::test(flavor = "multi_thread")] -#[cfg(feature = "getblocktemplate-rpcs")] async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { use zebra_chain::{ chain_sync_status::MockSyncStatus, @@ -3736,7 +3703,7 @@ fn check_no_git_refs_in_cargo_lock() { // /// Check that Zebra will disconnect from misbehaving peers. 
// #[tokio::test] -// #[cfg(all(feature = "getblocktemplate-rpcs", not(target_os = "windows")))] +// #[cfg(not(target_os = "windows"))] // async fn disconnects_from_misbehaving_peers() -> Result<()> { // use std::sync::{atomic::AtomicBool, Arc}; diff --git a/zebrad/tests/common/config.rs b/zebrad/tests/common/config.rs index 4b3f86201fd..e50a5f33309 100644 --- a/zebrad/tests/common/config.rs +++ b/zebrad/tests/common/config.rs @@ -68,17 +68,14 @@ pub fn default_test_config(net: &Network) -> Result { #[allow(unused_mut)] let mut mining = zebra_rpc::config::mining::Config::default(); - #[cfg(feature = "getblocktemplate-rpcs")] - { - let miner_address = if network.network.is_a_test_network() { - // Assume test networks all use the same address prefix and format - "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" - } else { - "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" - }; - - mining.miner_address = Some(miner_address.parse().expect("hard-coded address is valid")); - } + let miner_address = if network.network.is_a_test_network() { + // Assume test networks all use the same address prefix and format + "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" + } else { + "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" + }; + + mining.miner_address = Some(miner_address.parse().expect("hard-coded address is valid")); Ok(ZebradConfig { network, diff --git a/zebrad/tests/common/mod.rs b/zebrad/tests/common/mod.rs index 6fe1b10f034..24e474e9a2e 100644 --- a/zebrad/tests/common/mod.rs +++ b/zebrad/tests/common/mod.rs @@ -11,18 +11,13 @@ pub mod cached_state; pub mod check; +#[cfg(feature = "zebra-checkpoints")] +pub mod checkpoints; pub mod config; pub mod failure_messages; +pub mod get_block_template_rpcs; pub mod launch; pub mod lightwalletd; +pub mod regtest; pub mod sync; pub mod test_type; - -#[cfg(feature = "zebra-checkpoints")] -pub mod checkpoints; - -#[cfg(feature = "getblocktemplate-rpcs")] -pub mod get_block_template_rpcs; - -#[cfg(feature = "getblocktemplate-rpcs")] -pub mod regtest; From 
c901adf17719e7722723a49d72c321150a604588 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 10 Apr 2025 19:59:43 -0300 Subject: [PATCH 147/245] feat(rpc): Add some missing fields to transaction object (#9329) * extend `getrawtransaction` verbose output * replace strings where possible, fix tests * fix orchard * add todos for script assembly code * fix an import * fmt * fmt * suggestion for #9329: change coinbase field and add valueSat field to match zcashd (#9407) * change coinbase field and add valueSat field to match zcashd * remove valueSat; update vectors --------- Co-authored-by: Conrado Gouvea --- zebra-chain/src/sapling/commitment.rs | 22 + zebra-chain/src/sapling/note/ciphertexts.rs | 12 + zebra-chain/src/transaction/tests/vectors.rs | 18 + zebra-chain/src/transparent.rs | 16 + zebra-rpc/src/methods.rs | 54 +-- ...k_verbose_hash_verbosity_2@mainnet_10.snap | 40 +- ...k_verbose_hash_verbosity_2@testnet_10.snap | 40 +- ...verbose_height_verbosity_2@mainnet_10.snap | 40 +- ...verbose_height_verbosity_2@testnet_10.snap | 40 +- ...rawtransaction_verbosity=1@mainnet_10.snap | 41 +- ...rawtransaction_verbosity=1@testnet_10.snap | 41 +- zebra-rpc/src/methods/tests/vectors.rs | 205 ++++++--- zebra-rpc/src/methods/types.rs | 2 + zebra-rpc/src/methods/types/transaction.rs | 420 ++++++++++++++++++ zebra-rpc/src/tests/vectors.rs | 16 +- zebra-state/src/lib.rs | 7 +- 16 files changed, 893 insertions(+), 121 deletions(-) create mode 100644 zebra-rpc/src/methods/types/transaction.rs diff --git a/zebra-chain/src/sapling/commitment.rs b/zebra-chain/src/sapling/commitment.rs index 5256d324bfa..3e139af08db 100644 --- a/zebra-chain/src/sapling/commitment.rs +++ b/zebra-chain/src/sapling/commitment.rs @@ -3,6 +3,7 @@ use std::{fmt, io}; use bitvec::prelude::*; +use hex::ToHex; use jubjub::ExtendedPoint; use lazy_static::lazy_static; use rand_core::{CryptoRng, RngCore}; @@ -303,6 +304,17 @@ lazy_static! 
{ #[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] pub struct NotSmallOrderValueCommitment(ValueCommitment); +impl NotSmallOrderValueCommitment { + /// Return the hash bytes in big-endian byte-order suitable for printing out byte by byte. + /// + /// Zebra displays commitment value in big-endian byte-order, + /// following the convention set by zcashd. + pub fn bytes_in_display_order(&self) -> [u8; 32] { + let mut reversed_bytes = self.0 .0.to_bytes(); + reversed_bytes.reverse(); + reversed_bytes + } +} impl TryFrom for NotSmallOrderValueCommitment { type Error = &'static str; @@ -365,6 +377,16 @@ impl ZcashDeserialize for NotSmallOrderValueCommitment { } } +impl ToHex for &NotSmallOrderValueCommitment { + fn encode_hex>(&self) -> T { + self.bytes_in_display_order().encode_hex() + } + + fn encode_hex_upper>(&self) -> T { + self.bytes_in_display_order().encode_hex_upper() + } +} + #[cfg(test)] mod tests { diff --git a/zebra-chain/src/sapling/note/ciphertexts.rs b/zebra-chain/src/sapling/note/ciphertexts.rs index 75d25730627..41175bae352 100644 --- a/zebra-chain/src/sapling/note/ciphertexts.rs +++ b/zebra-chain/src/sapling/note/ciphertexts.rs @@ -59,6 +59,12 @@ impl ZcashDeserialize for EncryptedNote { } } +impl From for [u8; 580] { + fn from(note: EncryptedNote) -> Self { + note.0 + } +} + /// A ciphertext component for encrypted output notes. 
/// /// Corresponds to Sapling's 'outCiphertext' @@ -112,6 +118,12 @@ impl ZcashDeserialize for WrappedNoteKey { } } +impl From for [u8; 80] { + fn from(note: WrappedNoteKey) -> Self { + note.0 + } +} + #[cfg(test)] use proptest::prelude::*; #[cfg(test)] diff --git a/zebra-chain/src/transaction/tests/vectors.rs b/zebra-chain/src/transaction/tests/vectors.rs index 673125bc016..71c59e60099 100644 --- a/zebra-chain/src/transaction/tests/vectors.rs +++ b/zebra-chain/src/transaction/tests/vectors.rs @@ -952,3 +952,21 @@ fn binding_signatures() { assert!(at_least_one_v5_checked); } } + +#[test] +fn test_coinbase_script() -> Result<()> { + let _init_guard = zebra_test::init(); + + let tx = hex::decode("0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503b0e72100ffffffff04e8bbe60e000000001976a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac40787d010000000017a914931fec54c1fea86e574462cc32013f5400b891298738c94d010000000017a914c7a4285ed7aed78d8c0e28d7f1839ccb4046ab0c87286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c908700000000b0e721000000000000000000000000").unwrap(); + + let transaction = tx.zcash_deserialize_into::()?; + + let recoded_tx = transaction.zcash_serialize_to_vec().unwrap(); + assert_eq!(tx, recoded_tx); + + let data = transaction.inputs()[0].coinbase_script().unwrap(); + let expected = hex::decode("03b0e72100").unwrap(); + assert_eq!(data, expected); + + Ok(()) +} diff --git a/zebra-chain/src/transparent.rs b/zebra-chain/src/transparent.rs index 47d7bfd42c1..bbf6a8c8dfe 100644 --- a/zebra-chain/src/transparent.rs +++ b/zebra-chain/src/transparent.rs @@ -253,6 +253,22 @@ impl Input { } } + /// Returns the full coinbase script (the encoded height along with the + /// extra data) if this is an [`Input::Coinbase`]. Also returns `None` if + /// the coinbase is for the genesis block but does not match the expected + /// genesis coinbase data. 
+ pub fn coinbase_script(&self) -> Option> { + match self { + Input::PrevOut { .. } => None, + Input::Coinbase { height, data, .. } => { + let mut height_and_data = Vec::new(); + serialize::write_coinbase_height(*height, data, &mut height_and_data).ok()?; + height_and_data.extend(&data.0); + Some(height_and_data) + } + } + } + /// Returns the input's sequence number. pub fn sequence(&self) -> u32 { match self { diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 51bc33e0bc9..fcc821204f3 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -7,7 +7,7 @@ //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. use std::collections::HashMap; -use std::{collections::HashSet, fmt::Debug, sync::Arc}; +use std::{collections::HashSet, fmt::Debug}; use chrono::Utc; use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt}; @@ -63,6 +63,7 @@ pub mod types; use types::GetRawMempool; use types::MempoolObject; +use types::TransactionObject; pub mod get_block_template_rpcs; @@ -930,6 +931,7 @@ where .try_into() .expect("should be less than max block height, i32::MAX"), ), + &network, )) }) .collect(); @@ -1247,6 +1249,7 @@ where tx.transaction.clone(), None, None, + &self.network, )) } else { let hex = tx.transaction.clone().into(); @@ -1270,6 +1273,7 @@ where tx.tx.clone(), Some(tx.height), Some(tx.confirmations), + &self.network, )) } else { let hex = tx.tx.into(); @@ -2382,7 +2386,7 @@ impl Default for GetBlockHash { /// Response to a `getrawtransaction` RPC request. /// /// See the notes for the [`Rpc::get_raw_transaction` method]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] pub enum GetRawTransaction { /// The raw transaction, encoded as hex bytes. @@ -2397,52 +2401,6 @@ impl Default for GetRawTransaction { } } -/// A Transaction object as returned by `getrawtransaction` and `getblock` RPC -/// requests. 
-#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub struct TransactionObject { - /// The raw transaction, encoded as hex bytes. - #[serde(with = "hex")] - pub hex: SerializedTransaction, - /// The height of the block in the best chain that contains the tx or `None` if the tx is in - /// the mempool. - #[serde(skip_serializing_if = "Option::is_none")] - pub height: Option, - /// The height diff between the block containing the tx and the best chain tip + 1 or `None` - /// if the tx is in the mempool. - #[serde(skip_serializing_if = "Option::is_none")] - pub confirmations: Option, - // TODO: many fields not yet supported -} - -impl Default for TransactionObject { - fn default() -> Self { - Self { - hex: SerializedTransaction::from( - [0u8; zebra_chain::transaction::MIN_TRANSPARENT_TX_SIZE as usize].to_vec(), - ), - height: Option::default(), - confirmations: Option::default(), - } - } -} - -impl TransactionObject { - /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. - #[allow(clippy::unwrap_in_result)] - fn from_transaction( - tx: Arc, - height: Option, - confirmations: Option, - ) -> Self { - Self { - hex: tx.into(), - height: height.map(|height| height.0), - confirmations, - } - } -} - /// Response to a `getaddressutxos` RPC request. /// /// See the notes for the [`Rpc::get_address_utxos` method]. 
diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 16890f1845e..375fee775b3 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -15,7 +15,45 @@ expression: block { "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "5100", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a9147d46a730d31f97b1930d3368a967c309bd4d136a87", + "reqSigs": 1, + "type": "", + "addresses": [ + "t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } ], "time": 1477671596, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 088a2cc297c..4f84590dc61 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -15,7 +15,45 @@ expression: block { "hex": 
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "510101", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a914ef775f1f997f122a062fff1a2d7443abd1f9c64287", + "reqSigs": 1, + "type": "", + "addresses": [ + "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } ], "time": 1477674473, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index 16890f1845e..375fee775b3 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -15,7 +15,45 @@ expression: block { "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "5100", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": 
"21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a9147d46a730d31f97b1930d3368a967c309bd4d136a87", + "reqSigs": 1, + "type": "", + "addresses": [ + "t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } ], "time": 1477671596, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index 088a2cc297c..4f84590dc61 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -15,7 +15,45 @@ expression: block { "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "510101", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a914ef775f1f997f122a062fff1a2d7443abd1f9c64287", + "reqSigs": 1, + "type": "", + "addresses": [ + "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } ], "time": 1477674473, diff --git 
a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap index b78a6686336..1e63140c2ad 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap @@ -1,12 +1,49 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs expression: rsp -snapshot_kind: text --- { "Ok": { "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025100ffffffff0250c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875acd43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "5100", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a9147d46a730d31f97b1930d3368a967c309bd4d136a87", + "reqSigs": 1, + "type": "", + "addresses": [ + "t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } } diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap index ab133db9b1a..f35046e222d 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap @@ -1,12 +1,49 @@ --- source: zebra-rpc/src/methods/tests/snapshot.rs expression: rsp 
-snapshot_kind: text --- { "Ok": { "hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff03510101ffffffff0250c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99acd43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c6428700000000", "height": 1, - "confirmations": 10 + "confirmations": 10, + "vin": [ + { + "coinbase": "510101", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 0.0005, + "valueZat": 50000, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", + "reqSigs": 0, + "type": "", + "addresses": [] + } + }, + { + "value": 0.000125, + "valueZat": 12500, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a914ef775f1f997f122a062fff1a2d7443abd1f9c64287", + "reqSigs": 1, + "type": "", + "addresses": [ + "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0 } } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 5ee5819f521..3d54f179eb8 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -314,42 +314,82 @@ async fn rpc_getblock() { let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; - assert_eq!( - get_block, - GetBlock::Object { - hash: GetBlockHash(block.hash()), - confirmations: (blocks.len() - i).try_into().expect("valid i64"), - height: Some(Height(i.try_into().expect("valid u32"))), - time: Some(block.header.time.timestamp()), - tx: block - .transactions - .iter() - .map(|tx| GetBlockTransaction::Object(TransactionObject { - hex: (*tx).clone().into(), - height: Some(i.try_into().expect("valid u32")), - confirmations: Some((blocks.len() - i).try_into().expect("valid i64")) - })) - .collect(), - trees, - size: 
Some(block.zcash_serialize_to_vec().unwrap().len() as i64), - version: Some(block.header.version), - merkle_root: Some(block.header.merkle_root), - block_commitments: Some(expected_block_commitments), - final_sapling_root: Some(expected_final_sapling_root), - final_orchard_root: None, - nonce: Some(expected_nonce), - bits: Some(block.header.difficulty_threshold), - difficulty: Some( + // partially compare the expected and actual GetBlock structs + if let GetBlock::Object { + hash, + confirmations, + height, + time, + tx, + trees, + size, + version, + merkle_root, + block_commitments, + final_sapling_root, + final_orchard_root, + nonce, + bits, + difficulty, + previous_block_hash, + next_block_hash, + solution, + } = &get_block + { + assert_eq!(hash, &GetBlockHash(block.hash())); + assert_eq!(confirmations, &((blocks.len() - i) as i64)); + assert_eq!(height, &Some(Height(i.try_into().expect("valid u32")))); + assert_eq!(time, &Some(block.header.time.timestamp())); + assert_eq!(trees, trees); + assert_eq!( + size, + &Some(block.zcash_serialize_to_vec().unwrap().len() as i64) + ); + assert_eq!(version, &Some(block.header.version)); + assert_eq!(merkle_root, &Some(block.header.merkle_root)); + assert_eq!(block_commitments, &Some(expected_block_commitments)); + assert_eq!(final_sapling_root, &Some(expected_final_sapling_root)); + assert_eq!(final_orchard_root, &None); + assert_eq!(nonce, &Some(expected_nonce)); + assert_eq!(bits, &Some(block.header.difficulty_threshold)); + assert_eq!( + difficulty, + &Some( block .header .difficulty_threshold .relative_to_network(&Mainnet) - ), - previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), - next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), - solution: Some(block.header.solution), + ) + ); + assert_eq!( + previous_block_hash, + &Some(GetBlockHash(block.header.previous_block_hash)) + ); + assert_eq!( + next_block_hash, + &blocks.get(i + 1).map(|b| GetBlockHash(b.hash())) + ); + 
assert_eq!(solution, &Some(block.header.solution)); + + for (actual_tx, expected_tx) in tx.iter().zip(block.transactions.iter()) { + if let GetBlockTransaction::Object(TransactionObject { + hex, + height, + confirmations, + .. + }) = actual_tx + { + assert_eq!(hex, &(*expected_tx).clone().into()); + assert_eq!(height, &Some(i.try_into().expect("valid u32"))); + assert_eq!( + confirmations, + &Some((blocks.len() - i).try_into().expect("valid i64")) + ); + } } - ); + } else { + panic!("Expected GetBlock::Object"); + } } // Make hash calls with verbosity=2 and check response @@ -362,42 +402,82 @@ async fn rpc_getblock() { let (expected_nonce, expected_final_sapling_root, expected_block_commitments) = get_block_data(&read_state, block.clone(), i).await; - assert_eq!( - get_block, - GetBlock::Object { - hash: GetBlockHash(block.hash()), - confirmations: (blocks.len() - i).try_into().expect("valid i64"), - height: Some(Height(i as u32)), - time: Some(block.header.time.timestamp()), - tx: block - .transactions - .iter() - .map(|tx| GetBlockTransaction::Object(TransactionObject { - hex: (*tx).clone().into(), - height: Some(i.try_into().expect("valid u32")), - confirmations: Some((blocks.len() - i).try_into().expect("valid i64")) - })) - .collect(), - trees, - size: Some(block.zcash_serialize_to_vec().unwrap().len() as i64), - version: Some(block.header.version), - merkle_root: Some(block.header.merkle_root), - block_commitments: Some(expected_block_commitments), - final_sapling_root: Some(expected_final_sapling_root), - final_orchard_root: None, - nonce: Some(expected_nonce), - bits: Some(block.header.difficulty_threshold), - difficulty: Some( + // partially compare the expected and actual GetBlock structs + if let GetBlock::Object { + hash, + confirmations, + height, + time, + tx, + trees, + size, + version, + merkle_root, + block_commitments, + final_sapling_root, + final_orchard_root, + nonce, + bits, + difficulty, + previous_block_hash, + next_block_hash, + solution, + 
} = &get_block + { + assert_eq!(hash, &GetBlockHash(block.hash())); + assert_eq!(confirmations, &((blocks.len() - i) as i64)); + assert_eq!(height, &Some(Height(i.try_into().expect("valid u32")))); + assert_eq!(time, &Some(block.header.time.timestamp())); + assert_eq!(trees, trees); + assert_eq!( + size, + &Some(block.zcash_serialize_to_vec().unwrap().len() as i64) + ); + assert_eq!(version, &Some(block.header.version)); + assert_eq!(merkle_root, &Some(block.header.merkle_root)); + assert_eq!(block_commitments, &Some(expected_block_commitments)); + assert_eq!(final_sapling_root, &Some(expected_final_sapling_root)); + assert_eq!(final_orchard_root, &None); + assert_eq!(nonce, &Some(expected_nonce)); + assert_eq!(bits, &Some(block.header.difficulty_threshold)); + assert_eq!( + difficulty, + &Some( block .header .difficulty_threshold .relative_to_network(&Mainnet) - ), - previous_block_hash: Some(GetBlockHash(block.header.previous_block_hash)), - next_block_hash: blocks.get(i + 1).map(|b| GetBlockHash(b.hash())), - solution: Some(block.header.solution), + ) + ); + assert_eq!( + previous_block_hash, + &Some(GetBlockHash(block.header.previous_block_hash)) + ); + assert_eq!( + next_block_hash, + &blocks.get(i + 1).map(|b| GetBlockHash(b.hash())) + ); + assert_eq!(solution, &Some(block.header.solution)); + + for (actual_tx, expected_tx) in tx.iter().zip(block.transactions.iter()) { + if let GetBlockTransaction::Object(TransactionObject { + hex, + height, + confirmations, + .. + }) = actual_tx + { + assert_eq!(hex, &(*expected_tx).clone().into()); + assert_eq!(height, &Some(i.try_into().expect("valid u32"))); + assert_eq!( + confirmations, + &Some((blocks.len() - i).try_into().expect("valid i64")) + ); + } } - ); + } else { + panic!("Expected GetBlock::Object"); + } } // Make height calls with no verbosity (defaults to 1) and check response @@ -880,6 +960,7 @@ async fn rpc_getrawtransaction() { hex, height, confirmations, + .. 
}) = response.expect("We should have a GetRawTransaction struct") else { unreachable!("Should return a Raw enum") diff --git a/zebra-rpc/src/methods/types.rs b/zebra-rpc/src/methods/types.rs index dcb6d530a69..07ceb58e1f0 100644 --- a/zebra-rpc/src/methods/types.rs +++ b/zebra-rpc/src/methods/types.rs @@ -2,8 +2,10 @@ mod get_blockchain_info; mod get_raw_mempool; +mod transaction; mod zec; pub use get_blockchain_info::Balance; pub use get_raw_mempool::{GetRawMempool, MempoolObject}; +pub use transaction::{Input, TransactionObject}; pub use zec::Zec; diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs new file mode 100644 index 00000000000..26df8c4dd7a --- /dev/null +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -0,0 +1,420 @@ +//! Verbose transaction-related types. + +use std::sync::Arc; + +use hex::ToHex; + +use zebra_chain::{ + block, + parameters::Network, + sapling::NotSmallOrderValueCommitment, + transaction::{SerializedTransaction, Transaction}, + transparent::Script, +}; +use zebra_consensus::groth16::Description; +use zebra_state::IntoDisk; + +use crate::methods::types; + +/// A Transaction object as returned by `getrawtransaction` and `getblock` RPC +/// requests. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub struct TransactionObject { + /// The raw transaction, encoded as hex bytes. + #[serde(with = "hex")] + pub hex: SerializedTransaction, + /// The height of the block in the best chain that contains the tx or `None` if the tx is in + /// the mempool. + #[serde(skip_serializing_if = "Option::is_none")] + pub height: Option, + /// The height diff between the block containing the tx and the best chain tip + 1 or `None` + /// if the tx is in the mempool. + #[serde(skip_serializing_if = "Option::is_none")] + pub confirmations: Option, + + /// Transparent inputs of the transaction. 
+ #[serde(rename = "vin", skip_serializing_if = "Option::is_none")] + pub inputs: Option>, + + /// Transparent outputs of the transaction. + #[serde(rename = "vout", skip_serializing_if = "Option::is_none")] + pub outputs: Option>, + + /// Sapling spends of the transaction. + #[serde(rename = "vShieldedSpend", skip_serializing_if = "Option::is_none")] + pub shielded_spends: Option>, + + /// Sapling outputs of the transaction. + #[serde(rename = "vShieldedOutput", skip_serializing_if = "Option::is_none")] + pub shielded_outputs: Option>, + + /// Orchard actions of the transaction. + #[serde(rename = "orchard", skip_serializing_if = "Option::is_none")] + pub orchard: Option, + + /// The net value of Sapling Spends minus Outputs in ZEC + #[serde(rename = "valueBalance", skip_serializing_if = "Option::is_none")] + pub value_balance: Option, + + /// The net value of Sapling Spends minus Outputs in zatoshis + #[serde(rename = "valueBalanceZat", skip_serializing_if = "Option::is_none")] + pub value_balance_zat: Option, + // TODO: some fields not yet supported +} + +/// The transparent input of a transaction. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[serde(untagged)] +pub enum Input { + /// A coinbase input. + Coinbase { + /// The coinbase scriptSig as hex. + #[serde(with = "hex")] + coinbase: Vec, + /// The script sequence number. + sequence: u32, + }, + /// A non-coinbase input. + NonCoinbase { + /// The transaction id. + txid: String, + /// The vout index. + vout: u32, + /// The script. + script_sig: ScriptSig, + /// The script sequence number. + sequence: u32, + /// The value of the output being spent in ZEC. + #[serde(skip_serializing_if = "Option::is_none")] + value: Option, + /// The value of the output being spent, in zats. + #[serde(rename = "valueZat", skip_serializing_if = "Option::is_none")] + value_zat: Option, + /// The address of the output being spent. 
+ #[serde(skip_serializing_if = "Option::is_none")] + address: Option, + }, +} + +/// The transparent output of a transaction. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub struct Output { + /// The value in ZEC. + value: f64, + /// The value in zats. + #[serde(rename = "valueZat")] + value_zat: i64, + /// index. + n: u32, + /// The scriptPubKey. + #[serde(rename = "scriptPubKey")] + script_pub_key: ScriptPubKey, +} + +/// The scriptPubKey of a transaction output. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub struct ScriptPubKey { + /// the asm. + // #9330: The `asm` field is not currently populated. + asm: String, + /// the hex. + #[serde(with = "hex")] + hex: Script, + /// The required sigs. + #[serde(rename = "reqSigs")] + req_sigs: u32, + /// The type, eg 'pubkeyhash'. + // #9330: The `type` field is not currently populated. + r#type: String, + /// The addresses. + addresses: Vec, +} + +/// The scriptSig of a transaction input. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub struct ScriptSig { + /// The asm. + // #9330: The `asm` field is not currently populated. + asm: String, + /// The hex. + #[serde(with = "hex")] + hex: Script, +} + +/// A Sapling spend of a transaction. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct ShieldedSpend { + /// Value commitment to the input note. + #[serde(with = "hex")] + cv: NotSmallOrderValueCommitment, + /// Merkle root of the Sapling note commitment tree. + #[serde(with = "hex")] + anchor: [u8; 32], + /// The nullifier of the input note. + #[serde(with = "hex")] + nullifier: [u8; 32], + /// The randomized public key for spendAuthSig. + #[serde(with = "hex")] + rk: [u8; 32], + /// A zero-knowledge proof using the Sapling Spend circuit. + #[serde(with = "hex")] + proof: [u8; 192], + /// A signature authorizing this Spend. + #[serde(rename = "spendAuthSig", with = "hex")] + spend_auth_sig: [u8; 64], +} + +/// A Sapling output of a transaction. 
+#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct ShieldedOutput { + /// Value commitment to the input note. + #[serde(with = "hex")] + cv: NotSmallOrderValueCommitment, + /// The u-coordinate of the note commitment for the output note. + #[serde(rename = "cmu", with = "hex")] + cm_u: [u8; 32], + /// A Jubjub public key. + #[serde(rename = "ephemeralKey", with = "hex")] + ephemeral_key: [u8; 32], + /// The output note encrypted to the recipient. + #[serde(rename = "encCiphertext", with = "hex")] + enc_ciphertext: [u8; 580], + /// A ciphertext enabling the sender to recover the output note. + #[serde(rename = "outCiphertext", with = "hex")] + out_ciphertext: [u8; 80], + /// A zero-knowledge proof using the Sapling Output circuit. + #[serde(with = "hex")] + proof: [u8; 192], +} + +/// Object with Orchard-specific information. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub struct Orchard { + /// Array of Orchard actions. + actions: Vec, + /// The net value of Orchard Actions in ZEC. + #[serde(rename = "valueBalance")] + value_balance: f64, + /// The net value of Orchard Actions in zatoshis. + #[serde(rename = "valueBalanceZat")] + value_balance_zat: i64, +} + +/// The Orchard action of a transaction. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct OrchardAction { + /// A value commitment to the net value of the input note minus the output note. + #[serde(with = "hex")] + cv: [u8; 32], + /// The nullifier of the input note. + #[serde(with = "hex")] + nullifier: [u8; 32], + /// The randomized validating key for spendAuthSig. + #[serde(with = "hex")] + rk: [u8; 32], + /// The x-coordinate of the note commitment for the output note. + #[serde(rename = "cmx", with = "hex")] + cm_x: [u8; 32], + /// An encoding of an ephemeral Pallas public key. + #[serde(rename = "ephemeralKey", with = "hex")] + ephemeral_key: [u8; 32], + /// The output note encrypted to the recipient. 
+ #[serde(rename = "encCiphertext", with = "hex")] + enc_ciphertext: [u8; 580], + /// A ciphertext enabling the sender to recover the output note. + #[serde(rename = "spendAuthSig", with = "hex")] + spend_auth_sig: [u8; 64], + /// A signature authorizing the spend in this Action. + #[serde(rename = "outCiphertext", with = "hex")] + out_ciphertext: [u8; 80], +} + +impl Default for TransactionObject { + fn default() -> Self { + Self { + hex: SerializedTransaction::from( + [0u8; zebra_chain::transaction::MIN_TRANSPARENT_TX_SIZE as usize].to_vec(), + ), + height: Option::default(), + confirmations: Option::default(), + inputs: None, + outputs: None, + shielded_spends: None, + shielded_outputs: None, + orchard: None, + value_balance: None, + value_balance_zat: None, + } + } +} + +impl TransactionObject { + /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. + #[allow(clippy::unwrap_in_result)] + pub(crate) fn from_transaction( + tx: Arc, + height: Option, + confirmations: Option, + network: &Network, + ) -> Self { + Self { + hex: tx.clone().into(), + height: height.map(|height| height.0), + confirmations, + inputs: Some( + tx.inputs() + .iter() + .map(|input| match input { + zebra_chain::transparent::Input::Coinbase { sequence, .. 
} => { + Input::Coinbase { + coinbase: input + .coinbase_script() + .expect("we know it is a valid coinbase script"), + sequence: *sequence, + } + } + zebra_chain::transparent::Input::PrevOut { + sequence, + unlock_script, + outpoint, + } => Input::NonCoinbase { + txid: outpoint.hash.encode_hex(), + vout: outpoint.index, + script_sig: ScriptSig { + asm: "".to_string(), + hex: unlock_script.clone(), + }, + sequence: *sequence, + value: None, + value_zat: None, + address: None, + }, + }) + .collect(), + ), + outputs: Some( + tx.outputs() + .iter() + .enumerate() + .map(|output| { + let addresses = match output.1.address(network) { + Some(address) => vec![address.to_string()], + None => vec![], + }; + + Output { + value: types::Zec::from(output.1.value).lossy_zec(), + value_zat: output.1.value.zatoshis(), + n: output.0 as u32, + script_pub_key: ScriptPubKey { + asm: "".to_string(), + hex: output.1.lock_script.clone(), + req_sigs: addresses.len() as u32, + r#type: "".to_string(), + addresses, + }, + } + }) + .collect(), + ), + shielded_spends: Some( + tx.sapling_spends_per_anchor() + .map(|spend| { + let mut anchor = spend.per_spend_anchor.as_bytes(); + anchor.reverse(); + + let mut nullifier = spend.nullifier.as_bytes(); + nullifier.reverse(); + + let mut rk: [u8; 32] = spend.clone().rk.into(); + rk.reverse(); + + let spend_auth_sig: [u8; 64] = spend.spend_auth_sig.into(); + + ShieldedSpend { + cv: spend.cv, + anchor, + nullifier, + rk, + proof: spend.proof().0, + spend_auth_sig, + } + }) + .collect(), + ), + shielded_outputs: Some( + tx.sapling_outputs() + .map(|output| { + let mut ephemeral_key: [u8; 32] = output.ephemeral_key.into(); + ephemeral_key.reverse(); + let enc_ciphertext: [u8; 580] = output.enc_ciphertext.into(); + let out_ciphertext: [u8; 80] = output.out_ciphertext.into(); + + ShieldedOutput { + cv: output.cv, + cm_u: output.cm_u.to_bytes(), + ephemeral_key, + enc_ciphertext, + out_ciphertext, + proof: output.proof().0, + } + }) + .collect(), + ), + 
value_balance: Some( + types::Zec::from(tx.sapling_value_balance().sapling_amount()).lossy_zec(), + ), + value_balance_zat: Some(tx.sapling_value_balance().sapling_amount().zatoshis()), + + orchard: if !tx.has_orchard_shielded_data() { + None + } else { + Some(Orchard { + actions: tx + .orchard_actions() + .collect::>() + .iter() + .map(|action| { + let spend_auth_sig: [u8; 64] = tx + .orchard_shielded_data() + .and_then(|shielded_data| { + shielded_data + .actions + .iter() + .find(|authorized_action| { + authorized_action.action == **action + }) + .map(|authorized_action| { + authorized_action.spend_auth_sig.into() + }) + }) + .unwrap_or([0; 64]); + + let cv: [u8; 32] = action.cv.into(); + let nullifier: [u8; 32] = action.nullifier.into(); + let rk: [u8; 32] = action.rk.into(); + let cm_x: [u8; 32] = action.cm_x.into(); + let ephemeral_key: [u8; 32] = action.ephemeral_key.into(); + let enc_ciphertext: [u8; 580] = action.enc_ciphertext.into(); + let out_ciphertext: [u8; 80] = action.out_ciphertext.into(); + + OrchardAction { + cv, + nullifier, + rk, + cm_x, + ephemeral_key, + enc_ciphertext, + spend_auth_sig, + out_ciphertext, + } + }) + .collect(), + value_balance: types::Zec::from(tx.orchard_value_balance().orchard_amount()) + .lossy_zec(), + value_balance_zat: tx.orchard_value_balance().orchard_amount().zatoshis(), + }) + }, + } + } +} diff --git a/zebra-rpc/src/tests/vectors.rs b/zebra-rpc/src/tests/vectors.rs index 0ca221c2cf5..48fd389f480 100644 --- a/zebra-rpc/src/tests/vectors.rs +++ b/zebra-rpc/src/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed Zebra RPC serialization test vectors. 
-use crate::methods::{GetBlock, GetRawTransaction, TransactionObject}; +use crate::methods::{types::TransactionObject, GetBlock, GetRawTransaction}; #[test] pub fn test_transaction_serialization() { @@ -12,6 +12,13 @@ pub fn test_transaction_serialization() { hex: vec![0x42].into(), height: Some(1), confirmations: Some(0), + inputs: None, + outputs: None, + shielded_spends: None, + shielded_outputs: None, + value_balance: None, + value_balance_zat: None, + orchard: None, }); assert_eq!( @@ -23,6 +30,13 @@ pub fn test_transaction_serialization() { hex: vec![0x42].into(), height: None, confirmations: None, + inputs: None, + outputs: None, + shielded_spends: None, + shielded_outputs: None, + value_balance: None, + value_balance_zat: None, + orchard: None, }); assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"42"}"#); diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 3cf59bbc1ab..19fb8321756 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -70,10 +70,13 @@ pub use service::finalized_state::{ // Allow use in the scanner and external tests #[cfg(any(test, feature = "proptest-impl", feature = "shielded-scan"))] pub use service::finalized_state::{ - DiskWriteBatch, FromDisk, IntoDisk, ReadDisk, TypedColumnFamily, WriteDisk, WriteTypedBatch, + DiskWriteBatch, FromDisk, ReadDisk, TypedColumnFamily, WriteDisk, WriteTypedBatch, }; -pub use service::{finalized_state::ZebraDb, ReadStateService}; +pub use service::{ + finalized_state::{IntoDisk, ZebraDb}, + ReadStateService, +}; // Allow use in external tests #[cfg(any(test, feature = "proptest-impl"))] From 4e1bb0e5e18a31faca1d8037e7ee0379802584ba Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 11 Apr 2025 15:37:28 +0300 Subject: [PATCH 148/245] change(command): Use read-only db instance when running `tip-height` or `copy-state` commands (#9359) * read tip height from read-only db instance when running "tip-height" command. 
* use ReadStateService for the source state in the copy-state cmd --- zebra-state/src/service.rs | 10 ++++++---- zebrad/src/commands/copy_state.rs | 32 +++++++++++++------------------ zebrad/src/commands/tip_height.rs | 21 ++++++++------------ 3 files changed, 27 insertions(+), 36 deletions(-) diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index c4399c3d701..5130b6aca9a 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -777,10 +777,7 @@ impl StateService { /// Return the tip of the current best chain. pub fn best_tip(&self) -> Option<(block::Height, block::Hash)> { - read::best_tip( - &self.read_service.latest_non_finalized_state(), - &self.read_service.db, - ) + self.read_service.best_tip() } /// Assert some assumptions about the semantically verified `block` before it is queued. @@ -818,6 +815,11 @@ impl ReadStateService { read_service } + /// Return the tip of the current best chain. + pub fn best_tip(&self) -> Option<(block::Height, block::Hash)> { + read::best_tip(&self.latest_non_finalized_state(), &self.db) + } + /// Gets a clone of the latest non-finalized state from the `non_finalized_state_receiver` fn latest_non_finalized_state(&self) -> NonFinalizedState { self.non_finalized_state_receiver.cloned_watch_data() diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index a285209b62d..128f1d30779 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -118,14 +118,8 @@ impl CopyStateCmd { let source_start_time = Instant::now(); // We're not verifying UTXOs here, so we don't need the maximum checkpoint height. - // - // TODO: use ReadStateService for the source? 
- let ( - mut source_state, - _source_read_only_state_service, - _source_latest_chain_tip, - _source_chain_tip_change, - ) = old_zs::spawn_init(source_config.clone(), network, Height::MAX, 0).await?; + let (mut source_read_only_state_service, _source_db, _source_latest_non_finalized_state) = + old_zs::spawn_init_read_only(source_config.clone(), network).await?; let elapsed = source_start_time.elapsed(); info!(?elapsed, "finished initializing source state service"); @@ -153,14 +147,14 @@ impl CopyStateCmd { info!("fetching source and target tip heights"); - let source_tip = source_state + let source_tip = source_read_only_state_service .ready() .await? - .call(old_zs::Request::Tip) + .call(old_zs::ReadRequest::Tip) .await?; let source_tip = match source_tip { - old_zs::Response::Tip(Some(source_tip)) => source_tip, - old_zs::Response::Tip(None) => Err("empty source state: no blocks to copy")?, + old_zs::ReadResponse::Tip(Some(source_tip)) => source_tip, + old_zs::ReadResponse::Tip(None) => Err("empty source state: no blocks to copy")?, response => Err(format!("unexpected response to Tip request: {response:?}",))?, }; @@ -210,17 +204,17 @@ impl CopyStateCmd { let copy_start_time = Instant::now(); for height in min_target_height..=max_copy_height { // Read block from source - let source_block = source_state + let source_block = source_read_only_state_service .ready() .await? - .call(old_zs::Request::Block(Height(height).into())) + .call(old_zs::ReadRequest::Block(Height(height).into())) .await?; let source_block = match source_block { - old_zs::Response::Block(Some(source_block)) => { + old_zs::ReadResponse::Block(Some(source_block)) => { trace!(?height, %source_block, "read source block"); source_block } - old_zs::Response::Block(None) => { + old_zs::ReadResponse::Block(None) => { Err(format!("unexpected missing source block, height: {height}",))? 
} @@ -328,13 +322,13 @@ impl CopyStateCmd { let final_target_tip_height = final_target_tip.0 .0; let final_target_tip_hash = final_target_tip.1; - let target_tip_source_depth = source_state + let target_tip_source_depth = source_read_only_state_service .ready() .await? - .call(old_zs::Request::Depth(final_target_tip_hash)) + .call(old_zs::ReadRequest::Depth(final_target_tip_hash)) .await?; let target_tip_source_depth = match target_tip_source_depth { - old_zs::Response::Depth(source_depth) => source_depth, + old_zs::ReadResponse::Depth(source_depth) => source_depth, response => Err(format!( "unexpected response to Depth request: {response:?}", diff --git a/zebrad/src/commands/tip_height.rs b/zebrad/src/commands/tip_height.rs index 29a124007dc..bf9c68858a5 100644 --- a/zebrad/src/commands/tip_height.rs +++ b/zebrad/src/commands/tip_height.rs @@ -10,11 +10,11 @@ use clap::Parser; use color_eyre::eyre::{eyre, Result}; use zebra_chain::{ - block::{self, Height}, - chain_tip::ChainTip, + block::{self}, parameters::Network, }; -use zebra_state::LatestChainTip; + +use zebra_state::ReadStateService; use crate::prelude::APPLICATION; @@ -46,25 +46,20 @@ impl Runnable for TipHeightCmd { impl TipHeightCmd { /// Load the chain tip height from the state cache directory. fn load_tip_height(&self) -> Result { - let latest_chain_tip = self.load_latest_chain_tip(); - - latest_chain_tip - .best_tip_height() + self.load_read_state() + .best_tip() + .map(|(height, _hash)| height) .ok_or_else(|| eyre!("State directory doesn't have a chain tip block")) } /// Starts a state service using the `cache_dir` and `network` from the provided arguments. - fn load_latest_chain_tip(&self) -> LatestChainTip { + fn load_read_state(&self) -> ReadStateService { let mut config = APPLICATION.config().state.clone(); if let Some(cache_dir) = self.cache_dir.clone() { config.cache_dir = cache_dir; } - // UTXO verification isn't used here: we're not updating the state. 
- let (_state_service, _read_state_service, latest_chain_tip, _chain_tip_change) = - zebra_state::init(config, &self.network, Height::MAX, 0); - - latest_chain_tip + zebra_state::init_read_only(config, &self.network).0 } } From 0e05fb3f5ce42fdfda8ceb7688bd99b2218acc2d Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Fri, 11 Apr 2025 11:33:54 -0300 Subject: [PATCH 149/245] feat(getaddresstxids): Make start and end fields optional and apply range rules to match zcashd (#9408) * make start and end fields optional and apply new rules to match zcashd * update docs to mark range fields as optionals * Apply suggestions from code review Co-authored-by: Arya --------- Co-authored-by: Arya --- zebra-rpc/src/methods.rs | 87 ++++++++----- zebra-rpc/src/methods/tests/snapshot.rs | 33 +++-- ...invalid_end_greater_start@mainnet_10.snap} | 2 +- ...invalid_end_greater_start@testnet_10.snap} | 2 +- ...ds_invalid_excessive_start@mainnet_10.snap | 10 -- ...ds_invalid_excessive_start@testnet_10.snap | 10 -- ...tx_ids_valid_excessive_end@mainnet_10.snap | 14 ++ ...tx_ids_valid_excessive_end@testnet_10.snap | 14 ++ ..._ids_valid_excessive_start@mainnet_10.snap | 7 + ..._ids_valid_excessive_start@testnet_10.snap | 7 + zebra-rpc/src/methods/tests/vectors.rs | 122 +++++++++++------- 11 files changed, 196 insertions(+), 112 deletions(-) rename zebra-rpc/src/methods/tests/snapshots/{get_address_tx_ids_invalid_excessive_end@mainnet_10.snap => get_address_tx_ids_invalid_end_greater_start@mainnet_10.snap} (50%) rename zebra-rpc/src/methods/tests/snapshots/{get_address_tx_ids_invalid_excessive_end@testnet_10.snap => get_address_tx_ids_invalid_end_greater_start@testnet_10.snap} (50%) delete mode 100644 zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@mainnet_10.snap delete mode 100644 zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@testnet_10.snap create mode 100644 
zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@testnet_10.snap diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index fcc821204f3..fb3f596ab65 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,8 +6,11 @@ //! Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. -use std::collections::HashMap; -use std::{collections::HashSet, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + ops::RangeInclusive, +}; use chrono::Utc; use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt}; @@ -308,8 +311,8 @@ pub trait Rpc { /// /// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields: /// - `addresses`: (json array of string, required) The addresses to get transactions from. - /// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive). - /// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive). + /// - `start`: (numeric, optional) The lower height to start looking for transactions (inclusive). + /// - `end`: (numeric, optional) The top height to stop looking for transactions (inclusive). 
/// /// # Notes /// @@ -1451,13 +1454,11 @@ where let mut state = self.state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); - let start = Height(request.start); - let end = Height(request.end); - - let chain_height = best_chain_tip_height(&latest_chain_tip)?; - - // height range checks - check_height_range(start, end, chain_height)?; + let height_range = build_height_range( + request.start, + request.end, + best_chain_tip_height(&latest_chain_tip)?, + )?; let valid_addresses = AddressStrings { addresses: request.addresses, @@ -1466,7 +1467,7 @@ where let request = zebra_state::ReadRequest::TransactionIdsByAddresses { addresses: valid_addresses, - height_range: start..=end, + height_range, }; let response = state .ready() @@ -2496,9 +2497,9 @@ pub struct GetAddressTxIdsRequest { // A list of addresses to get transactions from. addresses: Vec, // The height to start looking for transactions. - start: u32, + start: Option, // The height to end looking for transactions. - end: u32, + end: Option, } impl GetAddressTxIdsRequest { @@ -2506,13 +2507,17 @@ impl GetAddressTxIdsRequest { pub fn from_parts(addresses: Vec, start: u32, end: u32) -> Self { GetAddressTxIdsRequest { addresses, - start, - end, + start: Some(start), + end: Some(end), } } /// Returns the contents of [`GetAddressTxIdsRequest`]. pub fn into_parts(&self) -> (Vec, u32, u32) { - (self.addresses.clone(), self.start, self.end) + ( + self.addresses.clone(), + self.start.unwrap_or(0), + self.end.unwrap_or(0), + ) } } @@ -2578,15 +2583,36 @@ impl OrchardTrees { } } -/// Check if provided height range is valid for address indexes. 
-fn check_height_range(start: Height, end: Height, chain_height: Height) -> Result<()> { - if start == Height(0) || end == Height(0) { - return Err(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!("start {start:?} and end {end:?} must both be greater than zero"), - None::<()>, - )); - } +/// Build a valid height range from the given optional start and end numbers. +/// +/// # Parameters +/// +/// - `start`: Optional starting height. If not provided, defaults to 0. +/// - `end`: Optional ending height. A value of 0 or absence of a value indicates to use `chain_height`. +/// - `chain_height`: The maximum permissible height. +/// +/// # Returns +/// +/// A `RangeInclusive` from the clamped start to the clamped end. +/// +/// # Errors +/// +/// Returns an error if the computed start is greater than the computed end. +fn build_height_range( + start: Option, + end: Option, + chain_height: Height, +) -> Result> { + // Convert optional values to Height, using 0 (as Height(0)) when missing. + // If start is above chain_height, clamp it to chain_height. + let start = Height(start.unwrap_or(0)).min(chain_height); + + // For `end`, treat a zero value or missing value as `chain_height`: + let end = match end { + Some(0) | None => chain_height, + Some(val) => Height(val).min(chain_height), + }; + if start > end { return Err(ErrorObject::owned( ErrorCode::InvalidParams.code(), @@ -2594,15 +2620,8 @@ fn check_height_range(start: Height, end: Height, chain_height: Height) -> Resul None::<()>, )); } - if start > chain_height || end > chain_height { - return Err(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!("start {start:?} and end {end:?} must both be less than or equal to the chain tip {chain_height:?}"), - None::<()>, - )); - } - Ok(()) + Ok(start..=end) } /// Given a potentially negative index, find the corresponding `Height`. 
diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index bad3b85b478..ab32234fccd 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -466,8 +466,8 @@ async fn test_rpc_response_data_for_network(network: &Network) { let get_address_tx_ids = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), - start: 1, - end: 10, + start: Some(1), + end: Some(10), }) .await .expect("We should have a vector of strings"); @@ -476,8 +476,8 @@ async fn test_rpc_response_data_for_network(network: &Network) { let get_address_tx_ids = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), - start: 2, - end: 2, + start: Some(2), + end: Some(2), }) .await .expect("We should have a vector of strings"); @@ -486,20 +486,31 @@ async fn test_rpc_response_data_for_network(network: &Network) { let get_address_tx_ids = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), - start: 3, - end: EXCESSIVE_BLOCK_HEIGHT, + start: Some(3), + end: Some(EXCESSIVE_BLOCK_HEIGHT), }) - .await; - snapshot_rpc_getaddresstxids_invalid("excessive_end", get_address_tx_ids, &settings); + .await + .expect("We should have a vector of strings"); + snapshot_rpc_getaddresstxids_valid("excessive_end", get_address_tx_ids, &settings); + + let get_address_tx_ids = rpc + .get_address_tx_ids(GetAddressTxIdsRequest { + addresses: addresses.clone(), + start: Some(EXCESSIVE_BLOCK_HEIGHT), + end: Some(EXCESSIVE_BLOCK_HEIGHT + 1), + }) + .await + .expect("We should have a vector of strings"); + snapshot_rpc_getaddresstxids_valid("excessive_start", get_address_tx_ids, &settings); let get_address_tx_ids = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), - start: EXCESSIVE_BLOCK_HEIGHT, - end: EXCESSIVE_BLOCK_HEIGHT + 1, + start: Some(2), + end: Some(1), }) .await; - snapshot_rpc_getaddresstxids_invalid("excessive_start", 
get_address_tx_ids, &settings); + snapshot_rpc_getaddresstxids_invalid("end_greater_start", get_address_tx_ids, &settings); // `getaddressutxos` let get_address_utxos = rpc diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@mainnet_10.snap similarity index 50% rename from zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@mainnet_10.snap index e2fcae6abca..f68d3d513dd 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@mainnet_10.snap @@ -5,6 +5,6 @@ expression: transactions { "Err": { "code": -32602, - "message": "start Height(3) and end Height(16777216) must both be less than or equal to the chain tip Height(10)" + "message": "start Height(2) must be less than or equal to end Height(1)" } } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@testnet_10.snap similarity index 50% rename from zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@testnet_10.snap index e2fcae6abca..f68d3d513dd 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_end@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_end_greater_start@testnet_10.snap @@ -5,6 +5,6 @@ expression: transactions { "Err": { "code": -32602, - "message": "start Height(3) and end Height(16777216) must both be less than or equal to the chain tip Height(10)" 
+ "message": "start Height(2) must be less than or equal to end Height(1)" } } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@mainnet_10.snap deleted file mode 100644 index 4256ecc9330..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@mainnet_10.snap +++ /dev/null @@ -1,10 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: transactions ---- -{ - "Err": { - "code": -32602, - "message": "start Height(16777216) and end Height(16777217) must both be less than or equal to the chain tip Height(10)" - } -} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@testnet_10.snap deleted file mode 100644 index 4256ecc9330..00000000000 --- a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_invalid_excessive_start@testnet_10.snap +++ /dev/null @@ -1,10 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot.rs -expression: transactions ---- -{ - "Err": { - "code": -32602, - "message": "start Height(16777216) and end Height(16777217) must both be less than or equal to the chain tip Height(10)" - } -} diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@mainnet_10.snap new file mode 100644 index 00000000000..260981d1eb9 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@mainnet_10.snap @@ -0,0 +1,14 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: transactions +--- +[ + "4e3670eae21d0c7225716abacb3a95c203a75d7601594afddb07d175c3babe83", + "781bb9b0168831054c1ed3afd8bf153f3fc848e102cea86977545c86f6e2446e", + 
"9a4adaf3953818eb1634407032db0e00ef2441c49c1364161411d0743ec1a939", + "1dd2064423542cfee7ca4ec565acb37e1ad217035b6119668352165b20006fe1", + "26b813b0743f015e6b41519d4ca78ee7b5c76cbac5c8b8ac0ec6b9cf88b8328d", + "7a772e6fe8e6bbfc34f0a61ba472d9f9dfe3a5539adfdac2f3c6ae659d44de03", + "ff24ab621bf3e46ad195b47dca7f11fb376f53e23b2e14a6e9b6a1907cc27cdc", + "5f2702708af1d8727ad3f0da3ba74de14019232499c0324ddce236cf97e32548" +] diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@testnet_10.snap new file mode 100644 index 00000000000..ea24d0bd539 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_end@testnet_10.snap @@ -0,0 +1,14 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: transactions +--- +[ + "4a3bf3f814a3aef93423890c8afa9709229aaf3daf4da98c70d810253d3b9550", + "3373ed6deb1130f310d8788db5dfdb92e52980b34ca02ea124ced11aa247f80b", + "476480f7c2580a9e39b9d78892fea996c389e6627c8962700563c19b68cc7bee", + "23daf8408d825feb09dfeaaceccf1307ed1008265c7145573374872b332c57ab", + "47aebd007159819c19519a31bb87b8b40b9b09701fcc0e40bc61c98d283117f2", + "29f8982be208c9d8737200f0ecfd3f42c175b7dd67a0aba85812283fc443a443", + "9eec40dcf5f72aa0619472cbc3c336229fce2ff983b47ebccc7bf8800759781c", + "3887181b326c25e1ea1b6885c9b437280ca3372dc5b67af72423c88c18a1da2e" +] diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@mainnet_10.snap new file mode 100644 index 00000000000..2e6800fed11 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@mainnet_10.snap @@ -0,0 +1,7 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: transactions +--- +[ + "5f2702708af1d8727ad3f0da3ba74de14019232499c0324ddce236cf97e32548" 
+] diff --git a/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@testnet_10.snap new file mode 100644 index 00000000000..e263425f7f3 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_address_tx_ids_valid_excessive_start@testnet_10.snap @@ -0,0 +1,7 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: transactions +--- +[ + "3887181b326c25e1ea1b6885c9b437280ca3372dc5b67af72423c88c18a1da2e" +] diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 3d54f179eb8..70f3d483e7d 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1,6 +1,5 @@ //! Fixed test vectors for RPC methods. -use std::ops::RangeInclusive; use std::sync::Arc; use futures::FutureExt; @@ -1070,8 +1069,8 @@ async fn rpc_getaddresstxids_invalid_arguments() { let rpc_rsp = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: vec!["t1invalidaddress".to_owned()], - start: 1, - end: 2, + start: Some(1), + end: Some(2), }) .await .unwrap_err(); @@ -1085,8 +1084,8 @@ async fn rpc_getaddresstxids_invalid_arguments() { let addresses = vec![address.clone()]; // call the method with start greater than end - let start: u32 = 2; - let end: u32 = 1; + let start: Option<u32> = Some(2); + let end: Option<u32> = Some(1); let error = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), @@ -1100,38 +1099,6 @@ async fn rpc_getaddresstxids_invalid_arguments() { "start Height(2) must be less than or equal to end Height(1)".to_string() ); - // call the method with start equal zero - let start: u32 = 0; - let end: u32 = 1; - let error = rpc - .get_address_tx_ids(GetAddressTxIdsRequest { - addresses: addresses.clone(), - start, - end, - }) - .await - .unwrap_err(); - assert_eq!( - error.message(), - "start Height(0) and end Height(1) must both be greater than
zero".to_string() - ); - - // call the method outside the chain tip height - let start: u32 = 1; - let end: u32 = 11; - let error = rpc - .get_address_tx_ids(GetAddressTxIdsRequest { - addresses, - start, - end, - }) - .await - .unwrap_err(); - assert_eq!( - error.message(), - "start Height(1) and end Height(11) must both be less than or equal to the chain tip Height(10)".to_string() - ); - mempool.expect_no_requests().await; // The queue task should continue without errors or panics @@ -1167,16 +1134,16 @@ async fn rpc_getaddresstxids_response() { if network == Mainnet { // Exhaustively test possible block ranges for mainnet. - // - // TODO: if it takes too long on slower machines, turn this into a proptest with 10-20 cases for start in 1..=10 { for end in start..=10 { rpc_getaddresstxids_response_with( &network, - start..=end, + Some(start), + Some(end), &address, &read_state, &latest_chain_tip, + (end - start + 1) as usize, ) .await; } @@ -1185,22 +1152,87 @@ async fn rpc_getaddresstxids_response() { // Just test the full range for testnet. rpc_getaddresstxids_response_with( &network, - 1..=10, + Some(1), + Some(10), &address, &read_state, &latest_chain_tip, + // response should be limited to the chain size. + 10, ) .await; } + + // No range arguments should be equivalent to the full range. + rpc_getaddresstxids_response_with( + &network, + None, + None, + &address, + &read_state, + &latest_chain_tip, + 10, + ) + .await; + + // Range of 0s should be equivalent to the full range. + rpc_getaddresstxids_response_with( + &network, + Some(0), + Some(0), + &address, + &read_state, + &latest_chain_tip, + 10, + ) + .await; + + // Start and end outside of the range should use the chain tip. + rpc_getaddresstxids_response_with( + &network, + Some(11), + Some(11), + &address, + &read_state, + &latest_chain_tip, + 1, + ) + .await; + + // End outside the range should use the chain tip.
+ rpc_getaddresstxids_response_with( + &network, + None, + Some(11), + &address, + &read_state, + &latest_chain_tip, + 10, + ) + .await; + + // Start outside the range should use the chain tip. + rpc_getaddresstxids_response_with( + &network, + Some(11), + None, + &address, + &read_state, + &latest_chain_tip, + 1, + ) + .await; } } async fn rpc_getaddresstxids_response_with( network: &Network, - range: RangeInclusive<u32>, + start: Option<u32>, + end: Option<u32>, address: &transparent::Address, read_state: &ReadStateService, latest_chain_tip: &LatestChainTip, + expected_response_len: usize, ) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); @@ -1223,14 +1255,14 @@ async fn rpc_getaddresstxids_response_with( let response = rpc .get_address_tx_ids(GetAddressTxIdsRequest { addresses, - start: *range.start(), - end: *range.end(), + start, + end, }) .await .expect("arguments are valid so no error can happen here"); // One founders reward output per coinbase transactions, no other transactions. - assert_eq!(response.len(), range.count()); + assert_eq!(response.len(), expected_response_len); mempool.expect_no_requests().await; From aa7205d660803686c56bb5e0d9c9058804f64fe7 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 16 Apr 2025 13:27:32 +0100 Subject: [PATCH 150/245] refactor(docker): Enhance Zebra configuration options and entrypoint logic (#9344) * refactor(docker): Enhance Zebra configuration options and entrypoint logic - Introduced multiple methods for configuring Zebra, including custom config file paths and automatic generation from environment variables. - Updated entrypoint script to handle configuration more robustly, checking for existing files and generating defaults as needed. - Improved documentation in `docker.md` to clarify configuration options and their precedence. - Adjusted environment variable handling in `docker/.env` for better clarity and functionality.
- Refactored `prepare_conf_file` to create a complete configuration based on environment variables, streamlining the setup process. * fix(entrypoint): use correct `ZEBRA_RPC_PORT` variable This also removes the logic to set the default port for RPC as this was introducing a buggy logic * fix(docker): remove non-essential variables and set correct defaults * chore(docker): use the default config for docker-compose as the default approach * Apply suggestions from code review Co-authored-by: Marek * chore: better comment for user understanding * fix(entrypoint): set permissions correctly for parent directories * chore: align documentation and implementation * chore: clearer and aligned comments with docker-compose * fix(entrypoint): chown the zebra config file when `ZEBRA_CONF_PATH` is not set --------- Co-authored-by: Marek --- book/src/user/docker.md | 72 +++++++------ docker/.env | 26 ++++- docker/Dockerfile | 14 +-- docker/docker-compose.yml | 13 ++- docker/entrypoint.sh | 211 +++++++++++++++++++++++--------------- 5 files changed, 196 insertions(+), 140 deletions(-) diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 90ba102579d..c16fe1dd41b 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -80,49 +80,47 @@ All available Cargo features are listed at ## Configuring Zebra -To configure Zebra, edit the `docker/default-zebra-config.toml` config file and -uncomment the `configs` mapping in `docker/docker-compose.yml` so your config -takes effect. You can see if your config works as intended by looking at Zebra's -logs. - -Alternatively, you can configure Zebra by setting the environment variables in -the `docker/.env` file. Note that the config options of this file are limited to -the variables already present in the commented out blocks in it and adding new -ones will not be effective. Also note that the values of the variables take -precedence over the values set in the `docker/default-zebra-config.toml` config -file. 
The `docker/.env` file serves as a quick way to override the most commonly -used settings for Zebra, whereas the `docker/default-zebra-config.toml` file -provides full config capabilities. +To configure Zebra using Docker, you have a few options, processed in this order: + +1. **Provide a specific config file path:** Set the `ZEBRA_CONF_PATH` environment variable to point to your config file within the container. +2. **Use the default config file:** By default, the `docker-compose.yml` file mounts `./default-zebra-config.toml` to `/home/zebra/.config/zebrad.toml` using the `configs:` mapping. Zebra will use this file if `ZEBRA_CONF_PATH` is not set. To use environment variables instead, you must **comment out** the `configs:` mapping in `docker/docker-compose.yml`. +3. **Generate config from environment variables:** If neither of the above methods provides a config file (i.e., `ZEBRA_CONF_PATH` is unset *and* the `configs:` mapping in `docker-compose.yml` is commented out), the container's entrypoint script will *automatically generate* a default configuration file at `/home/zebra/.config/zebrad.toml`. This generated file uses specific environment variables (like `NETWORK`, `ZEBRA_RPC_PORT`, `ENABLE_COOKIE_AUTH`, `MINER_ADDRESS`, etc.) to define the settings. Using the `docker/.env` file is the primary way to set these variables for this auto-generation mode. + +You can see if your config works as intended by looking at Zebra's logs. + +Note that if you provide a configuration file using methods 1 or 2, environment variables from `docker/.env` will **not** override the settings within that file. The environment variables are primarily used for the auto-generation scenario (method 3). ### RPC -Zebra's RPC server is disabled by default. To enable it, you need to set its RPC -port. You can do that either in the `docker/default-zebra-config.toml` file or -`docker/.env` file, as described in the two paragraphs above. +Zebra's RPC server is disabled by default. 
To enable it, you need to define the RPC settings in Zebra's configuration. You can achieve this using one of the configuration methods described above: -When connecting to Zebra's RPC server, your RPC clients need to provide an -authentication cookie to the server or you need to turn the authentication off -in Zebra's config. By default, the cookie file is stored at -`/home/zebra/.cache/zebra/.cookie` in the container. You can print its contents -by running +* **Using a config file (methods 1 or 2):** Add or uncomment the `[rpc]` section in your `zebrad.toml` file (like the one provided in `docker/default-zebra-config.toml`). Ensure you set the `listen_addr` (e.g., `"0.0.0.0:8232"` for Mainnet). +* **Using environment variables (method 3):** Set the `ZEBRA_RPC_PORT` environment variable (e.g., in `docker/.env`). This tells the entrypoint script to include an enabled `[rpc]` section listening on `0.0.0.0:` in the auto-generated configuration file. -```bash -docker exec zebra cat /home/zebra/.cache/zebra/.cookie -``` +**Cookie Authentication:** -when the `zebra` container is running. Note that Zebra generates the cookie file -only if the RPC server is enabled, and each Zebra instance generates a unique -one. To turn the authentication off, either set `ENABLE_COOKIE_AUTH=false` in -`docker/.env` or set +By default, Zebra uses cookie-based authentication for RPC requests (`enable_cookie_auth = true`). When enabled, Zebra generates a unique, random cookie file required for client authentication. -```toml -[rpc] -enable_cookie_auth = false -``` +* **Cookie Location:** The entrypoint script configures Zebra to store this file at `/home/zebra/.cache/zebra/.cookie` inside the container. 
+* **Viewing the Cookie:** If the container is running and RPC is enabled with authentication, you can view the cookie content using: + + ```bash + docker exec cat /home/zebra/.cache/zebra/.cookie + ``` + + (Replace `` with your container's name, typically `zebra` if using the default `docker-compose.yml`). Your RPC client will need this value. +* **Disabling Authentication:** If you need to disable cookie authentication (e.g., for compatibility with tools like `lightwalletd`): + * If using a **config file** (methods 1 or 2), set `enable_cookie_auth = false` within the `[rpc]` section: + + ```toml + [rpc] + # listen_addr = ... + enable_cookie_auth = false + ``` + + * If using **environment variables** for auto-generation (method 3), set `ENABLE_COOKIE_AUTH=false` in your `docker/.env` file. -in `docker/default-zebra-config.toml` and mount this config file into the -container's filesystem in `docker/docker-compose.yml` as described at the -beginning of this section. +Remember that Zebra only generates the cookie file if the RPC server is enabled *and* `enable_cookie_auth` is set to `true` (or omitted, as `true` is the default). ## Examples @@ -142,8 +140,8 @@ Note that Docker will run Zebra with the RPC server enabled and the cookie authentication mechanism disabled since Lightwalletd doesn't support it. Instead of configuring Zebra via the recommended config file or `docker/.env` file, we configured the RPC server by setting environment variables directly in the -`docker/docker-compose.lwd.yml` file. This is a shortcut we can take when we are -familiar with the `docker/.env` file. +`docker/docker-compose.lwd.yml` file. This takes advantage of the entrypoint +script's auto-generation feature (method 3 described above). 
### Running Zebra with Prometheus and Grafana diff --git a/docker/.env b/docker/.env index b4e8bade406..629d7d6b9c7 100644 --- a/docker/.env +++ b/docker/.env @@ -1,5 +1,11 @@ # Configuration variables for running Zebra in Docker +# Sets the path to a custom Zebra config file. If not set, Zebra will look for a config at +# ${HOME}/.config/zebrad.toml or generate one using environment variables below. +# ! Setting ZEBRA_CONF_PATH will make most of the following environment variables ineffective. +# +# ZEBRA_CONF_PATH="/path/to/your/custom/zebrad.toml" + # Sets the network Zebra runs will run on. # # NETWORK=Mainnet @@ -7,14 +13,17 @@ # Zebra's RPC server is disabled by default. To enable it, set its port number. # # ZEBRA_RPC_PORT=8232 # Default RPC port number on Mainnet. -# ZEBRA_RPC_PORT=18323 # Default RPC port number on Testnet. +# ZEBRA_RPC_PORT=18232 # Default RPC port number on Testnet. # To disable cookie authentication, set the value below to false. # # ENABLE_COOKIE_AUTH=true -# Sets a custom directory for the state and network caches. Zebra will also -# store its cookie authentication file in this directory. +# Sets a custom directory for the cookie authentication file. +# +# ZEBRA_COOKIE_DIR="/home/zebra/.config/cookie" + +# Sets a custom directory for the state and network caches. # # ZEBRA_CACHE_DIR="/home/zebra/.cache/zebra" @@ -25,6 +34,11 @@ # # FEATURES="" +# Sets the listen address and port for Prometheus metrics. +# +# METRICS_ENDPOINT_ADDR="0.0.0.0" +# METRICS_ENDPOINT_PORT=9999 + # Logging to a file is disabled by default. To enable it, uncomment the line # below and alternatively set your own path. # @@ -41,6 +55,12 @@ # # USE_JOURNALD=true +# Sets the listen address and port for the tracing endpoint. +# Only active when the 'filter-reload' feature is enabled. +# +# TRACING_ENDPOINT_ADDR="0.0.0.0" +# TRACING_ENDPOINT_PORT=3000 + # If you are going to use Zebra as a backend for a mining pool, set your mining # address. 
# diff --git a/docker/Dockerfile b/docker/Dockerfile index fc61f525914..a82c1010fb9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -120,10 +120,6 @@ COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/ # and allow to change permissions for mounted cache directories COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ -# Use the same default config as in the production environment. -ENV ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" -COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} - # As the build has already run with the root user, # we need to set the correct permissions for the home and cargo home dirs owned by it. RUN chown -R ${UID}:${GID} "${HOME}" && \ @@ -141,7 +137,9 @@ CMD [ "cargo", "test" ] # `test` stage. The resulting zebrad binary is used in the `runtime` stage. FROM deps AS release +# Set the working directory for the build. ARG HOME +WORKDIR ${HOME} RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ --mount=type=bind,source=tower-fallback,target=tower-fallback \ @@ -199,15 +197,8 @@ RUN addgroup --quiet --gid ${GID} ${USER} && \ adduser --quiet --gid ${GID} --uid ${UID} --home ${HOME} ${USER} --disabled-password --gecos "" WORKDIR ${HOME} - -# We set the default locations of the conf and cache dirs according to the XDG -# spec: https://specifications.freedesktop.org/basedir-spec/latest/ - RUN chown -R ${UID}:${GID} ${HOME} -ARG ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" -ENV ZEBRA_CONF_PATH=${ZEBRA_CONF_PATH} - # We're explicitly NOT using the USER directive here. # Instead, we run as root initially and use gosu in the entrypoint.sh # to step down to the non-privileged user. 
This allows us to change permissions @@ -217,7 +208,6 @@ ENV ZEBRA_CONF_PATH=${ZEBRA_CONF_PATH} # Copy the gosu binary to be able to run the entrypoint as non-root user COPY --from=tianon/gosu:bookworm /gosu /usr/local/bin/ COPY --from=release /usr/local/bin/zebrad /usr/local/bin/ -COPY --chown=${UID}:${GID} ./docker/default-zebra-config.toml ${ZEBRA_CONF_PATH} COPY --chown=${UID}:${GID} ./docker/entrypoint.sh /usr/local/bin/entrypoint.sh ENTRYPOINT [ "entrypoint.sh" ] diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index a30e248b020..b561312fe27 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -15,13 +15,12 @@ services: - zebrad-cache:/home/zebra/.cache/zebra # Having `tty` set to true makes Zebra use colored logs. tty: true - #! Uncomment the `configs` mapping below to make your custom configuration - #! take effect. - # - # configs: - # - source: zebra-config - # target: /home/zebra/.config/zebrad.toml - # + # ! Comment out the `configs` mapping below to use the environment variables in the + # ! `.env` file, instead of the default configuration file. + configs: + - source: zebra-config + target: /home/zebra/.config/zebrad.toml + # Uncomment the `ports` mapping below to map ports between the container and # host. # diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 1f4f868c1be..e73ad84e086 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -6,11 +6,12 @@ # # ## Notes # -# - `$ZEBRA_CONF_PATH` must point to a Zebra conf file. +# - `$ZEBRA_CONF_PATH` can point to an existing Zebra config file, or if not set, +# the script will look for a default config at ${HOME}/.config/zebrad.toml, +# or generate one using environment variables. set -eo pipefail - # These are the default cached state directories for Zebra and lightwalletd. # # They are set to `${HOME}/.cache/zebra` and `${HOME}/.cache/lwd` @@ -18,101 +19,122 @@ set -eo pipefail # `ZEBRA_CACHE_DIR` and `LWD_CACHE_DIR` environment variables. 
: "${ZEBRA_CACHE_DIR:=${HOME}/.cache/zebra}" : "${LWD_CACHE_DIR:=${HOME}/.cache/lwd}" - -# Exit early if `ZEBRA_CONF_PATH` does not point to a file. -if [[ ! -f "${ZEBRA_CONF_PATH}" ]]; then - echo "ERROR: No Zebra config file found at ZEBRA_CONF_PATH (${ZEBRA_CONF_PATH})." - echo "Please ensure the file exists or mount your custom config file and set ZEBRA_CONF_PATH accordingly." - exit 1 -fi +: "${ZEBRA_COOKIE_DIR:=${HOME}/.cache/zebra}" # Use gosu to drop privileges and execute the given command as the specified UID:GID exec_as_user() { - exec gosu "${UID}:${GID}" "$@" + user=$(id -u) + if [[ ${user} == '0' ]]; then + exec gosu "${UID}:${GID}" "$@" + else + exec "$@" + fi } -# Modifies the existing Zebra config file at ZEBRA_CONF_PATH using environment variables. +# Modifies the Zebra config file using environment variables. # -# The config options this function supports are also listed in the "docker/.env" file. +# This function generates a new config file from scratch at ZEBRA_CONF_PATH +# using the provided environment variables. # -# This function modifies the existing file in-place and prints its location. +# It creates a complete configuration with network settings, state, RPC, +# metrics, tracing, and mining sections based on environment variables. prepare_conf_file() { - # Set a custom network. - if [[ -n "${NETWORK}" ]]; then - sed -i '/network = ".*"/s/".*"/"'"${NETWORK//\"/}"'"/' "${ZEBRA_CONF_PATH}" - fi - - # Enable the RPC server by setting its port. - if [[ -n "${ZEBRA_RPC_PORT}" ]]; then - sed -i '/# listen_addr = "0.0.0.0:18232" # Testnet/d' "${ZEBRA_CONF_PATH}" - sed -i 's/ *# Mainnet$//' "${ZEBRA_CONF_PATH}" - sed -i '/# listen_addr = "0.0.0.0:8232"/s/^# //; s/8232/'"${ZEBRA_RPC_PORT//\"/}"'/' "${ZEBRA_CONF_PATH}" - fi - - # Disable or enable cookie authentication. 
- if [[ -n "${ENABLE_COOKIE_AUTH}" ]]; then - sed -i '/# enable_cookie_auth = true/s/^# //; s/true/'"${ENABLE_COOKIE_AUTH//\"/}"'/' "${ZEBRA_CONF_PATH}" - fi - - # Set a custom state, network and cookie cache dirs. - # - # We're pointing all three cache dirs at the same location, so users will find - # all cached data in that single location. We can introduce more env vars and - # use them to set the cache dirs separately if needed. - if [[ -n "${ZEBRA_CACHE_DIR}" ]]; then - mkdir -p "${ZEBRA_CACHE_DIR//\"/}" - chown -R "${UID}:${GID}" "${ZEBRA_CACHE_DIR//\"/}" - sed -i 's|_dir = ".*"|_dir = "'"${ZEBRA_CACHE_DIR//\"/}"'"|' "${ZEBRA_CONF_PATH}" - fi - - # Set a custom lightwalletd cache dir. - if [[ -n "${LWD_CACHE_DIR}" ]]; then - mkdir -p "${LWD_CACHE_DIR//\"/}" - chown -R "${UID}:${GID}" "${LWD_CACHE_DIR//\"/}" - fi - - # Enable the Prometheus metrics endpoint. - if [[ "${FEATURES}" == *"prometheus"* ]]; then - sed -i '/# endpoint_addr = "0.0.0.0:9999" # Prometheus/s/^# //' "${ZEBRA_CONF_PATH}" - fi - - # Enable logging to a file by setting a custom log file path. - if [[ -n "${LOG_FILE}" ]]; then - mkdir -p "$(dirname "${LOG_FILE//\"/}")" - chown -R "${UID}:${GID}" "$(dirname "${LOG_FILE//\"/}")" - sed -i 's|# log_file = ".*"|log_file = "'"${LOG_FILE//\"/}"'"|' "${ZEBRA_CONF_PATH}" - fi + # Base configuration + cat >"${ZEBRA_CONF_PATH}" <&2 + exit 1 +} - # Set a mining address. - if [[ -n "${MINER_ADDRESS}" ]]; then - sed -i '/# miner_address = ".*"/{s/^# //; s/".*"/"'"${MINER_ADDRESS//\"/}"'"/}' "${ZEBRA_CONF_PATH}" +# Creates a directory if it doesn't exist and sets ownership to specified UID:GID. +# Also ensures the parent directories have the correct ownership. 
+# +# ## Parameters +# +# - $1: Directory path to create and own +create_owned_directory() { + local dir="$1" + # Skip if directory is empty + [[ -z ${dir} ]] && return + + # Create directory with parents + mkdir -p "${dir}" || exit_error "Failed to create directory: ${dir}" + + # Set ownership for the created directory + chown -R "${UID}:${GID}" "${dir}" || exit_error "Failed to secure directory: ${dir}" + + # Set ownership for parent directory (but not if it's root or home) + local parent_dir + parent_dir="$(dirname "${dir}")" + if [[ "${parent_dir}" != "/" && "${parent_dir}" != "${HOME}" ]]; then + chown "${UID}:${GID}" "${parent_dir}" fi - - # Trim all comments and empty lines. - sed -i '/^#/d; /^$/d' "${ZEBRA_CONF_PATH}" - - echo "${ZEBRA_CONF_PATH}" } +# Create and own cache and config directories +[[ -n ${ZEBRA_CACHE_DIR} ]] && create_owned_directory "${ZEBRA_CACHE_DIR}" +[[ -n ${LWD_CACHE_DIR} ]] && create_owned_directory "${LWD_CACHE_DIR}" +[[ -n ${ZEBRA_COOKIE_DIR} ]] && create_owned_directory "${ZEBRA_COOKIE_DIR}" +[[ -n ${LOG_FILE} ]] && create_owned_directory "$(dirname "${LOG_FILE}")" + # Runs cargo test with an arbitrary number of arguments. # # Positional Parameters # -# - '$1' must contain -# - either cargo FEATURES as described here: -# https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options, -# - or be empty. +# - '$1' must contain cargo FEATURES as described here: +# https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options # - The remaining params will be appended to a command starting with # `exec_as_user cargo test ... -- ...` run_cargo_test() { @@ -255,12 +277,39 @@ run_tests() { } # Main Script Logic +# +# 1. First check if ZEBRA_CONF_PATH is explicitly set or if a file exists at that path +# 2. If not set but default config exists, use that +# 3. If neither exists, generate a default config at ${HOME}/.config/zebrad.toml +# 4. Print environment variables and config for debugging +# 5. 
Process command-line arguments and execute appropriate action +if [[ -n ${ZEBRA_CONF_PATH} ]]; then + if [[ -f ${ZEBRA_CONF_PATH} ]]; then + echo "ZEBRA_CONF_PATH was set to ${ZEBRA_CONF_PATH} and a file exists." + echo "Using user-provided config file" + else + echo "ERROR: ZEBRA_CONF_PATH was set and no config file found at ${ZEBRA_CONF_PATH}." + echo "Please ensure a config file exists or set ZEBRA_CONF_PATH to point to your config file." + exit 1 + fi +else + if [[ -f "${HOME}/.config/zebrad.toml" ]]; then + echo "ZEBRA_CONF_PATH was not set." + echo "Using default config at ${HOME}/.config/zebrad.toml" + ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" + else + echo "ZEBRA_CONF_PATH was not set and no default config found at ${HOME}/.config/zebrad.toml" + echo "Preparing a default one..." + ZEBRA_CONF_PATH="${HOME}/.config/zebrad.toml" + create_owned_directory "$(dirname "${ZEBRA_CONF_PATH}")" + prepare_conf_file + fi +fi -prepare_conf_file "${ZEBRA_CONF_PATH}" echo "INFO: Using the following environment variables:" printenv -echo "Prepared the following Zebra config:" +echo "Using Zebra config at ${ZEBRA_CONF_PATH}:" cat "${ZEBRA_CONF_PATH}" # - If "$1" is "--", "-", or "zebrad", run `zebrad` with the remaining params. From 4e3426c8f3dd471850d4017b8dc5a60177621ff4 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 16 Apr 2025 15:16:27 +0100 Subject: [PATCH 151/245] feat(ci): Centralize zebra configuration testing with Docker (#9423) * refactor(docker): Enhance Zebra configuration options and entrypoint logic - Introduced multiple methods for configuring Zebra, including custom config file paths and automatic generation from environment variables. - Updated entrypoint script to handle configuration more robustly, checking for existing files and generating defaults as needed. - Improved documentation in `docker.md` to clarify configuration options and their precedence. 
- Adjusted environment variable handling in `docker/.env` for better clarity and functionality. - Refactored `prepare_conf_file` to create a complete configuration based on environment variables, streamlining the setup process. * fix(docker): remove non-essential variables and set correct defaults * feat(ci): Centralize zebra configuration testing with Docker - Replace multiple separate test jobs with a single comprehensive matrix-based test - Create a new ADR documenting the design decision for centralizing Docker tests - Move all test scenarios from both CI and CD pipelines into a single reusable workflow - Define extensive test matrix covering network, RPC, directory, feature configurations - Improve workflow readability with descriptive test names and clear organization - Simplify workflow inputs to only require the Docker image identifier * chore(ci): cleanup jobs that already don't exist --- .github/workflows/README.md | 2 +- .../cd-deploy-nodes-gcp.patch-external.yml | 18 --- .../workflows/cd-deploy-nodes-gcp.patch.yml | 14 -- .github/workflows/cd-deploy-nodes-gcp.yml | 25 +--- .github/workflows/ci-tests.patch-external.yml | 22 --- .github/workflows/ci-tests.patch.yml | 23 ---- .../workflows/sub-ci-unit-tests-docker.yml | 36 +---- .github/workflows/sub-test-zebra-config.yml | 127 ++++++++++++------ .../devops/004-improve-docker-conf-tests.md | 89 ++++++++++++ 9 files changed, 185 insertions(+), 171 deletions(-) create mode 100644 docs/decisions/devops/004-improve-docker-conf-tests.md diff --git a/.github/workflows/README.md b/.github/workflows/README.md index e45be336730..ea104c7db71 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -49,7 +49,7 @@ graph TB subgraph "Unit Test Flow" direction TB C[sub-ci-unit-tests-docker.yml] - H[test-all] & I[test-fake-activation-heights] & J[test-empty-sync] & K[test-lightwalletd-integration] & L[test-configuration-file] + H[test-all] & I[test-fake-activation-heights] & J[test-empty-sync] & 
K[test-lightwalletd-integration] & L[test-docker-configurations] C --> H C --> I C --> J diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index 7eade3ad5d2..f4f599e5638 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -27,21 +27,3 @@ jobs: if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' - - test-configuration-file: - name: Test CD default Docker config file / Test default-conf in Docker - # This dependency allows all these jobs to depend on a single condition, making it easier to - # change. - needs: build - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - test-configuration-file-testnet: - name: Test CD testnet Docker config file / Test testnet-conf in Docker - needs: build - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml index 922749a2901..8ca9804f4ed 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -41,17 +41,3 @@ jobs: if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "No build required"' - - test-configuration-file: - name: Test CD default Docker config file / Test default-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-configuration-file-testnet: - name: Test CD testnet Docker config file / Test 
testnet-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index bce32c5db23..663bb9144cf 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -4,8 +4,7 @@ # # 1. `versioning`: Extracts the major version from the release semver. Useful for segregating instances based on major versions. # 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git. -# 3. `test-configuration-file`: Validates Zebra using the default config with the latest version. -# 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration. +# 3. `test-docker-configurations`: Validates all Zebra Docker configurations by running a matrix of configuration tests. # 6. `deploy-nodes`: Deploys Managed Instance Groups (MiGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MiG. For releases, MiGs are replaced only if deploying the same major version; otherwise, a new major version is deployed. # 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted. # @@ -178,27 +177,13 @@ jobs: # This step needs access to Docker Hub secrets to run successfully secrets: inherit - # Test that Zebra works using the default config with the latest Zebra version. 
- test-configuration-file: - name: Test CD default Docker config file + # Run a matrix of configuration tests against the Docker image + test-docker-configurations: + name: Test Zebra Docker configurations needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: "default-conf" docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - - # Test reconfiguring the docker image for testnet. - test-configuration-file-testnet: - name: Test CD testnet Docker config file - needs: build - # Make sure Zebra can sync the genesis block on testnet - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: "testnet-conf" - docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: "-e NETWORK=Testnet" # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. 
@@ -242,7 +227,7 @@ jobs: set-matrix, build, versioning, - test-configuration-file, + test-docker-configurations, get-disk-name, ] runs-on: ubuntu-latest diff --git a/.github/workflows/ci-tests.patch-external.yml b/.github/workflows/ci-tests.patch-external.yml index f257315788b..48f48f540ea 100644 --- a/.github/workflows/ci-tests.patch-external.yml +++ b/.github/workflows/ci-tests.patch-external.yml @@ -51,28 +51,6 @@ jobs: steps: - run: 'echo "Skipping job on fork"' - test-configuration-file: - name: Unit tests / Test CI default Docker config file / Test default-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - test-configuration-file-testnet: - name: Unit tests / Test CI testnet Docker config file / Test testnet-conf in Docker - needs: build - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - test-zebra-conf-path: - name: Unit tests / Test CI custom Docker config file / Test custom-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - #### #### ## The following jobs are related to sub-ci-integration-tests-gcp.yml diff --git a/.github/workflows/ci-tests.patch.yml b/.github/workflows/ci-tests.patch.yml index e0a7ff605fd..db87f4a11bc 100644 --- a/.github/workflows/ci-tests.patch.yml +++ b/.github/workflows/ci-tests.patch.yml @@ -70,29 +70,6 @@ jobs: steps: - run: 'echo "No build required"' - test-configuration-file: - name: Unit tests / Test CI default Docker config file / Test default-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - 
test-configuration-file-testnet: - name: Unit tests / Test CI testnet Docker config file / Test testnet-conf in Docker - needs: build - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-zebra-conf-path: - name: Unit tests / Test CI custom Docker config file / Test custom-conf in Docker - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - #### ## The following jobs are related to sub-ci-integration-tests-gcp.yml ### diff --git a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index 852d26503b0..42cbbde056f 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -8,9 +8,7 @@ # 3. 'test-fake-activation-heights': Runs state tests with fake activation heights, isolating its build products. # 4. 'test-empty-sync': Tests Zebra's ability to sync and checkpoint from an empty state. # 5. 'test-lightwalletd-integration': Validates integration with 'lightwalletd' starting from an empty state. -# 6. 'test-configuration-file': Assesses the default Docker configuration for Zebra. -# 7. 'test-configuration-file-testnet': Checks the Docker image reconfiguration for the Testnet. -# 8. 'test-zebra-conf-path': Tests Zebra using a custom Docker configuration. +# 6. 'test-docker-configurations': Runs a matrix of configuration tests to validate various Docker configurations. name: Docker Unit Tests on: @@ -154,35 +152,12 @@ jobs: -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - # Test that Zebra works using the default config with the latest Zebra version. 
- test-configuration-file: - name: Test CI default Docker config file + # Run a matrix of configuration tests against the Docker image + test-docker-configurations: + name: Test Zebra Docker configurations uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: "default-conf" docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - - # Test reconfiguring Zebra for Testnet in Docker. - test-configuration-file-testnet: - name: Test enabling Testnet in Docker - # Make sure Zebra can sync the genesis block on testnet - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: "testnet-conf" - docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - test_variables: "-e NETWORK=Testnet" - grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - - # Test that Zebra works using $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test CI custom Docker config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: "custom-conf" - docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ inputs.image_digest }} - test_variables: '-e ZEBRA_CONF_PATH="/home/zebra/zebrad/tests/common/configs/custom-conf.toml"' - grep_patterns: '-e "extra_coinbase_data:\sSome\(\"do you even shield\?\"\)"' failure-issue: name: Open or update issues for main branch failures @@ -196,8 +171,7 @@ jobs: test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, - test-configuration-file, - test-zebra-conf-path, + test-docker-configurations, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) 
diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index d72f35bf4e4..b4b9da36963 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -1,37 +1,83 @@ # This workflow is designed to test Zebra configuration files using Docker containers. +# It acts as a centralized test suite for Docker configuration scenarios, running multiple +# distinct tests against a provided Docker image using a matrix approach. # - Runs a specified Docker image with the provided test variables and network settings. # - Monitors and analyzes container logs for specific patterns to determine test success. # - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters. -name: Test Zebra Config Files + +name: Test Zebra Configs in Docker on: workflow_call: inputs: - # Status and logging - test_id: - required: true - type: string - description: 'Unique identifier for the test' - grep_patterns: - required: true - type: string - description: 'Patterns to grep for in the logs' - - # Test selection and parameters docker_image: required: true type: string - description: 'Docker image to test' - test_variables: - required: false - type: string - description: 'Environment variables used to select and configure the test' + description: "Docker image to test, including digest (e.g., gcr.io/example/zebrad@sha256:...)" jobs: - test-docker-config: - name: Test ${{ inputs.test_id }} in Docker + test-configurations: + # Use the matrix 'name' for the job name for clarity in UI + name: Test ${{ matrix.name }} timeout-minutes: 30 - runs-on: ubuntu-latest-m + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + # Basic network configurations + - id: default-conf + name: Default config + env_vars: "" + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + + - id: testnet-conf + name: Testnet config + 
env_vars: "-e NETWORK=Testnet" + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + + # Only runs when using the CI image, because the CD image doesn't have the custom config file + # available in the tests/common/configs directory + - id: custom-conf + name: Custom config + env_vars: '-e ZEBRA_CONF_PATH="/home/zebra/zebrad/tests/common/configs/custom-conf.toml"' + grep_patterns: '-e "extra_coinbase_data:\\sSome\\(\\\"do you even shield\\?\\\"\\)"' + + # RPC configuration tests + - id: rpc-conf + name: RPC config + env_vars: "-e ZEBRA_RPC_PORT=8232" + grep_patterns: '-e "Opened RPC endpoint at.*0.0.0.0:8232"' + + - id: rpc-custom-conf + name: RPC with custom port + env_vars: "-e ZEBRA_RPC_PORT=28232 -e RPC_LISTEN_ADDR=127.0.0.1" + grep_patterns: '-e "Opened RPC endpoint at.*127.0.0.1:28232"' + + # Custom directory tests + - id: rpc-cookie-conf + name: RPC with custom cookie dir + env_vars: "-e ZEBRA_RPC_PORT=8232 -e ENABLE_COOKIE_AUTH=true -e ZEBRA_COOKIE_DIR=/home/zebra/.config/cookie" + grep_patterns: '-e "RPC auth cookie written to disk"' + + # Custom directory tests + - id: custom-dirs-conf + name: Custom cache and cookie directories + env_vars: "-e ZEBRA_CACHE_DIR=/tmp/zebra-cache" + grep_patterns: '-e "Opened Zebra state cache at /tmp/zebra-cache"' + + # Feature-based configurations + - id: prometheus-feature + name: Prometheus metrics + env_vars: "-e FEATURES=prometheus -e METRICS_ENDPOINT_PORT=9999" + grep_patterns: '-e "0.0.0.0:9999"' + + # Mining configuration + - id: mining-config + name: Mining configuration + env_vars: '-e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v"' + grep_patterns: '-e "miner_address = \\\"t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v\\\""' + steps: - uses: actions/checkout@v4.2.2 with: @@ -44,41 +90,38 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.5.0 - - name: Run ${{ inputs.test_id }} test + - name: Run ${{ matrix.name }} test + # Only run if this isn't a skipped custom-conf test + if: ${{ 
matrix.id != 'custom-conf' || contains(inputs.docker_image, vars.CI_IMAGE_NAME) }} run: | docker pull ${{ inputs.docker_image }} - docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ inputs.docker_image }} zebrad start + docker run ${{ matrix.env_vars }} --detach --name ${{ matrix.id }} -t ${{ inputs.docker_image }} zebrad start # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE; docker logs \ --tail all \ --follow \ - ${{ inputs.test_id }} | \ + ${{ matrix.id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - ${{ inputs.grep_patterns }} - ) || true + ${{ matrix.grep_patterns }} + ) LOGS_EXIT_STATUS=$? - docker stop ${{ inputs.test_id }} + # Display grep status for debugging + echo "grep exit status: $LOGS_EXIT_STATUS" - EXIT_STATUS=$(docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); + docker stop ${{ matrix.id }} + + EXIT_STATUS=$(docker wait ${{ matrix.id }} || echo "Error retrieving exit status"); echo "docker exit status: $EXIT_STATUS"; - # If grep found the pattern, exit with the Docker container exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - # We can't diagnose or fix these errors, so we're just ignoring them for now. - # They don't actually impact the test because they happen after it succeeds. - # See ticket #7898 for details. - if [ $EXIT_STATUS -eq 137 ] || [ $EXIT_STATUS -eq 139 ]; then - echo "Warning: ignoring docker exit status $EXIT_STATUS"; - exit 0; - else - exit $EXIT_STATUS; - fi + # If grep didn't find the pattern, fail immediately + if [ $LOGS_EXIT_STATUS -ne 0 ]; then + echo "ERROR: Failed to find the expected pattern in logs. 
Check grep_patterns."; + exit 1; + else + echo "SUCCESS: Found the expected pattern in logs."; + exit $EXIT_STATUS; fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; diff --git a/docs/decisions/devops/004-improve-docker-conf-tests.md b/docs/decisions/devops/004-improve-docker-conf-tests.md new file mode 100644 index 00000000000..f1332c15ef5 --- /dev/null +++ b/docs/decisions/devops/004-improve-docker-conf-tests.md @@ -0,0 +1,89 @@ +--- +# status and date are the only required elements. Feel free to remove the rest. +status: accepted +date: 2025-04-07 +builds-on: N/A +story: Need a scalable and maintainable way to test various Docker image configurations derived from `.env` variables and `entrypoint.sh` logic, ensuring consistency between CI and CD pipelines. https://github.com/ZcashFoundation/zebra/pull/8948 +--- + +# Centralize Docker Configuration Testing using a Reusable Workflow + +## Context and Problem Statement + +Currently, tests verifying Zebra's Docker image configuration (based on environment variables processed by `docker/entrypoint.sh`) are implemented using a reusable workflow (`sub-test-zebra-config.yml`). However, the _invocation_ of these tests, including the specific scenarios (environment variables, grep patterns), is duplicated and scattered across different workflows, notably the CI workflow (`sub-ci-unit-tests-docker.yml`) and the CD workflow (`cd-deploy-nodes-gcp.yml`). + +This leads to: + +1. **Code Duplication:** Similar test setup logic exists in multiple places. +2. **Maintenance Overhead:** Adding or modifying configuration tests requires changes in multiple files. +3. **Scalability Issues:** Adding numerous new test scenarios would significantly clutter the main CI and CD workflow files. +4. **Potential Inconsistency:** Risk of configuration tests diverging between CI and CD environments. 
+
+We need a centralized, scalable, and maintainable approach to define and run these configuration tests against Docker images built in both CI and CD contexts.
+
+## Priorities & Constraints
+
+- **DRY Principle:** Avoid repeating test logic and scenario definitions.
+- **Maintainability:** Configuration tests should be easy to find, understand, and modify.
+- **Scalability:** The solution should easily accommodate adding many more test scenarios in the future.
+- **Consistency:** Ensure the same tests run against both CI and CD images where applicable.
+- **Integration:** Leverage existing GitHub Actions tooling and workflows effectively.
+- **Reliability:** Testing relies on running a container and grepping its logs for specific patterns to determine success.
+
+## Considered Options
+
+1. **Status Quo:** Continue defining and invoking configuration tests within the respective CI (`sub-ci-unit-tests-docker.yml`) and CD (`cd-deploy-nodes-gcp.yml`) workflows, using `sub-test-zebra-config.yml` for the core run/grep logic.
+2. **Modify and Extend `sub-test-zebra-config.yml`:** Convert the existing `sub-test-zebra-config.yml` workflow. Remove its specific test inputs (`test_id`, `grep_patterns`, `test_variables`). Add multiple jobs _inside_ this workflow, each hardcoding a specific test scenario (run container + grep logs). The workflow would only take `docker_image` as input.
+3. **Use `docker-compose.test.yml`:** Define test scenarios as services within a dedicated `docker-compose.test.yml` file. The CI/CD workflows would call a reusable workflow (like `sub-test-zebra-config.yml`) that uses `docker compose` to run specific services and performs log grepping.
+4. **Create a _New_ Dedicated Reusable Workflow:** Create a _new_ reusable workflow (e.g., `sub-test-all-configs.yml`) that takes a Docker image digest as input and contains multiple jobs, each defining and executing a specific configuration test scenario (run container + grep logs).
+
+## Pros and Cons of the Options
+
+### Option 1: Status Quo
+
+- Bad: High duplication, poor maintainability, poor scalability.
+
+### Option 2: Modify and Extend `sub-test-zebra-config.yml`
+
+- Good: Centralizes test definition, execution, and assertion logic within the GHA ecosystem. Maximizes DRY principle for GHA workflows. High maintainability and scalability for adding tests. Clear separation of concerns (build vs. test config). Reuses an existing workflow file structure.
+- Bad: Modifies the existing workflow's purpose significantly. Callers need to adapt.
+
+### Option 3: Use `docker-compose.test.yml`
+
+- Good: Centralizes test _environment definitions_ in a standard format (`docker-compose.yml`). Easy local testing via `docker compose`.
+- Bad: Requires managing an extra file (`docker-compose.test.yml`). Still requires a GitHub Actions script/workflow step to orchestrate `docker compose` commands and perform the essential log grepping/assertion logic. Less integrated into the pure GHA workflow structure.
+
+### Option 4: Create a _New_ Dedicated Reusable Workflow
+
+- Good: Cleanest separation - new workflow has a clear single purpose from the start. High maintainability and scalability.
+- Bad: Introduces an additional workflow file. Adds a layer of workflow call chaining.
+
+## Decision Outcome
+
+Chosen option: Option 2 — Modify and Extend `sub-test-zebra-config.yml`.
+
+This option provides a good balance of maintainability, scalability, and consistency by centralizing the configuration testing logic within a single, dedicated GitHub Actions reusable workflow (`sub-test-zebra-config.yml`). It directly addresses the code duplication across CI and CD pipelines and leverages GHA's native features for modularity by converting the existing workflow into a multi-job test suite runner.
+ +While Option 4 (creating a new workflow) offered slightly cleaner separation initially, modifying the existing workflow (Option 2) achieves the same goal of centralization while minimizing the number of workflow files. It encapsulates the entire test process (definition, execution, assertion) within GHA jobs in the reused file. + +The `sub-test-zebra-config.yml` workflow will be modified to remove its specific test inputs and instead contain individual jobs for each configuration scenario to be tested, taking only the `docker_image` digest as input. The CI and CD workflows will be simplified to call this modified workflow once after their respective build steps. + +### Expected Consequences + +- Reduction in code duplication within CI/CD workflow files. +- Improved maintainability: configuration tests are located in a single file (`sub-test-zebra-config.yml`). +- Easier addition of new configuration test scenarios by adding jobs to `sub-test-zebra-config.yml`. +- Clearer separation between image building and configuration testing logic. +- `sub-test-zebra-config.yml` will fundamentally change its structure and inputs. +- CI/CD workflows (`cd-deploy-nodes-gcp.yml`, parent of `sub-ci-unit-tests-docker.yml`) will need modification to remove old test jobs and add calls to the modified reusable workflow, passing the correct image digest. +- Debugging might involve tracing execution across workflow calls and within the multiple jobs of `sub-test-zebra-config.yml`. 
+ +## More Information + +- GitHub Actions: Reusing Workflows: [https://docs.github.com/en/actions/using-workflows/reusing-workflows](https://docs.github.com/en/actions/using-workflows/reusing-workflows) +- Relevant files: + - `.github/workflows/sub-test-zebra-config.yml` (To be modified) + - `.github/workflows/cd-deploy-nodes-gcp.yml` (To be modified) + - `.github/workflows/sub-ci-unit-tests-docker.yml` (To be modified) + - `docker/entrypoint.sh` (Script processing configurations) + - `docker/.env` (Example environment variables) From c572027ca9c1bc56b6c6e8dd39f51baa9f230fbf Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 17 Apr 2025 10:05:42 +0100 Subject: [PATCH 152/245] fix(cd): allow to deploy without healthchecks (#9433) --- .github/workflows/cd-deploy-nodes-gcp.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 663bb9144cf..c7861b74fd3 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -343,7 +343,6 @@ jobs: gcloud compute instance-groups managed create \ "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \ --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ - --health-check zebrad-tracing-filter \ --initial-delay 30 \ --region "${{ vars.GCP_REGION }}" \ --size 1 From 33a44e5f9c528a8127bec663fe262c051ee9904e Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 18 Apr 2025 09:29:42 +0200 Subject: [PATCH 153/245] add(mining): Restore internal miner (#9311) Co-authored-by: Marek Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 1 + zebra-chain/Cargo.toml | 8 +- zebra-chain/src/work/equihash.rs | 89 +++++++++++++++++-- zebra-rpc/src/config/mining.rs | 8 +- .../tests/snapshot/get_block_template_rpcs.rs | 2 - 
zebra-rpc/src/methods/tests/vectors.rs | 4 - zebrad/src/components/miner.rs | 5 +- .../common/configs/v1.9.0-internal-miner.toml | 84 +++++++++++++++++ zebrad/tests/common/configs/v2.2.0.toml | 85 ++++++++++++++++++ 9 files changed, 255 insertions(+), 31 deletions(-) create mode 100644 zebrad/tests/common/configs/v1.9.0-internal-miner.toml create mode 100644 zebrad/tests/common/configs/v2.2.0.toml diff --git a/Cargo.lock b/Cargo.lock index 300156372ad..2e437e15bdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,6 +1325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca4f333d4ccc9d23c06593733673026efa71a332e028b00f12cf427b9677dce9" dependencies = [ "blake2b_simd", + "cc", "core2", "document-features", ] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index e09ca94fdb0..c43d37d2bcb 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -35,10 +35,7 @@ shielded-scan = [ ] # Experimental internal miner support -# TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 -# See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L38-L43 -# Restore support when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 -internal-miner = [] +internal-miner = ["equihash/solver"] # Experimental elasticsearch support elasticsearch = [] @@ -69,9 +66,6 @@ blake2s_simd = { workspace = true } bs58 = { workspace = true, features = ["check"] } byteorder = { workspace = true } -# TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 -# See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L73-L85 -# Restore support when conditions are met. 
https://github.com/ZcashFoundation/zebra/issues/8183 equihash = { workspace = true } group = { workspace = true } diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index 35aa39f849b..6f65acffb99 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -133,21 +133,92 @@ impl Solution { #[allow(clippy::unwrap_in_result)] pub fn solve( mut header: Header, - mut _cancel_fn: F, + mut cancel_fn: F, ) -> Result, SolverCancelled> where F: FnMut() -> Result<(), SolverCancelled>, { - // TODO: Function code was removed as part of https://github.com/ZcashFoundation/zebra/issues/8180 - // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/src/work/equihash.rs#L115-L166 - // Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 - header.solution = Solution::for_proposal(); - Ok(AtLeastOne::from_one(header)) + use crate::shutdown::is_shutting_down; + + let mut input = Vec::new(); + header + .zcash_serialize(&mut input) + .expect("serialization into a vec can't fail"); + // Take the part of the header before the nonce and solution. + // This data is kept constant for this solver run. + let input = &input[0..Solution::INPUT_LENGTH]; + + while !is_shutting_down() { + // Don't run the solver if we'd just cancel it anyway. + cancel_fn()?; + + let solutions = equihash::tromp::solve_200_9(input, || { + // Cancel the solver if we have a new template. + if cancel_fn().is_err() { + return None; + } + + // This skips the first nonce, which doesn't matter in practice. 
+ Self::next_nonce(&mut header.nonce); + Some(*header.nonce) + }); + + let mut valid_solutions = Vec::new(); + + for solution in &solutions { + header.solution = Self::from_bytes(solution) + .expect("unexpected invalid solution: incorrect length"); + + // TODO: work out why we sometimes get invalid solutions here + if let Err(error) = header.solution.check(&header) { + info!(?error, "found invalid solution for header"); + continue; + } + + if Self::difficulty_is_valid(&header) { + valid_solutions.push(header); + } + } + + match valid_solutions.try_into() { + Ok(at_least_one_solution) => return Ok(at_least_one_solution), + Err(_is_empty_error) => debug!( + solutions = ?solutions.len(), + "found valid solutions which did not pass the validity or difficulty checks" + ), + } + } + + Err(SolverCancelled) + } + + /// Returns `true` if the `nonce` and `solution` in `header` meet the difficulty threshold. + /// + /// # Panics + /// + /// - If `header` contains an invalid difficulty threshold. + #[cfg(feature = "internal-miner")] + fn difficulty_is_valid(header: &Header) -> bool { + // Simplified from zebra_consensus::block::check::difficulty_is_valid(). + let difficulty_threshold = header + .difficulty_threshold + .to_expanded() + .expect("unexpected invalid header template: invalid difficulty threshold"); + + // TODO: avoid calculating this hash multiple times + let hash = header.hash(); + + // Note: this comparison is a u256 integer comparison, like zcashd and bitcoin. Greater + // values represent *less* work. + hash <= difficulty_threshold } - // TODO: Some methods were removed as part of https://github.com/ZcashFoundation/zebra/issues/8180 - // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/src/work/equihash.rs#L171-L196 - // Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 + /// Modifies `nonce` to be the next integer in big-endian order. 
+ /// Wraps to zero if the next nonce would overflow. + #[cfg(feature = "internal-miner")] + fn next_nonce(nonce: &mut [u8; 32]) { + let _ignore_overflow = crate::primitives::byte_array::increment_big_endian(&mut nonce[..]); + } } impl PartialEq for Solution { diff --git a/zebra-rpc/src/config/mining.rs b/zebra-rpc/src/config/mining.rs index 224d5b8fa1e..2946d80bf81 100644 --- a/zebra-rpc/src/config/mining.rs +++ b/zebra-rpc/src/config/mining.rs @@ -36,9 +36,7 @@ pub struct Config { /// for a valid Proof of Work. /// /// The internal miner is off by default. - // TODO: Restore equihash solver and recommend that Mainnet miners should use a mining pool with - // GPUs or ASICs designed for efficient mining. - #[cfg(feature = "internal-miner")] + #[serde(default)] pub internal_miner: bool, } @@ -50,10 +48,6 @@ impl Default for Config { // TODO: do we want to default to v5 transactions and Zebra coinbase data? extra_coinbase_data: None, debug_like_zcashd: true, - // TODO: Internal miner config code was removed as part of https://github.com/ZcashFoundation/zebra/issues/8180 - // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-rpc/src/config/mining.rs#L61-L66 - // Restore the code when conditions are met. 
https://github.com/ZcashFoundation/zebra/issues/8183 - #[cfg(feature = "internal-miner")] internal_miner: false, } } diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 46dc26ce780..b06a6edb57d 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -107,8 +107,6 @@ pub async fn test_responses( )), extra_coinbase_data: None, debug_like_zcashd: true, - // TODO: Use default field values when optional features are enabled in tests #8183 - #[cfg(feature = "internal-miner")] internal_miner: true, }; diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 70f3d483e7d..5c7ee2c39f8 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1831,8 +1831,6 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { miner_address: miner_address.clone(), extra_coinbase_data: None, debug_like_zcashd: true, - // TODO: Use default field values when optional features are enabled in tests #8183 - #[cfg(feature = "internal-miner")] internal_miner: true, }; @@ -2307,8 +2305,6 @@ async fn rpc_getdifficulty() { miner_address: None, extra_coinbase_data: None, debug_like_zcashd: true, - // TODO: Use default field values when optional features are enabled in tests #8183 - #[cfg(feature = "internal-miner")] internal_miner: true, }; diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs index ee4960a5d03..c841720d083 100644 --- a/zebrad/src/components/miner.rs +++ b/zebrad/src/components/miner.rs @@ -209,7 +209,7 @@ where } /// Generates block templates using `rpc`, and sends them to mining threads using `template_sender`. 
-#[instrument(skip(rpc, template_sender))] +#[instrument(skip(rpc, template_sender, network))] pub async fn generate_block_templates< Mempool, State, @@ -266,8 +266,9 @@ where // Wait for the chain to sync so we get a valid template. let Ok(template) = template else { - info!( + warn!( ?BLOCK_TEMPLATE_WAIT_TIME, + ?template, "waiting for a valid block template", ); diff --git a/zebrad/tests/common/configs/v1.9.0-internal-miner.toml b/zebrad/tests/common/configs/v1.9.0-internal-miner.toml new file mode 100644 index 00000000000..ca4f726e787 --- /dev/null +++ b/zebrad/tests/common/configs/v1.9.0-internal-miner.toml @@ -0,0 +1,84 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
+# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 +debug_enable_at_height = 0 + +[metrics] + +[mining] +miner_address = 't27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v' +internal_miner = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Testnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/configs/v2.2.0.toml b/zebrad/tests/common/configs/v2.2.0.toml new file mode 100644 index 00000000000..38429cf1ab7 --- /dev/null +++ b/zebrad/tests/common/configs/v2.2.0.toml @@ -0,0 +1,85 @@ +# Default configuration for zebrad. 
+# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true +internal_miner = false + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + 
"testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +cookie_dir = "cache_dir" +debug_force_finished_sync = false +enable_cookie_auth = true +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false \ No newline at end of file From 6e039c26c312f97a3d26ec1b073749238a481dc6 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 18 Apr 2025 16:16:02 +0100 Subject: [PATCH 154/245] fix(ci): set `ZEBRA_CACHE_DIR` in full and checkpoint syncs (#9434) * fix(ci): set `ZEBRA_CACHE_DIR` in full and checkpoint syncs Updates the GitHub Actions workflows for GCP integration tests to explicitly set the ZEBRA_CACHE_DIR environment variable to `/home/zebra/.cache/zebra` for the following test jobs: - regenerate-stateful-disks (Zebra checkpoint sync to mandatory checkpoint) - test-full-sync (Zebra full sync to tip on Mainnet) - test-full-sync-testnet (Zebra full sync to tip on Testnet) The rationale for this change is to address permission issues encountered during tests that attempt to create or access a cached database directory. By setting ZEBRA_CACHE_DIR to a specific path within the user's home directory, we ensure that the test has the necessary write permissions to create and manage the cache directory, avoiding 'Permission denied (os error 13)' errors that occur when using default or system directories without adequate access rights. 
* chore: fix typo --- .github/workflows/sub-ci-integration-tests-gcp.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 9f045d7dcf3..35e4570696c 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -79,7 +79,7 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -138,7 +138,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0" + test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -241,7 +241,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0" + test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. 
From 63723e7e844395f635a6b016515bf4b355b15c66 Mon Sep 17 00:00:00 2001 From: Kris Nuttycombe Date: Fri, 18 Apr 2025 14:57:45 -0600 Subject: [PATCH 155/245] zebra-state: Return only the history tree root in GetBlockTemplateChainInfo response. (#9444) The `HistoryTree` type is intended for internal use within the node; it should not be exposed via the ReadStateService response. Instead, this response should simply include the hash needed for block template construction. --- .../get_block_template_rpcs/get_block_template.rs | 5 ++--- .../types/get_block_template.rs | 2 +- zebra-rpc/src/methods/tests/prop.rs | 5 +++-- .../tests/snapshot/get_block_template_rpcs.rs | 2 +- zebra-rpc/src/methods/tests/vectors.rs | 13 +++++++------ zebra-state/src/response.rs | 6 +++--- zebra-state/src/service/read/difficulty.rs | 2 +- zebrad/tests/acceptance.rs | 12 ++++++------ 8 files changed, 24 insertions(+), 23 deletions(-) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 34495fbf4bb..59b01132dee 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -275,7 +275,7 @@ pub fn generate_coinbase_and_roots( block_template_height: Height, miner_address: &transparent::Address, mempool_txs: &[VerifiedUnminedTx], - history_tree: Arc, + chain_history_root: Option, like_zcashd: bool, extra_coinbase_data: Vec, ) -> (TransactionTemplate, DefaultRoots) { @@ -293,8 +293,7 @@ pub fn generate_coinbase_and_roots( // Calculate block default roots // // TODO: move expensive root, hash, and tree cryptography to a rayon thread? 
- let chain_history_root = history_tree - .hash() + let chain_history_root = chain_history_root .or_else(|| { (NetworkUpgrade::Heartwood.activation_height(network) == Some(block_template_height)) .then_some([0; 32].into()) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs index 879425bb667..74142cd005d 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs @@ -291,7 +291,7 @@ impl GetBlockTemplate { next_block_height, miner_address, &mempool_txs, - chain_tip_and_local_time.history_tree.clone(), + chain_tip_and_local_time.chain_history_root, like_zcashd, extra_coinbase_data, ); diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index bfb2d7003a4..dbac40d3fb3 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -11,6 +11,7 @@ use thiserror::Error; use tokio::sync::oneshot; use tower::buffer::Buffer; +use zebra_chain::history_tree::HistoryTree; use zebra_chain::{ amount::{Amount, NonNegative}, block::{Block, Height}, @@ -394,7 +395,7 @@ proptest! { .respond(zebra_state::ReadResponse::ChainInfo(GetBlockTemplateChainInfo { tip_hash: genesis_hash, tip_height: Height::MIN, - history_tree: Default::default(), + chain_history_root: HistoryTree::default().hash(), expected_difficulty: Default::default(), cur_time: DateTime32::now(), min_time: DateTime32::now(), @@ -468,7 +469,7 @@ proptest! 
{ .respond(zebra_state::ReadResponse::ChainInfo(GetBlockTemplateChainInfo { tip_hash: block_hash, tip_height: block_height, - history_tree: Default::default(), + chain_history_root: HistoryTree::default().hash(), expected_difficulty: Default::default(), cur_time: DateTime32::now(), min_time: DateTime32::now(), diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index b06a6edb57d..afd8aca3bfa 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -256,7 +256,7 @@ pub async fn test_responses( cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(network), + chain_history_root: fake_history_tree(network).hash(), })); } }; diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 5c7ee2c39f8..a5f8670a9f7 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -9,6 +9,7 @@ use zebra_chain::{ amount::Amount, block::Block, chain_tip::{mock::MockChainTip, NoChainTip}, + history_tree::HistoryTree, parameters::Network::*, serialization::{ZcashDeserializeInto, ZcashSerialize}, transaction::UnminedTxId, @@ -52,7 +53,7 @@ async fn rpc_getinfo() { GetBlockTemplateChainInfo { tip_hash: Mainnet.genesis_hash(), tip_height: Height::MIN, - history_tree: Default::default(), + chain_history_root: HistoryTree::default().hash(), expected_difficulty: Default::default(), cur_time: zebra_chain::serialization::DateTime32::now(), min_time: zebra_chain::serialization::DateTime32::now(), @@ -1880,7 +1881,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(&Mainnet), + chain_history_root: fake_history_tree(&Mainnet).hash(), })); } }; @@ -2353,7 +2354,7 
@@ async fn rpc_getdifficulty() { cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(&Mainnet), + chain_history_root: fake_history_tree(&Mainnet).hash(), })); }; @@ -2379,7 +2380,7 @@ async fn rpc_getdifficulty() { cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(&Mainnet), + chain_history_root: fake_history_tree(&Mainnet).hash(), })); }; @@ -2402,7 +2403,7 @@ async fn rpc_getdifficulty() { cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(&Mainnet), + chain_history_root: fake_history_tree(&Mainnet).hash(), })); }; @@ -2425,7 +2426,7 @@ async fn rpc_getdifficulty() { cur_time: fake_cur_time, min_time: fake_min_time, max_time: fake_max_time, - history_tree: fake_history_tree(&Mainnet), + chain_history_root: fake_history_tree(&Mainnet).hash(), })); }; diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 916efdd5adc..48c9c6c5783 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -4,7 +4,7 @@ use std::{collections::BTreeMap, sync::Arc}; use zebra_chain::{ amount::{Amount, NonNegative}, - block::{self, Block}, + block::{self, Block, ChainHistoryMmrRootHash}, orchard, sapling, serialization::DateTime32, subtree::{NoteCommitmentSubtreeData, NoteCommitmentSubtreeIndex}, @@ -277,9 +277,9 @@ pub struct GetBlockTemplateChainInfo { /// Depends on the `tip_hash`. pub tip_height: block::Height, - /// The history tree of the current best chain. + /// The FlyClient chain history root as of the end of the chain tip block. /// Depends on the `tip_hash`. - pub history_tree: Arc, + pub chain_history_root: Option, // Data derived from the state tip and recent blocks, and the current local clock. 
// diff --git a/zebra-state/src/service/read/difficulty.rs b/zebra-state/src/service/read/difficulty.rs index dd42c213656..e542ccad4c1 100644 --- a/zebra-state/src/service/read/difficulty.rs +++ b/zebra-state/src/service/read/difficulty.rs @@ -250,7 +250,7 @@ fn difficulty_time_and_history_tree( let mut result = GetBlockTemplateChainInfo { tip_hash, tip_height, - history_tree, + chain_history_root: history_tree.hash(), expected_difficulty, cur_time, min_time, diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 9f9624bd401..968d03cae47 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3092,8 +3092,7 @@ async fn trusted_chain_sync_handles_forks_correctly() -> Result<()> { header.previous_block_hash = chain_info.tip_hash; header.commitment_bytes = chain_info - .history_tree - .hash() + .chain_history_root .or(is_chain_history_activation_height.then_some([0; 32].into())) .expect("history tree can't be empty") .bytes_in_serialized_order() @@ -3440,8 +3439,9 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let valid_original_block_template = block_template.clone(); - let zebra_state::GetBlockTemplateChainInfo { history_tree, .. } = - fetch_state_tip_and_local_time(read_state.clone()).await?; + let zebra_state::GetBlockTemplateChainInfo { + chain_history_root, .. 
+ } = fetch_state_tip_and_local_time(read_state.clone()).await?; let network = base_network_params .clone() @@ -3456,7 +3456,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { Height(block_template.height), &miner_address, &[], - history_tree.clone(), + chain_history_root, true, vec![], ); @@ -3499,7 +3499,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { Height(block_template.height), &miner_address, &[], - history_tree.clone(), + chain_history_root, true, vec![], ); From fb3c8d80f20b9d8f67f9ea3d11b930f96a5c5517 Mon Sep 17 00:00:00 2001 From: Tomass <155266802+zeroprooff@users.noreply.github.com> Date: Sat, 19 Apr 2025 19:10:14 +0300 Subject: [PATCH 156/245] docs: remove consecutive duplicate words (#9446) * Update get_block_template.rs * Update vectors.rs * Update serialize.rs --- zebra-chain/src/block/serialize.rs | 2 +- zebra-rpc/src/methods/tests/vectors.rs | 2 +- .../tests/common/get_block_template_rpcs/get_block_template.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/zebra-chain/src/block/serialize.rs b/zebra-chain/src/block/serialize.rs index e763915e499..edb4f7afc07 100644 --- a/zebra-chain/src/block/serialize.rs +++ b/zebra-chain/src/block/serialize.rs @@ -39,7 +39,7 @@ fn check_version(version: u32) -> Result<(), &'static str> { // but this is not actually part of the consensus rules, and in fact // broken mining software created blocks that do not have version 4. // There are approximately 4,000 blocks with version 536870912; this - // is the bit-reversal of the value 4, indicating that that mining pool + // is the bit-reversal of the value 4, indicating that mining pool // reversed bit-ordering of the version field. Because the version field // was not properly validated, these blocks were added to the chain. 
// diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index a5f8670a9f7..696f9d251fb 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1188,7 +1188,7 @@ async fn rpc_getaddresstxids_response() { ) .await; - // Start and and outside of the range should use the chain tip. + // Start and outside of the range should use the chain tip. rpc_getaddresstxids_response_with( &network, Some(11), diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index b6c2ed61619..daee319b38b 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -258,7 +258,7 @@ async fn try_validate_block_template(client: &RpcRequestClient) -> Result<()> { let _ = done_tx.send(()).await; for (proposal_result, template, time_source) in proposal_results { let proposal_result = proposal_result - .expect("response should be success output with with a serialized `ProposalResponse`"); + .expect("response should be success output with a serialized `ProposalResponse`"); if let ProposalResponse::Rejected(reject_reason) = proposal_result { tracing::info!( From 9002cfcbd38282387a31c6a46f3addc6f54d3af2 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 21 Apr 2025 08:10:41 -0300 Subject: [PATCH 157/245] downgrade info message (#9448) --- zebra-state/src/service/finalized_state/disk_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 01c0f0bd7ff..3ba7026aafc 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -1228,7 +1228,7 @@ impl DiskDb { ); } else { #[cfg(not(test))] - info!( + debug!( ?current_limit, min_limit = 
?DiskDb::MIN_OPEN_FILE_LIMIT, ideal_limit = ?DiskDb::IDEAL_OPEN_FILE_LIMIT, From e6dc3b6fd31e895c0e72e66f8ff13c48245e938d Mon Sep 17 00:00:00 2001 From: Paul <3682187+PaulLaux@users.noreply.github.com> Date: Mon, 21 Apr 2025 16:38:44 +0300 Subject: [PATCH 158/245] NU7 constants (#9256) * Introduce Nu7 * Introduce Nu7 fo other crates of Zebra * Fix of new_regtest call (as it additionally needs Nu7 arg now) * Fix of new_regtest call (as it additionally needs Nu7 arg now) (2) * Set Nu7 as a network update for testnet in zebra-chain network tests * Fix serde names for NU7 * Update test snapshot in zebra-rpc to use NU7 * Copy Nu7-related changes from zsa-integration-state * Uncomment zcash_unstable = nu7 * Fix Nu7 constants and add cfg(zcash_unstable ...) attribute according to PR #15 review comments * Update network constant values according to PR #15 review comments * Add zcash_unstable=nu6 Rust flag to .cargo/config.toml to use nu6 by default * Add zcash_unstable = nu6 cfg in zebra-network/.../types.rs * Fix nu7 activation heights in network_upgrade.rs (nu6 value + 1 does not work - causes a test failure) * Add cfg for CURRENT_NETWORK_PROTOCOL_VERSION constant definition, add FIXMEs for a couple of Nu6 processing cases, add processing of Nu7 * Update get_blockchain_info@testnet_10.snap * Update get_blockchain_info@mainnet_10.snap * updated cfg flag * remove `zcash_unstable` flag * Remove testnet and mainnet NU7 activation heights, comment out consensus branch id and conversion to zcash primitives nu type * Yep, it'll work after NU7 * Yep, the test should work fine after NU7 * Yep, it only needs to be a post-nu6 height. * other fixes and cleanups * Updates snapshots, removes unnecessary FIXMEs, fixes issues with bad merge conflict resolution * fixes test hang when there are NUs defined without activation heights * fixes test, applies suggestion from code review. 
* Enable consensus branch ID for Nu7 --------- Co-authored-by: Dmitry Demin Co-authored-by: Arya Co-authored-by: Marek --- zebra-chain/src/block/commitment.rs | 4 +- zebra-chain/src/history_tree.rs | 4 +- zebra-chain/src/parameters/network.rs | 6 +-- zebra-chain/src/parameters/network/subsidy.rs | 4 +- zebra-chain/src/parameters/network/testnet.rs | 54 +++++++++---------- .../src/parameters/network/tests/vectors.rs | 6 +-- zebra-chain/src/parameters/network_upgrade.rs | 13 ++++- zebra-chain/src/primitives/zcash_history.rs | 3 +- zebra-chain/src/transaction/arbitrary.rs | 2 +- zebra-consensus/src/checkpoint/list/tests.rs | 2 +- zebra-consensus/src/transaction.rs | 5 +- zebra-consensus/src/transaction/tests.rs | 14 +++-- zebra-consensus/src/transaction/tests/prop.rs | 3 +- zebra-network/src/config.rs | 5 +- zebra-network/src/constants.rs | 4 +- zebra-network/src/protocol/external/types.rs | 8 ++- .../src/methods/get_block_template_rpcs.rs | 4 +- .../types/get_block_template/proposal.rs | 2 +- .../get_block_template_rpcs/types/subsidy.rs | 4 +- zebrad/tests/acceptance.rs | 4 +- zebrad/tests/common/configs/v1.9.0.toml | 1 + zebrad/tests/common/regtest.rs | 2 +- 22 files changed, 86 insertions(+), 68 deletions(-) diff --git a/zebra-chain/src/block/commitment.rs b/zebra-chain/src/block/commitment.rs index 2cb09e75b22..ec4ef7d2616 100644 --- a/zebra-chain/src/block/commitment.rs +++ b/zebra-chain/src/block/commitment.rs @@ -125,7 +125,7 @@ impl Commitment { // NetworkUpgrade::current() returns the latest network upgrade that's activated at the provided height, so // on Regtest for heights above height 0, it could return NU6, and it's possible for the current network upgrade // to be NU6 (or Canopy, or any network upgrade above Heartwood) at the Heartwood activation height. 
- (Canopy | Nu5 | Nu6, activation_height) + (Canopy | Nu5 | Nu6 | Nu7, activation_height) if height == activation_height && Some(height) == Heartwood.activation_height(network) => { @@ -136,7 +136,7 @@ impl Commitment { } } (Heartwood | Canopy, _) => Ok(ChainHistoryRoot(ChainHistoryMmrRootHash(bytes))), - (Nu5 | Nu6, _) => Ok(ChainHistoryBlockTxAuthCommitment( + (Nu5 | Nu6 | Nu7, _) => Ok(ChainHistoryBlockTxAuthCommitment( ChainHistoryBlockTxAuthCommitmentHash(bytes), )), } diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs index d84f92321af..12dd9c61153 100644 --- a/zebra-chain/src/history_tree.rs +++ b/zebra-chain/src/history_tree.rs @@ -102,7 +102,7 @@ impl NonEmptyHistoryTree { )?; InnerHistoryTree::PreOrchard(tree) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 => { + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { let tree = Tree::::new_from_cache( network, network_upgrade, @@ -156,7 +156,7 @@ impl NonEmptyHistoryTree { )?; (InnerHistoryTree::PreOrchard(tree), entry) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 => { + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { let (tree, entry) = Tree::::new_from_block( network, block, diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 90341bc9d41..bb304f85e13 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -149,12 +149,10 @@ impl Network { /// Creates a new [`Network::Testnet`] with `Regtest` parameters and the provided network upgrade activation heights. 
pub fn new_regtest( - nu5_activation_height: Option, - nu6_activation_height: Option, + configured_activation_heights: testnet::ConfiguredActivationHeights, ) -> Self { Self::new_configured_testnet(testnet::Parameters::new_regtest( - nu5_activation_height, - nu6_activation_height, + configured_activation_heights, )) } diff --git a/zebra-chain/src/parameters/network/subsidy.rs b/zebra-chain/src/parameters/network/subsidy.rs index 2fd60a5afbc..68fd90cb77c 100644 --- a/zebra-chain/src/parameters/network/subsidy.rs +++ b/zebra-chain/src/parameters/network/subsidy.rs @@ -77,8 +77,8 @@ impl FundingStreamReceiver { /// [ZIP-1014]: https://zips.z.cash/zip-1014#abstract /// [`zcashd`]: https://github.com/zcash/zcash/blob/3f09cfa00a3c90336580a127e0096d99e25a38d6/src/consensus/funding.cpp#L13-L32 /// [ZIP-1015]: https://zips.z.cash/zip-1015 - pub fn info(&self, is_nu6: bool) -> (&'static str, &'static str) { - if is_nu6 { + pub fn info(&self, is_post_nu6: bool) -> (&'static str, &'static str) { + if is_post_nu6 { ( match self { FundingStreamReceiver::Ecc => "Electric Coin Company", diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 2b7fc4920b3..16c4906061d 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -118,35 +118,24 @@ impl From<&BTreeMap> for ConfiguredActivationHeights { let mut configured_activation_heights = ConfiguredActivationHeights::default(); for (height, network_upgrade) in activation_heights.iter() { - match network_upgrade { + let field = match network_upgrade { NetworkUpgrade::BeforeOverwinter => { - configured_activation_heights.before_overwinter = Some(height.0); - } - NetworkUpgrade::Overwinter => { - configured_activation_heights.overwinter = Some(height.0); - } - NetworkUpgrade::Sapling => { - configured_activation_heights.sapling = Some(height.0); - } - NetworkUpgrade::Blossom => { - configured_activation_heights.blossom = 
Some(height.0); - } - NetworkUpgrade::Heartwood => { - configured_activation_heights.heartwood = Some(height.0); - } - NetworkUpgrade::Canopy => { - configured_activation_heights.canopy = Some(height.0); - } - NetworkUpgrade::Nu5 => { - configured_activation_heights.nu5 = Some(height.0); - } - NetworkUpgrade::Nu6 => { - configured_activation_heights.nu6 = Some(height.0); + &mut configured_activation_heights.before_overwinter } + NetworkUpgrade::Overwinter => &mut configured_activation_heights.overwinter, + NetworkUpgrade::Sapling => &mut configured_activation_heights.sapling, + NetworkUpgrade::Blossom => &mut configured_activation_heights.blossom, + NetworkUpgrade::Heartwood => &mut configured_activation_heights.heartwood, + NetworkUpgrade::Canopy => &mut configured_activation_heights.canopy, + NetworkUpgrade::Nu5 => &mut configured_activation_heights.nu5, + NetworkUpgrade::Nu6 => &mut configured_activation_heights.nu6, + NetworkUpgrade::Nu7 => &mut configured_activation_heights.nu7, NetworkUpgrade::Genesis => { continue; } - } + }; + + *field = Some(height.0) } configured_activation_heights @@ -271,6 +260,9 @@ pub struct ConfiguredActivationHeights { /// Activation height for `NU6` network upgrade. #[serde(rename = "NU6")] pub nu6: Option, + /// Activation height for `NU7` network upgrade. + #[serde(rename = "NU7")] + pub nu7: Option, } /// Builder for the [`Parameters`] struct. @@ -405,6 +397,7 @@ impl ParametersBuilder { canopy, nu5, nu6, + nu7, }: ConfiguredActivationHeights, ) -> Self { use NetworkUpgrade::*; @@ -427,6 +420,7 @@ impl ParametersBuilder { .chain(canopy.into_iter().map(|h| (h, Canopy))) .chain(nu5.into_iter().map(|h| (h, Nu5))) .chain(nu6.into_iter().map(|h| (h, Nu6))) + .chain(nu7.into_iter().map(|h| (h, Nu7))) .map(|(h, nu)| (h.try_into().expect("activation height must be valid"), nu)) .collect(); @@ -672,11 +666,10 @@ impl Parameters { /// /// Creates an instance of [`Parameters`] with `Regtest` values. 
pub fn new_regtest( - nu5_activation_height: Option, - nu6_activation_height: Option, + ConfiguredActivationHeights { nu5, nu6, nu7, .. }: ConfiguredActivationHeights, ) -> Self { #[cfg(any(test, feature = "proptest-impl"))] - let nu5_activation_height = nu5_activation_height.or(Some(100)); + let nu5 = nu5.or(Some(100)); let parameters = Self::build() .with_genesis_hash(REGTEST_GENESIS_HASH) @@ -689,8 +682,9 @@ impl Parameters { // most network upgrades are disabled by default for Regtest in zcashd .with_activation_heights(ConfiguredActivationHeights { canopy: Some(1), - nu5: nu5_activation_height, - nu6: nu6_activation_height, + nu5, + nu6, + nu7, ..Default::default() }) .with_halving_interval(PRE_BLOSSOM_REGTEST_HALVING_INTERVAL); @@ -735,7 +729,7 @@ impl Parameters { should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, - } = Self::new_regtest(None, None); + } = Self::new_regtest(Default::default()); self.network_name == network_name && self.genesis_hash == genesis_hash diff --git a/zebra-chain/src/parameters/network/tests/vectors.rs b/zebra-chain/src/parameters/network/tests/vectors.rs index b7263e0087e..86c25c2bcd6 100644 --- a/zebra-chain/src/parameters/network/tests/vectors.rs +++ b/zebra-chain/src/parameters/network/tests/vectors.rs @@ -107,7 +107,7 @@ fn activates_network_upgrades_correctly() { let expected_activation_height = 1; let network = testnet::Parameters::build() .with_activation_heights(ConfiguredActivationHeights { - nu6: Some(expected_activation_height), + nu7: Some(expected_activation_height), ..Default::default() }) .to_network(); @@ -145,7 +145,7 @@ fn activates_network_upgrades_correctly() { (Network::Mainnet, MAINNET_ACTIVATION_HEIGHTS), (Network::new_default_testnet(), TESTNET_ACTIVATION_HEIGHTS), ( - Network::new_regtest(None, None), + Network::new_regtest(Default::default()), expected_default_regtest_activation_heights, ), ] { @@ -196,7 +196,7 @@ fn check_configured_network_name() { 
"Mainnet should be displayed as 'Mainnet'" ); assert_eq!( - Network::new_regtest(None, None).to_string(), + Network::new_regtest(Default::default()).to_string(), "Regtest", "Regtest should be displayed as 'Regtest'" ); diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs index 74bc59cf162..0303e1e7344 100644 --- a/zebra-chain/src/parameters/network_upgrade.rs +++ b/zebra-chain/src/parameters/network_upgrade.rs @@ -15,7 +15,7 @@ use hex::{FromHex, ToHex}; use proptest_derive::Arbitrary; /// A list of network upgrades in the order that they must be activated. -const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 9] = [ +const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 10] = [ Genesis, BeforeOverwinter, Overwinter, @@ -25,6 +25,7 @@ const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 9] = [ Canopy, Nu5, Nu6, + Nu7, ]; /// A Zcash network upgrade. @@ -61,6 +62,9 @@ pub enum NetworkUpgrade { /// The Zcash protocol after the NU6 upgrade. #[serde(rename = "NU6")] Nu6, + /// The Zcash protocol after the NU7 upgrade. + #[serde(rename = "NU7")] + Nu7, } impl TryFrom for NetworkUpgrade { @@ -116,6 +120,7 @@ const FAKE_MAINNET_ACTIVATION_HEIGHTS: &[(block::Height, NetworkUpgrade)] = &[ (block::Height(30), Canopy), (block::Height(35), Nu5), (block::Height(40), Nu6), + (block::Height(45), Nu7), ]; /// Testnet network upgrade activation heights. @@ -243,6 +248,7 @@ pub(crate) const CONSENSUS_BRANCH_IDS: &[(NetworkUpgrade, ConsensusBranchId)] = (Canopy, ConsensusBranchId(0xe9ff75a6)), (Nu5, ConsensusBranchId(0xc2d6d0b4)), (Nu6, ConsensusBranchId(0xc8e71055)), + (Nu7, ConsensusBranchId(0x77190ad8)), ]; /// The target block spacing before Blossom. 
@@ -431,7 +437,9 @@ impl NetworkUpgrade { pub fn target_spacing(&self) -> Duration { let spacing_seconds = match self { Genesis | BeforeOverwinter | Overwinter | Sapling => PRE_BLOSSOM_POW_TARGET_SPACING, - Blossom | Heartwood | Canopy | Nu5 | Nu6 => POST_BLOSSOM_POW_TARGET_SPACING.into(), + Blossom | Heartwood | Canopy | Nu5 | Nu6 | Nu7 => { + POST_BLOSSOM_POW_TARGET_SPACING.into() + } }; Duration::seconds(spacing_seconds) @@ -550,6 +558,7 @@ impl From for NetworkUpgrade { zcash_protocol::consensus::NetworkUpgrade::Canopy => Self::Canopy, zcash_protocol::consensus::NetworkUpgrade::Nu5 => Self::Nu5, zcash_protocol::consensus::NetworkUpgrade::Nu6 => Self::Nu6, + // zcash_protocol::consensus::NetworkUpgrade::Nu7 => Self::Nu7, } } } diff --git a/zebra-chain/src/primitives/zcash_history.rs b/zebra-chain/src/primitives/zcash_history.rs index e8ca97d63f8..4b52c85d8e8 100644 --- a/zebra-chain/src/primitives/zcash_history.rs +++ b/zebra-chain/src/primitives/zcash_history.rs @@ -276,7 +276,8 @@ impl Version for zcash_history::V1 { NetworkUpgrade::Heartwood | NetworkUpgrade::Canopy | NetworkUpgrade::Nu5 - | NetworkUpgrade::Nu6 => zcash_history::NodeData { + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu7 => zcash_history::NodeData { consensus_branch_id: branch_id.into(), subtree_commitment: block_hash, start_time: time, diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index 2438039ea1c..a13a8876efe 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -778,7 +778,7 @@ impl Arbitrary for Transaction { NetworkUpgrade::Blossom | NetworkUpgrade::Heartwood | NetworkUpgrade::Canopy => { Self::v4_strategy(ledger_state) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 => prop_oneof![ + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => prop_oneof![ Self::v4_strategy(ledger_state.clone()), Self::v5_strategy(ledger_state) ] diff --git 
a/zebra-consensus/src/checkpoint/list/tests.rs b/zebra-consensus/src/checkpoint/list/tests.rs index 5a2fe803f3d..1df05327d64 100644 --- a/zebra-consensus/src/checkpoint/list/tests.rs +++ b/zebra-consensus/src/checkpoint/list/tests.rs @@ -237,7 +237,7 @@ fn checkpoint_list_load_hard_coded() -> Result<(), BoxError> { let _ = Mainnet.checkpoint_list(); let _ = Network::new_default_testnet().checkpoint_list(); - let _ = Network::new_regtest(None, None).checkpoint_list(); + let _ = Network::new_regtest(Default::default()).checkpoint_list(); Ok(()) } diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index dcf3ff0d8f0..53874c45fa1 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -938,7 +938,8 @@ where | NetworkUpgrade::Heartwood | NetworkUpgrade::Canopy | NetworkUpgrade::Nu5 - | NetworkUpgrade::Nu6 => Ok(()), + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu7 => Ok(()), // Does not support V4 transactions NetworkUpgrade::Genesis @@ -1024,7 +1025,7 @@ where // // Note: Here we verify the transaction version number of the above rule, the group // id is checked in zebra-chain crate, in the transaction serialize. 
- NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 => Ok(()), + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => Ok(()), // Does not support V5 transactions NetworkUpgrade::Genesis diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 122dd57c700..f09e3f6a518 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -17,7 +17,7 @@ use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, Height}, orchard::{Action, AuthorizedAction, Flags}, - parameters::{Network, NetworkUpgrade}, + parameters::{testnet::ConfiguredActivationHeights, Network, NetworkUpgrade}, primitives::{ed25519, x25519, Groth16Proof}, sapling, serialization::{DateTime32, ZcashDeserialize, ZcashDeserializeInto}, @@ -1007,7 +1007,10 @@ async fn mempool_request_with_immature_spend_is_rejected() { async fn mempool_request_with_transparent_coinbase_spend_is_accepted_on_regtest() { let _init_guard = zebra_test::init(); - let network = Network::new_regtest(None, Some(1_000)); + let network = Network::new_regtest(ConfiguredActivationHeights { + nu6: Some(1_000), + ..Default::default() + }); let mut state: MockService<_, _, _, _> = MockService::build().for_unit_tests(); let verifier = Verifier::new_for_tests(&network, state.clone()); @@ -2865,7 +2868,12 @@ async fn v5_consensus_branch_ids() { while let Some(next_nu) = network_upgrade.next_upgrade() { // Check an outdated network upgrade. - let height = next_nu.activation_height(&network).expect("height"); + let Some(height) = next_nu.activation_height(&network) else { + tracing::warn!(?next_nu, "missing activation height",); + // Shift the network upgrade for the next loop iteration. 
+ network_upgrade = next_nu; + continue; + }; let block_req = verifier .clone() diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index 8fea9cf3433..8f3167e2bae 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -347,7 +347,8 @@ fn sanitize_transaction_version( BeforeOverwinter => 2, Overwinter => 3, Sapling | Blossom | Heartwood | Canopy => 4, - Nu5 | Nu6 => 5, + // FIXME: Use 6 for Nu7 + Nu5 | Nu6 | Nu7 => 5, } }; diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index b401a5d4c93..7e289928ee6 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -725,12 +725,11 @@ impl<'de> Deserialize<'de> for Config { (NetworkKind::Mainnet, _) => Network::Mainnet, (NetworkKind::Testnet, None) => Network::new_default_testnet(), (NetworkKind::Regtest, testnet_parameters) => { - let (nu5_activation_height, nu6_activation_height) = testnet_parameters + let configured_activation_heights = testnet_parameters .and_then(|params| params.activation_heights) - .map(|ConfiguredActivationHeights { nu5, nu6, .. }| (nu5, nu6)) .unwrap_or_default(); - Network::new_regtest(nu5_activation_height, nu6_activation_height) + Network::new_regtest(configured_activation_heights) } ( NetworkKind::Testnet, diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 8a27809ca17..7d96add03d3 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -340,6 +340,8 @@ pub const TIMESTAMP_TRUNCATION_SECONDS: u32 = 30 * 60; /// /// This version of Zebra draws the current network protocol version from /// [ZIP-253](https://zips.z.cash/zip-0253). 
+// TODO: Update this constant to the correct value after NU7 activation, +// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_140); pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_120); /// The default RTT estimate for peer responses. @@ -410,7 +412,7 @@ lazy_static! { hash_map.insert(NetworkKind::Mainnet, Version::min_specified_for_upgrade(&Mainnet, Nu6)); hash_map.insert(NetworkKind::Testnet, Version::min_specified_for_upgrade(&Network::new_default_testnet(), Nu6)); - hash_map.insert(NetworkKind::Regtest, Version::min_specified_for_upgrade(&Network::new_regtest(None, None), Nu6)); + hash_map.insert(NetworkKind::Regtest, Version::min_specified_for_upgrade(&Network::new_regtest(Default::default()), Nu6)); hash_map }; diff --git a/zebra-network/src/protocol/external/types.rs b/zebra-network/src/protocol/external/types.rs index c6241ba4d78..78148253ba8 100644 --- a/zebra-network/src/protocol/external/types.rs +++ b/zebra-network/src/protocol/external/types.rs @@ -106,6 +106,8 @@ impl Version { (Mainnet, Nu5) => 170_100, (Testnet(params), Nu6) if params.is_default_testnet() => 170_110, (Mainnet, Nu6) => 170_120, + (Testnet(params), Nu7) if params.is_default_testnet() => 170_130, + (Mainnet, Nu7) => 170_140, // It should be fine to reject peers with earlier network protocol versions on custom testnets for now. 
(Testnet(_), _) => CURRENT_NETWORK_PROTOCOL_VERSION.0, @@ -205,8 +207,9 @@ mod test { let _init_guard = zebra_test::init(); let highest_network_upgrade = NetworkUpgrade::current(network, block::Height::MAX); - assert!(highest_network_upgrade == Nu6 || highest_network_upgrade == Nu5, - "expected coverage of all network upgrades: add the new network upgrade to the list in this test"); + assert!( + highest_network_upgrade == Nu7 || highest_network_upgrade == Nu6, + "expected coverage of all network upgrades: add the new network upgrade to the list in this test"); for &network_upgrade in &[ BeforeOverwinter, @@ -217,6 +220,7 @@ mod test { Canopy, Nu5, Nu6, + Nu7, ] { let height = network_upgrade.activation_height(network); if let Some(height) = height { diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index d808575f0ea..eec3b816f5b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1207,7 +1207,7 @@ where // Separate the funding streams into deferred and non-deferred streams .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); - let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; + let is_post_nu6 = NetworkUpgrade::current(&network, height) >= NetworkUpgrade::Nu6; let [lockbox_total, funding_streams_total]: [std::result::Result< Amount, @@ -1229,7 +1229,7 @@ where .into_iter() .map(|(receiver, value)| { let address = funding_stream_address(height, &network, receiver); - FundingStream::new(is_nu6, receiver, value, address) + FundingStream::new(is_post_nu6, receiver, value, address) }) .collect() }); diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs index fc0805b533d..373ba2d7c20 100644 --- 
a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs @@ -217,7 +217,7 @@ pub fn proposal_block_from_template( | NetworkUpgrade::Blossom | NetworkUpgrade::Heartwood => panic!("pre-Canopy block templates not supported"), NetworkUpgrade::Canopy => chain_history_root.bytes_in_serialized_order().into(), - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 => { + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { block_commitments_hash.bytes_in_serialized_order().into() } }; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs index 6d64dcbdee9..e1973c7af4a 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs @@ -76,12 +76,12 @@ pub struct FundingStream { impl FundingStream { /// Convert a `receiver`, `value`, and `address` into a `FundingStream` response. 
pub fn new( - is_nu6: bool, + is_post_nu6: bool, receiver: FundingStreamReceiver, value: Amount, address: Option<&transparent::Address>, ) -> FundingStream { - let (name, specification) = receiver.info(is_nu6); + let (name, specification) = receiver.info(is_post_nu6); FundingStream { recipient: name.to_string(), diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 968d03cae47..d65ee68c5f9 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -2899,7 +2899,7 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { async fn validate_regtest_genesis_block() { let _init_guard = zebra_test::init(); - let network = Network::new_regtest(None, None); + let network = Network::new_regtest(Default::default()); let state = zebra_state::init_test(&network); let ( block_verifier_router, @@ -2973,7 +2973,7 @@ async fn trusted_chain_sync_handles_forks_correctly() -> Result<()> { use zebra_state::{ReadResponse, Response}; let _init_guard = zebra_test::init(); - let mut config = os_assigned_rpc_port_config(false, &Network::new_regtest(None, None))?; + let mut config = os_assigned_rpc_port_config(false, &Network::new_regtest(Default::default()))?; config.state.ephemeral = false; let network = config.network.network.clone(); diff --git a/zebrad/tests/common/configs/v1.9.0.toml b/zebrad/tests/common/configs/v1.9.0.toml index 11bcf62107a..98a4b14bf67 100644 --- a/zebrad/tests/common/configs/v1.9.0.toml +++ b/zebrad/tests/common/configs/v1.9.0.toml @@ -73,6 +73,7 @@ Heartwood = 903_800 Canopy = 1_028_500 NU5 = 1_842_420 NU6 = 2_000_000 +NU7 = 2_000_001 [network.testnet_parameters.pre_nu6_funding_streams.height_range] start = 0 diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index efd3c08875b..5134e79bc65 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -42,7 +42,7 @@ pub(crate) async fn submit_blocks_test() -> Result<()> { let _init_guard = 
zebra_test::init(); info!("starting regtest submit_blocks test"); - let network = Network::new_regtest(None, None); + let network = Network::new_regtest(Default::default()); let mut config = os_assigned_rpc_port_config(false, &network)?; config.mempool.debug_enable_at_height = Some(0); From 8408dc33ae2586106417853d2b76cb1a35d4b343 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 25 Apr 2025 12:12:11 +0200 Subject: [PATCH 159/245] refactor(ci): simplify Docker tagging to remove redundant tags (#9431) - Removed `latest_tag` and `tag_suffix` inputs from Docker build workflow, as those are no longer required. - Split SHA tagging into PR and branch events for clarity. - Removed tag event references and flavor customizations to prevent redundant tags. - Updated comments for clarity on tag purposes. Closes #7415 --- .github/workflows/release-binaries.yml | 1 - .github/workflows/sub-build-docker-image.yml | 31 +++++++------------- 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 21032d00881..f2c915c86f8 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -23,7 +23,6 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - latest_tag: true features: ${{ vars.RUST_PROD_FEATURES }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 2728c7ad4ec..3cf09071597 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -35,13 +35,6 @@ on: features: required: false type: string - latest_tag: - required: false - type: boolean - default: false - tag_suffix: - required: false - type: string no_cache: description: "Disable the Docker cache for this build" required: false @@ -96,28 +89,26 
@@ jobs: images: | us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }} zfnd/${{ inputs.image_name }},enable=${{ github.event_name == 'release' && !github.event.release.prerelease }} - # appends inputs.tag_suffix to image tags/names - flavor: | - suffix=${{ inputs.tag_suffix }} - latest=${{ inputs.latest_tag }} # generate Docker tags based on the following events/attributes tags: | # These DockerHub release tags support the following use cases: - # - `latest`: always use the latest Zebra release when you pull or update - # - `v1.x.y` or `1.x.y`: always use the exact version, don't automatically upgrade + # - `latest`: Automatically points to the most recent Zebra release, ensuring users always get the latest stable version when pulling or updating. + # - `1.x.y`: Represents a specific semantic version (e.g., 1.2.3), allowing users to pin to an exact version for stability, preventing automatic upgrades. # - # `semver` adds a "latest" tag if `inputs.latest_tag` is `true`. type=semver,pattern={{version}} - type=ref,event=tag - # DockerHub release and CI tags. - # This tag makes sure tests are using exactly the right image, even when multiple PRs run at the same time. - type=sha,event=push - # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs. - # These tags do not appear on DockerHub, because DockerHub images are only published on the release event. + # CI-only tags (not published to DockerHub, only in Google Artifact Registry): + # - `pr-xxx`: Tags images with the pull request number for CI tracking during PR workflows. + # - `branch-name`: Tags images with the branch name (e.g., `main`, `dev`) for CI builds on branch pushes. + # - `edge`: Tags the latest build on the default branch (e.g., `main`), used in CI to represent the cutting-edge version for testing. + # - `schedule`: Tags images built during scheduled workflows (e.g., nightly or periodic builds) for CI monitoring and testing. 
type=ref,event=pr type=ref,event=branch type=edge,enable={{is_default_branch}} type=schedule + # - `sha-xxxxxx`: Uses the commit SHA (shortened) to tag images for precise identification. + # Applied during pull requests and branch pushes to ensure CI tests use the exact image from the last commit. + type=sha,event=pr + type=sha,event=branch - name: Authenticate to Google Cloud id: auth From 94f2e98dc1805257e7800bed65dee280c0eebec4 Mon Sep 17 00:00:00 2001 From: natalie Date: Fri, 25 Apr 2025 15:37:15 +0100 Subject: [PATCH 160/245] refactor(chain): replace sinsemilla with external library (#9426) * Replace sinsemilla with external library (#7801) * Update zebra-chain/src/orchard/sinsemilla.rs Co-authored-by: Conrado Gouvea --------- Co-authored-by: Pili Guerra <1311133+mpguerra@users.noreply.github.com> Co-authored-by: Conrado Gouvea --- Cargo.lock | 1 + zebra-chain/Cargo.toml | 1 + zebra-chain/src/orchard/sinsemilla.rs | 154 +----------------- .../src/orchard/tests/vectors/sinsemilla.rs | 56 ------- 4 files changed, 10 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e437e15bdd..fea79f98473 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6293,6 +6293,7 @@ dependencies = [ "serde_json", "serde_with", "sha2 0.10.8", + "sinsemilla", "spandoc", "static_assertions", "tempfile", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index c43d37d2bcb..3d50b186baf 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -95,6 +95,7 @@ sapling-crypto.workspace = true zcash_protocol.workspace = true zcash_address.workspace = true zcash_transparent.workspace = true +sinsemilla = { version = "0.1.0" } # Time chrono = { workspace = true, features = ["clock", "std", "serde"] } diff --git a/zebra-chain/src/orchard/sinsemilla.rs b/zebra-chain/src/orchard/sinsemilla.rs index 6fa8bad751d..ca721d7f234 100644 --- a/zebra-chain/src/orchard/sinsemilla.rs +++ b/zebra-chain/src/orchard/sinsemilla.rs @@ -3,8 +3,9 @@ use bitvec::prelude::*; use 
halo2::{ arithmetic::{Coordinates, CurveAffine, CurveExt}, - pasta::{group::Group, pallas}, + pasta::pallas, }; +use sinsemilla::HashDomain; /// [Coordinate Extractor for Pallas][concreteextractorpallas] /// @@ -25,17 +26,6 @@ pub fn extract_p(point: pallas::Point) -> pallas::Base { } } -/// Extract⊥ P: P ∪ {⊥} → P𝑥 ∪ {⊥} such that -/// -/// Extract⊥ P(︀⊥)︀ = ⊥ -/// Extract⊥ P(︀𝑃: P)︀ = ExtractP(𝑃). -/// -/// -pub fn extract_p_bottom(maybe_point: Option) -> Option { - // Maps an Option to Option by applying a function to a contained value. - maybe_point.map(extract_p) -} - /// GroupHash into Pallas, aka _GroupHash^P_ /// /// Produces a random point in the Pallas curve. The first input element acts @@ -50,113 +40,24 @@ pub fn pallas_group_hash(D: &[u8], M: &[u8]) -> pallas::Point { pallas::Point::hash_to_curve(domain_separator)(M) } -/// Q(D) := GroupHash^P(︀“z.cash:SinsemillaQ”, D) -/// -/// -#[allow(non_snake_case)] -fn Q(D: &[u8]) -> pallas::Point { - pallas_group_hash(b"z.cash:SinsemillaQ", D) -} - -/// S(j) := GroupHash^P(︀“z.cash:SinsemillaS”, LEBS2OSP32(I2LEBSP32(j))) -/// -/// S: {0 .. 2^k - 1} -> P^*, aka 10 bits hashed into the group -/// -/// -#[allow(non_snake_case)] -fn S(j: &BitSlice) -> pallas::Point { - // The value of j is a 10-bit value, therefore must never exceed 2^10 in - // value. - assert_eq!(j.len(), 10); - - // I2LEOSP_32(𝑗) - let mut leosp_32_j = [0u8; 4]; - leosp_32_j[..2].copy_from_slice(j.to_bitvec().as_raw_slice()); - - pallas_group_hash(b"z.cash:SinsemillaS", &leosp_32_j) -} - -/// Incomplete addition on the Pallas curve. 
-/// -/// P ∪ {⊥} × P ∪ {⊥} → P ∪ {⊥} -/// -/// -fn incomplete_addition( - left: Option, - right: Option, -) -> Option { - let identity = pallas::Point::identity(); - - match (left, right) { - (None, _) | (_, None) => None, - (Some(l), _) if l == identity => None, - (_, Some(r)) if r == identity => None, - (Some(l), Some(r)) if l == r => None, - // The inverse of l, (x, -y) - (Some(l), Some(r)) if l == -r => None, - (Some(l), Some(r)) => Some(l + r), - } -} - -/// "...an algebraic hash function with collision resistance (for fixed input -/// length) derived from assumed hardness of the Discrete Logarithm Problem on -/// the Pallas curve." -/// -/// SinsemillaHash is used in the definitions of Sinsemilla commitments and of -/// the Sinsemilla hash for the Orchard incremental Merkle tree (§ 5.4.1.3 -/// ‘MerkleCRH^Orchard Hash Function’). -/// -/// SinsemillaHashToPoint(𝐷: B^Y^\[N\] , 𝑀 : B ^[{0 .. 𝑘·𝑐}] ) → P ∪ {⊥} -/// -/// -/// -/// # Panics -/// -/// If `M` is greater than `k*c = 2530` bits. -#[allow(non_snake_case)] -pub fn sinsemilla_hash_to_point(D: &[u8], M: &BitVec) -> Option { - let k = 10; - let c = 253; - - assert!(M.len() <= k * c); - - let mut acc = Some(Q(D)); - - // Split M into n segments of k bits, where k = 10 and c = 253, padding - // the last segment with zeros. - // - // https://zips.z.cash/protocol/nu5.pdf#concretesinsemillahash - for chunk in M.chunks(k) { - // Pad each chunk with zeros. - let mut store = [0u8; 2]; - let bits = BitSlice::<_, Lsb0>::from_slice_mut(&mut store); - bits[..chunk.len()].copy_from_bitslice(chunk); - - acc = incomplete_addition(incomplete_addition(acc, Some(S(&bits[..k]))), acc); - } - - acc -} - /// Sinsemilla Hash Function /// /// "SinsemillaHash is an algebraic hash function with collision resistance (for /// fixed input length) derived from assumed hardness of the Discrete Logarithm /// Problem. It is designed by Sean Bowe and Daira Hopwood. 
The motivation for /// introducing a new discrete-log-based hash function (rather than using -/// PedersenHash) is to make efcient use of the lookups available in recent +/// PedersenHash) is to make efficient use of the lookups available in recent /// proof systems including Halo 2." /// /// SinsemillaHash: B^Y^\[N\] × B[{0 .. 𝑘·𝑐}] → P_𝑥 ∪ {⊥} /// /// -/// -/// # Panics -/// -/// If `M` is greater than `k*c = 2530` bits in `sinsemilla_hash_to_point`. #[allow(non_snake_case)] pub fn sinsemilla_hash(D: &[u8], M: &BitVec) -> Option { - extract_p_bottom(sinsemilla_hash_to_point(D, M)) + let domain = std::str::from_utf8(D).expect("must be valid UTF-8"); + let hash_domain = HashDomain::new(domain); + + hash_domain.hash(M.iter().map(|b| *b.as_ref())).into() } #[cfg(test)] @@ -165,40 +66,6 @@ mod tests { use super::*; use crate::orchard::tests::vectors; - #[cfg(test)] - fn x_from_str(s: &str) -> pallas::Base { - use halo2::pasta::group::ff::PrimeField; - - pallas::Base::from_str_vartime(s).unwrap() - } - - #[test] - #[allow(non_snake_case)] - fn sinsemilla_single_test_vector() { - use halo2::pasta::group::Curve; - - let D = b"z.cash:test-Sinsemilla"; - let M = bitvec![ - u8, Lsb0; 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, - 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, - ]; - - let test_vector = pallas::Affine::from_xy( - x_from_str( - "19681977528872088480295086998934490146368213853811658798708435106473481753752", - ), - x_from_str( - "14670850419772526047574141291705097968771694788047376346841674072293161339903", - ), - ) - .unwrap(); - - assert_eq!( - sinsemilla_hash_to_point(&D[..], &M).expect("").to_affine(), - test_vector - ) - } - // Checks Sinsemilla hashes to point and to bytes (aka the x-coordinate // bytes of a point) with: // - One of two domains. 
@@ -207,17 +74,12 @@ mod tests { #[test] #[allow(non_snake_case)] fn sinsemilla_hackworks_test_vectors() { - use halo2::pasta::group::{ff::PrimeField, GroupEncoding}; + use halo2::pasta::group::ff::PrimeField; for tv in tests::vectors::SINSEMILLA.iter() { let D = tv.domain.as_slice(); let M: &BitVec = &tv.msg.iter().collect(); - assert_eq!( - sinsemilla_hash_to_point(D, M).expect("should not fail per Theorem 5.4.4"), - pallas::Point::from_bytes(&tv.point).unwrap() - ); - assert_eq!( sinsemilla_hash(D, M).expect("should not fail per Theorem 5.4.4"), pallas::Base::from_repr(tv.hash).unwrap() diff --git a/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs b/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs index 73d67b77570..ee04d799846 100644 --- a/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs +++ b/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs @@ -3,7 +3,6 @@ use lazy_static::lazy_static; pub struct TestVector { pub(crate) domain: Vec, pub(crate) msg: Vec, - pub(crate) point: [u8; 32], pub(crate) hash: [u8; 32], } @@ -21,11 +20,6 @@ lazy_static! { false, false, true, true, false, true, true, false, true, true, true, true, false, true, true, false, ], - point: [ - 0x98, 0x54, 0xaa, 0x38, 0x43, 0x63, 0xb5, 0x70, 0x8e, 0x06, 0xb4, 0x19, 0xb6, 0x43, - 0x58, 0x68, 0x39, 0x65, 0x3f, 0xba, 0x5a, 0x78, 0x2d, 0x2d, 0xb1, 0x4c, 0xed, 0x13, - 0xc1, 0x9a, 0x83, 0xab, - ], hash: [ 0x98, 0x54, 0xaa, 0x38, 0x43, 0x63, 0xb5, 0x70, 0x8e, 0x06, 0xb4, 0x19, 0xb6, 0x43, 0x58, 0x68, 0x39, 0x65, 0x3f, 0xba, 0x5a, 0x78, 0x2d, 0x2d, 0xb1, 0x4c, 0xed, 0x13, @@ -50,11 +44,6 @@ lazy_static! 
{ true, true, false, true, false, true, false, true, true, false, true, false, false, false, true, true, false, false, false, true, true, false, true, ], - point: [ - 0xed, 0x5b, 0x98, 0x8e, 0x4e, 0x98, 0x17, 0x1f, 0x61, 0x8f, 0xee, 0xb1, 0x23, 0xe5, - 0xcd, 0x0d, 0xc2, 0xd3, 0x67, 0x11, 0xc5, 0x06, 0xd5, 0xbe, 0x11, 0x5c, 0xfe, 0x38, - 0x8f, 0x03, 0xc4, 0x80, - ], hash: [ 0xed, 0x5b, 0x98, 0x8e, 0x4e, 0x98, 0x17, 0x1f, 0x61, 0x8f, 0xee, 0xb1, 0x23, 0xe5, 0xcd, 0x0d, 0xc2, 0xd3, 0x67, 0x11, 0xc5, 0x06, 0xd5, 0xbe, 0x11, 0x5c, 0xfe, 0x38, @@ -76,11 +65,6 @@ lazy_static! { false, true, false, true, true, false, true, true, false, false, true, true, true, true, true, false, false, false, true, false, false, true, false, false, ], - point: [ - 0xd9, 0x5e, 0xe5, 0x8f, 0xbd, 0xaa, 0x6f, 0x3d, 0xe5, 0xe4, 0xfd, 0x7a, 0xfc, 0x35, - 0xfa, 0x9d, 0xcf, 0xe8, 0x2a, 0xd1, 0x93, 0x06, 0xb0, 0x7e, 0x6c, 0xda, 0x0c, 0x30, - 0xe5, 0x98, 0x34, 0x07, - ], hash: [ 0xd9, 0x5e, 0xe5, 0x8f, 0xbd, 0xaa, 0x6f, 0x3d, 0xe5, 0xe4, 0xfd, 0x7a, 0xfc, 0x35, 0xfa, 0x9d, 0xcf, 0xe8, 0x2a, 0xd1, 0x93, 0x06, 0xb0, 0x7e, 0x6c, 0xda, 0x0c, 0x30, @@ -111,11 +95,6 @@ lazy_static! { true, true, true, false, true, false, true, false, false, false, true, true, false, false, true, false, false, false, true, true, false, false, ], - point: [ - 0x6a, 0x92, 0x4b, 0x41, 0x39, 0x84, 0x29, 0x91, 0x0a, 0x78, 0x83, 0x2b, 0x61, 0x19, - 0x2a, 0x0b, 0x67, 0x40, 0xd6, 0x27, 0x77, 0xeb, 0x71, 0x54, 0x50, 0x32, 0xeb, 0x6c, - 0xe9, 0x3e, 0xc9, 0xb8, - ], hash: [ 0x6a, 0x92, 0x4b, 0x41, 0x39, 0x84, 0x29, 0x91, 0x0a, 0x78, 0x83, 0x2b, 0x61, 0x19, 0x2a, 0x0b, 0x67, 0x40, 0xd6, 0x27, 0x77, 0xeb, 0x71, 0x54, 0x50, 0x32, 0xeb, 0x6c, @@ -135,11 +114,6 @@ lazy_static! 
{ true, false, false, false, false, true, true, true, false, true, false, false, false, false, false, false, true, false, false, false, false, false, ], - point: [ - 0xdc, 0x5f, 0xf0, 0x5b, 0x6f, 0x18, 0xb0, 0x76, 0xb6, 0x12, 0x82, 0x37, 0xa7, 0x59, - 0xed, 0xc7, 0xc8, 0x77, 0x8c, 0x70, 0x22, 0x2c, 0x79, 0xb7, 0x34, 0x03, 0x7b, 0x69, - 0x39, 0x3a, 0xbf, 0xbe, - ], hash: [ 0xdc, 0x5f, 0xf0, 0x5b, 0x6f, 0x18, 0xb0, 0x76, 0xb6, 0x12, 0x82, 0x37, 0xa7, 0x59, 0xed, 0xc7, 0xc8, 0x77, 0x8c, 0x70, 0x22, 0x2c, 0x79, 0xb7, 0x34, 0x03, 0x7b, 0x69, @@ -170,11 +144,6 @@ lazy_static! { false, false, true, false, true, true, false, true, true, true, false, true, true, true, false, false, true, true, ], - point: [ - 0xc7, 0x6c, 0x8d, 0x7c, 0x43, 0x55, 0x04, 0x1b, 0xd7, 0xa7, 0xc9, 0x9b, 0x54, 0x86, - 0x44, 0x19, 0x6f, 0x41, 0x94, 0x56, 0x20, 0x75, 0x37, 0xc2, 0x82, 0x85, 0x8a, 0x9b, - 0x19, 0x2d, 0x07, 0xbb, - ], hash: [ 0xc7, 0x6c, 0x8d, 0x7c, 0x43, 0x55, 0x04, 0x1b, 0xd7, 0xa7, 0xc9, 0x9b, 0x54, 0x86, 0x44, 0x19, 0x6f, 0x41, 0x94, 0x56, 0x20, 0x75, 0x37, 0xc2, 0x82, 0x85, 0x8a, 0x9b, @@ -197,11 +166,6 @@ lazy_static! { true, false, false, true, false, true, true, false, true, true, true, true, true, false, true, true, false, false, false, false, false, false, false, false, ], - point: [ - 0x1a, 0xe8, 0x25, 0xeb, 0x42, 0xd7, 0x4e, 0x1b, 0xca, 0x7e, 0xe8, 0xa1, 0xf8, 0xf3, - 0xde, 0xd8, 0x01, 0xff, 0xcd, 0x1f, 0x22, 0xba, 0x75, 0xc3, 0x4b, 0xd6, 0xe0, 0x6a, - 0x2c, 0x7c, 0x5a, 0xa0, - ], hash: [ 0x1a, 0xe8, 0x25, 0xeb, 0x42, 0xd7, 0x4e, 0x1b, 0xca, 0x7e, 0xe8, 0xa1, 0xf8, 0xf3, 0xde, 0xd8, 0x01, 0xff, 0xcd, 0x1f, 0x22, 0xba, 0x75, 0xc3, 0x4b, 0xd6, 0xe0, 0x6a, @@ -231,11 +195,6 @@ lazy_static! 
{ true, false, false, false, false, true, true, false, false, false, true, true, true, true, ], - point: [ - 0x38, 0xcf, 0xa6, 0x00, 0xaf, 0xd8, 0x67, 0x0e, 0x1f, 0x9a, 0x79, 0xcb, 0x22, 0x42, - 0x5f, 0xa9, 0x50, 0xcc, 0x4d, 0x3a, 0x3f, 0x5a, 0xfe, 0x39, 0x76, 0xd7, 0x1b, 0xb1, - 0x11, 0x46, 0x0c, 0x2b, - ], hash: [ 0x38, 0xcf, 0xa6, 0x00, 0xaf, 0xd8, 0x67, 0x0e, 0x1f, 0x9a, 0x79, 0xcb, 0x22, 0x42, 0x5f, 0xa9, 0x50, 0xcc, 0x4d, 0x3a, 0x3f, 0x5a, 0xfe, 0x39, 0x76, 0xd7, 0x1b, 0xb1, @@ -256,11 +215,6 @@ lazy_static! { true, true, false, true, true, true, true, false, true, false, true, false, false, false, ], - point: [ - 0x82, 0x6f, 0xcb, 0xed, 0xfc, 0x83, 0xb9, 0xfa, 0xa5, 0x71, 0x1a, 0xab, 0x59, 0xbf, - 0xc9, 0x1b, 0xd4, 0x45, 0x58, 0x14, 0x67, 0x72, 0x5d, 0xde, 0x94, 0x1d, 0x58, 0xe6, - 0x26, 0x56, 0x66, 0x15, - ], hash: [ 0x82, 0x6f, 0xcb, 0xed, 0xfc, 0x83, 0xb9, 0xfa, 0xa5, 0x71, 0x1a, 0xab, 0x59, 0xbf, 0xc9, 0x1b, 0xd4, 0x45, 0x58, 0x14, 0x67, 0x72, 0x5d, 0xde, 0x94, 0x1d, 0x58, 0xe6, @@ -284,11 +238,6 @@ lazy_static! { false, false, true, true, false, false, true, false, true, false, false, false, true, true, false, ], - point: [ - 0x0b, 0xf0, 0x6c, 0xe8, 0x10, 0x05, 0xb8, 0x1a, 0x14, 0x80, 0x9f, 0xa6, 0xeb, 0xcb, - 0x94, 0xe2, 0xb6, 0x37, 0x5f, 0x87, 0xce, 0x51, 0x95, 0x8c, 0x94, 0x98, 0xed, 0x1a, - 0x31, 0x3c, 0x6a, 0x94, - ], hash: [ 0x0b, 0xf0, 0x6c, 0xe8, 0x10, 0x05, 0xb8, 0x1a, 0x14, 0x80, 0x9f, 0xa6, 0xeb, 0xcb, 0x94, 0xe2, 0xb6, 0x37, 0x5f, 0x87, 0xce, 0x51, 0x95, 0x8c, 0x94, 0x98, 0xed, 0x1a, @@ -301,11 +250,6 @@ lazy_static! 
{ 0x6e, 0x73, 0x65, 0x6d, 0x69, 0x6c, 0x6c, 0x61, ], msg: vec![true, false, true, true, true, false, true, false], - point: [ - 0x80, 0x6a, 0xcc, 0x24, 0x7a, 0xc9, 0xba, 0x90, 0xd2, 0x5f, 0x58, 0x3d, 0xad, 0xb5, - 0xe0, 0xee, 0x5c, 0x03, 0xe1, 0xab, 0x35, 0x70, 0xb3, 0x62, 0xb4, 0xbe, 0x5a, 0x8b, - 0xce, 0xb6, 0x0b, 0x00, - ], hash: [ 0x80, 0x6a, 0xcc, 0x24, 0x7a, 0xc9, 0xba, 0x90, 0xd2, 0x5f, 0x58, 0x3d, 0xad, 0xb5, 0xe0, 0xee, 0x5c, 0x03, 0xe1, 0xab, 0x35, 0x70, 0xb3, 0x62, 0xb4, 0xbe, 0x5a, 0x8b, From 12b3d5ea41bf29778977bec3d8eb1c78aa8e8985 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 29 Apr 2025 14:49:32 +0100 Subject: [PATCH 161/245] fix(ci): use log exit code instead of docker stop (#9465) Tests in `sub-test-zebra-config.yml` were failing in some instances with exit code 137 (SIGKILL) or 139 (SIGSEGV) during the `docker stop` phase, even when the preceding log check (`grep`) succeeded. This occurred because zebrad sometimes doesn't shut down gracefully within the default docker stop timeout. This change modifies the exit logic to return status 0 if the grep command passes, regardless of the exit code from `docker wait`. This aligns the step result with the actual test outcome determined by log analysis and mirrors the fix from PR #8107. Ref: #7898 --- .github/workflows/sub-test-zebra-config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index b4b9da36963..6898d464b7b 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -123,5 +123,7 @@ jobs: exit 1; else echo "SUCCESS: Found the expected pattern in logs."; - exit $EXIT_STATUS; + # Exit successfully if grep passed, even if docker stop resulted in SIGKILL (137 or 139) + # See ticket #7898 for details. 
+ exit 0; fi From 7cd185855c56d10f1c24c1ea895b36fcc109bbd5 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 29 Apr 2025 16:20:54 +0200 Subject: [PATCH 162/245] chore: Update docs (#9469) * Simplify `README.md` * Remove a resolved issue from known issues * Simplify the PR template --- .github/pull_request_template.md | 3 -- README.md | 85 +++++++++++--------------------- book/src/user/troubleshooting.md | 2 - 3 files changed, 29 insertions(+), 61 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 57eb5bc62f1..70ebc94f6df 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -41,6 +41,3 @@ - [ ] The PR name is suitable for the release notes. - [ ] The solution is tested. - [ ] The documentation is up to date. -- [ ] The PR has a priority label. -- [ ] If the PR shouldn't be in the release notes, it has the - `C-exclude-from-changelog` label. diff --git a/README.md b/README.md index de401540ba5..416842bf967 100644 --- a/README.md +++ b/README.md @@ -9,44 +9,21 @@ [![Build docs](https://github.com/ZcashFoundation/zebra/actions/workflows/docs-deploy-firebase.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/docs-deploy-firebase.yml) ![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg) -- [About](#about) - [Getting Started](#getting-started) - [Docker](#docker) - [Manual Build](#manual-build) - - [General instructions for installing dependencies](#general-instructions-for-installing-dependencies) - - [Dependencies on Arch](#dependencies-on-arch) - [CI/CD Architecture](#cicd-architecture) - [Documentation](#documentation) - [User support](#user-support) - [Security](#security) - [License](#license) -## About - [Zebra](https://zebra.zfnd.org/) is a Zcash full-node written in Rust. -Zebra implements all the features required to reach Zcash network consensus, and -the network stack is interoperable with `zcashd`. 
-[Here](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-advantages) are -some benefits of Zebra. - -Zebra validates blocks and transactions, but needs extra software to generate -them: - -- To generate transactions, [run Zebra with `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). -- To generate blocks, use a mining pool or miner with Zebra's mining JSON-RPCs. - Currently Zebra can only send mining rewards to a single fixed address. - To distribute rewards, use mining software that creates its own distribution transactions, - a light wallet or the `zcashd` wallet. - -Please [join us on Discord](https://discord.gg/na6QZNd) if you'd like to find -out more or get involved! - ## Getting Started -You can run Zebra using our Docker image or you can build it manually. Please -see the [System Requirements](https://zebra.zfnd.org/user/requirements.html) -section in the Zebra book for system requirements. +You can run Zebra using our [Docker +image](https://hub.docker.com/r/zfnd/zebra/tags) or you can build it manually. ### Docker @@ -62,15 +39,7 @@ For more information, read our [Docker documentation](https://zebra.zfnd.org/use Building Zebra requires [Rust](https://www.rust-lang.org/tools/install), [libclang](https://clang.llvm.org/doxygen/group__CINDEX.html), and a C++ -compiler. - -Zebra is tested with the latest `stable` Rust version. Earlier versions are not -supported or tested. Any Zebra release can start depending on new features in the -latest stable Rust. - -Around every 6 weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases). - -Below are quick summaries for installing the dependencies on your machine. +compiler. Below are quick summaries for installing the dependencies. [//]: # "The empty line in the `summary` tag below is required for correct Markdown rendering."
@@ -113,13 +82,13 @@ Note that the package `clang` includes `libclang` as well as the C++ compiler.
-Once the dependencies are in place, you can build and install Zebra: +Once you have the dependencies in place, you can build and install Zebra with: ```sh cargo install --locked zebrad ``` -You can start Zebra by +You can start Zebra by running ```sh zebrad start @@ -131,14 +100,18 @@ enabling optional features, detailed configuration and further details. ## CI/CD Architecture -Zebra uses a comprehensive CI/CD system built on GitHub Actions to ensure code quality, maintain stability, and automate routine tasks. Our CI/CD infrastructure: +Zebra uses a comprehensive CI/CD system built on GitHub Actions to ensure code +quality, maintain stability, and automate routine tasks. Our CI/CD +infrastructure: -- Runs automated tests on every PR and commit -- Manages deployments to various environments -- Handles cross-platform compatibility checks -- Automates release processes +- Runs automated tests on every PR and commit. +- Manages deployments to various environments. +- Handles cross-platform compatibility checks. +- Automates release processes. -For a detailed understanding of our CI/CD system, including workflow diagrams, infrastructure details, and best practices, see our [CI/CD Architecture Documentation](.github/workflows/README.md). +For a detailed understanding of our CI/CD system, including workflow diagrams, +infrastructure details, and best practices, see our [CI/CD Architecture +Documentation](.github/workflows/README.md). ## Documentation @@ -158,27 +131,27 @@ The Zcash Foundation maintains the following resources documenting Zebra: ## User support -For bug reports please [open a bug report ticket in the Zebra repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=%5BUser+reported+bug%5D%3A+). - -Alternatively by chat, [Join the Zcash Foundation Discord -Server](https://discord.com/invite/aRgNRVwsM8) and find the #zebra-support -channel. 
- -We maintain a list of known issues in the +If Zebra doesn't behave the way you expected, [open an +issue](https://github.com/ZcashFoundation/zebra/issues/new/choose). We regularly +triage new issues and we will respond. We maintain a list of known issues in the [Troubleshooting](https://zebra.zfnd.org/user/troubleshooting.html) section of the book. +If you want to chat with us, [Join the Zcash Foundation Discord +Server](https://discord.com/invite/aRgNRVwsM8) and find the "zebra-support" +channel. + ## Security -Zebra has a [responsible disclosure policy](https://github.com/ZcashFoundation/zebra/blob/main/SECURITY.md), which we encourage security researchers to follow. +Zebra has a [responsible disclosure +policy](https://github.com/ZcashFoundation/zebra/blob/main/SECURITY.md), which +we encourage security researchers to follow. ## License -Zebra is distributed under the terms of both the MIT license -and the Apache License (Version 2.0). +Zebra is distributed under the terms of both the MIT license and the Apache +License (Version 2.0). Some Zebra crates are distributed under the [MIT license +only](LICENSE-MIT), because some of their code was originally from MIT-licensed +projects. See each crate's directory for details. See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT). - -Some Zebra crates are distributed under the [MIT license only](LICENSE-MIT), -because some of their code was originally from MIT-licensed projects. -See each crate's directory for details. diff --git a/book/src/user/troubleshooting.md b/book/src/user/troubleshooting.md index 09b31e4513d..3566fecadf6 100644 --- a/book/src/user/troubleshooting.md +++ b/book/src/user/troubleshooting.md @@ -4,8 +4,6 @@ There are a few bugs in Zebra that we're still working on fixing: -- [The `getpeerinfo` RPC shows current and recent outbound connections](https://github.com/ZcashFoundation/zebra/issues/7893), rather than current inbound and outbound connections. 
- - [Progress bar estimates can become extremely large](https://github.com/console-rs/indicatif/issues/556). We're waiting on a fix in the progress bar library. - Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses), we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release From 1e25546955e34cd10268fba36f1831f0ac113247 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 5 May 2025 22:48:07 +0200 Subject: [PATCH 163/245] chore: Add checkpoints (#9466) * Add Mainnet checkpoints * Add Testnet checkpoints --- .../src/checkpoint/main-checkpoints.txt | 244 ++++++++++++ .../src/checkpoint/test-checkpoints.txt | 373 ++++++++++++++++++ 2 files changed, 617 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 0e3ef46075a..8ef88229bb6 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -12624,3 +12624,247 @@ 2803206 000000000143df11dc312516155284d98a3061be2866e69354c5ab68f2d9bc78 2803606 00000000013c64db870430b5c9e3605bca034bebf15d2292197037beca9f326d 2804006 00000000007e3643c215f1175cb93f70ace9510e6e515745914072065c3896c8 +2804406 00000000013b118fd427d6e7bd696af3f52240c7d730fb78362c90bb6f034218 +2804806 000000000164fccac94c367ef498001cf8958daaf6dee4b5a7298d42902617f6 +2805206 00000000001bb5c820f715c7ba3018f4fceddd7397c9bcb7950e7d24e87b8833 +2805606 0000000000e9af0aefa50f5b532f85975f7ab0cad47a8d4fc0bfe3e28113aaa3 +2806006 000000000139b3ba7371443ac1961b3ef4ce4a6cb83473dd56408af160235b02 +2806406 00000000012edd0a88cbfee9b64d7cb4c4b9961961d402c5c62076df15fb0623 +2806806 00000000009288beb28ee9268557ce133aad6da2ad59d7ee6003196f35b00c27 +2807206 00000000003ca15e2090e12fe3131bfc599a86c71bc4527c275b4b1c6103ac22 +2807606 
00000000012aef3c7ecf780361874c27b78ec7138a7da1ad2693fd200209d40d +2808006 00000000009d25bbb24c5d54438e59133f34dc1780bfa8e251c774a795db2189 +2808406 00000000018eb6ba04985b84a90d8708973b45e8a497a992afb1995d6a464d0b +2808806 0000000000eef9babd4a73e076557452995cf6116bea53df8189bbb204c342a5 +2809206 00000000008476952c8a9d48d8f1f1cb3ddcc18c610d7de2fbcb4716e8bb08fc +2809606 00000000003c906522cc228b93a8e9fa7e541325303a8e2b412ba8a814a91135 +2810006 0000000000e85dee5d762086b89fd85b694f53eca7ef5b8ac03f73dfd93488e4 +2810406 0000000001cad5ecdcf9951e56d0a223601dacf9d8784c7f075382780a4b348e +2810806 00000000003300cf2f8222e0a16bfd1dd2ba77fe66382e7a27f256e64c722b88 +2811206 00000000020aaee281071c1948c46800ac8a53672f393a2f331505cb42116ae3 +2811606 00000000001148b7c999ef34c0499d009870eda76a4f9047e6c34a2cca4901d1 +2812006 0000000001d6edbb08a8051140c4c21342ea32dbbde4e9a2556786966e151bb3 +2812406 000000000110c42aa85dae5ac18ec47ae0870398c7a257206895767edd631ef6 +2812806 0000000001654e9270bd2844cb99c2ccee021963d97d1226d256e1ae95f7fcf0 +2813206 0000000001747407cd14f5ad98c230a2040f5ed3dfad00aed3af0674adb3b482 +2813606 000000000031aa880701bbc52d3b4fe65777ad54f510c8a5666a51c8310af5ce +2814006 000000000172d3efed427e08db33666586712be39cec209453ad0630984a05f5 +2814406 0000000001a6413326f78e57528a32b8868451e7f1eb472dd60861f7d24e183b +2814806 00000000003efc3eea293609c0ac331fa8a05ea6d57cfb60b4b7aa7dfdbabc8f +2815206 0000000001e0f383bf07fae523d8dc39a37ffc19d81657579b4dbcff3ed0bad8 +2815606 00000000017bf64347658d7900b12d7260b30d829ca06876a961f8bc0e22a805 +2816006 0000000001695723f6cfd18825ea64f1419a480e62f4f12fc2fddb34c7bda4e9 +2816406 00000000016bbb19675e5fc4d64ceed3ed2043d693c619cf26ebb91a519ff9fd +2816806 00000000001969b760770d73b1830fe82e42d51861c17f92b0ec243eef46e644 +2817206 00000000003c944f835b40735f5167a75bf2b7d9067accceafda6e52d3f93ee4 +2817606 00000000011bbd51c55c97827f0ae98e3e02b5feca4b4419a7029b5d07f934be +2818006 00000000005aada2dd11f842a64c1d54b21f285e95041f50c2546a96a6e9681f +2818406 
0000000000535f0ed61601d5bb2f07265f01e17b3ab3fcae260f8cc33f615d5c +2818806 00000000007eb665bb2c75b049f908e69502838b136e86b370c40519d745133b +2819206 000000000086eb561431c5b09407fd30ac76a62d27e71bc531ff6ad5e20c2bae +2819606 0000000000b6b9ff26dd8061dd3332dfb291bd201d879ae8042af69d73b7147e +2820006 0000000001089b3e4ea0d31ca6e9a02b655bc7289e16e04c74e1f0c859da96ef +2820406 000000000004d29a9784404cc1080a8dcbe28af97f77586717c5612f2f1e3741 +2820806 000000000183f85c9d5e5ab45ace94fe5b387d981302a19ceb49ec0f5a7ab020 +2821206 00000000016118ac04d3d3541dd52c587b977d4a78acc2f39d791c9932898da5 +2821606 00000000017fcddb1b50a5fb068e1db1b447d5722ae3e11614ce7d118cdcf5d8 +2822006 000000000017749590b39f182538ff18a4465e76edb3068473910cec43ecbb15 +2822406 00000000015f36ee3807b04318d05e5a9a3287b90bf9011e85815a79935fe6cb +2822806 00000000005652193ba710a6f97426493bf178c917425519160268543c0deabc +2823206 0000000000ab828825608a92b5827d388779d42e4dceb77dba538451553a45c3 +2823606 000000000065c81838562f96dc047da28bdbe3b3efed0dddbe5c1a05457604b3 +2824006 0000000000ac6b64ed89c5819d5a8dfa1bb7420a2926c651533481cb1e04077d +2824406 0000000000df6a46ae660932a3074489ccef76dc3032df9d95ea1025ad28ec8b +2824806 0000000000d63b99b7230eb163c0269c2e0e528838d2dd62c426bbd1e5aee072 +2825206 0000000001a3839379bcbe168f6e449c734941f75b2862a3782124ed64ee1867 +2825606 00000000012194a9637ce6c9e868355136b070774275abf2f052d5b7906f6810 +2826006 0000000001aaac984f6d19cfaf231984e6fc50ad55011a9423c73b83ee2f66b9 +2826406 0000000000be652d5befae2e4c19c317e920cfe2878306a20f8b9f863effa89a +2826806 0000000001cbf20e6a0fc5479e442156419a28389b58a7d5c32119a732ccb798 +2827206 0000000000f96eb3e6eaccbb10769f3b1b2a4891e7f8087fb7ea1514ebc8dd92 +2827606 0000000000ab48e1b9fbbacc6da743be81db5ae4930a1b639bf40eb635a8317f +2828006 0000000002167a1ef104011614b7495e83e9a61919b2f71cb636b3a90bfc2aee +2828406 000000000128775495aefe2bf273d6ab63fd421710989a824b01c4c6afd44f0c +2828806 000000000005888de79a68fd88e26c36be899e8101692e7f911961d0c3ed293b +2829206 
000000000041d889efcf06f48728bc389b3a836cf9d82f7d7081bddd43ffcaa1 +2829606 000000000103c98e0cf39bc02d89daf259250e99f902b69bbba046f4e368e93e +2830006 0000000000ae36e8be4825ecba4bdf3f585613e10125a36cf7e8ae56292f8171 +2830406 00000000018098369887fcd37ae07496c4066112d6ea4112322510f6ccd50f14 +2830806 0000000000ef8fa9528d5f3c0c82aea637926c86b1023164e1352a7252930acc +2831206 0000000000dbd5f5fa1a7c4d350f681768bd6e66915b6d28796d960bf1933603 +2831606 00000000018317bcc216479a9108fa3c70f37acb2712adf653973c0fdb3a881f +2832006 00000000014ce299b20bd594ef097e5161981349139fbebbc6fc1e3b8d9b2a0b +2832406 0000000000711c91fbce810c9f12025dcf4d7f0b3c4ec3206ff546f70065ddcb +2832806 00000000019220df314546f27f983e54624b8314c451729ffa3c9333e56b6f37 +2833206 00000000011adf693fa11f6707195bea10ebf9ac8247363eefe93ceef0d1a300 +2833606 000000000179b5f52726e43e11a6fac5a0531675c501819849b8557b9ff821dd +2834006 00000000018bfe09e1fc2c7e012390a51c9ae040253b90ee6b346dcd54834331 +2834406 00000000013577e3299d5396d0f19930c8ba6ca0a1e27842603a13122a707dc4 +2834806 000000000096e7ca20eb5af9789aee57fd219a38e9e7f92388914d58606cc5d2 +2835206 0000000001408eacdfa0a37da875462af20be13dca4203418aadda4849177b23 +2835606 0000000000066683a77eb5918122c352f39801a76921c3a08b4c3871e458e9b9 +2836006 00000000001ad8202f90a9e7f4c088dc4c381801776842346c4c1ba923ca88dd +2836406 0000000000779522430aa9d1e4559fd8ce9eb34e19ff28cffdc78c561775bee3 +2836806 0000000000ed7708e9b2826c48a8a4f61caf10c0b56946f7e3076ae9215cda4e +2837206 00000000010ce7106d696c2c79b04bdd2d4df6ac4c575039d10f93580b2f6620 +2837606 000000000032c55618c1c66d0ac3444cb4b37010bf24446e21616559c1b08c0f +2838006 0000000001e1adbe94af654bf91d5971f7a1e13bc59cc68e8a08532ffe50f079 +2838406 000000000193b6f63d16fa9a5edf8b5dc22ba9a993fe9204c7e508655e9291d7 +2838806 0000000001dd3d3933e63bbe8d18ea8c7af37068358d8dabec2e8938f06eba26 +2839206 00000000005a6eb409272d460b30514ba51d58b38574bbb4aa458a2e5e07bed7 +2839606 00000000009d0714b13b72cfadb7c7a4129ca2cbf70f2b1832fd31711716a259 +2840006 
0000000000476fd2534026d2bdd55a628969d570de3a8247d50df6ce3ce79447 +2840406 000000000176d2a0ef45310cb9721495533ef6a3bb337c9f260e52743c2cbcc3 +2840806 00000000013eee5116c1e4052f0aabb762d1762863173356743eb84026f81684 +2841206 0000000000fd9959a4dc51b1c7308dc3c7c045b2e6620795d84cec7ef4b583ee +2841606 00000000000a632aecc5d3ab0c11b2ce3202e77440fc58ef630644270e06a9bf +2842006 000000000145cd68f4302b77a5ba1bdba51db1af6727c400ac2366b759852d21 +2842406 000000000192659a14190a10a3868781990d068859ed7d5d943467d41b9046cd +2842806 0000000000d9fee83a2feb9ff55fab61b35fb7e6f80d65c996bc59a9d908c47f +2843206 000000000197e2c21983dd6a846fa82c23b6f025d265ef03b7fc4c13910ae1c0 +2843606 0000000000a2f1b96e52808c0572e03e9b13048c795fe05c450743c58e694d9a +2844006 0000000001050c15734949f2662dc99e99b994e9b737f45294d78b6b4d3ddb0a +2844406 00000000011c9da4a7b433029c5d0b03d6546b02e743d762d84ee742e2a373c6 +2844806 00000000010557a87dc3c5b25b647d793afba738fc051336e80dd061dded5038 +2845206 00000000003c9a495f48392c0a847aba4ac66fb0b5d0f1a1d02561481fefe909 +2845606 0000000001a06f0fdba41ab7af4552988268d59d1724263bf90970dfd3ce554a +2846006 0000000000d0fa38f23eaf2c59fe3962a0f9addf54e241ff21ace0e10c0e9407 +2846406 000000000050f62091255c9fa24894d1788987dcbeedaee3781d2ce1a6599b07 +2846806 0000000001862805de9c5b437c9bddefe1dd1c279a3107af5ac7e553933aefda +2847206 0000000000f7af88abd4c171dab52795a9e5b38ecd70d64d4f3bafb804f5693f +2847606 0000000000c2d21c524778c7948f22d38413c7ea7de7639b059b3c96d429f759 +2848006 00000000015966fd523fcbc02a3bf8e46d2d9bbcf0d03e718f082aecd5e3ea72 +2848406 00000000017b4f8deae14f57cd0e80a850be9907e35b2b15c3b3f413aebb3c61 +2848806 00000000017e28e68835f571dccd18b9d5b8e7423a24524fb9f7e9d47417b3fc +2849206 000000000005d3fbbd04f5e1f229e5b6b5110143028bb54701ffcd19d1110cca +2849606 0000000000823823c34fa16bf8cce1f9b1fd8f39a9244dfad2e0fedd6a130538 +2850006 0000000000101d2e80c35c736c29e153e00e695c5989aa2d07c4c7dba7532efe +2850406 000000000016bf9cc5ac54d165d1270606c9877357d984bc24557f7df8d9a62e +2850806 
000000000056f8c646ff3580e7fdf144ed17f4187cf2f846c641cac046747f4c +2851206 000000000137abd16c380467b5a9e13b6ae0c8262a5c71772b8256f59d1f43bc +2851606 00000000015d4354b431215703a5722557aefdf2383a62daa7e38908b42ca5b2 +2852006 0000000000cd11ecfbb600046da7d7537bd7c597a9e020d881eb22952ef3a146 +2852406 0000000001a6b041565864a56cf71c483459c0e70f0c91fef3643bd34ff48f11 +2852806 00000000016402331e5914bcdb93d6adac71e5ff9edb7b839de405678d8f0ec3 +2853206 0000000000b3ed0dea297be392c4a738acdb511e91aabd8b6e5e0ea2b3d945bd +2853606 000000000050e3875215b280ce97870a02e8ea442c9b90bda6d4ea6c06eb0749 +2854006 000000000151db74a91b0d1b65ae3132f3e0b3bcfb46795190b5d16ad4bb5c61 +2854406 00000000017eb1218f435098f779abd94b895263ffcd7d7e1a3f02c878a7367a +2854806 0000000001e435fe0a9fbed4813c791402cf979e71b0c965eed04c43ef4b0eb9 +2855206 00000000002abdd893787b2d0a32e9d56d1304e4cf2d8eea406033c4a268e3f9 +2855606 000000000123742568ba9a0e344830b6615325ca341280ced30463455afa3098 +2856006 0000000001a468db413b4e71ab3ad930fa3234ca3270298ae8d526bee2ce1b72 +2856406 0000000000b6d586f78a0989dc17df490b26e41dcc2eb47fcee9f6f6449db40b +2856806 000000000093ab7e59fa67153d7fbba11b9a1067206a2249832263c51833c5a8 +2857206 0000000000e977b43ae213b21730c50d609020692a510a9cf3bca756b9d72ce3 +2857606 00000000001e54400b9ef9617d7e5517ce0a53c8087aa74ff8f3afcfd4314ddb +2858006 0000000000c4751fbf56b0c601494127ce1babe9059ae3e39fd24e024879190f +2858406 0000000000ff843ffcf97f0464ac806942265ca6f56709e5a62af6cebb8ae199 +2858806 000000000115c07c71291afa2cb1081d0f2a68d3cf7b9244d66ba0e35e68f701 +2859206 00000000010644660fdc0fb8b616c00e2d43e13d1ce77697464cc02906af3053 +2859606 0000000001789d76f30154e77c41bf54d5bf74d5de350367d63fa38fd312048c +2860006 00000000013410032665cdff70091fdcff5dd76a3a583bf7373b6ef31a553ceb +2860406 00000000005900c65bd75e2173d790492798e90dddac5561bfa32a9f0947341b +2860806 0000000001db10b45aefc7ba48be8bf207e7de07122103e2542c17f68733f7bd +2861206 00000000016aad17393124ab0e22983101059c3156e824da01be7e1aecc12712 +2861606 
00000000016604690fc2519bb9941ab955d265ae695c80a7178d006a13aca1b5 +2862006 000000000139db578e15eb57910bd226db9b6b49508a1010eeeca9beb24f807e +2862406 0000000000e4ce65f26039519cdb58690c968fa895a61a5237247a38d95fa45d +2862806 0000000001358509977e066b693d545d2207ec2d4d343aa601e8370f6aed27f0 +2863206 000000000083a0ce99732dc8543c08e7e8716a512c48832f6931429d9b40b798 +2863606 000000000157840a2e87c62a1c891a0c03ba591e6c20bf910aed27aa0ceb83c7 +2864006 00000000019f3e2d5a10b9937641b7165e1f298e4fcf8c6c4d03e3707458d4a9 +2864406 0000000000de31c311388e6edddf1890507727d06a84c9b1514f98a1e9cf44df +2864806 000000000125fef769f4f4dd68fcdb11f4437250ef40e057f0ae0d530ecb8eda +2865206 00000000010c62beca4b0f4c9b7b60ac4105fe490c7068b78fe60004ff1377cd +2865606 000000000053e86a850d249fe0c13290bb703bbc4c98db71f38377f974d8cae4 +2866006 0000000000de04f98541ec6ea5b5c3513d7ce76ea74d3afc5d376a165a46d585 +2866406 000000000134e188f2d86f829743c7f65f90ed1922da1f484efe23ffeb44b6f0 +2866806 0000000000fb38327599fa207bd2b4f40df2d85490b8940d19ee951868299a2e +2867206 000000000029c94bae4abf3b927c81f0e45348755734d66f395177e807a7367b +2867606 0000000001102af3dfae778bf74f57f6ca88923003cd783c9215318aa7e19992 +2868006 00000000006d7f0757b51894de2606d0e423ba90807d3202046d21a09da2a6f3 +2868406 00000000002a8b2d496bbb5d22e18971a30d372ddd664ee37121a2043473c065 +2868806 0000000000f99f2ef7d0f993c54871d934da5b102e3b465fb0e49f0a9c501e10 +2869206 0000000000ccb00b472f2a8e656a695a19cab6e1fa9eae41923a3967d12f729e +2869606 0000000001f67013dfa5905c3241be1cb9dcff66ba5729a4d5d72107eee7456c +2870006 0000000000d0ef2294850acdbe2793e3d49f114c4f4dd00b476c5a695d73679d +2870406 000000000191f2b49866956993ee261425e4023c899ac700f90750bde65e6bdb +2870806 000000000147834550fb8de0d7852228cecc424b814b6de76551264f0a39777f +2871206 0000000000c4d381db8c011660072e74fe2c367780d69c0d6e288007a0069cd2 +2871606 00000000018fca2a53d5a57a8df00fe4da51b40ca16244bee9851e87b2726426 +2872006 0000000001a1dce75de69b620dc110ab37c9feb649eeaf7e9670d5c9e4af1c1e +2872406 
00000000009e774d23f156d90530529a5597ede0ee9d04458aae55f4ebfdd4c3 +2872806 00000000013dd6c65d627fbac8d1149e404a70d1e9878df7cc8cc2aae9226e3c +2873206 000000000100296cb37ca7cfde3ee1088bfaa6020a2ffe69190bb136861a4b1d +2873606 00000000015277ae80fa300516bda88ba822f09abaf523f43fc5294e01d3892c +2874006 000000000045271e8a96033dce1049de528319c3c9dcb79434ddf7ae5e8380da +2874406 000000000163b1b294d5ac4d80f33be6005063c67190830112162ede7d0c22ca +2874806 00000000000fde30d6f5a36549a0919fe69328cf6501f522f88aec9ebad84118 +2875206 0000000000d21c0b94d8ff73d59def79fed678c6b5eb5d8491c683f68642d4f4 +2875606 0000000001434cdcbd15accdd9f4551141dc0ba4d6759ef0a961c713d56a8e98 +2876006 0000000000be9c92c9d8bee4dd093897fbd393c1933a0c6ff753f509c44fdecd +2876406 00000000005fda179c89b5f03bf02ebf4007bcff994c30fbddf56db39b2a0e16 +2876806 00000000009f387ce7337077a996eb7b8fb3faa1c38ed03c493de5a0dc5496cd +2877206 0000000001aae1e305db3335ddc9cde7b12e04a2abb0e970ff8c070200b04b5b +2877606 00000000013da7e2ed3104a4c131b7aae29dd8175b176e648b61db9070558774 +2878006 00000000008647e6de2638401fdf2e785804ec619ba647f28683cd8e97515ca8 +2878406 0000000000d0804e9f2db406ebdb1566fae20f46ab3bc781517e2b919f98776c +2878806 00000000010717ec216dbae9613c9e097906e30a24c8c765176e2da4219e01bf +2879206 0000000000dfda6efc38ed3f6530d6e0716a9e17e217b1868c180c08373c8520 +2879606 000000000051f2bf5959d26daa177b352bf1779fb87eb0f789ee78f35459af1b +2880006 0000000000966609fcb8da57dcf9b77c3856a5d9239d6e1cfa64b5b992b681fb +2880406 0000000001791f0127cdfb75711b1b952c395843604bbcf5ed97918745be9042 +2880806 0000000000ddc8ba9b14a143cd2a15251d6bc62339044bb669230522cba0a2f1 +2881206 0000000000a152ed80cf98f2d7b40506ad24d40811aee88e844684d80b80c242 +2881606 0000000001831b5ff558e11cbdbd2e7833c93d5105de3bbb1cbd451585f61e2b +2882006 00000000014ac8cab792b024a063a49956af07d4dbeb6c1a5b84c5b5c58ca991 +2882406 0000000000fe52372f2bf1e9e39d565ecac9b676288da6a00f3dca79ee23d187 +2882806 00000000006cef9a31344bf5017b15a7a3fea6bca8cbbf354c1f0544fd25a5d8 +2883206 
0000000000fc476e5cc08c02738969dab22271bdcff5ac87495005e4c5b57778 +2883606 000000000185df4299b5fb34f1dc7ad61f7468a5182d8d7c6cee29e4a3ebf9a5 +2884006 0000000000409e5f9f0e16fd54787c4aa521ee522a9705cd34422eaab847e3e7 +2884406 0000000001bc24204826797cdac96ae4d1d5cc4f6ba13b23b3c8c955fd561f66 +2884806 000000000105080bd89fc45fd090988937b2e4c08a9582488feed52c754ffc60 +2885206 00000000001fb314211cbaac3cf229577fecadf043bc4d62b05a5593d8f2c4d0 +2885606 00000000002cd60737d3c4832046f97cbe3c681bb06590da638dc36d25658fc4 +2886006 00000000006061476a71d5f9cae8a6bbf8b150c99f6807ba51465ca063b0e507 +2886406 0000000000f7be265f0b580313cb41d84ebe47027cc48025335eabcf4c2f5eca +2886806 000000000187d177b61da52d5e9e1bef5cc7941df78a1ee7bb41567366d00f1b +2887206 0000000001803d8025d0ff5afda2c58e0ba65752ee7117c954805d75bdee740c +2887606 00000000003db7338cca6430fd9e2eacebfd8680420f6539655cc8e6c1d2713a +2888006 00000000020affc82689da086a5104bbe1f44a9ac0a1c98057ca21f97c0a50cb +2888406 0000000000fb18ba329716896d4c8183e4fe820e78691dcc952dfe524736a98a +2888806 00000000007e3b90ad872bed85558a7ee894b202e00ed4854b5cc05377bf88a1 +2889206 000000000265b2625e8f27272d96a68b20980833cfd30e4e1e8fd0b312aff7dd +2889606 0000000001ba4d835d43da29e10b4b5afe3d4728d6e0c24e7042ad4a503f76d3 +2890006 0000000001307601c59be5217928108978c7ec7865862a0cf59bc6342313d1f8 +2890406 000000000040569a8cae7f50b83cc5fb495a550aa1a3008ac396e35b0191b80d +2890806 00000000013cf30a27091422d2c0a2b93860a79c1181354370b768e75ecc49cf +2891206 000000000083e81f9ecb3cb7c27482fd4a1eb636637887f969ba3cff1efc55d5 +2891606 0000000001257fb8d7f687e782624f8fd01a5e6a375b1214d5f9144ec10ee515 +2892006 0000000001fa35c8500b4d57377b9cdddb81bd31cc09aa0c0fadf547c6e116d3 +2892406 0000000000dac1837896e50d452169a1126f5a68ed7b6d4e2a200aca3f8bb5f2 +2892806 0000000001377719ac2c85156dcd9c0de8b3f5e2817edfe42f7613f46e4310a3 +2893206 0000000001e1634c7cd9e7dce10d4833147f300a555ce06c20e2604f0b81da9a +2893606 000000000146aeddf13262d40bd672f1fa79a75040220a940e61966c505abf28 +2894006 
00000000006fc0e03ae98e3f1a7d9b3c67af59b7d6774e7a99a55b0b59ed325b +2894406 00000000006cf2c66b45e3f054ba1d0a20c133c921f8d161447c39fd2dcd781b +2894806 00000000019e3d575c8f29f23eac2ab73de57ae2a62ad77279fd08cf4aa28a46 +2895206 0000000000d0ad57d1eefb0c88236108306c87b79d2a63c82d8e9153cd072c0a +2895606 0000000000e19c6bc453b7753fbb938c19d4d6ea213e90d8bb1d402245c22452 +2896006 0000000001ac6a2f47f12ad5a055f299c6c9a60176a7a4a7fe3467934a2d5d83 +2896406 00000000004026de7f8f1e0a9042b1658471fb0ad8bc4cfcb1cdc0c56761650a +2896806 000000000168d7dcd5947621805f4292ee0419de0f36191423903a14145856de +2897206 0000000001a9fc9322f9056dc947e79f451eca492a934b5a7b8010ad349d2b70 +2897606 0000000000dadb6e9b561b880ef1d431eaec18804e548dcb334f721bd9ad977a +2898006 0000000002e2ea6feb6c81591f97487bf29b40cb0d4b2e12fafd3a5b0afaf68b +2898406 0000000000d54bbdf3cdb556f466facde22064e6699630e4d44fed9ce3b0902c +2898806 0000000000703a22865e98c519cc943bb1b79d9b58d30f8296006629cd7a8bb6 +2899206 00000000010fe32f9b2acaed1a067224a2390776f0fec7882cd43be4fb101a32 +2899606 00000000015c8e53957f55ae4d50d18887c4c7602e6a26afd836e40861734f6f +2900006 00000000002c72d5aaaf4bda796cb2b303d08cec48a6fedd8d66b74183da212f +2900406 0000000000ad84c34e1162e57a8b5e3dd381e5fe4f39f1091354989234959f5e +2900806 000000000173e7ba7f21017511b633b19fc1373362d081f2f9ac69c00adf8f19 +2901206 0000000001b0090eafabf9ac8fe4ff1c082e8c7ffc1f7a79fca1f12bf17b9c9a +2901606 00000000015243c6b8184297e045634fc17a18dd77f3f902245994c886927c86 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index 9ebb1282ec6..8b88826d2d0 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -8040,3 +8040,376 @@ 3215600 002a34a04dc75173ce838e0146b78ef0419add9bdcb852867d5efe6af0b92e95 3216000 00116b95243e992cc66091d2b9de0ae07c5f75439bbbe92123ebd7f6a48fa816 3216400 00c93a145bc882ca20dd4f622a718fcff52bd1ce09577a2dd8a7f4308f46afdf +3216800 
0009e96dfe1d29e7024bf8d5d2754216582b7fd18777ebdee03eb4300e713066 +3217200 002f500098c01bb6c79b4eabb3cf61b5407b79110afc7bb70ac788802bef6736 +3217600 0008cfe10727d3fe79412790c7af112d059ae2a02999cfe12aec3e810eb6329c +3218000 00896638a6ab49f7883fc4bf5cee5566f56ae75c4b53cb5ff84e877f14370d16 +3218400 000ccebed662c3bf42c2e2017c361d1b3b1c6459f477c8f4764e30d048dc5c15 +3218800 001296c377da62b48e7a61819cebd28b42bbb672d647582ef1b3f30b692773d6 +3219200 002a263f30ebf555841d197dd97f344642d3171f03a95591f591d2c0c781cbc2 +3219600 0022d3485c31486fa928267b3ddac74822f64dd2216019648f5fc202f6a2b505 +3220000 0087a51cd86875ae291c1fd104839588ab6e1d6f7052af1c9b0fe1853309d69d +3220400 001d60d1121c27260accab951f5487fbf002d991a322d8725ed8d73d76524f85 +3220800 0019d222e36321f0f79ce58e66f1d18e050b36b9aedb239bdb1f477009b5573d +3221200 0029841dfccbf719b579c2c40becff0c48568dbe8f2c46fb551f20a848e7224b +3221600 000d518b535e8f4abc33fcc7ce8083decf6f51262ebb2b5d544b9a5b5c3c7490 +3222000 0080a5670d0f70e0a82300221f7fef9ce848e06570098b042e5a0639049f2596 +3222400 0021db2f23894ec6cf9f9cdc6a5fa93e7320b253fb3dad820de36832ace4d787 +3222800 0001229851b8608be6d564ee988524538d6d83b47d6559b4e0fbdbfda267d910 +3223200 0004e0c19dd061f7541434b28df0161d1da1c1719d9d1ea7cc4d622f05a1c3d7 +3223600 000da31de8b9bc830b3eefebceb8322630cb758b60abe6586f513a28f3b19f86 +3224000 008108a968452d7ec61616e1fcb6dc945ec70d6006c4d515f324b98bd266a9e2 +3224400 003963540da613d2ec813e6175c2a8ed62d7d74f9613207f02f3541aa6127773 +3224800 00322ada25424e3455320b2110a21daa99cb2c59e68778cec3d1fa6112e7cc16 +3225200 000cd6c9d142d9c82351ddcb8be207e5c06634744dba78c58bd6fd259248b522 +3225600 00003aab66615b7ce8c50870b5db1601d09df012463b2141f1404f97b88f320f +3226000 001fe20dc6503849f222d99d870e889a0ee29fe188bbe8f4a6d009a83f3cb704 +3226400 002669f211464032fa4333b8ad6ecddd277c04c1ba362ffe20213e70bba28a38 +3226800 007e548f7706511ee80d0465c42e533842295ba7149a2344cbe822a586b1b7ee +3227200 006b49f22d2d12122ec85a47e14aa81dc0d7ce0b0797416bac23d53dc93b7cad +3227600 
002ff148de84665d675a0d8250a92397be3b513d84fa0f963c6e5fa6058091f0 +3228000 000c5e994073ee27b4c1396c0b81d5ed974f52321aa8d0148455ee352b87a3db +3228400 0022503ca1a0fddd23d0df0b6572c359f2e5ceab1433863640e6cad2c05118a9 +3228800 0000d952fee72fb0af2636a2546937cad060767d765fb070debf5d22f2477160 +3229200 0008f31f8a724ae432036d995e5a3a573c76555927ced18c36a91607f917aaad +3229600 00eb59edb7099fd76b7043e321af00788fc387fc92c6fa3209bc1063cc0033f4 +3230000 000943b552c6802b91e82bea575559fecaea34901d0142e253065b549b7d2bc8 +3230400 0008cd9ff3010e66ef4df3826c5412e4618970ed79aba5249782a477722dcba7 +3230800 000c7542b37f9848a3639dee121f994bfbcab980f0524261718119c7ea872324 +3231200 0032dec68c959332a3a4b0f8ee379576dac0189c0c4e42470fd2b83a5c9f19e4 +3231600 00755e5cf7dea06e2ef2d416c92da26cc18ff2d6f3fe05020b6cfa155b3c242f +3232000 001e8f492c0b22f5b08070866619354ae087ac4699ef15c7f2c55045aa4db811 +3232400 00a13342b3b912a301b4aecc3748fd79a55c2fbd7ab64b9e55b1ba40d160e704 +3232800 00de0b69d4490f56fcc25e2d4d07dbeefbd3bf22bfedb92a59e963da1a04dfca +3233200 002be3dd2ccdb3c2ca2c5369a6eee1349414e00f48c7b1d4ae18c97404c09b67 +3233600 0004e08612691ce1ae8cb287ad8bb79cd3203c498a16745cf39b5d9477d5eea2 +3234000 001a5123580ef4a572ee2190a4a6dae05fe05c2b2add24757d0a19590cbacc49 +3234400 0049892488b2804f483f5645a678e035055818f9971eab88c17d64bd4fe33cde +3234800 00161054a34c8fa286020f2d9ce3478bd8bade3cf5c9edbeafff8a51e29c5439 +3235200 0018ed6ecf0b1d32dfcd313877f0b53915656dbae59a8dcd1aaa873444f0fa97 +3235600 00242de56aca8ee960dfdc44553d6375a429bac035cd4370ad8078e1a78afb22 +3236000 0019e54deb9e5ced3e4e3d2dfc0eb056b14e6def9159ba049b6b83584dbaad56 +3236400 00470c186764a895a3ac58e81fe426b088deef8232e16939992a3c472b9c4d45 +3236800 0003fac018644fa3d7e592b7d41917390b84b1be22b699f166575e3b4c6f7550 +3237200 00019367acda59e7f8a52fc90767a5cfdea576ef7898b82b46284f600edc0a0e +3237600 002d3065cc39ecf76f5afd47b3c9b48bf2b81bfe0dcc4ac275efd137850563ff +3238000 001e66326c54412759a5f9aaa906c619d47f4e5c57aa6d228be1264ff4e4a5f5 +3238400 
0087d370531f77583afc08d6323c5b41942d5e3cef3cff07b5508a2d92fa4dfa +3238800 001327477815e649a3f83e59cbb8d4a6aaf3209ad50717e0ffed55e35ca070e4 +3239200 0030c00f1f4a1e939babaa7a7cb53abfcb73ada8d44725a667576edc47a158dd +3239600 001eb0e45d759cd22793892ecaecbd33cfcfb98f5b70e9f8f108e7f6753e26a2 +3240000 003260a507a7bd077f18d5bc57fc17a66f8f9574f5726f79ba426d9eb321cf44 +3240400 0018d6f33c0871ee8e1a252b161276ff06a8bce9c7fd222b92f952bbe3f6aa49 +3240800 002d53b63a3278a62c15196e2b29c4efebeda7261367ed29af60b37eb7afaf43 +3241200 002a02d5b7c79912df84b5ca015e294b16bdba6c901c406bb877893dc009bd6b +3241600 000ef39d749f87512fb4dbdabc7f4e5a2a0a8846ea39fce788d49e5a72a5b636 +3242000 00000c6f7f958461b17f9372632ac87b3088b053efaa7dc9eb56bfa38aef0ce7 +3242400 00148afc447f8c235bcc2600cdfa719ea626753009f2e82e5356b79cf4039dcf +3242800 00199d0795eb886517264ddf7c64b9fa49e296ff423f619cc8f2fe0d77f290ec +3243200 00696206d9a547e8b2b33f6ec2e5b1b4a74b51d5abe984b7fa5bbf94226a1782 +3243600 002be3b7801c27119a4b7a5146f1a1153f8b95fd1c67546baa24ac413a176f14 +3244000 00215b3a4f0fae3847fb29996c536077c21b3bdb1cc00ce2d6678f6ab30178bb +3244400 0050a28d6baac5bb129f81bb2466575a9b6a9f7091972580147f9aa037aaa95b +3244800 00035e64a4b1f808e2f8af29b0f7d74a0c49a43c36dc4463bd61eee20c709917 +3245200 00105a7e5d76600283d41a870ba74e3bcc601146459847c86de6abe3d6694ce1 +3245600 00687348f01415b4cdafcc1cd030df8f22076ff9df7ddde02698f395ce4571d2 +3246000 001ae5c962414d5151d33371e6aff78dad6508ae0bb8637f6e94365a8be2f573 +3246400 0057d784c3d23a5466910724ac8bad27a810d265b3d3cffbfa3a649a2d2b052e +3246800 0009f2c0a2ce269d66dfd81d9c4ddeafe7c2f92972bce128c7f5417dbd10512b +3247200 00077f09e8133d5e21f60e3d611afbf52d4ba167fd1081d189817885c89d61b8 +3247600 00331291131a81544371079041d3c8d87809502ba72ad21c2dc4f12a8b1546cd +3248000 003604285234a00d027bfda1642ceb91445e9ba079283800a8e1c18db22107d3 +3248400 000a49b3e9c36e3e10303b6ea700fa01992981714c8956c1f3c976b8f7f3fc13 +3248800 005e7f554c26c98f2af7d65e6d5bfde9316b6d959506d04c08c47ffd66f9d794 +3249200 
002646f56718b6ad59bd26339253a38235baca2318ab6cd8392f4f5aca3d0bf1 +3249600 0006cad21f658b4e638399f17cbe00f8920c2fb3ee24f2fcd8ddb291ba288728 +3250000 002203bdd51829f40055de2b0252dde7b64778a6330ca8e5664d8c57f56da1d8 +3250400 0065ffb1314dd14618876156090dac6e63e6f3bbc59affecf0c77e951e845bf2 +3250800 000ceac67de0c788cb8b2ba9a5cfdca733f4e268eedecbac014f5bd57c949482 +3251200 00227abe2fc538b2a4efd209ef1f4abe12fd2eef6f57a0c2bf4a89fc0840a28f +3251600 0004aecdd88c2bd204bbf9b69310eaa7299ed05b3fa412a4587d9c0e16533e69 +3252000 003fe5053edaa98c136697a5b4efea56405c7ee30c735be5f9a34e15c0ae1b61 +3252400 0013882927f265f9d6a9140c67aa9b0126b8c8d9af5d2e1c2927856bab9f429d +3252800 0009410d3965a7d6d02888eac1f00a70e66f2679cde8f99d03f14def76ebd34b +3253200 002899205a4dcb736c0ed5a2bd8627229a49b66cf952738c8fcaf83c5d9cd684 +3253600 0060e7499d4451f17174be59d02127b93fb90a85e11be25d23ce560af9dad904 +3254000 00285b3004c4083f5496d0611e7922d3aca6c12c96c25d721c75d439cdd6ee85 +3254400 0013dfcca5feb8abe6e9385d82aa5b7a5664fc3b5c1f1599bde09c49d82db6c0 +3254800 003762b97729f1e3a0a74266d74c97018fefd2807a3d2451b886b10408bf1647 +3255200 001a1bb4a569bc1fa7250fc8e8cf03dcca2bacf44f3f2b8c196ca6aa9d2f2437 +3255600 0011c9a97f6edfa95f4c8aa159d25d8278eab3693141b508de3e7a585d91c002 +3256000 004343e723117aae63182a8c399d234d5cbca041fe6d517a1154deca0c21716b +3256400 004f65f9ee254b405efca4d4714909bc647bb9a0c4ba1a7abd180c6f18d5f7a8 +3256800 001993c883360890b6cf3f8aec102027e67d3ead3ecd33d452fc98d3aa87b815 +3257200 0019c639436f5b6c26f98587edde8153ef93c782bcbb94445c08881b1518d368 +3257600 0038f0ea1512ac2804c8c2b01b537386c247eae7652bc0ba5b9a566db3ade0eb +3258000 007c0162aeaf2403b740cdee6cb82b4ee7df57a962b796faa5ec1bebf1454766 +3258400 00401240c3918dd047ddcaae23e29b1c706415af699dfe11cc1cdc07c28defdb +3258800 00d48d00a25d95f1028eb83bee36b34adfc8c62f06929f22b6b295bb705aaacc +3259200 0007a9bbf471ebacd008d74f56885acdcf1e83c77015147e85a95a3a34360ff6 +3259600 000e079211cde644f06c7ce73284e4ddd5fb928114cfdf1cc0307f7f7b1800a9 +3260000 
0016066f7def8a1b82591cf51361d8ac4a8e11b262f7bb6062c89b16c93bad46 +3260400 000d22331f8c267e771eecb9f94a0493363937a4c67967b365f6f3ecba21720e +3260800 0058f910cd09a2e65243125afe52b1492388c38c7bd023c326c4954c384d2817 +3261200 0051d51754613d24d5bd0168d4b767b7d559a72dd92ab15d3792d9a98bf7bce8 +3261600 0030601bd83f913b33da8dc7c3976c2da1b5b91da4f3a032ec7f598e8dc8798b +3262000 00b2da8782746dad37e8195383606c58a51aef409bdd73af15af9a883ed54d10 +3262400 0024eab624996100dac12b07eb8773e5a9d175661244ba2d5d41a39f16ac1e6f +3262800 001e2f0e96cb5a43d25fc8c6fb7f0c91319b99c817c99c7fee8e23e2cc160186 +3263200 00593b4e95e6f4297a99b06258886517c12b3b8cb3877a071d9186a435d52666 +3263600 006a7b42d1921ee395b9602aebfc9393921a06746dbf51d4800222bc91ad53c3 +3264000 002580f1e3232e0e5548d211283238f78cb3d4ba21cdb4a8e4cd1ef8af22bcdd +3264400 0057084ad4f670e6ebe289cd670eccad6fd9e7ee4ccc2075f16d4902c375c288 +3264800 00004f94a1beeefccf3462c66974fbd30ba4688a7cc5b8cfd628667c593d5c67 +3265200 01243800381e203c1207ffbc94edf66fec287ddea71b6e3edb00d134eab46f14 +3265600 000e0d8032a6b345c8fbb04bbe3fc79b68a4c8d7af3ad752e99c6c8a3065edd6 +3266000 00179f89058565c49dba660b840f947ad5febac424a5758f3286948f0bc179f0 +3266400 000102b0fc5b7ce5112e9f31215534eed6a227e6340024e1e6bf8c50be6bf4e1 +3266800 0003adef24fbb6274e4b991a57d8e7e99f234e975a1c70fd892c8f7f03a18194 +3267200 00a0e111bc87dffdba8395b579953f17c55c4de26da991c51815b8a66804c75a +3267600 000759fbc7d7f93563d486a53839ecea68b6c118f7a9b5acef2457cd43b5c083 +3268000 0010737fc7645289e3dfa072468be4b0a1abb428346c1b2a20755e906c9302c2 +3268400 005ed8ce860e7b857da82147a59b9964f6694e13d32cc620be343d3fbd386e44 +3268800 0001b952122a565c1cfdd77766453f9f778e0b18c9091f6d0169731627f8ad02 +3269200 00cbab6a2e045703f48504db5a13671bad68331a3ae1eff18925c5124f2cfb16 +3269600 0010ff95e7cae113f693f2dbddb638e3ca691df45a9e71cb3a24de4a23d3d15d +3270000 001948c9aa4628d9cea234af664e99014118d8243524faa33488a95e8baf4fca +3270400 002e8a98b0bcd96084780a875cf404426dad28d5405f427b98e8057f8876fd65 +3270800 
002b4d1fcb5a88d8a10180be14696169ad0c27938031660353c28520b5e49f3c +3271200 0025372198ad12ac48ed82019fb6cadc86f8c56ca190676f164effca980a4372 +3271600 000728e19cbf501e9f97e6aeab71d66436cc47b32c5ec40b3767f86e2ecf2720 +3272000 00010cdc90cc3660f6f5912de41285d13880be821bb2b411a368542e08620dd9 +3272400 0044ce5ce74de868184e14c7b2da1821c0c753a3e8e270cc1b4187e373172a1b +3272800 0042d80d9074dd0547e16691f4e9edbd7eb6b331ccc438d6a9d06e43aa4b8bfc +3273200 000e2dbb774c0d4f2d4a78e4147d94a1378afe9d3ebe82b6041983876dcbf667 +3273600 000eebebb9458c0c33f63fabdb32ff26363283e7b3cc4a3dde286ce6ecebff98 +3274000 001c38b978bac9e7beffa740383fad93da6f31d6c36c3c1341d25f7f702c74c9 +3274400 003a86ccb580b46b32240f187a125ffce976142478a8198c84ce337767391a9e +3274800 0059ab4260c0812a63c44da591ca55a810afd3b1631516505830847e26155194 +3275200 002aa8a2698ff02af6745f272c4ed564e95106ad08b2085d6943421020ef5006 +3275600 002728207178355d8c99e5a8afd06fb02413a36efcbe29853741ad8d39e03fbf +3276000 002c8fdff4c7eda5efdf75ac228e40395b681a5a87ce57b68cb14151de3089e8 +3276400 006954c163e4647d7d38b0d395a5d5033b46938a2bdb82b326899f8fb234c8cd +3276800 00138800872a707c7351298bb5a10abe0562e359e391f698fa9d1d3cdfb2100d +3277200 008cb53c11a55e377e755bfeb139f4159810295df23e302c5b2144f1a6eb61b3 +3277600 002174953db572478949e4fcbcdf35d7e4541c0642a253d322efa33307ec0b1b +3278000 002e434fd460ca649d03b00c744681da39706141bfbb0a8fc98dcf2713e48f7c +3278400 00c614ba7af61e0574133b418f630a7d7276d6816c325ffc27f60b4f06fde297 +3278800 000d355379572df30f2bbe66c27b149af2d264b93d8dd48f77c066282866a252 +3279200 003d48e80ba14781228c11b05d4e83641a321a028e8f3f6df6e1882a16e4567a +3279600 0016960074271c0e0bb9b293cd3bdd7c269e5dd3e2ea27555581c7ff8f4b8609 +3280000 003dea5501e0c0709dc3de535d8683238b5f66c09cf1a11654abb08115d5f47c +3280400 001205cecac00eb77e9e4709d36d7b50a71167b37ba99a8cec43043b068c84f7 +3280800 002e275ead0f5b22dee18fad7401a97859198bc7f6b59bdb67034b8b27abe49c +3281200 0024c80552ffc5f1fffd4e9ddd8a17a0feb7fd24d4aaea3df9c751095894ddef +3281600 
0065e7ce0bac5e55aabe0d684dfa190e9d3b403fa0e04515785d7829f175ab65 +3282000 005e3e38eb2e2518287c4e0ecee78cb84e17df2de1b31badbfb278a3b43ffbec +3282400 0044d997231fbb4b523e636d9bd08636ca7108f2f8201e8c25d6f6a8ef37e066 +3282800 002b9d86d50bf3bef6b22752efe2d8741457ba0411857df9e3810b229d1b24ba +3283200 00158e1152e99f6fdef786963a8a4ca582a22105005dd90614b44b1d8a953db8 +3283600 0003b0e9021323e8e7ab02e8350f427c58be6fa0c95b0c00d8682553eaa11101 +3284000 000ba292a3d3ddd6ed7294fa3174648b8377247df48ba7750a112370e82b561d +3284400 00105f35106e27cd750bf8fd5ede98ef6f772b6f1f18cc8be5ea5a0826a68748 +3284800 004b481cddd1cf137b5230e2a546cb443419a724721567e61b3ff7482d138f5f +3285200 00859c70f2e40b5d1331f4b009fed12225debbaf36e0abcb9b2217bd788f0fa3 +3285600 00181a10a292fcde1ac54c4e36559f909163273e02a084e3add22abd7efd537d +3286000 00046676158e75fad36d84e3f1a6f3b6cdfe0caf459865193a3f4e6eb535824e +3286400 0090c2f1f8bb768af28b468d9dbcfaf680abbf1ba5c8996582773e7662198057 +3286800 002df3afd325a00a939bc397aa1c674e0058f43fd479366a5672bba9d62f0e6e +3287200 001b87f46cee266ad002e1af921a23acfc13061cffc2045b3a391f3fd203d353 +3287600 002a58df5f3f2ef24915e02dadc3db3e3d6ab66e9f44a63c8fdbbc7d522021a1 +3288000 00225d31a24ecfe383be493b10adf1b3a75392458f8d72fc4e2ec7b294d9c082 +3288400 007188f3da76f131a78f39e9ad05b0c968f48810e82e3c6c2feb8a3bfce439e9 +3288800 003216d971b1c55928c4c8e4f3fa1c833c4c11b7e1b7e67c897da02bc0e01fda +3289200 001ca7f099c5ab6975bf0735fa35ad3d9b45799f2a8f704b2264573a8d35ff8e +3289600 001fec1b44aee2225655a13c63a852b1081815c55cec7087b6c973a1c2fe56f2 +3290000 00054e2375358360ae0974e454008ffea5a8884492d709a40b797925afdd0104 +3290400 001f4069c6ed67d0a18271d57783a58604d06900d42dde3ba8bf4ba6c1fba5f6 +3290800 0017a24ff485e08deda5fa329ae83896b11d09ee2e680626c4561300ebe20972 +3291200 002bf6ac1f142678596e59f3abdd353f13cfcb9f0525ab45ad0ab9d3798f7a2f +3291600 00178f82d5fc6e75c76053c4eec9900e868e0c738bb0fb6c189cfbbd93f208ff +3292000 0009246337e374fd9671b75f227122dccc32ddbbc868ae1997bf58d1cf726f33 +3292400 
0043cf31d222d79b83f52562e0d0eb08be730ffe4a4609ddb90842ebab2d8697 +3292800 006591ae747b98b56f7fb558866b6653e696fe37e81261ebbba7dd82d7aba813 +3293200 00057b4f4d0c852913f64bcb7c9ee87f837d095784df7379461895be40e47718 +3293600 002e2b55bdc7effb977bcb166ee5d18862a0609b8f7691d4224853fba5c993c7 +3294000 0007ad86c7daf08c4d93c2a826b47015b864950e3ebfc8c4c3a86af51e7ddb17 +3294400 001d6aee479d2c5058872eefc9bcc823eac374d0e8522a73f6a69b152f2ba8ef +3294800 0003418da923ba1e41d76c00b13ca82b2b28dca28e27d204d9189eb70b5b2659 +3295200 00229f61d3376f892fa72e89573775f19e691f05aacc0c60685736bc3ce07ee0 +3295600 0005688de2c3b779311701cb662df54c33a7e7ed5955e1f346cf4bb5dc91a04e +3296000 00264fe4fd4b3f805c258bc310112fe32984f303a2e67f0b426ab52ce3effbd3 +3296400 000d0f815aefedfb827cf406d3d6abcafea6167f56f1c50898181bac623f352c +3296800 00265a1ab4ec5f8567d6b98bd075b495e4e0b78216f9c139be17ce0308bad99f +3297200 0003cfa5cf4ef07ab45169ba0e94a183a78b2ce4b30acd4104e715db08c544be +3297600 0050f00f382c4c43c7f48b7c2e117630d595822aa6bec0b83251c92ad5bb16fa +3298000 0023ba4ccf2aa15fdcbefed353f5b466c8a0343a1dfd144f175b8e3952d5c57b +3298400 00947bbf86976bd76670f6a5e7674bd2712408e4ba999998d023f99ae7eaf8d8 +3298800 001532edccffc309b9667b765ea5529a6fe0fc83ce6a482069ab5627c5001a68 +3299200 000bbde6aa859d68e0c99e75eacbc0581bc16d8083fa14e3e437c606cc2a7920 +3299600 00a62e4f4068d19a006f1a7a4c6e46657a515e727d335cecea4d1674f8ec1701 +3300000 0021d92c16349666fb209c7b8bc9a8462f09b28edc223cf62520abf424de47fc +3300400 002492e35ffc89d0c32b66856ed1b8466127515181b834d5eaef47d50e96508b +3300800 0013f72d694ea927c12e7df703b40cfc66ed8a2d965206a3e5594089255c153f +3301200 001e9120709ae7793709bf82041cc2b7a5f46d782e9c9fbd60bf4252d81e1285 +3301600 00207caf709f67fa10e4f6addd5be2f12fbb991f5c1251e7a45fbba8f16c76bf +3302000 00494509c421eb12f9a085bd4d041e4e658f96900f51a724fafd3a1ceebf5280 +3302400 0035e7908c9d2e62e3033343b9e817af198416aa8635e7c6f5ffa8eb6b1d6106 +3302800 001164330b3062442626ebf630c589fcad0a510026edcfe25b42180d52399f20 +3303200 
00606033dd39f94a6b9dc947ef464faf80a923b108fc2cb2d0bea22b131b92a6 +3303600 000599ff26d3b9b3380e0fc2eff1deac719250a3df25096680fd7d8a1e29749e +3304000 00213c9077a67fdd921192bb77c050e580ece67393651abc10052cf6b30e7b1f +3304400 00144e0d8aaeb07069b0d6a213a8539aa5e21d07e174d8070236065a84a62f33 +3304800 00076481cfae293b7a83359201a9600e343640879bf0dd65e2a03cbe1756b86d +3305200 0025920043c6d40b43fd775d4f30b136534d2cfe802d81535f81b4532a262309 +3305600 0033b17dfca4c43f9b9a9be13ef14bb7177a7dbed7efebb56b43b7e3556490e0 +3306000 0000e514e1ef3883527d2604c6c8aa05b99b4d9db191bdebddd133250b347784 +3306400 002ca823eb179259ce912fca8dc2964a5d86478c217b38c333dc45739508719e +3306800 008801c49886a84eaa9aef03c55cd066255d03318a6d48f0f8eb249914207080 +3307200 0031732e765b517ae426e00bfafc68c6bc388b52233ce6c15a8a1b57a8440f43 +3307600 0022f2b875b6dc90bc9625311f5be20444dc3189e1f3940f887306ceac8e1b45 +3308000 00cc7ae50d34312876dcc319b7cf60cd5559a9ab51c25eb1bd0ff8b6189a061e +3308400 0036f18d269416ad7eee1dd29398daac57e1d457d08ca5ecd77c069c35e56e69 +3308800 002bc5c529e2e3e53ca1857eea666866b52c02a4a442a2653b2ec1c4ccd7add7 +3309200 0003fc3e58bd91c20e5c663de2d4253957acf0f0a490046f73b6ed107d9f066d +3309600 000efbb0ec254210e2e47914d8688a609f58590ff942b37ee0d9300eae7e1d25 +3310000 000cdc8c3b614b61786a6d20a255b11ac26fa2c38debf369132e2ae5c21afcf4 +3310400 00462e53d4e9e5e530e7e3ae4e159bf5478fa0fd59a9e9d26a94166f718b32e5 +3310800 00108ab98de2cd04d77a2ffe1e16469246df1239a38a0b563c48dda9404dac27 +3311200 00092efe0e47b77eb3a9998eedd77881ffe973139956e22b82b99cf8d7c96437 +3311600 000f06b8c3c2bd93565a98897b0e93086fa7d00164c554d66b7f131bb33f0fd3 +3312000 008e8fdd7bd3271b087109180115b34738e6b3d4e6bc2cb8c01b2d7ec279b748 +3312400 002d43af6696a13fc834247b627dd9e94131e6d2b2ffeec1a3c2b74dd28c8b4e +3312800 003ed40bdcc523084698e70413ba3fb46f7ef291a3bfe9d190a9eefc1e62a876 +3313200 008ae830a4109b6a1061a1a9e08f3945fc7a23ec69594be78035afb7e851d56b +3313600 000a6494f83d839d4a6830f2597cce663c4dcd24b1e3569aa39305a6cf7f2f3c +3314000 
00146db33334bc260f22bee31150fc58eb9a8df207163fbc794476717507a25a +3314400 00122e3cc9c9d03143fcf274a8483e500a2960205db9f7524555b940a2892539 +3314800 00237372473701767248073dcd0d3c434ac6ec07e4829d60e31aaa8b38696c59 +3315200 002cf1d69c874bbe913529b046d78f032bbf9db7100eea3ca7ac85fd72dfbfa2 +3315600 00adf8cca9fb60f04e1bde634eefcf56db01fcc5d6fda77b555eef002b44dfbc +3316000 001ae0c343b3987f6de85fc4a55d0c6d037a96c6872835488e72f8298477b80e +3316400 004da93ab8f6075c76f94d397e861ab4e47c6ed4869539db91104a8a0c52af4d +3316800 006301b94566fa0a049a2a11532d4f005a7fe1e310e916e3920b9a89430e4abb +3317200 0024cb33da0cf59c6ca8ea5dcad1f14114a0bc02d17f6eb93ac63fbd594b2e51 +3317600 001d489d01f2d47738daa3a1a14694aa5fd11c5596d3726d3821c13f809806b8 +3318000 001a1040cdf9a4115ce48c33565cd81218675823822297358989117a8f0fa810 +3318400 002ebe936f2869763c1b984b3041b50fcf55c2c7ceb76a0e1a37b686e3c0de5c +3318800 000e344574989a1feccb0abe431ea0028b709ab9874c7fc1e3cb7dba37b74384 +3319200 001ba1c0b6b230c83b01e40ae9c8032491176b1c1f4231ad0c299948560da183 +3319600 0023ebf0e854bbeee670156442f2f0624da30784b315ba13e2e5b7ba21f0e155 +3320000 000b7aaa7a22ab223384d4e154047df54704abc3c4baa1a5f76b4fb15a3ffcc5 +3320400 001a89b1ff6675ca47fafad82717089ad5912b2f951ebf9f20845b036439dea5 +3320800 000ae16c325d1d4416f948fa3e877c76bbb028ba9d4a3dab59f4c2be0c8bd731 +3321200 003dcf5f67685c2eb9d540e98555283deffae1645fc0e47a1981c058aed343bb +3321600 000a33f6527ebcd83c6e58fedbec35c4432edfa223945ec6403d9922cc372a68 +3322000 002e59fc998fe0ce7d8df491da1db7203324652b7dc74d6072869a34720974f1 +3322400 0060381c4d30bb2fc36df5b1b884dc0e24993b4a9071e67b6a4e9ead026acd1f +3322800 001d2fc7830bf567acad59c4b47c68e670e05ee894120ee39de2e3be59785811 +3323200 000744a34bc6b1c0a232210adb84569371553a57a2da8a09701f32cfa45a4e87 +3323600 0019a49974be6360afee324553c0c84b620141b3b8885cd95e40c5189027306d +3324000 00580f3cdbee2f61649e3acde45351429154679efa31cef67baf8df81e21dc2f +3324400 002b209221e9764cd1f7ae6ff18bfddd870af7aad69aa086a1efd159c763109d +3324800 
007d955019b9ff3e8e866c3ad1ba026ccaaf5fe869f5ba1d0b6fe18983cc5c52 +3325200 0031add42c7a26c724fc96f412cd0fe8100060bd9fc2bfa0188c66d6c3f6a5ba +3325600 0023a73101990aea7997a0e3da72bf486fdb17d831d11df539361e6fdb3d82d2 +3326000 003488c7bb5d7969292f86bebe2afa462f4b9a618b5a2d5c89625a82f4ae43ed +3326400 0001f44c8090c3efb598e80de2d58f338daba5f001a8e5ad46b1949036eb4024 +3326800 005c193949cf63949d9267b283109ac484a2c84ae342810b35c2086015164f89 +3327200 001d1304723b03d4596d42c7b60c3a525b03df87fc8c56c5b1371e15b7e3dc01 +3327600 002a20c2f6b77a5b2b723d716cddf36a3a7f6d53da2e9a3f88893fa4348ee178 +3328000 0009ca5750e7d8592d79bf6ec34e3c9ed4685972aba40ce4efa18ea8c75f082b +3328400 003f43a86fc15ba0773848451a68d4b806f0aeed3c9dfff5cae1a695a0b0344c +3328800 0019f72625ad3f3be89081ce54b9de09758238022d94b180d94716daca44a22a +3329200 0029e2ab5d9c8071d962c2dc4869bc04d35ca43fd324d6f2fdcb3c78eb20fac1 +3329600 0013d5601254141fbd65ac372ba56548d19e3a03350f2ce9d1beb25881971640 +3330000 0008ab1799f49aba0c332afa5a56d48bbf1b34794c5f78b1ae5fff1d328eff3c +3330400 002ebd6e86e0e3cf8fdf5bedcc80c3428ceb44e15e9ab98d2c208b2588d69b9b +3330800 000da031a46f7e3c1814e7df92657526c464853672dd972625a40fa47ef71457 +3331200 0023acc20904e10488b7eeaf6efec210b13281b511303efc65e01ed60de16867 +3331600 008efb279d322d27dcdbbe38903af2c2ee9a77e94efe0beb1eefafe26ce25e7b +3332000 001acd2cdd94e084e02a2f203c09aef91ad78d715f77ec37440e5163499358ea +3332400 0060e25a7f0729b89efd7a253d8233c630524d9cd3355dfdfefbfc401271d0d1 +3332800 001d146db580c718d2f5da2a15c8c0e25405a4b97cab520153967fbe3fc829fd +3333200 00410d92cab54092a20dbec69ecb923b537f33fb23d8c62d862e9ed42a738dad +3333600 0027144e3e2c046a3b7944b666174f59ae3d389592da7f7d7b3cdabd2c744676 +3334000 000868d17c2451d49090f186557f06bdaed7f43db51de444fc0a02929ef6e17e +3334400 002b681d25027fee53e568e54af495f8ebb0002dfaaac0633e6db69d4c95e75d +3334800 0008762ef52cb1948b0d9bc3ec27088296800090ffb6da765f572617fb1d7be6 +3335200 002a9d02d66c04ca5c42aa04c947086cf8c2c4fa28c40f5122d53a88b5794be6 +3335600 
00723a8dc580c527686a9c1f660fec7b68c7847a9b9e347a2cf94804b8da68b9 +3336000 0032ecbf74c2dc6510702e67494cd98bb3d94179c5975cd6781d207ef3898bf7 +3336400 01e9e607652f2fe7b5fbb925574d81bc259382127e6ff40c755d4d3e76e9066a +3336800 00846bcae737963252ca205db21887d33a8098d6c9c86f254e566260e9ab550c +3337200 002a06ca740acf4cc8971f68c6e39cab973ed1ab8faee9b5937834dd42c736e4 +3337600 0005b87c175f88464eaf6ee3b15c390d3ff4860291d7e900477f9c00cc6e5dfe +3338000 00697233c971afc8a6483e3f2b5e0f38247eda502b4cedfab34116623a8664b4 +3338400 00f4618cab7cb17c24c3f353da8db25e623102113160b623f2e907abaa2b1d18 +3338800 0019a76c47786b7638a554ba2da738d0ba0bf6ec80764138efda04dabc7e7f60 +3339200 0016b3af3c50624da7a890616d1455864062579c3e13b14d04dfcbb6c9d43dbc +3339600 002bdfdd896367738cd5f959ba8d7f9c1a05014fd55db9aca11d8c788a03d839 +3340000 0009202e65fe4f539e4070e5d45ed1376bdfbd87e2bafa7950b8108bc115a789 +3340400 005cc6fa27b4359fa71e30d88663a0600d0219bb38710b9465fed6dfefeb8844 +3340800 0094106315e533d933df754d8f0a56256228bf02520d78d96b27c1e1074a001f +3341200 001c0149264c27ead0efe69cb25a693b94b43c4797f40d9e9a1a2fbd24e0cb3f +3341600 002ad6f1f460e716811ae4fdde36cfc1c157a1f2654dbc388c18f09bd4f224a4 +3342000 0019713fe69da5d1d9a1b27e8240a63fe9013224db3111f8b7a19be5e258527f +3342400 00042fecbc745ef07d541df66d1f07ce05cda37c85022f9d163dc697fffa1e6b +3342800 000172ae7b24626e3426d8cbe853e5a467be39bb994d701f97e9c90563b1f0af +3343200 002942162cb198b9e7d817f457e6a0f8256123a55488cafa41a336a0b63ddf9c +3343600 001af3049751df1247922faeb807d7c8c932ff2f43692e8c99564617217ff22d +3344000 002567a39e4c514fecbc10f8865067b1800d8b276887d3a8d0c4e7e39e524dac +3344400 002c910ad08391ca2327351e69a683306d00d79fc3d2aebc8ca24a2c3daee259 +3344800 0001ec15009e981603bd13dea03d4e6f2272c2414950610762a05d965deab2f5 +3345200 000878d38a268a08db897b95e2d9d85d8ed777c1f64730db0070ed28907ac87e +3345600 0027accdeb6bd41dee7102adc684a7354807fa778100a356d7b472e8bef4f10c +3346000 00210854fefbc6a954c72557e7436a27328eeaec1f1472e5ef565385149f5751 +3346400 
0018be7130bf259252f47184316e57f0e3eaf2bd0658cae64b8967deaa469718 +3346800 002b6624fe4216c31e46d9b8f50f53aa5db97623c8f564d1dceb66109b965f7c +3347200 002a0497b1ec910fbd3c6425f32021159654682e37a0eae68b17e12d66e5b420 +3347600 00100e033bc5c9cc21e7bc79efe3dc387488be0fb25bab4f8de02efad545a453 +3348000 004924cb367f2ec1944f0cb6ae8febfe8cdf7f5232d6cb7063643cfb28306300 +3348400 000a2843ab35a24ffb984af418358a2c99f4a59a823d15add013e2a09025f380 +3348800 003e97ff882e26dd16a6533f5131ebcebb237cf47fbeae57f2760801f4033041 +3349200 0035b69000ead960a4d5097896ce7f77f8db2ab606c63e50c5fd0b9cdc243810 +3349600 0021549bc9aa2f5875c07d0fa01c86e1c7bb6194ffc935cd19df1bc01eb4522f +3350000 00035fc24bb009670e32c783cbf86fc690d76b40b246b4b362a4664dc34f7956 +3350400 00075518b80af63f84bdf87b51721de619c51bbb9e811de7173c9225c65948ec +3350800 003e0ae13b17fd673c393922045a424a5bbd7b46723c2adc11099c82a0a38e35 +3351200 008b687017ef2a8e116f03321b1f88a29a563f2eb0393d91756251a30d832a8f +3351600 001a75e997c653a97b7d00519d8f3d51271de195c26cfe3e5300a4561b6d16c9 +3352000 00236718f41c1aad633eec65fdab945ff9c65e2a99b94c8dd28b6d7d33d94d7f +3352400 0029e9b3dbbc2d49a6d0860bfd12b432815da355bc402d4ce9c2bba9d18f0aae +3352800 0002694a33278fdd988eb507e94dc029f14b1a11ccaecff4d2274d16bd514bab +3353200 00b7ec91a90db1abdc195fdc5ebcccb62a0d5e4fce21ed312034281f59db262c +3353600 0002b54f834190e436c0828834a1a6b771eaa2709037f72f281de2fd9fc26b59 +3354000 001b58fd2746499bc7a63dc9ee2c489297f62049d2403da054070e7986aff1f9 +3354400 00078e3c35bc7c7686fdc8c33178de2a3afe4ed2bda648419489a3127a91ed93 +3354800 0033f8848d0c8b53c0b8e9a6a2cba16b0f47a30ba6b16d919a903f2cdb483a08 +3355200 0023bdeff333b8f3747957220048cf11d216a736172ec30fe8230b18092dba97 +3355600 002d76c0eb19806dc34e26af172b6f61c653917086f2af815e37fc8621aa1963 +3356000 001d9a25bc57dd9dd5f7bd9145d608810897f4de30e3bc986098d95635ba5be2 +3356400 001b18953af6c9ad50c1e6715766b884ac0221a6611030545c26a362c1a816de +3356800 00134a5d65de9b4c02ef53fabcf3ad9fadbb0d63be89f927fda8bd358b23c628 +3357200 
000ab753aa6b4b4ac8d37c8226e506cb41ff4afb65b1e39c6d0646ae62791999 +3357600 005a985d035f2b7662580763ce1ab740f231100b3d46722a0fad7a3e4a516816 +3358000 0000edd0a262e6115e21da98d07cd3d8b2e7fbb4ff57a26cf01fad9971e0177e +3358400 001e3c4c522b1fd49363058e0a35bfa48f4bfd95562db2a40c14d771b2c7cb1e +3358800 0018248ceb491e036146f621196a24047d9bab7412ca78c5042fc4953ebd633a +3359200 0039d95b13b61d085ed2e9defe0b627c895e83d08871ae9fba834f65e184d2f6 +3359600 0066ebd886255561d28570044aa86be7ee84b862d288e11e2aa0d53f776a380b +3360000 00081ce59a0daf53be933a00e34648272955e393787119a5b1a599ff51701c7b +3360400 0001e0421ac267981cb9d51926bf93c149ef37790413bb77ea7d0b507a8b92d3 +3360800 001769149b6fc1c3a7721497987dcf8a1530dfe5b60aaea963d2ca1edf2481ed +3361200 0042a0a12ce53af0d7c590c33d524530188c4a0a7aca4f2e9a89d30d70df7c44 +3361600 001120ae9e57e3cba3de466f7afd9e5ccf67a01995cd3762eec059aaf0b86246 +3362000 00248a036b196ac0a06aa06d612aa5fbeb7b45adc9b5f43dd2a4a56378e55a2d +3362400 00230ecb307f107a2a960645feaf8532896bd8985c76e3503624b8ecf9db8311 +3362800 00060d267495a0292af9d3c23b6120952ffa18dee89514cde06b9b765bd45582 +3363200 001ab876f69ac3c929eb6d4f634a7ad12c7ecac170ce09813e5986be6d4eac6a +3363600 004939d1294953309e8d5b625e26fe221409ddbf39c8a4a8e67dd7f10fc606ec +3364000 00199adfcac52caba2acda3cc2d063ad8c1f1acbbcf407e25d6bbd7ba6b6e672 +3364400 0007335a2f4f55bc1c13ab3d2f6924c33e5fd2a7983571cfd0eda43de0961d7a +3364800 00315b354cdb2afe686445e4ef65b1818b5e96d32d525caa210b28a7e82eaa12 +3365200 0017488235227458721a41c62ffd07522823ef2e89f0f6070ae187d440aa1d3a +3365600 0060a30946367076ebae689ecc2599e770716e52a6450084a520b7df18062242 From 33a416b719efcfbf1ffa5da0df9fa0eb31feb170 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 5 May 2025 19:01:36 -0300 Subject: [PATCH 164/245] fix(ci): change dependabot to only update lockfile (#9416) --- .github/dependabot.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7b0b210e1e1..5e49874de2b 100644 
--- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,6 +3,9 @@ updates: # Rust section - package-ecosystem: cargo directory: '/' + # Update only the lockfile. We shouldn't update Cargo.toml unless it's for + # a security issue, or if we need a new feature of the dependency. + versioning-strategy: lockfile-only # serde, clap, and other dependencies sometimes have multiple updates in a week schedule: interval: monthly From 3423e6c71c216e4130d6f5250812004b29c8d053 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 6 May 2025 02:00:46 +0200 Subject: [PATCH 165/245] Update `Cargo.lock` (#9467) --- Cargo.lock | 305 +++++++++++++++++++++++++---------------------------- 1 file changed, 142 insertions(+), 163 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fea79f98473..39a4e546868 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.5.35", + "clap 4.5.37", "color-eyre", "fs-err", "once_cell", @@ -90,7 +90,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "once_cell", "version_check", "zerocopy 0.7.35", @@ -193,9 +193,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arc-swap" @@ -217,9 +217,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-compression" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" +checksum = 
"b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" dependencies = [ "flate2", "futures-core", @@ -247,7 +247,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -258,7 +258,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -443,7 +443,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -588,9 +588,9 @@ checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] name = "bytemuck" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" [[package]] name = "byteorder" @@ -669,9 +669,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.18" +version = "1.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525046617d8376e3db1deffb079e91cef90a89fc3ca5c185bbf8c9ecdd15cd5c" +checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" dependencies = [ "jobserver", "libc", @@ -725,9 +725,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", @@ -802,9 +802,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.35" +version = "4.5.37" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" +checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" dependencies = [ "clap_builder", "clap_derive", @@ -812,9 +812,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.35" +version = "4.5.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" +checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" dependencies = [ "anstream", "anstyle", @@ -831,7 +831,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1000,7 +1000,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.35", + "clap 4.5.37", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1029,9 +1029,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1111,7 +1111,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1135,7 +1135,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1146,7 +1146,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1161,9 +1161,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "zeroize", @@ -1229,7 +1229,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1413,7 +1413,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", - "miniz_oxide 0.8.7", + "miniz_oxide 0.8.8", ] [[package]] @@ -1535,7 +1535,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1591,9 +1591,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", @@ -1625,7 +1625,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1667,9 +1667,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" dependencies = [ "atomic-waker", "bytes", @@ -1686,9 +1686,9 @@ dependencies = [ [[package]] name = "half" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +checksum = 
"459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ "cfg-if", "crunchy", @@ -1857,13 +1857,13 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ "cfg-if", "libc", - "windows", + "windows-link", ] [[package]] @@ -2038,7 +2038,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.61.0", + "windows-core", ] [[package]] @@ -2165,7 +2165,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2212,7 +2212,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2292,16 +2292,14 @@ dependencies = [ [[package]] name = "insta" -version = "1.42.2" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50259abbaa67d11d2bcafc7ba1d094ed7a0c70e3ce893f0d0997f73558cb3084" +checksum = "ab2d11b2f17a45095b8c3603928ba29d7d918d7129d0d0641a36ba73cf07daa6" dependencies = [ "console", - "linked-hash-map", "once_cell", "pest", "pest_derive", - "pin-project", "ron", "serde", "similar", @@ -2448,7 +2446,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2530,9 +2528,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.171" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = 
"d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libgit2-sys" @@ -2558,9 +2556,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" [[package]] name = "libredox" @@ -2609,12 +2607,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -2623,9 +2615,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" @@ -2704,9 +2696,9 @@ checksum = "3d97bbf43eb4f088f8ca469930cde17fa036207c9a5e02ccc5107c4e8b17c964" [[package]] name = "metrics" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" +checksum = "25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" dependencies = [ "ahash", "portable-atomic", @@ -2734,16 +2726,16 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd4884b1dd24f7d6628274a2f5ae22465c337c5ba065ec9b6edccddf8acc673" +checksum = "b8496cc523d1f94c1385dd8f0f0c2c480b2b8aeccb5b7e4485ad6365523ae376" dependencies = [ 
"crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", "metrics", "quanta", - "rand 0.8.5", + "rand 0.9.1", "rand_xoshiro", "sketches-ddsketch", ] @@ -2771,9 +2763,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ "adler2", ] @@ -2807,7 +2799,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -3058,7 +3050,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3136,7 +3128,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3177,7 +3169,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3265,7 +3257,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.24", + "zerocopy 0.8.25", ] [[package]] @@ -3275,7 +3267,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3341,14 +3333,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -3381,7 +3373,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3410,7 +3402,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.100", + "syn 2.0.101", "tempfile", ] @@ -3424,7 +3416,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3459,9 +3451,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.37.4" +version = "0.37.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4ce8c88de324ff838700f36fb6ab86c96df0e3c4ab6ef3a9b2044465cce1369" +checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb" dependencies = [ "memchr", ] @@ -3511,13 +3503,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" +checksum = "bcbafbbdbb0f638fe3f35f3c56739f77a8a1d070cb25603226c83339b391472b" dependencies = [ "bytes", "getrandom 0.3.2", - "rand 0.9.0", + "rand 0.9.1", "ring", "rustc-hash 2.1.1", "rustls", @@ -3590,13 +3582,12 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.24", ] [[package]] @@ -3644,7 +3635,7 @@ version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -3676,11 +3667,11 @@ dependencies = [ [[package]] name = "rand_xoshiro" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -3758,7 +3749,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 2.0.12", ] @@ -3870,7 +3861,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -3985,15 +3976,15 @@ dependencies = [ "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys 0.9.3", + "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.25" +version = "0.23.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" +checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" dependencies = [ "log", "once_cell", @@ -4255,7 +4246,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4319,7 +4310,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4399,9 +4390,9 @@ checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -4593,9 +4584,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" dependencies = [ "proc-macro2", "quote", @@ -4631,7 +4622,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4697,7 +4688,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4708,7 +4699,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4830,7 +4821,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4870,9 +4861,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", @@ -4893,9 +4884,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.20" +version = 
"0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" dependencies = [ "serde", "serde_spanned", @@ -4905,26 +4896,33 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", + "toml_write", "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + [[package]] name = "tonic" version = "0.12.3" @@ -4966,7 +4964,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5111,7 +5109,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5224,7 +5222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5432,7 +5430,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", 
+ "syn 2.0.101", ] [[package]] @@ -5562,7 +5560,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-shared", ] @@ -5597,7 +5595,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5633,9 +5631,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.8" +version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" +checksum = "29aad86cec885cafd03e8305fd727c418e970a521322c91688414d5b8efba16b" dependencies = [ "rustls-pki-types", ] @@ -5683,25 +5681,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" -dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-core" version = "0.61.0" @@ -5723,7 +5702,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5734,7 +5713,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5929,9 +5908,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" 
[[package]] name = "winnow" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" +checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5" dependencies = [ "memchr", ] @@ -6010,7 +5989,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure 0.13.1", ] @@ -6414,7 +6393,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.20", + "toml 0.8.22", "tower 0.4.13", "tracing", "tracing-error", @@ -6506,7 +6485,7 @@ dependencies = [ "structopt", "tempfile", "tokio", - "toml 0.8.20", + "toml 0.8.22", "tonic", "tower 0.4.13", "tracing", @@ -6630,7 +6609,7 @@ dependencies = [ "serde_json", "serde_yml", "structopt", - "syn 2.0.100", + "syn 2.0.101", "thiserror 2.0.12", "tinyvec", "tokio", @@ -6652,7 +6631,7 @@ dependencies = [ "atty", "bytes", "chrono", - "clap 4.5.35", + "clap 4.5.37", "color-eyre", "console-subscriber", "dirs", @@ -6692,7 +6671,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-stream", - "toml 0.8.20", + "toml 0.8.22", "tonic", "tonic-build", "tower 0.4.13", @@ -6727,11 +6706,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ - "zerocopy-derive 0.8.24", + "zerocopy-derive 0.8.25", ] [[package]] @@ -6742,18 +6721,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "zerocopy-derive" -version = "0.8.24" +version = "0.8.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -6773,7 +6752,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure 0.13.1", ] @@ -6794,7 +6773,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -6816,7 +6795,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] From ee65be98c50336645ad8af4a857dbb9ab8ef35e1 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 5 May 2025 22:01:53 -0300 Subject: [PATCH 166/245] fix(usability): Improve the cache dir and database startup panics (#9441) * improve cache dir database panics * Apply suggestions from code review Co-authored-by: Arya * fix build --------- Co-authored-by: Arya --- zebra-state/src/constants.rs | 2 +- .../src/service/finalized_state/disk_db.rs | 42 ++++++++++++++++--- 2 files changed, 38 insertions(+), 6 deletions(-) diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index b7e9fd2859c..a6373637f59 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -118,5 +118,5 @@ pub const MAX_INVALIDATED_BLOCKS: usize = 100; lazy_static! { /// Regex that matches the RocksDB error when its lock file is already open. 
- pub static ref LOCK_FILE_ERROR: Regex = Regex::new("(lock file).*(temporarily unavailable)|(in use)|(being used by another process)").expect("regex is valid"); + pub static ref LOCK_FILE_ERROR: Regex = Regex::new("(lock file).*(temporarily unavailable)|(in use)|(being used by another process)|(Database likely already open)").expect("regex is valid"); } diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 3ba7026aafc..4e9907f2681 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -22,7 +22,7 @@ use std::{ use itertools::Itertools; use rlimit::increase_nofile_limit; -use rocksdb::{ColumnFamilyDescriptor, Options, ReadOptions}; +use rocksdb::{ColumnFamilyDescriptor, ErrorKind, Options, ReadOptions}; use semver::Version; use zebra_chain::{parameters::Network, primitives::byte_array::increment_big_endian}; @@ -833,6 +833,11 @@ impl DiskDb { /// Opens or creates the database at a path based on the kind, major version and network, /// with the supplied column families, preserving any existing column families, /// and returns a shared low-level database wrapper. + /// + /// # Panics + /// + /// - If the cache directory does not exist and can't be created. + /// - If the database cannot be opened for whatever reason. pub fn new( config: &Config, db_kind: impl AsRef, @@ -841,6 +846,11 @@ impl DiskDb { column_families_in_code: impl IntoIterator, read_only: bool, ) -> DiskDb { + // If the database is ephemeral, we don't need to check the cache directory. 
+ if !config.ephemeral { + DiskDb::validate_cache_dir(&config.cache_dir); + } + let db_kind = db_kind.as_ref(); let path = config.db_path(db_kind, format_version_in_code.major, network); @@ -901,11 +911,15 @@ impl DiskDb { db } - // TODO: provide a different hint if the disk is full, see #1623 + Err(e) if matches!(e.kind(), ErrorKind::Busy | ErrorKind::IOError) => panic!( + "Database likely already open {path:?} \ + Hint: Check if another zebrad process is running." + ), + Err(e) => panic!( - "Opening database {path:?} failed: {e:?}. \ - Hint: Check if another zebrad process is running. \ - Try changing the state cache_dir in the Zebra config.", + "Opening database {path:?} failed. \ + Hint: Try changing the state cache_dir in the Zebra config. \ + Error: {e}", ), } } @@ -1515,6 +1529,24 @@ impl DiskDb { ); } } + + // Validates a cache directory and creates it if it doesn't exist. + // If the directory cannot be created, it panics with a specific error message. + fn validate_cache_dir(cache_dir: &std::path::PathBuf) { + if let Err(e) = fs::create_dir_all(cache_dir) { + match e.kind() { + std::io::ErrorKind::PermissionDenied => panic!( + "Permission denied creating {cache_dir:?}. \ + Hint: check if cache directory exist and has write permissions." + ), + std::io::ErrorKind::StorageFull => panic!( + "No space left on device creating {cache_dir:?}. \ + Hint: check if the disk is full." 
+ ), + _ => panic!("Could not create cache dir {:?}: {}", cache_dir, e), + } + } + } } impl Drop for DiskDb { From 188cb77081063c2efdd5c7c6b7e4ee4174c9fb86 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 6 May 2025 02:58:02 -0400 Subject: [PATCH 167/245] fix(ci): check, create and mount lwd cached state for tests (#9481) Co-authored-by: Gustavo Valverde --- .../sub-ci-integration-tests-gcp.yml | 2 +- .../sub-deploy-integration-tests-gcp.yml | 4 +-- .github/workflows/sub-find-cached-disks.yml | 8 +++-- zebra-rpc/src/methods.rs | 29 ++++++++++----- .../src/methods/get_block_template_rpcs.rs | 2 +- zebrad/tests/common/lightwalletd.rs | 36 ++++++++++++++++++- 6 files changed, 66 insertions(+), 15 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 35e4570696c..2eef386e4ba 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -316,7 +316,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 6533996757c..ec901c35e06 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -414,12 +414,12 @@ jobs: # branch names to 12 characters. 
# # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. - # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. + # Passes ${{ env.GITHUB_REF_POINT_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. - name: Format network name and branch name for disks run: | NETWORK_CAPS="${{ inputs.network }}" echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}" + LONG_GITHUB_REF="${{ env.GITHUB_REF_POINT_SLUG_URL }}" echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV" # Install our SSH secret diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index d0dd52d6c1e..d3de188a807 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -63,6 +63,10 @@ jobs: with: persist-credentials: false fetch-depth: 0 + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v5 + with: + short-length: 7 # Setup gcloud CLI - name: Authenticate to Google Cloud @@ -86,12 +90,12 @@ jobs: # More info: https://cloud.google.com/compute/docs/naming-resources#resource-name-format # # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. - # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. + # Passes ${{ env.GITHUB_REF_POINT_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. - name: Format network name and branch name for disks run: | NETWORK_CAPS="${{ inputs.network }}" echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}" + LONG_GITHUB_REF="${{ env.GITHUB_REF_POINT_SLUG_URL }}" echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV" # Check if there are cached state disks available for subsequent jobs to use. 
diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index fb3f596ab65..13e157be611 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -590,7 +590,9 @@ where let relay_fee = zebra_chain::transaction::zip317::MIN_MEMPOOL_TX_FEE_RATE as f64 / (zebra_chain::amount::COIN as f64); - let difficulty = chain_tip_difficulty(self.network.clone(), self.state.clone()).await?; + let difficulty = chain_tip_difficulty(self.network.clone(), self.state.clone(), true) + .await + .expect("should always be Ok when `should_use_default` is true"); let response = GetInfo { version, @@ -622,7 +624,7 @@ where tokio::join!( state_call(UsageInfo), state_call(TipPoolValues), - chain_tip_difficulty(network.clone(), self.state.clone()) + chain_tip_difficulty(network.clone(), self.state.clone(), true) ) }; @@ -643,9 +645,9 @@ where Err(_) => ((Height::MIN, network.genesis_hash()), Default::default()), }; - let difficulty = chain_tip_difficulty.unwrap_or_else(|_| { - (U256::from(network.target_difficulty_limit()) >> 128).as_u128() as f64 - }); + let difficulty = chain_tip_difficulty + .expect("should always be Ok when `should_use_default` is true"); + (size_on_disk, tip, value_balance, difficulty) }; @@ -2702,7 +2704,11 @@ mod opthex { } } /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. 
-pub async fn chain_tip_difficulty(network: Network, mut state: State) -> Result +pub async fn chain_tip_difficulty( + network: Network, + mut state: State, + should_use_default: bool, +) -> Result where State: Service< zebra_state::ReadRequest, @@ -2724,8 +2730,15 @@ where let response = state .ready() .and_then(|service| service.call(request)) - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + .await; + + let response = match (should_use_default, response) { + (_, Ok(res)) => res, + (true, Err(_)) => { + return Ok((U256::from(network.target_difficulty_limit()) >> 128).as_u128() as f64) + } + (false, Err(error)) => return Err(ErrorObject::owned(0, error.to_string(), None::<()>)), + }; let chain_info = match response { ReadResponse::ChainInfo(info) => info, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index eec3b816f5b..98424c79c31 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1250,7 +1250,7 @@ where } async fn get_difficulty(&self) -> Result { - chain_tip_difficulty(self.network.clone(), self.state.clone()).await + chain_tip_difficulty(self.network.clone(), self.state.clone(), false).await } async fn z_list_unified_receivers(&self, address: String) -> Result { diff --git a/zebrad/tests/common/lightwalletd.rs b/zebrad/tests/common/lightwalletd.rs index 376ce096865..2ee37aff533 100644 --- a/zebrad/tests/common/lightwalletd.rs +++ b/zebrad/tests/common/lightwalletd.rs @@ -111,7 +111,7 @@ pub fn spawn_lightwalletd_for_rpc + std::fmt::Debug>( test_type.lightwalletd_failure_messages(); let mut lightwalletd = lightwalletd_dir - .spawn_lightwalletd_child(lightwalletd_state_path, arguments)? + .spawn_lightwalletd_child(lightwalletd_state_path, test_type, arguments)? 
.with_timeout(test_type.lightwalletd_timeout()) .with_failure_regex_iter(lightwalletd_failure_messages, lightwalletd_ignore_messages); @@ -157,6 +157,8 @@ where /// as a child process in this test directory, /// potentially taking ownership of the tempdir for the duration of the child process. /// + /// Uses `test_type` to determine logging behavior for the state directory. + /// /// By default, launch a working test instance with logging, and avoid port conflicts. /// /// # Panics @@ -165,6 +167,7 @@ where fn spawn_lightwalletd_child( self, lightwalletd_state_path: impl Into>, + test_type: TestType, extra_args: Arguments, ) -> Result>; @@ -184,6 +187,7 @@ where fn spawn_lightwalletd_child( self, lightwalletd_state_path: impl Into>, + test_type: TestType, extra_args: Arguments, ) -> Result> { let test_dir = self.as_ref().to_owned(); @@ -207,6 +211,35 @@ where // the lightwalletd cache directory if let Some(lightwalletd_state_path) = lightwalletd_state_path.into() { + tracing::info!(?lightwalletd_state_path, "using lightwalletd state path"); + + // Only log the directory size if it's expected to exist already. + // FullSyncFromGenesis creates this directory, so we skip logging for it. + if !matches!(test_type, TestType::FullSyncFromGenesis { .. 
}) { + let lwd_cache_dir_path = lightwalletd_state_path.join("db/main"); + let lwd_cache_entries: Vec<_> = std::fs::read_dir(&lwd_cache_dir_path) + .expect("unexpected failure reading lightwalletd cache dir") + .collect(); + + let lwd_cache_dir_size = lwd_cache_entries.iter().fold(0, |acc, entry_result| { + acc + entry_result + .as_ref() + .map(|entry| entry.metadata().map(|meta| meta.len()).unwrap_or(0)) + .unwrap_or(0) + }); + + tracing::info!("{lwd_cache_dir_size} bytes in lightwalletd cache dir"); + + for entry_result in &lwd_cache_entries { + match entry_result { + Ok(entry) => tracing::info!("{entry:?} entry in lightwalletd cache dir"), + Err(e) => { + tracing::warn!(?e, "error reading entry in lightwalletd cache dir") + } + } + } + } + args.set_parameter( "--data-dir", lightwalletd_state_path @@ -214,6 +247,7 @@ where .expect("path is valid Unicode"), ); } else { + tracing::info!("using lightwalletd empty state path"); let empty_state_path = test_dir.join("lightwalletd_state"); std::fs::create_dir(&empty_state_path) From 638b4a212def00fb3cc2864b772394c62f4fbbf0 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 6 May 2025 05:44:03 -0300 Subject: [PATCH 168/245] feat(rpc): Add `time` and `size` field to `TransactionObject` (#9458) * add size and time to getrawtransaction object * fix annoying clippy lint about large enum variant * add todo * add TODO, remove TODO Co-authored-by: Conrado Gouvea --------- Co-authored-by: Conrado Gouvea --- zebra-rpc/src/methods.rs | 53 +++++++++++-------- ...k_verbose_hash_verbosity_2@mainnet_10.snap | 4 +- ...k_verbose_hash_verbosity_2@testnet_10.snap | 4 +- ...verbose_height_verbosity_2@mainnet_10.snap | 4 +- ...verbose_height_verbosity_2@testnet_10.snap | 4 +- ...rawtransaction_verbosity=1@mainnet_10.snap | 4 +- ...rawtransaction_verbosity=1@testnet_10.snap | 4 +- zebra-rpc/src/methods/tests/vectors.rs | 41 +++++++------- zebra-rpc/src/methods/types/transaction.rs | 14 +++++ zebra-rpc/src/queue/tests/prop.rs | 2 +- 
zebra-rpc/src/tests/vectors.rs | 12 +++-- zebra-state/src/response.rs | 13 ++++- .../service/finalized_state/zebra_db/block.rs | 12 ++++- .../zebra_db/block/tests/snapshot.rs | 2 +- .../src/service/non_finalized_state/chain.rs | 4 +- zebra-state/src/service/read/block.rs | 10 ++-- zebra-state/src/service/read/tests/vectors.rs | 1 + 17 files changed, 126 insertions(+), 62 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 13e157be611..f55ad77a8a7 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -924,22 +924,25 @@ where ), zebra_state::ReadResponse::BlockAndSize(block_and_size) => { let (block, size) = block_and_size.ok_or_misc_error("Block not found")?; - let transactions = block - .transactions - .iter() - .map(|tx| { - GetBlockTransaction::Object(TransactionObject::from_transaction( - tx.clone(), - Some(height), - Some( - confirmations - .try_into() - .expect("should be less than max block height, i32::MAX"), - ), - &network, - )) - }) - .collect(); + let block_time = block.header.time; + let transactions = + block + .transactions + .iter() + .map(|tx| { + GetBlockTransaction::Object(Box::new( + TransactionObject::from_transaction( + tx.clone(), + Some(height), + Some(confirmations.try_into().expect( + "should be less than max block height, i32::MAX", + )), + &network, + Some(block_time), + ), + )) + }) + .collect(); (transactions, Some(size)) } _ => unreachable!("unmatched response to a transaction_ids_for_block request"), @@ -1250,12 +1253,13 @@ where mempool::Response::Transactions(txns) => { if let Some(tx) = txns.first() { return Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( + GetRawTransaction::Object(Box::new(TransactionObject::from_transaction( tx.transaction.clone(), None, None, &self.network, - )) + None, + ))) } else { let hex = tx.transaction.clone().into(); GetRawTransaction::Raw(hex) @@ -1274,12 +1278,15 @@ where .map_misc_error()? 
{ zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( + GetRawTransaction::Object(Box::new(TransactionObject::from_transaction( tx.tx.clone(), Some(tx.height), Some(tx.confirmations), &self.network, - )) + // TODO: Performance gain: + // https://github.com/ZcashFoundation/zebra/pull/9458#discussion_r2059352752 + Some(tx.block_time), + ))) } else { let hex = tx.tx.into(); GetRawTransaction::Raw(hex) @@ -2243,7 +2250,7 @@ pub enum GetBlockTransaction { /// The transaction hash, hex-encoded. Hash(#[serde(with = "hex")] transaction::Hash), /// The block object. - Object(TransactionObject), + Object(Box), } /// Response to a `getblockheader` RPC request. @@ -2395,12 +2402,12 @@ pub enum GetRawTransaction { /// The raw transaction, encoded as hex bytes. Raw(#[serde(with = "hex")] SerializedTransaction), /// The transaction object. - Object(TransactionObject), + Object(Box), } impl Default for GetRawTransaction { fn default() -> Self { - Self::Object(TransactionObject::default()) + Self::Object(Box::default()) } } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 375fee775b3..4794341512a 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -53,7 +53,9 @@ expression: block "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 129, + "time": 1477671596 } ], "time": 1477671596, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index 4f84590dc61..b6a6fc2f242 100644 --- 
a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -53,7 +53,9 @@ expression: block "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 130, + "time": 1477674473 } ], "time": 1477674473, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index 375fee775b3..4794341512a 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -53,7 +53,9 @@ expression: block "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 129, + "time": 1477671596 } ], "time": 1477671596, diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index 4f84590dc61..b6a6fc2f242 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -53,7 +53,9 @@ expression: block "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 130, + "time": 1477674473 } ], "time": 1477674473, diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap index 1e63140c2ad..adac43ce5ee 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap +++ 
b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap @@ -44,6 +44,8 @@ expression: rsp "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 129, + "time": 1477671596 } } diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap index f35046e222d..1eddef2b8fd 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap @@ -44,6 +44,8 @@ expression: rsp "vShieldedSpend": [], "vShieldedOutput": [], "valueBalance": 0.0, - "valueBalanceZat": 0 + "valueBalanceZat": 0, + "size": 130, + "time": 1477674473 } } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 696f9d251fb..057ff93c5f3 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -372,13 +372,13 @@ async fn rpc_getblock() { assert_eq!(solution, &Some(block.header.solution)); for (actual_tx, expected_tx) in tx.iter().zip(block.transactions.iter()) { - if let GetBlockTransaction::Object(TransactionObject { - hex, - height, - confirmations, - .. - }) = actual_tx - { + if let GetBlockTransaction::Object(boxed_transaction_object) = actual_tx { + let TransactionObject { + hex, + height, + confirmations, + .. + } = &**boxed_transaction_object; // Dereference the Box assert_eq!(hex, &(*expected_tx).clone().into()); assert_eq!(height, &Some(i.try_into().expect("valid u32"))); assert_eq!( @@ -460,13 +460,13 @@ async fn rpc_getblock() { assert_eq!(solution, &Some(block.header.solution)); for (actual_tx, expected_tx) in tx.iter().zip(block.transactions.iter()) { - if let GetBlockTransaction::Object(TransactionObject { - hex, - height, - confirmations, - .. 
- }) = actual_tx - { + if let GetBlockTransaction::Object(boxed_transaction_object) = actual_tx { + let TransactionObject { + hex, + height, + confirmations, + .. + } = &**boxed_transaction_object; // Dereference the Box assert_eq!(hex, &(*expected_tx).clone().into()); assert_eq!(height, &Some(i.try_into().expect("valid u32"))); assert_eq!( @@ -956,15 +956,18 @@ async fn rpc_getrawtransaction() { let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(txid)); - let GetRawTransaction::Object(TransactionObject { + let transaction_object = match response + .expect("We should have a GetRawTransaction struct") + { + GetRawTransaction::Object(transaction_object) => transaction_object, + GetRawTransaction::Raw(_) => panic!("Expected GetRawTransaction::Object, got Raw"), + }; + let TransactionObject { hex, height, confirmations, .. - }) = response.expect("We should have a GetRawTransaction struct") - else { - unreachable!("Should return a Raw enum") - }; + } = *transaction_object; let height = height.expect("state requests should have height"); let confirmations = confirmations.expect("state requests should have confirmations"); diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index 26df8c4dd7a..66c7baf510f 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use chrono::{DateTime, Utc}; use hex::ToHex; use zebra_chain::{ @@ -59,6 +60,14 @@ pub struct TransactionObject { /// The net value of Sapling Spends minus Outputs in zatoshis #[serde(rename = "valueBalanceZat", skip_serializing_if = "Option::is_none")] pub value_balance_zat: Option, + + /// The size of the transaction in bytes. + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, + + /// The time the transaction was included in a block. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub time: Option, // TODO: some fields not yet supported } @@ -244,6 +253,8 @@ impl Default for TransactionObject { orchard: None, value_balance: None, value_balance_zat: None, + size: None, + time: None, } } } @@ -256,6 +267,7 @@ impl TransactionObject { height: Option, confirmations: Option, network: &Network, + block_time: Option>, ) -> Self { Self { hex: tx.clone().into(), @@ -415,6 +427,8 @@ impl TransactionObject { value_balance_zat: tx.orchard_value_balance().orchard_amount().zatoshis(), }) }, + size: tx.as_bytes().len().try_into().ok(), + time: block_time.map(|bt| bt.timestamp()), } } } diff --git a/zebra-rpc/src/queue/tests/prop.rs b/zebra-rpc/src/queue/tests/prop.rs index 9f63ecce24d..ca1240ac12a 100644 --- a/zebra-rpc/src/queue/tests/prop.rs +++ b/zebra-rpc/src/queue/tests/prop.rs @@ -294,7 +294,7 @@ proptest! { let send_task = tokio::spawn(Runner::check_state(read_state.clone(), transactions_hash_set)); let expected_request = ReadRequest::Transaction(transaction.hash()); - let response = ReadResponse::Transaction(Some(zebra_state::MinedTx::new(Arc::new(transaction), Height(1), 1))); + let response = ReadResponse::Transaction(Some(zebra_state::MinedTx::new(Arc::new(transaction), Height(1), 1, block.header.time))); read_state .expect_request(expected_request) diff --git a/zebra-rpc/src/tests/vectors.rs b/zebra-rpc/src/tests/vectors.rs index 48fd389f480..7b0a6e23ba1 100644 --- a/zebra-rpc/src/tests/vectors.rs +++ b/zebra-rpc/src/tests/vectors.rs @@ -8,7 +8,7 @@ pub fn test_transaction_serialization() { assert_eq!(serde_json::to_string(&tx).unwrap(), r#""42""#); - let tx = GetRawTransaction::Object(TransactionObject { + let tx = GetRawTransaction::Object(Box::new(TransactionObject { hex: vec![0x42].into(), height: Some(1), confirmations: Some(0), @@ -19,14 +19,16 @@ pub fn test_transaction_serialization() { value_balance: None, value_balance_zat: None, orchard: None, - }); + size: None, + time: None, + 
})); assert_eq!( serde_json::to_string(&tx).unwrap(), r#"{"hex":"42","height":1,"confirmations":0}"# ); - let tx = GetRawTransaction::Object(TransactionObject { + let tx = GetRawTransaction::Object(Box::new(TransactionObject { hex: vec![0x42].into(), height: None, confirmations: None, @@ -37,7 +39,9 @@ pub fn test_transaction_serialization() { value_balance: None, value_balance_zat: None, orchard: None, - }); + size: None, + time: None, + })); assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"42"}"#); } diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 48c9c6c5783..48132dffa4f 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -2,6 +2,8 @@ use std::{collections::BTreeMap, sync::Arc}; +use chrono::{DateTime, Utc}; + use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, ChainHistoryMmrRootHash}, @@ -119,15 +121,24 @@ pub struct MinedTx { /// The number of confirmations for this transaction /// (1 + depth of block the transaction was found in) pub confirmations: u32, + + /// The time of the block where the transaction was mined. 
+ pub block_time: DateTime, } impl MinedTx { /// Creates a new [`MinedTx`] - pub fn new(tx: Arc, height: block::Height, confirmations: u32) -> Self { + pub fn new( + tx: Arc, + height: block::Height, + confirmations: u32, + block_time: DateTime, + ) -> Self { Self { tx, height, confirmations, + block_time, } } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 8c515f0c27e..e8617cf96c2 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -15,6 +15,7 @@ use std::{ sync::Arc, }; +use chrono::{DateTime, Utc}; use itertools::Itertools; use zebra_chain::{ @@ -278,14 +279,21 @@ impl ZebraDb { /// Returns the [`Transaction`] with [`transaction::Hash`], and its [`Height`], /// if a transaction with that hash exists in the finalized chain. #[allow(clippy::unwrap_in_result)] - pub fn transaction(&self, hash: transaction::Hash) -> Option<(Arc, Height)> { + pub fn transaction( + &self, + hash: transaction::Hash, + ) -> Option<(Arc, Height, DateTime)> { let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); let transaction_location = self.transaction_location(hash)?; + let block_time = self + .block_header(transaction_location.height.into()) + .map(|header| header.time); + self.db .zs_get(&tx_by_loc, &transaction_location) - .map(|tx| (tx, transaction_location.height)) + .and_then(|tx| block_time.map(|time| (tx, transaction_location.height, time))) } /// Returns an iterator of all [`Transaction`]s for a provided block height in finalized state. 
diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index c5f1ba371d5..567de6423ef 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -343,7 +343,7 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // using transaction location queries. // Check that the transaction indexes are consistent. - let (direct_transaction, direct_transaction_height) = state + let (direct_transaction, direct_transaction_height, _) = state .transaction(transaction_hash) .expect("transactions in blocks must also be available directly"); let stored_transaction_hash = state diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 771ecc1affd..8001c51e1a6 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -8,6 +8,7 @@ use std::{ sync::Arc, }; +use chrono::{DateTime, Utc}; use mset::MultiSet; use tracing::instrument; @@ -425,11 +426,12 @@ impl Chain { pub fn transaction( &self, hash: transaction::Hash, - ) -> Option<(&Arc, block::Height)> { + ) -> Option<(&Arc, block::Height, DateTime)> { self.tx_loc_by_hash.get(&hash).map(|tx_loc| { ( &self.blocks[&tx_loc.height].block.transactions[tx_loc.index.as_usize()], tx_loc.height, + self.blocks[&tx_loc.height].block.header.time, ) }) } diff --git a/zebra-state/src/service/read/block.rs b/zebra-state/src/service/read/block.rs index f00d69091b9..d0d133e6bad 100644 --- a/zebra-state/src/service/read/block.rs +++ b/zebra-state/src/service/read/block.rs @@ -14,6 +14,8 @@ use std::sync::Arc; +use chrono::{DateTime, Utc}; + use zebra_chain::{ block::{self, Block, Height}, serialization::ZcashSerialize as _, @@ -105,7 +107,7 @@ fn transaction( chain: Option, db: &ZebraDb, hash: 
transaction::Hash, -) -> Option<(Arc, Height)> +) -> Option<(Arc, Height, DateTime)> where C: AsRef, { @@ -119,7 +121,7 @@ where chain .as_ref() .transaction(hash) - .map(|(tx, height)| (tx.clone(), height)) + .map(|(tx, height, time)| (tx.clone(), height, time)) }) .or_else(|| db.transaction(hash)) } @@ -140,10 +142,10 @@ where // can only add overlapping blocks, and hashes are unique. let chain = chain.as_ref(); - let (tx, height) = transaction(chain, db, hash)?; + let (tx, height, time) = transaction(chain, db, hash)?; let confirmations = 1 + tip_height(chain, db)?.0 - height.0; - Some(MinedTx::new(tx, height, confirmations)) + Some(MinedTx::new(tx, height, confirmations, time)) } /// Returns the [`transaction::Hash`]es for the block with `hash_or_height`, diff --git a/zebra-state/src/service/read/tests/vectors.rs b/zebra-state/src/service/read/tests/vectors.rs index 61fbdc99f6a..75a5b94c3f2 100644 --- a/zebra-state/src/service/read/tests/vectors.rs +++ b/zebra-state/src/service/read/tests/vectors.rs @@ -90,6 +90,7 @@ async fn populated_read_state_responds_correctly() -> Result<()> { tx: transaction.clone(), height: block.coinbase_height().unwrap(), confirmations: 1 + tip_height.0 - block.coinbase_height().unwrap().0, + block_time: block.header.time, }))), )]; From b0e7fd4a9a6159c2b81976452fe368cb06d2dead Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 6 May 2025 16:47:47 +0200 Subject: [PATCH 169/245] chore: Release v2.3.0 (#9475) * Update changelog * Update end of support * Add release date * Add breaking change description * Add new PRs to changelog * Add breaking change description to changelog * Update list of PRs in changelog * chore: Release * Apply suggestions from code review Co-authored-by: Arya --------- Co-authored-by: Arya --- CHANGELOG.md | 78 ++++++++++++++++++++ Cargo.lock | 28 +++---- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 6 +- tower-fallback/Cargo.toml | 4 +- zebra-chain/Cargo.toml | 6 +- 
zebra-consensus/Cargo.toml | 20 ++--- zebra-grpc/Cargo.toml | 6 +- zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 24 +++--- zebra-scan/Cargo.toml | 20 ++--- zebra-script/Cargo.toml | 6 +- zebra-state/Cargo.toml | 10 +-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +- zebrad/Cargo.toml | 30 ++++---- zebrad/src/components/sync/end_of_support.rs | 2 +- 19 files changed, 171 insertions(+), 93 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43c189f398b..b652d07de2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,84 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 2.3.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.3.0) - 2025-05-06 + +### Breaking Changes + +- The RPC endpoint is no longer enabled by default in Docker. To enable it, + follow the docs [here](https://zebra.zfnd.org/user/docker.html#rpc). +- We will no longer be publishing Docker images tagged with the `sha-` or `v` + prefixes. If you use tags starting with the `v` prefix, please update to + images tagged `N.N.N`. For example, use `2.3.0` instead of `v2.3.0`. If you + need a specific hash, each tag has a digest that you can use instead. +- All functionality that used to be guarded by the `getblocktemplate-rpcs` Cargo + feature was moved out and the feature is no longer present in the codebase. + Note that all release builds following Zebra 1.3.0 had this feature enabled by + default. 
+ +### Added + +- Track misbehaving peer connections and ban them past a threshold ([#9201](https://github.com/ZcashFoundation/zebra/pull/9201)) +- Restore internal miner ([#9311](https://github.com/ZcashFoundation/zebra/pull/9311)) +- Add `reconsider_block` method to non-finalized state ([#9260](https://github.com/ZcashFoundation/zebra/pull/9260)) +- Add NU7 constants ([#9256](https://github.com/ZcashFoundation/zebra/pull/9256)) +- Add `invalidate_block_method` and `invalidated_blocks` field to non-finalized state ([#9167](https://github.com/ZcashFoundation/zebra/pull/9167)) +- Add unused `Transaction::V6` variant ([#9339](https://github.com/ZcashFoundation/zebra/pull/9339)) + +### Changed + +- Downgrade verbose info message ([#9448](https://github.com/ZcashFoundation/zebra/pull/9448)) +- Use read-only db instance when running `tip-height` or `copy-state` commands ([#9359](https://github.com/ZcashFoundation/zebra/pull/9359)) +- Refactor format upgrades into trait ([#9263](https://github.com/ZcashFoundation/zebra/pull/9263)) +- Remove the `getblocktemplate-rpcs` Cargo feature ([#9401](https://github.com/ZcashFoundation/zebra/pull/9401)) +- Improve cache dir and database startup panics ([#9441](https://github.com/ZcashFoundation/zebra/pull/9441)) + +### Fixed + +- Remove a redundant startup warning ([#9397](https://github.com/ZcashFoundation/zebra/pull/9397)) +- Advertise mined blocks ([#9176](https://github.com/ZcashFoundation/zebra/pull/9176)) +- Ensure secondary rocksdb instance has caught up to the primary instance ([#9346](https://github.com/ZcashFoundation/zebra/pull/9346)) +- Use network kind of `TestnetKind` in transparent addresses on Regtest ([#9175](https://github.com/ZcashFoundation/zebra/pull/9175)) +- Fix redundant attributes on enum variants ([#9309](https://github.com/ZcashFoundation/zebra/pull/9309)) + +### RPCs + +- Add `time` and `size` fields to `TransactionObject` ([#9458](https://github.com/ZcashFoundation/zebra/pull/9458)) +- Add inbound peers 
to `getpeerinfo` response ([#9214](https://github.com/ZcashFoundation/zebra/pull/9214)) +- Extend `getinfo` ([#9261](https://github.com/ZcashFoundation/zebra/pull/9261)) +- Add fields to `getblockchaininfo` RPC output ([#9215](https://github.com/ZcashFoundation/zebra/pull/9215)) +- Add some missing fields to transaction object ([#9329](https://github.com/ZcashFoundation/zebra/pull/9329)) +- Support negative heights in `HashOrHeight` ([#9316](https://github.com/ZcashFoundation/zebra/pull/9316)) +- Add verbose support to getrawmempool ([#9249](https://github.com/ZcashFoundation/zebra/pull/9249)) +- Fill size field in getblock with verbosity=2 ([#9327](https://github.com/ZcashFoundation/zebra/pull/9327)) +- Add `blockcommitments` field to `getblock` output ([#9217](https://github.com/ZcashFoundation/zebra/pull/9217)) +- Accept an unused second param in `sendrawtransaction` RPC ([#9242](https://github.com/ZcashFoundation/zebra/pull/9242)) +- Make start and end fields optional and apply range rules to match zcashd ([#9408](https://github.com/ZcashFoundation/zebra/pull/9408)) +- Return only the history tree root in `GetBlockTemplateChainInfo` response ([#9444](https://github.com/ZcashFoundation/zebra/pull/9444)) +- Correctly map JSON-RPC to/from 2.0 ([#9216](https://github.com/ZcashFoundation/zebra/pull/9216)) +- Permit JSON-RPC IDs to be non-strings ([#9341](https://github.com/ZcashFoundation/zebra/pull/9341)) +- Match coinbase outputs order in `Getblocktemplate` ([#9272](https://github.com/ZcashFoundation/zebra/pull/9272)) + +### Docker + +- Refactor Dockerfile and entrypoint ([#8923](https://github.com/ZcashFoundation/zebra/pull/8923)) +- Enhance Zebra configuration options and entrypoint logic ([#9344](https://github.com/ZcashFoundation/zebra/pull/9344)) +- Better permission and cache dirs handling in Docker ([#9323](https://github.com/ZcashFoundation/zebra/pull/9323)) +- Allow r/w access in mounted volumes 
([#9281](https://github.com/ZcashFoundation/zebra/pull/9281)) + +### Documentation + +- Update examples for running Zebra in Docker ([#9269](https://github.com/ZcashFoundation/zebra/pull/9269)) +- Add architectural decision records structure ([#9310](https://github.com/ZcashFoundation/zebra/pull/9310)) +- Add Mempool Specification to Zebra Book ([#9336](https://github.com/ZcashFoundation/zebra/pull/9336)) +- Complete the Treestate RFC documentation ([#9340](https://github.com/ZcashFoundation/zebra/pull/9340)) + +### Contributors + +@AloeareV, @Metalcape, @PaulLaux, @VolodymyrBg, @aphelionz, @arya2, @conradoplg, +@crStiv, @elijahhampton, @gustavovalverde, @mdqst, @natalieesk, @nuttycom, +@oxarbitrage, @podZzzzz, @sellout, @str4d, @upbqdn and @zeroprooff. + ## [Zebra 2.2.0](https://github.com/ZcashFoundation/zebra/releases/tag/v2.2.0) - 2025-02-03 In this release, Zebra introduced an additional consensus check on the branch ID of Nu6 transactions diff --git a/Cargo.lock b/Cargo.lock index 39a4e546868..992f8547917 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5018,7 +5018,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.21" +version = "0.2.41-beta.22" dependencies = [ "color-eyre", "ed25519-zebra", @@ -5041,7 +5041,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.21" +version = "0.2.41-beta.22" dependencies = [ "futures-core", "pin-project", @@ -6229,7 +6229,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "bitflags 2.9.0", "bitflags-serde-legacy", @@ -6295,7 +6295,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "bellman", "blake2b_simd", @@ -6341,7 +6341,7 @@ dependencies = [ [[package]] name = "zebra-grpc" -version = "0.1.0-alpha.12" +version = "0.1.0-alpha.13" dependencies = [ "color-eyre", "futures-util", @@ -6363,7 +6363,7 @@ dependencies 
= [ [[package]] name = "zebra-network" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "bitflags 2.9.0", "byteorder", @@ -6404,7 +6404,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "color-eyre", "jsonrpsee-types", @@ -6417,7 +6417,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "base64 0.22.1", "chrono", @@ -6460,7 +6460,7 @@ dependencies = [ [[package]] name = "zebra-scan" -version = "0.1.0-alpha.14" +version = "0.1.0-alpha.15" dependencies = [ "bls12_381", "chrono", @@ -6508,7 +6508,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "hex", "lazy_static", @@ -6520,7 +6520,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "bincode", "chrono", @@ -6566,7 +6566,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "color-eyre", "futures", @@ -6594,7 +6594,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" dependencies = [ "color-eyre", "hex", @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "2.2.0" +version = "2.3.0" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index c16fe1dd41b..bd545c46b60 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -29,7 +29,7 @@ docker run \ You can also use `docker compose`, which we recommend. 
First get the repo: ```shell -git clone --depth 1 --branch v2.2.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v2.3.0 https://github.com/ZcashFoundation/zebra.git cd zebra ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index 272a99cba7a..84247780199 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -76,7 +76,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v2.2.0 +git checkout v2.3.0 ``` 3. Build and Run `zebrad` @@ -89,7 +89,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.2.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v2.3.0 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 90cfae1fc3e..a482b877764 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.21" +version = "0.2.41-beta.22" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -43,10 +43,10 @@ rand = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } tokio-test = { workspace = true } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.21" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.22" } tower-test = { workspace = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 745574c898b..f543d0e111b 100644 
--- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.21" +version = "0.2.41-beta.22" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" @@ -24,4 +24,4 @@ tracing = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 3d50b186baf..a514f9f34ed 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -138,7 +138,7 @@ proptest-derive = { workspace = true, optional = true } rand = { workspace = true, optional = true } rand_chacha = { workspace = true, optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46", optional = true } [dev-dependencies] # Benchmarks @@ -161,7 +161,7 @@ rand_chacha = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46" } [[bench]] name = "block" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 07a1d09bffc..4fe2bf0de15 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = 
["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -58,13 +58,13 @@ orchard.workspace = true zcash_proofs = { workspace = true, features = ["multicore" ] } wagyu-zcash-parameters = { workspace = true } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.21" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.21" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.22" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.22" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.45" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.46" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46" } # prod feature progress-bar howudoin = { workspace = true, optional = true } @@ -89,6 +89,6 @@ tokio = { workspace = true, features = ["full", "tracing", "test-util"] } tracing-error = { workspace = true } tracing-subscriber = { workspace = true } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46" } diff --git 
a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml index ad696e03482..98a40ee1599 100644 --- a/zebra-grpc/Cargo.toml +++ b/zebra-grpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-grpc" -version = "0.1.0-alpha.12" +version = "0.1.0-alpha.13" authors = ["Zcash Foundation "] description = "Zebra gRPC interface" license = "MIT OR Apache-2.0" @@ -28,8 +28,8 @@ color-eyre = { workspace = true } zcash_primitives.workspace = true -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["shielded-scan"] } -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.45" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.46" } [build-dependencies] tonic-build = { workspace = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 69d41648286..59e5814eea1 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { workspace = true, optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = ["async-error"] } [dev-dependencies] proptest = { workspace = true } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 7f28671e75d..72e0aca6033 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = 
"The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -32,7 +32,7 @@ rpc-client = [ shielded-scan = [] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.46" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index cdc1f8d8262..73a62f5164a 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -90,16 +90,16 @@ zcash_address = { workspace = true } # Test-only feature proptest-impl proptest = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = [ "json-conversion", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.46" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.46" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46", features = [ "rpc-client", ] } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.45" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.46" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46" } [build-dependencies] tonic-build = { workspace = true, optional = true } @@ -112,17 +112,17 @@ proptest = { workspace = true } thiserror = { workspace = true } tokio = { workspace 
= true, features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = [ +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = [ "proptest-impl", ] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45", features = [ +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.46", features = [ "proptest-impl", ] } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45", features = [ +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.46", features = [ "proptest-impl", ] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = [ +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46", features = [ "proptest-impl", ] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.46" } diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index ef928498b2a..84f87edc7c7 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-scan" -version = "0.1.0-alpha.14" +version = "0.1.0-alpha.15" authors = ["Zcash Foundation "] description = "Shielded transaction scanner for the Zcash blockchain" license = "MIT OR Apache-2.0" @@ -79,11 +79,11 @@ zcash_address.workspace = true sapling-crypto.workspace = true zip32 = { workspace = true, features = ["std"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["shielded-scan"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["shielded-scan"] } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["shielded-scan"] } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.12" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", 
features = ["shielded-scan"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.13" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.46" } chrono = { workspace = true, features = ["clock", "std", "serde"] } @@ -98,7 +98,7 @@ jubjub = { workspace = true, optional = true } rand = { workspace = true, optional = true } zcash_note_encryption = { workspace = true, optional = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45", optional = true } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.46", optional = true } # zebra-scanner binary dependencies tracing-subscriber = { workspace = true, features = ["env-filter"] } @@ -109,7 +109,7 @@ serde_json = { workspace = true } jsonrpc = { workspace = true, optional = true } hex = { workspace = true, optional = true } -zebrad = { path = "../zebrad", version = "2.2.0" } +zebrad = { path = "../zebrad", version = "2.3.0" } [dev-dependencies] insta = { workspace = true, features = ["ron", "redactions"] } @@ -127,5 +127,5 @@ zcash_note_encryption = { workspace = true } toml = { workspace = true } tonic = { workspace = true } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.46" } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 1813b7c2629..2f385501406 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = 
"Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -16,11 +16,11 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = { workspace = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46" } thiserror = { workspace = true } [dev-dependencies] hex = { workspace = true } lazy_static = { workspace = true } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.46" } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 1b49b1f6b87..5fe8d111699 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -78,13 +78,13 @@ tracing = { workspace = true } elasticsearch = { workspace = true, features = ["rustls-tls"], optional = true } serde_json = { workspace = true, optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = ["async-error"] } # prod feature progress-bar howudoin = { workspace = true, optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46", optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } @@ -109,5 +109,5 @@ jubjub = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] 
} -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.46" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 10c455d5b49..ea7eb1aac4d 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 782319e3762..d682dd6a5a7 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.45" +version = "1.0.0-beta.46" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -85,11 +85,11 @@ tracing-error = { workspace = true } tracing-subscriber = { workspace = true } thiserror = { workspace = true } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.46" } # These crates are needed for the zebra-checkpoints binary itertools = { workspace = true, optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 5f702b0488b..7fcb4326456 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "2.2.0" +version = "2.3.0" authors = 
["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -153,15 +153,15 @@ test_sync_past_mandatory_checkpoint_testnet = [] tx_v6 = ["zebra-chain/tx_v6", "zebra-state/tx_v6", "zebra-consensus/tx_v6"] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.45" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.45", features = ["rpc-client"] } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.45" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.46" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.46" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.46", features = ["rpc-client"] } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.46" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.45", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.46", optional = true } abscissa_core = { workspace = true } clap = { workspace = true, features = ["cargo"] } @@ -275,13 +275,13 @@ proptest-derive = { workspace = true } # enable span traces and track caller in tests color-eyre = { workspace = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.45", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", version = 
"1.0.0-beta.45", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.45", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.46", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.46", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.45" } -zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.12" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.46" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.13" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). @@ -292,7 +292,7 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.12" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.45" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.46" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 9fa98425531..0f6f998b802 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. 
-pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_809_400; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_914_600; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. From 0ca510b7cb4163420078d3bc43c531e33d3f8578 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 6 May 2025 19:09:17 -0300 Subject: [PATCH 170/245] fix z_get_treestate optional values (#9451) --- Cargo.lock | 1 + zebra-rpc/Cargo.toml | 1 + ..._get_treestate_by_hash@custom_testnet.snap | 8 ++- ...mpty_Sapling_treestate@custom_testnet.snap | 3 ++ ...treestate_no_treestate@custom_testnet.snap | 8 ++- zebra-rpc/src/methods/trees.rs | 50 +++++++++++-------- 6 files changed, 49 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 992f8547917..6c14c870da9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6438,6 +6438,7 @@ dependencies = [ "semver", "serde", "serde_json", + "serde_with", "thiserror 2.0.12", "tokio", "tokio-stream", diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 73a62f5164a..1678209c089 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -56,6 +56,7 @@ hyper = { workspace = true } http-body-util = { workspace = true } semver = { workspace = true } serde_json = { workspace = true } +serde_with = { workspace = true, features = ["hex"] } indexmap = { workspace = true, features = ["serde"] } # RPC endpoint basic auth diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_hash@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_hash@custom_testnet.snap index c262f204552..3e8b80d4e6f 100644 --- a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_hash@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_by_hash@custom_testnet.snap @@ -5,5 +5,11 @@ expression: treestate { "hash": "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38", "height": 0, - "time": 1477648033 + "time": 1477648033, + "sapling": { + 
"commitments": {} + }, + "orchard": { + "commitments": {} + } } diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_empty_Sapling_treestate@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_empty_Sapling_treestate@custom_testnet.snap index 3ba356fe52b..8ffda91b891 100644 --- a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_empty_Sapling_treestate@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_empty_Sapling_treestate@custom_testnet.snap @@ -10,5 +10,8 @@ expression: treestate "commitments": { "finalState": "000000" } + }, + "orchard": { + "commitments": {} } } diff --git a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_no_treestate@custom_testnet.snap b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_no_treestate@custom_testnet.snap index 7c77e4c3a6d..dfb9b282703 100644 --- a/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_no_treestate@custom_testnet.snap +++ b/zebra-rpc/src/methods/tests/snapshots/z_get_treestate_no_treestate@custom_testnet.snap @@ -5,5 +5,11 @@ expression: treestate { "hash": "025579869bcf52a989337342f5f57a84f3a28b968f7d6a8307902b065a668d23", "height": 1, - "time": 1477674473 + "time": 1477674473, + "sapling": { + "commitments": {} + }, + "orchard": { + "commitments": {} + } } diff --git a/zebra-rpc/src/methods/trees.rs b/zebra-rpc/src/methods/trees.rs index 70838bb719e..2bf77992ecc 100644 --- a/zebra-rpc/src/methods/trees.rs +++ b/zebra-rpc/src/methods/trees.rs @@ -73,12 +73,10 @@ pub struct GetTreestate { time: u32, /// A treestate containing a Sapling note commitment tree, hex-encoded. - #[serde(skip_serializing_if = "Option::is_none")] - sapling: Option>>, + sapling: Treestate>, /// A treestate containing an Orchard note commitment tree, hex-encoded. 
- #[serde(skip_serializing_if = "Option::is_none")] - orchard: Option>>, + orchard: Treestate>, } impl GetTreestate { @@ -90,12 +88,16 @@ impl GetTreestate { sapling: Option>, orchard: Option>, ) -> Self { - let sapling = sapling.map(|tree| Treestate { - commitments: Commitments { final_state: tree }, - }); - let orchard = orchard.map(|tree| Treestate { - commitments: Commitments { final_state: tree }, - }); + let sapling = Treestate { + commitments: Commitments { + final_state: sapling, + }, + }; + let orchard = Treestate { + commitments: Commitments { + final_state: orchard, + }, + }; Self { hash, @@ -112,10 +114,8 @@ impl GetTreestate { self.hash, self.height, self.time, - self.sapling - .map(|treestate| treestate.commitments.final_state), - self.orchard - .map(|treestate| treestate.commitments.final_state), + self.sapling.commitments.final_state, + self.orchard.commitments.final_state, ) } } @@ -154,28 +154,38 @@ impl> Treestate { } } +impl Default for Treestate> { + fn default() -> Self { + Self { + commitments: Commitments { final_state: None }, + } + } +} + /// A wrapper that contains either an Orchard or Sapling note commitment tree. /// /// Note that in the original [`z_gettreestate`][1] RPC, [`Commitments`] also /// contains the field `finalRoot`. Zebra does *not* use this field. /// /// [1]: https://zcash.github.io/rpc/z_gettreestate.html +#[serde_with::serde_as] #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] pub struct Commitments> { /// Orchard or Sapling serialized note commitment tree, hex-encoded. - #[serde(with = "hex")] + #[serde_as(as = "Option")] + #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "finalState")] - final_state: Tree, + final_state: Option, } impl> Commitments { - /// Returns a new instance of ['Commitments']. - pub fn new(final_state: Tree) -> Self { + /// Returns a new instance of ['Commitments'] with optional `final_state`. 
+ pub fn new(final_state: Option) -> Self { Commitments { final_state } } - /// Returns a reference to the final_state. - pub fn inner(&self) -> &Tree { + /// Returns a reference to the optional `final_state`. + pub fn inner(&self) -> &Option { &self.final_state } } From 3a9927969062e1427882fdad1fb761a6ab0ca3c3 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 7 May 2025 07:19:15 -0300 Subject: [PATCH 171/245] use `RawValue` instead of `Value` for the `result` field of `JsonRpcResponse` (#9474) --- zebra-rpc/src/server/http_request_compatibility.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index a5e0c41f57b..618ab436ee8 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -301,7 +301,7 @@ struct JsonRpcResponse { jsonrpc: Option, id: serde_json::Value, #[serde(skip_serializing_if = "Option::is_none")] - result: Option, + result: Option>, #[serde(skip_serializing_if = "Option::is_none")] error: Option, } @@ -311,12 +311,16 @@ impl JsonRpcResponse { match version { JsonRpcVersion::Bitcoind => { self.jsonrpc = None; - self.result = self.result.or(Some(serde_json::Value::Null)); + self.result = self + .result + .or_else(|| serde_json::value::to_raw_value(&()).ok()); self.error = self.error.or(Some(serde_json::Value::Null)); } JsonRpcVersion::Lightwalletd => { self.jsonrpc = Some("1.0".into()); - self.result = self.result.or(Some(serde_json::Value::Null)); + self.result = self + .result + .or_else(|| serde_json::value::to_raw_value(&()).ok()); self.error = self.error.or(Some(serde_json::Value::Null)); } JsonRpcVersion::TwoPointZero => { @@ -325,7 +329,9 @@ impl JsonRpcResponse { // we map the result explicitly to `Null` when there is no error. 
assert_eq!(self.jsonrpc.as_deref(), Some("2.0")); if self.error.is_none() { - self.result = self.result.or(Some(serde_json::Value::Null)); + self.result = self + .result + .or_else(|| serde_json::value::to_raw_value(&()).ok()); } else { assert!(self.result.is_none()); } From ca51c39003169fda913fff4eb3f58457ae55a89c Mon Sep 17 00:00:00 2001 From: ala-mode Date: Thu, 8 May 2025 03:46:59 -0400 Subject: [PATCH 172/245] update README with Arch build patch (#9513) * update readme with GCC on Arch patch * suggested change --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 416842bf967..47c582fb087 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,12 @@ sudo pacman -S rust clang protobuf Note that the package `clang` includes `libclang` as well as the C++ compiler. +Recently the GCC version on Arch has broken a build script in the `rocksdb` dependency. A workaround is: + +```sh +export CXXFLAGS="$CXXFLAGS -include cstdint" +``` +
Once you have the dependencies in place, you can build and install Zebra with: From 89f825265a8fa184b5d4619da7af900f10870769 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 9 May 2025 11:56:54 +0200 Subject: [PATCH 173/245] change(rpc): Move GBT RPCs into the main RPC server (#9459) * Remove the `getblocktemplate-rpcs` Cargo feature * Merge GBT RPCs with the rest * Fix `rpc_server_spawn_port_conflict` * Fix snapshot data * Remove old files after merge * Fix snapshot for `get_mining_info` * Fix refs in docs * Update snapshot data for `bet_block_count` * Update snapshot data for `get_minig_info` * Fix snapshots * Fix RPC tests * Unguard internal miner config * remove getblocktemplate file (#9485) * Fix imports in `zebra-rpc/src/server.rs` * Fix imports in `zebra-rpc/src/methods.rs` * Rename vars in `RpcImpl::new` * Split imports in `.../methods/tests/snapshot.rs` * Refactor imports in `.../methods/tests/vectors.rs` * Update error log messages * fmt * Fix snapshots with missing peers * fmt --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- zebra-rpc/src/config.rs | 96 +- zebra-rpc/src/config/mining.rs | 1 + zebra-rpc/src/config/rpc.rs | 94 ++ zebra-rpc/src/methods.rs | 1150 +++++++++++++- .../src/methods/get_block_template_rpcs.rs | 1361 ----------------- .../get_block_template.rs | 480 ------ .../methods/get_block_template_rpcs/types.rs | 13 - .../types/get_block_template.rs | 389 ----- .../types/transaction.rs | 130 -- zebra-rpc/src/methods/tests/prop.rs | 55 +- zebra-rpc/src/methods/tests/snapshot.rs | 706 ++++++++- .../tests/snapshot/get_block_template_rpcs.rs | 636 -------- .../snapshots/get_block_count@mainnet_10.snap | 5 - .../snapshots/get_block_count@testnet_10.snap | 5 - .../snapshots/get_block_count@mainnet_10.snap | 5 + .../snapshots/get_block_count@testnet_10.snap | 5 + ...h_invalid_excessive_height@mainnet_10.snap | 0 ...h_invalid_excessive_height@testnet_10.snap | 0 
.../get_block_hash_valid@mainnet_10.snap | 0 .../get_block_hash_valid@testnet_10.snap | 0 ...k_subsidy_excessive_height@mainnet_10.snap | 0 ...k_subsidy_excessive_height@testnet_10.snap | 0 ...lock_subsidy_future_height@mainnet_10.snap | 0 ...lock_subsidy_future_height@testnet_10.snap | 0 ...bsidy_future_nu6_height@nu6testnet_10.snap | 0 ...t_block_subsidy_tip_height@mainnet_10.snap | 0 ...t_block_subsidy_tip_height@testnet_10.snap | 0 ...template_basic.coinbase_tx@mainnet_10.snap | 0 ...template_basic.coinbase_tx@testnet_10.snap | 0 .../get_block_template_basic@mainnet_10.snap | 0 .../get_block_template_basic@testnet_10.snap | 0 ..._template_invalid-proposal@mainnet_10.snap | 0 ..._template_invalid-proposal@testnet_10.snap | 0 ...late_long_poll.coinbase_tx@mainnet_10.snap | 0 ...late_long_poll.coinbase_tx@testnet_10.snap | 0 ...t_block_template_long_poll@mainnet_10.snap | 0 ...t_block_template_long_poll@testnet_10.snap | 0 ...et_block_template_proposal@mainnet_10.snap | 0 ...et_block_template_proposal@testnet_10.snap | 0 .../get_difficulty_valid_mock@mainnet_10.snap | 0 .../get_difficulty_valid_mock@testnet_10.snap | 0 .../snapshots/get_mining_info@mainnet_10.snap | 2 +- .../snapshots/get_mining_info@testnet_10.snap | 2 +- .../get_network_sol_ps@mainnet_10.snap | 0 .../get_network_sol_ps@testnet_10.snap | 0 .../snapshots/get_peer_info@mainnet_10.snap | 2 +- .../snapshots/get_peer_info@testnet_10.snap | 2 +- ...t_rpc_submit_block_invalid@mainnet_10.snap | 0 ...t_rpc_submit_block_invalid@testnet_10.snap | 0 .../validate_address_basic@mainnet_10.snap | 0 .../validate_address_basic@testnet_10.snap | 0 .../validate_address_invalid@mainnet_10.snap | 0 .../validate_address_invalid@testnet_10.snap | 0 ...list_unified_receivers_ua1@mainnet_10.snap | 0 ...list_unified_receivers_ua1@testnet_10.snap | 0 ...list_unified_receivers_ua2@mainnet_10.snap | 0 ...list_unified_receivers_ua2@testnet_10.snap | 0 .../z_validate_address_basic@mainnet_10.snap | 0 
.../z_validate_address_basic@testnet_10.snap | 0 ...z_validate_address_invalid@mainnet_10.snap | 0 ...z_validate_address_invalid@testnet_10.snap | 0 zebra-rpc/src/methods/tests/vectors.rs | 707 ++++----- zebra-rpc/src/methods/types.rs | 23 +- .../types/default_roots.rs | 0 .../src/methods/types/get_block_template.rs | 1012 ++++++++++++ .../get_block_template}/constants.rs | 8 +- .../types/get_block_template/parameters.rs | 5 +- .../types/get_block_template/proposal.rs | 2 +- .../get_block_template}/zip317.rs | 16 +- .../get_block_template}/zip317/tests.rs | 0 .../src/methods/types/get_blockchain_info.rs | 1 + .../types/get_mining_info.rs | 0 .../src/methods/types/get_raw_mempool.rs | 3 +- .../types/long_poll.rs | 0 .../types/peer_info.rs | 0 .../types/submit_block.rs | 4 +- .../types/subsidy.rs | 2 +- zebra-rpc/src/methods/types/transaction.rs | 142 +- .../types/unified_address.rs | 0 .../types/validate_address.rs | 0 .../types/z_validate_address.rs | 0 zebra-rpc/src/server.rs | 106 +- zebra-rpc/src/server/tests/vectors.rs | 107 +- zebra-rpc/src/tests/vectors.rs | 2 +- .../bin/block-template-to-proposal/args.rs | 3 +- .../bin/block-template-to-proposal/main.rs | 6 +- zebra-utils/src/bin/openapi-generator/main.rs | 60 +- zebrad/src/commands/start.rs | 103 +- .../components/inbound/tests/fake_peer_set.rs | 33 +- .../components/inbound/tests/real_peer_set.rs | 21 +- zebrad/src/components/miner.rs | 27 +- zebrad/src/config.rs | 2 +- zebrad/tests/acceptance.rs | 87 +- zebrad/tests/common/failure_messages.rs | 4 +- .../get_block_template.rs | 24 +- .../get_block_template_rpcs/get_peer_info.rs | 2 +- zebrad/tests/common/regtest.rs | 21 +- 97 files changed, 3678 insertions(+), 3994 deletions(-) create mode 100644 zebra-rpc/src/config/rpc.rs delete mode 100644 zebra-rpc/src/methods/get_block_template_rpcs.rs delete mode 100644 zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs delete mode 100644 zebra-rpc/src/methods/get_block_template_rpcs/types.rs delete 
mode 100644 zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs delete mode 100644 zebra-rpc/src/methods/get_block_template_rpcs/types/transaction.rs delete mode 100644 zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs delete mode 100644 zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@mainnet_10.snap delete mode 100644 zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@testnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_count@mainnet_10.snap create mode 100644 zebra-rpc/src/methods/tests/snapshots/get_block_count@testnet_10.snap rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_hash_invalid_excessive_height@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_hash_invalid_excessive_height@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_hash_valid@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_hash_valid@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_excessive_height@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_excessive_height@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_future_height@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_future_height@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_future_nu6_height@nu6testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_tip_height@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_subsidy_tip_height@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => 
}/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_basic@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_basic@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_invalid-proposal@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_invalid-proposal@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_long_poll@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_long_poll@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_proposal@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_block_template_proposal@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_difficulty_valid_mock@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_difficulty_valid_mock@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_mining_info@mainnet_10.snap (70%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_mining_info@testnet_10.snap (70%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_network_sol_ps@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_network_sol_ps@testnet_10.snap (100%) rename 
zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_peer_info@mainnet_10.snap (57%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/get_peer_info@testnet_10.snap (57%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/snapshot_rpc_submit_block_invalid@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/snapshot_rpc_submit_block_invalid@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/validate_address_basic@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/validate_address_basic@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/validate_address_invalid@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/validate_address_invalid@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_list_unified_receivers_ua1@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_list_unified_receivers_ua1@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_list_unified_receivers_ua2@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_list_unified_receivers_ua2@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_validate_address_basic@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_validate_address_basic@testnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_validate_address_invalid@mainnet_10.snap (100%) rename zebra-rpc/src/methods/tests/{snapshot => }/snapshots/z_validate_address_invalid@testnet_10.snap (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/default_roots.rs (100%) create mode 100644 zebra-rpc/src/methods/types/get_block_template.rs rename zebra-rpc/src/methods/{get_block_template_rpcs => types/get_block_template}/constants.rs (90%) rename 
zebra-rpc/src/methods/{get_block_template_rpcs => }/types/get_block_template/parameters.rs (96%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/get_block_template/proposal.rs (99%) rename zebra-rpc/src/methods/{get_block_template_rpcs => types/get_block_template}/zip317.rs (99%) rename zebra-rpc/src/methods/{get_block_template_rpcs => types/get_block_template}/zip317/tests.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/get_mining_info.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/long_poll.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/peer_info.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/submit_block.rs (95%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/subsidy.rs (99%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/unified_address.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/validate_address.rs (100%) rename zebra-rpc/src/methods/{get_block_template_rpcs => }/types/z_validate_address.rs (100%) diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index a6711c3f4cc..13fea023661 100644 --- a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -1,96 +1,4 @@ -//! User-configurable RPC settings. - -use std::{net::SocketAddr, path::PathBuf}; - -use serde::{Deserialize, Serialize}; - -use zebra_chain::common::default_cache_dir; +//! User-configurable settings. pub mod mining; - -/// RPC configuration section. -#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Config { - /// IP address and port for the RPC server. - /// - /// Note: The RPC server is disabled by default. 
- /// To enable the RPC server, set a listen address in the config: - /// ```toml - /// [rpc] - /// listen_addr = '127.0.0.1:8232' - /// ``` - /// - /// The recommended ports for the RPC server are: - /// - Mainnet: 127.0.0.1:8232 - /// - Testnet: 127.0.0.1:18232 - /// - /// # Security - /// - /// If you bind Zebra's RPC port to a public IP address, - /// anyone on the internet can send transactions via your node. - /// They can also query your node's state. - pub listen_addr: Option, - - /// IP address and port for the indexer RPC server. - /// - /// Note: The indexer RPC server is disabled by default. - /// To enable the indexer RPC server, compile `zebrad` with the - /// `indexer` feature flag and set a listen address in the config: - /// ```toml - /// [rpc] - /// indexer_listen_addr = '127.0.0.1:8230' - /// ``` - /// - /// # Security - /// - /// If you bind Zebra's indexer RPC port to a public IP address, - /// anyone on the internet can query your node's state. - pub indexer_listen_addr: Option, - - /// The number of threads used to process RPC requests and responses. - /// - /// This field is deprecated and could be removed in a future release. - /// We keep it just for backward compatibility but it actually do nothing. - /// It was something configurable when the RPC server was based in the jsonrpc-core crate, - /// not anymore since we migrated to jsonrpsee. - // TODO: Prefix this field name with an underscore so it's clear that it's now unused, and - // use serde(rename) to continue successfully deserializing old configs. - pub parallel_cpu_threads: usize, - - /// Test-only option that makes Zebra say it is at the chain tip, - /// no matter what the estimated height or local clock is. - pub debug_force_finished_sync: bool, - - /// The directory where Zebra stores RPC cookies. - pub cookie_dir: PathBuf, - - /// Enable cookie-based authentication for RPCs. - pub enable_cookie_auth: bool, -} - -// This impl isn't derivable because it depends on features. 
-#[allow(clippy::derivable_impls)] -impl Default for Config { - fn default() -> Self { - Self { - // Disable RPCs by default. - listen_addr: None, - - // Disable indexer RPCs by default. - indexer_listen_addr: None, - - // Use multiple threads, because we pause requests during getblocktemplate long polling - parallel_cpu_threads: 0, - - // Debug options are always off by default. - debug_force_finished_sync: false, - - // Use the default cache dir for the auth cookie. - cookie_dir: default_cache_dir(), - - // Enable cookie-based authentication by default. - enable_cookie_auth: true, - } - } -} +pub mod rpc; diff --git a/zebra-rpc/src/config/mining.rs b/zebra-rpc/src/config/mining.rs index 2946d80bf81..4206262f8ba 100644 --- a/zebra-rpc/src/config/mining.rs +++ b/zebra-rpc/src/config/mining.rs @@ -28,6 +28,7 @@ pub struct Config { /// Should Zebra's block templates try to imitate `zcashd`? /// /// This developer-only config is not supported for general use. + /// TODO: remove this option as part of zcashd deprecation pub debug_like_zcashd: bool, /// Mine blocks using Zebra's internal miner, without an external mining pool or equihash solver. diff --git a/zebra-rpc/src/config/rpc.rs b/zebra-rpc/src/config/rpc.rs new file mode 100644 index 00000000000..2cb924dd840 --- /dev/null +++ b/zebra-rpc/src/config/rpc.rs @@ -0,0 +1,94 @@ +//! RPC config + +use std::{net::SocketAddr, path::PathBuf}; + +use serde::{Deserialize, Serialize}; + +use zebra_chain::common::default_cache_dir; + +/// RPC configuration section. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct Config { + /// IP address and port for the RPC server. + /// + /// Note: The RPC server is disabled by default. 
+ /// To enable the RPC server, set a listen address in the config: + /// ```toml + /// [rpc] + /// listen_addr = '127.0.0.1:8232' + /// ``` + /// + /// The recommended ports for the RPC server are: + /// - Mainnet: 127.0.0.1:8232 + /// - Testnet: 127.0.0.1:18232 + /// + /// # Security + /// + /// If you bind Zebra's RPC port to a public IP address, + /// anyone on the internet can send transactions via your node. + /// They can also query your node's state. + pub listen_addr: Option, + + /// IP address and port for the indexer RPC server. + /// + /// Note: The indexer RPC server is disabled by default. + /// To enable the indexer RPC server, compile `zebrad` with the + /// `indexer` feature flag and set a listen address in the config: + /// ```toml + /// [rpc] + /// indexer_listen_addr = '127.0.0.1:8230' + /// ``` + /// + /// # Security + /// + /// If you bind Zebra's indexer RPC port to a public IP address, + /// anyone on the internet can query your node's state. + pub indexer_listen_addr: Option, + + /// The number of threads used to process RPC requests and responses. + /// + /// This field is deprecated and could be removed in a future release. + /// We keep it just for backward compatibility but it actually do nothing. + /// It was something configurable when the RPC server was based in the jsonrpc-core crate, + /// not anymore since we migrated to jsonrpsee. + // TODO: Prefix this field name with an underscore so it's clear that it's now unused, and + // use serde(rename) to continue successfully deserializing old configs. + pub parallel_cpu_threads: usize, + + /// Test-only option that makes Zebra say it is at the chain tip, + /// no matter what the estimated height or local clock is. + pub debug_force_finished_sync: bool, + + /// The directory where Zebra stores RPC cookies. + pub cookie_dir: PathBuf, + + /// Enable cookie-based authentication for RPCs. + pub enable_cookie_auth: bool, +} + +// This impl isn't derivable because it depends on features. 
+#[allow(clippy::derivable_impls)] +impl Default for Config { + fn default() -> Self { + Self { + // Disable RPCs by default. + listen_addr: None, + + // Disable indexer RPCs by default. + indexer_listen_addr: None, + + // Use multiple threads, because we pause requests during getblocktemplate long polling + parallel_cpu_threads: 0, + + // Debug options are always off by default. + debug_force_finished_sync: false, + + // Use the default cache dir for the auth cookie. + cookie_dir: default_cache_dir(), + + // Enable cookie-based authentication by default. + enable_cookie_auth: true, + } + } +} diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index f55ad77a8a7..a0baf4a1e74 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -7,13 +7,16 @@ //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. use std::{ + cmp, collections::{HashMap, HashSet}, - fmt::Debug, + fmt, ops::RangeInclusive, + sync::Arc, + time::Duration, }; use chrono::Utc; -use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt}; +use futures::{future::OptionFuture, stream::FuturesOrdered, StreamExt, TryFutureExt}; use hex::{FromHex, ToHex}; use hex_data::HexData; use indexmap::IndexMap; @@ -27,12 +30,20 @@ use tokio::{ use tower::{Service, ServiceExt}; use tracing::Instrument; +use zcash_address::{unified::Encoding, TryFromAddress}; use zcash_primitives::consensus::Parameters; + use zebra_chain::{ - block::{self, Commitment, Height, SerializedBlock}, + amount::{self, Amount, NonNegative}, + block::{self, Block, Commitment, Height, SerializedBlock, TryIntoHeight}, + chain_sync_status::ChainSyncStatus, chain_tip::{ChainTip, NetworkChainTipHeightEstimator}, - parameters::{ConsensusBranchId, Network, NetworkUpgrade}, - serialization::{ZcashDeserialize, ZcashSerialize}, + parameters::{ + subsidy::{FundingStreamReceiver, ParameterSubsidy}, + ConsensusBranchId, Network, NetworkUpgrade, POW_AVERAGING_WINDOW, + }, + primitives, 
+ serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}, subtree::NoteCommitmentSubtreeIndex, transaction::{self, SerializedTransaction, Transaction, UnminedTx}, transparent::{self, Address}, @@ -41,7 +52,10 @@ use zebra_chain::{ equihash::Solution, }, }; -use zebra_consensus::ParameterCheckpoint; +use zebra_consensus::{ + block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, + ParameterCheckpoint, RouterError, +}; use zebra_network::address_book_peers::AddressBookPeers; use zebra_node_services::mempool; use zebra_state::{ @@ -49,6 +63,7 @@ use zebra_state::{ }; use crate::{ + config, methods::trees::{GetSubtrees, GetTreestate, SubtreeRpcData}, queue::Queue, server::{ @@ -57,21 +72,25 @@ use crate::{ }, }; -pub mod hex_data; +use types::{ + get_block_template::{ + self, constants::MEMPOOL_LONG_POLL_INTERVAL, proposal::proposal_block_from_template, + GetBlockTemplate, GetBlockTemplateHandler, ZCASHD_FUNDING_STREAM_ORDER, + }, + get_blockchain_info, get_mining_info, + get_raw_mempool::{self, GetRawMempool}, + long_poll::LongPollInput, + peer_info::PeerInfo, + submit_block, + subsidy::BlockSubsidy, + transaction::TransactionObject, + unified_address, validate_address, z_validate_address, +}; -// We don't use a types/ module here, because it is redundant. +pub mod hex_data; pub mod trees; - pub mod types; -use types::GetRawMempool; -use types::MempoolObject; -use types::TransactionObject; - -pub mod get_block_template_rpcs; - -pub use get_block_template_rpcs::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; - #[cfg(test)] mod tests; @@ -353,11 +372,222 @@ pub trait Rpc { /// tags: control #[method(name = "stop")] fn stop(&self) -> Result; + + /// Returns the height of the most recent block in the best valid block chain (equivalently, + /// the number of blocks in this chain excluding the genesis block). 
+ /// + /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) + /// method: post + /// tags: blockchain + #[method(name = "getblockcount")] + fn get_block_count(&self) -> Result; + + /// Returns the hash of the block of a given height iff the index argument correspond + /// to a block in the best chain. + /// + /// zcashd reference: [`getblockhash`](https://zcash-rpc.github.io/getblockhash.html) + /// method: post + /// tags: blockchain + /// + /// # Parameters + /// + /// - `index`: (numeric, required, example=1) The block index. + /// + /// # Notes + /// + /// - If `index` is positive then index = block height. + /// - If `index` is negative then -1 is the last known valid block. + #[method(name = "getblockhash")] + async fn get_block_hash(&self, index: i32) -> Result; + + /// Returns a block template for mining new Zcash blocks. + /// + /// # Parameters + /// + /// - `jsonrequestobject`: (string, optional) A JSON object containing arguments. + /// + /// zcashd reference: [`getblocktemplate`](https://zcash-rpc.github.io/getblocktemplate.html) + /// method: post + /// tags: mining + /// + /// # Notes + /// + /// Arguments to this RPC are currently ignored. + /// Long polling, block proposals, server lists, and work IDs are not supported. + /// + /// Miners can make arbitrary changes to blocks, as long as: + /// - the data sent to `submitblock` is a valid Zcash block, and + /// - the parent block is a valid block that Zebra already has, or will receive soon. + /// + /// Zebra verifies blocks in parallel, and keeps recent chains in parallel, + /// so moving between chains and forking chains is very cheap. + #[method(name = "getblocktemplate")] + async fn get_block_template( + &self, + parameters: Option, + ) -> Result; + + /// Submits block to the node to be validated and committed. + /// Returns the [`submit_block::Response`] for the operation, as a JSON string. 
+ /// + /// zcashd reference: [`submitblock`](https://zcash.github.io/rpc/submitblock.html) + /// method: post + /// tags: mining + /// + /// # Parameters + /// + /// - `hexdata`: (string, required) + /// - `jsonparametersobject`: (string, optional) - currently ignored + /// + /// # Notes + /// + /// - `jsonparametersobject` holds a single field, workid, that must be included in submissions if provided by the server. + #[method(name = "submitblock")] + async fn submit_block( + &self, + hex_data: HexData, + _parameters: Option, + ) -> Result; + + /// Returns mining-related information. + /// + /// zcashd reference: [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) + /// method: post + /// tags: mining + #[method(name = "getmininginfo")] + async fn get_mining_info(&self) -> Result; + + /// Returns the estimated network solutions per second based on the last `num_blocks` before + /// `height`. + /// + /// If `num_blocks` is not supplied, uses 120 blocks. If it is 0 or -1, uses the difficulty + /// averaging window. + /// If `height` is not supplied or is -1, uses the tip height. + /// + /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) + /// method: post + /// tags: mining + #[method(name = "getnetworksolps")] + async fn get_network_sol_ps(&self, num_blocks: Option, height: Option) + -> Result; + + /// Returns the estimated network solutions per second based on the last `num_blocks` before + /// `height`. + /// + /// This method name is deprecated, use [`getnetworksolps`](Self::get_network_sol_ps) instead. + /// See that method for details. 
+ /// + /// zcashd reference: [`getnetworkhashps`](https://zcash.github.io/rpc/getnetworkhashps.html) + /// method: post + /// tags: mining + #[method(name = "getnetworkhashps")] + async fn get_network_hash_ps( + &self, + num_blocks: Option, + height: Option, + ) -> Result { + self.get_network_sol_ps(num_blocks, height).await + } + + /// Returns data about each connected network node. + /// + /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) + /// method: post + /// tags: network + #[method(name = "getpeerinfo")] + async fn get_peer_info(&self) -> Result>; + + /// Checks if a zcash address is valid. + /// Returns information about the given address if valid. + /// + /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) + /// method: post + /// tags: util + /// + /// # Parameters + /// + /// - `address`: (string, required) The zcash address to validate. + #[method(name = "validateaddress")] + async fn validate_address(&self, address: String) -> Result; + + /// Checks if a zcash address is valid. + /// Returns information about the given address if valid. + /// + /// zcashd reference: [`z_validateaddress`](https://zcash.github.io/rpc/z_validateaddress.html) + /// method: post + /// tags: util + /// + /// # Parameters + /// + /// - `address`: (string, required) The zcash address to validate. + /// + /// # Notes + /// + /// - No notes + #[method(name = "z_validateaddress")] + async fn z_validate_address(&self, address: String) -> Result; + + /// Returns the block subsidy reward of the block at `height`, taking into account the mining slow start. + /// Returns an error if `height` is less than the height of the first halving for the current network. 
+ /// + /// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html) + /// method: post + /// tags: mining + /// + /// # Parameters + /// + /// - `height`: (numeric, optional, example=1) Can be any valid current or future height. + /// + /// # Notes + /// + /// If `height` is not supplied, uses the tip height. + #[method(name = "getblocksubsidy")] + async fn get_block_subsidy(&self, height: Option) -> Result; + + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. + /// + /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) + /// method: post + /// tags: blockchain + #[method(name = "getdifficulty")] + async fn get_difficulty(&self) -> Result; + + /// Returns the list of individual payment addresses given a unified address. + /// + /// zcashd reference: [`z_listunifiedreceivers`](https://zcash.github.io/rpc/z_listunifiedreceivers.html) + /// method: post + /// tags: wallet + /// + /// # Parameters + /// + /// - `address`: (string, required) The zcash unified address to get the list from. + /// + /// # Notes + /// + /// - No notes + #[method(name = "z_listunifiedreceivers")] + async fn z_list_unified_receivers(&self, address: String) -> Result; + + #[method(name = "generate")] + /// Mine blocks immediately. Returns the block hashes of the generated blocks. + /// + /// # Parameters + /// + /// - `num_blocks`: (numeric, required, example=1) Number of blocks to be generated. + /// + /// # Notes + /// + /// Only works if the network of the running zebrad process is `Regtest`. + /// + /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html) + /// method: post + /// tags: generating + async fn generate(&self, num_blocks: u32) -> Result>; } /// RPC method implementations. 
#[derive(Clone)] -pub struct RpcImpl +pub struct RpcImpl where Mempool: Service< mempool::Request, @@ -379,6 +609,13 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { // Configuration // @@ -420,12 +657,16 @@ where /// The last warning or error event logged by the server. last_warn_error_log_rx: LoggedLastEvent, + + /// Handler for the `getblocktemplate` RPC. + gbt: GetBlockTemplateHandler, } /// A type alias for the last event logged by the server. pub type LoggedLastEvent = watch::Receiver)>>; -impl Debug for RpcImpl +impl fmt::Debug + for RpcImpl where Mempool: Service< mempool::Request, @@ -447,8 +688,15 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Skip fields without Debug impls, and skip channels f.debug_struct("RpcImpl") .field("build_version", &self.build_version) @@ -456,11 +704,13 @@ where .field("network", &self.network) .field("debug_force_finished_sync", &self.debug_force_finished_sync) .field("debug_like_zcashd", &self.debug_like_zcashd) + .field("getblocktemplate", &self.gbt) .finish() } } -impl RpcImpl +impl + RpcImpl where Mempool: Service< mempool::Request, @@ -482,6 +732,13 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: 
ChainSyncStatus + Clone + Send + Sync + 'static, { /// Create a new instance of the RPC handler. // @@ -489,16 +746,19 @@ where // - put some of the configs or services in their own struct? #[allow(clippy::too_many_arguments)] pub fn new( - build_version: VersionString, - user_agent: UserAgentString, network: Network, + mining_config: config::mining::Config, debug_force_finished_sync: bool, - debug_like_zcashd: bool, + build_version: VersionString, + user_agent: UserAgentString, mempool: Mempool, state: State, + block_verifier_router: BlockVerifierRouter, + sync_status: SyncStatus, latest_chain_tip: Tip, address_book: AddressBook, last_warn_error_log_rx: LoggedLastEvent, + mined_block_sender: Option>, ) -> (Self, JoinHandle<()>) where VersionString: ToString + Clone + Send + 'static, @@ -514,18 +774,27 @@ where build_version.insert(0, 'v'); } + let gbt = GetBlockTemplateHandler::new( + &network, + mining_config.clone(), + block_verifier_router, + sync_status, + mined_block_sender, + ); + let rpc_impl = RpcImpl { build_version, user_agent, network: network.clone(), debug_force_finished_sync, - debug_like_zcashd, + debug_like_zcashd: mining_config.debug_like_zcashd, mempool: mempool.clone(), state: state.clone(), latest_chain_tip: latest_chain_tip.clone(), queue_sender, address_book, last_warn_error_log_rx, + gbt, }; // run the process queue @@ -540,7 +809,8 @@ where } #[async_trait] -impl RpcServer for RpcImpl +impl RpcServer + for RpcImpl where Mempool: Service< mempool::Request, @@ -562,6 +832,13 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { async fn get_info(&self) -> Result { let version = GetInfo::version(&self.build_version).expect("invalid version string"); @@ -722,8 +999,8 @@ where blocks: tip_height, 
best_block_hash: tip_hash, estimated_height, - chain_supply: types::Balance::chain_supply(value_balance), - value_pools: types::Balance::value_pools(value_balance), + chain_supply: get_blockchain_info::Balance::chain_supply(value_balance), + value_pools: get_blockchain_info::Balance::value_pools(value_balance), upgrades, consensus, headers: tip_height, @@ -1178,7 +1455,7 @@ where .map(|unmined_tx| { ( unmined_tx.transaction.id.mined_id().encode_hex(), - MempoolObject::from_verified_unmined_tx( + get_raw_mempool::MempoolObject::from_verified_unmined_tx( unmined_tx, &transactions, &transaction_dependencies, @@ -1195,7 +1472,7 @@ where transactions.sort_by_cached_key(|tx| { // zcashd uses modified fee here but Zebra doesn't currently // support prioritizing transactions - std::cmp::Reverse(( + cmp::Reverse(( i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 / tx.transaction.size as u128, // transaction hashes are compared in their serialized byte-order. @@ -1592,8 +1869,804 @@ where None, )) } + + fn get_block_count(&self) -> Result { + best_chain_tip_height(&self.latest_chain_tip).map(|height| height.0) + } + + async fn get_block_hash(&self, index: i32) -> Result { + let mut state = self.state.clone(); + let latest_chain_tip = self.latest_chain_tip.clone(); + + // TODO: look up this height as part of the state request? 
+ let tip_height = best_chain_tip_height(&latest_chain_tip)?; + + let height = height_from_signed_int(index, tip_height)?; + + let request = zebra_state::ReadRequest::BestChainBlockHash(height); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + + match response { + zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), + zebra_state::ReadResponse::BlockHash(None) => Err(ErrorObject::borrowed( + server::error::LegacyCode::InvalidParameter.into(), + "Block not found", + None, + )), + _ => unreachable!("unmatched response to a block request"), + } + } + + async fn get_block_template( + &self, + parameters: Option, + ) -> Result { + // Clone Configs + let network = self.network.clone(); + let miner_address = self.gbt.miner_address(); + let debug_like_zcashd = self.debug_like_zcashd; + let extra_coinbase_data = self.gbt.extra_coinbase_data(); + + // Clone Services + let mempool = self.mempool.clone(); + let mut latest_chain_tip = self.latest_chain_tip.clone(); + let sync_status = self.gbt.sync_status(); + let state = self.state.clone(); + + if let Some(HexData(block_proposal_bytes)) = parameters + .as_ref() + .and_then(get_block_template::JsonParameters::block_proposal_data) + { + return get_block_template::validate_block_proposal( + self.gbt.block_verifier_router(), + block_proposal_bytes, + network, + latest_chain_tip, + sync_status, + ) + .await; + } + + // To implement long polling correctly, we split this RPC into multiple phases. + get_block_template::check_parameters(¶meters)?; + + let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); + + // - One-off checks + + // Check config and parameters. + // These checks always have the same result during long polling. 
+ let miner_address = get_block_template::check_miner_address(miner_address)?; + + // - Checks and fetches that can change during long polling + // + // Set up the loop. + let mut max_time_reached = false; + + // The loop returns the server long poll ID, + // which should be different to the client long poll ID. + let ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ) = loop { + // Check if we are synced to the tip. + // The result of this check can change during long polling. + // + // Optional TODO: + // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) + get_block_template::check_synced_to_tip( + &network, + latest_chain_tip.clone(), + sync_status.clone(), + )?; + // TODO: return an error if we have no peers, like `zcashd` does, + // and add a developer config that mines regardless of how many peers we have. + // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 + + // We're just about to fetch state data, then maybe wait for any changes. + // Mark all the changes before the fetch as seen. + // Changes are also ignored in any clones made after the mark. + latest_chain_tip.mark_best_tip_seen(); + + // Fetch the state data and local time for the block template: + // - if the tip block hash changes, we must return from long polling, + // - if the local clock changes on testnet, we might return from long polling + // + // We always return after 90 minutes on mainnet, even if we have the same response, + // because the max time has been reached. + let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { + tip_hash, + tip_height, + max_time, + cur_time, + .. + } = get_block_template::fetch_state_tip_and_local_time(state.clone()).await?; + + // Fetch the mempool data for the block template: + // - if the mempool transactions change, we might return from long polling. 
+ // + // If the chain fork has just changed, miners want to get the new block as fast + // as possible, rather than wait for transactions to re-verify. This increases + // miner profits (and any delays can cause chain forks). So we don't wait between + // the chain tip changing and getting mempool transactions. + // + // Optional TODO: + // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) + let Some((mempool_txs, mempool_tx_deps)) = + get_block_template::fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. + .or_else(|| client_long_poll_id.is_none().then(Default::default)) + else { + continue; + }; + + // - Long poll ID calculation + let server_long_poll_id = LongPollInput::new( + tip_height, + tip_hash, + max_time, + mempool_txs.iter().map(|tx| tx.transaction.id), + ) + .generate_id(); + + // The loop finishes if: + // - the client didn't pass a long poll ID, + // - the server long poll ID is different to the client long poll ID, or + // - the previous loop iteration waited until the max time. + if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { + let mut submit_old = client_long_poll_id + .as_ref() + .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); + + // On testnet, the max time changes the block difficulty, so old shares are + // invalid. On mainnet, this means there has been 90 minutes without a new + // block or mempool transaction, which is very unlikely. So the miner should + // probably reset anyway. 
+ if max_time_reached { + submit_old = Some(false); + } + + break ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ); + } + + // - Polling wait conditions + // + // TODO: when we're happy with this code, split it into a function. + // + // Periodically check the mempool for changes. + // + // Optional TODO: + // Remove this polling wait if we switch to using futures to detect sync status + // and mempool changes. + let wait_for_mempool_request = + tokio::time::sleep(Duration::from_secs(MEMPOOL_LONG_POLL_INTERVAL)); + + // Return immediately if the chain tip has changed. + // The clone preserves the seen status of the chain tip. + let mut wait_for_best_tip_change = latest_chain_tip.clone(); + let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); + + // Wait for the maximum block time to elapse. This can change the block header + // on testnet. (On mainnet it can happen due to a network disconnection, or a + // rapid drop in hash rate.) + // + // This duration might be slightly lower than the actual maximum, + // if cur_time was clamped to min_time. In that case the wait is very long, + // and it's ok to return early. + // + // It can also be zero if cur_time was clamped to max_time. In that case, + // we want to wait for another change, and ignore this timeout. So we use an + // `OptionFuture::None`. + let duration_until_max_time = max_time.saturating_duration_since(cur_time); + let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { + Some(tokio::time::sleep(duration_until_max_time.to_std())) + } else { + None + } + .into(); + + // Optional TODO: + // `zcashd` generates the next coinbase transaction while waiting for changes. + // When Zebra supports shielded coinbase, we might want to do this in parallel. + // But the coinbase value depends on the selected transactions, so this needs + // further analysis to check if it actually saves us any time. 
+ + tokio::select! { + // Poll the futures in the listed order, for efficiency. + // We put the most frequent conditions first. + biased; + + // This timer elapses every few seconds + _elapsed = wait_for_mempool_request => { + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + MEMPOOL_LONG_POLL_INTERVAL, + "checking for a new mempool change after waiting a few seconds" + ); + } + + // The state changes after around a target block interval (75s) + tip_changed_result = wait_for_best_tip_change => { + match tip_changed_result { + Ok(()) => { + // Spurious updates shouldn't happen in the state, because the + // difficulty and hash ordering is a stable total order. But + // since they could cause a busy-loop, guard against them here. + latest_chain_tip.mark_best_tip_seen(); + + let new_tip_hash = latest_chain_tip.best_tip_hash(); + if new_tip_hash == Some(tip_hash) { + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + ?tip_hash, + ?tip_height, + "ignoring spurious state change notification" + ); + + // Wait for the mempool interval, then check for any changes. + tokio::time::sleep(Duration::from_secs( + MEMPOOL_LONG_POLL_INTERVAL, + )).await; + + continue; + } + + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because state has changed" + ); + } + + Err(recv_error) => { + // This log is rare and helps with debugging, so it's ok to be info. + tracing::info!( + ?recv_error, + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll due to a state error.\ + Is Zebra shutting down?" + ); + + return Err(recv_error).map_error(server::error::LegacyCode::default()); + } + } + } + + // The max time does not elapse during normal operation on mainnet, + // and it rarely elapses on testnet. + Some(_elapsed) = wait_for_max_time => { + // This log is very rare so it's ok to be info. 
+ tracing::info!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because max time was reached" + ); + + max_time_reached = true; + } + } + }; + + // - Processing fetched data to create a transaction template + // + // Apart from random weighted transaction selection, + // the template only depends on the previously fetched data. + // This processing never fails. + + // Calculate the next block height. + let next_block_height = + (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); + + tracing::debug!( + mempool_tx_hashes = ?mempool_txs + .iter() + .map(|tx| tx.transaction.id.mined_id()) + .collect::>(), + "selecting transactions for the template from the mempool" + ); + + // Randomly select some mempool transactions. + let mempool_txs = get_block_template::zip317::select_mempool_transactions( + &network, + next_block_height, + &miner_address, + mempool_txs, + mempool_tx_deps, + debug_like_zcashd, + extra_coinbase_data.clone(), + ); + + tracing::debug!( + selected_mempool_tx_hashes = ?mempool_txs + .iter() + .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) + .collect::>(), + "selected transactions for the template from the mempool" + ); + + // - After this point, the template only depends on the previously fetched data. 
+ + let response = GetBlockTemplate::new( + &network, + &miner_address, + &chain_tip_and_local_time, + server_long_poll_id, + mempool_txs, + submit_old, + debug_like_zcashd, + extra_coinbase_data, + ); + + Ok(response.into()) + } + + async fn submit_block( + &self, + HexData(block_bytes): HexData, + _parameters: Option, + ) -> Result { + let mut block_verifier_router = self.gbt.block_verifier_router(); + + let block: Block = match block_bytes.zcash_deserialize_into() { + Ok(block_bytes) => block_bytes, + Err(error) => { + tracing::info!( + ?error, + "submit block failed: block bytes could not be deserialized into a structurally valid block" + ); + + return Ok(submit_block::ErrorResponse::Rejected.into()); + } + }; + + let height = block + .coinbase_height() + .ok_or_error(0, "coinbase height not found")?; + let block_hash = block.hash(); + + let block_verifier_router_response = block_verifier_router + .ready() + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? + .call(zebra_consensus::Request::Commit(Arc::new(block))) + .await; + + let chain_error = match block_verifier_router_response { + // Currently, this match arm returns `null` (Accepted) for blocks committed + // to any chain, but Accepted is only for blocks in the best chain. + // + // TODO (#5487): + // - Inconclusive: check if the block is on a side-chain + // The difference is important to miners, because they want to mine on the best chain. + Ok(hash) => { + tracing::info!(?hash, ?height, "submit block accepted"); + + self.gbt + .advertise_mined_block(hash, height) + .map_error_with_prefix(0, "failed to send mined block")?; + + return Ok(submit_block::Response::Accepted); + } + + // Turns BoxError into Result, + // by downcasting from Any to VerifyChainError. 
+ Err(box_error) => { + let error = box_error + .downcast::() + .map(|boxed_chain_error| *boxed_chain_error); + + tracing::info!( + ?error, + ?block_hash, + ?height, + "submit block failed verification" + ); + + error + } + }; + + let response = match chain_error { + Ok(source) if source.is_duplicate_request() => submit_block::ErrorResponse::Duplicate, + + // Currently, these match arms return Reject for the older duplicate in a queue, + // but queued duplicates should be DuplicateInconclusive. + // + // Optional TODO (#5487): + // - DuplicateInconclusive: turn these non-finalized state duplicate block errors + // into BlockError enum variants, and handle them as DuplicateInconclusive: + // - "block already sent to be committed to the state" + // - "replaced by newer request" + // - keep the older request in the queue, + // and return a duplicate error for the newer request immediately. + // This improves the speed of the RPC response. + // + // Checking the download queues and BlockVerifierRouter buffer for duplicates + // might require architectural changes to Zebra, so we should only do it + // if mining pools really need it. + Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, + + // This match arm is currently unreachable, but if future changes add extra error types, + // we want to turn them into `Rejected`. 
+ Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, + }; + + Ok(response.into()) + } + + async fn get_mining_info(&self) -> Result { + let network = self.network.clone(); + let mut state = self.state.clone(); + + let chain_tip = self.latest_chain_tip.clone(); + let tip_height = chain_tip.best_tip_height().unwrap_or(Height(0)).0; + + let mut current_block_tx = None; + if tip_height > 0 { + let mined_tx_ids = chain_tip.best_tip_mined_transaction_ids(); + current_block_tx = + (!mined_tx_ids.is_empty()).then(|| mined_tx_ids.len().saturating_sub(1)); + } + + let solution_rate_fut = self.get_network_sol_ps(None, None); + // Get the current block size. + let mut current_block_size = None; + if tip_height > 0 { + let request = zebra_state::ReadRequest::TipBlockSize; + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + current_block_size = match response { + zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), + _ => None, + }; + } + + Ok(get_mining_info::Response::new( + tip_height, + current_block_size, + current_block_tx, + network, + solution_rate_fut.await?, + )) + } + + async fn get_network_sol_ps( + &self, + num_blocks: Option, + height: Option, + ) -> Result { + // Default number of blocks is 120 if not supplied. + let mut num_blocks = + num_blocks.unwrap_or(get_block_template::DEFAULT_SOLUTION_RATE_WINDOW_SIZE); + // But if it is 0 or negative, it uses the proof of work averaging window. + if num_blocks < 1 { + num_blocks = i32::try_from(POW_AVERAGING_WINDOW).expect("fits in i32"); + } + let num_blocks = + usize::try_from(num_blocks).expect("just checked for negatives, i32 fits in usize"); + + // Default height is the tip height if not supplied. Negative values also mean the tip + // height. Since negative values aren't valid heights, we can just use the conversion. 
+ let height = height.and_then(|height| height.try_into_height().ok()); + + let mut state = self.state.clone(); + + let request = ReadRequest::SolutionRate { num_blocks, height }; + + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let solution_rate = match response { + // zcashd returns a 0 rate when the calculation is invalid + ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), + + _ => unreachable!("unmatched response to a solution rate request"), + }; + + Ok(solution_rate + .try_into() + .expect("per-second solution rate always fits in u64")) + } + + async fn get_peer_info(&self) -> Result> { + let address_book = self.address_book.clone(); + Ok(address_book + .recently_live_peers(chrono::Utc::now()) + .into_iter() + .map(PeerInfo::from) + .collect()) + } + + async fn validate_address(&self, raw_address: String) -> Result { + let network = self.network.clone(); + + let Ok(address) = raw_address.parse::() else { + return Ok(validate_address::Response::invalid()); + }; + + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); + return Ok(validate_address::Response::invalid()); + } + }; + + // we want to match zcashd's behaviour + if !address.is_transparent() { + return Ok(validate_address::Response::invalid()); + } + + if address.network() == network.kind() { + Ok(validate_address::Response { + address: Some(raw_address), + is_valid: true, + is_script: Some(address.is_script_hash()), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address in validateaddress RPC: Zebra's configured network must match address network" + ); + + Ok(validate_address::Response::invalid()) + } + } + + async fn z_validate_address( + &self, + raw_address: String, + ) -> Result { + let network = self.network.clone(); + + let 
Ok(address) = raw_address.parse::() else { + return Ok(z_validate_address::Response::invalid()); + }; + + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); + return Ok(z_validate_address::Response::invalid()); + } + }; + + if address.network() == network.kind() { + Ok(z_validate_address::Response { + is_valid: true, + address: Some(raw_address), + address_type: Some(z_validate_address::AddressType::from(&address)), + is_mine: Some(false), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", + address.network(), + network + ); + + Ok(z_validate_address::Response::invalid()) + } + } + + async fn get_block_subsidy(&self, height: Option) -> Result { + let latest_chain_tip = self.latest_chain_tip.clone(); + let network = self.network.clone(); + + let height = if let Some(height) = height { + Height(height) + } else { + best_chain_tip_height(&latest_chain_tip)? + }; + + if height < network.height_for_first_halving() { + return Err(ErrorObject::borrowed( + 0, + "Zebra does not support founders' reward subsidies, \ + use a block height that is after the first halving", + None, + )); + } + + // Always zero for post-halving blocks + let founders = Amount::zero(); + + let total_block_subsidy = + block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; + let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())?; + + let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = + funding_stream_values(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())? 
+ .into_iter() + // Separate the funding streams into deferred and non-deferred streams + .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); + + let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; + + let [lockbox_total, funding_streams_total]: [std::result::Result< + Amount, + amount::Error, + >; 2] = [&lockbox_streams, &funding_streams] + .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); + + // Use the same funding stream order as zcashd + funding_streams.sort_by_key(|(receiver, _funding_stream)| { + ZCASHD_FUNDING_STREAM_ORDER + .iter() + .position(|zcashd_receiver| zcashd_receiver == receiver) + }); + + // Format the funding streams and lockbox streams + let [funding_streams, lockbox_streams]: [Vec<_>; 2] = [funding_streams, lockbox_streams] + .map(|streams| { + streams + .into_iter() + .map(|(receiver, value)| { + let address = funding_stream_address(height, &network, receiver); + types::subsidy::FundingStream::new(is_nu6, receiver, value, address) + }) + .collect() + }); + + Ok(BlockSubsidy { + miner: miner_subsidy.into(), + founders: founders.into(), + funding_streams, + lockbox_streams, + funding_streams_total: funding_streams_total + .map_error(server::error::LegacyCode::default())? + .into(), + lockbox_total: lockbox_total + .map_error(server::error::LegacyCode::default())? 
+ .into(), + total_block_subsidy: total_block_subsidy.into(), + }) + } + + async fn get_difficulty(&self) -> Result { + chain_tip_difficulty(self.network.clone(), self.state.clone(), false).await + } + + async fn z_list_unified_receivers(&self, address: String) -> Result { + use zcash_address::unified::Container; + + let (network, unified_address): ( + zcash_protocol::consensus::NetworkType, + zcash_address::unified::Address, + ) = zcash_address::unified::Encoding::decode(address.clone().as_str()) + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let mut p2pkh = String::new(); + let mut p2sh = String::new(); + let mut orchard = String::new(); + let mut sapling = String::new(); + + for item in unified_address.items() { + match item { + zcash_address::unified::Receiver::Orchard(_data) => { + let addr = zcash_address::unified::Address::try_from_items(vec![item]) + .expect("using data already decoded as valid"); + orchard = addr.encode(&network); + } + zcash_address::unified::Receiver::Sapling(data) => { + let addr = zebra_chain::primitives::Address::try_from_sapling(network, data) + .expect("using data already decoded as valid"); + sapling = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2pkh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2pkh(network, data) + .expect("using data already decoded as valid"); + p2pkh = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2sh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2sh(network, data) + .expect("using data already decoded as valid"); + p2sh = addr.payment_address().unwrap_or_default(); + } + _ => (), + } + } + + Ok(unified_address::Response::new( + orchard, sapling, p2pkh, p2sh, + )) + } + + async fn generate(&self, num_blocks: u32) -> Result> { + let rpc = self.clone(); + let network = self.network.clone(); + + if !network.disable_pow() { + return 
Err(ErrorObject::borrowed( + 0, + "generate is only supported on networks where PoW is disabled", + None, + )); + } + + let mut block_hashes = Vec::new(); + for _ in 0..num_blocks { + let block_template = rpc + .get_block_template(None) + .await + .map_error(server::error::LegacyCode::default())?; + + let get_block_template::Response::TemplateMode(block_template) = block_template else { + return Err(ErrorObject::borrowed( + 0, + "error generating block template", + None, + )); + }; + + let proposal_block = proposal_block_from_template( + &block_template, + get_block_template::TimeSource::CurTime, + NetworkUpgrade::current(&network, Height(block_template.height)), + ) + .map_error(server::error::LegacyCode::default())?; + let hex_proposal_block = HexData( + proposal_block + .zcash_serialize_to_vec() + .map_error(server::error::LegacyCode::default())?, + ); + + let _submit = rpc + .submit_block(hex_proposal_block, None) + .await + .map_error(server::error::LegacyCode::default())?; + + block_hashes.push(GetBlockHash(proposal_block.hash())); + } + + Ok(block_hashes) + } } +// TODO: Move the code below to separate modules. + /// Returns the best chain tip height of `latest_chain_tip`, /// or an RPC error if there are no blocks in the state. 
pub fn best_chain_tip_height(latest_chain_tip: &Tip) -> Result @@ -1813,11 +2886,11 @@ pub struct GetBlockChainInfo { /// Chain supply balance #[serde(rename = "chainSupply")] - chain_supply: types::Balance, + chain_supply: get_blockchain_info::Balance, /// Value pool balances #[serde(rename = "valuePools")] - value_pools: [types::Balance; 5], + value_pools: [get_blockchain_info::Balance; 5], /// Status of network upgrades upgrades: IndexMap, @@ -1833,8 +2906,8 @@ impl Default for GetBlockChainInfo { blocks: Height(1), best_block_hash: block::Hash([0; 32]), estimated_height: Height(1), - chain_supply: types::Balance::chain_supply(Default::default()), - value_pools: types::Balance::zero_pools(), + chain_supply: get_blockchain_info::Balance::chain_supply(Default::default()), + value_pools: get_blockchain_info::Balance::zero_pools(), upgrades: IndexMap::new(), consensus: TipConsensusBranch { chain_tip: ConsensusBranchIdHex(ConsensusBranchId::default()), @@ -1859,8 +2932,8 @@ impl GetBlockChainInfo { blocks: Height, best_block_hash: block::Hash, estimated_height: Height, - chain_supply: types::Balance, - value_pools: [types::Balance; 5], + chain_supply: get_blockchain_info::Balance, + value_pools: [get_blockchain_info::Balance; 5], upgrades: IndexMap, consensus: TipConsensusBranch, headers: Height, @@ -1915,7 +2988,7 @@ impl GetBlockChainInfo { } /// Returns the value pool balances. - pub fn value_pools(&self) -> &[types::Balance; 5] { + pub fn value_pools(&self) -> &[get_blockchain_info::Balance; 5] { &self.value_pools } @@ -2664,7 +3737,7 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result ErrorCode::InvalidParams.code(), "Provided index is not valid", None, - )) + )); } Some(h) => { if h < 0 { @@ -2710,6 +3783,7 @@ mod opthex { } } } + /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. 
pub async fn chain_tip_difficulty( network: Network, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs deleted file mode 100644 index 98424c79c31..00000000000 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ /dev/null @@ -1,1361 +0,0 @@ -//! Mining-related RPCs. - -use std::{fmt::Debug, sync::Arc, time::Duration}; - -use futures::{future::OptionFuture, TryFutureExt}; -use jsonrpsee::core::{async_trait, RpcResult as Result}; -use jsonrpsee_proc_macros::rpc; -use jsonrpsee_types::ErrorObject; -use tokio::sync::watch; -use tower::{Service, ServiceExt}; - -use zcash_address::{unified::Encoding, TryFromAddress}; - -use zebra_chain::{ - amount::{self, Amount, NonNegative}, - block::{self, Block, Height, TryIntoHeight}, - chain_sync_status::ChainSyncStatus, - chain_tip::ChainTip, - parameters::{ - subsidy::{FundingStreamReceiver, ParameterSubsidy}, - Network, NetworkKind, NetworkUpgrade, POW_AVERAGING_WINDOW, - }, - primitives, - serialization::{ZcashDeserializeInto, ZcashSerialize}, - transparent::{ - self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN, - }, -}; -use zebra_consensus::{ - block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, RouterError, -}; -use zebra_network::AddressBookPeers; -use zebra_node_services::mempool; -use zebra_state::{ReadRequest, ReadResponse}; - -use crate::{ - methods::{ - best_chain_tip_height, chain_tip_difficulty, - get_block_template_rpcs::{ - constants::{ - DEFAULT_SOLUTION_RATE_WINDOW_SIZE, GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - ZCASHD_FUNDING_STREAM_ORDER, - }, - get_block_template::{ - check_miner_address, check_synced_to_tip, fetch_mempool_transactions, - fetch_state_tip_and_local_time, validate_block_proposal, - }, - // TODO: move the types/* modules directly under get_block_template_rpcs, - // and combine any modules with the same names. 
- types::{ - get_block_template::{ - proposal::TimeSource, proposal_block_from_template, GetBlockTemplate, - }, - get_mining_info, - long_poll::LongPollInput, - peer_info::PeerInfo, - submit_block, - subsidy::{BlockSubsidy, FundingStream}, - unified_address, validate_address, z_validate_address, - }, - }, - height_from_signed_int, - hex_data::HexData, - GetBlockHash, - }, - server::{ - self, - error::{MapError, OkOrError}, - }, -}; - -pub mod constants; -pub mod get_block_template; -pub mod types; -pub mod zip317; - -/// getblocktemplate RPC method signatures. -#[rpc(server)] -pub trait GetBlockTemplateRpc { - /// Returns the height of the most recent block in the best valid block chain (equivalently, - /// the number of blocks in this chain excluding the genesis block). - /// - /// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html) - /// method: post - /// tags: blockchain - #[method(name = "getblockcount")] - fn get_block_count(&self) -> Result; - - /// Returns the hash of the block of a given height iff the index argument correspond - /// to a block in the best chain. - /// - /// zcashd reference: [`getblockhash`](https://zcash-rpc.github.io/getblockhash.html) - /// method: post - /// tags: blockchain - /// - /// # Parameters - /// - /// - `index`: (numeric, required, example=1) The block index. - /// - /// # Notes - /// - /// - If `index` is positive then index = block height. - /// - If `index` is negative then -1 is the last known valid block. - #[method(name = "getblockhash")] - async fn get_block_hash(&self, index: i32) -> Result; - - /// Returns a block template for mining new Zcash blocks. - /// - /// # Parameters - /// - /// - `jsonrequestobject`: (string, optional) A JSON object containing arguments. - /// - /// zcashd reference: [`getblocktemplate`](https://zcash-rpc.github.io/getblocktemplate.html) - /// method: post - /// tags: mining - /// - /// # Notes - /// - /// Arguments to this RPC are currently ignored. 
- /// Long polling, block proposals, server lists, and work IDs are not supported. - /// - /// Miners can make arbitrary changes to blocks, as long as: - /// - the data sent to `submitblock` is a valid Zcash block, and - /// - the parent block is a valid block that Zebra already has, or will receive soon. - /// - /// Zebra verifies blocks in parallel, and keeps recent chains in parallel, - /// so moving between chains and forking chains is very cheap. - #[method(name = "getblocktemplate")] - async fn get_block_template( - &self, - parameters: Option, - ) -> Result; - - /// Submits block to the node to be validated and committed. - /// Returns the [`submit_block::Response`] for the operation, as a JSON string. - /// - /// zcashd reference: [`submitblock`](https://zcash.github.io/rpc/submitblock.html) - /// method: post - /// tags: mining - /// - /// # Parameters - /// - /// - `hexdata`: (string, required) - /// - `jsonparametersobject`: (string, optional) - currently ignored - /// - /// # Notes - /// - /// - `jsonparametersobject` holds a single field, workid, that must be included in submissions if provided by the server. - #[method(name = "submitblock")] - async fn submit_block( - &self, - hex_data: HexData, - _parameters: Option, - ) -> Result; - - /// Returns mining-related information. - /// - /// zcashd reference: [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) - /// method: post - /// tags: mining - #[method(name = "getmininginfo")] - async fn get_mining_info(&self) -> Result; - - /// Returns the estimated network solutions per second based on the last `num_blocks` before - /// `height`. - /// - /// If `num_blocks` is not supplied, uses 120 blocks. If it is 0 or -1, uses the difficulty - /// averaging window. - /// If `height` is not supplied or is -1, uses the tip height. 
- /// - /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) - /// method: post - /// tags: mining - #[method(name = "getnetworksolps")] - async fn get_network_sol_ps(&self, num_blocks: Option, height: Option) - -> Result; - - /// Returns the estimated network solutions per second based on the last `num_blocks` before - /// `height`. - /// - /// This method name is deprecated, use [`getnetworksolps`](Self::get_network_sol_ps) instead. - /// See that method for details. - /// - /// zcashd reference: [`getnetworkhashps`](https://zcash.github.io/rpc/getnetworkhashps.html) - /// method: post - /// tags: mining - #[method(name = "getnetworkhashps")] - async fn get_network_hash_ps( - &self, - num_blocks: Option, - height: Option, - ) -> Result { - self.get_network_sol_ps(num_blocks, height).await - } - - /// Returns data about each connected network node. - /// - /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) - /// method: post - /// tags: network - #[method(name = "getpeerinfo")] - async fn get_peer_info(&self) -> Result>; - - /// Checks if a zcash address is valid. - /// Returns information about the given address if valid. - /// - /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) - /// method: post - /// tags: util - /// - /// # Parameters - /// - /// - `address`: (string, required) The zcash address to validate. - #[method(name = "validateaddress")] - async fn validate_address(&self, address: String) -> Result; - - /// Checks if a zcash address is valid. - /// Returns information about the given address if valid. - /// - /// zcashd reference: [`z_validateaddress`](https://zcash.github.io/rpc/z_validateaddress.html) - /// method: post - /// tags: util - /// - /// # Parameters - /// - /// - `address`: (string, required) The zcash address to validate. 
- /// - /// # Notes - /// - /// - No notes - #[method(name = "z_validateaddress")] - async fn z_validate_address( - &self, - address: String, - ) -> Result; - - /// Returns the block subsidy reward of the block at `height`, taking into account the mining slow start. - /// Returns an error if `height` is less than the height of the first halving for the current network. - /// - /// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html) - /// method: post - /// tags: mining - /// - /// # Parameters - /// - /// - `height`: (numeric, optional, example=1) Can be any valid current or future height. - /// - /// # Notes - /// - /// If `height` is not supplied, uses the tip height. - #[method(name = "getblocksubsidy")] - async fn get_block_subsidy(&self, height: Option) -> Result; - - /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. - /// - /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) - /// method: post - /// tags: blockchain - #[method(name = "getdifficulty")] - async fn get_difficulty(&self) -> Result; - - /// Returns the list of individual payment addresses given a unified address. - /// - /// zcashd reference: [`z_listunifiedreceivers`](https://zcash.github.io/rpc/z_listunifiedreceivers.html) - /// method: post - /// tags: wallet - /// - /// # Parameters - /// - /// - `address`: (string, required) The zcash unified address to get the list from. - /// - /// # Notes - /// - /// - No notes - #[method(name = "z_listunifiedreceivers")] - async fn z_list_unified_receivers(&self, address: String) -> Result; - - #[method(name = "generate")] - /// Mine blocks immediately. Returns the block hashes of the generated blocks. - /// - /// # Parameters - /// - /// - `num_blocks`: (numeric, required, example=1) Number of blocks to be generated. - /// - /// # Notes - /// - /// Only works if the network of the running zebrad process is `Regtest`. 
- /// - /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html) - /// method: post - /// tags: generating - async fn generate(&self, num_blocks: u32) -> Result>; -} - -/// RPC method implementations. -#[derive(Clone)] -pub struct GetBlockTemplateRpcImpl< - Mempool, - State, - Tip, - BlockVerifierRouter, - SyncStatus, - AddressBook, -> where - Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - > + Clone - + Send - + Sync - + 'static, - Mempool::Future: Send, - State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, - Tip: ChainTip + Clone + Send + Sync + 'static, - BlockVerifierRouter: Service - + Clone - + Send - + Sync - + 'static, - >::Future: Send, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, -{ - // Configuration - // - /// The configured network for this RPC service. - network: Network, - - /// The configured miner address for this RPC service. - /// - /// Zebra currently only supports transparent addresses. - miner_address: Option, - - /// Extra data to include in coinbase transaction inputs. - /// Limited to around 95 bytes by the consensus rules. - extra_coinbase_data: Vec, - - /// Should Zebra's block templates try to imitate `zcashd`? - /// Developer-only config. - debug_like_zcashd: bool, - - // Services - // - /// A handle to the mempool service. - mempool: Mempool, - - /// A handle to the state service. - state: State, - - /// Allows efficient access to the best tip of the blockchain. - latest_chain_tip: Tip, - - /// The chain verifier, used for submitting blocks. - block_verifier_router: BlockVerifierRouter, - - /// The chain sync status, used for checking if Zebra is likely close to the network chain tip. 
- sync_status: SyncStatus, - - /// Address book of peers, used for `getpeerinfo`. - address_book: AddressBook, - - /// A channel to send successful block submissions to the block gossip task, - /// so they can be advertised to peers. - mined_block_sender: watch::Sender<(block::Hash, block::Height)>, -} - -impl Debug - for GetBlockTemplateRpcImpl -where - Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - > + Clone - + Send - + Sync - + 'static, - Mempool::Future: Send, - State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, - Tip: ChainTip + Clone + Send + Sync + 'static, - BlockVerifierRouter: Service - + Clone - + Send - + Sync - + 'static, - >::Future: Send, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // Skip fields without debug impls - f.debug_struct("GetBlockTemplateRpcImpl") - .field("network", &self.network) - .field("miner_address", &self.miner_address) - .field("extra_coinbase_data", &self.extra_coinbase_data) - .field("debug_like_zcashd", &self.debug_like_zcashd) - .finish() - } -} - -impl - GetBlockTemplateRpcImpl -where - Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - > + Clone - + Send - + Sync - + 'static, - Mempool::Future: Send, - State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, - Tip: ChainTip + Clone + Send + Sync + 'static, - BlockVerifierRouter: Service - + Clone - + Send - + Sync - + 'static, - >::Future: Send, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers + 
Clone + Send + Sync + 'static, -{ - /// Create a new instance of the handler for getblocktemplate RPCs. - /// - /// # Panics - /// - /// If the `mining_config` is invalid. - #[allow(clippy::too_many_arguments)] - pub fn new( - network: &Network, - mining_config: crate::config::mining::Config, - mempool: Mempool, - state: State, - latest_chain_tip: Tip, - block_verifier_router: BlockVerifierRouter, - sync_status: SyncStatus, - address_book: AddressBook, - mined_block_sender: Option>, - ) -> Self { - // Prevent loss of miner funds due to an unsupported or incorrect address type. - if let Some(miner_address) = mining_config.miner_address.clone() { - match network.kind() { - NetworkKind::Mainnet => assert_eq!( - miner_address.network_kind(), - NetworkKind::Mainnet, - "Incorrect config: Zebra is configured to run on a Mainnet network, \ - which implies the configured mining address needs to be for Mainnet, \ - but the provided address is for {}.", - miner_address.network_kind(), - ), - // `Regtest` uses `Testnet` transparent addresses. - network_kind @ (NetworkKind::Testnet | NetworkKind::Regtest) => assert_eq!( - miner_address.network_kind(), - NetworkKind::Testnet, - "Incorrect config: Zebra is configured to run on a {network_kind} network, \ - which implies the configured mining address needs to be for Testnet, \ - but the provided address is for {}.", - miner_address.network_kind(), - ), - } - } - - // A limit on the configured extra coinbase data, regardless of the current block height. - // This is different from the consensus rule, which limits the total height + data. - const EXTRA_COINBASE_DATA_LIMIT: usize = - MAX_COINBASE_DATA_LEN - MAX_COINBASE_HEIGHT_DATA_LEN; - - let debug_like_zcashd = mining_config.debug_like_zcashd; - - // Hex-decode to bytes if possible, otherwise UTF-8 encode to bytes. 
- let extra_coinbase_data = mining_config.extra_coinbase_data.unwrap_or_else(|| { - if debug_like_zcashd { - "" - } else { - EXTRA_ZEBRA_COINBASE_DATA - } - .to_string() - }); - let extra_coinbase_data = hex::decode(&extra_coinbase_data) - .unwrap_or_else(|_error| extra_coinbase_data.as_bytes().to_vec()); - - assert!( - extra_coinbase_data.len() <= EXTRA_COINBASE_DATA_LIMIT, - "extra coinbase data is {} bytes, but Zebra's limit is {}.\n\ - Configure mining.extra_coinbase_data with a shorter string", - extra_coinbase_data.len(), - EXTRA_COINBASE_DATA_LIMIT, - ); - - Self { - network: network.clone(), - miner_address: mining_config.miner_address, - extra_coinbase_data, - debug_like_zcashd, - mempool, - state, - latest_chain_tip, - block_verifier_router, - sync_status, - address_book, - mined_block_sender: mined_block_sender - .unwrap_or(submit_block::SubmitBlockChannel::default().sender()), - } - } -} - -#[async_trait] -impl GetBlockTemplateRpcServer - for GetBlockTemplateRpcImpl -where - Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - > + Clone - + Send - + Sync - + 'static, - Mempool::Future: Send, - State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, - Tip: ChainTip + Clone + Send + Sync + 'static, - BlockVerifierRouter: Service - + Clone - + Send - + Sync - + 'static, - >::Future: Send, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, -{ - fn get_block_count(&self) -> Result { - best_chain_tip_height(&self.latest_chain_tip).map(|height| height.0) - } - - async fn get_block_hash(&self, index: i32) -> Result { - let mut state = self.state.clone(); - let latest_chain_tip = self.latest_chain_tip.clone(); - - // TODO: look up this height as part of the state request? 
- let tip_height = best_chain_tip_height(&latest_chain_tip)?; - - let height = height_from_signed_int(index, tip_height)?; - - let request = zebra_state::ReadRequest::BestChainBlockHash(height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - - match response { - zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), - zebra_state::ReadResponse::BlockHash(None) => Err(ErrorObject::borrowed( - server::error::LegacyCode::InvalidParameter.into(), - "Block not found", - None, - )), - _ => unreachable!("unmatched response to a block request"), - } - } - - async fn get_block_template( - &self, - parameters: Option, - ) -> Result { - // Clone Configs - let network = self.network.clone(); - let miner_address = self.miner_address.clone(); - let debug_like_zcashd = self.debug_like_zcashd; - let extra_coinbase_data = self.extra_coinbase_data.clone(); - - // Clone Services - let mempool = self.mempool.clone(); - let mut latest_chain_tip = self.latest_chain_tip.clone(); - let sync_status = self.sync_status.clone(); - let state = self.state.clone(); - - if let Some(HexData(block_proposal_bytes)) = parameters - .as_ref() - .and_then(get_block_template::JsonParameters::block_proposal_data) - { - return validate_block_proposal( - self.block_verifier_router.clone(), - block_proposal_bytes, - network, - latest_chain_tip, - sync_status, - ) - .await; - } - - // To implement long polling correctly, we split this RPC into multiple phases. - get_block_template::check_parameters(¶meters)?; - - let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); - - // - One-off checks - - // Check config and parameters. - // These checks always have the same result during long polling. - let miner_address = check_miner_address(miner_address)?; - - // - Checks and fetches that can change during long polling - // - // Set up the loop. 
- let mut max_time_reached = false; - - // The loop returns the server long poll ID, - // which should be different to the client long poll ID. - let ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ) = loop { - // Check if we are synced to the tip. - // The result of this check can change during long polling. - // - // Optional TODO: - // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) - check_synced_to_tip(&network, latest_chain_tip.clone(), sync_status.clone())?; - // TODO: return an error if we have no peers, like `zcashd` does, - // and add a developer config that mines regardless of how many peers we have. - // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 - - // We're just about to fetch state data, then maybe wait for any changes. - // Mark all the changes before the fetch as seen. - // Changes are also ignored in any clones made after the mark. - latest_chain_tip.mark_best_tip_seen(); - - // Fetch the state data and local time for the block template: - // - if the tip block hash changes, we must return from long polling, - // - if the local clock changes on testnet, we might return from long polling - // - // We always return after 90 minutes on mainnet, even if we have the same response, - // because the max time has been reached. - let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { - tip_hash, - tip_height, - max_time, - cur_time, - .. - } = fetch_state_tip_and_local_time(state.clone()).await?; - - // Fetch the mempool data for the block template: - // - if the mempool transactions change, we might return from long polling. - // - // If the chain fork has just changed, miners want to get the new block as fast - // as possible, rather than wait for transactions to re-verify. This increases - // miner profits (and any delays can cause chain forks). 
So we don't wait between - // the chain tip changing and getting mempool transactions. - // - // Optional TODO: - // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some((mempool_txs, mempool_tx_deps)) = - fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. - .or_else(|| client_long_poll_id.is_none().then(Default::default)) - else { - continue; - }; - - // - Long poll ID calculation - let server_long_poll_id = LongPollInput::new( - tip_height, - tip_hash, - max_time, - mempool_txs.iter().map(|tx| tx.transaction.id), - ) - .generate_id(); - - // The loop finishes if: - // - the client didn't pass a long poll ID, - // - the server long poll ID is different to the client long poll ID, or - // - the previous loop iteration waited until the max time. - if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { - let mut submit_old = client_long_poll_id - .as_ref() - .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); - - // On testnet, the max time changes the block difficulty, so old shares are - // invalid. On mainnet, this means there has been 90 minutes without a new - // block or mempool transaction, which is very unlikely. So the miner should - // probably reset anyway. - if max_time_reached { - submit_old = Some(false); - } - - break ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ); - } - - // - Polling wait conditions - // - // TODO: when we're happy with this code, split it into a function. - // - // Periodically check the mempool for changes. 
- // - // Optional TODO: - // Remove this polling wait if we switch to using futures to detect sync status - // and mempool changes. - let wait_for_mempool_request = tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )); - - // Return immediately if the chain tip has changed. - // The clone preserves the seen status of the chain tip. - let mut wait_for_best_tip_change = latest_chain_tip.clone(); - let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); - - // Wait for the maximum block time to elapse. This can change the block header - // on testnet. (On mainnet it can happen due to a network disconnection, or a - // rapid drop in hash rate.) - // - // This duration might be slightly lower than the actual maximum, - // if cur_time was clamped to min_time. In that case the wait is very long, - // and it's ok to return early. - // - // It can also be zero if cur_time was clamped to max_time. In that case, - // we want to wait for another change, and ignore this timeout. So we use an - // `OptionFuture::None`. - let duration_until_max_time = max_time.saturating_duration_since(cur_time); - let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { - Some(tokio::time::sleep(duration_until_max_time.to_std())) - } else { - None - } - .into(); - - // Optional TODO: - // `zcashd` generates the next coinbase transaction while waiting for changes. - // When Zebra supports shielded coinbase, we might want to do this in parallel. - // But the coinbase value depends on the selected transactions, so this needs - // further analysis to check if it actually saves us any time. - - tokio::select! { - // Poll the futures in the listed order, for efficiency. - // We put the most frequent conditions first. 
- biased; - - // This timer elapses every few seconds - _elapsed = wait_for_mempool_request => { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - "checking for a new mempool change after waiting a few seconds" - ); - } - - // The state changes after around a target block interval (75s) - tip_changed_result = wait_for_best_tip_change => { - match tip_changed_result { - Ok(()) => { - // Spurious updates shouldn't happen in the state, because the - // difficulty and hash ordering is a stable total order. But - // since they could cause a busy-loop, guard against them here. - latest_chain_tip.mark_best_tip_seen(); - - let new_tip_hash = latest_chain_tip.best_tip_hash(); - if new_tip_hash == Some(tip_hash) { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - ?tip_hash, - ?tip_height, - "ignoring spurious state change notification" - ); - - // Wait for the mempool interval, then check for any changes. - tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )).await; - - continue; - } - - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll because state has changed" - ); - } - - Err(recv_error) => { - // This log is rare and helps with debugging, so it's ok to be info. - tracing::info!( - ?recv_error, - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll due to a state error.\ - Is Zebra shutting down?" - ); - - return Err(recv_error).map_error(server::error::LegacyCode::default()); - } - } - } - - // The max time does not elapse during normal operation on mainnet, - // and it rarely elapses on testnet. - Some(_elapsed) = wait_for_max_time => { - // This log is very rare so it's ok to be info. 
- tracing::info!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll because max time was reached" - ); - - max_time_reached = true; - } - } - }; - - // - Processing fetched data to create a transaction template - // - // Apart from random weighted transaction selection, - // the template only depends on the previously fetched data. - // This processing never fails. - - // Calculate the next block height. - let next_block_height = - (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); - - tracing::debug!( - mempool_tx_hashes = ?mempool_txs - .iter() - .map(|tx| tx.transaction.id.mined_id()) - .collect::>(), - "selecting transactions for the template from the mempool" - ); - - // Randomly select some mempool transactions. - let mempool_txs = zip317::select_mempool_transactions( - &network, - next_block_height, - &miner_address, - mempool_txs, - mempool_tx_deps, - debug_like_zcashd, - extra_coinbase_data.clone(), - ); - - tracing::debug!( - selected_mempool_tx_hashes = ?mempool_txs - .iter() - .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) - .collect::>(), - "selected transactions for the template from the mempool" - ); - - // - After this point, the template only depends on the previously fetched data. 
- - let response = GetBlockTemplate::new( - &network, - &miner_address, - &chain_tip_and_local_time, - server_long_poll_id, - mempool_txs, - submit_old, - debug_like_zcashd, - extra_coinbase_data, - ); - - Ok(response.into()) - } - - async fn submit_block( - &self, - HexData(block_bytes): HexData, - _parameters: Option, - ) -> Result { - let mut block_verifier_router = self.block_verifier_router.clone(); - - let block: Block = match block_bytes.zcash_deserialize_into() { - Ok(block_bytes) => block_bytes, - Err(error) => { - tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); - - return Ok(submit_block::ErrorResponse::Rejected.into()); - } - }; - - let block_height = block - .coinbase_height() - .ok_or_error(0, "coinbase height not found")?; - let block_hash = block.hash(); - - let block_verifier_router_response = block_verifier_router - .ready() - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? - .call(zebra_consensus::Request::Commit(Arc::new(block))) - .await; - - let chain_error = match block_verifier_router_response { - // Currently, this match arm returns `null` (Accepted) for blocks committed - // to any chain, but Accepted is only for blocks in the best chain. - // - // TODO (#5487): - // - Inconclusive: check if the block is on a side-chain - // The difference is important to miners, because they want to mine on the best chain. - Ok(block_hash) => { - tracing::info!(?block_hash, ?block_height, "submit block accepted"); - - self.mined_block_sender - .send((block_hash, block_height)) - .map_error_with_prefix(0, "failed to send mined block")?; - - return Ok(submit_block::Response::Accepted); - } - - // Turns BoxError into Result, - // by downcasting from Any to VerifyChainError. 
- Err(box_error) => { - let error = box_error - .downcast::() - .map(|boxed_chain_error| *boxed_chain_error); - - tracing::info!( - ?error, - ?block_hash, - ?block_height, - "submit block failed verification" - ); - - error - } - }; - - let response = match chain_error { - Ok(source) if source.is_duplicate_request() => submit_block::ErrorResponse::Duplicate, - - // Currently, these match arms return Reject for the older duplicate in a queue, - // but queued duplicates should be DuplicateInconclusive. - // - // Optional TODO (#5487): - // - DuplicateInconclusive: turn these non-finalized state duplicate block errors - // into BlockError enum variants, and handle them as DuplicateInconclusive: - // - "block already sent to be committed to the state" - // - "replaced by newer request" - // - keep the older request in the queue, - // and return a duplicate error for the newer request immediately. - // This improves the speed of the RPC response. - // - // Checking the download queues and BlockVerifierRouter buffer for duplicates - // might require architectural changes to Zebra, so we should only do it - // if mining pools really need it. - Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, - - // This match arm is currently unreachable, but if future changes add extra error types, - // we want to turn them into `Rejected`. 
- Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, - }; - - Ok(response.into()) - } - - async fn get_mining_info(&self) -> Result { - let network = self.network.clone(); - let mut state = self.state.clone(); - - let chain_tip = self.latest_chain_tip.clone(); - let tip_height = chain_tip.best_tip_height().unwrap_or(Height(0)).0; - - let mut current_block_tx = None; - if tip_height > 0 { - let mined_tx_ids = chain_tip.best_tip_mined_transaction_ids(); - current_block_tx = - (!mined_tx_ids.is_empty()).then(|| mined_tx_ids.len().saturating_sub(1)); - } - - let solution_rate_fut = self.get_network_sol_ps(None, None); - // Get the current block size. - let mut current_block_size = None; - if tip_height > 0 { - let request = zebra_state::ReadRequest::TipBlockSize; - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - current_block_size = match response { - zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), - _ => None, - }; - } - - Ok(get_mining_info::Response::new( - tip_height, - current_block_size, - current_block_tx, - network, - solution_rate_fut.await?, - )) - } - - async fn get_network_sol_ps( - &self, - num_blocks: Option, - height: Option, - ) -> Result { - // Default number of blocks is 120 if not supplied. - let mut num_blocks = num_blocks.unwrap_or(DEFAULT_SOLUTION_RATE_WINDOW_SIZE); - // But if it is 0 or negative, it uses the proof of work averaging window. - if num_blocks < 1 { - num_blocks = i32::try_from(POW_AVERAGING_WINDOW).expect("fits in i32"); - } - let num_blocks = - usize::try_from(num_blocks).expect("just checked for negatives, i32 fits in usize"); - - // Default height is the tip height if not supplied. Negative values also mean the tip - // height. Since negative values aren't valid heights, we can just use the conversion. 
- let height = height.and_then(|height| height.try_into_height().ok()); - - let mut state = self.state.clone(); - - let request = ReadRequest::SolutionRate { num_blocks, height }; - - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - - let solution_rate = match response { - // zcashd returns a 0 rate when the calculation is invalid - ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), - - _ => unreachable!("unmatched response to a solution rate request"), - }; - - Ok(solution_rate - .try_into() - .expect("per-second solution rate always fits in u64")) - } - - async fn get_peer_info(&self) -> Result> { - let address_book = self.address_book.clone(); - - Ok(address_book - .recently_live_peers(chrono::Utc::now()) - .into_iter() - .map(PeerInfo::new) - .collect()) - } - - async fn validate_address(&self, raw_address: String) -> Result { - let network = self.network.clone(); - - let Ok(address) = raw_address.parse::() else { - return Ok(validate_address::Response::invalid()); - }; - - let address = match address.convert::() { - Ok(address) => address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(validate_address::Response::invalid()); - } - }; - - // we want to match zcashd's behaviour - if !address.is_transparent() { - return Ok(validate_address::Response::invalid()); - } - - if address.network() == network.kind() { - Ok(validate_address::Response { - address: Some(raw_address), - is_valid: true, - is_script: Some(address.is_script_hash()), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address in validateaddress RPC: Zebra's configured network must match address network" - ); - - Ok(validate_address::Response::invalid()) - } - } - - async fn z_validate_address( - &self, - raw_address: String, - ) -> Result { - let network = self.network.clone(); - - let 
Ok(address) = raw_address.parse::() else { - return Ok(z_validate_address::Response::invalid()); - }; - - let address = match address.convert::() { - Ok(address) => address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(z_validate_address::Response::invalid()); - } - }; - - if address.network() == network.kind() { - Ok(z_validate_address::Response { - is_valid: true, - address: Some(raw_address), - address_type: Some(z_validate_address::AddressType::from(&address)), - is_mine: Some(false), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", - address.network(), - network - ); - - Ok(z_validate_address::Response::invalid()) - } - } - - async fn get_block_subsidy(&self, height: Option) -> Result { - let latest_chain_tip = self.latest_chain_tip.clone(); - let network = self.network.clone(); - - let height = if let Some(height) = height { - Height(height) - } else { - best_chain_tip_height(&latest_chain_tip)? - }; - - if height < network.height_for_first_halving() { - return Err(ErrorObject::borrowed( - 0, - "Zebra does not support founders' reward subsidies, \ - use a block height that is after the first halving", - None, - )); - } - - // Always zero for post-halving blocks - let founders = Amount::zero(); - - let total_block_subsidy = - block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; - let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())?; - - let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = - funding_stream_values(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())? 
- .into_iter() - // Separate the funding streams into deferred and non-deferred streams - .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); - - let is_post_nu6 = NetworkUpgrade::current(&network, height) >= NetworkUpgrade::Nu6; - - let [lockbox_total, funding_streams_total]: [std::result::Result< - Amount, - amount::Error, - >; 2] = [&lockbox_streams, &funding_streams] - .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); - - // Use the same funding stream order as zcashd - funding_streams.sort_by_key(|(receiver, _funding_stream)| { - ZCASHD_FUNDING_STREAM_ORDER - .iter() - .position(|zcashd_receiver| zcashd_receiver == receiver) - }); - - // Format the funding streams and lockbox streams - let [funding_streams, lockbox_streams]: [Vec<_>; 2] = [funding_streams, lockbox_streams] - .map(|streams| { - streams - .into_iter() - .map(|(receiver, value)| { - let address = funding_stream_address(height, &network, receiver); - FundingStream::new(is_post_nu6, receiver, value, address) - }) - .collect() - }); - - Ok(BlockSubsidy { - miner: miner_subsidy.into(), - founders: founders.into(), - funding_streams, - lockbox_streams, - funding_streams_total: funding_streams_total - .map_error(server::error::LegacyCode::default())? - .into(), - lockbox_total: lockbox_total - .map_error(server::error::LegacyCode::default())? 
- .into(), - total_block_subsidy: total_block_subsidy.into(), - }) - } - - async fn get_difficulty(&self) -> Result { - chain_tip_difficulty(self.network.clone(), self.state.clone(), false).await - } - - async fn z_list_unified_receivers(&self, address: String) -> Result { - use zcash_address::unified::Container; - - let (network, unified_address): ( - zcash_protocol::consensus::NetworkType, - zcash_address::unified::Address, - ) = zcash_address::unified::Encoding::decode(address.clone().as_str()) - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - - let mut p2pkh = String::new(); - let mut p2sh = String::new(); - let mut orchard = String::new(); - let mut sapling = String::new(); - - for item in unified_address.items() { - match item { - zcash_address::unified::Receiver::Orchard(_data) => { - let addr = zcash_address::unified::Address::try_from_items(vec![item]) - .expect("using data already decoded as valid"); - orchard = addr.encode(&network); - } - zcash_address::unified::Receiver::Sapling(data) => { - let addr = zebra_chain::primitives::Address::try_from_sapling(network, data) - .expect("using data already decoded as valid"); - sapling = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2pkh(data) => { - let addr = - zebra_chain::primitives::Address::try_from_transparent_p2pkh(network, data) - .expect("using data already decoded as valid"); - p2pkh = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2sh(data) => { - let addr = - zebra_chain::primitives::Address::try_from_transparent_p2sh(network, data) - .expect("using data already decoded as valid"); - p2sh = addr.payment_address().unwrap_or_default(); - } - _ => (), - } - } - - Ok(unified_address::Response::new( - orchard, sapling, p2pkh, p2sh, - )) - } - - async fn generate(&self, num_blocks: u32) -> Result> { - let rpc: GetBlockTemplateRpcImpl< - Mempool, - State, - Tip, - BlockVerifierRouter, - SyncStatus, - 
AddressBook, - > = self.clone(); - let network = self.network.clone(); - - if !network.disable_pow() { - return Err(ErrorObject::borrowed( - 0, - "generate is only supported on networks where PoW is disabled", - None, - )); - } - - let mut block_hashes = Vec::new(); - for _ in 0..num_blocks { - let block_template = rpc - .get_block_template(None) - .await - .map_error(server::error::LegacyCode::default())?; - - let get_block_template::Response::TemplateMode(block_template) = block_template else { - return Err(ErrorObject::borrowed( - 0, - "error generating block template", - None, - )); - }; - - let proposal_block = proposal_block_from_template( - &block_template, - TimeSource::CurTime, - NetworkUpgrade::current(&network, Height(block_template.height)), - ) - .map_error(server::error::LegacyCode::default())?; - let hex_proposal_block = HexData( - proposal_block - .zcash_serialize_to_vec() - .map_error(server::error::LegacyCode::default())?, - ); - - let _submit = rpc - .submit_block(hex_proposal_block, None) - .await - .map_error(server::error::LegacyCode::default())?; - - block_hashes.push(GetBlockHash(proposal_block.hash())); - } - - Ok(block_hashes) - } -} - -// Put support functions in a submodule, to keep this file small. diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs deleted file mode 100644 index 59b01132dee..00000000000 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ /dev/null @@ -1,480 +0,0 @@ -//! Support functions for the `get_block_template()` RPC. 
- -use std::{collections::HashMap, iter, sync::Arc}; - -use jsonrpsee::core::RpcResult as Result; -use jsonrpsee_types::{ErrorCode, ErrorObject}; -use tower::{Service, ServiceExt}; - -use zebra_chain::{ - amount::{self, Amount, NegativeOrZero, NonNegative}, - block::{ - self, - merkle::{self, AuthDataRoot}, - Block, ChainHistoryBlockTxAuthCommitmentHash, ChainHistoryMmrRootHash, Height, - }, - chain_sync_status::ChainSyncStatus, - chain_tip::ChainTip, - parameters::{subsidy::FundingStreamReceiver, Network, NetworkUpgrade}, - serialization::ZcashDeserializeInto, - transaction::{Transaction, UnminedTx, VerifiedUnminedTx}, - transparent, -}; -use zebra_consensus::{ - block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, -}; -use zebra_node_services::mempool::{self, TransactionDependencies}; -use zebra_state::GetBlockTemplateChainInfo; - -use crate::{ - methods::get_block_template_rpcs::{ - constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE}, - types::{default_roots::DefaultRoots, transaction::TransactionTemplate}, - }, - server::error::OkOrError, -}; - -pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; - -// - Parameter checks - -/// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode, -/// -/// Returns an error if there's a mismatch between the mode and whether `data` is provided. -pub fn check_parameters(parameters: &Option) -> Result<()> { - let Some(parameters) = parameters else { - return Ok(()); - }; - - match parameters { - JsonParameters { - mode: GetBlockTemplateRequestMode::Template, - data: None, - .. - } - | JsonParameters { - mode: GetBlockTemplateRequestMode::Proposal, - data: Some(_), - .. - } => Ok(()), - - JsonParameters { - mode: GetBlockTemplateRequestMode::Proposal, - data: None, - .. 
- } => Err(ErrorObject::borrowed( - ErrorCode::InvalidParams.code(), - "\"data\" parameter must be \ - provided in \"proposal\" mode", - None, - )), - - JsonParameters { - mode: GetBlockTemplateRequestMode::Template, - data: Some(_), - .. - } => Err(ErrorObject::borrowed( - ErrorCode::InvalidParams.code(), - "\"data\" parameter must be \ - omitted in \"template\" mode", - None, - )), - } -} - -/// Returns the miner address, or an error if it is invalid. -pub fn check_miner_address( - miner_address: Option, -) -> Result { - miner_address.ok_or_misc_error( - "set `mining.miner_address` in `zebrad.toml` to a transparent address".to_string(), - ) -} - -/// Attempts to validate block proposal against all of the server's -/// usual acceptance rules (except proof-of-work). -/// -/// Returns a `getblocktemplate` [`Response`]. -pub async fn validate_block_proposal( - mut block_verifier_router: BlockVerifierRouter, - block_proposal_bytes: Vec, - network: Network, - latest_chain_tip: Tip, - sync_status: SyncStatus, -) -> Result -where - BlockVerifierRouter: Service - + Clone - + Send - + Sync - + 'static, - Tip: ChainTip + Clone + Send + Sync + 'static, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, -{ - check_synced_to_tip(&network, latest_chain_tip, sync_status)?; - - let block: Block = match block_proposal_bytes.zcash_deserialize_into() { - Ok(block) => block, - Err(parse_error) => { - tracing::info!( - ?parse_error, - "error response from block parser in CheckProposal request" - ); - - return Ok( - ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(), - ); - } - }; - - let block_verifier_router_response = block_verifier_router - .ready() - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? 
- .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) - .await; - - Ok(block_verifier_router_response - .map(|_hash| ProposalResponse::Valid) - .unwrap_or_else(|verify_chain_error| { - tracing::info!( - ?verify_chain_error, - "error response from block_verifier_router in CheckProposal request" - ); - - ProposalResponse::rejected("invalid proposal", verify_chain_error) - }) - .into()) -} - -// - State and syncer checks - -/// Returns an error if Zebra is not synced to the consensus chain tip. -/// Returns early with `Ok(())` if Proof-of-Work is disabled on the provided `network`. -/// This error might be incorrect if the local clock is skewed. -pub fn check_synced_to_tip( - network: &Network, - latest_chain_tip: Tip, - sync_status: SyncStatus, -) -> Result<()> -where - Tip: ChainTip + Clone + Send + Sync + 'static, - SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, -{ - if network.is_a_test_network() { - return Ok(()); - } - - // The tip estimate may not be the same as the one coming from the state - // but this is ok for an estimate - let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip - .estimate_distance_to_network_chain_tip(network) - .ok_or_misc_error("no chain tip available yet")?; - - if !sync_status.is_close_to_tip() - || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP - { - tracing::info!( - ?estimated_distance_to_chain_tip, - ?local_tip_height, - "Zebra has not synced to the chain tip. \ - Hint: check your network connection, clock, and time zone settings." - ); - - return Err(ErrorObject::borrowed( - NOT_SYNCED_ERROR_CODE.code(), - "Zebra has not synced to the chain tip, \ - estimated distance: {estimated_distance_to_chain_tip:?}, \ - local tip: {local_tip_height:?}. \ - Hint: check your network connection, clock, and time zone settings.", - None, - )); - } - - Ok(()) -} - -// - State and mempool data fetches - -/// Returns the state data for the block template. 
-/// -/// You should call `check_synced_to_tip()` before calling this function. -/// If the state does not have enough blocks, returns an error. -pub async fn fetch_state_tip_and_local_time( - state: State, -) -> Result -where - State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, -{ - let request = zebra_state::ReadRequest::ChainInfo; - let response = state - .oneshot(request.clone()) - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - - let chain_info = match response { - zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info, - _ => unreachable!("incorrect response to {request:?}"), - }; - - Ok(chain_info) -} - -/// Returns the transactions that are currently in `mempool`, or None if the -/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state. -/// -/// You should call `check_synced_to_tip()` before calling this function. -/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions. 
-pub async fn fetch_mempool_transactions( - mempool: Mempool, - chain_tip_hash: block::Hash, -) -> Result, TransactionDependencies)>> -where - Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - > + 'static, - Mempool::Future: Send, -{ - let response = mempool - .oneshot(mempool::Request::FullTransactions) - .await - .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - - // TODO: Order transactions in block templates based on their dependencies - - let mempool::Response::FullTransactions { - transactions, - transaction_dependencies, - last_seen_tip_hash, - } = response - else { - unreachable!("unmatched response to a mempool::FullTransactions request") - }; - - // Check that the mempool and state were in sync when we made the requests - Ok((last_seen_tip_hash == chain_tip_hash).then_some((transactions, transaction_dependencies))) -} - -// - Response processing - -/// Generates and returns the coinbase transaction and default roots. -/// -/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` -/// in the `getblocktemplate` RPC. -pub fn generate_coinbase_and_roots( - network: &Network, - block_template_height: Height, - miner_address: &transparent::Address, - mempool_txs: &[VerifiedUnminedTx], - chain_history_root: Option, - like_zcashd: bool, - extra_coinbase_data: Vec, -) -> (TransactionTemplate, DefaultRoots) { - // Generate the coinbase transaction - let miner_fee = calculate_miner_fee(mempool_txs); - let coinbase_txn = generate_coinbase_transaction( - network, - block_template_height, - miner_address, - miner_fee, - like_zcashd, - extra_coinbase_data, - ); - - // Calculate block default roots - // - // TODO: move expensive root, hash, and tree cryptography to a rayon thread? 
- let chain_history_root = chain_history_root - .or_else(|| { - (NetworkUpgrade::Heartwood.activation_height(network) == Some(block_template_height)) - .then_some([0; 32].into()) - }) - .expect("history tree can't be empty"); - let default_roots = - calculate_default_root_hashes(&coinbase_txn, mempool_txs, chain_history_root); - - let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee); - - (coinbase_txn, default_roots) -} - -// - Coinbase transaction processing - -/// Returns a coinbase transaction for the supplied parameters. -/// -/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` -/// in the `getblocktemplate` RPC. -pub fn generate_coinbase_transaction( - network: &Network, - height: Height, - miner_address: &transparent::Address, - miner_fee: Amount, - like_zcashd: bool, - extra_coinbase_data: Vec, -) -> UnminedTx { - let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd); - - if like_zcashd { - Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data) - .into() - } else { - Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into() - } -} - -/// Returns the total miner fee for `mempool_txs`. -pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount { - let miner_fee: amount::Result> = - mempool_txs.iter().map(|tx| tx.miner_fee).sum(); - - miner_fee.expect( - "invalid selected transactions: \ - fees in a valid block can not be more than MAX_MONEY", - ) -} - -/// Returns the standard funding stream and miner reward transparent output scripts -/// for `network`, `height` and `miner_fee`. -/// -/// Only works for post-Canopy heights. -/// -/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` -/// in the `getblocktemplate` RPC. 
-pub fn standard_coinbase_outputs( - network: &Network, - height: Height, - miner_address: &transparent::Address, - miner_fee: Amount, - like_zcashd: bool, -) -> Vec<(Amount, transparent::Script)> { - let expected_block_subsidy = block_subsidy(height, network).expect("valid block subsidy"); - let funding_streams = funding_stream_values(height, network, expected_block_subsidy) - .expect("funding stream value calculations are valid for reasonable chain heights"); - - // Optional TODO: move this into a zebra_consensus function? - let funding_streams: HashMap< - FundingStreamReceiver, - (Amount, &transparent::Address), - > = funding_streams - .into_iter() - .filter_map(|(receiver, amount)| { - Some(( - receiver, - (amount, funding_stream_address(height, network, receiver)?), - )) - }) - .collect(); - - let miner_reward = miner_subsidy(height, network, expected_block_subsidy) - .expect("reward calculations are valid for reasonable chain heights") - + miner_fee; - let miner_reward = - miner_reward.expect("reward calculations are valid for reasonable chain heights"); - - combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd) -} - -/// Combine the miner reward and funding streams into a list of coinbase amounts and addresses. -/// -/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` -/// in the `getblocktemplate` RPC. -fn combine_coinbase_outputs( - funding_streams: HashMap, &transparent::Address)>, - miner_address: &transparent::Address, - miner_reward: Amount, - like_zcashd: bool, -) -> Vec<(Amount, transparent::Script)> { - // Collect all the funding streams and convert them to outputs. 
- let funding_streams_outputs: Vec<(Amount, &transparent::Address)> = - funding_streams - .into_iter() - .map(|(_receiver, (amount, address))| (amount, address)) - .collect(); - - let mut coinbase_outputs: Vec<(Amount, transparent::Script)> = - funding_streams_outputs - .iter() - .map(|(amount, address)| (*amount, address.create_script_from_address())) - .collect(); - - // The HashMap returns funding streams in an arbitrary order, - // but Zebra's snapshot tests expect the same order every time. - if like_zcashd { - // zcashd sorts outputs in serialized data order, excluding the length field - coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); - - // The miner reward is always the first output independent of the sort order - coinbase_outputs.insert( - 0, - (miner_reward, miner_address.create_script_from_address()), - ); - } else { - // Unlike zcashd, in Zebra the miner reward is part of the sorting - coinbase_outputs.push((miner_reward, miner_address.create_script_from_address())); - - // Zebra sorts by amount then script. - // - // Since the sort is stable, equal amounts will remain sorted by script. - coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); - coinbase_outputs.sort_by_key(|(amount, _script)| *amount); - } - - coinbase_outputs -} - -// - Transaction roots processing - -/// Returns the default block roots for the supplied coinbase and mempool transactions, -/// and the supplied history tree. -/// -/// This function runs expensive cryptographic operations. 
-pub fn calculate_default_root_hashes( - coinbase_txn: &UnminedTx, - mempool_txs: &[VerifiedUnminedTx], - chain_history_root: ChainHistoryMmrRootHash, -) -> DefaultRoots { - let (merkle_root, auth_data_root) = calculate_transaction_roots(coinbase_txn, mempool_txs); - - let block_commitments_hash = if chain_history_root == [0; 32].into() { - [0; 32].into() - } else { - ChainHistoryBlockTxAuthCommitmentHash::from_commitments( - &chain_history_root, - &auth_data_root, - ) - }; - - DefaultRoots { - merkle_root, - chain_history_root, - auth_data_root, - block_commitments_hash, - } -} - -/// Returns the transaction effecting and authorizing roots -/// for `coinbase_txn` and `mempool_txs`, which are used in the block header. -// -// TODO: should this be spawned into a cryptographic operations pool? -// (it would only matter if there were a lot of small transactions in a block) -pub fn calculate_transaction_roots( - coinbase_txn: &UnminedTx, - mempool_txs: &[VerifiedUnminedTx], -) -> (merkle::Root, AuthDataRoot) { - let block_transactions = - || iter::once(coinbase_txn).chain(mempool_txs.iter().map(|tx| &tx.transaction)); - - let merkle_root = block_transactions().cloned().collect(); - let auth_data_root = block_transactions().cloned().collect(); - - (merkle_root, auth_data_root) -} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types.rs deleted file mode 100644 index 41046f7b2b7..00000000000 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! Types used in mining RPC methods. 
- -pub mod default_roots; -pub mod get_block_template; -pub mod get_mining_info; -pub mod long_poll; -pub mod peer_info; -pub mod submit_block; -pub mod subsidy; -pub mod transaction; -pub mod unified_address; -pub mod validate_address; -pub mod z_validate_address; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs deleted file mode 100644 index 74142cd005d..00000000000 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ /dev/null @@ -1,389 +0,0 @@ -//! The `GetBlockTempate` type is the output of the `getblocktemplate` RPC method in the -//! default 'template' mode. See [`ProposalResponse`] for the output in 'proposal' mode. - -use std::fmt; - -use zebra_chain::{ - amount, - block::{ChainHistoryBlockTxAuthCommitmentHash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, - parameters::Network, - serialization::DateTime32, - transaction::VerifiedUnminedTx, - transparent, - work::difficulty::{CompactDifficulty, ExpandedDifficulty}, -}; -use zebra_consensus::MAX_BLOCK_SIGOPS; -use zebra_state::GetBlockTemplateChainInfo; - -use crate::methods::{ - get_block_template_rpcs::{ - constants::{ - GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD, GET_BLOCK_TEMPLATE_MUTABLE_FIELD, - GET_BLOCK_TEMPLATE_NONCE_RANGE_FIELD, - }, - get_block_template::generate_coinbase_and_roots, - types::{ - default_roots::DefaultRoots, long_poll::LongPollId, transaction::TransactionTemplate, - }, - }, - GetBlockHash, -}; - -pub mod parameters; -pub mod proposal; - -pub use parameters::{GetBlockTemplateCapability, GetBlockTemplateRequestMode, JsonParameters}; -pub use proposal::{proposal_block_from_template, ProposalResponse}; - -/// An alias to indicate that a usize value represents the depth of in-block dependencies of a transaction. -/// -/// See the `dependencies_depth()` function in [`zip317`](super::super::zip317) for more details. 
-pub type InBlockTxDependenciesDepth = usize; - -/// A serialized `getblocktemplate` RPC response in template mode. -#[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] -pub struct GetBlockTemplate { - /// The getblocktemplate RPC capabilities supported by Zebra. - /// - /// At the moment, Zebra does not support any of the extra capabilities from the specification: - /// - `proposal`: - /// - `longpoll`: - /// - `serverlist`: - /// - /// By the above, Zebra will always return an empty vector here. - pub capabilities: Vec, - - /// The version of the block format. - /// Always 4 for new Zcash blocks. - pub version: u32, - - /// The hash of the previous block. - #[serde(rename = "previousblockhash")] - pub previous_block_hash: GetBlockHash, - - /// The block commitment for the new block's header. - /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. - #[serde(rename = "blockcommitmentshash")] - #[serde(with = "hex")] - pub block_commitments_hash: ChainHistoryBlockTxAuthCommitmentHash, - - /// Legacy backwards-compatibility header root field. - /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. - #[serde(rename = "lightclientroothash")] - #[serde(with = "hex")] - pub light_client_root_hash: ChainHistoryBlockTxAuthCommitmentHash, - - /// Legacy backwards-compatibility header root field. - /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. - #[serde(rename = "finalsaplingroothash")] - #[serde(with = "hex")] - pub final_sapling_root_hash: ChainHistoryBlockTxAuthCommitmentHash, - - /// The block header roots for [`GetBlockTemplate.transactions`]. - /// - /// If the transactions in the block template are modified, these roots must be recalculated - /// [according to the specification](https://zcash.github.io/rpc/getblocktemplate.html). 
- #[serde(rename = "defaultroots")] - pub default_roots: DefaultRoots, - - /// The non-coinbase transactions selected for this block template. - pub transactions: Vec>, - - /// The coinbase transaction generated from `transactions` and `height`. - #[serde(rename = "coinbasetxn")] - pub coinbase_txn: TransactionTemplate, - - /// An ID that represents the chain tip and mempool contents for this template. - #[serde(rename = "longpollid")] - pub long_poll_id: LongPollId, - - /// The expected difficulty for the new block displayed in expanded form. - #[serde(with = "hex")] - pub target: ExpandedDifficulty, - - /// > For each block other than the genesis block, nTime MUST be strictly greater than - /// > the median-time-past of that block. - /// - /// - #[serde(rename = "mintime")] - pub min_time: DateTime32, - - /// Hardcoded list of block fields the miner is allowed to change. - pub mutable: Vec, - - /// A range of valid nonces that goes from `u32::MIN` to `u32::MAX`. - #[serde(rename = "noncerange")] - pub nonce_range: String, - - /// Max legacy signature operations in the block. - #[serde(rename = "sigoplimit")] - pub sigop_limit: u64, - - /// Max block size in bytes - #[serde(rename = "sizelimit")] - pub size_limit: u64, - - /// > the current time as seen by the server (recommended for block time). - /// > note this is not necessarily the system clock, and must fall within the mintime/maxtime rules - /// - /// - #[serde(rename = "curtime")] - pub cur_time: DateTime32, - - /// The expected difficulty for the new block displayed in compact form. - #[serde(with = "hex")] - pub bits: CompactDifficulty, - - /// The height of the next block in the best chain. 
- // Optional TODO: use Height type, but check that deserialized heights are within Height::MAX - pub height: u32, - - /// > the maximum time allowed - /// - /// - /// - /// Zebra adjusts the minimum and current times for testnet minimum difficulty blocks, - /// so we need to tell miners what the maximum valid time is. - /// - /// This field is not in `zcashd` or the Zcash RPC reference yet. - /// - /// Currently, some miners just use `min_time` or `cur_time`. Others calculate `max_time` from the - /// fixed 90 minute consensus rule, or a smaller fixed interval (like 1000s). - /// Some miners don't check the maximum time. This can cause invalid blocks after network downtime, - /// a significant drop in the hash rate, or after the testnet minimum difficulty interval. - #[serde(rename = "maxtime")] - pub max_time: DateTime32, - - /// > only relevant for long poll responses: - /// > indicates if work received prior to this response remains potentially valid (default) - /// > and should have its shares submitted; - /// > if false, the miner may wish to discard its share queue - /// - /// - /// - /// This field is not in `zcashd` or the Zcash RPC reference yet. - /// - /// In Zebra, `submit_old` is `false` when the tip block changed or max time is reached, - /// and `true` if only the mempool transactions have changed. - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - #[serde(rename = "submitold")] - pub submit_old: Option, -} - -impl fmt::Debug for GetBlockTemplate { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // A block with a lot of transactions can be extremely long in logs. 
- let mut transactions_truncated = self.transactions.clone(); - if self.transactions.len() > 4 { - // Remove transaction 3 onwards, but leave the last transaction - let end = self.transactions.len() - 2; - transactions_truncated.splice(3..=end, Vec::new()); - } - - f.debug_struct("GetBlockTemplate") - .field("capabilities", &self.capabilities) - .field("version", &self.version) - .field("previous_block_hash", &self.previous_block_hash) - .field("block_commitments_hash", &self.block_commitments_hash) - .field("light_client_root_hash", &self.light_client_root_hash) - .field("final_sapling_root_hash", &self.final_sapling_root_hash) - .field("default_roots", &self.default_roots) - .field("transaction_count", &self.transactions.len()) - .field("transactions", &transactions_truncated) - .field("coinbase_txn", &self.coinbase_txn) - .field("long_poll_id", &self.long_poll_id) - .field("target", &self.target) - .field("min_time", &self.min_time) - .field("mutable", &self.mutable) - .field("nonce_range", &self.nonce_range) - .field("sigop_limit", &self.sigop_limit) - .field("size_limit", &self.size_limit) - .field("cur_time", &self.cur_time) - .field("bits", &self.bits) - .field("height", &self.height) - .field("max_time", &self.max_time) - .field("submit_old", &self.submit_old) - .finish() - } -} - -impl GetBlockTemplate { - /// Returns a `Vec` of capabilities supported by the `getblocktemplate` RPC - pub fn capabilities() -> Vec { - GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD - .iter() - .map(ToString::to_string) - .collect() - } - - /// Returns a new [`GetBlockTemplate`] struct, based on the supplied arguments and defaults. - /// - /// The result of this method only depends on the supplied arguments and constants. - /// - /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` - /// in the `getblocktemplate` RPC. 
- #[allow(clippy::too_many_arguments)] - pub fn new( - network: &Network, - miner_address: &transparent::Address, - chain_tip_and_local_time: &GetBlockTemplateChainInfo, - long_poll_id: LongPollId, - #[cfg(not(test))] mempool_txs: Vec, - #[cfg(test)] mempool_txs: Vec<(InBlockTxDependenciesDepth, VerifiedUnminedTx)>, - submit_old: Option, - like_zcashd: bool, - extra_coinbase_data: Vec, - ) -> Self { - // Calculate the next block height. - let next_block_height = - (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); - - // Convert transactions into TransactionTemplates - #[cfg(not(test))] - let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = - mempool_txs.into_iter().map(|tx| ((&tx).into(), tx)).unzip(); - - // Transaction selection returns transactions in an arbitrary order, - // but Zebra's snapshot tests expect the same order every time. - // - // # Correctness - // - // Transactions that spend outputs created in the same block must appear - // after the transactions that create those outputs. - #[cfg(test)] - let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = { - let mut mempool_txs_with_templates: Vec<( - InBlockTxDependenciesDepth, - TransactionTemplate, - VerifiedUnminedTx, - )> = mempool_txs - .into_iter() - .map(|(min_tx_index, tx)| (min_tx_index, (&tx).into(), tx)) - .collect(); - - if like_zcashd { - // Sort in serialized data order, excluding the length byte. - // `zcashd` sometimes seems to do this, but other times the order is arbitrary. - mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { - (*min_tx_index, tx_template.data.clone()) - }); - } else { - // Sort by hash, this is faster. 
- mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { - (*min_tx_index, tx_template.hash.bytes_in_display_order()) - }); - } - mempool_txs_with_templates - .into_iter() - .map(|(_, template, tx)| (template, tx)) - .unzip() - }; - - // Generate the coinbase transaction and default roots - // - // TODO: move expensive root, hash, and tree cryptography to a rayon thread? - let (coinbase_txn, default_roots) = generate_coinbase_and_roots( - network, - next_block_height, - miner_address, - &mempool_txs, - chain_tip_and_local_time.chain_history_root, - like_zcashd, - extra_coinbase_data, - ); - - // Convert difficulty - let target = chain_tip_and_local_time - .expected_difficulty - .to_expanded() - .expect("state always returns a valid difficulty value"); - - // Convert default values - let capabilities: Vec = Self::capabilities(); - let mutable: Vec = GET_BLOCK_TEMPLATE_MUTABLE_FIELD - .iter() - .map(ToString::to_string) - .collect(); - - tracing::debug!( - selected_txs = ?mempool_txs - .iter() - .map(|tx| (tx.transaction.id.mined_id(), tx.unpaid_actions)) - .collect::>(), - "creating template ... 
" - ); - - GetBlockTemplate { - capabilities, - - version: ZCASH_BLOCK_VERSION, - - previous_block_hash: GetBlockHash(chain_tip_and_local_time.tip_hash), - block_commitments_hash: default_roots.block_commitments_hash, - light_client_root_hash: default_roots.block_commitments_hash, - final_sapling_root_hash: default_roots.block_commitments_hash, - default_roots, - - transactions: mempool_tx_templates, - - coinbase_txn, - - long_poll_id, - - target, - - min_time: chain_tip_and_local_time.min_time, - - mutable, - - nonce_range: GET_BLOCK_TEMPLATE_NONCE_RANGE_FIELD.to_string(), - - sigop_limit: MAX_BLOCK_SIGOPS, - - size_limit: MAX_BLOCK_BYTES, - - cur_time: chain_tip_and_local_time.cur_time, - - bits: chain_tip_and_local_time.expected_difficulty, - - height: next_block_height.0, - - max_time: chain_tip_and_local_time.max_time, - - submit_old, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] -#[serde(untagged)] -/// A `getblocktemplate` RPC response. -pub enum Response { - /// `getblocktemplate` RPC request in template mode. - TemplateMode(Box), - - /// `getblocktemplate` RPC request in proposal mode. - ProposalMode(ProposalResponse), -} - -impl Response { - /// Returns the inner template, if the response is in template mode. - pub fn try_into_template(self) -> Option { - match self { - Response::TemplateMode(template) => Some(*template), - Response::ProposalMode(_) => None, - } - } - - /// Returns the inner proposal, if the response is in proposal mode. - pub fn try_into_proposal(self) -> Option { - match self { - Response::TemplateMode(_) => None, - Response::ProposalMode(proposal) => Some(proposal), - } - } -} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/transaction.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/transaction.rs deleted file mode 100644 index c373722a362..00000000000 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/transaction.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! 
The `TransactionTemplate` type is part of the `getblocktemplate` RPC method output. - -use zebra_chain::{ - amount::{self, Amount, NegativeOrZero, NonNegative}, - block::merkle::AUTH_DIGEST_PLACEHOLDER, - transaction::{self, SerializedTransaction, UnminedTx, VerifiedUnminedTx}, -}; -use zebra_script::CachedFfiTransaction; - -/// Transaction data and fields needed to generate blocks using the `getblocktemplate` RPC. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(bound = "FeeConstraint: amount::Constraint + Clone")] -pub struct TransactionTemplate -where - FeeConstraint: amount::Constraint + Clone, -{ - /// The hex-encoded serialized data for this transaction. - #[serde(with = "hex")] - pub data: SerializedTransaction, - - /// The transaction ID of this transaction. - #[serde(with = "hex")] - pub(crate) hash: transaction::Hash, - - /// The authorizing data digest of a v5 transaction, or a placeholder for older versions. - #[serde(rename = "authdigest")] - #[serde(with = "hex")] - pub(crate) auth_digest: transaction::AuthDigest, - - /// The transactions in this block template that this transaction depends upon. - /// These are 1-based indexes in the `transactions` list. - /// - /// Zebra's mempool does not support transaction dependencies, so this list is always empty. - /// - /// We use `u16` because 2 MB blocks are limited to around 39,000 transactions. - pub(crate) depends: Vec, - - /// The fee for this transaction. - /// - /// Non-coinbase transactions must be `NonNegative`. - /// The Coinbase transaction `fee` is the negative sum of the fees of the transactions in - /// the block, so their fee must be `NegativeOrZero`. - pub(crate) fee: Amount, - - /// The number of transparent signature operations in this transaction. - pub(crate) sigops: u64, - - /// Is this transaction required in the block? - /// - /// Coinbase transactions are required, all other transactions are not. 
- pub(crate) required: bool, -} - -// Convert from a mempool transaction to a non-coinbase transaction template. -impl From<&VerifiedUnminedTx> for TransactionTemplate { - fn from(tx: &VerifiedUnminedTx) -> Self { - assert!( - !tx.transaction.transaction.is_coinbase(), - "unexpected coinbase transaction in mempool" - ); - - Self { - data: tx.transaction.transaction.as_ref().into(), - hash: tx.transaction.id.mined_id(), - auth_digest: tx - .transaction - .id - .auth_digest() - .unwrap_or(AUTH_DIGEST_PLACEHOLDER), - - // Always empty, not supported by Zebra's mempool. - depends: Vec::new(), - - fee: tx.miner_fee, - - sigops: tx.legacy_sigop_count, - - // Zebra does not require any transactions except the coinbase transaction. - required: false, - } - } -} - -impl From for TransactionTemplate { - fn from(tx: VerifiedUnminedTx) -> Self { - Self::from(&tx) - } -} - -impl TransactionTemplate { - /// Convert from a generated coinbase transaction into a coinbase transaction template. - /// - /// `miner_fee` is the total miner fees for the block, excluding newly created block rewards. - // - // TODO: use a different type for generated coinbase transactions? - pub fn from_coinbase(tx: &UnminedTx, miner_fee: Amount) -> Self { - assert!( - tx.transaction.is_coinbase(), - "invalid generated coinbase transaction: \ - must have exactly one input, which must be a coinbase input", - ); - - let miner_fee = (-miner_fee) - .constrain() - .expect("negating a NonNegative amount always results in a valid NegativeOrZero"); - - let legacy_sigop_count = CachedFfiTransaction::new(tx.transaction.clone(), Vec::new()) - .legacy_sigop_count() - .expect( - "invalid generated coinbase transaction: \ - failure in zcash_script sigop count", - ); - - Self { - data: tx.transaction.as_ref().into(), - hash: tx.id.mined_id(), - auth_digest: tx.id.auth_digest().unwrap_or(AUTH_DIGEST_PLACEHOLDER), - - // Always empty, coinbase transactions never have inputs. 
- depends: Vec::new(), - - fee: miner_fee, - - sigops: legacy_sigop_count, - - // Zcash requires a coinbase transaction. - required: true, - } - } -} diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index dbac40d3fb3..74d9ff89a25 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -1,45 +1,46 @@ //! Randomised property tests for RPC methods. -use std::collections::HashMap; -use std::{collections::HashSet, fmt::Debug, sync::Arc}; +use crate::methods::{ + self, + types::{ + get_blockchain_info, + get_raw_mempool::{GetRawMempool, MempoolObject}, + }, +}; +use super::super::{ + AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, +}; use futures::{join, FutureExt, TryFutureExt}; use hex::{FromHex, ToHex}; use jsonrpsee_types::{ErrorCode, ErrorObject}; use proptest::{collection::vec, prelude::*}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, +}; use thiserror::Error; use tokio::sync::oneshot; use tower::buffer::Buffer; - -use zebra_chain::history_tree::HistoryTree; use zebra_chain::{ amount::{Amount, NonNegative}, - block::{Block, Height}, + block::{self, Block, Height}, + chain_sync_status::MockSyncStatus, chain_tip::{mock::MockChainTip, ChainTip, NoChainTip}, + history_tree::HistoryTree, parameters::{ConsensusBranchId, Network, NetworkUpgrade}, serialization::{DateTime32, ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}, transaction::{self, Transaction, UnminedTx, VerifiedUnminedTx}, transparent, value_balance::ValueBalance, }; - use zebra_consensus::ParameterCheckpoint; use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::mempool; use zebra_state::{BoxError, GetBlockTemplateChainInfo}; - use zebra_test::mock_service::MockService; -use crate::methods::types::MempoolObject; -use crate::methods::{ - self, - types::{Balance, GetRawMempool}, -}; - -use super::super::{ - AddressBalance, 
AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, -}; - proptest! { /// Test that when sending a raw transaction, it is received by the mempool service. #[test] @@ -583,7 +584,7 @@ proptest! { prop_assert_eq!(response.best_block_hash, genesis_block.header.hash()); prop_assert_eq!(response.chain, network.bip70_network_name()); prop_assert_eq!(response.blocks, Height::MIN); - prop_assert_eq!(response.value_pools, Balance::value_pools(ValueBalance::zero())); + prop_assert_eq!(response.value_pools, get_blockchain_info::Balance::value_pools(ValueBalance::zero())); let genesis_branch_id = NetworkUpgrade::current(&network, Height::MIN).branch_id().unwrap_or(ConsensusBranchId::RPC_MISSING_ID); let next_height = (Height::MIN + 1).expect("genesis height plus one is next height and valid"); @@ -948,6 +949,12 @@ fn mock_services( >, Tip, MockAddressBookPeers, + zebra_test::mock_service::MockService< + zebra_consensus::Request, + block::Hash, + zebra_test::mock_service::PropTestAssertion, + >, + MockSyncStatus, >, tokio::task::JoinHandle<()>, ) @@ -956,19 +963,23 @@ where { let mempool = MockService::build().for_prop_tests(); let state = MockService::build().for_prop_tests(); + let block_verifier_router = MockService::build().for_prop_tests(); let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, mempool_tx_queue) = RpcImpl::new( + network, + Default::default(), + Default::default(), "0.0.1", "RPC test", - network, - false, - true, mempool.clone(), Buffer::new(state.clone(), 1), + block_verifier_router, + MockSyncStatus::default(), chain_tip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); (mempool, state, rpc, mempool_tx_queue) diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index ab32234fccd..ef682d4fae0 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -5,34 +5,65 @@ //! 
cargo insta test --review --release -p zebra-rpc --lib -- test_rpc_response_data //! ``` -use std::{collections::BTreeMap, sync::Arc}; +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, + sync::Arc, + time::Instant, +}; use futures::FutureExt; -use insta::dynamic_redaction; +use hex::FromHex; +use insta::{dynamic_redaction, Settings}; use jsonrpsee::core::RpcResult as Result; -use tower::buffer::Buffer; +use tower::{buffer::Buffer, util::BoxService, Service}; use zebra_chain::{ - block::Block, + block::{Block, Hash}, + chain_sync_status::MockSyncStatus, chain_tip::mock::MockChainTip, orchard, parameters::{ subsidy::POST_NU6_FUNDING_STREAMS_TESTNET, testnet::{self, ConfiguredActivationHeights, Parameters}, - Network::Mainnet, + Network::{self, Mainnet}, + NetworkUpgrade, }, sapling, - serialization::ZcashDeserializeInto, + serialization::{DateTime32, ZcashDeserializeInto}, subtree::NoteCommitmentSubtreeData, + transaction::Transaction, + transparent, + work::difficulty::CompactDifficulty, +}; +use zebra_consensus::Request; +use zebra_network::{ + address_book_peers::MockAddressBookPeers, + types::{MetaAddr, PeerServices}, +}; +use zebra_node_services::{mempool, BoxError}; +use zebra_state::{GetBlockTemplateChainInfo, ReadRequest, ReadResponse, MAX_ON_DISK_HEIGHT}; +use zebra_test::{ + mock_service::{MockService, PanicAssertion}, + vectors::BLOCK_MAINNET_1_BYTES, }; -use zebra_network::address_book_peers::MockAddressBookPeers; -use zebra_node_services::BoxError; -use zebra_state::{ReadRequest, ReadResponse, MAX_ON_DISK_HEIGHT}; -use zebra_test::mock_service::MockService; -use super::super::*; +use crate::methods::{ + hex_data::HexData, + tests::utils::fake_history_tree, + types::{ + get_block_template::{self, GetBlockTemplateRequestMode}, + get_mining_info, + long_poll::{LongPollId, LONG_POLL_ID_LENGTH}, + peer_info::PeerInfo, + submit_block, + subsidy::BlockSubsidy, + unified_address, validate_address, z_validate_address, + }, + 
GetBlockHash, +}; -mod get_block_template_rpcs; +use super::super::*; /// The first block height in the state that can never be stored in the database, /// due to optimisations in the disk format. @@ -78,7 +109,7 @@ async fn test_z_get_treestate() { let _init_guard = zebra_test::init(); const SAPLING_ACTIVATION_HEIGHT: u32 = 2; - let testnet = Parameters::build() + let custom_testnet = Parameters::build() .with_activation_heights(ConfiguredActivationHeights { sapling: Some(SAPLING_ACTIVATION_HEIGHT), // We need to set the NU5 activation height higher than the height of the last block for @@ -98,27 +129,29 @@ async fn test_z_get_treestate() { // Initiate the snapshots of the RPC responses. let mut settings = insta::Settings::clone_current(); - settings.set_snapshot_suffix(network_string(&testnet).to_string()); + settings.set_snapshot_suffix(network_string(&custom_testnet).to_string()); - let blocks: Vec<_> = testnet + let blocks: Vec<_> = custom_testnet .blockchain_iter() .map(|(_, block_bytes)| block_bytes.zcash_deserialize_into().unwrap()) .collect(); - let (_, state, tip, _) = zebra_state::populated_state(blocks.clone(), &testnet).await; - + let (_, state, tip, _) = zebra_state::populated_state(blocks.clone(), &custom_testnet).await; let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, _) = RpcImpl::new( - "", - "", - testnet, + custom_testnet, + Default::default(), false, - true, + "0.0.1", + "RPC test", Buffer::new(MockService::build().for_unit_tests::<_, _, BoxError>(), 1), state, + Buffer::new(MockService::build().for_unit_tests::<_, _, BoxError>(), 1), + MockSyncStatus::default(), tip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); // Request the treestate by a hash. 
@@ -181,39 +214,48 @@ async fn test_rpc_response_data_for_network(network: &Network) { MockService::build().for_unit_tests(); // Create a populated state service - let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), network).await; + let (state, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), network).await; // Start snapshots of RPC responses. let mut settings = insta::Settings::clone_current(); settings.set_snapshot_suffix(format!("{}_{}", network_string(network), blocks.len() - 1)); - // Test the `getblocktemplate` RPC snapshots. - get_block_template_rpcs::test_responses( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( + zebra_consensus::Config::default(), + network, + state.clone(), + ) + .await; + + test_mining_rpcs( network, mempool.clone(), - state, read_state.clone(), + block_verifier_router.clone(), settings.clone(), ) .await; - // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( - "0.0.1", - "/Zebra:RPC test/", + + let (rpc, _) = RpcImpl::new( network.clone(), + Default::default(), false, - true, + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), read_state, - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + block_verifier_router, + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); - // We only want a snapshot of the `getblocksubsidy` and `getblockchaininfo` methods for the non-default Testnet (with an NU6 activation height). + // We only want a snapshot of the `getblocksubsidy` and `getblockchaininfo` methods for the + // non-default Testnet (with an NU6 activation height). 
if network.is_a_test_network() && !network.is_default_testnet() { let get_blockchain_info = rpc .get_blockchain_info() @@ -528,20 +570,22 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { let (latest_chain_tip, _) = MockChainTip::new(); let mut state = MockService::build().for_unit_tests(); - let mempool = MockService::build().for_unit_tests(); let (_tx, rx) = tokio::sync::watch::channel(None); let (rpc, _) = RpcImpl::new( - "0.0.1", - "/Zebra:RPC test/", network.clone(), + Default::default(), false, - true, - mempool, + "0.0.1", + "RPC test", + MockService::build().for_unit_tests(), state.clone(), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), latest_chain_tip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); // Test the response format from `z_getsubtreesbyindex` for Sapling. @@ -607,7 +651,7 @@ fn snapshot_rpc_getinfo(info: GetInfo, settings: &insta::Settings) { insta::assert_json_snapshot!("get_info", info, { ".subversion" => dynamic_redaction(|value, _path| { // assert that the subversion value is user agent - assert_eq!(value.as_str().unwrap(), format!("/Zebra:RPC test/")); + assert_eq!(value.as_str().unwrap(), format!("RPC test")); // replace with: "[SubVersion]" }), @@ -739,9 +783,587 @@ fn snapshot_rpc_getaddressutxos(utxos: Vec, settings: &insta::S settings.bind(|| insta::assert_json_snapshot!("get_address_utxos", utxos)); } +/// Snapshot `getblockcount` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getblockcount(block_count: u32, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_block_count", block_count)); +} + +/// Snapshot valid `getblockhash` response, using `cargo insta` and JSON serialization. 
+fn snapshot_rpc_getblockhash_valid(block_hash: GetBlockHash, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_block_hash_valid", block_hash)); +} + +/// Snapshot invalid `getblockhash` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getblockhash_invalid( + variant: &'static str, + block_hash: Result, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("get_block_hash_invalid_{variant}"), block_hash) + }); +} + +/// Snapshot `getblocktemplate` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getblocktemplate( + variant: &'static str, + block_template: get_block_template::Response, + coinbase_tx: Option, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("get_block_template_{variant}"), block_template) + }); + + if let Some(coinbase_tx) = coinbase_tx { + settings.bind(|| { + insta::assert_ron_snapshot!( + format!("get_block_template_{variant}.coinbase_tx"), + coinbase_tx + ) + }); + }; +} + +/// Snapshot `submitblock` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_submit_block_invalid( + submit_block_response: submit_block::Response, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!("snapshot_rpc_submit_block_invalid", submit_block_response) + }); +} + +/// Snapshot `getmininginfo` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getmininginfo( + get_mining_info: get_mining_info::Response, + settings: &insta::Settings, +) { + settings.bind(|| insta::assert_json_snapshot!("get_mining_info", get_mining_info)); +} + +/// Snapshot `getblocksubsidy` response, using `cargo insta` and JSON serialization. 
+fn snapshot_rpc_getblocksubsidy( + variant: &'static str, + get_block_subsidy: BlockSubsidy, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("get_block_subsidy_{variant}"), get_block_subsidy) + }); +} + +/// Snapshot `getpeerinfo` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getpeerinfo(get_peer_info: Vec, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_peer_info", get_peer_info)); +} + +/// Snapshot `getnetworksolps` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getnetworksolps(get_network_sol_ps: u64, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_network_sol_ps", get_network_sol_ps)); +} + +/// Snapshot `validateaddress` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_validateaddress( + variant: &'static str, + validate_address: validate_address::Response, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("validate_address_{variant}"), validate_address) + }); +} + +/// Snapshot `z_validateaddress` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_z_validateaddress( + variant: &'static str, + z_validate_address: z_validate_address::Response, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("z_validate_address_{variant}"), z_validate_address) + }); +} + +/// Snapshot valid `getdifficulty` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getdifficulty_valid( + variant: &'static str, + difficulty: f64, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("get_difficulty_valid_{variant}"), difficulty) + }); +} + +/// Snapshot `snapshot_rpc_z_listunifiedreceivers` response, using `cargo insta` and JSON serialization. 
+fn snapshot_rpc_z_listunifiedreceivers( + variant: &'static str, + response: unified_address::Response, + settings: &insta::Settings, +) { + settings.bind(|| { + insta::assert_json_snapshot!(format!("z_list_unified_receivers_{variant}"), response) + }); +} + /// Utility function to convert a `Network` to a lowercase string. fn network_string(network: &Network) -> String { let mut net_suffix = network.to_string(); net_suffix.make_ascii_lowercase(); net_suffix } + +pub async fn test_mining_rpcs( + network: &Network, + mempool: MockService< + mempool::Request, + mempool::Response, + PanicAssertion, + zebra_node_services::BoxError, + >, + read_state: ReadState, + block_verifier_router: Buffer, Request>, + settings: Settings, +) where + ReadState: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, +{ + let mut mock_sync_status = MockSyncStatus::default(); + mock_sync_status.set_is_close_to_tip(true); + + #[allow(clippy::unnecessary_struct_initialization)] + let mining_conf = crate::config::mining::Config { + miner_address: Some(transparent::Address::from_script_hash( + network.kind(), + [0xad; 20], + )), + extra_coinbase_data: None, + debug_like_zcashd: true, + // TODO: Use default field values when optional features are enabled in tests #8183 + internal_miner: true, + }; + + // nu5 block height + let fake_tip_height = NetworkUpgrade::Nu5.activation_height(network).unwrap(); + // nu5 block hash + let fake_tip_hash = + Hash::from_hex("0000000000d723156d9b65ffcf4984da7a19675ed7e2f06d9e5d5188af087bf8").unwrap(); + + // nu5 block time + 1 + let fake_min_time = DateTime32::from(1654008606); + // nu5 block time + 12 + let fake_cur_time = DateTime32::from(1654008617); + // nu5 block time + 123 + let fake_max_time = DateTime32::from(1654008728); + + // Use a valid fractional difficulty for snapshots + let pow_limit = network.target_difficulty_limit(); + let 
fake_difficulty = pow_limit * 2 / 3; + let fake_difficulty = CompactDifficulty::from(fake_difficulty); + + let (mock_tip, mock_tip_sender) = MockChainTip::new(); + mock_tip_sender.send_best_tip_height(fake_tip_height); + mock_tip_sender.send_best_tip_hash(fake_tip_hash); + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); + + let mock_address_book = MockAddressBookPeers::new(vec![MetaAddr::new_connected( + SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + network.default_port(), + ) + .into(), + &PeerServices::NODE_NETWORK, + false, + ) + .into_new_meta_addr(Instant::now(), DateTime32::now())]); + + let (_tx, rx) = tokio::sync::watch::channel(None); + + let (rpc, _) = RpcImpl::new( + network.clone(), + mining_conf.clone(), + false, + "0.0.1", + "RPC test", + Buffer::new(mempool.clone(), 1), + read_state, + block_verifier_router.clone(), + mock_sync_status.clone(), + mock_tip.clone(), + mock_address_book, + rx.clone(), + None, + ); + + if network.is_a_test_network() && !network.is_default_testnet() { + let fake_future_nu6_block_height = + NetworkUpgrade::Nu6.activation_height(network).unwrap().0 + 100_000; + let get_block_subsidy = rpc + .get_block_subsidy(Some(fake_future_nu6_block_height)) + .await + .expect("We should have a success response"); + snapshot_rpc_getblocksubsidy("future_nu6_height", get_block_subsidy, &settings); + // We only want a snapshot of the `getblocksubsidy` method for the non-default Testnet (with an NU6 activation height). 
+ return; + } + + // `getblockcount` + let get_block_count = rpc.get_block_count().expect("We should have a number"); + snapshot_rpc_getblockcount(get_block_count, &settings); + + // `getblockhash` + const BLOCK_HEIGHT10: i32 = 10; + + let get_block_hash = rpc + .get_block_hash(BLOCK_HEIGHT10) + .await + .expect("We should have a GetBlockHash struct"); + snapshot_rpc_getblockhash_valid(get_block_hash, &settings); + + let get_block_hash = rpc + .get_block_hash( + EXCESSIVE_BLOCK_HEIGHT + .try_into() + .expect("constant fits in i32"), + ) + .await; + snapshot_rpc_getblockhash_invalid("excessive_height", get_block_hash, &settings); + + // `getmininginfo` + let get_mining_info = rpc + .get_mining_info() + .await + .expect("We should have a success response"); + snapshot_rpc_getmininginfo(get_mining_info, &settings); + + // `getblocksubsidy` + let fake_future_block_height = fake_tip_height.0 + 100_000; + let get_block_subsidy = rpc + .get_block_subsidy(Some(fake_future_block_height)) + .await + .expect("We should have a success response"); + snapshot_rpc_getblocksubsidy("future_height", get_block_subsidy, &settings); + + let get_block_subsidy = rpc + .get_block_subsidy(None) + .await + .expect("We should have a success response"); + snapshot_rpc_getblocksubsidy("tip_height", get_block_subsidy, &settings); + + let get_block_subsidy = rpc + .get_block_subsidy(Some(EXCESSIVE_BLOCK_HEIGHT)) + .await + .expect("We should have a success response"); + snapshot_rpc_getblocksubsidy("excessive_height", get_block_subsidy, &settings); + + // `getpeerinfo` + let get_peer_info = rpc + .get_peer_info() + .await + .expect("We should have a success response"); + snapshot_rpc_getpeerinfo(get_peer_info, &settings); + + // `getnetworksolps` (and `getnetworkhashps`) + // + // TODO: add tests for excessive num_blocks and height (#6688) + // add the same tests for get_network_hash_ps + let get_network_sol_ps = rpc + .get_network_sol_ps(None, None) + .await + .expect("We should have a success 
response"); + snapshot_rpc_getnetworksolps(get_network_sol_ps, &settings); + + // `getblocktemplate` - the following snapshots use a mock read_state + + // get a new empty state + let read_state = MockService::build().for_unit_tests(); + + let make_mock_read_state_request_handler = || { + let mut read_state = read_state.clone(); + + async move { + read_state + .expect_request_that(|req| matches!(req, ReadRequest::ChainInfo)) + .await + .respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo { + expected_difficulty: fake_difficulty, + tip_height: fake_tip_height, + tip_hash: fake_tip_hash, + cur_time: fake_cur_time, + min_time: fake_min_time, + max_time: fake_max_time, + chain_history_root: fake_history_tree(network).hash(), + })); + } + }; + + let make_mock_mempool_request_handler = || { + let mut mempool = mempool.clone(); + + async move { + mempool + .expect_request(mempool::Request::FullTransactions) + .await + .respond(mempool::Response::FullTransactions { + transactions: vec![], + transaction_dependencies: Default::default(), + // tip hash needs to match chain info for long poll requests + last_seen_tip_hash: fake_tip_hash, + }); + } + }; + + // send tip hash and time needed for getblocktemplate rpc + mock_tip_sender.send_best_tip_hash(fake_tip_hash); + + let (rpc_mock_state, _) = RpcImpl::new( + network.clone(), + mining_conf.clone(), + false, + "0.0.1", + "RPC test", + Buffer::new(mempool.clone(), 1), + read_state.clone(), + block_verifier_router, + mock_sync_status.clone(), + mock_tip.clone(), + MockAddressBookPeers::default(), + rx.clone(), + None, + ); + + // Basic variant (default mode and no extra features) + + // Fake the ChainInfo and FullTransaction responses + let mock_read_state_request_handler = make_mock_read_state_request_handler(); + let mock_mempool_request_handler = make_mock_mempool_request_handler(); + + let get_block_template_fut = rpc_mock_state.get_block_template(None); + + let (get_block_template, ..) 
= tokio::join!( + get_block_template_fut, + mock_mempool_request_handler, + mock_read_state_request_handler, + ); + + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) + }; + + let coinbase_tx: Transaction = get_block_template + .coinbase_txn + .data + .as_ref() + .zcash_deserialize_into() + .expect("coinbase bytes are valid"); + + snapshot_rpc_getblocktemplate( + "basic", + (*get_block_template).into(), + Some(coinbase_tx), + &settings, + ); + + // long polling feature with submit old field + + let long_poll_id: LongPollId = "0" + .repeat(LONG_POLL_ID_LENGTH) + .parse() + .expect("unexpected invalid LongPollId"); + + // Fake the ChainInfo and FullTransaction responses + let mock_read_state_request_handler = make_mock_read_state_request_handler(); + let mock_mempool_request_handler = make_mock_mempool_request_handler(); + + let get_block_template_fut = rpc_mock_state.get_block_template( + get_block_template::JsonParameters { + long_poll_id: long_poll_id.into(), + ..Default::default() + } + .into(), + ); + + let (get_block_template, ..) 
= tokio::join!( + get_block_template_fut, + mock_mempool_request_handler, + mock_read_state_request_handler, + ); + + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) + }; + + let coinbase_tx: Transaction = get_block_template + .coinbase_txn + .data + .as_ref() + .zcash_deserialize_into() + .expect("coinbase bytes are valid"); + + snapshot_rpc_getblocktemplate( + "long_poll", + (*get_block_template).into(), + Some(coinbase_tx), + &settings, + ); + + // `getblocktemplate` proposal mode variant + + let get_block_template = + rpc_mock_state.get_block_template(Some(get_block_template::JsonParameters { + mode: GetBlockTemplateRequestMode::Proposal, + data: Some(HexData("".into())), + ..Default::default() + })); + + let get_block_template = get_block_template + .await + .expect("unexpected error in getblocktemplate RPC call"); + + snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings); + + // the following snapshots use a mock read_state and block_verifier_router + + let mut mock_block_verifier_router = MockService::build().for_unit_tests(); + let (rpc_mock_state_verifier, _) = RpcImpl::new( + network.clone(), + mining_conf, + false, + "0.0.1", + "RPC test", + Buffer::new(mempool, 1), + read_state.clone(), + mock_block_verifier_router.clone(), + mock_sync_status, + mock_tip, + MockAddressBookPeers::default(), + rx, + None, + ); + + let get_block_template_fut = + rpc_mock_state_verifier.get_block_template(Some(get_block_template::JsonParameters { + mode: GetBlockTemplateRequestMode::Proposal, + data: Some(HexData(BLOCK_MAINNET_1_BYTES.to_vec())), + ..Default::default() + })); + + let mock_block_verifier_router_request_handler = async move { + mock_block_verifier_router + .expect_request_that(|req| matches!(req, 
zebra_consensus::Request::CheckProposal(_))) + .await + .respond(Hash::from([0; 32])); + }; + + let (get_block_template, ..) = tokio::join!( + get_block_template_fut, + mock_block_verifier_router_request_handler, + ); + + let get_block_template = + get_block_template.expect("unexpected error in getblocktemplate RPC call"); + + snapshot_rpc_getblocktemplate("proposal", get_block_template, None, &settings); + + // These RPC snapshots use the populated state + + // `submitblock` + + let submit_block = rpc + .submit_block(HexData("".into()), None) + .await + .expect("unexpected error in submitblock RPC call"); + + snapshot_rpc_submit_block_invalid(submit_block, &settings); + + // `validateaddress` + let founder_address = if network.is_mainnet() { + "t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR" + } else { + "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" + }; + + let validate_address = rpc + .validate_address(founder_address.to_string()) + .await + .expect("We should have a validate_address::Response"); + snapshot_rpc_validateaddress("basic", validate_address, &settings); + + let validate_address = rpc + .validate_address("".to_string()) + .await + .expect("We should have a validate_address::Response"); + snapshot_rpc_validateaddress("invalid", validate_address, &settings); + + // `z_validateaddress` + let founder_address = if network.is_mainnet() { + "t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR" + } else { + "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" + }; + + let z_validate_address = rpc + .z_validate_address(founder_address.to_string()) + .await + .expect("We should have a z_validate_address::Response"); + snapshot_rpc_z_validateaddress("basic", z_validate_address, &settings); + + let z_validate_address = rpc + .z_validate_address("".to_string()) + .await + .expect("We should have a z_validate_address::Response"); + snapshot_rpc_z_validateaddress("invalid", z_validate_address, &settings); + + // `getdifficulty` + // This RPC snapshot uses both the mock and populated states + + // Fake the ChainInfo 
response using the mock state + let mock_read_state_request_handler = make_mock_read_state_request_handler(); + + let get_difficulty_fut = rpc_mock_state.get_difficulty(); + + let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); + + let mock_get_difficulty = get_difficulty.expect("unexpected error in getdifficulty RPC call"); + + snapshot_rpc_getdifficulty_valid("mock", mock_get_difficulty, &settings); + + // `z_listunifiedreceivers` + + let ua1 = String::from( + "u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf", + ); + let z_list_unified_receivers = rpc + .z_list_unified_receivers(ua1) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); + + snapshot_rpc_z_listunifiedreceivers("ua1", z_list_unified_receivers, &settings); + + let ua2 = String::from( + "u1uf4qsmh037x2jp6k042h9d2w22wfp39y9cqdf8kcg0gqnkma2gf4g80nucnfeyde8ev7a6kf0029gnwqsgadvaye9740gzzpmr67nfkjjvzef7rkwqunqga4u4jges4tgptcju5ysd0", + ); + let z_list_unified_receivers = rpc + .z_list_unified_receivers(ua2) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); + + snapshot_rpc_z_listunifiedreceivers("ua2", z_list_unified_receivers, &settings); +} diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs deleted file mode 100644 index afd8aca3bfa..00000000000 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ /dev/null @@ -1,636 +0,0 @@ -//! Snapshot tests for getblocktemplate RPCs. -//! -//! To update these snapshots, run: -//! ```sh -//! cargo insta test --review --delete-unreferenced-snapshots -//! 
``` - -use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - time::Instant, -}; - -use hex::FromHex; -use insta::Settings; -use jsonrpsee::core::RpcResult as Result; -use tower::{buffer::Buffer, Service}; - -use zebra_chain::{ - block::Hash, - chain_sync_status::MockSyncStatus, - chain_tip::mock::MockChainTip, - parameters::{Network, NetworkUpgrade}, - serialization::{DateTime32, ZcashDeserializeInto}, - transaction::Transaction, - transparent, - work::difficulty::{CompactDifficulty, ParameterDifficulty as _}, -}; -use zebra_network::{ - address_book_peers::MockAddressBookPeers, - types::{MetaAddr, PeerServices}, -}; -use zebra_node_services::mempool; - -use zebra_state::{GetBlockTemplateChainInfo, ReadRequest, ReadResponse}; - -use zebra_test::{ - mock_service::{MockService, PanicAssertion}, - vectors::BLOCK_MAINNET_1_BYTES, -}; - -use crate::methods::{ - get_block_template_rpcs::types::{ - get_block_template::{self, GetBlockTemplateRequestMode}, - get_mining_info, - long_poll::{LongPollId, LONG_POLL_ID_LENGTH}, - peer_info::PeerInfo, - submit_block, - subsidy::BlockSubsidy, - unified_address, validate_address, z_validate_address, - }, - hex_data::HexData, - tests::{snapshot::EXCESSIVE_BLOCK_HEIGHT, utils::fake_history_tree}, - GetBlockHash, GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, -}; - -pub async fn test_responses( - network: &Network, - mempool: MockService< - mempool::Request, - mempool::Response, - PanicAssertion, - zebra_node_services::BoxError, - >, - state: State, - read_state: ReadState, - settings: Settings, -) where - State: Service< - zebra_state::Request, - Response = zebra_state::Response, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, - ReadState: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - > + Clone - + Send - + Sync - + 'static, - >::Future: Send, -{ - let ( - block_verifier_router, - _transaction_verifier, - 
_parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( - zebra_consensus::Config::default(), - network, - state.clone(), - ) - .await; - - let mut mock_sync_status = MockSyncStatus::default(); - mock_sync_status.set_is_close_to_tip(true); - - #[allow(clippy::unnecessary_struct_initialization)] - let mining_config = crate::config::mining::Config { - miner_address: Some(transparent::Address::from_script_hash( - network.kind(), - [0xad; 20], - )), - extra_coinbase_data: None, - debug_like_zcashd: true, - internal_miner: true, - }; - - // nu5 block height - let fake_tip_height = NetworkUpgrade::Nu5.activation_height(network).unwrap(); - // nu5 block hash - let fake_tip_hash = - Hash::from_hex("0000000000d723156d9b65ffcf4984da7a19675ed7e2f06d9e5d5188af087bf8").unwrap(); - - // nu5 block time + 1 - let fake_min_time = DateTime32::from(1654008606); - // nu5 block time + 12 - let fake_cur_time = DateTime32::from(1654008617); - // nu5 block time + 123 - let fake_max_time = DateTime32::from(1654008728); - - // Use a valid fractional difficulty for snapshots - let pow_limit = network.target_difficulty_limit(); - let fake_difficulty = pow_limit * 2 / 3; - let fake_difficulty = CompactDifficulty::from(fake_difficulty); - - let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new(); - mock_chain_tip_sender.send_best_tip_height(fake_tip_height); - mock_chain_tip_sender.send_best_tip_hash(fake_tip_hash); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); - - let mock_address_book = MockAddressBookPeers::new(vec![MetaAddr::new_connected( - SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - network.default_port(), - ) - .into(), - &PeerServices::NODE_NETWORK, - false, - ) - .into_new_meta_addr(Instant::now(), DateTime32::now())]); - - // get an rpc instance with continuous blockchain state - let get_block_template_rpc = GetBlockTemplateRpcImpl::new( - network, - mining_config.clone(), - 
Buffer::new(mempool.clone(), 1), - read_state, - mock_chain_tip.clone(), - block_verifier_router.clone(), - mock_sync_status.clone(), - mock_address_book, - None, - ); - - if network.is_a_test_network() && !network.is_default_testnet() { - let fake_future_nu6_block_height = - NetworkUpgrade::Nu6.activation_height(network).unwrap().0 + 100_000; - let get_block_subsidy = get_block_template_rpc - .get_block_subsidy(Some(fake_future_nu6_block_height)) - .await - .expect("We should have a success response"); - snapshot_rpc_getblocksubsidy("future_nu6_height", get_block_subsidy, &settings); - // We only want a snapshot of the `getblocksubsidy` method for the non-default Testnet (with an NU6 activation height). - return; - } - - // `getblockcount` - let get_block_count = get_block_template_rpc - .get_block_count() - .expect("We should have a number"); - snapshot_rpc_getblockcount(get_block_count, &settings); - - // `getblockhash` - const BLOCK_HEIGHT10: i32 = 10; - - let get_block_hash = get_block_template_rpc - .get_block_hash(BLOCK_HEIGHT10) - .await - .expect("We should have a GetBlockHash struct"); - snapshot_rpc_getblockhash_valid(get_block_hash, &settings); - - let get_block_hash = get_block_template_rpc - .get_block_hash( - EXCESSIVE_BLOCK_HEIGHT - .try_into() - .expect("constant fits in i32"), - ) - .await; - snapshot_rpc_getblockhash_invalid("excessive_height", get_block_hash, &settings); - - // `getmininginfo` - let get_mining_info = get_block_template_rpc - .get_mining_info() - .await - .expect("We should have a success response"); - snapshot_rpc_getmininginfo(get_mining_info, &settings); - - // `getblocksubsidy` - let fake_future_block_height = fake_tip_height.0 + 100_000; - let get_block_subsidy = get_block_template_rpc - .get_block_subsidy(Some(fake_future_block_height)) - .await - .expect("We should have a success response"); - snapshot_rpc_getblocksubsidy("future_height", get_block_subsidy, &settings); - - let get_block_subsidy = get_block_template_rpc - 
.get_block_subsidy(None) - .await - .expect("We should have a success response"); - snapshot_rpc_getblocksubsidy("tip_height", get_block_subsidy, &settings); - - let get_block_subsidy = get_block_template_rpc - .get_block_subsidy(Some(EXCESSIVE_BLOCK_HEIGHT)) - .await - .expect("We should have a success response"); - snapshot_rpc_getblocksubsidy("excessive_height", get_block_subsidy, &settings); - - // `getpeerinfo` - let get_peer_info = get_block_template_rpc - .get_peer_info() - .await - .expect("We should have a success response"); - snapshot_rpc_getpeerinfo(get_peer_info, &settings); - - // `getnetworksolps` (and `getnetworkhashps`) - // - // TODO: add tests for excessive num_blocks and height (#6688) - // add the same tests for get_network_hash_ps - let get_network_sol_ps = get_block_template_rpc - .get_network_sol_ps(None, None) - .await - .expect("We should have a success response"); - snapshot_rpc_getnetworksolps(get_network_sol_ps, &settings); - - // `getblocktemplate` - the following snapshots use a mock read_state - - // get a new empty state - let read_state = MockService::build().for_unit_tests(); - - let make_mock_read_state_request_handler = || { - let mut read_state = read_state.clone(); - - async move { - read_state - .expect_request_that(|req| matches!(req, ReadRequest::ChainInfo)) - .await - .respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo { - expected_difficulty: fake_difficulty, - tip_height: fake_tip_height, - tip_hash: fake_tip_hash, - cur_time: fake_cur_time, - min_time: fake_min_time, - max_time: fake_max_time, - chain_history_root: fake_history_tree(network).hash(), - })); - } - }; - - let make_mock_mempool_request_handler = || { - let mut mempool = mempool.clone(); - - async move { - mempool - .expect_request(mempool::Request::FullTransactions) - .await - .respond(mempool::Response::FullTransactions { - transactions: vec![], - transaction_dependencies: Default::default(), - // tip hash needs to match chain info for long poll 
requests - last_seen_tip_hash: fake_tip_hash, - }); - } - }; - - // send tip hash and time needed for getblocktemplate rpc - mock_chain_tip_sender.send_best_tip_hash(fake_tip_hash); - - // create a new rpc instance with new state and mock - let get_block_template_rpc_mock_state = GetBlockTemplateRpcImpl::new( - network, - mining_config.clone(), - Buffer::new(mempool.clone(), 1), - read_state.clone(), - mock_chain_tip.clone(), - block_verifier_router, - mock_sync_status.clone(), - MockAddressBookPeers::default(), - None, - ); - - // Basic variant (default mode and no extra features) - - // Fake the ChainInfo and FullTransaction responses - let mock_read_state_request_handler = make_mock_read_state_request_handler(); - let mock_mempool_request_handler = make_mock_mempool_request_handler(); - - let get_block_template_fut = get_block_template_rpc_mock_state.get_block_template(None); - - let (get_block_template, ..) = tokio::join!( - get_block_template_fut, - mock_mempool_request_handler, - mock_read_state_request_handler, - ); - - let get_block_template::Response::TemplateMode(get_block_template) = - get_block_template.expect("unexpected error in getblocktemplate RPC call") - else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; - - let coinbase_tx: Transaction = get_block_template - .coinbase_txn - .data - .as_ref() - .zcash_deserialize_into() - .expect("coinbase bytes are valid"); - - snapshot_rpc_getblocktemplate( - "basic", - (*get_block_template).into(), - Some(coinbase_tx), - &settings, - ); - - // long polling feature with submit old field - - let long_poll_id: LongPollId = "0" - .repeat(LONG_POLL_ID_LENGTH) - .parse() - .expect("unexpected invalid LongPollId"); - - // Fake the ChainInfo and FullTransaction responses - let mock_read_state_request_handler = make_mock_read_state_request_handler(); - let mock_mempool_request_handler = make_mock_mempool_request_handler(); - - let 
get_block_template_fut = get_block_template_rpc_mock_state.get_block_template( - get_block_template::JsonParameters { - long_poll_id: long_poll_id.into(), - ..Default::default() - } - .into(), - ); - - let (get_block_template, ..) = tokio::join!( - get_block_template_fut, - mock_mempool_request_handler, - mock_read_state_request_handler, - ); - - let get_block_template::Response::TemplateMode(get_block_template) = - get_block_template.expect("unexpected error in getblocktemplate RPC call") - else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; - - let coinbase_tx: Transaction = get_block_template - .coinbase_txn - .data - .as_ref() - .zcash_deserialize_into() - .expect("coinbase bytes are valid"); - - snapshot_rpc_getblocktemplate( - "long_poll", - (*get_block_template).into(), - Some(coinbase_tx), - &settings, - ); - - // `getblocktemplate` proposal mode variant - - let get_block_template = get_block_template_rpc_mock_state.get_block_template(Some( - get_block_template::JsonParameters { - mode: GetBlockTemplateRequestMode::Proposal, - data: Some(HexData("".into())), - ..Default::default() - }, - )); - - let get_block_template = get_block_template - .await - .expect("unexpected error in getblocktemplate RPC call"); - - snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings); - - // the following snapshots use a mock read_state and block_verifier_router - - let mut mock_block_verifier_router = MockService::build().for_unit_tests(); - let get_block_template_rpc_mock_state_verifier = GetBlockTemplateRpcImpl::new( - network, - mining_config, - Buffer::new(mempool.clone(), 1), - read_state.clone(), - mock_chain_tip, - mock_block_verifier_router.clone(), - mock_sync_status, - MockAddressBookPeers::default(), - None, - ); - - let get_block_template_fut = get_block_template_rpc_mock_state_verifier.get_block_template( - Some(get_block_template::JsonParameters { - mode: 
GetBlockTemplateRequestMode::Proposal, - data: Some(HexData(BLOCK_MAINNET_1_BYTES.to_vec())), - ..Default::default() - }), - ); - - let mock_block_verifier_router_request_handler = async move { - mock_block_verifier_router - .expect_request_that(|req| matches!(req, zebra_consensus::Request::CheckProposal(_))) - .await - .respond(Hash::from([0; 32])); - }; - - let (get_block_template, ..) = tokio::join!( - get_block_template_fut, - mock_block_verifier_router_request_handler, - ); - - let get_block_template = - get_block_template.expect("unexpected error in getblocktemplate RPC call"); - - snapshot_rpc_getblocktemplate("proposal", get_block_template, None, &settings); - - // These RPC snapshots use the populated state - - // `submitblock` - - let submit_block = get_block_template_rpc - .submit_block(HexData("".into()), None) - .await - .expect("unexpected error in submitblock RPC call"); - - snapshot_rpc_submit_block_invalid(submit_block, &settings); - - // `validateaddress` - let founder_address = if network.is_mainnet() { - "t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR" - } else { - "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" - }; - - let validate_address = get_block_template_rpc - .validate_address(founder_address.to_string()) - .await - .expect("We should have a validate_address::Response"); - snapshot_rpc_validateaddress("basic", validate_address, &settings); - - let validate_address = get_block_template_rpc - .validate_address("".to_string()) - .await - .expect("We should have a validate_address::Response"); - snapshot_rpc_validateaddress("invalid", validate_address, &settings); - - // `z_validateaddress` - let founder_address = if network.is_mainnet() { - "t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR" - } else { - "t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi" - }; - - let z_validate_address = get_block_template_rpc - .z_validate_address(founder_address.to_string()) - .await - .expect("We should have a z_validate_address::Response"); - snapshot_rpc_z_validateaddress("basic", 
z_validate_address, &settings); - - let z_validate_address = get_block_template_rpc - .z_validate_address("".to_string()) - .await - .expect("We should have a z_validate_address::Response"); - snapshot_rpc_z_validateaddress("invalid", z_validate_address, &settings); - - // `getdifficulty` - // This RPC snapshot uses both the mock and populated states - - // Fake the ChainInfo response using the mock state - let mock_read_state_request_handler = make_mock_read_state_request_handler(); - - let get_difficulty_fut = get_block_template_rpc_mock_state.get_difficulty(); - - let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); - - let mock_get_difficulty = get_difficulty.expect("unexpected error in getdifficulty RPC call"); - - snapshot_rpc_getdifficulty_valid("mock", mock_get_difficulty, &settings); - - // `z_listunifiedreceivers` - - let ua1 = String::from("u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf"); - let z_list_unified_receivers = get_block_template_rpc - .z_list_unified_receivers(ua1) - .await - .expect("unexpected error in z_list_unified_receivers RPC call"); - - snapshot_rpc_z_listunifiedreceivers("ua1", z_list_unified_receivers, &settings); - - let ua2 = String::from("u1uf4qsmh037x2jp6k042h9d2w22wfp39y9cqdf8kcg0gqnkma2gf4g80nucnfeyde8ev7a6kf0029gnwqsgadvaye9740gzzpmr67nfkjjvzef7rkwqunqga4u4jges4tgptcju5ysd0"); - let z_list_unified_receivers = get_block_template_rpc - .z_list_unified_receivers(ua2) - .await - .expect("unexpected error in z_list_unified_receivers RPC call"); - - snapshot_rpc_z_listunifiedreceivers("ua2", z_list_unified_receivers, &settings); -} - -/// Snapshot `getblockcount` response, using `cargo insta` and JSON serialization. 
-fn snapshot_rpc_getblockcount(block_count: u32, settings: &insta::Settings) { - settings.bind(|| insta::assert_json_snapshot!("get_block_count", block_count)); -} - -/// Snapshot valid `getblockhash` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getblockhash_valid(block_hash: GetBlockHash, settings: &insta::Settings) { - settings.bind(|| insta::assert_json_snapshot!("get_block_hash_valid", block_hash)); -} - -/// Snapshot invalid `getblockhash` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getblockhash_invalid( - variant: &'static str, - block_hash: Result, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("get_block_hash_invalid_{variant}"), block_hash) - }); -} - -/// Snapshot `getblocktemplate` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getblocktemplate( - variant: &'static str, - block_template: get_block_template::Response, - coinbase_tx: Option, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("get_block_template_{variant}"), block_template) - }); - - if let Some(coinbase_tx) = coinbase_tx { - settings.bind(|| { - insta::assert_ron_snapshot!( - format!("get_block_template_{variant}.coinbase_tx"), - coinbase_tx - ) - }); - }; -} - -/// Snapshot `submitblock` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_submit_block_invalid( - submit_block_response: submit_block::Response, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!("snapshot_rpc_submit_block_invalid", submit_block_response) - }); -} - -/// Snapshot `getmininginfo` response, using `cargo insta` and JSON serialization. 
-fn snapshot_rpc_getmininginfo( - get_mining_info: get_mining_info::Response, - settings: &insta::Settings, -) { - settings.bind(|| insta::assert_json_snapshot!("get_mining_info", get_mining_info)); -} - -/// Snapshot `getblocksubsidy` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getblocksubsidy( - variant: &'static str, - get_block_subsidy: BlockSubsidy, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("get_block_subsidy_{variant}"), get_block_subsidy) - }); -} - -/// Snapshot `getpeerinfo` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getpeerinfo(get_peer_info: Vec, settings: &insta::Settings) { - settings.bind(|| insta::assert_json_snapshot!("get_peer_info", get_peer_info)); -} - -/// Snapshot `getnetworksolps` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_getnetworksolps(get_network_sol_ps: u64, settings: &insta::Settings) { - settings.bind(|| insta::assert_json_snapshot!("get_network_sol_ps", get_network_sol_ps)); -} - -/// Snapshot `validateaddress` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_validateaddress( - variant: &'static str, - validate_address: validate_address::Response, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("validate_address_{variant}"), validate_address) - }); -} - -/// Snapshot `z_validateaddress` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_z_validateaddress( - variant: &'static str, - z_validate_address: z_validate_address::Response, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("z_validate_address_{variant}"), z_validate_address) - }); -} - -/// Snapshot valid `getdifficulty` response, using `cargo insta` and JSON serialization. 
-fn snapshot_rpc_getdifficulty_valid( - variant: &'static str, - difficulty: f64, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("get_difficulty_valid_{variant}"), difficulty) - }); -} - -/// Snapshot `snapshot_rpc_z_listunifiedreceivers` response, using `cargo insta` and JSON serialization. -fn snapshot_rpc_z_listunifiedreceivers( - variant: &'static str, - response: unified_address::Response, - settings: &insta::Settings, -) { - settings.bind(|| { - insta::assert_json_snapshot!(format!("z_list_unified_receivers_{variant}"), response) - }); -} diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@mainnet_10.snap deleted file mode 100644 index 07a15a78408..00000000000 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@mainnet_10.snap +++ /dev/null @@ -1,5 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs -expression: block_count ---- -1687104 diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@testnet_10.snap deleted file mode 100644 index 21ed9c9bf4c..00000000000 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_count@testnet_10.snap +++ /dev/null @@ -1,5 +0,0 @@ ---- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs -expression: block_count ---- -1842420 diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_count@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_count@mainnet_10.snap new file mode 100644 index 00000000000..0a8012ad06e --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_count@mainnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block_count +--- +1687104 diff --git 
a/zebra-rpc/src/methods/tests/snapshots/get_block_count@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_count@testnet_10.snap new file mode 100644 index 00000000000..970d492c6a9 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_count@testnet_10.snap @@ -0,0 +1,5 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: block_count +--- +1842420 diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_invalid_excessive_height@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_hash_invalid_excessive_height@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_invalid_excessive_height@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_hash_invalid_excessive_height@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_invalid_excessive_height@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_hash_invalid_excessive_height@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_invalid_excessive_height@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_hash_invalid_excessive_height@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_valid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_hash_valid@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_valid@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_hash_valid@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_valid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_hash_valid@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_hash_valid@testnet_10.snap rename to 
zebra-rpc/src/methods/tests/snapshots/get_block_hash_valid@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_excessive_height@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_excessive_height@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_excessive_height@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_excessive_height@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_excessive_height@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_excessive_height@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_excessive_height@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_excessive_height@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_height@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_height@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_height@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_height@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_height@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_height@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_height@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_height@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_nu6_height@nu6testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_nu6_height@nu6testnet_10.snap 
similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_future_nu6_height@nu6testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_future_nu6_height@nu6testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_tip_height@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_tip_height@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_tip_height@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_tip_height@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_tip_height@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_tip_height@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_subsidy_tip_height@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_subsidy_tip_height@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_basic.coinbase_tx@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_basic.coinbase_tx@testnet_10.snap diff --git 
a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_basic@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_basic@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_basic@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_basic@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_basic@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_invalid-proposal@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_invalid-proposal@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_invalid-proposal@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_invalid-proposal@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_invalid-proposal@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_invalid-proposal@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_invalid-proposal@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_invalid-proposal@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap similarity index 100% rename from 
zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll.coinbase_tx@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll.coinbase_tx@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_long_poll@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_long_poll@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_proposal@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_template_proposal@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_proposal@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_proposal@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_proposal@testnet_10.snap 
b/zebra-rpc/src/methods/tests/snapshots/get_block_template_proposal@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_block_template_proposal@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_block_template_proposal@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_difficulty_valid_mock@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_difficulty_valid_mock@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_difficulty_valid_mock@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_difficulty_valid_mock@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_difficulty_valid_mock@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_difficulty_valid_mock@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_difficulty_valid_mock@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_difficulty_valid_mock@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_mining_info@mainnet_10.snap similarity index 70% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_mining_info@mainnet_10.snap index de309513443..bedfe06914a 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_mining_info@mainnet_10.snap @@ -1,5 +1,5 @@ --- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +source: zebra-rpc/src/methods/tests/snapshot.rs expression: get_mining_info --- { diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_mining_info@testnet_10.snap 
similarity index 70% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_mining_info@testnet_10.snap index 2051e6913ce..d7150714e46 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_mining_info@testnet_10.snap @@ -1,5 +1,5 @@ --- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +source: zebra-rpc/src/methods/tests/snapshot.rs expression: get_mining_info --- { diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_network_sol_ps@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_network_sol_ps@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_network_sol_ps@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_network_sol_ps@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_network_sol_ps@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_network_sol_ps@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_network_sol_ps@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_network_sol_ps@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_peer_info@mainnet_10.snap similarity index 57% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_peer_info@mainnet_10.snap index 651e1f005ee..568bac9709f 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_peer_info@mainnet_10.snap @@ -1,5 +1,5 @@ --- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +source: zebra-rpc/src/methods/tests/snapshot.rs 
expression: get_peer_info --- [ diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_peer_info@testnet_10.snap similarity index 57% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/get_peer_info@testnet_10.snap index 62a290aebed..badd0067d4c 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_peer_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_peer_info@testnet_10.snap @@ -1,5 +1,5 @@ --- -source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +source: zebra-rpc/src/methods/tests/snapshot.rs expression: get_peer_info --- [ diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/snapshot_rpc_submit_block_invalid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/snapshot_rpc_submit_block_invalid@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/snapshot_rpc_submit_block_invalid@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/snapshot_rpc_submit_block_invalid@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/snapshot_rpc_submit_block_invalid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/snapshot_rpc_submit_block_invalid@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/snapshot_rpc_submit_block_invalid@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/snapshot_rpc_submit_block_invalid@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_basic@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/validate_address_basic@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_basic@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/validate_address_basic@mainnet_10.snap diff 
--git a/zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_basic@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/validate_address_basic@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_basic@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/validate_address_basic@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_invalid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/validate_address_invalid@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_invalid@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/validate_address_invalid@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_invalid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/validate_address_invalid@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/validate_address_invalid@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/validate_address_invalid@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua1@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua1@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua1@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua1@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua1@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua1@testnet_10.snap diff --git 
a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua2@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua2@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua2@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua2@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_list_unified_receivers_ua2@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_list_unified_receivers_ua2@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_basic@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_validate_address_basic@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_basic@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_validate_address_basic@mainnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_basic@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_validate_address_basic@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_basic@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_validate_address_basic@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_invalid@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_validate_address_invalid@mainnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_invalid@mainnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_validate_address_invalid@mainnet_10.snap diff 
--git a/zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_invalid@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/z_validate_address_invalid@testnet_10.snap similarity index 100% rename from zebra-rpc/src/methods/tests/snapshot/snapshots/z_validate_address_invalid@testnet_10.snap rename to zebra-rpc/src/methods/tests/snapshots/z_validate_address_invalid@testnet_10.snap diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 057ff93c5f3..6cef3fbec1e 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -6,47 +6,65 @@ use futures::FutureExt; use tower::buffer::Buffer; use zebra_chain::{ - amount::Amount, - block::Block, + amount::{Amount, NonNegative}, + block::{Block, Hash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, + chain_sync_status::MockSyncStatus, chain_tip::{mock::MockChainTip, NoChainTip}, history_tree::HistoryTree, parameters::Network::*, - serialization::{ZcashDeserializeInto, ZcashSerialize}, - transaction::UnminedTxId, + serialization::{DateTime32, ZcashDeserializeInto, ZcashSerialize}, + transaction::{zip317, UnminedTxId, VerifiedUnminedTx}, + work::difficulty::{CompactDifficulty, ExpandedDifficulty, ParameterDifficulty as _, U256}, }; -use zebra_network::address_book_peers::MockAddressBookPeers; +use zebra_consensus::MAX_BLOCK_SIGOPS; +use zebra_network::{address_book_peers::MockAddressBookPeers, types::PeerServices}; use zebra_node_services::BoxError; - -use zebra_state::{GetBlockTemplateChainInfo, IntoDisk, LatestChainTip, ReadStateService}; +use zebra_state::{ + GetBlockTemplateChainInfo, IntoDisk, LatestChainTip, ReadRequest, ReadResponse, + ReadStateService, +}; use zebra_test::mock_service::MockService; +use crate::methods::{ + get_block_template::constants::{CAPABILITIES_FIELD, MUTABLE_FIELD, NONCE_RANGE_FIELD}, + hex_data::HexData, + tests::utils::fake_history_tree, +}; + use super::super::*; +use config::mining; +use 
get_block_template::GetBlockTemplateRequestMode; +use types::long_poll::LONG_POLL_ID_LENGTH; + #[tokio::test(flavor = "multi_thread")] async fn rpc_getinfo() { let _init_guard = zebra_test::init(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut read_state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( - "0.0.1", - "/Zebra:RPC test/", + let (rpc, rpc_tx_queue) = RpcImpl::new( Mainnet, - false, - true, + Default::default(), + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), NoChainTip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); let getinfo_future = tokio::spawn(async move { rpc.get_info().await }); // Make the mock service respond with - let response_handler = state + let response_handler = read_state .expect_request(zebra_state::ReadRequest::ChainInfo) .await; response_handler.respond(zebra_state::ReadResponse::ChainInfo( @@ -72,13 +90,13 @@ async fn rpc_getinfo() { // make sure there is a `subversion` field, // and that is equal to the Zebra user agent. 
- assert_eq!(get_info.subversion, format!("/Zebra:RPC test/")); + assert_eq!(get_info.subversion, format!("RPC test")); mempool.expect_no_requests().await; - state.expect_no_requests().await; + read_state.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -140,22 +158,24 @@ async fn rpc_getblock() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - read_state.clone(), - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // Make height calls with verbosity=0 and check response @@ -571,7 +591,7 @@ async fn rpc_getblock() { mempool.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -580,21 +600,24 @@ async fn rpc_getblock_parse_error() { let _init_guard = zebra_test::init(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: 
MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut read_state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), NoChainTip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); // Make sure we get an error if Zebra can't parse the block height. @@ -614,10 +637,10 @@ async fn rpc_getblock_parse_error() { .is_err()); mempool.expect_no_requests().await; - state.expect_no_requests().await; + read_state.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -628,19 +651,21 @@ async fn rpc_getblock_missing_error() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), NoChainTip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); // Make sure Zebra returns the correct error code `-8` for missing 
blocks @@ -674,7 +699,7 @@ async fn rpc_getblock_missing_error() { state.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -689,23 +714,24 @@ async fn rpc_getblockheader() { .collect(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - read_state.clone(), - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // Make height calls with verbose=false and check response @@ -797,7 +823,7 @@ async fn rpc_getblockheader() { mempool.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -819,22 +845,24 @@ async fn rpc_getbestblockhash() { // Get a mempool handle let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create a populated state service, the tip will be in `NUMBER_OF_BLOCKS`. 
- let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // Get the tip hash using RPC method `get_best_block_hash` @@ -849,7 +877,7 @@ async fn rpc_getbestblockhash() { mempool.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -864,26 +892,27 @@ async fn rpc_getrawtransaction() { .collect(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Create a populated state service - let (_state, read_state, _latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, _, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; - let (latest_chain_tip, latest_chain_tip_sender) = MockChainTip::new(); - latest_chain_tip_sender.send_best_tip_height(Height(10)); + let (tip, tip_sender) = MockChainTip::new(); + tip_sender.send_best_tip_height(Height(10)); // Init RPC let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + 
Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - read_state.clone(), - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // Test case where transaction is in mempool. @@ -1002,7 +1031,7 @@ async fn rpc_getrawtransaction() { // Test case where transaction is _not_ in mempool with a fake chain tip height of 0 // Skip genesis because its tx is not indexed. - latest_chain_tip_sender.send_best_tip_height(Height(0)); + tip_sender.send_best_tip_height(Height(0)); for (block_idx, block) in blocks.iter().enumerate().skip(1) { for tx in block.transactions.iter() { let (confirmations, expected_confirmations) = @@ -1019,7 +1048,7 @@ async fn rpc_getrawtransaction() { // Test case where transaction is _not_ in mempool with a fake chain tip height of 0 // Skip genesis because its tx is not indexed. 
- latest_chain_tip_sender.send_best_tip_height(Height(20)); + tip_sender.send_best_tip_height(Height(20)); for (block_idx, block) in blocks.iter().enumerate().skip(1) { for tx in block.transactions.iter() { let (confirmations, expected_confirmations) = @@ -1035,7 +1064,7 @@ async fn rpc_getrawtransaction() { } // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -1052,21 +1081,23 @@ async fn rpc_getaddresstxids_invalid_arguments() { .collect(); // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // call the method with an invalid address string @@ -1106,7 +1137,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { mempool.expect_no_requests().await; // The queue task should continue without errors or panics - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!(rpc_tx_queue_task_result.is_none()); } @@ -1235,23 +1266,26 @@ async fn rpc_getaddresstxids_response_with( end: Option, address: &transparent::Address, read_state: &ReadStateService, - latest_chain_tip: &LatestChainTip, + tip: 
&LatestChainTip, expected_response_len: usize, ) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let (_tx, rx) = tokio::sync::watch::channel(None); - let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + let (rpc, rpc_tx_queue) = RpcImpl::new( + network.clone(), + Default::default(), + Default::default(), "0.0.1", "RPC test", - network.clone(), - false, - true, Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), - latest_chain_tip.clone(), - MockAddressBookPeers::new(vec![]), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip.clone(), + MockAddressBookPeers::default(), rx, + None, ); // call the method with valid arguments @@ -1274,11 +1308,11 @@ async fn rpc_getaddresstxids_response_with( // (If we don't, opening ~100 simultaneous states causes process file descriptor limit errors.) // // TODO: abort all the join handles in all the tests, except one? - rpc_tx_queue_task_handle.abort(); + rpc_tx_queue.abort(); // The queue task should not have panicked or exited by itself. // It can still be running, or it can have exited due to the abort. 
- let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); + let rpc_tx_queue_task_result = rpc_tx_queue.now_or_never(); assert!( rpc_tx_queue_task_result.is_none() || rpc_tx_queue_task_result @@ -1293,25 +1327,27 @@ async fn rpc_getaddressutxos_invalid_arguments() { let _init_guard = zebra_test::init(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut read_state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let (_tx, rx) = tokio::sync::watch::channel(None); - let rpc = RpcImpl::new( + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), NoChainTip, - MockAddressBookPeers::new(vec![]), + MockAddressBookPeers::default(), rx, + None, ); // call the method with an invalid address string let error = rpc - .0 .get_address_utxos(AddressStrings::new(vec!["t1invalidaddress".to_owned()])) .await .unwrap_err(); @@ -1319,7 +1355,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { assert_eq!(error.code(), ErrorCode::ServerError(-5).code()); mempool.expect_no_requests().await; - state.expect_no_requests().await; + read_state.expect_no_requests().await; } #[tokio::test(flavor = "multi_thread")] @@ -1340,27 +1376,28 @@ async fn rpc_getaddressutxos_response() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; let (_tx, rx) = 
tokio::sync::watch::channel(None); - let rpc = RpcImpl::new( + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), + Default::default(), "0.0.1", "RPC test", - Mainnet, - false, - true, Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), - latest_chain_tip, - MockAddressBookPeers::new(vec![]), + MockService::build().for_unit_tests(), + MockSyncStatus::default(), + tip, + MockAddressBookPeers::default(), rx, + None, ); // call the method with a valid address let addresses = vec![address.to_string()]; let response = rpc - .0 .get_address_utxos(AddressStrings::new(addresses)) .await .expect("address is valid so no error can happen here"); @@ -1373,9 +1410,6 @@ async fn rpc_getaddressutxos_response() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getblockcount() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); // Create a continuous chain of mainnet blocks from genesis @@ -1392,15 +1426,9 @@ async fn rpc_getblockcount() { // Get a mempool handle let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create a populated state service, the tip will be in `NUMBER_OF_BLOCKS`. 
- let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (state, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &Mainnet, state.clone(), @@ -1408,22 +1436,25 @@ async fn rpc_getblockcount() { .await; // Init RPC - let get_block_template_rpc = GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, Default::default(), + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), block_verifier_router, MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); // Get the tip height using RPC method `get_block_count` - let get_block_count = get_block_template_rpc - .get_block_count() - .expect("We should have a number"); + let get_block_count = rpc.get_block_count().expect("We should have a number"); // Check if response is equal to block 10 hash. 
assert_eq!(get_block_count, tip_block_height.0); @@ -1433,23 +1464,14 @@ async fn rpc_getblockcount() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getblockcount_empty_state() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); // Get a mempool handle let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create an empty state - let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::init_test_services(&Mainnet); + let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet); - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &Mainnet, state.clone(), @@ -1457,20 +1479,25 @@ async fn rpc_getblockcount_empty_state() { .await; // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), block_verifier_router, MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); // Get the tip height using RPC method `get_block_count - let get_block_count = get_block_template_rpc.get_block_count(); + let get_block_count = rpc.get_block_count(); // state an empty so we should get an error assert!(get_block_count.is_err()); @@ -1486,24 +1513,13 @@ async fn rpc_getblockcount_empty_state() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getpeerinfo() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use 
zebra_network::{address_book_peers::MockAddressBookPeers, types::PeerServices}; - let _init_guard = zebra_test::init(); let network = Mainnet; - // Get a mempool handle let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Create an empty state - let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::init_test_services(&Mainnet); + let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet); - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &network, state.clone(), @@ -1560,20 +1576,25 @@ async fn rpc_getpeerinfo() { ]); // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &network, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + network, Default::default(), + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), block_verifier_router, MockSyncStatus::default(), + tip.clone(), mock_address_book, + rx, None, ); // Call `get_peer_info` - let get_peer_info = get_block_template_rpc + let get_peer_info = rpc .get_peer_info() .await .expect("We should have an array of addresses"); @@ -1603,9 +1624,6 @@ async fn rpc_getpeerinfo() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getblockhash() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); // Create a continuous chain of mainnet blocks from genesis @@ -1615,16 +1633,9 @@ async fn rpc_getblockhash() { .collect(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Create a populated 
state service - let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (state, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &Mainnet, state.clone(), @@ -1632,21 +1643,26 @@ async fn rpc_getblockhash() { .await; // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, Default::default(), + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip.clone(), - tower::ServiceBuilder::new().service(block_verifier_router), + Buffer::new(read_state.clone(), 1), + block_verifier_router, MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); // Query the hashes using positive indexes for (i, block) in blocks.iter().enumerate() { - let get_block_hash = get_block_template_rpc + let get_block_hash = rpc .get_block_hash(i.try_into().expect("usize always fits in i32")) .await .expect("We should have a GetBlockHash struct"); @@ -1656,7 +1672,7 @@ async fn rpc_getblockhash() { // Query the hashes using negative indexes for i in (-10..=-1).rev() { - let get_block_hash = get_block_template_rpc + let get_block_hash = rpc .get_block_hash(i) .await .expect("We should have a GetBlockHash struct"); @@ -1672,9 +1688,6 @@ async fn rpc_getblockhash() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getmininginfo() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); 
// Create a continuous chain of mainnet blocks from genesis @@ -1684,33 +1697,33 @@ async fn rpc_getmininginfo() { .collect(); // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), Default::default(), + "0.0.1", + "RPC test", MockService::build().for_unit_tests(), - read_state, - latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), MockService::build().for_unit_tests(), MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); - get_block_template_rpc - .get_mining_info() + rpc.get_mining_info() .await .expect("get_mining_info call should succeed"); } #[tokio::test(flavor = "multi_thread")] async fn rpc_getnetworksolps() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); // Create a continuous chain of mainnet blocks from genesis @@ -1720,19 +1733,23 @@ async fn rpc_getnetworksolps() { .collect(); // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.clone(), &Mainnet).await; + let (_, read_state, tip, _) = zebra_state::populated_state(blocks.clone(), &Mainnet).await; // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), Default::default(), + "0.0.1", + "RPC test", MockService::build().for_unit_tests(), - read_state, - 
latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), MockService::build().for_unit_tests(), MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); @@ -1762,9 +1779,8 @@ async fn rpc_getnetworksolps() { ]; for (num_blocks_input, height_input, return_value) in get_network_sol_ps_inputs { - let get_network_sol_ps_result = get_block_template_rpc - .get_network_sol_ps(num_blocks_input, height_input) - .await; + let get_network_sol_ps_result = + rpc.get_network_sol_ps(num_blocks_input, height_input).await; assert_eq!( get_network_sol_ps_result, return_value, "get_network_sol_ps({num_blocks_input:?}, {height_input:?}) result\n\ @@ -1783,37 +1799,10 @@ async fn rpc_getblocktemplate() { } async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { - use zebra_chain::{ - amount::NonNegative, - block::{Hash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, - chain_sync_status::MockSyncStatus, - serialization::DateTime32, - transaction::{zip317, VerifiedUnminedTx}, - work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256}, - }; - use zebra_consensus::MAX_BLOCK_SIGOPS; - use zebra_network::address_book_peers::MockAddressBookPeers; - use zebra_state::{GetBlockTemplateChainInfo, ReadRequest, ReadResponse}; - - use crate::methods::{ - get_block_template_rpcs::{ - constants::{ - GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD, GET_BLOCK_TEMPLATE_MUTABLE_FIELD, - GET_BLOCK_TEMPLATE_NONCE_RANGE_FIELD, - }, - get_block_template::{self, GetBlockTemplateRequestMode}, - types::long_poll::LONG_POLL_ID_LENGTH, - }, - hex_data::HexData, - tests::utils::fake_history_tree, - }; - let _init_guard = zebra_test::init(); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - - let read_state = MockService::build().for_unit_tests(); - let block_verifier_router = MockService::build().for_unit_tests(); + let read_state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut mock_sync_status = 
MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1831,7 +1820,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { }; #[allow(clippy::unnecessary_struct_initialization)] - let mining_config = crate::config::mining::Config { + let mining_conf = crate::config::mining::Config { miner_address: miner_address.clone(), extra_coinbase_data: None, debug_like_zcashd: true, @@ -1851,21 +1840,26 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let fake_max_time = DateTime32::from(1654008728); let fake_difficulty = CompactDifficulty::from(ExpandedDifficulty::from(U256::one())); - let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new(); - mock_chain_tip_sender.send_best_tip_height(fake_tip_height); - mock_chain_tip_sender.send_best_tip_hash(fake_tip_hash); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); + let (mock_tip, mock_tip_sender) = MockChainTip::new(); + mock_tip_sender.send_best_tip_height(fake_tip_height); + mock_tip_sender.send_best_tip_hash(fake_tip_hash); + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); // Init RPC - let get_block_template_rpc = GetBlockTemplateRpcImpl::new( - &Mainnet, - mining_config, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + mining_conf, + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state.clone(), - mock_chain_tip, - block_verifier_router, + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), mock_sync_status.clone(), + mock_tip, MockAddressBookPeers::default(), + rx, None, ); @@ -1903,7 +1897,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { } }; - let get_block_template_fut = get_block_template_rpc.get_block_template(None); + let get_block_template_fut = rpc.get_block_template(None); let (get_block_template, ..) 
= tokio::join!( get_block_template_fut, make_mock_mempool_request_handler(vec![], fake_tip_hash), @@ -1913,7 +1907,9 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let get_block_template::Response::TemplateMode(get_block_template) = get_block_template.expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) }; let coinbase_transaction = @@ -1929,10 +1925,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { miner_address ); - assert_eq!( - get_block_template.capabilities, - GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD.to_vec() - ); + assert_eq!(get_block_template.capabilities, CAPABILITIES_FIELD.to_vec()); assert_eq!(get_block_template.version, ZCASH_BLOCK_VERSION); assert!(get_block_template.transactions.is_empty()); assert_eq!( @@ -1943,14 +1936,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect("test vector is valid") ); assert_eq!(get_block_template.min_time, fake_min_time); - assert_eq!( - get_block_template.mutable, - GET_BLOCK_TEMPLATE_MUTABLE_FIELD.to_vec() - ); - assert_eq!( - get_block_template.nonce_range, - GET_BLOCK_TEMPLATE_NONCE_RANGE_FIELD - ); + assert_eq!(get_block_template.mutable, MUTABLE_FIELD.to_vec()); + assert_eq!(get_block_template.nonce_range, NONCE_RANGE_FIELD); assert_eq!(get_block_template.sigop_limit, MAX_BLOCK_SIGOPS); assert_eq!(get_block_template.size_limit, MAX_BLOCK_BYTES); assert_eq!(get_block_template.cur_time, fake_cur_time); @@ -1978,8 +1965,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { Amount::::zero() ); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200)); - let get_block_template_sync_error = get_block_template_rpc + 
mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200)); + let get_block_template_sync_error = rpc .get_block_template(None) .await .expect_err("needs an error when estimated distance to network chain tip is far"); @@ -1991,8 +1978,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { mock_sync_status.set_is_close_to_tip(false); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); - let get_block_template_sync_error = get_block_template_rpc + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); + let get_block_template_sync_error = rpc .get_block_template(None) .await .expect_err("needs an error when syncer is not close to tip"); @@ -2002,8 +1989,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { ErrorCode::ServerError(-10).code() ); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200)); - let get_block_template_sync_error = get_block_template_rpc + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200)); + let get_block_template_sync_error = rpc .get_block_template(None) .await .expect_err("needs an error when syncer is not close to tip or estimated distance to network chain tip is far"); @@ -2013,7 +2000,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { ErrorCode::ServerError(-10).code() ); - let get_block_template_sync_error = get_block_template_rpc + let get_block_template_sync_error = rpc .get_block_template(Some(get_block_template::JsonParameters { mode: GetBlockTemplateRequestMode::Proposal, ..Default::default() @@ -2026,7 +2013,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { ErrorCode::InvalidParams.code() ); - let get_block_template_sync_error = get_block_template_rpc + let get_block_template_sync_error = rpc .get_block_template(Some(get_block_template::JsonParameters { data: Some(HexData("".into())), ..Default::default() @@ -2040,7 +2027,7 @@ async fn 
rpc_getblocktemplate_mining_address(use_p2pkh: bool) { ); // The long poll id is valid, so it returns a state error instead - let get_block_template_sync_error = get_block_template_rpc + let get_block_template_sync_error = rpc .get_block_template(Some(get_block_template::JsonParameters { // This must parse as a LongPollId. // It must be the correct length and have hex/decimal digits. @@ -2092,10 +2079,10 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { mock_sync_status.set_is_close_to_tip(true); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); let (get_block_template, ..) = tokio::join!( - get_block_template_rpc.get_block_template(None), + rpc.get_block_template(None), make_mock_mempool_request_handler(vec![verified_unmined_tx], next_fake_tip_hash), make_mock_read_state_request_handler(), ); @@ -2103,7 +2090,9 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let get_block_template::Response::TemplateMode(get_block_template) = get_block_template.expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) }; // mempool transactions should be omitted if the tip hash in the GetChainInfo response from the state @@ -2115,11 +2104,6 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { #[tokio::test(flavor = "multi_thread")] async fn rpc_submitblock_errors() { - use zebra_chain::chain_sync_status::MockSyncStatus; - use zebra_network::address_book_peers::MockAddressBookPeers; - - use crate::methods::{get_block_template_rpcs::types::submit_block, hex_data::HexData}; - let _init_guard = zebra_test::init(); // Create a continuous chain of mainnet blocks from genesis @@ -2130,16 +2114,10 @@ 
async fn rpc_submitblock_errors() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create a populated state service - let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks, &Mainnet).await; + let (state, read_state, tip, _) = zebra_state::populated_state(blocks, &Mainnet).await; // Init RPCs - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &Mainnet, state.clone(), @@ -2147,23 +2125,26 @@ async fn rpc_submitblock_errors() { .await; // Init RPC - let get_block_template_rpc = GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, Default::default(), + Default::default(), + "0.0.1", + "RPC test", Buffer::new(mempool.clone(), 1), - read_state, - latest_chain_tip.clone(), + Buffer::new(read_state.clone(), 1), block_verifier_router, MockSyncStatus::default(), + tip.clone(), MockAddressBookPeers::default(), + rx, None, ); // Try to submit pre-populated blocks and assert that it responds with duplicate. 
for (_height, &block_bytes) in zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS.iter() { - let submit_block_response = get_block_template_rpc - .submit_block(HexData(block_bytes.into()), None) - .await; + let submit_block_response = rpc.submit_block(HexData(block_bytes.into()), None).await; assert_eq!( submit_block_response, @@ -2171,7 +2152,7 @@ async fn rpc_submitblock_errors() { ); } - let submit_block_response = get_block_template_rpc + let submit_block_response = rpc .submit_block( HexData(zebra_test::vectors::BAD_BLOCK_MAINNET_202_BYTES.to_vec()), None, @@ -2190,28 +2171,26 @@ async fn rpc_submitblock_errors() { #[tokio::test(flavor = "multi_thread")] async fn rpc_validateaddress() { - use get_block_template_rpcs::types::validate_address; - use zebra_chain::{chain_sync_status::MockSyncStatus, chain_tip::mock::MockChainTip}; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); - let (mock_chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); - - // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, Default::default(), + Default::default(), + "0.0.1", + "RPC test", MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), - mock_chain_tip, MockService::build().for_unit_tests(), MockSyncStatus::default(), + NoChainTip, MockAddressBookPeers::default(), + rx, None, ); - let validate_address = get_block_template_rpc + let validate_address = rpc .validate_address("t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR".to_string()) .await .expect("we should have a validate_address::Response"); @@ -2221,7 +2200,7 @@ async fn rpc_validateaddress() { "Mainnet founder address should be valid on Mainnet" ); - let validate_address = get_block_template_rpc + let validate_address = rpc .validate_address("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi".to_string()) .await .expect("We 
should have a validate_address::Response"); @@ -2235,28 +2214,27 @@ async fn rpc_validateaddress() { #[tokio::test(flavor = "multi_thread")] async fn rpc_z_validateaddress() { - use get_block_template_rpcs::types::z_validate_address; - use zebra_chain::{chain_sync_status::MockSyncStatus, chain_tip::mock::MockChainTip}; - use zebra_network::address_book_peers::MockAddressBookPeers; - let _init_guard = zebra_test::init(); - let (mock_chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); - // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), Default::default(), + "0.0.1", + "RPC test", MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), - mock_chain_tip, MockService::build().for_unit_tests(), MockSyncStatus::default(), + NoChainTip, MockAddressBookPeers::default(), + rx, None, ); - let z_validate_address = get_block_template_rpc + let z_validate_address = rpc .z_validate_address("t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR".to_string()) .await .expect("we should have a z_validate_address::Response"); @@ -2266,7 +2244,7 @@ async fn rpc_z_validateaddress() { "Mainnet founder address should be valid on Mainnet" ); - let z_validate_address = get_block_template_rpc + let z_validate_address = rpc .z_validate_address("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi".to_string()) .await .expect("We should have a z_validate_address::Response"); @@ -2280,32 +2258,15 @@ async fn rpc_z_validateaddress() { #[tokio::test(flavor = "multi_thread")] async fn rpc_getdifficulty() { - use zebra_chain::{ - block::Hash, - chain_sync_status::MockSyncStatus, - chain_tip::mock::MockChainTip, - serialization::DateTime32, - work::difficulty::{CompactDifficulty, ExpandedDifficulty, ParameterDifficulty as _, U256}, - }; - - use zebra_network::address_book_peers::MockAddressBookPeers; - - use 
zebra_state::{GetBlockTemplateChainInfo, ReadRequest, ReadResponse}; - - use crate::{config::mining::Config, methods::tests::utils::fake_history_tree}; - let _init_guard = zebra_test::init(); - let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - - let read_state = MockService::build().for_unit_tests(); - let block_verifier_router = MockService::build().for_unit_tests(); + let read_state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); #[allow(clippy::unnecessary_struct_initialization)] - let mining_config = Config { + let mining_conf = mining::Config { miner_address: None, extra_coinbase_data: None, debug_like_zcashd: true, @@ -2324,21 +2285,26 @@ async fn rpc_getdifficulty() { // nu5 block time + 123 let fake_max_time = DateTime32::from(1654008728); - let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new(); - mock_chain_tip_sender.send_best_tip_height(fake_tip_height); - mock_chain_tip_sender.send_best_tip_hash(fake_tip_hash); - mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); + let (mock_tip, mock_tip_sender) = MockChainTip::new(); + mock_tip_sender.send_best_tip_height(fake_tip_height); + mock_tip_sender.send_best_tip_hash(fake_tip_hash); + mock_tip_sender.send_estimated_distance_to_network_chain_tip(Some(0)); // Init RPC - let get_block_template_rpc = GetBlockTemplateRpcImpl::new( - &Mainnet, - mining_config, - Buffer::new(mempool.clone(), 1), - read_state.clone(), - mock_chain_tip, - block_verifier_router, - mock_sync_status.clone(), + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + mining_conf, + Default::default(), + "0.0.1", + "RPC test", + MockService::build().for_unit_tests(), + Buffer::new(read_state.clone(), 1), + MockService::build().for_unit_tests(), + mock_sync_status, + mock_tip, MockAddressBookPeers::default(), 
+ rx, None, ); @@ -2361,7 +2327,7 @@ async fn rpc_getdifficulty() { })); }; - let get_difficulty_fut = get_block_template_rpc.get_difficulty(); + let get_difficulty_fut = rpc.get_difficulty(); let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); // Our implementation is slightly different to `zcashd`, so we require 6 significant figures @@ -2387,7 +2353,7 @@ async fn rpc_getdifficulty() { })); }; - let get_difficulty_fut = get_block_template_rpc.get_difficulty(); + let get_difficulty_fut = rpc.get_difficulty(); let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); assert_eq!(format!("{:.5}", get_difficulty.unwrap()), "1.00000"); @@ -2410,7 +2376,7 @@ async fn rpc_getdifficulty() { })); }; - let get_difficulty_fut = get_block_template_rpc.get_difficulty(); + let get_difficulty_fut = rpc.get_difficulty(); let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); assert_eq!(format!("{:.5}", get_difficulty.unwrap()), "1.50000"); @@ -2433,7 +2399,7 @@ async fn rpc_getdifficulty() { })); }; - let get_difficulty_fut = get_block_template_rpc.get_difficulty(); + let get_difficulty_fut = rpc.get_difficulty(); let (get_difficulty, ..) 
= tokio::join!(get_difficulty_fut, mock_read_state_request_handler,); assert_eq!(format!("{:.2}", get_difficulty.unwrap()), "4096.00"); @@ -2443,34 +2409,32 @@ async fn rpc_getdifficulty() { async fn rpc_z_listunifiedreceivers() { let _init_guard = zebra_test::init(); - use zebra_chain::{chain_sync_status::MockSyncStatus, chain_tip::mock::MockChainTip}; - use zebra_network::address_book_peers::MockAddressBookPeers; - - let _init_guard = zebra_test::init(); - - let (mock_chain_tip, _mock_chain_tip_sender) = MockChainTip::new(); - // Init RPC - let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &Mainnet, + let (_tx, rx) = tokio::sync::watch::channel(None); + let (rpc, _) = RpcImpl::new( + Mainnet, + Default::default(), Default::default(), + "0.0.1", + "RPC test", MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), - mock_chain_tip, MockService::build().for_unit_tests(), MockSyncStatus::default(), + NoChainTip, MockAddressBookPeers::default(), + rx, None, ); // invalid address - assert!(get_block_template_rpc + assert!(rpc .z_list_unified_receivers("invalid string for an address".to_string()) .await .is_err()); // address taken from https://github.com/zcash-hackworks/zcash-test-vectors/blob/master/test-vectors/zcash/unified_address.json#L4 - let response = get_block_template_rpc.z_list_unified_receivers("u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf".to_string()).await.unwrap(); + let response = rpc.z_list_unified_receivers("u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf".to_string()).await.unwrap(); assert_eq!(response.orchard(), None); assert_eq!( response.sapling(), @@ -2485,8 +2449,13 @@ async fn rpc_z_listunifiedreceivers() { assert_eq!(response.p2sh(), None); // address taken from 
https://github.com/zcash-hackworks/zcash-test-vectors/blob/master/test-vectors/zcash/unified_address.json#L39 - let response = get_block_template_rpc.z_list_unified_receivers("u12acx92vw49jek4lwwnjtzm0cssn2wxfneu7ryj4amd8kvnhahdrq0htsnrwhqvl92yg92yut5jvgygk0rqfs4lgthtycsewc4t57jyjn9p2g6ffxek9rdg48xe5kr37hxxh86zxh2ef0u2lu22n25xaf3a45as6mtxxlqe37r75mndzu9z2fe4h77m35c5mrzf4uqru3fjs39ednvw9ay8nf9r8g9jx8rgj50mj098exdyq803hmqsek3dwlnz4g5whc88mkvvjnfmjldjs9hm8rx89ctn5wxcc2e05rcz7m955zc7trfm07gr7ankf96jxwwfcqppmdefj8gc6508gep8ndrml34rdpk9tpvwzgdcv7lk2d70uh5jqacrpk6zsety33qcc554r3cls4ajktg03d9fye6exk8gnve562yadzsfmfh9d7v6ctl5ufm9ewpr6se25c47huk4fh2hakkwerkdd2yy3093snsgree5lt6smejfvse8v".to_string()).await.unwrap(); - assert_eq!(response.orchard(), Some(String::from("u10c5q7qkhu6f0ktaz7jqu4sfsujg0gpsglzudmy982mku7t0uma52jmsaz8h24a3wa7p0jwtsjqt8shpg25cvyexzlsw3jtdz4v6w70lv"))); + let response = rpc.z_list_unified_receivers("u12acx92vw49jek4lwwnjtzm0cssn2wxfneu7ryj4amd8kvnhahdrq0htsnrwhqvl92yg92yut5jvgygk0rqfs4lgthtycsewc4t57jyjn9p2g6ffxek9rdg48xe5kr37hxxh86zxh2ef0u2lu22n25xaf3a45as6mtxxlqe37r75mndzu9z2fe4h77m35c5mrzf4uqru3fjs39ednvw9ay8nf9r8g9jx8rgj50mj098exdyq803hmqsek3dwlnz4g5whc88mkvvjnfmjldjs9hm8rx89ctn5wxcc2e05rcz7m955zc7trfm07gr7ankf96jxwwfcqppmdefj8gc6508gep8ndrml34rdpk9tpvwzgdcv7lk2d70uh5jqacrpk6zsety33qcc554r3cls4ajktg03d9fye6exk8gnve562yadzsfmfh9d7v6ctl5ufm9ewpr6se25c47huk4fh2hakkwerkdd2yy3093snsgree5lt6smejfvse8v".to_string()).await.unwrap(); + assert_eq!( + response.orchard(), + Some(String::from( + "u10c5q7qkhu6f0ktaz7jqu4sfsujg0gpsglzudmy982mku7t0uma52jmsaz8h24a3wa7p0jwtsjqt8shpg25cvyexzlsw3jtdz4v6w70lv" + )) + ); assert_eq!(response.sapling(), None); assert_eq!( response.p2pkh(), diff --git a/zebra-rpc/src/methods/types.rs b/zebra-rpc/src/methods/types.rs index 07ceb58e1f0..b596950b0d7 100644 --- a/zebra-rpc/src/methods/types.rs +++ b/zebra-rpc/src/methods/types.rs @@ -1,11 +1,16 @@ //! Types used in RPC methods. 
-mod get_blockchain_info; -mod get_raw_mempool; -mod transaction; -mod zec; - -pub use get_blockchain_info::Balance; -pub use get_raw_mempool::{GetRawMempool, MempoolObject}; -pub use transaction::{Input, TransactionObject}; -pub use zec::Zec; +pub mod default_roots; +pub mod get_block_template; +pub mod get_blockchain_info; +pub mod get_mining_info; +pub mod get_raw_mempool; +pub mod long_poll; +pub mod peer_info; +pub mod submit_block; +pub mod subsidy; +pub mod transaction; +pub mod unified_address; +pub mod validate_address; +pub mod z_validate_address; +pub mod zec; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/default_roots.rs b/zebra-rpc/src/methods/types/default_roots.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/default_roots.rs rename to zebra-rpc/src/methods/types/default_roots.rs diff --git a/zebra-rpc/src/methods/types/get_block_template.rs b/zebra-rpc/src/methods/types/get_block_template.rs new file mode 100644 index 00000000000..60e7439f2eb --- /dev/null +++ b/zebra-rpc/src/methods/types/get_block_template.rs @@ -0,0 +1,1012 @@ +//! Types and functions for the `getblocktemplate` RPC. 
+ +pub mod constants; +pub mod parameters; +pub mod proposal; +pub mod zip317; + +pub use constants::{ + CAPABILITIES_FIELD, DEFAULT_SOLUTION_RATE_WINDOW_SIZE, + MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, MEMPOOL_LONG_POLL_INTERVAL, MUTABLE_FIELD, + NONCE_RANGE_FIELD, NOT_SYNCED_ERROR_CODE, ZCASHD_FUNDING_STREAM_ORDER, +}; +pub use parameters::{GetBlockTemplateRequestMode, JsonParameters}; +pub use proposal::{ProposalResponse, TimeSource}; + +use crate::{ + config, + methods::{ + types::{ + default_roots::DefaultRoots, long_poll::LongPollId, submit_block, + transaction::TransactionTemplate, + }, + GetBlockHash, + }, + server::error::OkOrError, +}; +use jsonrpsee::core::RpcResult; +use jsonrpsee_types::{ErrorCode, ErrorObject}; +use std::{collections::HashMap, fmt, iter, sync::Arc}; +use tokio::sync::watch::{self, error::SendError}; +use tower::{Service, ServiceExt}; +use zebra_chain::{ + amount::{self, Amount, NegativeOrZero, NonNegative}, + block::{ + self, + merkle::{self, AuthDataRoot}, + Block, ChainHistoryBlockTxAuthCommitmentHash, ChainHistoryMmrRootHash, Height, + MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION, + }, + chain_sync_status::ChainSyncStatus, + chain_tip::ChainTip, + parameters::{subsidy::FundingStreamReceiver, Network, NetworkKind, NetworkUpgrade}, + serialization::{DateTime32, ZcashDeserializeInto}, + transaction::{Transaction, UnminedTx, VerifiedUnminedTx}, + transparent::{ + self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN, + }, + work::difficulty::{CompactDifficulty, ExpandedDifficulty}, +}; +use zebra_consensus::{ + block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, MAX_BLOCK_SIGOPS, +}; +use zebra_node_services::mempool::{self, TransactionDependencies}; +use zebra_state::GetBlockTemplateChainInfo; + +/// An alias to indicate that a usize value represents the depth of in-block dependencies of a +/// transaction. 
+/// +/// See the `dependencies_depth()` function in [`zip317`] for more details. +pub type InBlockTxDependenciesDepth = usize; + +/// A serialized `getblocktemplate` RPC response in template mode. +/// +/// This is the output of the `getblocktemplate` RPC in the default 'template' mode. See +/// [`ProposalResponse`] for the output in 'proposal' mode. +#[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct GetBlockTemplate { + /// The getblocktemplate RPC capabilities supported by Zebra. + /// + /// At the moment, Zebra does not support any of the extra capabilities from the specification: + /// - `proposal`: + /// - `longpoll`: + /// - `serverlist`: + /// + /// By the above, Zebra will always return an empty vector here. + pub capabilities: Vec, + + /// The version of the block format. + /// Always 4 for new Zcash blocks. + pub version: u32, + + /// The hash of the previous block. + #[serde(rename = "previousblockhash")] + pub previous_block_hash: GetBlockHash, + + /// The block commitment for the new block's header. + /// + /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + #[serde(rename = "blockcommitmentshash")] + #[serde(with = "hex")] + pub block_commitments_hash: ChainHistoryBlockTxAuthCommitmentHash, + + /// Legacy backwards-compatibility header root field. + /// + /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + #[serde(rename = "lightclientroothash")] + #[serde(with = "hex")] + pub light_client_root_hash: ChainHistoryBlockTxAuthCommitmentHash, + + /// Legacy backwards-compatibility header root field. + /// + /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + #[serde(rename = "finalsaplingroothash")] + #[serde(with = "hex")] + pub final_sapling_root_hash: ChainHistoryBlockTxAuthCommitmentHash, + + /// The block header roots for [`GetBlockTemplate.transactions`]. 
+ /// + /// If the transactions in the block template are modified, these roots must be recalculated + /// [according to the specification](https://zcash.github.io/rpc/getblocktemplate.html). + #[serde(rename = "defaultroots")] + pub default_roots: DefaultRoots, + + /// The non-coinbase transactions selected for this block template. + pub transactions: Vec>, + + /// The coinbase transaction generated from `transactions` and `height`. + #[serde(rename = "coinbasetxn")] + pub coinbase_txn: TransactionTemplate, + + /// An ID that represents the chain tip and mempool contents for this template. + #[serde(rename = "longpollid")] + pub long_poll_id: LongPollId, + + /// The expected difficulty for the new block displayed in expanded form. + #[serde(with = "hex")] + pub target: ExpandedDifficulty, + + /// > For each block other than the genesis block, nTime MUST be strictly greater than + /// > the median-time-past of that block. + /// + /// + #[serde(rename = "mintime")] + pub min_time: DateTime32, + + /// Hardcoded list of block fields the miner is allowed to change. + pub mutable: Vec, + + /// A range of valid nonces that goes from `u32::MIN` to `u32::MAX`. + #[serde(rename = "noncerange")] + pub nonce_range: String, + + /// Max legacy signature operations in the block. + #[serde(rename = "sigoplimit")] + pub sigop_limit: u64, + + /// Max block size in bytes + #[serde(rename = "sizelimit")] + pub size_limit: u64, + + /// > the current time as seen by the server (recommended for block time). + /// > note this is not necessarily the system clock, and must fall within the mintime/maxtime rules + /// + /// + #[serde(rename = "curtime")] + pub cur_time: DateTime32, + + /// The expected difficulty for the new block displayed in compact form. + #[serde(with = "hex")] + pub bits: CompactDifficulty, + + /// The height of the next block in the best chain. 
+ // Optional TODO: use Height type, but check that deserialized heights are within Height::MAX + pub height: u32, + + /// > the maximum time allowed + /// + /// + /// + /// Zebra adjusts the minimum and current times for testnet minimum difficulty blocks, + /// so we need to tell miners what the maximum valid time is. + /// + /// This field is not in `zcashd` or the Zcash RPC reference yet. + /// + /// Currently, some miners just use `min_time` or `cur_time`. Others calculate `max_time` from the + /// fixed 90 minute consensus rule, or a smaller fixed interval (like 1000s). + /// Some miners don't check the maximum time. This can cause invalid blocks after network downtime, + /// a significant drop in the hash rate, or after the testnet minimum difficulty interval. + #[serde(rename = "maxtime")] + pub max_time: DateTime32, + + /// > only relevant for long poll responses: + /// > indicates if work received prior to this response remains potentially valid (default) + /// > and should have its shares submitted; + /// > if false, the miner may wish to discard its share queue + /// + /// + /// + /// This field is not in `zcashd` or the Zcash RPC reference yet. + /// + /// In Zebra, `submit_old` is `false` when the tip block changed or max time is reached, + /// and `true` if only the mempool transactions have changed. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + #[serde(rename = "submitold")] + pub submit_old: Option, +} + +impl fmt::Debug for GetBlockTemplate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // A block with a lot of transactions can be extremely long in logs. 
+ let mut transactions_truncated = self.transactions.clone(); + if self.transactions.len() > 4 { + // Remove transaction 3 onwards, but leave the last transaction + let end = self.transactions.len() - 2; + transactions_truncated.splice(3..=end, Vec::new()); + } + + f.debug_struct("GetBlockTemplate") + .field("capabilities", &self.capabilities) + .field("version", &self.version) + .field("previous_block_hash", &self.previous_block_hash) + .field("block_commitments_hash", &self.block_commitments_hash) + .field("light_client_root_hash", &self.light_client_root_hash) + .field("final_sapling_root_hash", &self.final_sapling_root_hash) + .field("default_roots", &self.default_roots) + .field("transaction_count", &self.transactions.len()) + .field("transactions", &transactions_truncated) + .field("coinbase_txn", &self.coinbase_txn) + .field("long_poll_id", &self.long_poll_id) + .field("target", &self.target) + .field("min_time", &self.min_time) + .field("mutable", &self.mutable) + .field("nonce_range", &self.nonce_range) + .field("sigop_limit", &self.sigop_limit) + .field("size_limit", &self.size_limit) + .field("cur_time", &self.cur_time) + .field("bits", &self.bits) + .field("height", &self.height) + .field("max_time", &self.max_time) + .field("submit_old", &self.submit_old) + .finish() + } +} + +impl GetBlockTemplate { + /// Returns a `Vec` of capabilities supported by the `getblocktemplate` RPC + pub fn capabilities() -> Vec { + CAPABILITIES_FIELD.iter().map(ToString::to_string).collect() + } + + /// Returns a new [`GetBlockTemplate`] struct, based on the supplied arguments and defaults. + /// + /// The result of this method only depends on the supplied arguments and constants. + /// + /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` + /// in the `getblocktemplate` RPC. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + network: &Network, + miner_address: &transparent::Address, + chain_tip_and_local_time: &GetBlockTemplateChainInfo, + long_poll_id: LongPollId, + #[cfg(not(test))] mempool_txs: Vec, + #[cfg(test)] mempool_txs: Vec<(InBlockTxDependenciesDepth, VerifiedUnminedTx)>, + submit_old: Option, + like_zcashd: bool, + extra_coinbase_data: Vec, + ) -> Self { + // Calculate the next block height. + let next_block_height = + (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); + + // Convert transactions into TransactionTemplates + #[cfg(not(test))] + let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = + mempool_txs.into_iter().map(|tx| ((&tx).into(), tx)).unzip(); + + // Transaction selection returns transactions in an arbitrary order, + // but Zebra's snapshot tests expect the same order every time. + // + // # Correctness + // + // Transactions that spend outputs created in the same block must appear + // after the transactions that create those outputs. + #[cfg(test)] + let (mempool_tx_templates, mempool_txs): (Vec<_>, Vec<_>) = { + let mut mempool_txs_with_templates: Vec<( + InBlockTxDependenciesDepth, + TransactionTemplate, + VerifiedUnminedTx, + )> = mempool_txs + .into_iter() + .map(|(min_tx_index, tx)| (min_tx_index, (&tx).into(), tx)) + .collect(); + + if like_zcashd { + // Sort in serialized data order, excluding the length byte. + // `zcashd` sometimes seems to do this, but other times the order is arbitrary. + mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { + (*min_tx_index, tx_template.data.clone()) + }); + } else { + // Sort by hash, this is faster. 
+ mempool_txs_with_templates.sort_by_key(|(min_tx_index, tx_template, _tx)| { + (*min_tx_index, tx_template.hash.bytes_in_display_order()) + }); + } + mempool_txs_with_templates + .into_iter() + .map(|(_, template, tx)| (template, tx)) + .unzip() + }; + + // Generate the coinbase transaction and default roots + // + // TODO: move expensive root, hash, and tree cryptography to a rayon thread? + let (coinbase_txn, default_roots) = generate_coinbase_and_roots( + network, + next_block_height, + miner_address, + &mempool_txs, + chain_tip_and_local_time.chain_history_root, + like_zcashd, + extra_coinbase_data, + ); + + // Convert difficulty + let target = chain_tip_and_local_time + .expected_difficulty + .to_expanded() + .expect("state always returns a valid difficulty value"); + + // Convert default values + let capabilities: Vec = Self::capabilities(); + let mutable: Vec = MUTABLE_FIELD.iter().map(ToString::to_string).collect(); + + tracing::debug!( + selected_txs = ?mempool_txs + .iter() + .map(|tx| (tx.transaction.id.mined_id(), tx.unpaid_actions)) + .collect::>(), + "creating template ... 
" + ); + + GetBlockTemplate { + capabilities, + + version: ZCASH_BLOCK_VERSION, + + previous_block_hash: GetBlockHash(chain_tip_and_local_time.tip_hash), + block_commitments_hash: default_roots.block_commitments_hash, + light_client_root_hash: default_roots.block_commitments_hash, + final_sapling_root_hash: default_roots.block_commitments_hash, + default_roots, + + transactions: mempool_tx_templates, + + coinbase_txn, + + long_poll_id, + + target, + + min_time: chain_tip_and_local_time.min_time, + + mutable, + + nonce_range: NONCE_RANGE_FIELD.to_string(), + + sigop_limit: MAX_BLOCK_SIGOPS, + + size_limit: MAX_BLOCK_BYTES, + + cur_time: chain_tip_and_local_time.cur_time, + + bits: chain_tip_and_local_time.expected_difficulty, + + height: next_block_height.0, + + max_time: chain_tip_and_local_time.max_time, + + submit_old, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[serde(untagged)] +/// A `getblocktemplate` RPC response. +pub enum Response { + /// `getblocktemplate` RPC request in template mode. + TemplateMode(Box), + + /// `getblocktemplate` RPC request in proposal mode. + ProposalMode(ProposalResponse), +} + +impl Response { + /// Returns the inner template, if the response is in template mode. + pub fn try_into_template(self) -> Option { + match self { + Response::TemplateMode(template) => Some(*template), + Response::ProposalMode(_) => None, + } + } + + /// Returns the inner proposal, if the response is in proposal mode. + pub fn try_into_proposal(self) -> Option { + match self { + Response::TemplateMode(_) => None, + Response::ProposalMode(proposal) => Some(proposal), + } + } +} + +/// Handler for the `getblocktemplate` RPC. +#[derive(Clone)] +pub struct GetBlockTemplateHandler +where + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, +{ + /// The configured miner address for this RPC service. 
+ /// + /// Zebra currently only supports transparent addresses. + miner_address: Option, + + /// Extra data to include in coinbase transaction inputs. + /// Limited to around 95 bytes by the consensus rules. + extra_coinbase_data: Vec, + + /// The chain verifier, used for submitting blocks. + block_verifier_router: BlockVerifierRouter, + + /// The chain sync status, used for checking if Zebra is likely close to the network chain tip. + sync_status: SyncStatus, + + /// A channel to send successful block submissions to the block gossip task, + /// so they can be advertised to peers. + mined_block_sender: watch::Sender<(block::Hash, block::Height)>, +} + +impl GetBlockTemplateHandler +where + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, +{ + /// Creates a new [`GetBlockTemplateHandler`]. + /// + /// # Panics + /// + /// - If the provided `mining_config` is not valid. + pub fn new( + net: &Network, + conf: config::mining::Config, + block_verifier_router: BlockVerifierRouter, + sync_status: SyncStatus, + mined_block_sender: Option>, + ) -> Self { + // Prevent loss of miner funds due to an unsupported or incorrect address type. + if let Some(miner_address) = conf.miner_address.clone() { + match net.kind() { + NetworkKind::Mainnet => assert_eq!( + miner_address.network_kind(), + NetworkKind::Mainnet, + "Incorrect config: Zebra is configured to run on a Mainnet network, \ + which implies the configured mining address needs to be for Mainnet, \ + but the provided address is for {}.", + miner_address.network_kind(), + ), + // `Regtest` uses `Testnet` transparent addresses. 
+ network_kind @ (NetworkKind::Testnet | NetworkKind::Regtest) => assert_eq!( + miner_address.network_kind(), + NetworkKind::Testnet, + "Incorrect config: Zebra is configured to run on a {network_kind} network, \ + which implies the configured mining address needs to be for Testnet, \ + but the provided address is for {}.", + miner_address.network_kind(), + ), + } + } + + // A limit on the configured extra coinbase data, regardless of the current block height. + // This is different from the consensus rule, which limits the total height + data. + const EXTRA_COINBASE_DATA_LIMIT: usize = + MAX_COINBASE_DATA_LEN - MAX_COINBASE_HEIGHT_DATA_LEN; + + let debug_like_zcashd = conf.debug_like_zcashd; + + // Hex-decode to bytes if possible, otherwise UTF-8 encode to bytes. + let extra_coinbase_data = conf.extra_coinbase_data.unwrap_or_else(|| { + if debug_like_zcashd { + "" + } else { + EXTRA_ZEBRA_COINBASE_DATA + } + .to_string() + }); + let extra_coinbase_data = hex::decode(&extra_coinbase_data) + .unwrap_or_else(|_error| extra_coinbase_data.as_bytes().to_vec()); + + assert!( + extra_coinbase_data.len() <= EXTRA_COINBASE_DATA_LIMIT, + "extra coinbase data is {} bytes, but Zebra's limit is {}.\n\ + Configure mining.extra_coinbase_data with a shorter string", + extra_coinbase_data.len(), + EXTRA_COINBASE_DATA_LIMIT, + ); + + Self { + miner_address: conf.miner_address, + extra_coinbase_data, + block_verifier_router, + sync_status, + mined_block_sender: mined_block_sender + .unwrap_or(submit_block::SubmitBlockChannel::default().sender()), + } + } + + /// Returns the miner's address. + pub fn miner_address(&self) -> Option { + self.miner_address.clone() + } + + /// Returns the extra coinbase data. + pub fn extra_coinbase_data(&self) -> Vec { + self.extra_coinbase_data.clone() + } + + /// Returns the sync status. + pub fn sync_status(&self) -> SyncStatus { + self.sync_status.clone() + } + + /// Returns the block verifier router. 
+ pub fn block_verifier_router(&self) -> BlockVerifierRouter { + self.block_verifier_router.clone() + } + + /// Advertises the mined block. + pub fn advertise_mined_block( + &self, + block: block::Hash, + height: block::Height, + ) -> Result<(), SendError<(block::Hash, block::Height)>> { + self.mined_block_sender.send((block, height)) + } +} + +impl fmt::Debug + for GetBlockTemplateHandler +where + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // Skip fields without debug impls + f.debug_struct("GetBlockTemplateRpcImpl") + .field("miner_address", &self.miner_address) + .field("extra_coinbase_data", &self.extra_coinbase_data) + .finish() + } +} + +// - Parameter checks + +/// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode, +/// +/// Returns an error if there's a mismatch between the mode and whether `data` is provided. +pub fn check_parameters(parameters: &Option) -> RpcResult<()> { + let Some(parameters) = parameters else { + return Ok(()); + }; + + match parameters { + JsonParameters { + mode: GetBlockTemplateRequestMode::Template, + data: None, + .. + } + | JsonParameters { + mode: GetBlockTemplateRequestMode::Proposal, + data: Some(_), + .. + } => Ok(()), + + JsonParameters { + mode: GetBlockTemplateRequestMode::Proposal, + data: None, + .. + } => Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "\"data\" parameter must be \ + provided in \"proposal\" mode", + None, + )), + + JsonParameters { + mode: GetBlockTemplateRequestMode::Template, + data: Some(_), + .. + } => Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "\"data\" parameter must be \ + omitted in \"template\" mode", + None, + )), + } +} + +/// Returns the miner address, or an error if it is invalid. 
+pub fn check_miner_address( + miner_address: Option, +) -> RpcResult { + miner_address.ok_or_misc_error( + "set `mining.miner_address` in `zebrad.toml` to a transparent address".to_string(), + ) +} + +/// Attempts to validate block proposal against all of the server's +/// usual acceptance rules (except proof-of-work). +/// +/// Returns a `getblocktemplate` [`Response`]. +pub async fn validate_block_proposal( + mut block_verifier_router: BlockVerifierRouter, + block_proposal_bytes: Vec, + network: Network, + latest_chain_tip: Tip, + sync_status: SyncStatus, +) -> RpcResult +where + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + Tip: ChainTip + Clone + Send + Sync + 'static, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, +{ + check_synced_to_tip(&network, latest_chain_tip, sync_status)?; + + let block: Block = match block_proposal_bytes.zcash_deserialize_into() { + Ok(block) => block, + Err(parse_error) => { + tracing::info!( + ?parse_error, + "error response from block parser in CheckProposal request" + ); + + return Ok( + ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(), + ); + } + }; + + let block_verifier_router_response = block_verifier_router + .ready() + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? + .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) + .await; + + Ok(block_verifier_router_response + .map(|_hash| ProposalResponse::Valid) + .unwrap_or_else(|verify_chain_error| { + tracing::info!( + ?verify_chain_error, + "error response from block_verifier_router in CheckProposal request" + ); + + ProposalResponse::rejected("invalid proposal", verify_chain_error) + }) + .into()) +} + +// - State and syncer checks + +/// Returns an error if Zebra is not synced to the consensus chain tip. +/// Returns early with `Ok(())` if Proof-of-Work is disabled on the provided `network`. +/// This error might be incorrect if the local clock is skewed. 
+pub fn check_synced_to_tip( + network: &Network, + latest_chain_tip: Tip, + sync_status: SyncStatus, +) -> RpcResult<()> +where + Tip: ChainTip + Clone + Send + Sync + 'static, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, +{ + if network.is_a_test_network() { + return Ok(()); + } + + // The tip estimate may not be the same as the one coming from the state + // but this is ok for an estimate + let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip + .estimate_distance_to_network_chain_tip(network) + .ok_or_misc_error("no chain tip available yet")?; + + if !sync_status.is_close_to_tip() + || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP + { + tracing::info!( + ?estimated_distance_to_chain_tip, + ?local_tip_height, + "Zebra has not synced to the chain tip. \ + Hint: check your network connection, clock, and time zone settings." + ); + + return Err(ErrorObject::borrowed( + NOT_SYNCED_ERROR_CODE.code(), + "Zebra has not synced to the chain tip, \ + estimated distance: {estimated_distance_to_chain_tip:?}, \ + local tip: {local_tip_height:?}. \ + Hint: check your network connection, clock, and time zone settings.", + None, + )); + } + + Ok(()) +} + +// - State and mempool data fetches + +/// Returns the state data for the block template. +/// +/// You should call `check_synced_to_tip()` before calling this function. +/// If the state does not have enough blocks, returns an error. 
+pub async fn fetch_state_tip_and_local_time( + state: State, +) -> RpcResult +where + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, +{ + let request = zebra_state::ReadRequest::ChainInfo; + let response = state + .oneshot(request.clone()) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let chain_info = match response { + zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info, + _ => unreachable!("incorrect response to {request:?}"), + }; + + Ok(chain_info) +} + +/// Returns the transactions that are currently in `mempool`, or None if the +/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state. +/// +/// You should call `check_synced_to_tip()` before calling this function. +/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions. +pub async fn fetch_mempool_transactions( + mempool: Mempool, + chain_tip_hash: block::Hash, +) -> RpcResult, TransactionDependencies)>> +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + 'static, + Mempool::Future: Send, +{ + let response = mempool + .oneshot(mempool::Request::FullTransactions) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + // TODO: Order transactions in block templates based on their dependencies + + let mempool::Response::FullTransactions { + transactions, + transaction_dependencies, + last_seen_tip_hash, + } = response + else { + unreachable!("unmatched response to a mempool::FullTransactions request") + }; + + // Check that the mempool and state were in sync when we made the requests + Ok((last_seen_tip_hash == chain_tip_hash).then_some((transactions, transaction_dependencies))) +} + +// - Response processing + +/// Generates and returns the coinbase transaction and 
default roots. +/// +/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` +/// in the `getblocktemplate` RPC. +pub fn generate_coinbase_and_roots( + network: &Network, + block_template_height: Height, + miner_address: &transparent::Address, + mempool_txs: &[VerifiedUnminedTx], + chain_history_root: Option, + like_zcashd: bool, + extra_coinbase_data: Vec, +) -> (TransactionTemplate, DefaultRoots) { + // Generate the coinbase transaction + let miner_fee = calculate_miner_fee(mempool_txs); + let coinbase_txn = generate_coinbase_transaction( + network, + block_template_height, + miner_address, + miner_fee, + like_zcashd, + extra_coinbase_data, + ); + + // Calculate block default roots + // + // TODO: move expensive root, hash, and tree cryptography to a rayon thread? + let chain_history_root = chain_history_root + .or_else(|| { + (NetworkUpgrade::Heartwood.activation_height(network) == Some(block_template_height)) + .then_some([0; 32].into()) + }) + .expect("history tree can't be empty"); + let default_roots = + calculate_default_root_hashes(&coinbase_txn, mempool_txs, chain_history_root); + + let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee); + + (coinbase_txn, default_roots) +} + +// - Coinbase transaction processing + +/// Returns a coinbase transaction for the supplied parameters. +/// +/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` +/// in the `getblocktemplate` RPC. 
+pub fn generate_coinbase_transaction( + network: &Network, + height: Height, + miner_address: &transparent::Address, + miner_fee: Amount, + like_zcashd: bool, + extra_coinbase_data: Vec, +) -> UnminedTx { + let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd); + + if like_zcashd { + Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data) + .into() + } else { + Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into() + } +} + +/// Returns the total miner fee for `mempool_txs`. +pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount { + let miner_fee: amount::Result> = + mempool_txs.iter().map(|tx| tx.miner_fee).sum(); + + miner_fee.expect( + "invalid selected transactions: \ + fees in a valid block can not be more than MAX_MONEY", + ) +} + +/// Returns the standard funding stream and miner reward transparent output scripts +/// for `network`, `height` and `miner_fee`. +/// +/// Only works for post-Canopy heights. +/// +/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` +/// in the `getblocktemplate` RPC. +pub fn standard_coinbase_outputs( + network: &Network, + height: Height, + miner_address: &transparent::Address, + miner_fee: Amount, + like_zcashd: bool, +) -> Vec<(Amount, transparent::Script)> { + let expected_block_subsidy = block_subsidy(height, network).expect("valid block subsidy"); + let funding_streams = funding_stream_values(height, network, expected_block_subsidy) + .expect("funding stream value calculations are valid for reasonable chain heights"); + + // Optional TODO: move this into a zebra_consensus function? 
+ let funding_streams: HashMap< + FundingStreamReceiver, + (Amount, &transparent::Address), + > = funding_streams + .into_iter() + .filter_map(|(receiver, amount)| { + Some(( + receiver, + (amount, funding_stream_address(height, network, receiver)?), + )) + }) + .collect(); + + let miner_reward = miner_subsidy(height, network, expected_block_subsidy) + .expect("reward calculations are valid for reasonable chain heights") + + miner_fee; + let miner_reward = + miner_reward.expect("reward calculations are valid for reasonable chain heights"); + + combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd) +} + +/// Combine the miner reward and funding streams into a list of coinbase amounts and addresses. +/// +/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` +/// in the `getblocktemplate` RPC. +fn combine_coinbase_outputs( + funding_streams: HashMap, &transparent::Address)>, + miner_address: &transparent::Address, + miner_reward: Amount, + like_zcashd: bool, +) -> Vec<(Amount, transparent::Script)> { + // Collect all the funding streams and convert them to outputs. + let funding_streams_outputs: Vec<(Amount, &transparent::Address)> = + funding_streams + .into_iter() + .map(|(_receiver, (amount, address))| (amount, address)) + .collect(); + + let mut coinbase_outputs: Vec<(Amount, transparent::Script)> = + funding_streams_outputs + .iter() + .map(|(amount, address)| (*amount, address.create_script_from_address())) + .collect(); + + // The HashMap returns funding streams in an arbitrary order, + // but Zebra's snapshot tests expect the same order every time. 
+ if like_zcashd { + // zcashd sorts outputs in serialized data order, excluding the length field + coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); + + // The miner reward is always the first output independent of the sort order + coinbase_outputs.insert( + 0, + (miner_reward, miner_address.create_script_from_address()), + ); + } else { + // Unlike zcashd, in Zebra the miner reward is part of the sorting + coinbase_outputs.push((miner_reward, miner_address.create_script_from_address())); + + // Zebra sorts by amount then script. + // + // Since the sort is stable, equal amounts will remain sorted by script. + coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); + coinbase_outputs.sort_by_key(|(amount, _script)| *amount); + } + + coinbase_outputs +} + +// - Transaction roots processing + +/// Returns the default block roots for the supplied coinbase and mempool transactions, +/// and the supplied history tree. +/// +/// This function runs expensive cryptographic operations. +pub fn calculate_default_root_hashes( + coinbase_txn: &UnminedTx, + mempool_txs: &[VerifiedUnminedTx], + chain_history_root: ChainHistoryMmrRootHash, +) -> DefaultRoots { + let (merkle_root, auth_data_root) = calculate_transaction_roots(coinbase_txn, mempool_txs); + + let block_commitments_hash = if chain_history_root == [0; 32].into() { + [0; 32].into() + } else { + ChainHistoryBlockTxAuthCommitmentHash::from_commitments( + &chain_history_root, + &auth_data_root, + ) + }; + + DefaultRoots { + merkle_root, + chain_history_root, + auth_data_root, + block_commitments_hash, + } +} + +/// Returns the transaction effecting and authorizing roots +/// for `coinbase_txn` and `mempool_txs`, which are used in the block header. +// +// TODO: should this be spawned into a cryptographic operations pool? 
+// (it would only matter if there were a lot of small transactions in a block) +pub fn calculate_transaction_roots( + coinbase_txn: &UnminedTx, + mempool_txs: &[VerifiedUnminedTx], +) -> (merkle::Root, AuthDataRoot) { + let block_transactions = + || iter::once(coinbase_txn).chain(mempool_txs.iter().map(|tx| &tx.transaction)); + + let merkle_root = block_transactions().cloned().collect(); + let auth_data_root = block_transactions().cloned().collect(); + + (merkle_root, auth_data_root) +} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs b/zebra-rpc/src/methods/types/get_block_template/constants.rs similarity index 90% rename from zebra-rpc/src/methods/get_block_template_rpcs/constants.rs rename to zebra-rpc/src/methods/types/get_block_template/constants.rs index 950dff5db5d..3513f4b9a0b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs +++ b/zebra-rpc/src/methods/types/get_block_template/constants.rs @@ -17,15 +17,15 @@ use zebra_chain::{ /// /// `zcashd` waits 10 seconds between checking the state /// -pub const GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL: u64 = 5; +pub const MEMPOOL_LONG_POLL_INTERVAL: u64 = 5; /// A range of valid block template nonces, that goes from `u32::MIN` to `u32::MAX` as a string. -pub const GET_BLOCK_TEMPLATE_NONCE_RANGE_FIELD: &str = "00000000ffffffff"; +pub const NONCE_RANGE_FIELD: &str = "00000000ffffffff"; /// A hardcoded list of fields that the miner can change from the block template. /// /// -pub const GET_BLOCK_TEMPLATE_MUTABLE_FIELD: &[&str] = &[ +pub const MUTABLE_FIELD: &[&str] = &[ // Standard mutations, copied from zcashd "time", "transactions", @@ -35,7 +35,7 @@ pub const GET_BLOCK_TEMPLATE_MUTABLE_FIELD: &[&str] = &[ /// A hardcoded list of Zebra's getblocktemplate RPC capabilities. 
/// /// -pub const GET_BLOCK_TEMPLATE_CAPABILITIES_FIELD: &[&str] = &["proposal"]; +pub const CAPABILITIES_FIELD: &[&str] = &["proposal"]; /// The max estimated distance to the chain tip for the getblocktemplate method. /// diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/parameters.rs b/zebra-rpc/src/methods/types/get_block_template/parameters.rs similarity index 96% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/parameters.rs rename to zebra-rpc/src/methods/types/get_block_template/parameters.rs index 73e1ed820ba..d23a09ef761 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/parameters.rs +++ b/zebra-rpc/src/methods/types/get_block_template/parameters.rs @@ -1,8 +1,9 @@ //! Parameter types for the `getblocktemplate` RPC. -use crate::methods::{get_block_template_rpcs::types::long_poll::LongPollId, hex_data::HexData}; +use crate::methods::{hex_data::HexData, types::long_poll::LongPollId}; -/// Defines whether the RPC method should generate a block template or attempt to validate a block proposal. +/// Defines whether the RPC method should generate a block template or attempt to validate a block +/// proposal. 
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, PartialEq, Eq)] #[serde(rename_all = "lowercase")] pub enum GetBlockTemplateRequestMode { diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs b/zebra-rpc/src/methods/types/get_block_template/proposal.rs similarity index 99% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs rename to zebra-rpc/src/methods/types/get_block_template/proposal.rs index 373ba2d7c20..1b8100c8c29 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template/proposal.rs +++ b/zebra-rpc/src/methods/types/get_block_template/proposal.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use zebra_node_services::BoxError; use crate::methods::{ - get_block_template_rpcs::types::{ + types::{ default_roots::DefaultRoots, get_block_template::{GetBlockTemplate, Response}, }, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs b/zebra-rpc/src/methods/types/get_block_template/zip317.rs similarity index 99% rename from zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs rename to zebra-rpc/src/methods/types/get_block_template/zip317.rs index 75ae9575d62..111f7a14b19 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/zip317.rs +++ b/zebra-rpc/src/methods/types/get_block_template/zip317.rs @@ -6,13 +6,16 @@ //! > when computing `size_target`, since there is no consensus requirement for this to be //! > exactly the same between implementations. 
-use std::collections::{HashMap, HashSet}; - +#[cfg(test)] +use crate::methods::types::get_block_template::InBlockTxDependenciesDepth; +use crate::methods::{ + get_block_template::generate_coinbase_transaction, types::transaction::TransactionTemplate, +}; use rand::{ distributions::{Distribution, WeightedIndex}, prelude::thread_rng, }; - +use std::collections::{HashMap, HashSet}; use zebra_chain::{ amount::NegativeOrZero, block::{Height, MAX_BLOCK_BYTES}, @@ -23,13 +26,6 @@ use zebra_chain::{ use zebra_consensus::MAX_BLOCK_SIGOPS; use zebra_node_services::mempool::TransactionDependencies; -use crate::methods::get_block_template_rpcs::{ - get_block_template::generate_coinbase_transaction, types::transaction::TransactionTemplate, -}; - -#[cfg(test)] -use super::get_block_template::InBlockTxDependenciesDepth; - #[cfg(test)] mod tests; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs b/zebra-rpc/src/methods/types/get_block_template/zip317/tests.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/zip317/tests.rs rename to zebra-rpc/src/methods/types/get_block_template/zip317/tests.rs diff --git a/zebra-rpc/src/methods/types/get_blockchain_info.rs b/zebra-rpc/src/methods/types/get_blockchain_info.rs index 82b5644a1ea..6ac392ff501 100644 --- a/zebra-rpc/src/methods/types/get_blockchain_info.rs +++ b/zebra-rpc/src/methods/types/get_blockchain_info.rs @@ -4,6 +4,7 @@ use zebra_chain::{ amount::{Amount, NonNegative}, value_balance::ValueBalance, }; +use zec::Zec; use super::*; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/types/get_mining_info.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs rename to zebra-rpc/src/methods/types/get_mining_info.rs diff --git a/zebra-rpc/src/methods/types/get_raw_mempool.rs b/zebra-rpc/src/methods/types/get_raw_mempool.rs index 4fefcea5516..76e13f7469f 100644 --- 
a/zebra-rpc/src/methods/types/get_raw_mempool.rs +++ b/zebra-rpc/src/methods/types/get_raw_mempool.rs @@ -5,11 +5,12 @@ use std::collections::HashSet; use hex::ToHex as _; -use super::Zec; use zebra_chain::transaction::VerifiedUnminedTx; use zebra_chain::{amount::NonNegative, block::Height}; use zebra_node_services::mempool::TransactionDependencies; +use super::zec::Zec; + /// Response to a `getrawmempool` RPC request. /// /// See the notes for the [`Rpc::get_raw_mempool` method]. diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs b/zebra-rpc/src/methods/types/long_poll.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs rename to zebra-rpc/src/methods/types/long_poll.rs diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs b/zebra-rpc/src/methods/types/peer_info.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/peer_info.rs rename to zebra-rpc/src/methods/types/peer_info.rs diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs b/zebra-rpc/src/methods/types/submit_block.rs similarity index 95% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs rename to zebra-rpc/src/methods/types/submit_block.rs index 54f593eb867..b46c8886f60 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs +++ b/zebra-rpc/src/methods/types/submit_block.rs @@ -6,11 +6,11 @@ use zebra_chain::{block, parameters::GENESIS_PREVIOUS_BLOCK_HASH}; // Allow doc links to these imports. 
#[allow(unused_imports)] -use crate::methods::get_block_template_rpcs::GetBlockTemplate; +use crate::methods::get_block_template::GetBlockTemplateHandler; /// Optional argument `jsonparametersobject` for `submitblock` RPC request /// -/// See notes for [`crate::methods::GetBlockTemplateRpcServer::submit_block`] method +/// See the notes for the [`submit_block`](crate::methods::RpcServer::submit_block) RPC. #[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)] pub struct JsonParameters { /// The workid for the block template. Currently unused. diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs b/zebra-rpc/src/methods/types/subsidy.rs similarity index 99% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs rename to zebra-rpc/src/methods/types/subsidy.rs index e1973c7af4a..856713c416b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/subsidy.rs +++ b/zebra-rpc/src/methods/types/subsidy.rs @@ -6,7 +6,7 @@ use zebra_chain::{ transparent, }; -use crate::methods::types::Zec; +use super::zec::Zec; /// A response to a `getblocksubsidy` RPC request #[derive(Clone, Debug, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index 66c7baf510f..f1dab1cff97 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -1,21 +1,143 @@ -//! Verbose transaction-related types. - -use std::sync::Arc; +//! Transaction-related types. 
+use super::zec::Zec; use chrono::{DateTime, Utc}; use hex::ToHex; - +use std::sync::Arc; use zebra_chain::{ + amount::{self, Amount, NegativeOrZero, NonNegative}, block, + block::merkle::AUTH_DIGEST_PLACEHOLDER, parameters::Network, sapling::NotSmallOrderValueCommitment, - transaction::{SerializedTransaction, Transaction}, + transaction::{self, SerializedTransaction, Transaction, UnminedTx, VerifiedUnminedTx}, transparent::Script, }; use zebra_consensus::groth16::Description; +use zebra_script::CachedFfiTransaction; use zebra_state::IntoDisk; -use crate::methods::types; +/// Transaction data and fields needed to generate blocks using the `getblocktemplate` RPC. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(bound = "FeeConstraint: amount::Constraint + Clone")] +pub struct TransactionTemplate +where + FeeConstraint: amount::Constraint + Clone, +{ + /// The hex-encoded serialized data for this transaction. + #[serde(with = "hex")] + pub data: SerializedTransaction, + + /// The transaction ID of this transaction. + #[serde(with = "hex")] + pub(crate) hash: transaction::Hash, + + /// The authorizing data digest of a v5 transaction, or a placeholder for older versions. + #[serde(rename = "authdigest")] + #[serde(with = "hex")] + pub(crate) auth_digest: transaction::AuthDigest, + + /// The transactions in this block template that this transaction depends upon. + /// These are 1-based indexes in the `transactions` list. + /// + /// Zebra's mempool does not support transaction dependencies, so this list is always empty. + /// + /// We use `u16` because 2 MB blocks are limited to around 39,000 transactions. + pub(crate) depends: Vec, + + /// The fee for this transaction. + /// + /// Non-coinbase transactions must be `NonNegative`. + /// The Coinbase transaction `fee` is the negative sum of the fees of the transactions in + /// the block, so their fee must be `NegativeOrZero`. 
+ pub(crate) fee: Amount, + + /// The number of transparent signature operations in this transaction. + pub(crate) sigops: u64, + + /// Is this transaction required in the block? + /// + /// Coinbase transactions are required, all other transactions are not. + pub(crate) required: bool, +} + +// Convert from a mempool transaction to a non-coinbase transaction template. +impl From<&VerifiedUnminedTx> for TransactionTemplate { + fn from(tx: &VerifiedUnminedTx) -> Self { + assert!( + !tx.transaction.transaction.is_coinbase(), + "unexpected coinbase transaction in mempool" + ); + + Self { + data: tx.transaction.transaction.as_ref().into(), + hash: tx.transaction.id.mined_id(), + auth_digest: tx + .transaction + .id + .auth_digest() + .unwrap_or(AUTH_DIGEST_PLACEHOLDER), + + // Always empty, not supported by Zebra's mempool. + depends: Vec::new(), + + fee: tx.miner_fee, + + sigops: tx.legacy_sigop_count, + + // Zebra does not require any transactions except the coinbase transaction. + required: false, + } + } +} + +impl From for TransactionTemplate { + fn from(tx: VerifiedUnminedTx) -> Self { + Self::from(&tx) + } +} + +impl TransactionTemplate { + /// Convert from a generated coinbase transaction into a coinbase transaction template. + /// + /// `miner_fee` is the total miner fees for the block, excluding newly created block rewards. + // + // TODO: use a different type for generated coinbase transactions? 
+ pub fn from_coinbase(tx: &UnminedTx, miner_fee: Amount) -> Self { + assert!( + tx.transaction.is_coinbase(), + "invalid generated coinbase transaction: \ + must have exactly one input, which must be a coinbase input", + ); + + let miner_fee = (-miner_fee) + .constrain() + .expect("negating a NonNegative amount always results in a valid NegativeOrZero"); + + let legacy_sigop_count = CachedFfiTransaction::new(tx.transaction.clone(), Vec::new()) + .legacy_sigop_count() + .expect( + "invalid generated coinbase transaction: \ + failure in zcash_script sigop count", + ); + + Self { + data: tx.transaction.as_ref().into(), + hash: tx.id.mined_id(), + auth_digest: tx.id.auth_digest().unwrap_or(AUTH_DIGEST_PLACEHOLDER), + + // Always empty, coinbase transactions never have inputs. + depends: Vec::new(), + + fee: miner_fee, + + sigops: legacy_sigop_count, + + // Zcash requires a coinbase transaction. + required: true, + } + } +} /// A Transaction object as returned by `getrawtransaction` and `getblock` RPC /// requests. 
@@ -315,7 +437,7 @@ impl TransactionObject { }; Output { - value: types::Zec::from(output.1.value).lossy_zec(), + value: Zec::from(output.1.value).lossy_zec(), value_zat: output.1.value.zatoshis(), n: output.0 as u32, script_pub_key: ScriptPubKey { @@ -373,9 +495,7 @@ impl TransactionObject { }) .collect(), ), - value_balance: Some( - types::Zec::from(tx.sapling_value_balance().sapling_amount()).lossy_zec(), - ), + value_balance: Some(Zec::from(tx.sapling_value_balance().sapling_amount()).lossy_zec()), value_balance_zat: Some(tx.sapling_value_balance().sapling_amount().zatoshis()), orchard: if !tx.has_orchard_shielded_data() { @@ -422,7 +542,7 @@ impl TransactionObject { } }) .collect(), - value_balance: types::Zec::from(tx.orchard_value_balance().orchard_amount()) + value_balance: Zec::from(tx.orchard_value_balance().orchard_amount()) .lossy_zec(), value_balance_zat: tx.orchard_value_balance().orchard_amount().zatoshis(), }) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/unified_address.rs b/zebra-rpc/src/methods/types/unified_address.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/unified_address.rs rename to zebra-rpc/src/methods/types/unified_address.rs diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/validate_address.rs b/zebra-rpc/src/methods/types/validate_address.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/validate_address.rs rename to zebra-rpc/src/methods/types/validate_address.rs diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/z_validate_address.rs b/zebra-rpc/src/methods/types/z_validate_address.rs similarity index 100% rename from zebra-rpc/src/methods/get_block_template_rpcs/types/z_validate_address.rs rename to zebra-rpc/src/methods/types/z_validate_address.rs diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 15245ed7c49..7a6ff1fa9fc 100644 --- a/zebra-rpc/src/server.rs +++ 
b/zebra-rpc/src/server.rs @@ -10,9 +10,8 @@ use std::{fmt, panic}; use cookie::Cookie; -use jsonrpsee::server::middleware::rpc::RpcServiceBuilder; -use jsonrpsee::server::{Server, ServerHandle}; -use tokio::{sync::watch, task::JoinHandle}; +use jsonrpsee::server::{middleware::rpc::RpcServiceBuilder, Server, ServerHandle}; +use tokio::task::JoinHandle; use tower::Service; use tracing::*; @@ -23,16 +22,14 @@ use zebra_network::AddressBookPeers; use zebra_node_services::mempool; use crate::{ - config::Config, - methods::{LoggedLastEvent, RpcImpl, RpcServer as _}, + config, + methods::{RpcImpl, RpcServer as _}, server::{ http_request_compatibility::HttpRequestMiddlewareLayer, rpc_call_compatibility::FixRpcResponseMiddleware, }, }; -use crate::methods::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; - pub mod cookie; pub mod error; pub mod http_request_compatibility; @@ -45,7 +42,7 @@ mod tests; #[derive(Clone)] pub struct RpcServer { /// The RPC config. - config: Config, + config: config::rpc::Config, /// The configured network. network: Network, @@ -78,7 +75,7 @@ pub const OPENED_RPC_ENDPOINT_MSG: &str = "Opened RPC endpoint at "; type ServerTask = JoinHandle>; impl RpcServer { - /// Start a new RPC server endpoint using the supplied configs and services. + /// Starts the RPC server. /// /// `build_version` and `user_agent` are version strings for the application, /// which are used in RPC responses. @@ -88,39 +85,16 @@ impl RpcServer { /// /// # Panics /// - /// - If [`Config::listen_addr`] is `None`. + /// - If [`Config::listen_addr`](config::rpc::Config::listen_addr) is `None`. // // TODO: - // - put some of the configs or services in their own struct? 
// - replace VersionString with semver::Version, and update the tests to provide valid versions #[allow(clippy::too_many_arguments)] - pub async fn spawn< - VersionString, - UserAgentString, - Mempool, - State, - Tip, - BlockVerifierRouter, - SyncStatus, - AddressBook, - >( - config: Config, - mining_config: crate::config::mining::Config, - build_version: VersionString, - user_agent: UserAgentString, - mempool: Mempool, - state: State, - block_verifier_router: BlockVerifierRouter, - sync_status: SyncStatus, - address_book: AddressBook, - latest_chain_tip: Tip, - network: Network, - mined_block_sender: Option>, - last_event: LoggedLastEvent, - ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> + pub async fn start( + rpc: RpcImpl, + conf: config::rpc::Config, + ) -> Result where - VersionString: ToString + Clone + Send + 'static, - UserAgentString: ToString + Clone + Send + 'static, Mempool: tower::Service< mempool::Request, Response = mempool::Response, @@ -140,6 +114,7 @@ impl RpcServer { + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, BlockVerifierRouter: Service< zebra_consensus::Request, Response = block::Hash, @@ -150,42 +125,14 @@ impl RpcServer { + 'static, >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { - let listen_addr = config + let listen_addr = conf .listen_addr .expect("caller should make sure listen_addr is set"); - // Initialize the getblocktemplate rpc method handler - let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( - &network, - mining_config.clone(), - mempool.clone(), - state.clone(), - latest_chain_tip.clone(), - block_verifier_router, - sync_status, - address_book.clone(), - mined_block_sender, - ); - - // Initialize the rpc methods with the zebra version - let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( - 
build_version.clone(), - user_agent, - network.clone(), - config.debug_force_finished_sync, - mining_config.debug_like_zcashd, - mempool, - state, - latest_chain_tip, - address_book, - last_event, - ); - - let http_middleware_layer = if config.enable_cookie_auth { + let http_middleware_layer = if conf.enable_cookie_auth { let cookie = Cookie::default(); - cookie::write_to_disk(&cookie, &config.cookie_dir) + cookie::write_to_disk(&cookie, &conf.cookie_dir) .expect("Zebra must be able to write the auth cookie to the disk"); HttpRequestMiddlewareLayer::new(Some(cookie)) } else { @@ -198,28 +145,19 @@ impl RpcServer { .rpc_logger(1024) .layer_fn(FixRpcResponseMiddleware::new); - let server_instance = Server::builder() + let server = Server::builder() .http_only() .set_http_middleware(http_middleware) .set_rpc_middleware(rpc_middleware) .build(listen_addr) - .await - .expect("Unable to start RPC server"); - let addr = server_instance - .local_addr() - .expect("Unable to get local address"); - info!("{OPENED_RPC_ENDPOINT_MSG}{}", addr); + .await?; - let mut rpc_module = rpc_impl.into_rpc(); - rpc_module - .merge(get_block_template_rpc_impl.into_rpc()) - .unwrap(); + info!("{OPENED_RPC_ENDPOINT_MSG}{}", server.local_addr()?); - let server_task: JoinHandle> = tokio::spawn(async move { - server_instance.start(rpc_module).stopped().await; + Ok(tokio::spawn(async move { + server.start(rpc.into_rpc()).stopped().await; Ok(()) - }); - Ok((server_task, rpc_tx_queue_task_handle)) + })) } /// Shut down this RPC server, blocking the current thread. @@ -245,7 +183,7 @@ impl RpcServer { /// Shuts down this RPC server using its `close_handle`. /// /// See `shutdown_blocking()` for details. - fn shutdown_blocking_inner(close_handle: ServerHandle, config: Config) { + fn shutdown_blocking_inner(close_handle: ServerHandle, config: config::rpc::Config) { // The server is a blocking task, so it can't run inside a tokio thread. // See the note at wait_on_server. 
let span = Span::current(); diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 183f54d23b9..2422b8ab9c7 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -3,20 +3,18 @@ // These tests call functions which can take unit arguments if some features aren't enabled. #![allow(clippy::unit_arg)] +use super::super::*; +use config::rpc::Config; use std::net::{Ipv4Addr, SocketAddrV4}; - +use tokio::sync::watch; use tower::buffer::Buffer; - use zebra_chain::{ chain_sync_status::MockSyncStatus, chain_tip::NoChainTip, parameters::Network::*, }; use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::BoxError; - use zebra_test::mock_service::MockService; -use super::super::*; - /// Test that the JSON-RPC server spawns. #[tokio::test] async fn rpc_server_spawn_test() { @@ -28,7 +26,7 @@ async fn rpc_server_spawn_test() { async fn rpc_server_spawn() { let _init_guard = zebra_test::init(); - let config = Config { + let conf = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0).into()), indexer_listen_addr: None, parallel_cpu_threads: 0, @@ -45,22 +43,26 @@ async fn rpc_server_spawn() { info!("spawning RPC server..."); let (_tx, rx) = watch::channel(None); - let _rpc_server_task_handle = RpcServer::spawn( - config, + let (rpc_impl, _) = RpcImpl::new( + Mainnet, Default::default(), - "RPC server test", - "RPC server test", + false, + "RPC test", + "RPC test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), - MockAddressBookPeers::default(), NoChainTip, - Mainnet, - None, + MockAddressBookPeers::default(), rx, + None, ); + RpcServer::start(rpc_impl, conf) + .await + .expect("RPC server should start"); + info!("spawned RPC server, checking services..."); mempool.expect_no_requests().await; @@ -90,7 +92,7 @@ async fn rpc_spawn_unallocated_port(do_shutdown: 
bool) { let port = zebra_test::net::random_unallocated_port(); #[allow(unknown_lints)] #[allow(clippy::bool_to_int_with_if)] - let config = Config { + let conf = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, parallel_cpu_threads: 0, @@ -107,23 +109,25 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { info!("spawning RPC server..."); let (_tx, rx) = watch::channel(None); - let rpc_server_task_handle = RpcServer::spawn( - config, + let (rpc_impl, _) = RpcImpl::new( + Mainnet, Default::default(), - "RPC server test", - "RPC server test", + false, + "RPC test", + "RPC test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), - MockAddressBookPeers::default(), NoChainTip, - Mainnet, - None, + MockAddressBookPeers::default(), rx, - ) - .await - .expect(""); + None, + ); + + let rpc = RpcServer::start(rpc_impl, conf) + .await + .expect("server should start"); info!("spawned RPC server, checking services..."); @@ -132,23 +136,18 @@ async fn rpc_spawn_unallocated_port(do_shutdown: bool) { block_verifier_router.expect_no_requests().await; if do_shutdown { - rpc_server_task_handle.0.abort(); + rpc.abort(); } } /// Test if the RPC server will panic correctly when there is a port conflict. -/// -/// This test is sometimes unreliable on Windows, and hangs on macOS. -/// We believe this is a CI infrastructure issue, not a platform-specific issue. 
#[tokio::test] -#[should_panic(expected = "Unable to start RPC server")] -#[cfg(not(any(target_os = "windows", target_os = "macos")))] async fn rpc_server_spawn_port_conflict() { use std::time::Duration; let _init_guard = zebra_test::init(); let port = zebra_test::net::random_known_port(); - let config = Config { + let conf = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, debug_force_finished_sync: false, @@ -162,48 +161,32 @@ async fn rpc_server_spawn_port_conflict() { let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - info!("spawning RPC server 1..."); - let (_tx, rx) = watch::channel(None); - let _rpc_server_1_task_handle = RpcServer::spawn( - config.clone(), + let (rpc_impl, _) = RpcImpl::new( + Mainnet, Default::default(), - "RPC server 1 test", - "RPC server 1 test", + false, + "RPC test", + "RPC test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), - MockAddressBookPeers::default(), NoChainTip, - Mainnet, - None, + MockAddressBookPeers::default(), rx.clone(), - ) - .await; - - tokio::time::sleep(Duration::from_secs(3)).await; + None, + ); - info!("spawning conflicted RPC server 2..."); + RpcServer::start(rpc_impl.clone(), conf.clone()) + .await + .expect("RPC server should start"); - let _rpc_server_2_task_handle = RpcServer::spawn( - config, - Default::default(), - "RPC server 2 conflict test", - "RPC server 2 conflict test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - None, - rx, - ) - .await; + tokio::time::sleep(Duration::from_secs(3)).await; - info!("spawned RPC servers, checking services..."); + RpcServer::start(rpc_impl, conf) + .await + .expect_err("RPC server should not start"); 
mempool.expect_no_requests().await; state.expect_no_requests().await; diff --git a/zebra-rpc/src/tests/vectors.rs b/zebra-rpc/src/tests/vectors.rs index 7b0a6e23ba1..25da141dc75 100644 --- a/zebra-rpc/src/tests/vectors.rs +++ b/zebra-rpc/src/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed Zebra RPC serialization test vectors. -use crate::methods::{types::TransactionObject, GetBlock, GetRawTransaction}; +use crate::methods::{types::transaction::TransactionObject, GetBlock, GetRawTransaction}; #[test] pub fn test_transaction_serialization() { diff --git a/zebra-utils/src/bin/block-template-to-proposal/args.rs b/zebra-utils/src/bin/block-template-to-proposal/args.rs index 3378a784a5d..d6600dc51ac 100644 --- a/zebra-utils/src/bin/block-template-to-proposal/args.rs +++ b/zebra-utils/src/bin/block-template-to-proposal/args.rs @@ -3,8 +3,7 @@ //! For usage please refer to the program help: `block-template-to-proposal --help` use structopt::StructOpt; - -use zebra_rpc::methods::get_block_template_rpcs::get_block_template::proposal::TimeSource; +use zebra_rpc::methods::types::get_block_template::TimeSource; /// block-template-to-proposal arguments #[derive(Clone, Debug, Eq, PartialEq, StructOpt)] diff --git a/zebra-utils/src/bin/block-template-to-proposal/main.rs b/zebra-utils/src/bin/block-template-to-proposal/main.rs index 236f30d20c9..3eb17bfa5e9 100644 --- a/zebra-utils/src/bin/block-template-to-proposal/main.rs +++ b/zebra-utils/src/bin/block-template-to-proposal/main.rs @@ -14,9 +14,9 @@ use zebra_chain::{ parameters::NetworkUpgrade, serialization::{DateTime32, ZcashSerialize}, }; -use zebra_rpc::methods::get_block_template_rpcs::{ - get_block_template::proposal_block_from_template, - types::{get_block_template::GetBlockTemplate, long_poll::LONG_POLL_ID_LENGTH}, +use zebra_rpc::methods::types::{ + get_block_template::{proposal::proposal_block_from_template, GetBlockTemplate}, + long_poll::LONG_POLL_ID_LENGTH, }; use zebra_utils::init_tracing; diff --git 
a/zebra-utils/src/bin/openapi-generator/main.rs b/zebra-utils/src/bin/openapi-generator/main.rs index 15e5446d855..2e37eb374e1 100644 --- a/zebra-utils/src/bin/openapi-generator/main.rs +++ b/zebra-utils/src/bin/openapi-generator/main.rs @@ -8,6 +8,7 @@ use rand::{distributions::Alphanumeric, thread_rng, Rng}; use serde::Serialize; use syn::LitStr; +use types::{get_mining_info, submit_block, subsidy, validate_address, z_validate_address}; use zebra_rpc::methods::{trees::GetTreestate, *}; // The API server @@ -82,19 +83,10 @@ fn main() -> Result<(), Box> { let current_path = env!("CARGO_MANIFEST_DIR"); // Define the paths to the Zebra RPC methods - let paths = vec![ - ( - format!("{}/../zebra-rpc/src/methods.rs", current_path), - "Rpc", - ), - ( - format!( - "{}/../zebra-rpc/src/methods/get_block_template_rpcs.rs", - current_path - ), - "GetBlockTemplateRpc", - ), - ]; + let paths = vec![( + format!("{}/../zebra-rpc/src/methods.rs", current_path), + "Rpc", + )]; // Create an indexmap to store the method names and configuration let mut methods = IndexMap::new(); @@ -484,43 +476,31 @@ fn get_default_properties(method_name: &str) -> Result default_property(type_, items.clone(), u64::default())?, - "getblocksubsidy" => default_property( - type_, - items.clone(), - get_block_template_rpcs::types::subsidy::BlockSubsidy::default(), - )?, - "getmininginfo" => default_property( - type_, - items.clone(), - get_block_template_rpcs::types::get_mining_info::Response::default(), - )?, + "getblocksubsidy" => { + default_property(type_, items.clone(), subsidy::BlockSubsidy::default())? + } + "getmininginfo" => { + default_property(type_, items.clone(), get_mining_info::Response::default())? 
+ } "getnetworksolps" => default_property(type_, items.clone(), u64::default())?, - "submitblock" => default_property( - type_, - items.clone(), - get_block_template_rpcs::types::submit_block::Response::default(), - )?, + "submitblock" => default_property(type_, items.clone(), submit_block::Response::default())?, // util - "validateaddress" => default_property( - type_, - items.clone(), - get_block_template_rpcs::types::validate_address::Response::default(), - )?, + "validateaddress" => { + default_property(type_, items.clone(), validate_address::Response::default())? + } "z_validateaddress" => default_property( type_, items.clone(), - get_block_template_rpcs::types::z_validate_address::Response::default(), + z_validate_address::Response::default(), )?, // address "getaddressbalance" => default_property(type_, items.clone(), AddressBalance::default())?, "getaddressutxos" => default_property(type_, items.clone(), GetAddressUtxos::default())?, "getaddresstxids" => default_property(type_, items.clone(), Vec::::default())?, // network - "getpeerinfo" => default_property( - type_, - items.clone(), - get_block_template_rpcs::types::peer_info::PeerInfo::default(), - )?, + "getpeerinfo" => { + default_property(type_, items.clone(), types::peer_info::PeerInfo::default())? + } // blockchain "getdifficulty" => default_property(type_, items.clone(), f64::default())?, "getblockchaininfo" => { @@ -539,7 +519,7 @@ fn get_default_properties(method_name: &str) -> Result default_property( type_, items.clone(), - get_block_template_rpcs::types::unified_address::Response::default(), + types::unified_address::Response::default(), )?, // control "getinfo" => default_property(type_, items.clone(), GetInfo::default())?, diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 532c00e4778..1412da5ab72 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -73,21 +73,8 @@ //! //! 
Some of the diagnostic features are optional, and need to be enabled at compile-time. -use std::sync::Arc; - -use abscissa_core::{config, Command, FrameworkError}; -use color_eyre::eyre::{eyre, Report}; -use futures::FutureExt; -use tokio::{pin, select, sync::oneshot}; -use tower::{builder::ServiceBuilder, util::BoxService, ServiceExt}; -use tracing_futures::Instrument; - -use zebra_chain::block::genesis::regtest_genesis_block; -use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; -use zebra_rpc::server::RpcServer; - -use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; - +#[cfg(feature = "internal-miner")] +use crate::components; use crate::{ application::{build_version, user_agent, LAST_WARN_ERROR_LOG_SENDER}, components::{ @@ -100,6 +87,19 @@ use crate::{ config::ZebradConfig, prelude::*, }; +use abscissa_core::{config, Command, FrameworkError}; +use color_eyre::eyre::{eyre, Report}; +use futures::FutureExt; +use std::sync::Arc; +use tokio::{pin, select, sync::oneshot}; +use tower::{builder::ServiceBuilder, util::BoxService, ServiceExt}; +use tracing_futures::Instrument; +use zebra_chain::block::genesis::regtest_genesis_block; +use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; +use zebra_rpc::{ + methods::{types::submit_block::SubmitBlockChannel, RpcImpl}, + server::RpcServer, +}; /// Start the application (default command) #[derive(Command, Debug, Default, clap::Parser)] @@ -243,33 +243,29 @@ impl StartCmd { let submit_block_channel = SubmitBlockChannel::new(); // Launch RPC server - let (rpc_task_handle, mut rpc_tx_queue_task_handle) = - if let Some(listen_addr) = config.rpc.listen_addr { - info!("spawning RPC server"); - info!("Trying to open RPC endpoint at {}...", listen_addr,); - let rpc_task_handle = RpcServer::spawn( - config.rpc.clone(), - config.mining.clone(), - build_version(), - user_agent(), - mempool.clone(), - read_only_state_service.clone(), - 
block_verifier_router.clone(), - sync_status.clone(), - address_book.clone(), - latest_chain_tip.clone(), - config.network.network.clone(), - Some(submit_block_channel.sender()), - LAST_WARN_ERROR_LOG_SENDER.subscribe(), - ); - rpc_task_handle.await.unwrap() - } else { - info!("configure a listen_addr to start the RPC server"); - ( - tokio::spawn(std::future::pending().in_current_span()), - tokio::spawn(std::future::pending().in_current_span()), - ) - }; + let (rpc_impl, mut rpc_tx_queue_handle) = RpcImpl::new( + config.network.network.clone(), + config.mining.clone(), + config.rpc.debug_force_finished_sync, + build_version(), + user_agent(), + mempool.clone(), + read_only_state_service.clone(), + block_verifier_router.clone(), + sync_status.clone(), + latest_chain_tip.clone(), + address_book.clone(), + LAST_WARN_ERROR_LOG_SENDER.subscribe(), + Some(submit_block_channel.sender()), + ); + + let rpc_task_handle = if config.rpc.listen_addr.is_some() { + RpcServer::start(rpc_impl.clone(), config.rpc.clone()) + .await + .expect("server should start") + } else { + tokio::spawn(std::future::pending().in_current_span()) + }; // TODO: Add a shutdown signal and start the server with `serve_with_incoming_shutdown()` if // any related unit tests sometimes crash with memory errors @@ -385,20 +381,7 @@ impl StartCmd { #[cfg(feature = "internal-miner")] let miner_task_handle = if config.mining.is_internal_miner_enabled() { info!("spawning Zcash miner"); - - let rpc = zebra_rpc::methods::get_block_template_rpcs::GetBlockTemplateRpcImpl::new( - &config.network.network, - config.mining.clone(), - mempool, - read_only_state_service, - latest_chain_tip, - block_verifier_router, - sync_status, - address_book, - Some(submit_block_channel.sender()), - ); - - crate::components::miner::spawn_init(&config.network.network, &config.mining, rpc) + components::miner::spawn_init(&config.network.network, &config.mining, rpc_impl) } else { tokio::spawn(std::future::pending().in_current_span()) }; 
@@ -447,7 +430,7 @@ impl StartCmd { Ok(()) } - rpc_tx_queue_result = &mut rpc_tx_queue_task_handle => { + rpc_tx_queue_result = &mut rpc_tx_queue_handle => { rpc_tx_queue_result .expect("unexpected panic in the rpc transaction queue task"); info!("rpc transaction queue task exited"); @@ -537,7 +520,7 @@ impl StartCmd { // ongoing tasks rpc_task_handle.abort(); - rpc_tx_queue_task_handle.abort(); + rpc_tx_queue_handle.abort(); syncer_task_handle.abort(); block_gossip_task_handle.abort(); mempool_crawler_task_handle.abort(); @@ -551,7 +534,9 @@ impl StartCmd { state_checkpoint_verify_handle.abort(); old_databases_task_handle.abort(); - info!("exiting Zebra: all tasks have been asked to stop, waiting for remaining tasks to finish"); + info!( + "exiting Zebra: all tasks have been asked to stop, waiting for remaining tasks to finish" + ); exit_status } diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 965e8481ad3..82f91c34dbe 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -1,12 +1,21 @@ //! Inbound service tests with a fake peer set. 
-use std::{collections::HashSet, iter, net::SocketAddr, str::FromStr, sync::Arc, time::Duration}; - +use crate::{ + components::{ + inbound::{downloads::MAX_INBOUND_CONCURRENCY, Inbound, InboundSetupData}, + mempool::{ + gossip_mempool_transaction_id, Config as MempoolConfig, Mempool, MempoolError, + SameEffectsChainRejectionError, UnboxMempoolError, + }, + sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, + }, + BoxError, +}; use futures::FutureExt; +use std::{collections::HashSet, iter, net::SocketAddr, str::FromStr, sync::Arc, time::Duration}; use tokio::{sync::oneshot, task::JoinHandle, time::timeout}; use tower::{buffer::Buffer, builder::ServiceBuilder, util::BoxService, Service, ServiceExt}; use tracing::{Instrument, Span}; - use zebra_chain::{ amount::Amount, block::{Block, Height}, @@ -24,22 +33,9 @@ use zebra_network::{ AddressBook, InventoryResponse, Request, Response, }; use zebra_node_services::mempool; -use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; +use zebra_rpc::methods::types::submit_block::SubmitBlockChannel; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; - -use crate::{ - components::{ - inbound::{downloads::MAX_INBOUND_CONCURRENCY, Inbound, InboundSetupData}, - mempool::{ - gossip_mempool_transaction_id, Config as MempoolConfig, Mempool, MempoolError, - SameEffectsChainRejectionError, UnboxMempoolError, - }, - sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, - }, - BoxError, -}; - use InventoryResponse::*; /// Maximum time to wait for a network service request. 
@@ -848,8 +844,7 @@ async fn caches_getaddr_response() { }; assert_eq!( - peers, - first_result, + peers, first_result, "inbound service should return the same result for every Peers request until the refresh time", ); } diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 4195ede43ad..7a05c24c26e 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -21,7 +21,7 @@ use zebra_network::{ Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, }; use zebra_node_services::mempool; -use zebra_rpc::methods::get_block_template_rpcs::types::submit_block::SubmitBlockChannel; +use zebra_rpc::methods::types::submit_block::SubmitBlockChannel; use zebra_state::Config as StateConfig; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -184,10 +184,10 @@ async fn inbound_block_empty_state_notfound() -> Result<(), crate::BoxError> { assert_eq!(missing_error.inner_debug(), expected.inner_debug()); } else { unreachable!( - "peer::Connection should map missing `BlocksByHash` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ + "peer::Connection should map missing `BlocksByHash` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ actual result: {:?}", - response - ) + response + ) }; let block_gossip_result = block_gossip_task_handle.now_or_never(); @@ -303,10 +303,10 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> { } } else { unreachable!( - "peer::Connection should map missing `TransactionsById` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ + "peer::Connection should map missing `TransactionsById` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ actual result: {:?}", - response - ) + response + ) }; } @@ -420,10 +420,10 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError } } else { 
unreachable!( - "peer::Connection should map missing `TransactionsById` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ + "peer::Connection should map missing `TransactionsById` responses as `Err(SharedPeerError(NotFoundResponse(_)))`, \ actual result: {:?}", - response - ) + response + ) }; // The peer set only does routing for single-transaction requests. @@ -798,6 +798,7 @@ mod submitblock_test { use std::sync::{Arc, Mutex}; use tracing::{Instrument, Level}; use tracing_subscriber::fmt; + use zebra_rpc::methods::types::submit_block::SubmitBlockChannel; use super::*; diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs index c841720d083..25903dd48c0 100644 --- a/zebrad/src/components/miner.rs +++ b/zebrad/src/components/miner.rs @@ -6,15 +6,13 @@ //! //! - move common code into zebra-chain or zebra-node-services and remove the RPC dependency. -use std::{cmp::min, sync::Arc, thread::available_parallelism, time::Duration}; - use color_eyre::Report; use futures::{stream::FuturesUnordered, StreamExt}; +use std::{cmp::min, sync::Arc, thread::available_parallelism, time::Duration}; use thread_priority::{ThreadBuilder, ThreadPriority}; use tokio::{select, sync::watch, task::JoinHandle, time::sleep}; use tower::Service; use tracing::{Instrument, Span}; - use zebra_chain::{ block::{self, Block, Height}, chain_sync_status::ChainSyncStatus, @@ -30,12 +28,15 @@ use zebra_node_services::mempool; use zebra_rpc::{ config::mining::Config, methods::{ - get_block_template_rpcs::get_block_template::{ - self, proposal::TimeSource, proposal_block_from_template, - GetBlockTemplateCapability::*, GetBlockTemplateRequestMode::*, - }, hex_data::HexData, - GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, + types::get_block_template::{ + self, + parameters::GetBlockTemplateCapability::{CoinbaseTxn, LongPoll}, + proposal::proposal_block_from_template, + GetBlockTemplateRequestMode::Template, + TimeSource, + }, + RpcImpl, RpcServer, }, }; use 
zebra_state::WatchReceiver; @@ -58,10 +59,10 @@ pub const BLOCK_MINING_WAIT_TIME: Duration = Duration::from_secs(3); /// mining thread. /// /// See [`run_mining_solver()`] for more details. -pub fn spawn_init( +pub fn spawn_init( network: &Network, config: &Config, - rpc: GetBlockTemplateRpcImpl, + rpc: RpcImpl, ) -> JoinHandle> // TODO: simplify or avoid repeating these generics (how?) where @@ -109,7 +110,7 @@ where pub async fn init( network: Network, _config: Config, - rpc: GetBlockTemplateRpcImpl, + rpc: RpcImpl, ) -> Result<(), Report> where Mempool: Service< @@ -219,7 +220,7 @@ pub async fn generate_block_templates< AddressBook, >( network: Network, - rpc: GetBlockTemplateRpcImpl, + rpc: RpcImpl, template_sender: watch::Sender>>, ) -> Result<(), Report> where @@ -330,7 +331,7 @@ where pub async fn run_mining_solver( solver_id: u8, mut template_receiver: WatchReceiver>>, - rpc: GetBlockTemplateRpcImpl, + rpc: RpcImpl, ) -> Result<(), Report> where Mempool: Service< diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs index ae36c100bbc..f65b068c55d 100644 --- a/zebrad/src/config.rs +++ b/zebrad/src/config.rs @@ -49,7 +49,7 @@ pub struct ZebradConfig { pub mempool: crate::components::mempool::Config, /// RPC configuration - pub rpc: zebra_rpc::config::Config, + pub rpc: zebra_rpc::config::rpc::Config, /// Mining configuration pub mining: zebra_rpc::config::mining::Config, diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index d65ee68c5f9..1d382fbe351 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -170,7 +170,20 @@ use zebra_chain::{ }; use zebra_consensus::ParameterCheckpoint; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_rpc::server::OPENED_RPC_ENDPOINT_MSG; +use zebra_rpc::{ + methods::{ + types::{ + get_block_template::{ + self, fetch_state_tip_and_local_time, generate_coinbase_and_roots, + proposal::proposal_block_from_template, GetBlockTemplate, + GetBlockTemplateRequestMode, + }, + 
submit_block::{self, SubmitBlockChannel}, + }, + RpcImpl, RpcServer, + }, + server::OPENED_RPC_ENDPOINT_MSG, +}; use zebra_state::{constants::LOCK_FILE_ERROR, state_database_format_version_in_code}; #[cfg(not(target_os = "windows"))] @@ -2261,7 +2274,7 @@ fn zebra_rpc_conflict() -> Result<()> { // But they will have different Zcash listeners (auto port) and states (ephemeral) let dir2 = testdir()?.with_config(&mut config)?; - check_config_conflict(dir1, regex1.as_str(), dir2, "Unable to start RPC server")?; + check_config_conflict(dir1, regex1.as_str(), dir2, "Address already in use")?; Ok(()) } @@ -3248,21 +3261,9 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { }; use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::mempool; - use zebra_rpc::methods::{ - get_block_template_rpcs::{ - get_block_template::{ - fetch_state_tip_and_local_time, generate_coinbase_and_roots, - proposal_block_from_template, GetBlockTemplate, GetBlockTemplateRequestMode, - }, - types::{ - get_block_template, - submit_block::{self, SubmitBlockChannel}, - }, - }, - hex_data::HexData, - GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, - }; + use zebra_rpc::methods::hex_data::HexData; use zebra_test::mock_service::MockService; + let _init_guard = zebra_test::init(); tracing::info!("running nu6_funding_streams_and_coinbase_balance test"); @@ -3300,12 +3301,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let (state, read_state, latest_chain_tip, _chain_tip_change) = zebra_state::init_test_services(&network); - let ( - block_verifier_router, - _transaction_verifier, - _parameter_download_task_handle, - _max_checkpoint_height, - ) = zebra_consensus::router::init_test( + let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), &network, state.clone(), @@ -3328,15 +3324,21 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let submitblock_channel 
= SubmitBlockChannel::new(); - let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( - &network, + let (_tx, rx) = tokio::sync::watch::channel(None); + + let (rpc, _) = RpcImpl::new( + network, mining_config, + false, + "0.0.1", + "Zebra tests", mempool.clone(), read_state.clone(), - latest_chain_tip, block_verifier_router, mock_sync_status, + latest_chain_tip, MockAddressBookPeers::default(), + rx, Some(submitblock_channel.sender()), ); @@ -3352,27 +3354,28 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { }); }; - let block_template_fut = get_block_template_rpc_impl.get_block_template(None); + let block_template_fut = rpc.get_block_template(None); let mock_mempool_request_handler = make_mock_mempool_request_handler.clone()(); let (block_template, _) = tokio::join!(block_template_fut, mock_mempool_request_handler); let get_block_template::Response::TemplateMode(block_template) = block_template.expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) }; let proposal_block = proposal_block_from_template(&block_template, None, NetworkUpgrade::Nu6)?; let hex_proposal_block = HexData(proposal_block.zcash_serialize_to_vec()?); // Check that the block template is a valid block proposal - let get_block_template::Response::ProposalMode(block_proposal_result) = - get_block_template_rpc_impl - .get_block_template(Some(get_block_template::JsonParameters { - mode: GetBlockTemplateRequestMode::Proposal, - data: Some(hex_proposal_block), - ..Default::default() - })) - .await? 
+ let get_block_template::Response::ProposalMode(block_proposal_result) = rpc + .get_block_template(Some(get_block_template::JsonParameters { + mode: GetBlockTemplateRequestMode::Proposal, + data: Some(hex_proposal_block), + ..Default::default() + })) + .await? else { panic!( "this getblocktemplate call should return the `ProposalMode` variant of the response" @@ -3385,7 +3388,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { ); // Submit the same block - let submit_block_response = get_block_template_rpc_impl + let submit_block_response = rpc .submit_block(HexData(proposal_block.zcash_serialize_to_vec()?), None) .await?; @@ -3428,13 +3431,15 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { }; // Gets the next block template - let block_template_fut = get_block_template_rpc_impl.get_block_template(None); + let block_template_fut = rpc.get_block_template(None); let mock_mempool_request_handler = make_mock_mempool_request_handler.clone()(); let (block_template, _) = tokio::join!(block_template_fut, mock_mempool_request_handler); let get_block_template::Response::TemplateMode(block_template) = block_template.expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + panic!( + "this getblocktemplate call without parameters should return the `TemplateMode` variant of the response" + ) }; let valid_original_block_template = block_template.clone(); @@ -3473,7 +3478,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let proposal_block = proposal_block_from_template(&block_template, None, NetworkUpgrade::Nu6)?; // Submit the invalid block with an excessive coinbase output value - let submit_block_response = get_block_template_rpc_impl + let submit_block_response = rpc .submit_block(HexData(proposal_block.zcash_serialize_to_vec()?), None) .await?; @@ -3516,7 +3521,7 @@ async fn 
nu6_funding_streams_and_coinbase_balance() -> Result<()> { let proposal_block = proposal_block_from_template(&block_template, None, NetworkUpgrade::Nu6)?; // Submit the invalid block with an excessive coinbase input value - let submit_block_response = get_block_template_rpc_impl + let submit_block_response = rpc .submit_block(HexData(proposal_block.zcash_serialize_to_vec()?), None) .await?; @@ -3531,7 +3536,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { // Check that the original block template can be submitted successfully let proposal_block = proposal_block_from_template(&valid_original_block_template, None, NetworkUpgrade::Nu6)?; - let submit_block_response = get_block_template_rpc_impl + let submit_block_response = rpc .submit_block(HexData(proposal_block.zcash_serialize_to_vec()?), None) .await?; diff --git a/zebrad/tests/common/failure_messages.rs b/zebrad/tests/common/failure_messages.rs index 6d41ab024bb..9dcc9cda16c 100644 --- a/zebrad/tests/common/failure_messages.rs +++ b/zebrad/tests/common/failure_messages.rs @@ -25,7 +25,7 @@ pub const ZEBRA_FAILURE_MESSAGES: &[&str] = &[ // Rust-specific panics "The application panicked", // RPC port errors - "Unable to start RPC server", + "Address already in use", // TODO: disable if this actually happens during test zebrad shutdown "Stopping RPC endpoint", // Missing RPCs in zebrad logs (this log is from PR #3860) @@ -135,7 +135,7 @@ pub const ZEBRA_CHECKPOINTS_FAILURE_MESSAGES: &[&str] = &[ // Rust-specific panics "The application panicked", // RPC port errors - "Unable to start RPC server", + "Address already in use", // RPC argument errors: parsing and data // // These logs are produced by jsonrpc_core inside Zebra, diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index daee319b38b..41de4a84e30 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ 
b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -5,28 +5,22 @@ //! //! After finishing the sync, it will call getblocktemplate. -use std::time::Duration; - +use crate::common::{ + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, + sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, + test_type::TestType, +}; use color_eyre::eyre::{eyre, Context, Result}; - use futures::FutureExt; - +use std::time::Duration; use zebra_chain::{ parameters::{Network, NetworkUpgrade}, serialization::ZcashSerialize, }; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_rpc::methods::get_block_template_rpcs::{ - get_block_template::{ - proposal::TimeSource, GetBlockTemplate, JsonParameters, ProposalResponse, - }, - types::get_block_template::proposal_block_from_template, -}; - -use crate::common::{ - launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, - sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, - test_type::TestType, +use zebra_rpc::methods::types::get_block_template::{ + proposal::proposal_block_from_template, GetBlockTemplate, JsonParameters, ProposalResponse, + TimeSource, }; /// Delay between getting block proposal results and cancelling long poll requests. 
diff --git a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs index 53494b95f38..3a3f8426f9d 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs @@ -4,7 +4,7 @@ use color_eyre::eyre::{eyre, Context, Result}; use zebra_chain::parameters::Network; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; +use zebra_rpc::methods::types::peer_info::PeerInfo; use crate::common::{ launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index 5134e79bc65..6002988fc62 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -3,12 +3,14 @@ //! This test will get block templates via the `getblocktemplate` RPC method and submit them as new blocks //! via the `submitblock` RPC method on Regtest. 
-use std::{net::SocketAddr, sync::Arc, time::Duration}; - +use crate::common::{ + config::{os_assigned_rpc_port_config, read_listen_addr_from_logs, testdir}, + launch::ZebradTestDirExt, +}; use color_eyre::eyre::{eyre, Context, Result}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use tower::BoxError; use tracing::*; - use zebra_chain::{ block::{Block, Height}, parameters::{testnet::REGTEST_NU5_ACTIVATION_HEIGHT, Network, NetworkUpgrade}, @@ -18,23 +20,18 @@ use zebra_chain::{ use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::{ methods::{ - get_block_template_rpcs::{ + hex_data::HexData, + types::{ get_block_template::{ - proposal::TimeSource, proposal_block_from_template, GetBlockTemplate, + proposal::proposal_block_from_template, GetBlockTemplate, TimeSource, }, - types::submit_block, + submit_block, }, - hex_data::HexData, }, server::{self, OPENED_RPC_ENDPOINT_MSG}, }; use zebra_test::args; -use crate::common::{ - config::{os_assigned_rpc_port_config, read_listen_addr_from_logs, testdir}, - launch::ZebradTestDirExt, -}; - /// Number of blocks that should be submitted before the test is considered successful. 
const NUM_BLOCKS_TO_SUBMIT: usize = 200; From 67bfc941fee4cd5fada0b223811138cdfe9243a3 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 14 May 2025 20:36:05 +0200 Subject: [PATCH 174/245] fix(rpc): Regroup imports (#9521) * Regroup imports in `.../tests/common/regtest.rs` * Regroup imports in `.../methods/tests/prop.rs` * Regroup imports in `.../get_block_template.rs` * Regroup imports in `.../zip317.rs` * Regroup imports in `.../get_blockchain_info.rs` * Regroup imports in `.../types/get_raw_mempool.rs` * Regroup imports in `.../types/transaction.rs` * Regroup imports in `.../server/tests/vectors.rs` * Regroup imports in `.../args.rs` * Regroup imports in `.../main.rs` * Regroup imports in `.../openapi-generator/main.rs` * Regroup imports in `zebrad/src/commands/start.rs` * Regroup imports in `.../tests/fake_peer_set.rs` * Regroup imports in `.../components/miner.rs` * Regroup imports in `.../tests/acceptance.rs` * Regroup imports in `.../get_block_template.rs` --- zebra-rpc/src/methods/tests/prop.rs | 31 +++++++------- .../src/methods/types/get_block_template.rs | 41 ++++++++++--------- .../types/get_block_template/zip317.rs | 16 +++++--- .../src/methods/types/get_blockchain_info.rs | 1 + .../src/methods/types/get_raw_mempool.rs | 6 +-- zebra-rpc/src/methods/types/transaction.rs | 10 +++-- zebra-rpc/src/server/tests/vectors.rs | 8 +++- .../bin/block-template-to-proposal/args.rs | 1 + .../bin/block-template-to-proposal/main.rs | 4 +- zebra-utils/src/bin/openapi-generator/main.rs | 3 +- zebrad/src/commands/start.rs | 34 ++++++++------- .../components/inbound/tests/fake_peer_set.rs | 28 +++++++------ zebrad/src/components/miner.rs | 4 +- zebrad/tests/acceptance.rs | 18 ++++---- .../get_block_template.rs | 15 ++++--- zebrad/tests/common/regtest.rs | 13 +++--- 16 files changed, 132 insertions(+), 101 deletions(-) diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 74d9ff89a25..4611f3aea7f 100644 --- 
a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -1,28 +1,19 @@ //! Randomised property tests for RPC methods. -use crate::methods::{ - self, - types::{ - get_blockchain_info, - get_raw_mempool::{GetRawMempool, MempoolObject}, - }, +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, }; -use super::super::{ - AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, -}; use futures::{join, FutureExt, TryFutureExt}; use hex::{FromHex, ToHex}; use jsonrpsee_types::{ErrorCode, ErrorObject}; use proptest::{collection::vec, prelude::*}; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - sync::Arc, -}; use thiserror::Error; use tokio::sync::oneshot; use tower::buffer::Buffer; + use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, Height}, @@ -41,6 +32,18 @@ use zebra_node_services::mempool; use zebra_state::{BoxError, GetBlockTemplateChainInfo}; use zebra_test::mock_service::MockService; +use crate::methods::{ + self, + types::{ + get_blockchain_info, + get_raw_mempool::{GetRawMempool, MempoolObject}, + }, +}; + +use super::super::{ + AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, +}; + proptest! { /// Test that when sending a raw transaction, it is received by the mempool service. 
#[test] diff --git a/zebra-rpc/src/methods/types/get_block_template.rs b/zebra-rpc/src/methods/types/get_block_template.rs index 60e7439f2eb..3150cd4686a 100644 --- a/zebra-rpc/src/methods/types/get_block_template.rs +++ b/zebra-rpc/src/methods/types/get_block_template.rs @@ -5,30 +5,13 @@ pub mod parameters; pub mod proposal; pub mod zip317; -pub use constants::{ - CAPABILITIES_FIELD, DEFAULT_SOLUTION_RATE_WINDOW_SIZE, - MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, MEMPOOL_LONG_POLL_INTERVAL, MUTABLE_FIELD, - NONCE_RANGE_FIELD, NOT_SYNCED_ERROR_CODE, ZCASHD_FUNDING_STREAM_ORDER, -}; -pub use parameters::{GetBlockTemplateRequestMode, JsonParameters}; -pub use proposal::{ProposalResponse, TimeSource}; +use std::{collections::HashMap, fmt, iter, sync::Arc}; -use crate::{ - config, - methods::{ - types::{ - default_roots::DefaultRoots, long_poll::LongPollId, submit_block, - transaction::TransactionTemplate, - }, - GetBlockHash, - }, - server::error::OkOrError, -}; use jsonrpsee::core::RpcResult; use jsonrpsee_types::{ErrorCode, ErrorObject}; -use std::{collections::HashMap, fmt, iter, sync::Arc}; use tokio::sync::watch::{self, error::SendError}; use tower::{Service, ServiceExt}; + use zebra_chain::{ amount::{self, Amount, NegativeOrZero, NonNegative}, block::{ @@ -53,6 +36,26 @@ use zebra_consensus::{ use zebra_node_services::mempool::{self, TransactionDependencies}; use zebra_state::GetBlockTemplateChainInfo; +use crate::{ + config, + methods::{ + types::{ + default_roots::DefaultRoots, long_poll::LongPollId, submit_block, + transaction::TransactionTemplate, + }, + GetBlockHash, + }, + server::error::OkOrError, +}; + +pub use constants::{ + CAPABILITIES_FIELD, DEFAULT_SOLUTION_RATE_WINDOW_SIZE, + MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, MEMPOOL_LONG_POLL_INTERVAL, MUTABLE_FIELD, + NONCE_RANGE_FIELD, NOT_SYNCED_ERROR_CODE, ZCASHD_FUNDING_STREAM_ORDER, +}; +pub use parameters::{GetBlockTemplateRequestMode, JsonParameters}; +pub use proposal::{ProposalResponse, 
TimeSource}; + /// An alias to indicate that a usize value represents the depth of in-block dependencies of a /// transaction. /// diff --git a/zebra-rpc/src/methods/types/get_block_template/zip317.rs b/zebra-rpc/src/methods/types/get_block_template/zip317.rs index 111f7a14b19..4e6cb9dd172 100644 --- a/zebra-rpc/src/methods/types/get_block_template/zip317.rs +++ b/zebra-rpc/src/methods/types/get_block_template/zip317.rs @@ -6,16 +6,13 @@ //! > when computing `size_target`, since there is no consensus requirement for this to be //! > exactly the same between implementations. -#[cfg(test)] -use crate::methods::types::get_block_template::InBlockTxDependenciesDepth; -use crate::methods::{ - get_block_template::generate_coinbase_transaction, types::transaction::TransactionTemplate, -}; +use std::collections::{HashMap, HashSet}; + use rand::{ distributions::{Distribution, WeightedIndex}, prelude::thread_rng, }; -use std::collections::{HashMap, HashSet}; + use zebra_chain::{ amount::NegativeOrZero, block::{Height, MAX_BLOCK_BYTES}, @@ -26,9 +23,16 @@ use zebra_chain::{ use zebra_consensus::MAX_BLOCK_SIGOPS; use zebra_node_services::mempool::TransactionDependencies; +use crate::methods::{ + get_block_template::generate_coinbase_transaction, types::transaction::TransactionTemplate, +}; + #[cfg(test)] mod tests; +#[cfg(test)] +use crate::methods::types::get_block_template::InBlockTxDependenciesDepth; + /// Used in the return type of [`select_mempool_transactions()`] for test compilations. 
#[cfg(test)] type SelectedMempoolTx = (InBlockTxDependenciesDepth, VerifiedUnminedTx); diff --git a/zebra-rpc/src/methods/types/get_blockchain_info.rs b/zebra-rpc/src/methods/types/get_blockchain_info.rs index 6ac392ff501..8ce65b73187 100644 --- a/zebra-rpc/src/methods/types/get_blockchain_info.rs +++ b/zebra-rpc/src/methods/types/get_blockchain_info.rs @@ -4,6 +4,7 @@ use zebra_chain::{ amount::{Amount, NonNegative}, value_balance::ValueBalance, }; + use zec::Zec; use super::*; diff --git a/zebra-rpc/src/methods/types/get_raw_mempool.rs b/zebra-rpc/src/methods/types/get_raw_mempool.rs index 76e13f7469f..487be865e3b 100644 --- a/zebra-rpc/src/methods/types/get_raw_mempool.rs +++ b/zebra-rpc/src/methods/types/get_raw_mempool.rs @@ -1,12 +1,10 @@ //! Types used in `getrawmempool` RPC method. -use std::collections::HashMap; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use hex::ToHex as _; -use zebra_chain::transaction::VerifiedUnminedTx; -use zebra_chain::{amount::NonNegative, block::Height}; +use zebra_chain::{amount::NonNegative, block::Height, transaction::VerifiedUnminedTx}; use zebra_node_services::mempool::TransactionDependencies; use super::zec::Zec; diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index f1dab1cff97..a264050b306 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -1,13 +1,13 @@ //! Transaction-related types. 
-use super::zec::Zec; +use std::sync::Arc; + use chrono::{DateTime, Utc}; use hex::ToHex; -use std::sync::Arc; + use zebra_chain::{ amount::{self, Amount, NegativeOrZero, NonNegative}, - block, - block::merkle::AUTH_DIGEST_PLACEHOLDER, + block::{self, merkle::AUTH_DIGEST_PLACEHOLDER}, parameters::Network, sapling::NotSmallOrderValueCommitment, transaction::{self, SerializedTransaction, Transaction, UnminedTx, VerifiedUnminedTx}, @@ -17,6 +17,8 @@ use zebra_consensus::groth16::Description; use zebra_script::CachedFfiTransaction; use zebra_state::IntoDisk; +use super::zec::Zec; + /// Transaction data and fields needed to generate blocks using the `getblocktemplate` RPC. #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(bound = "FeeConstraint: amount::Constraint + Clone")] diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 2422b8ab9c7..8704205cdda 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -3,11 +3,11 @@ // These tests call functions which can take unit arguments if some features aren't enabled. #![allow(clippy::unit_arg)] -use super::super::*; -use config::rpc::Config; use std::net::{Ipv4Addr, SocketAddrV4}; + use tokio::sync::watch; use tower::buffer::Buffer; + use zebra_chain::{ chain_sync_status::MockSyncStatus, chain_tip::NoChainTip, parameters::Network::*, }; @@ -15,6 +15,10 @@ use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::BoxError; use zebra_test::mock_service::MockService; +use super::super::*; + +use config::rpc::Config; + /// Test that the JSON-RPC server spawns. 
#[tokio::test] async fn rpc_server_spawn_test() { diff --git a/zebra-utils/src/bin/block-template-to-proposal/args.rs b/zebra-utils/src/bin/block-template-to-proposal/args.rs index d6600dc51ac..2f61da10367 100644 --- a/zebra-utils/src/bin/block-template-to-proposal/args.rs +++ b/zebra-utils/src/bin/block-template-to-proposal/args.rs @@ -3,6 +3,7 @@ //! For usage please refer to the program help: `block-template-to-proposal --help` use structopt::StructOpt; + use zebra_rpc::methods::types::get_block_template::TimeSource; /// block-template-to-proposal arguments diff --git a/zebra-utils/src/bin/block-template-to-proposal/main.rs b/zebra-utils/src/bin/block-template-to-proposal/main.rs index 3eb17bfa5e9..6d9731ff2b2 100644 --- a/zebra-utils/src/bin/block-template-to-proposal/main.rs +++ b/zebra-utils/src/bin/block-template-to-proposal/main.rs @@ -4,6 +4,8 @@ //! //! For usage please refer to the program help: `block-template-to-proposal --help` +mod args; + use std::io::Read; use color_eyre::eyre::Result; @@ -20,8 +22,6 @@ use zebra_rpc::methods::types::{ }; use zebra_utils::init_tracing; -mod args; - /// The minimum number of characters in a valid `getblocktemplate JSON response. /// /// The fields we use take up around ~800 bytes. 
diff --git a/zebra-utils/src/bin/openapi-generator/main.rs b/zebra-utils/src/bin/openapi-generator/main.rs index 2e37eb374e1..ecd398177de 100644 --- a/zebra-utils/src/bin/openapi-generator/main.rs +++ b/zebra-utils/src/bin/openapi-generator/main.rs @@ -8,9 +8,10 @@ use rand::{distributions::Alphanumeric, thread_rng, Rng}; use serde::Serialize; use syn::LitStr; -use types::{get_mining_info, submit_block, subsidy, validate_address, z_validate_address}; use zebra_rpc::methods::{trees::GetTreestate, *}; +use types::{get_mining_info, submit_block, subsidy, validate_address, z_validate_address}; + // The API server const SERVER: &str = "http://localhost:8232"; diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 1412da5ab72..e0c9a4a8a07 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -73,27 +73,15 @@ //! //! Some of the diagnostic features are optional, and need to be enabled at compile-time. -#[cfg(feature = "internal-miner")] -use crate::components; -use crate::{ - application::{build_version, user_agent, LAST_WARN_ERROR_LOG_SENDER}, - components::{ - inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, - mempool::{self, Mempool}, - sync::{self, show_block_chain_progress, VERIFICATION_PIPELINE_SCALING_MULTIPLIER}, - tokio::{RuntimeRun, TokioComponent}, - ChainSync, Inbound, - }, - config::ZebradConfig, - prelude::*, -}; +use std::sync::Arc; + use abscissa_core::{config, Command, FrameworkError}; use color_eyre::eyre::{eyre, Report}; use futures::FutureExt; -use std::sync::Arc; use tokio::{pin, select, sync::oneshot}; use tower::{builder::ServiceBuilder, util::BoxService, ServiceExt}; use tracing_futures::Instrument; + use zebra_chain::block::genesis::regtest_genesis_block; use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; use zebra_rpc::{ @@ -101,6 +89,22 @@ use zebra_rpc::{ server::RpcServer, }; +use crate::{ + application::{build_version, user_agent, 
LAST_WARN_ERROR_LOG_SENDER}, + components::{ + inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, + mempool::{self, Mempool}, + sync::{self, show_block_chain_progress, VERIFICATION_PIPELINE_SCALING_MULTIPLIER}, + tokio::{RuntimeRun, TokioComponent}, + ChainSync, Inbound, + }, + config::ZebradConfig, + prelude::*, +}; + +#[cfg(feature = "internal-miner")] +use crate::components; + /// Start the application (default command) #[derive(Command, Debug, Default, clap::Parser)] pub struct StartCmd { diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 82f91c34dbe..47174241d76 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -1,21 +1,12 @@ //! Inbound service tests with a fake peer set. -use crate::{ - components::{ - inbound::{downloads::MAX_INBOUND_CONCURRENCY, Inbound, InboundSetupData}, - mempool::{ - gossip_mempool_transaction_id, Config as MempoolConfig, Mempool, MempoolError, - SameEffectsChainRejectionError, UnboxMempoolError, - }, - sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, - }, - BoxError, -}; -use futures::FutureExt; use std::{collections::HashSet, iter, net::SocketAddr, str::FromStr, sync::Arc, time::Duration}; + +use futures::FutureExt; use tokio::{sync::oneshot, task::JoinHandle, time::timeout}; use tower::{buffer::Buffer, builder::ServiceBuilder, util::BoxService, Service, ServiceExt}; use tracing::{Instrument, Span}; + use zebra_chain::{ amount::Amount, block::{Block, Height}, @@ -36,6 +27,19 @@ use zebra_node_services::mempool; use zebra_rpc::methods::types::submit_block::SubmitBlockChannel; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; + +use crate::{ + components::{ + inbound::{downloads::MAX_INBOUND_CONCURRENCY, Inbound, InboundSetupData}, + mempool::{ + 
gossip_mempool_transaction_id, Config as MempoolConfig, Mempool, MempoolError, + SameEffectsChainRejectionError, UnboxMempoolError, + }, + sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, + }, + BoxError, +}; + use InventoryResponse::*; /// Maximum time to wait for a network service request. diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs index 25903dd48c0..23d2ef4a10b 100644 --- a/zebrad/src/components/miner.rs +++ b/zebrad/src/components/miner.rs @@ -6,13 +6,15 @@ //! //! - move common code into zebra-chain or zebra-node-services and remove the RPC dependency. +use std::{cmp::min, sync::Arc, thread::available_parallelism, time::Duration}; + use color_eyre::Report; use futures::{stream::FuturesUnordered, StreamExt}; -use std::{cmp::min, sync::Arc, thread::available_parallelism, time::Duration}; use thread_priority::{ThreadBuilder, ThreadPriority}; use tokio::{select, sync::watch, task::JoinHandle, time::sleep}; use tower::Service; use tracing::{Instrument, Span}; + use zebra_chain::{ block::{self, Block, Height}, chain_sync_status::ChainSyncStatus, diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 1d382fbe351..68cda2b9a1f 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -148,6 +148,8 @@ //! export TMPDIR=/path/to/disk/directory //! 
``` +mod common; + use std::{ cmp::Ordering, collections::HashSet, @@ -185,22 +187,22 @@ use zebra_rpc::{ server::OPENED_RPC_ENDPOINT_MSG, }; use zebra_state::{constants::LOCK_FILE_ERROR, state_database_format_version_in_code}; - -#[cfg(not(target_os = "windows"))] -use zebra_network::constants::PORT_IN_USE_ERROR; - use zebra_test::{ args, command::{to_regex::CollectRegexSet, ContextFrom}, prelude::*, }; +#[cfg(not(target_os = "windows"))] +use zebra_network::constants::PORT_IN_USE_ERROR; #[cfg(not(target_os = "windows"))] use zebra_test::net::random_known_port; -mod common; - use common::{ + cached_state::{ + wait_for_state_version_message, wait_for_state_version_upgrade, + DATABASE_FORMAT_UPGRADE_IS_LONG, + }, check::{is_zebrad_version, EphemeralCheck, EphemeralConfig}, config::{ config_file_full_path, configs_dir, default_test_config, external_address_test_config, @@ -221,10 +223,6 @@ use common::{ test_type::TestType::{self, *}, }; -use crate::common::cached_state::{ - wait_for_state_version_message, wait_for_state_version_upgrade, DATABASE_FORMAT_UPGRADE_IS_LONG, -}; - /// The maximum amount of time that we allow the creation of a future to block the `tokio` executor. /// /// This should be larger than the amount of time between thread time slices on a busy test VM. diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index 41de4a84e30..4ca5b6b0a6e 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -5,14 +5,11 @@ //! //! After finishing the sync, it will call getblocktemplate. 
-use crate::common::{ - launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, - sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, - test_type::TestType, -}; +use std::time::Duration; + use color_eyre::eyre::{eyre, Context, Result}; use futures::FutureExt; -use std::time::Duration; + use zebra_chain::{ parameters::{Network, NetworkUpgrade}, serialization::ZcashSerialize, @@ -23,6 +20,12 @@ use zebra_rpc::methods::types::get_block_template::{ TimeSource, }; +use crate::common::{ + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, + sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, + test_type::TestType, +}; + /// Delay between getting block proposal results and cancelling long poll requests. /// /// This ensures that a new template can be deserialized and sent to interrupt the diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index 6002988fc62..f99fe062762 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -3,14 +3,12 @@ //! This test will get block templates via the `getblocktemplate` RPC method and submit them as new blocks //! via the `submitblock` RPC method on Regtest. -use crate::common::{ - config::{os_assigned_rpc_port_config, read_listen_addr_from_logs, testdir}, - launch::ZebradTestDirExt, -}; -use color_eyre::eyre::{eyre, Context, Result}; use std::{net::SocketAddr, sync::Arc, time::Duration}; + +use color_eyre::eyre::{eyre, Context, Result}; use tower::BoxError; use tracing::*; + use zebra_chain::{ block::{Block, Height}, parameters::{testnet::REGTEST_NU5_ACTIVATION_HEIGHT, Network, NetworkUpgrade}, @@ -32,6 +30,11 @@ use zebra_rpc::{ }; use zebra_test::args; +use crate::common::{ + config::{os_assigned_rpc_port_config, read_listen_addr_from_logs, testdir}, + launch::ZebradTestDirExt, +}; + /// Number of blocks that should be submitted before the test is considered successful. 
const NUM_BLOCKS_TO_SUBMIT: usize = 200; From 8b9fdd073d822a02768927634c635e84edb4eee4 Mon Sep 17 00:00:00 2001 From: idky137 <150072198+idky137@users.noreply.github.com> Date: Thu, 15 May 2025 00:19:25 +0100 Subject: [PATCH 175/245] Transaction and block deserialization (#9522) * added deserialisation to block and transaction structs * fixed new test * made TransactionObject::from_transaction pub * renamed valueZat to valueSat * removed double serde(with_hex) * fixed script sig name * Update zebra-chain/src/sapling/commitment.rs Co-authored-by: Conrado Gouvea * qualify cursor --------- Co-authored-by: Conrado Gouvea Co-authored-by: Hazel OHearn Co-authored-by: Conrado Gouvea --- zebra-chain/src/block/serialize.rs | 10 +++ zebra-chain/src/sapling/commitment.rs | 49 +++++++++++- zebra-chain/src/transparent/script.rs | 11 ++- zebra-chain/src/work/equihash.rs | 11 ++- zebra-rpc/src/methods.rs | 87 ++++++++++++++++++---- zebra-rpc/src/methods/types/transaction.rs | 31 ++++---- 6 files changed, 168 insertions(+), 31 deletions(-) diff --git a/zebra-chain/src/block/serialize.rs b/zebra-chain/src/block/serialize.rs index edb4f7afc07..cc8b8267a1b 100644 --- a/zebra-chain/src/block/serialize.rs +++ b/zebra-chain/src/block/serialize.rs @@ -4,6 +4,7 @@ use std::{borrow::Borrow, io}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use chrono::{TimeZone, Utc}; +use hex::{FromHex, FromHexError}; use crate::{ block::{header::ZCASH_BLOCK_VERSION, merkle, Block, CountedHeader, Hash, Header}, @@ -194,3 +195,12 @@ impl From> for SerializedBlock { Self { bytes } } } + +impl FromHex for SerializedBlock { + type Error = FromHexError; + + fn from_hex>(hex: T) -> Result { + let bytes = Vec::from_hex(hex)?; + Ok(SerializedBlock { bytes }) + } +} diff --git a/zebra-chain/src/sapling/commitment.rs b/zebra-chain/src/sapling/commitment.rs index 3e139af08db..d0d2342c6c7 100644 --- a/zebra-chain/src/sapling/commitment.rs +++ b/zebra-chain/src/sapling/commitment.rs @@ -3,7 +3,7 @@ use 
std::{fmt, io}; use bitvec::prelude::*; -use hex::ToHex; +use hex::{FromHex, FromHexError, ToHex}; use jubjub::ExtendedPoint; use lazy_static::lazy_static; use rand_core::{CryptoRng, RngCore}; @@ -387,6 +387,20 @@ impl ToHex for &NotSmallOrderValueCommitment { } } +impl FromHex for NotSmallOrderValueCommitment { + type Error = FromHexError; + + fn from_hex>(hex: T) -> Result { + // Parse hex string to 32 bytes + let mut bytes = <[u8; 32]>::from_hex(hex)?; + // Convert from big-endian (display) to little-endian (internal) + bytes.reverse(); + + Self::zcash_deserialize(io::Cursor::new(&bytes)) + .map_err(|_| FromHexError::InvalidStringLength) + } +} + #[cfg(test)] mod tests { @@ -544,4 +558,37 @@ mod tests { assert_eq!(sum, doubled_g); } + + #[test] + fn value_commitment_hex_roundtrip() { + use hex::{FromHex, ToHex}; + + let _init_guard = zebra_test::init(); + + let g_point = jubjub::AffinePoint::from_raw_unchecked( + jubjub::Fq::from_raw([ + 0xe4b3_d35d_f1a7_adfe, + 0xcaf5_5d1b_29bf_81af, + 0x8b0f_03dd_d60a_8187, + 0x62ed_cbb8_bf37_87c8, + ]), + jubjub::Fq::from_raw([ + 0x0000_0000_0000_000b, + 0x0000_0000_0000_0000, + 0x0000_0000_0000_0000, + 0x0000_0000_0000_0000, + ]), + ); + + let value_commitment = ValueCommitment(g_point); + let original = NotSmallOrderValueCommitment::try_from(value_commitment) + .expect("constructed point must not be small order"); + + let hex_str = (&original).encode_hex::(); + + let decoded = NotSmallOrderValueCommitment::from_hex(&hex_str) + .expect("hex string should decode successfully"); + + assert_eq!(original, decoded); + } } diff --git a/zebra-chain/src/transparent/script.rs b/zebra-chain/src/transparent/script.rs index e3c49b9e1a7..26ca465f4ae 100644 --- a/zebra-chain/src/transparent/script.rs +++ b/zebra-chain/src/transparent/script.rs @@ -2,7 +2,7 @@ use std::{fmt, io}; -use hex::ToHex; +use hex::{FromHex, FromHexError, ToHex}; use crate::serialization::{ zcash_serialize_bytes, SerializationError, ZcashDeserialize, 
ZcashSerialize, @@ -76,6 +76,15 @@ impl ToHex for Script { } } +impl FromHex for Script { + type Error = FromHexError; + + fn from_hex>(hex: T) -> Result { + let bytes = Vec::from_hex(hex)?; + Ok(Script::new(&bytes)) + } +} + impl ZcashSerialize for Script { fn zcash_serialize(&self, writer: W) -> Result<(), io::Error> { zcash_serialize_bytes(&self.0, writer) diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index 6f65acffb99..34863f26626 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -2,7 +2,7 @@ use std::{fmt, io}; -use hex::ToHex; +use hex::{FromHex, FromHexError, ToHex}; use serde_big_array::BigArray; use crate::{ @@ -286,3 +286,12 @@ impl ToHex for Solution { (&self).encode_hex_upper() } } + +impl FromHex for Solution { + type Error = FromHexError; + + fn from_hex>(hex: T) -> Result { + let bytes = Vec::from_hex(hex)?; + Solution::from_bytes(&bytes).map_err(|_| FromHexError::InvalidStringLength) + } +} diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index a0baf4a1e74..d73dddb9984 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -3007,7 +3007,7 @@ impl GetBlockChainInfo { /// /// This is used for the input parameter of [`RpcServer::get_address_balance`], /// [`RpcServer::get_address_tx_ids`] and [`RpcServer::get_address_utxos`]. -#[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize, serde::Serialize)] pub struct AddressStrings { /// A list of transparent address strings. addresses: Vec, @@ -3056,7 +3056,9 @@ impl AddressStrings { } /// The transparent balance of a set of addresses. -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, serde::Serialize)] +#[derive( + Clone, Copy, Debug, Default, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize, +)] pub struct AddressBalance { /// The total transparent balance. 
pub balance: u64, @@ -3191,7 +3193,7 @@ impl SentTransactionHash { /// Response to a `getblock` RPC request. /// /// See the notes for the [`RpcServer::get_block`] method. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] #[allow(clippy::large_enum_variant)] //TODO: create a struct for the Object and Box it pub enum GetBlock { @@ -3315,7 +3317,7 @@ impl Default for GetBlock { } } -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] /// The transaction list in a `getblock` call. Can be a list of transaction /// IDs or the full transaction details depending on verbosity. @@ -3329,7 +3331,7 @@ pub enum GetBlockTransaction { /// Response to a `getblockheader` RPC request. /// /// See the notes for the [`RpcServer::get_block_header`] method. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum GetBlockHeader { /// The request block header, hex-encoded. @@ -3339,7 +3341,7 @@ pub enum GetBlockHeader { Object(Box), } -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] /// Verbose response to a `getblockheader` RPC request. /// /// See the notes for the [`RpcServer::get_block_header`] method. @@ -3469,7 +3471,7 @@ impl Default for GetBlockHash { /// Response to a `getrawtransaction` RPC request. /// /// See the notes for the [`Rpc::get_raw_transaction` method]. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum GetRawTransaction { /// The raw transaction, encoded as hex bytes. @@ -3487,7 +3489,7 @@ impl Default for GetRawTransaction { /// Response to a `getaddressutxos` RPC request. 
/// /// See the notes for the [`Rpc::get_address_utxos` method]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetAddressUtxos { /// The transparent address, base58check encoded address: transparent::Address, @@ -3574,7 +3576,7 @@ impl GetAddressUtxos { /// A struct to use as parameter of the `getaddresstxids`. /// /// See the notes for the [`Rpc::get_address_tx_ids` method]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] pub struct GetAddressTxIdsRequest { // A list of addresses to get transactions from. addresses: Vec, @@ -3764,11 +3766,12 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result } } -/// A helper module to serialize `Option` as a hex string. -mod opthex { - use hex::ToHex; - use serde::Serializer; +/// A helper module to serialize and deserialize `Option` as a hex string. +pub mod opthex { + use hex::{FromHex, ToHex}; + use serde::{de, Deserialize, Deserializer, Serializer}; + #[allow(missing_docs)] pub fn serialize(data: &Option, serializer: S) -> Result where S: Serializer, @@ -3782,6 +3785,64 @@ mod opthex { None => serializer.serialize_none(), } } + + #[allow(missing_docs)] + pub fn deserialize<'de, D, T>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + T: FromHex, + { + let opt = Option::::deserialize(deserializer)?; + match opt { + Some(s) => T::from_hex(&s) + .map(Some) + .map_err(|_e| de::Error::custom("failed to convert hex string")), + None => Ok(None), + } + } +} + +/// A helper module to serialize and deserialize `[u8; N]` as a hex string. 
+pub mod arrayhex { + use serde::{Deserializer, Serializer}; + use std::fmt; + + #[allow(missing_docs)] + pub fn serialize(data: &[u8; N], serializer: S) -> Result + where + S: Serializer, + { + let hex_string = hex::encode(data); + serializer.serialize_str(&hex_string) + } + + #[allow(missing_docs)] + pub fn deserialize<'de, D, const N: usize>(deserializer: D) -> Result<[u8; N], D::Error> + where + D: Deserializer<'de>, + { + struct HexArrayVisitor; + + impl serde::de::Visitor<'_> for HexArrayVisitor { + type Value = [u8; N]; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex string representing exactly {} bytes", N) + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + let vec = hex::decode(v).map_err(E::custom)?; + vec.clone().try_into().map_err(|_| { + E::invalid_length(vec.len(), &format!("expected {} bytes", N).as_str()) + }) + } + } + + deserializer.deserialize_str(HexArrayVisitor::) + } } /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index a264050b306..d4ced730757 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use crate::methods::arrayhex; use chrono::{DateTime, Utc}; use hex::ToHex; @@ -143,7 +144,7 @@ impl TransactionTemplate { /// A Transaction object as returned by `getrawtransaction` and `getblock` RPC /// requests. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct TransactionObject { /// The raw transaction, encoded as hex bytes. #[serde(with = "hex")] @@ -196,7 +197,7 @@ pub struct TransactionObject { } /// The transparent input of a transaction. 
-#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum Input { /// A coinbase input. @@ -214,14 +215,15 @@ pub enum Input { /// The vout index. vout: u32, /// The script. + #[serde(rename = "scriptSig")] script_sig: ScriptSig, /// The script sequence number. sequence: u32, /// The value of the output being spent in ZEC. #[serde(skip_serializing_if = "Option::is_none")] value: Option, - /// The value of the output being spent, in zats. - #[serde(rename = "valueZat", skip_serializing_if = "Option::is_none")] + /// The value of the output being spent, in zats, named to match zcashd. + #[serde(rename = "valueSat", skip_serializing_if = "Option::is_none")] value_zat: Option, /// The address of the output being spent. #[serde(skip_serializing_if = "Option::is_none")] @@ -230,7 +232,7 @@ pub enum Input { } /// The transparent output of a transaction. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct Output { /// The value in ZEC. value: f64, @@ -245,7 +247,7 @@ pub struct Output { } /// The scriptPubKey of a transaction output. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct ScriptPubKey { /// the asm. // #9330: The `asm` field is not currently populated. @@ -264,18 +266,17 @@ pub struct ScriptPubKey { } /// The scriptSig of a transaction input. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct ScriptSig { /// The asm. // #9330: The `asm` field is not currently populated. asm: String, /// The hex. - #[serde(with = "hex")] hex: Script, } /// A Sapling spend of a transaction. 
-#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct ShieldedSpend { /// Value commitment to the input note. #[serde(with = "hex")] @@ -298,7 +299,7 @@ pub struct ShieldedSpend { } /// A Sapling output of a transaction. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct ShieldedOutput { /// Value commitment to the input note. #[serde(with = "hex")] @@ -310,7 +311,7 @@ pub struct ShieldedOutput { #[serde(rename = "ephemeralKey", with = "hex")] ephemeral_key: [u8; 32], /// The output note encrypted to the recipient. - #[serde(rename = "encCiphertext", with = "hex")] + #[serde(rename = "encCiphertext", with = "arrayhex")] enc_ciphertext: [u8; 580], /// A ciphertext enabling the sender to recover the output note. #[serde(rename = "outCiphertext", with = "hex")] @@ -321,7 +322,7 @@ pub struct ShieldedOutput { } /// Object with Orchard-specific information. -#[derive(Clone, Debug, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct Orchard { /// Array of Orchard actions. actions: Vec, @@ -334,7 +335,7 @@ pub struct Orchard { } /// The Orchard action of a transaction. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct OrchardAction { /// A value commitment to the net value of the input note minus the output note. #[serde(with = "hex")] @@ -352,7 +353,7 @@ pub struct OrchardAction { #[serde(rename = "ephemeralKey", with = "hex")] ephemeral_key: [u8; 32], /// The output note encrypted to the recipient. - #[serde(rename = "encCiphertext", with = "hex")] + #[serde(rename = "encCiphertext", with = "arrayhex")] enc_ciphertext: [u8; 580], /// A ciphertext enabling the sender to recover the output note. 
#[serde(rename = "spendAuthSig", with = "hex")] @@ -386,7 +387,7 @@ impl Default for TransactionObject { impl TransactionObject { /// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format. #[allow(clippy::unwrap_in_result)] - pub(crate) fn from_transaction( + pub fn from_transaction( tx: Arc, height: Option, confirmations: Option, From d171f4a509c4db0f4f884b95df8ff6ef6353471b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 15 May 2025 17:56:57 -0300 Subject: [PATCH 176/245] remove test-scanner from CI (#9530) --- .../sub-ci-integration-tests-gcp.yml | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 2eef386e4ba..0eaf0281fe4 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -480,29 +480,6 @@ jobs: saves_to_disk: false secrets: inherit - # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running. - # - # Runs: - # - after every PR is merged to `main` - # - on every PR update - # - # If the state version has changed, waits for the new cached states to be created. - # Otherwise, if the state rebuild was skipped, runs immediately after the build job. - test-scanner: - name: Scanner tests - needs: [test-full-sync, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - with: - app_name: zebra-scan - test_id: scanner-tests - test_description: Tests the scanner. 
- test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCANNER=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" - needs_zebra_state: true - needs_lwd_state: false - saves_to_disk: false - secrets: inherit - failure-issue: name: Open or update issues for main branch failures # When a new test is added to this workflow, add it to this list. @@ -523,7 +500,6 @@ jobs: lightwalletd-grpc-test, get-block-template-test, submit-block-test, - test-scanner, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) From cc062510a2ba8f6931dc91aff08105a3dd414687 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 16 May 2025 11:00:07 -0400 Subject: [PATCH 177/245] fix(clippy, build): fix latest Clippy lints introduced in v1.87.0 and migrate from vergen to vergen_git (#9531) * fixes new lints * rustfmt * migrates from vergen to vergen_git2 --- Cargo.lock | 80 ++++++++++++++++++++---- Cargo.toml | 2 +- zebra-chain/src/block/serialize.rs | 2 +- zebra-chain/src/transparent/serialize.rs | 5 +- zebra-test/src/command.rs | 11 ++-- zebrad/Cargo.toml | 2 +- zebrad/build.rs | 53 +++++++++++----- 7 files changed, 112 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c14c870da9..075174c741d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -640,16 +640,16 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.18.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", "semver", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] @@ -1179,6 +1179,37 @@ dependencies = [ "serde", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.101", +] + [[package]] name = "digest" version = "0.10.7" @@ -1636,9 +1667,9 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "git2" -version = "0.19.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" dependencies = [ "bitflags 2.9.0", "libc", @@ -2534,9 +2565,9 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libgit2-sys" -version = "0.17.0+1.8.1" +version = "0.18.1+1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" +checksum = "e1dcb20f84ffcdd825c7a311ae347cce604a6f084a767dec4a4929829645290e" dependencies = [ "cc", "libc", @@ -5402,18 +5433,43 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.3.2" +version = "9.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" +checksum = 
"6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" dependencies = [ "anyhow", "cargo_metadata", - "cfg-if", - "git2", + "derive_builder", "regex", "rustc_version", "rustversion", + "vergen-lib", +] + +[[package]] +name = "vergen-git2" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" +dependencies = [ + "anyhow", + "derive_builder", + "git2", + "rustversion", "time", + "vergen", + "vergen-lib", +] + +[[package]] +name = "vergen-lib" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" +dependencies = [ + "anyhow", + "derive_builder", + "rustversion", ] [[package]] @@ -6684,7 +6740,7 @@ dependencies = [ "tracing-journald", "tracing-subscriber", "tracing-test", - "vergen", + "vergen-git2", "zebra-chain", "zebra-consensus", "zebra-grpc", diff --git a/Cargo.toml b/Cargo.toml index 06680d00137..59a0bf8e412 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -147,7 +147,7 @@ tracing-journald = "0.3.0" tracing-subscriber = "0.3.19" tracing-test = "0.2.4" uint = "0.10.0" -vergen = { version = "8.3.2", default-features = false } +vergen-git2 = { version = "1.0.0", default-features = false } wagyu-zcash-parameters = "0.2.0" x25519-dalek = "2.0.1" zcash_note_encryption = "0.4.1" diff --git a/zebra-chain/src/block/serialize.rs b/zebra-chain/src/block/serialize.rs index cc8b8267a1b..2dd3ee56f37 100644 --- a/zebra-chain/src/block/serialize.rs +++ b/zebra-chain/src/block/serialize.rs @@ -64,7 +64,7 @@ fn check_version(version: u32) -> Result<(), &'static str> { impl ZcashSerialize for Header { #[allow(clippy::unwrap_in_result)] fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { - check_version(self.version).map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; + check_version(self.version).map_err(io::Error::other)?; 
writer.write_u32::(self.version)?; self.previous_block_hash.zcash_serialize(&mut writer)?; diff --git a/zebra-chain/src/transparent/serialize.rs b/zebra-chain/src/transparent/serialize.rs index 1c769910c88..dfab5d7e68d 100644 --- a/zebra-chain/src/transparent/serialize.rs +++ b/zebra-chain/src/transparent/serialize.rs @@ -208,10 +208,7 @@ pub(crate) fn write_coinbase_height( // TODO: update this check based on the consensus rule changes in // https://github.com/zcash/zips/issues/540 if coinbase_data.0 != GENESIS_COINBASE_DATA { - return Err(io::Error::new( - io::ErrorKind::Other, - "invalid genesis coinbase data", - )); + return Err(io::Error::other("invalid genesis coinbase data")); } } else if let h @ 1..=16 = height.0 { w.write_u8(0x50 + (h as u8))?; diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index 18a529fe32d..9ebaab78614 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -292,10 +292,8 @@ pub fn check_failure_regexes( } // Otherwise, if the process logged a failure message, return an error - let error = std::io::Error::new( - ErrorKind::Other, - format!( - "test command:\n\ + let error = std::io::Error::other(format!( + "test command:\n\ {cmd}\n\n\ Logged a failure message:\n\ {line}\n\n\ @@ -303,9 +301,8 @@ pub fn check_failure_regexes( {failure_matches:#?}\n\n\ All Failure regexes: \ {:#?}\n", - failure_regexes.patterns(), - ), - ); + failure_regexes.patterns(), + )); Err(error) } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 7fcb4326456..edc2cc80601 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -241,7 +241,7 @@ proptest-derive = { workspace = true, optional = true } console-subscriber = { workspace = true, optional = true } [build-dependencies] -vergen = { workspace = true, features = ["cargo", "git", "git2", "rustc"] } +vergen-git2 = { workspace = true, features = ["cargo", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { workspace = true, optional = true } diff --git 
a/zebrad/build.rs b/zebrad/build.rs index efac0a69774..9a049d88787 100644 --- a/zebrad/build.rs +++ b/zebrad/build.rs @@ -6,36 +6,55 @@ //! When compiling the `lightwalletd` gRPC tests, also builds a gRPC client //! Rust API for `lightwalletd`. -use vergen::EmitBuilder; +use vergen_git2::{CargoBuilder, Emitter, Git2Builder, RustcBuilder}; -/// Returns a new `vergen` builder, configured for everything except for `git` env vars. +/// Configures an [`Emitter`] for everything except for `git` env vars. /// This builder fails the build on error. -fn base_vergen_builder() -> EmitBuilder { - let mut vergen = EmitBuilder::builder(); - - vergen.all_cargo().all_rustc(); - - vergen +fn add_base_emitter_instructions(emitter: &mut Emitter) { + emitter + .add_instructions( + &CargoBuilder::all_cargo().expect("all_cargo() should build successfully"), + ) + .expect("adding all_cargo() instructions should succeed") + .add_instructions( + &RustcBuilder::all_rustc().expect("all_rustc() should build successfully"), + ) + .expect("adding all_rustc() instructions should succeed"); } /// Process entry point for `zebrad`'s build script #[allow(clippy::print_stderr)] fn main() { - let mut vergen = base_vergen_builder(); + let mut emitter = Emitter::default(); + add_base_emitter_instructions(&mut emitter); + + let all_git = Git2Builder::default() + .branch(true) + .commit_author_email(true) + .commit_author_name(true) + .commit_count(true) + .commit_date(true) + .commit_message(true) + .commit_timestamp(true) + .describe(false, false, None) + .sha(true) + .dirty(false) + .describe(true, true, Some("v*.*.*")) + .build() + .expect("all_git + describe + sha should build successfully"); - vergen.all_git().git_sha(true); - // git adds a "-dirty" flag if there are uncommitted changes. - // This doesn't quite match the SemVer 2.0 format, which uses dot separators. 
- vergen.git_describe(true, true, Some("v*.*.*")); + emitter + .add_instructions(&all_git) + .expect("adding all_git + describe + sha instructions should succeed"); // Disable git if we're building with an invalid `zebra/.git` - match vergen.fail_on_error().emit() { + match emitter.fail_on_error().emit() { Ok(_) => {} Err(e) => { eprintln!("git error in vergen build script: skipping git env vars: {e:?}",); - base_vergen_builder() - .emit() - .expect("non-git vergen should succeed"); + let mut emitter = Emitter::default(); + add_base_emitter_instructions(&mut emitter); + emitter.emit().expect("base emit should succeed"); } } From a77de50caef234e13ccf74452a6c010b65f48e82 Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Sat, 17 May 2025 00:07:06 +0100 Subject: [PATCH 178/245] zebra-rpc: Correctly set optional `scriptPubKey` fields of transactions (#9536) Closes ZcashFoundation/zebra#9535. --- ...k_verbose_hash_verbosity_2@mainnet_10.snap | 4 +-- ...k_verbose_hash_verbosity_2@testnet_10.snap | 4 +-- ...verbose_height_verbosity_2@mainnet_10.snap | 4 +-- ...verbose_height_verbosity_2@testnet_10.snap | 4 +-- ...rawtransaction_verbosity=1@mainnet_10.snap | 4 +-- ...rawtransaction_verbosity=1@testnet_10.snap | 4 +-- zebra-rpc/src/methods/types/transaction.rs | 29 ++++++++++++++----- 7 files changed, 28 insertions(+), 25 deletions(-) diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap index 4794341512a..187cb5a5721 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@mainnet_10.snap @@ -30,9 +30,7 @@ expression: block "scriptPubKey": { "asm": "", "hex": "21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git 
a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap index b6a6fc2f242..ce0cb867c03 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_2@testnet_10.snap @@ -30,9 +30,7 @@ expression: block "scriptPubKey": { "asm": "", "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap index 4794341512a..187cb5a5721 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@mainnet_10.snap @@ -30,9 +30,7 @@ expression: block "scriptPubKey": { "asm": "", "hex": "21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap index b6a6fc2f242..ce0cb867c03 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_2@testnet_10.snap @@ -30,9 +30,7 @@ expression: block "scriptPubKey": { "asm": "", "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git 
a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap index adac43ce5ee..bd0513812e0 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@mainnet_10.snap @@ -21,9 +21,7 @@ expression: rsp "scriptPubKey": { "asm": "", "hex": "21027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap index 1eddef2b8fd..045d5a31571 100644 --- a/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/getrawtransaction_verbosity=1@testnet_10.snap @@ -21,9 +21,7 @@ expression: rsp "scriptPubKey": { "asm": "", "hex": "21025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac", - "reqSigs": 0, - "type": "", - "addresses": [] + "type": "" } }, { diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index d4ced730757..44596807da7 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -257,12 +257,16 @@ pub struct ScriptPubKey { hex: Script, /// The required sigs. #[serde(rename = "reqSigs")] - req_sigs: u32, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + req_sigs: Option, /// The type, eg 'pubkeyhash'. // #9330: The `type` field is not currently populated. r#type: String, /// The addresses. - addresses: Vec, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + addresses: Option>, } /// The scriptSig of a transaction input. 
@@ -434,19 +438,30 @@ impl TransactionObject { .iter() .enumerate() .map(|output| { - let addresses = match output.1.address(network) { - Some(address) => vec![address.to_string()], - None => vec![], - }; + // Parse the scriptPubKey to find destination addresses. + let (addresses, req_sigs) = match output.1.address(network) { + // TODO: For multisig scripts, this should populate `addresses` + // with the pubkey IDs and `req_sigs` with the number of + // signatures required to spend. + + // For other standard destinations, `addresses` is populated + // with a single value and `req_sigs` is set to 1. + Some(address) => Some((vec![address.to_string()], 1)), + // For null-data or nonstandard outputs, both are omitted. + None => None, + } + .unzip(); Output { value: Zec::from(output.1.value).lossy_zec(), value_zat: output.1.value.zatoshis(), n: output.0 as u32, script_pub_key: ScriptPubKey { + // TODO: Fill this out. asm: "".to_string(), hex: output.1.lock_script.clone(), - req_sigs: addresses.len() as u32, + req_sigs, + // TODO: Fill this out. r#type: "".to_string(), addresses, }, From d3211081f96176dc3b50436070c570287e13b741 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 22 May 2025 15:30:13 +0100 Subject: [PATCH 179/245] feat(ci): Enable stateful MIGs and fix release deployment skipping (#9516) * fix(ci): Ensure release deployments run and improve script clarity This addresses several issues in the CD workflow: - Modifies the 'build' job to remove its dependency on 'get-disk-name'. This allows the 'build' job and subsequently 'deploy-nodes' to run during release events, as 'get-disk-name' is intentionally skipped for releases. - Updates the 'Create instance template' script in 'deploy-nodes' to correctly handle disk parameters for release events. 
It now ensures that release deployments attempt to use the existing persistent disk (e.g., 'zebrad-cache-mainnet') for state continuity, rather than relying on a dynamic cached image from the (skipped) 'get-disk-name' job. - Refactors in-script comments within the 'deploy-nodes' job and moved them to the top, to avoid clutter in the script * feat(ci): Implement stateful disk policy for release MIGs This commit introduces a new step "Configure stateful disk policy for release MIG" to the `deploy-nodes` job in the `.github/workflows/cd-deploy-nodes-gcp.yml` workflow. For `release` events, this step applies a stateful policy to the Managed Instance Group (MIG) using the command: `gcloud compute instance-groups managed set-stateful-policy`. The policy is configured with `--stateful-disk "device-name=zebrad-cache-${NETWORK},auto-delete=never"`. This ensures that the specified data disk, which holds the Zebra node's state, is preserved during instance recreations or updates within the MIG, maintaining data persistence for production release deployments. --- .github/workflows/cd-deploy-nodes-gcp.yml | 42 ++++++++++++++++++----- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index c7861b74fd3..ea73101e13f 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -165,7 +165,6 @@ jobs: # The image will be commonly named `zebrad:` build: name: Build CD Docker - needs: get-disk-name uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile @@ -272,17 +271,25 @@ jobs: - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.4 + # Retrieves a static IP address for long-running nodes. + # This step runs only when triggered by a release or a manual workflow_dispatch event. + # - Exits immediately if any command fails. 
+ # - Attempts to retrieve the static IP for the current network and region. + # - Sets the IP_ADDRESS environment variable. - name: Get static IP address for long-running nodes - # Now runs when triggered by a release or a manual workflow_dispatch event. if: ${{ github.event_name == 'release' || github.event_name == 'workflow_dispatch' }} run: | - set -e # Exit immediately if a command exits with a non-zero status. - # Attempt to retrieve the static IP address for the network. + set -e echo "IP_ADDRESS=$(gcloud compute addresses describe zebra-${NETWORK} --region ${{ vars.GCP_REGION }} --format='value(address)')" >> "$GITHUB_ENV" + # Creates a GCP instance template with specific disk handling: + # - Releases: Uses a fixed disk name (e.g., "zebrad-cache-mainnet") and attempts to re-attach this existing + # persistent disk to maintain node state. A new blank disk is created if not found. Dynamic cached images are NOT used. + # - Other Events (push/workflow_dispatch): Uses a unique disk name (branch/SHA). If a cached disk is requested + # and found by 'get-disk-name', its image seeds the new disk. Errors if an expected cached disk isn't available. - name: Create instance template for ${{ matrix.network }} run: | - if [ "${{ github.event_name }}" == "release" ]; then + if [ ${{ github.event_name == 'release' }} ]; then DISK_NAME="zebrad-cache-${NETWORK}" else DISK_NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" @@ -293,17 +300,21 @@ jobs: IP_FLAG="" fi DISK_PARAMS="name=${DISK_NAME},device-name=${DISK_NAME},size=400GB,type=pd-balanced" - if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then + + if [ ${{ github.event_name == 'release' }} ]; then + echo "Release event: Using disk ${DISK_NAME} without a dynamic cached image source." + elif [ -n "${{ env.CACHED_DISK_NAME }}" ]; then + echo "Non-release event: Using cached disk image ${{ env.CACHED_DISK_NAME }} for disk ${DISK_NAME}." 
DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" elif [ ${{ !inputs.need_cached_disk && github.event_name == 'workflow_dispatch' }} ]; then - echo "No cached disk required" + echo "Workflow dispatch: No cached disk required by input for disk ${DISK_NAME}." else - echo "No cached disk found for ${{ matrix.network }} in main branch" + echo "Error: A cached disk was expected for disk ${{ matrix.network }} but is not available (event: ${{ github.event_name }}, CACHED_DISK_NAME: '${{ env.CACHED_DISK_NAME }}', inputs.need_cached_disk: '${{ inputs.need_cached_disk }}')." exit 1 fi # Set log file based on input or default - if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + if [ ${{ github.event_name == 'workflow_dispatch' }} ]; then LOG_FILE="${{ inputs.log_file }}" else LOG_FILE="${{ vars.CD_LOG_FILE }}" @@ -347,6 +358,19 @@ jobs: --region "${{ vars.GCP_REGION }}" \ --size 1 + # Configure stateful disk policy for release MIGs to ensure disk persistence. + # This policy tells the MIG to preserve the disk with the specified device-name + # when instances are recreated or deleted, and to reattach it. 
+ - name: Configure stateful disk policy for release MIG + if: ${{ github.event_name == 'release' }} + run: | + MIG_NAME="zebrad-${{ needs.versioning.outputs.major_version }}-${NETWORK}" + DEVICE_NAME_TO_PRESERVE="zebrad-cache-${NETWORK}" + echo "Applying stateful policy to MIG: ${MIG_NAME} for device: ${DEVICE_NAME_TO_PRESERVE}" + gcloud compute instance-groups managed set-stateful-policy "${MIG_NAME}" \ + --region "${{ vars.GCP_REGION }}" \ + --stateful-disk "device-name=${DEVICE_NAME_TO_PRESERVE},auto-delete=never" + # Rolls out update to existing group using the new instance template - name: Update managed instance group for ${{ matrix.network }} if: steps.does-group-exist.outcome == 'success' From 5cc4357acbc6f2cf598b84f8abc9ebd8b63ce72e Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 22 May 2025 11:42:10 -0300 Subject: [PATCH 180/245] add raw_value feature to serde_json (#9538) --- zebra-rpc/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 1678209c089..32cec02dba5 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -55,7 +55,7 @@ jsonrpsee-proc-macros = { workspace = true } hyper = { workspace = true } http-body-util = { workspace = true } semver = { workspace = true } -serde_json = { workspace = true } +serde_json = { workspace = true, features = ["raw_value"] } serde_with = { workspace = true, features = ["hex"] } indexmap = { workspace = true, features = ["serde"] } From cbd1bb7fc3d17320dbf2bd11cdcfcbd46ae8f435 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 22 May 2025 12:12:37 -0300 Subject: [PATCH 181/245] feat(rpc): Update `(z_)validateaddress` to validate TEX addresses (#9483) * add TEX support to validate address rpc methods * update call docs * fix comment Co-authored-by: Conrado Gouvea --------- Co-authored-by: Conrado Gouvea Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 1 + Cargo.toml | 1 + 
zebra-chain/Cargo.toml | 1 + zebra-chain/src/parameters/network.rs | 10 ++ zebra-chain/src/primitives/address.rs | 10 ++ zebra-chain/src/transparent/address.rs | 78 +++++++++++++- zebra-rpc/src/methods.rs | 4 +- zebra-rpc/src/methods/tests/vectors.rs | 100 ++++++++++++++++++ .../disk_format/transparent.rs | 3 + 9 files changed, 202 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 075174c741d..6a2ae592e95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6287,6 +6287,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.46" dependencies = [ + "bech32", "bitflags 2.9.0", "bitflags-serde-legacy", "bitvec", diff --git a/Cargo.toml b/Cargo.toml index 59a0bf8e412..2d414aa7ee3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ zip32 = "0.2" abscissa_core = "0.7.0" atty = "0.2.14" base64 = "0.22.1" +bech32 = "0.11.0" bellman = "0.14.0" bincode = "1.3.3" bitflags = "2.9.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index a514f9f34ed..b87516e8b67 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -83,6 +83,7 @@ secp256k1 = { workspace = true, features = ["serde"] } sha2 = { workspace = true, features = ["compress"] } uint = { workspace = true } x25519-dalek = { workspace = true, features = ["serde"] } +bech32 = { workspace = true } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index bb304f85e13..e23d11c7e01 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -84,6 +84,16 @@ impl NetworkKind { "test".to_string() } } + + /// Returns the 2 bytes prefix for Bech32m-encoded transparent TEX + /// payment addresses for the network as defined in [ZIP-320](https://zips.z.cash/zip-0320.html). + pub fn tex_address_prefix(self) -> [u8; 2] { + // TODO: Add this bytes to `zcash_primitives::constants`? 
+ match self { + Self::Mainnet => [0x1c, 0xb8], + Self::Testnet | Self::Regtest => [0x1d, 0x25], + } + } } impl From for &'static str { diff --git a/zebra-chain/src/primitives/address.rs b/zebra-chain/src/primitives/address.rs index 57745a39e86..aa7023c5771 100644 --- a/zebra-chain/src/primitives/address.rs +++ b/zebra-chain/src/primitives/address.rs @@ -129,6 +129,16 @@ impl zcash_address::TryFromAddress for Address { transparent, }) } + + fn try_from_tex( + network: NetworkType, + data: [u8; 20], + ) -> Result> { + Ok(Self::Transparent(transparent::Address::from_tex( + network.into(), + data, + ))) + } } impl Address { diff --git a/zebra-chain/src/transparent/address.rs b/zebra-chain/src/transparent/address.rs index 1b933af2ef0..c73fb6db309 100644 --- a/zebra-chain/src/transparent/address.rs +++ b/zebra-chain/src/transparent/address.rs @@ -42,6 +42,16 @@ pub enum Address { /// hash of a SHA-256 hash of a compressed ECDSA key encoding. pub_key_hash: [u8; 20], }, + + /// Transparent-Source-Only Address. + /// + /// + Tex { + /// Production, test, or other network + network_kind: NetworkKind, + /// 20 bytes specifying the validating key hash. 
+ validating_key_hash: [u8; 20], + }, } impl fmt::Debug for Address { @@ -63,6 +73,13 @@ impl fmt::Debug for Address { .field("network_kind", network_kind) .field("pub_key_hash", &hex::encode(pub_key_hash)) .finish(), + Address::Tex { + network_kind, + validating_key_hash, + } => debug_struct + .field("network_kind", network_kind) + .field("validating_key_hash", &hex::encode(validating_key_hash)) + .finish(), } } } @@ -80,11 +97,41 @@ impl std::str::FromStr for Address { type Err = SerializationError; fn from_str(s: &str) -> Result { - let result = &bs58::decode(s).with_check(None).into_vec(); + // Try Base58Check (prefixes: t1, t3, tm, t2) + if let Ok(data) = bs58::decode(s).with_check(None).into_vec() { + return Address::zcash_deserialize(&data[..]); + } + + // Try Bech32 (prefixes: tex, textest) + let (hrp, payload) = + bech32::decode(s).map_err(|_| SerializationError::Parse("invalid Bech32 encoding"))?; + + // We can’t meaningfully call `Address::zcash_deserialize` for Bech32 addresses, because + // that method is explicitly reading two binary prefix bytes (the Base58Check version) + 20 hash bytes. + // Bech32 textual addresses carry no such binary "version" on the wire, so there’s nothing in the + // reader for zcash_deserialize to match. 
+ + // Instead, we deserialize the Bech32 address here: - match result { - Ok(bytes) => Self::zcash_deserialize(&bytes[..]), - Err(_) => Err(SerializationError::Parse("t-addr decoding error")), + if payload.len() != 20 { + return Err(SerializationError::Parse("unexpected payload length")); + } + + let mut hash_bytes = [0u8; 20]; + hash_bytes.copy_from_slice(&payload); + + match hrp.as_str() { + zcash_primitives::constants::mainnet::HRP_TEX_ADDRESS => Ok(Address::Tex { + network_kind: NetworkKind::Mainnet, + validating_key_hash: hash_bytes, + }), + + zcash_primitives::constants::testnet::HRP_TEX_ADDRESS => Ok(Address::Tex { + network_kind: NetworkKind::Testnet, + validating_key_hash: hash_bytes, + }), + + _ => Err(SerializationError::Parse("unknown Bech32 HRP")), } } } @@ -106,6 +153,13 @@ impl ZcashSerialize for Address { writer.write_all(&network_kind.b58_pubkey_address_prefix())?; writer.write_all(pub_key_hash)? } + Address::Tex { + network_kind, + validating_key_hash, + } => { + writer.write_all(&network_kind.tex_address_prefix())?; + writer.write_all(validating_key_hash)? + } } Ok(()) @@ -172,6 +226,7 @@ impl Address { match self { Address::PayToScriptHash { network_kind, .. } => *network_kind, Address::PayToPublicKeyHash { network_kind, .. } => *network_kind, + Address::Tex { network_kind, .. } => *network_kind, } } @@ -189,11 +244,17 @@ impl Address { match *self { Address::PayToScriptHash { script_hash, .. } => script_hash, Address::PayToPublicKeyHash { pub_key_hash, .. } => pub_key_hash, + Address::Tex { + validating_key_hash, + .. + } => validating_key_hash, } } /// Given a transparent address (P2SH or a P2PKH), create a script that can be used in a coinbase /// transaction output. + /// + /// TEX addresses are not supported and return an empty script. 
pub fn create_script_from_address(&self) -> Script { let mut script_bytes = Vec::new(); @@ -214,10 +275,19 @@ impl Address { script_bytes.push(OpCode::EqualVerify as u8); script_bytes.push(OpCode::CheckSig as u8); } + Address::Tex { .. } => {} }; Script::new(&script_bytes) } + + /// Create a TEX address from the given network kind and validating key hash. + pub fn from_tex(network_kind: NetworkKind, validating_key_hash: [u8; 20]) -> Self { + Self::Tex { + network_kind, + validating_key_hash, + } + } } #[cfg(test)] diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index d73dddb9984..37359be0f84 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -497,7 +497,7 @@ pub trait Rpc { #[method(name = "getpeerinfo")] async fn get_peer_info(&self) -> Result>; - /// Checks if a zcash address is valid. + /// Checks if a zcash transparent address of type P2PKH, P2SH or TEX is valid. /// Returns information about the given address if valid. /// /// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html) @@ -510,7 +510,7 @@ pub trait Rpc { #[method(name = "validateaddress")] async fn validate_address(&self, address: String) -> Result; - /// Checks if a zcash address is valid. + /// Checks if a zcash address of type P2PKH, P2SH, TEX, SAPLING or UNIFIED is valid. /// Returns information about the given address if valid. 
/// /// zcashd reference: [`z_validateaddress`](https://zcash.github.io/rpc/z_validateaddress.html) diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 6cef3fbec1e..92ef19ed3d8 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -2190,6 +2190,18 @@ async fn rpc_validateaddress() { None, ); + // t1 address: valid + let validate_address = rpc + .validate_address("t1fMAAnYrpwt1HQ8ZqxeFqVSSi6PQjwTLUm".to_string()) + .await + .expect("we should have a validate_address::Response"); + + assert!( + validate_address.is_valid, + "t1 address should be valid on Mainnet" + ); + + // t3 address: valid let validate_address = rpc .validate_address("t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR".to_string()) .await @@ -2200,6 +2212,7 @@ async fn rpc_validateaddress() { "Mainnet founder address should be valid on Mainnet" ); + // t2 address: invalid let validate_address = rpc .validate_address("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi".to_string()) .await @@ -2210,6 +2223,32 @@ async fn rpc_validateaddress() { validate_address::Response::invalid(), "Testnet founder address should be invalid on Mainnet" ); + + // tex address: valid + let validate_address = rpc + .validate_address("tex1s2rt77ggv6q989lr49rkgzmh5slsksa9khdgte".to_string()) + .await + .expect("we should have a validate_address::Response"); + + assert!( + validate_address.is_valid, + "ZIP-230 sample address should be valid on Mainnet" + ); + + // sprout address: invalid + let validate_address = rpc + .validate_address( + "zs1z7rejlpsa98s2rrrfkwmaxu53e4ue0ulcrw0h4x5g8jl04tak0d3mm47vdtahatqrlkngh9slya" + .to_string(), + ) + .await + .expect("We should have a validate_address::Response"); + + assert_eq!( + validate_address, + validate_address::Response::invalid(), + "Sapling address should be invalid on Mainnet" + ); } #[tokio::test(flavor = "multi_thread")] @@ -2234,6 +2273,18 @@ async fn rpc_z_validateaddress() { None, ); + // t1 address: valid 
+ let z_validate_address = rpc + .z_validate_address("t1fMAAnYrpwt1HQ8ZqxeFqVSSi6PQjwTLUm".to_string()) + .await + .expect("we should have a validate_address::Response"); + + assert!( + z_validate_address.is_valid, + "t1 address should be valid on Mainnet" + ); + + // t3 address: valid let z_validate_address = rpc .z_validate_address("t3fqvkzrrNaMcamkQMwAyHRjfDdM2xQvDTR".to_string()) .await @@ -2244,6 +2295,7 @@ async fn rpc_z_validateaddress() { "Mainnet founder address should be valid on Mainnet" ); + // t2 address: invalid let z_validate_address = rpc .z_validate_address("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi".to_string()) .await @@ -2254,6 +2306,54 @@ async fn rpc_z_validateaddress() { z_validate_address::Response::invalid(), "Testnet founder address should be invalid on Mainnet" ); + + // tex address: valid + let z_validate_address = rpc + .z_validate_address("tex1s2rt77ggv6q989lr49rkgzmh5slsksa9khdgte".to_string()) + .await + .expect("we should have a z_validate_address::Response"); + + assert!( + z_validate_address.is_valid, + "ZIP-230 sample address should be valid on Mainnet" + ); + + // sprout address: invalid + let z_validate_address = rpc + .z_validate_address("zcWsmqT4X2V4jgxbgiCzyrAfRT1vi1F4sn7M5Pkh66izzw8Uk7LBGAH3DtcSMJeUb2pi3W4SQF8LMKkU2cUuVP68yAGcomL".to_string()) + .await + .expect("We should have a validate_address::Response"); + + assert_eq!( + z_validate_address, + z_validate_address::Response::invalid(), + "Sprout address should be invalid on Mainnet" + ); + + // sapling address: valid + let z_validate_address = rpc + .z_validate_address( + "zs1z7rejlpsa98s2rrrfkwmaxu53e4ue0ulcrw0h4x5g8jl04tak0d3mm47vdtahatqrlkngh9slya" + .to_string(), + ) + .await + .expect("We should have a validate_address::Response"); + + assert!( + z_validate_address.is_valid, + "Sapling address should be valid on Mainnet" + ); + + // unified address: valid + let z_validate_address = rpc + 
.z_validate_address("u1c4ndwzy9hx70zjdtq4qt4x3c7zm0fnh85g9thsc8sunjcpk905w898pdvw82gdpj2p0mggs9tm23u6mzwk3xn4q25fq4czglssz5l6rlj268wfamxn7z4pvmgxwfl55xf0ua2u03afw66579fplkh8mvx2tp6t8er3zvvvtvf8e43mjv7n32st3zpvamfpvmxdrnzesakax8jrq3l3e".to_string()) + .await + .expect("We should have a validate_address::Response"); + + assert!( + z_validate_address.is_valid, + "Unified address should be valid on Mainnet" + ); } #[tokio::test(flavor = "multi_thread")] diff --git a/zebra-state/src/service/finalized_state/disk_format/transparent.rs b/zebra-state/src/service/finalized_state/disk_format/transparent.rs index b45c211bcf7..b41264ebe00 100644 --- a/zebra-state/src/service/finalized_state/disk_format/transparent.rs +++ b/zebra-state/src/service/finalized_state/disk_format/transparent.rs @@ -510,6 +510,9 @@ fn address_variant(address: &transparent::Address) -> u8 { // address variant for `Regtest` transparent addresses in the db format (Testnet | Regtest, PayToPublicKeyHash { .. }) => 2, (Testnet | Regtest, PayToScriptHash { .. }) => 3, + // TEX address variants + (Mainnet, Tex { .. }) => 4, + (Testnet | Regtest, Tex { .. 
}) => 5, } } From 7c6b2f9e32c8a217622b211b0fb1d1e5380c1e28 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 26 May 2025 15:08:31 +0100 Subject: [PATCH 182/245] refactor(ci): Simplify cached disk selection logic (#9545) --- .github/workflows/cd-deploy-nodes-gcp.yml | 6 ------ .../workflows/scripts/gcp-get-cached-disks.sh | 16 +++++++--------- .../sub-deploy-integration-tests-gcp.yml | 10 ++-------- .github/workflows/sub-find-cached-disks.yml | 4 ---- 4 files changed, 9 insertions(+), 27 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index ea73101e13f..121485ebe86 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -46,11 +46,6 @@ on: options: - tip - checkpoint - prefer_main_cached_state: - default: false - description: "Prefer cached state from the main branch" - required: false - type: boolean need_cached_disk: default: true description: "Use a cached state disk" @@ -157,7 +152,6 @@ jobs: network: ${{ inputs.network || vars.ZCASH_NETWORK }} disk_prefix: zebrad-cache disk_suffix: ${{ inputs.cached_disk_type || 'tip' }} - prefer_main_cached_state: ${{ inputs.prefer_main_cached_state || (github.event_name == 'push' && github.ref_name == 'main' && true) || false }} # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from Git diff --git a/.github/workflows/scripts/gcp-get-cached-disks.sh b/.github/workflows/scripts/gcp-get-cached-disks.sh index 9716dc9f5a7..ade3285f430 100755 --- a/.github/workflows/scripts/gcp-get-cached-disks.sh +++ b/.github/workflows/scripts/gcp-get-cached-disks.sh @@ -3,9 +3,9 @@ # This script finds a cached Google Cloud Compute image based on specific criteria. 
# # If there are multiple disks: -# - if `PREFER_MAIN_CACHED_STATE` is "true", then select an image from the `main` branch, else -# - try to find a cached disk image from the current branch (or PR), else -# - try to find an image from any branch. +# - try to find a cached disk image from the current branch (or PR), +# - if no image was found, try to find an image from the `main` branch, +# - if no image was found, try to find an image from any branch. # # Within each of these categories: # - prefer newer images to older images @@ -47,12 +47,10 @@ if [[ -n "${DISK_PREFIX}" && -n "${DISK_SUFFIX}" ]]; then echo "Finding a ${DISK_PREFIX}-${DISK_SUFFIX} disk image for ${NETWORK}..." CACHED_DISK_NAME="" - # Try to find an image based on the `main` branch if that branch is preferred. - if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then - CACHED_DISK_NAME=$(find_cached_disk_image "main-[0-9a-f]+" "main branch") - fi - # If no image was found, try to find one from the current branch (or PR). - CACHED_DISK_NAME=${CACHED_DISK_NAME:-$(find_cached_disk_image ".+-${GITHUB_REF}" "branch")} + # Try to find one from the current branch (or PR). + CACHED_DISK_NAME=$(find_cached_disk_image ".+-${GITHUB_REF}" "branch") + # If no image was found, try to find an image based on the `main` branch. + CACHED_DISK_NAME=${CACHED_DISK_NAME:-$(find_cached_disk_image "main-[0-9a-f]+" "main branch")} # If we still have no image, try to find one from any branch. CACHED_DISK_NAME=${CACHED_DISK_NAME:-$(find_cached_disk_image ".+-[0-9a-f]+" "any branch")} diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index ec901c35e06..7323d9c5506 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -64,11 +64,6 @@ on: type: boolean description: 'Does the test use Lightwalletd and Zebra cached state?' 
# main branch states can be outdated and slower, but they can also be more reliable - prefer_main_cached_state: - required: false - type: boolean - default: false - description: 'Does the test prefer to use a main branch cached state?' saves_to_disk: required: true type: boolean @@ -117,12 +112,11 @@ jobs: get-disk-name: name: Get disk name uses: ./.github/workflows/sub-find-cached-disks.yml - if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} + if: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) || (inputs.saves_to_disk || inputs.force_save_to_disk) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} disk_prefix: ${{ inputs.needs_lwd_state && 'lwd-cache' || inputs.needs_zebra_state && 'zebrad-cache' }} disk_suffix: ${{ inputs.disk_suffix }} - prefer_main_cached_state: ${{ inputs.prefer_main_cached_state }} test_id: ${{ inputs.test_id }} # Show all the test logs, then follow the logs of the test we just launched, until it finishes. @@ -139,7 +133,7 @@ jobs: cached_disk_name: ${{ needs.get-disk-name.outputs.cached_disk_name }} state_version: ${{ needs.get-disk-name.outputs.state_version }} env: - CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} + CACHED_DISK_NAME: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }} permissions: contents: 'read' id-token: 'write' diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index d3de188a807..3973c4ee165 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -20,9 +20,6 @@ on: disk_suffix: required: false type: string - prefer_main_cached_state: - required: false - type: boolean test_id: description: 'The test ID requiring the cached state disks' required: false @@ -106,7 +103,6 @@ jobs: NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input DISK_PREFIX: ${{ inputs.disk_prefix }} DISK_SUFFIX: 
${{ inputs.disk_suffix }} - PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }} run: | source ./.github/workflows/scripts/gcp-get-cached-disks.sh echo "state_version=${LOCAL_STATE_VERSION}" >> "${GITHUB_OUTPUT}" From 694d11c7acec1b8bb972765e9b98333f699cc76e Mon Sep 17 00:00:00 2001 From: Jack Grigg Date: Mon, 26 May 2025 15:19:34 +0100 Subject: [PATCH 183/245] tower-batch-control: Fix `Batch::new` compilation with `tokio_unstable` (#9547) --- tower-batch-control/src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tower-batch-control/src/service.rs b/tower-batch-control/src/service.rs index 414e2452529..394e38a5b69 100644 --- a/tower-batch-control/src/service.rs +++ b/tower-batch-control/src/service.rs @@ -137,6 +137,7 @@ where tokio::task::Builder::new() .name(&format!("{} batch", batch_kind)) .spawn(worker.run().instrument(span)) + .expect("panic on error to match tokio::spawn") }; #[cfg(not(tokio_unstable))] let worker_handle = tokio::spawn(worker.run().instrument(span)); From 806fcab1d2ef6aa0a7ee494afb0507bbe171658f Mon Sep 17 00:00:00 2001 From: Arya Date: Mon, 26 May 2025 17:59:09 -0400 Subject: [PATCH 184/245] add(process): Add hotfix release checklist (#9544) Co-authored-by: Alfredo Garcia --- .../hotfix-release-checklist.md | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md diff --git a/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md new file mode 100644 index 00000000000..e9c9b2f05e4 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md @@ -0,0 +1,102 @@ +--- +name: 'Hotfix Release Checklist Template' +about: 'Checklist to create and publish a hotfix Zebra release' +title: 'Release Zebra (version)' +labels: 'A-release, C-exclude-from-changelog, P-Critical :ambulance:' +assignees: '' + +--- + +A hotfix release should only be created when a bug or critical 
issue is discovered in an existing release, and waiting for the next scheduled release is impractical or unacceptable. + +## Create the Release PR + +- [ ] Create a branch to fix the issue based on the tag of the release being fixed (not the main branch). + for example: `hotfix-v2.3.1` - this needs to be different to the tag name +- [ ] Make the required changes +- [ ] Create a hotfix release PR by adding `&template=hotfix-release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=hotfix-release-checklist.md)). +- [ ] Add the `C-exclude-from-changelog` label so that the PR is omitted from the next release changelog +- [ ] Add the `A-release` tag to the release pull request in order for the `check_no_git_refs_in_cargo_lock` to run. +- [ ] Ensure the `check_no_git_refs_in_cargo_lock` check passes. +- [ ] Add a changelog entry for the release summarizing user-visible changes. + +## Update Versions + +The release level for a hotfix should always follow semantic versioning as a `patch` release. + +
+Update crate versions, commit the changes to the release branch, and do a release dry-run: + +```sh +# Update everything except for alpha crates and zebrad: +cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta +# Due to a bug in cargo-release, we need to pass exact versions for alpha crates: +cargo release version --verbose --execute --allow-branch '*' --package zebra-scan 0.1.0-alpha.4 +cargo release version --verbose --execute --allow-branch '*' --package zebra-grpc 0.1.0-alpha.2 +# Update zebrad: +cargo release version --verbose --execute --allow-branch '*' --package zebrad patch +# Continue with the release process: +cargo release replace --verbose --execute --allow-branch '*' --package zebrad +cargo release commit --verbose --execute --allow-branch '*' +``` + +
+ +## Update the Release PR + +- [ ] Push the version increments and the release constants to the hotfix release branch. + +# Publish the Zebra Release + +## Create the GitHub Pre-Release + +- [ ] Wait for the hotfix release PR to be reviewed, approved, and merged into main. +- [ ] Create a new release +- [ ] Set the tag name to the version tag, + for example: `v2.3.1` +- [ ] Set the release to target the hotfix release branch +- [ ] Set the release title to `Zebra ` followed by the version tag, + for example: `Zebra 2.3.1` +- [ ] Populate the release description with the final changelog you created; + starting just _after_ the title `## [Zebra ...` of the current version being released, + and ending just _before_ the title of the previous release. +- [ ] Mark the release as 'pre-release', until it has been built and tested +- [ ] Publish the pre-release to GitHub using "Publish Release" + +## Test the Pre-Release + +- [ ] Wait until the Docker binaries have been built on the hotfix release branch, and the quick tests have passed: + - [ ] [ci-tests.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml) +- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease) + +## Publish Release + +- [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" + +## Publish Crates + +- [ ] Checkout the hotfix release branch +- [ ] [Run `cargo login`](https://zebra.zfnd.org/dev/crate-owners.html#logging-in-to-cratesio) +- [ ] Run `cargo clean` in the zebra repo +- [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute --allow-branch {hotfix-release-branch}` +- [ ] Check that the published version of Zebra can be installed from `crates.io`: + `cargo install --locked --force --version 2.minor.patch 
zebrad && ~/.cargo/bin/zebrad` + and put the output in a comment on the PR. + +## Publish Docker Images + +- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml?query=event%3Arelease). +- [ ] Wait for the new tag in the [dockerhub zebra space](https://hub.docker.com/r/zfnd/zebra/tags) + +## Merge hotfix into main + +- [ ] Review and merge the hotfix branch into the main branch. The changes and the update to the changelog must be included in the next release from main as well. +- [ ] If there are conflicts between the hotfix branch and main, the conflicts should be resolved after the hotfix release is tagged and published. + +## Release Failures + +If building or running fails after tagging: + +
+1. Create a new hotfix release, starting from the top of this document. +
From 2e60cedd303f14067796eedef5f28c6b3f845d76 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 30 May 2025 17:33:04 +0100 Subject: [PATCH 185/245] refactor(ci): Run standard GitHub runners in forks and clones (#9362) * refactor(ci): remove larger runners to allow running on forks * fix(ci): some tests can't run with normal runners * fix(ci): use largest runner for crates build * fix(ci): log runner being used * fix(ci): use the corect variable reference * fix(ci): use the same approach on all the jobs * chore: use the same runner as before * imp: single liner * chore: reduce diff --------- Co-authored-by: Conrado Gouvea --- .github/workflows/ci-build-crates.yml | 2 +- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/sub-ci-unit-tests-docker.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index 4498fdec2c9..d38835e46b1 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -113,7 +113,7 @@ jobs: timeout-minutes: 90 needs: [ matrix, check-matrix ] # Some of these builds take more than 14GB disk space - runs-on: ubuntu-latest-m + runs-on: ${{ github.repository_owner == 'ZcashFoundation' && 'ubuntu-latest-m' || 'ubuntu-latest' }} strategy: # avoid rate-limit errors by only launching a few of these jobs at a time, # but still finish in a similar time to the longest tests diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 9ce9ddce4fd..98378e5e72b 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -66,7 +66,7 @@ jobs: # The large timeout is to accommodate: # - stable builds (typically 50-90 minutes), and timeout-minutes: 120 - runs-on: ubuntu-latest-xl + runs-on: ${{ github.repository_owner == 'ZcashFoundation' && 'ubuntu-latest-xl' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4.2.2 diff --git 
a/.github/workflows/sub-ci-unit-tests-docker.yml b/.github/workflows/sub-ci-unit-tests-docker.yml index 42cbbde056f..5f7205e6b4c 100644 --- a/.github/workflows/sub-ci-unit-tests-docker.yml +++ b/.github/workflows/sub-ci-unit-tests-docker.yml @@ -34,7 +34,7 @@ jobs: test-all: name: Test all timeout-minutes: 180 - runs-on: ubuntu-latest-xl + runs-on: ${{ github.repository_owner == 'ZcashFoundation' && 'ubuntu-latest-xl' || 'ubuntu-latest' }} steps: - uses: r7kamura/rust-problem-matchers@v1.5.0 From 1ab3b0f77cc6f35a684b11e7101f6dc82324f376 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 3 Jun 2025 14:01:50 +0100 Subject: [PATCH 186/245] fix(ci): prevent Google Cloud workflow failures on fork PRs (#9573) - Add missing fork condition to build job in main CD workflow - Fix inverted fork condition in patch-external workflow - Resolves authentication errors when workflows run from forked repositories --- .github/workflows/cd-deploy-nodes-gcp.patch-external.yml | 2 +- .github/workflows/cd-deploy-nodes-gcp.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml index f4f599e5638..da0153bc92b 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml @@ -24,6 +24,6 @@ jobs: name: Build CD Docker / Build images # Only run on PRs from external repositories, skipping ZF branches and tags. 
runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} steps: - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 121485ebe86..100e3f4f512 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -160,6 +160,7 @@ jobs: build: name: Build CD Docker uses: ./.github/workflows/sub-build-docker-image.yml + if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime From f8de752f523ebd65c0e0156eb160fa11e119a266 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 3 Jun 2025 12:10:25 -0400 Subject: [PATCH 187/245] change(deps): Allow Zebra crates to be compiled with alternative versions of their dependencies (#9484) * Avoid pinning Zebra crate dependencies to specific patch versions * re-bump packages with required functionality --------- Co-authored-by: Conrado Gouvea --- Cargo.toml | 164 ++++++++++++++++++------------------- zebra-chain/Cargo.toml | 4 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 4 +- zebra-state/Cargo.toml | 2 +- 5 files changed, 88 insertions(+), 88 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2d414aa7ee3..32746a9cc34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,136 +23,136 @@ resolver = "2" [workspace.dependencies] incrementalmerkletree = { version = "0.8.2", features = ["legacy-api"] } -orchard = "0.11.0" -sapling-crypto = "0.5.0" -zcash_address = "0.7.0" +orchard = "0.11" +sapling-crypto = "0.5" +zcash_address = "0.7" zcash_client_backend = "0.18" -zcash_encoding = "0.3.0" -zcash_history = "0.4.0" +zcash_encoding = "0.3" +zcash_history = "0.4" zcash_keys = "0.8" -zcash_primitives = "0.22.0" -zcash_proofs = "0.22.0" +zcash_primitives = 
"0.22" +zcash_proofs = "0.22" zcash_transparent = "0.2.3" zcash_protocol = "0.5.1" zip32 = "0.2" -abscissa_core = "0.7.0" +abscissa_core = "0.7" atty = "0.2.14" base64 = "0.22.1" bech32 = "0.11.0" -bellman = "0.14.0" -bincode = "1.3.3" -bitflags = "2.9.0" +bellman = "0.14" +bincode = "1.3" +bitflags = "2.9" bitflags-serde-legacy = "0.1.1" -bitvec = "1.0.1" -blake2b_simd = "1.0.3" -blake2s_simd = "1.0.3" -bls12_381 = "0.8.0" -bs58 = "0.5.1" -byteorder = "1.5.0" -bytes = "1.10.1" +bitvec = "1.0" +blake2b_simd = "1.0" +blake2s_simd = "1.0" +bls12_381 = "0.8" +bs58 = "0.5" +byteorder = "1.5" +bytes = "1.10" chrono = { version = "0.4.40", default-features = false } -clap = "4.5.35" +clap = "4.5" color-eyre = { version = "0.6.3", default-features = false } -console-subscriber = "0.4.0" -criterion = "0.5.1" +console-subscriber = "0.4" +criterion = "0.5" crossbeam-channel = "0.5.14" -dirs = "6.0.0" +dirs = "6.0" ed25519-zebra = "4.0.3" elasticsearch = { version = "8.17.0-alpha.1", default-features = false } equihash = "0.2.2" -ff = "0.13.1" +ff = "0.13" futures = "0.3.31" -futures-core = "0.3.28" -futures-util = "0.3.28" -group = "0.13.0" -halo2 = "0.3.0" +futures-core = "0.3.31" +futures-util = "0.3.31" +group = "0.13" +halo2 = "0.3" hex = "0.4.3" -hex-literal = "0.4.1" -howudoin = "0.1.2" +hex-literal = "0.4" +howudoin = "0.1" http-body-util = "0.1.3" -human_bytes = { version = "0.4.3", default-features = false } -humantime = "2.2.0" -humantime-serde = "1.1.1" -hyper = "1.6.0" +human_bytes = { version = "0.4", default-features = false } +humantime = "2.2" +humantime-serde = "1.1" +hyper = "1.6" hyper-util = "0.1.11" -indexmap = "2.8.0" -indicatif = "0.17.11" -inferno = { version = "0.12.2", default-features = false } -insta = "1.42.2" -itertools = "0.14.0" -jsonrpc = "0.18.0" +indexmap = "2.8" +indicatif = "0.17" +inferno = { version = "0.12", default-features = false } +insta = "1.42" +itertools = "0.14" +jsonrpc = "0.18" jsonrpsee = "0.24.8" jsonrpsee-proc-macros = 
"0.24.9" jsonrpsee-types = "0.24.9" -jubjub = "0.10.0" -lazy_static = "1.4.0" +jubjub = "0.10" +lazy_static = "1.4" log = "0.4.27" -metrics = "0.24.1" -metrics-exporter-prometheus = { version = "0.16.2", default-features = false } -mset = "0.1.1" -nix = "0.29.0" +metrics = "0.24" +metrics-exporter-prometheus = { version = "0.16", default-features = false } +mset = "0.1" +nix = "0.29" num-integer = "0.1.46" -once_cell = "1.21.3" +once_cell = "1.21" ordered-map = "0.4.2" owo-colors = "4.2.0" pin-project = "1.1.10" -primitive-types = "0.12.2" -proptest = "1.6.0" -proptest-derive = "0.5.1" +primitive-types = "0.12" +proptest = "1.6" +proptest-derive = "0.5" prost = "0.13.5" quote = "1.0.40" rand = "0.8.5" -rand_chacha = "0.3.1" -rand_core = "0.6.4" -rayon = "1.10.0" -reddsa = "0.5.1" +rand_chacha = "0.3" +rand_core = "0.6" +rayon = "1.10" +reddsa = "0.5" redjubjub = "0.8" -regex = "1.11.0" -reqwest = { version = "0.12.15", default-features = false } -ripemd = "0.1.3" -rlimit = "0.10.2" -rocksdb = { version = "0.22.0", default-features = false } +regex = "1.11" +reqwest = { version = "0.12", default-features = false } +ripemd = "0.1" +rlimit = "0.10" +rocksdb = { version = "0.22", default-features = false } secp256k1 = "0.29" semver = "1.0.26" -sentry = { version = "0.36.0", default-features = false } +sentry = { version = "0.36", default-features = false } serde = "1.0.219" -serde-big-array = "0.5.1" +serde-big-array = "0.5" serde_json = "1.0.140" -serde_with = "3.12.0" -serde_yml = "0.0.12" -sha2 = "0.10.7" -spandoc = "0.2.2" -static_assertions = "1.1.0" -structopt = "0.3.26" +serde_with = "3.12" +serde_yml = "0.0" +sha2 = "0.10" +spandoc = "0.2" +static_assertions = "1.1" +structopt = "0.3" syn = "2.0.100" -tempfile = "3.19.1" -thiserror = "2.0.12" -thread-priority = "1.2.0" -tinyvec = "1.9.0" -tokio = "1.44.2" +tempfile = "3.19" +thiserror = "2.0" +thread-priority = "1.2" +tinyvec = "1.9" +tokio = "1.44" tokio-stream = "0.1.17" -tokio-test = "0.4.4" +tokio-test = 
"0.4" tokio-util = "0.7.14" -toml = "0.8.20" +toml = "0.8" tonic = "0.12.3" tonic-build = "0.12.3" tonic-reflection = "0.12.3" tower = "0.4.13" -tower-test = "0.4.0" +tower-test = "0.4" tracing = "0.1.41" -tracing-appender = "0.2.3" -tracing-error = "0.2.1" -tracing-flame = "0.2.0" +tracing-appender = "0.2" +tracing-error = "0.2" +tracing-flame = "0.2" tracing-futures = "0.2.5" -tracing-journald = "0.3.0" +tracing-journald = "0.3" tracing-subscriber = "0.3.19" tracing-test = "0.2.4" -uint = "0.10.0" -vergen-git2 = { version = "1.0.0", default-features = false } -wagyu-zcash-parameters = "0.2.0" +uint = "0.10" +vergen-git2 = { version = "1.0", default-features = false } +wagyu-zcash-parameters = "0.2" x25519-dalek = "2.0.1" zcash_note_encryption = "0.4.1" -zcash_script = "0.2.0" +zcash_script = "0.2" [workspace.metadata.release] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index b87516e8b67..97bbd406cc4 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -86,7 +86,7 @@ x25519-dalek = { workspace = true, features = ["serde"] } bech32 = { workspace = true } # ECC deps -halo2 = { package = "halo2_proofs", version = "0.3.0" } +halo2 = { package = "halo2_proofs", version = "0.3" } orchard.workspace = true zcash_encoding.workspace = true zcash_history.workspace = true @@ -96,7 +96,7 @@ sapling-crypto.workspace = true zcash_protocol.workspace = true zcash_address.workspace = true zcash_transparent.workspace = true -sinsemilla = { version = "0.1.0" } +sinsemilla = { version = "0.1" } # Time chrono = { workspace = true, features = ["clock", "std", "serde"] } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 4fe2bf0de15..576f49f85c1 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -33,7 +33,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "ze blake2b_simd = { workspace = true } bellman = { workspace = true } bls12_381 = { workspace = true } -halo2 = { package = 
"halo2_proofs", version = "0.3.0" } +halo2 = { package = "halo2_proofs", version = "0.3" } jubjub = { workspace = true } rand = { workspace = true } rayon = { workspace = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 59e5814eea1..c94598c3c81 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -76,8 +76,8 @@ howudoin = { workspace = true, optional = true } # tor dependencies # Wait until `arti-client`'s dependency `x25519-dalek v1.2.0` is updated to a higher version. (#5492) -# arti-client = { version = "0.0.2", optional = true } -# tor-rtcompat = { version = "0.0.2", optional = true } +# arti-client = { version = "0.0", optional = true } +# tor-rtcompat = { version = "0.0", optional = true } # proptest dependencies proptest = { workspace = true, optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 5fe8d111699..8e1d86f489e 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -104,7 +104,7 @@ proptest = { workspace = true } proptest-derive = { workspace = true } rand = { workspace = true } -halo2 = { package = "halo2_proofs", version = "0.3.0" } +halo2 = { package = "halo2_proofs", version = "0.3" } jubjub = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } From d230f31c5a580de3ad160cd0d9b03a6e01e71c93 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 3 Jun 2025 12:10:30 -0400 Subject: [PATCH 188/245] change(consensus): Adds a `Nu6_1` variant to `NetworkUpgrade` (#9526) * Adds a `Nu6_1` variant to `NetworkUpgrade` * updates CONSENSUS_BRANCH_IDS to include NU6.1 and NU7 when compiled with the `zebra-test` feature * Update zebra-chain/src/parameters/network/testnet.rs Co-authored-by: Alfredo Garcia --------- Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/block/commitment.rs | 4 +- zebra-chain/src/history_tree.rs | 10 +- 
zebra-chain/src/parameters/network/testnet.rs | 6 + zebra-chain/src/parameters/network_upgrade.rs | 24 +- zebra-chain/src/primitives/zcash_history.rs | 1 + zebra-chain/src/transaction/arbitrary.rs | 5 +- zebra-consensus/src/transaction.rs | 6 +- zebra-consensus/src/transaction/tests/prop.rs | 2 +- zebra-network/src/constants.rs | 5 +- zebra-network/src/protocol/external/types.rs | 13 +- .../types/get_block_template/proposal.rs | 2 +- zebrad/tests/common/configs/v2.3.0.toml | 217 ++++++++++++++++++ 12 files changed, 275 insertions(+), 20 deletions(-) create mode 100644 zebrad/tests/common/configs/v2.3.0.toml diff --git a/zebra-chain/src/block/commitment.rs b/zebra-chain/src/block/commitment.rs index ec4ef7d2616..7fa9f3d6361 100644 --- a/zebra-chain/src/block/commitment.rs +++ b/zebra-chain/src/block/commitment.rs @@ -125,7 +125,7 @@ impl Commitment { // NetworkUpgrade::current() returns the latest network upgrade that's activated at the provided height, so // on Regtest for heights above height 0, it could return NU6, and it's possible for the current network upgrade // to be NU6 (or Canopy, or any network upgrade above Heartwood) at the Heartwood activation height. 
- (Canopy | Nu5 | Nu6 | Nu7, activation_height) + (Canopy | Nu5 | Nu6 | Nu6_1 | Nu7, activation_height) if height == activation_height && Some(height) == Heartwood.activation_height(network) => { @@ -136,7 +136,7 @@ impl Commitment { } } (Heartwood | Canopy, _) => Ok(ChainHistoryRoot(ChainHistoryMmrRootHash(bytes))), - (Nu5 | Nu6 | Nu7, _) => Ok(ChainHistoryBlockTxAuthCommitment( + (Nu5 | Nu6 | Nu6_1 | Nu7, _) => Ok(ChainHistoryBlockTxAuthCommitment( ChainHistoryBlockTxAuthCommitmentHash(bytes), )), } diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs index 12dd9c61153..b7a5e96297d 100644 --- a/zebra-chain/src/history_tree.rs +++ b/zebra-chain/src/history_tree.rs @@ -102,7 +102,10 @@ impl NonEmptyHistoryTree { )?; InnerHistoryTree::PreOrchard(tree) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { + NetworkUpgrade::Nu5 + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 + | NetworkUpgrade::Nu7 => { let tree = Tree::::new_from_cache( network, network_upgrade, @@ -156,7 +159,10 @@ impl NonEmptyHistoryTree { )?; (InnerHistoryTree::PreOrchard(tree), entry) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { + NetworkUpgrade::Nu5 + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 + | NetworkUpgrade::Nu7 => { let (tree, entry) = Tree::::new_from_block( network, block, diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 16c4906061d..ee2d8e39caf 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -129,6 +129,7 @@ impl From<&BTreeMap> for ConfiguredActivationHeights { NetworkUpgrade::Canopy => &mut configured_activation_heights.canopy, NetworkUpgrade::Nu5 => &mut configured_activation_heights.nu5, NetworkUpgrade::Nu6 => &mut configured_activation_heights.nu6, + NetworkUpgrade::Nu6_1 => &mut configured_activation_heights.nu6_1, NetworkUpgrade::Nu7 => &mut 
configured_activation_heights.nu7, NetworkUpgrade::Genesis => { continue; @@ -260,6 +261,9 @@ pub struct ConfiguredActivationHeights { /// Activation height for `NU6` network upgrade. #[serde(rename = "NU6")] pub nu6: Option, + /// Activation height for `NU6.1` network upgrade. + #[serde(rename = "NU6.1")] + pub nu6_1: Option, /// Activation height for `NU7` network upgrade. #[serde(rename = "NU7")] pub nu7: Option, @@ -397,6 +401,7 @@ impl ParametersBuilder { canopy, nu5, nu6, + nu6_1, nu7, }: ConfiguredActivationHeights, ) -> Self { @@ -420,6 +425,7 @@ impl ParametersBuilder { .chain(canopy.into_iter().map(|h| (h, Canopy))) .chain(nu5.into_iter().map(|h| (h, Nu5))) .chain(nu6.into_iter().map(|h| (h, Nu6))) + .chain(nu6_1.into_iter().map(|h| (h, Nu6_1))) .chain(nu7.into_iter().map(|h| (h, Nu7))) .map(|(h, nu)| (h.try_into().expect("activation height must be valid"), nu)) .collect(); diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs index 0303e1e7344..8b9c3c7517e 100644 --- a/zebra-chain/src/parameters/network_upgrade.rs +++ b/zebra-chain/src/parameters/network_upgrade.rs @@ -15,7 +15,7 @@ use hex::{FromHex, ToHex}; use proptest_derive::Arbitrary; /// A list of network upgrades in the order that they must be activated. -const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 10] = [ +const NETWORK_UPGRADES_IN_ORDER: &[NetworkUpgrade] = &[ Genesis, BeforeOverwinter, Overwinter, @@ -25,6 +25,9 @@ const NETWORK_UPGRADES_IN_ORDER: [NetworkUpgrade; 10] = [ Canopy, Nu5, Nu6, + #[cfg(any(test, feature = "zebra-test"))] + Nu6_1, + #[cfg(any(test, feature = "zebra-test"))] Nu7, ]; @@ -62,6 +65,9 @@ pub enum NetworkUpgrade { /// The Zcash protocol after the NU6 upgrade. #[serde(rename = "NU6")] Nu6, + /// The Zcash protocol after the NU6.1 upgrade. + #[serde(rename = "NU6.1")] + Nu6_1, /// The Zcash protocol after the NU7 upgrade. 
#[serde(rename = "NU7")] Nu7, @@ -120,7 +126,8 @@ const FAKE_MAINNET_ACTIVATION_HEIGHTS: &[(block::Height, NetworkUpgrade)] = &[ (block::Height(30), Canopy), (block::Height(35), Nu5), (block::Height(40), Nu6), - (block::Height(45), Nu7), + (block::Height(45), Nu6_1), + (block::Height(50), Nu7), ]; /// Testnet network upgrade activation heights. @@ -157,6 +164,8 @@ const FAKE_TESTNET_ACTIVATION_HEIGHTS: &[(block::Height, NetworkUpgrade)] = &[ (block::Height(30), Canopy), (block::Height(35), Nu5), (block::Height(40), Nu6), + (block::Height(45), Nu6_1), + (block::Height(50), Nu7), ]; /// The Consensus Branch Id, used to bind transactions and blocks to a @@ -248,6 +257,9 @@ pub(crate) const CONSENSUS_BRANCH_IDS: &[(NetworkUpgrade, ConsensusBranchId)] = (Canopy, ConsensusBranchId(0xe9ff75a6)), (Nu5, ConsensusBranchId(0xc2d6d0b4)), (Nu6, ConsensusBranchId(0xc8e71055)), + #[cfg(any(test, feature = "zebra-test"))] + (Nu6_1, ConsensusBranchId(0x4dec4df0)), + #[cfg(any(test, feature = "zebra-test"))] (Nu7, ConsensusBranchId(0x77190ad8)), ]; @@ -324,8 +336,8 @@ impl Network { /// in ascending height order. pub fn full_activation_list(&self) -> Vec<(block::Height, NetworkUpgrade)> { NETWORK_UPGRADES_IN_ORDER - .into_iter() - .map_while(|nu| Some((NetworkUpgrade::activation_height(&nu, self)?, nu))) + .iter() + .map_while(|&nu| Some((NetworkUpgrade::activation_height(&nu, self)?, nu))) .collect() } } @@ -437,7 +449,7 @@ impl NetworkUpgrade { pub fn target_spacing(&self) -> Duration { let spacing_seconds = match self { Genesis | BeforeOverwinter | Overwinter | Sapling => PRE_BLOSSOM_POW_TARGET_SPACING, - Blossom | Heartwood | Canopy | Nu5 | Nu6 | Nu7 => { + Blossom | Heartwood | Canopy | Nu5 | Nu6 | Nu6_1 | Nu7 => { POST_BLOSSOM_POW_TARGET_SPACING.into() } }; @@ -544,7 +556,7 @@ impl NetworkUpgrade { /// Returns an iterator over [`NetworkUpgrade`] variants. 
pub fn iter() -> impl DoubleEndedIterator { - NETWORK_UPGRADES_IN_ORDER.into_iter() + NETWORK_UPGRADES_IN_ORDER.iter().copied() } } diff --git a/zebra-chain/src/primitives/zcash_history.rs b/zebra-chain/src/primitives/zcash_history.rs index 4b52c85d8e8..bf348b56f82 100644 --- a/zebra-chain/src/primitives/zcash_history.rs +++ b/zebra-chain/src/primitives/zcash_history.rs @@ -277,6 +277,7 @@ impl Version for zcash_history::V1 { | NetworkUpgrade::Canopy | NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 | NetworkUpgrade::Nu7 => zcash_history::NodeData { consensus_branch_id: branch_id.into(), subtree_commitment: block_hash, diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index a13a8876efe..4e7c182b619 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -778,7 +778,10 @@ impl Arbitrary for Transaction { NetworkUpgrade::Blossom | NetworkUpgrade::Heartwood | NetworkUpgrade::Canopy => { Self::v4_strategy(ledger_state) } - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => prop_oneof![ + NetworkUpgrade::Nu5 + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 + | NetworkUpgrade::Nu7 => prop_oneof![ Self::v4_strategy(ledger_state.clone()), Self::v5_strategy(ledger_state) ] diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 53874c45fa1..26d37f12974 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -939,6 +939,7 @@ where | NetworkUpgrade::Canopy | NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 | NetworkUpgrade::Nu7 => Ok(()), // Does not support V4 transactions @@ -1025,7 +1026,10 @@ where // // Note: Here we verify the transaction version number of the above rule, the group // id is checked in zebra-chain crate, in the transaction serialize. 
- NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => Ok(()), + NetworkUpgrade::Nu5 + | NetworkUpgrade::Nu6 + | NetworkUpgrade::Nu6_1 + | NetworkUpgrade::Nu7 => Ok(()), // Does not support V5 transactions NetworkUpgrade::Genesis diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index 8f3167e2bae..ec530cf4bd4 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -348,7 +348,7 @@ fn sanitize_transaction_version( Overwinter => 3, Sapling | Blossom | Heartwood | Canopy => 4, // FIXME: Use 6 for Nu7 - Nu5 | Nu6 | Nu7 => 5, + Nu5 | Nu6 | Nu6_1 | Nu7 => 5, } }; diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 7d96add03d3..63781e908d2 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -340,8 +340,9 @@ pub const TIMESTAMP_TRUNCATION_SECONDS: u32 = 30 * 60; /// /// This version of Zebra draws the current network protocol version from /// [ZIP-253](https://zips.z.cash/zip-0253). -// TODO: Update this constant to the correct value after NU7 activation, -// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_140); +// TODO: Update this constant to the correct value after NU6.1 & NU7 activation, +// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_140); // NU6.1 +// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_160); // NU7 pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_120); /// The default RTT estimate for peer responses. 
diff --git a/zebra-network/src/protocol/external/types.rs b/zebra-network/src/protocol/external/types.rs index 78148253ba8..3a5a7bd856d 100644 --- a/zebra-network/src/protocol/external/types.rs +++ b/zebra-network/src/protocol/external/types.rs @@ -106,8 +106,10 @@ impl Version { (Mainnet, Nu5) => 170_100, (Testnet(params), Nu6) if params.is_default_testnet() => 170_110, (Mainnet, Nu6) => 170_120, - (Testnet(params), Nu7) if params.is_default_testnet() => 170_130, - (Mainnet, Nu7) => 170_140, + (Testnet(params), Nu6_1) if params.is_default_testnet() => 170_130, + (Mainnet, Nu6_1) => 170_140, + (Testnet(params), Nu7) if params.is_default_testnet() => 170_150, + (Mainnet, Nu7) => 170_160, // It should be fine to reject peers with earlier network protocol versions on custom testnets for now. (Testnet(_), _) => CURRENT_NETWORK_PROTOCOL_VERSION.0, @@ -208,8 +210,10 @@ mod test { let highest_network_upgrade = NetworkUpgrade::current(network, block::Height::MAX); assert!( - highest_network_upgrade == Nu7 || highest_network_upgrade == Nu6, - "expected coverage of all network upgrades: add the new network upgrade to the list in this test"); + matches!(highest_network_upgrade, Nu6 | Nu6_1 | Nu7), + "expected coverage of all network upgrades: \ + add the new network upgrade to the list in this test" + ); for &network_upgrade in &[ BeforeOverwinter, @@ -220,6 +224,7 @@ mod test { Canopy, Nu5, Nu6, + Nu6_1, Nu7, ] { let height = network_upgrade.activation_height(network); diff --git a/zebra-rpc/src/methods/types/get_block_template/proposal.rs b/zebra-rpc/src/methods/types/get_block_template/proposal.rs index 1b8100c8c29..1574762f970 100644 --- a/zebra-rpc/src/methods/types/get_block_template/proposal.rs +++ b/zebra-rpc/src/methods/types/get_block_template/proposal.rs @@ -217,7 +217,7 @@ pub fn proposal_block_from_template( | NetworkUpgrade::Blossom | NetworkUpgrade::Heartwood => panic!("pre-Canopy block templates not supported"), NetworkUpgrade::Canopy => 
chain_history_root.bytes_in_serialized_order().into(), - NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu7 => { + NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu6_1 | NetworkUpgrade::Nu7 => { block_commitments_hash.bytes_in_serialized_order().into() } }; diff --git a/zebrad/tests/common/configs/v2.3.0.toml b/zebrad/tests/common/configs/v2.3.0.toml new file mode 100644 index 00000000000..bbdbf231833 --- /dev/null +++ b/zebrad/tests/common/configs/v2.3.0.toml @@ -0,0 +1,217 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
+# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Testnet" +peerset_initial_target_size = 25 + +[network.testnet_parameters] +network_name = "ConfiguredTestnet_1" +network_magic = [0, 0, 0, 0] +slow_start_interval = 0 +target_difficulty_limit = "0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f" +disable_pow = true +genesis_hash = "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08" + +[network.testnet_parameters.activation_heights] +BeforeOverwinter = 1 +Overwinter = 207_500 +Sapling = 280_000 +Blossom = 584_000 +Heartwood = 903_800 +Canopy = 1_028_500 +NU5 = 1_842_420 +NU6 = 2_000_000 +"NU6.1" = 2_000_001 +NU7 = 2_000_002 + +[network.testnet_parameters.pre_nu6_funding_streams.height_range] +start = 0 +end = 100 + +[[network.testnet_parameters.post_nu6_funding_streams.recipients]] +receiver = "ECC" +numerator = 7 +addresses = [ + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + 
"t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + 
"t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", +] + +[[network.testnet_parameters.post_nu6_funding_streams.recipients]] +receiver = "ZcashFoundation" +numerator = 5 +addresses = [ + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + 
"t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", + "t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz", +] + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false From c8be9944cf7669bbea108b99efe6af656e67ceef Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 3 Jun 2025 17:10:34 +0100 Subject: [PATCH 189/245] fix(ci): pin lightwalletd to v0.4.17 to prevent CI failures (#9575) Pin lightwalletd container to v0.4.17 instead of using latest tag. The latest version (v0.4.18) broke our lightwalletd integration tests, causing CI pipeline failures. Using pinned versions prevents unexpected test failures when upstream dependencies change and gives us control over when to update and test compatibility. 
--- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index a82c1010fb9..4e6af86e35a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -114,7 +114,7 @@ RUN --mount=type=bind,source=zebrad,target=zebrad \ cp ${HOME}/target/release/zebra-checkpoints /usr/local/bin # Copy the lightwalletd binary and source files to be able to run tests -COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/ +COPY --from=electriccoinco/lightwalletd:v0.4.17 /usr/local/bin/lightwalletd /usr/local/bin/ # Copy the gosu binary to be able to run the entrypoint as non-root user # and allow to change permissions for mounted cache directories From e14d1d8e406e93ed618e75d8045ab7f61dc23058 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 3 Jun 2025 16:49:53 -0400 Subject: [PATCH 190/245] change(state): Upgrade db format to support new fields in RPC outputs (#9539) * updates book/../state-db-upgrades.md * updates zebra-consensus with the changes from PR #9432 and minor test changes from #9295 * updates zebra-chain with the changes from PR #9432 * updates imports in zebra-rpc * Merges and applies changes to zebra-state from PRs #9295 and #9432 * adds a TODO. * minor update to zebra-rpc to use the new ReadResponse::AddressBalance type * renames add_block_info db upgrade * moves migration from `add_balance_received` to `block_info_and_address_received` * replaces `write` with `write_batch` * update disk format snapshots * combines partial_transparent_balance_change() and partial_transparent_received_change() * fixes typo * Replaces redundant code with a call to `construct_column_families()` * refactors `AddressBalanceLocation` and `AddressBalanceLocationChange` into a newtypes around a common inner type. 
* fixes lints and doc compilation issue * updates snapshots * bumps the db format version to the next major version (27.0.0) instead of the next minor version (26.1.0) * fixes clippy lint, minor cleanups * fixes lint * applies suggestions from code review * fixes a mistake in `is_reusable_major_upgrade` where v26.0.0 is not a restorable db version * Fixes an issue around marking an upgraded db format in the disk version file before completing migrations when reusing a previous major db format and restarting Zebrad before the migration is complete. * Apply suggestions from code review --------- Co-authored-by: Gustavo Valverde --- book/src/dev/state-db-upgrades.md | 16 +- zebra-chain/src/amount.rs | 4 + zebra-chain/src/block_info.rs | 28 ++ zebra-chain/src/lib.rs | 1 + zebra-chain/src/parameters/network/subsidy.rs | 165 ++++++- zebra-chain/src/parameters/network/tests.rs | 316 +++++++++++++ zebra-consensus/src/block.rs | 10 +- zebra-consensus/src/block/check.rs | 12 +- zebra-consensus/src/block/subsidy.rs | 2 - .../src/block/subsidy/funding_streams.rs | 37 +- .../block/subsidy/funding_streams/tests.rs | 6 +- zebra-consensus/src/block/subsidy/general.rs | 430 ------------------ zebra-consensus/src/block/tests.rs | 32 +- zebra-consensus/src/checkpoint.rs | 10 +- zebra-consensus/src/error.rs | 33 +- zebra-consensus/src/lib.rs | 5 +- zebra-consensus/src/transaction/tests.rs | 26 +- zebra-rpc/src/methods.rs | 12 +- zebra-rpc/src/methods/tests/prop.rs | 2 +- .../src/methods/types/get_block_template.rs | 9 +- zebra-state/src/config.rs | 42 +- zebra-state/src/constants.rs | 11 +- zebra-state/src/lib.rs | 6 +- zebra-state/src/request.rs | 9 +- zebra-state/src/response.rs | 18 +- zebra-state/src/service.rs | 36 +- zebra-state/src/service/finalized_state.rs | 10 +- .../src/service/finalized_state/disk_db.rs | 140 ++++-- .../finalized_state/disk_format/chain.rs | 31 ++ .../disk_format/tests/snapshot.rs | 2 +- ...y_transparent_addr_raw_data@mainnet_1.snap | 2 +- 
...y_transparent_addr_raw_data@mainnet_2.snap | 2 +- ...y_transparent_addr_raw_data@testnet_1.snap | 2 +- ...y_transparent_addr_raw_data@testnet_2.snap | 2 +- .../block_data_raw_data@mainnet_1.snap | 14 + .../block_data_raw_data@mainnet_2.snap | 18 + .../block_data_raw_data@testnet_1.snap | 14 + .../block_data_raw_data@testnet_2.snap | 18 + .../block_info_raw_data@mainnet_0.snap | 10 + .../block_info_raw_data@mainnet_1.snap | 14 + .../block_info_raw_data@mainnet_2.snap | 18 + .../block_info_raw_data@testnet_0.snap | 10 + .../block_info_raw_data@testnet_1.snap | 14 + .../block_info_raw_data@testnet_2.snap | 18 + .../tests/snapshots/column_family_names.snap | 1 + .../empty_column_families@mainnet_0.snap | 1 - .../empty_column_families@no_blocks.snap | 1 + .../empty_column_families@testnet_0.snap | 1 - ...p_chain_value_pool_raw_data@mainnet_0.snap | 10 + ...p_chain_value_pool_raw_data@testnet_0.snap | 10 + .../disk_format/transparent.rs | 232 ++++++++-- .../finalized_state/disk_format/upgrade.rs | 30 +- .../block_info_and_address_received.rs | 300 ++++++++++++ .../disk_format/upgrade/no_migration.rs | 8 +- .../src/service/finalized_state/zebra_db.rs | 22 +- .../service/finalized_state/zebra_db/block.rs | 30 +- .../zebra_db/block/tests/snapshot.rs | 5 +- .../snapshots/address_balances@mainnet_1.snap | 5 +- .../snapshots/address_balances@mainnet_2.snap | 5 +- .../snapshots/address_balances@testnet_1.snap | 5 +- .../snapshots/address_balances@testnet_2.snap | 5 +- .../service/finalized_state/zebra_db/chain.rs | 60 ++- .../finalized_state/zebra_db/transparent.rs | 89 ++-- .../src/service/non_finalized_state/chain.rs | 66 ++- .../non_finalized_state/chain/index.rs | 6 + .../service/non_finalized_state/tests/prop.rs | 3 + zebra-state/src/service/read.rs | 4 +- .../src/service/read/address/balance.rs | 22 +- zebra-state/src/service/read/block.rs | 22 + 69 files changed, 1757 insertions(+), 803 deletions(-) create mode 100644 zebra-chain/src/block_info.rs delete mode 100644 
zebra-consensus/src/block/subsidy/general.rs create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md index 65eb4744b76..7ac34e0fe0f 100644 --- a/book/src/dev/state-db-upgrades.md +++ b/book/src/dev/state-db-upgrades.md @@ -12,7 +12,8 @@ family doesn't exist. 
Instead: - define the name and type of each column family at the top of the implementation module, - add a method on the database that returns that type, and -- add the column family name to the list of column families in the database: +- add the column family name to the list of column families in the database + (in the `STATE_COLUMN_FAMILIES_IN_CODE` array): For example: ```rust @@ -304,7 +305,7 @@ We use the following rocksdb column families: | `hash_by_tx_loc` | `TransactionLocation` | `transaction::Hash` | Create | | `tx_loc_by_hash` | `transaction::Hash` | `TransactionLocation` | Create | | *Transparent* | | | | -| `balance_by_transparent_addr` | `transparent::Address` | `Amount \|\| AddressLocation` | Update | +| `balance_by_transparent_addr` | `transparent::Address` | `AddressBalanceLocation` | Update | | `tx_loc_by_transparent_addr_loc` | `AddressTransaction` | `()` | Create | | `utxo_by_out_loc` | `OutputLocation` | `transparent::Output` | Delete | | `utxo_loc_by_transparent_addr_loc` | `AddressUnspentOutput` | `()` | Delete | @@ -325,6 +326,7 @@ We use the following rocksdb column families: | *Chain* | | | | | `history_tree` | `()` | `NonEmptyHistoryTree` | Update | | `tip_chain_value_pool` | `()` | `ValueBalance` | Update | +| `block_info` | `block::Height` | `BlockInfo` | Create | With the following additional modifications when compiled with the `indexer` feature: @@ -352,6 +354,7 @@ Block and Transaction Data: - `TransactionIndex`: 16 bits, big-endian, unsigned (max ~23,000 transactions in the 2 MB block limit) - `TransactionCount`: same as `TransactionIndex` - `TransactionLocation`: `Height \|\| TransactionIndex` +- `AddressBalanceLocation`: `Amount \|\| u64 \|\| AddressLocation` - `OutputIndex`: 24 bits, big-endian, unsigned (max ~223,000 transfers in the 2 MB block limit) - transparent and shielded input indexes, and shielded output indexes: 16 bits, big-endian, unsigned (max ~49,000 transfers in the 2 MB block limit) - `OutputLocation`: 
`TransactionLocation \|\| OutputIndex` @@ -600,9 +603,16 @@ So they should not be used for consensus-critical checks. **TODO:** store the `Root` hash in `sprout_note_commitment_tree`, and use it to look up the note commitment tree. This de-duplicates tree state data. But we currently only store one sprout tree by height. -- The value pools are only stored for the finalized tip. +- The value pools are only stored for the finalized tip. Per-block value pools + are stored in `block_info`, see below. - We do not store the cumulative work for the finalized chain, because the finalized work is equal for all non-finalized chains. So the additional non-finalized work can be used to calculate the relative chain order, and choose the best chain. + +- The `block_info` contains additional per-block data. Currently it stores + the value pools after that block, and its size. It has been implemented + in a future-proof way so it is possible to add more data to it while + still allowing database downgrades (i.e. it does not require the + data length to match exactly what is expected and ignores the rest) diff --git a/zebra-chain/src/amount.rs b/zebra-chain/src/amount.rs index 4b10699102a..355a8b63bb0 100644 --- a/zebra-chain/src/amount.rs +++ b/zebra-chain/src/amount.rs @@ -538,6 +538,10 @@ impl Constraint for NegativeAllowed { /// ); /// ``` #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Default)] +#[cfg_attr( + any(test, feature = "proptest-impl"), + derive(proptest_derive::Arbitrary) +)] pub struct NonNegative; impl Constraint for NonNegative { diff --git a/zebra-chain/src/block_info.rs b/zebra-chain/src/block_info.rs new file mode 100644 index 00000000000..7d72380b177 --- /dev/null +++ b/zebra-chain/src/block_info.rs @@ -0,0 +1,28 @@ +//! Extra per-block info tracked in the state. +use crate::{amount::NonNegative, value_balance::ValueBalance}; + +/// Extra per-block info tracked in the state.
+#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct BlockInfo { + /// The pool balances after the block. + value_pools: ValueBalance, + /// The size of the block in bytes. + size: u32, +} + +impl BlockInfo { + /// Creates a new [`BlockInfo`] with the given value pools and block size. + pub fn new(value_pools: ValueBalance, size: u32) -> Self { + BlockInfo { value_pools, size } + } + + /// Returns the value pools of this block. + pub fn value_pools(&self) -> &ValueBalance { + &self.value_pools + } + + /// Returns the size of this block. + pub fn size(&self) -> u32 { + self.size + } +} diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index 0dd4a57c2d7..d9cf8001390 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -20,6 +20,7 @@ extern crate tracing; pub mod amount; pub mod block; +pub mod block_info; pub mod chain_sync_status; pub mod chain_tip; pub mod common; diff --git a/zebra-chain/src/parameters/network/subsidy.rs b/zebra-chain/src/parameters/network/subsidy.rs index 68fd90cb77c..effd77383a7 100644 --- a/zebra-chain/src/parameters/network/subsidy.rs +++ b/zebra-chain/src/parameters/network/subsidy.rs @@ -12,14 +12,15 @@ //! Typically, consensus parameters are accessed via a function that takes a //! `Network` and `block::Height`.
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use lazy_static::lazy_static; use crate::{ - amount::COIN, + amount::{self, Amount, NonNegative, COIN}, block::{Height, HeightDiff}, parameters::{Network, NetworkUpgrade}, + transaction::Transaction, transparent, }; @@ -600,3 +601,163 @@ pub fn height_for_halving(halving: u32, network: &Network) -> Option { let height = u32::try_from(height).ok()?; height.try_into().ok() } + +/// Returns the `fs.Value(height)` for each stream receiver +/// as described in [protocol specification §7.8][7.8] +/// +/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies +pub fn funding_stream_values( + height: Height, + network: &Network, + expected_block_subsidy: Amount, +) -> Result>, crate::amount::Error> { + let canopy_height = NetworkUpgrade::Canopy.activation_height(network).unwrap(); + let mut results = HashMap::new(); + + if height >= canopy_height { + let funding_streams = network.funding_streams(height); + if funding_streams.height_range().contains(&height) { + for (&receiver, recipient) in funding_streams.recipients() { + // - Spec equation: `fs.value = floor(block_subsidy(height)*(fs.numerator/fs.denominator))`: + // https://zips.z.cash/protocol/protocol.pdf#subsidies + // - In Rust, "integer division rounds towards zero": + // https://doc.rust-lang.org/stable/reference/expressions/operator-expr.html#arithmetic-and-logical-binary-operators + // This is the same as `floor()`, because these numbers are all positive. + let amount_value = ((expected_block_subsidy * recipient.numerator())? + / FUNDING_STREAM_RECEIVER_DENOMINATOR)?; + + results.insert(receiver, amount_value); + } + } + } + + Ok(results) +} + +/// Block subsidy errors. 
+#[derive(thiserror::Error, Clone, Debug, PartialEq, Eq)] +#[allow(missing_docs)] +pub enum SubsidyError { + #[error("no coinbase transaction in block")] + NoCoinbase, + + #[error("funding stream expected output not found")] + FundingStreamNotFound, + + #[error("miner fees are invalid")] + InvalidMinerFees, + + #[error("a sum of amounts overflowed")] + SumOverflow, + + #[error("unsupported height")] + UnsupportedHeight, + + #[error("invalid amount")] + InvalidAmount(#[from] amount::Error), +} + +/// The divisor used for halvings. +/// +/// `1 << Halving(height)`, as described in [protocol specification §7.8][7.8] +/// +/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies +/// +/// Returns `None` if the divisor would overflow a `u64`. +pub fn halving_divisor(height: Height, network: &Network) -> Option { + // Some far-future shifts can be more than 63 bits + 1u64.checked_shl(num_halvings(height, network)) +} + +/// The halving index for a block height and network. +/// +/// `Halving(height)`, as described in [protocol specification §7.8][7.8] +/// +/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies +pub fn num_halvings(height: Height, network: &Network) -> u32 { + let slow_start_shift = network.slow_start_shift(); + let blossom_height = NetworkUpgrade::Blossom + .activation_height(network) + .expect("blossom activation height should be available"); + + let halving_index = if height < slow_start_shift { + 0 + } else if height < blossom_height { + let pre_blossom_height = height - slow_start_shift; + pre_blossom_height / network.pre_blossom_halving_interval() + } else { + let pre_blossom_height = blossom_height - slow_start_shift; + let scaled_pre_blossom_height = + pre_blossom_height * HeightDiff::from(BLOSSOM_POW_TARGET_SPACING_RATIO); + + let post_blossom_height = height - blossom_height; + + (scaled_pre_blossom_height + post_blossom_height) / network.post_blossom_halving_interval() + }; + + halving_index + .try_into() + .expect("already 
checked for negatives") +} + +/// `BlockSubsidy(height)` as described in [protocol specification §7.8][7.8] +/// +/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies +pub fn block_subsidy( + height: Height, + network: &Network, +) -> Result, SubsidyError> { + let blossom_height = NetworkUpgrade::Blossom + .activation_height(network) + .expect("blossom activation height should be available"); + + // If the halving divisor is larger than u64::MAX, the block subsidy is zero, + // because amounts fit in an i64. + // + // Note: bitcoind incorrectly wraps here, which restarts large block rewards. + let Some(halving_div) = halving_divisor(height, network) else { + return Ok(Amount::zero()); + }; + + // Zebra doesn't need to calculate block subsidies for blocks with heights in the slow start + // interval because it handles those blocks through checkpointing. + if height < network.slow_start_interval() { + Err(SubsidyError::UnsupportedHeight) + } else if height < blossom_height { + // this calculation is exact, because the halving divisor is 1 here + Ok(Amount::try_from(MAX_BLOCK_SUBSIDY / halving_div)?) + } else { + let scaled_max_block_subsidy = + MAX_BLOCK_SUBSIDY / u64::from(BLOSSOM_POW_TARGET_SPACING_RATIO); + // in future halvings, this calculation might not be exact + // Amount division is implemented using integer division, + // which truncates (rounds down) the result, as specified + Ok(Amount::try_from(scaled_max_block_subsidy / halving_div)?) + } +} + +/// `MinerSubsidy(height)` as described in [protocol specification §7.8][7.8] +/// +/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies +pub fn miner_subsidy( + height: Height, + network: &Network, + expected_block_subsidy: Amount, +) -> Result, amount::Error> { + let total_funding_stream_amount: Result, _> = + funding_stream_values(height, network, expected_block_subsidy)? + .values() + .sum(); + + expected_block_subsidy - total_funding_stream_amount? 
+} + +/// Returns all output amounts in `Transaction`. +pub fn output_amounts(transaction: &Transaction) -> HashSet> { + transaction + .outputs() + .iter() + .map(|o| &o.value) + .cloned() + .collect() +} diff --git a/zebra-chain/src/parameters/network/tests.rs b/zebra-chain/src/parameters/network/tests.rs index cc95d9d451f..f6658ab7723 100644 --- a/zebra-chain/src/parameters/network/tests.rs +++ b/zebra-chain/src/parameters/network/tests.rs @@ -1,2 +1,318 @@ mod prop; mod vectors; + +use color_eyre::Report; + +use super::Network; +use crate::{ + amount::{Amount, NonNegative}, + block::Height, + parameters::{ + subsidy::{ + block_subsidy, halving_divisor, height_for_halving, num_halvings, + ParameterSubsidy as _, POST_BLOSSOM_HALVING_INTERVAL, + }, + NetworkUpgrade, + }, +}; + +#[test] +fn halving_test() -> Result<(), Report> { + let _init_guard = zebra_test::init(); + for network in Network::iter() { + halving_for_network(&network)?; + } + + Ok(()) +} + +fn halving_for_network(network: &Network) -> Result<(), Report> { + let blossom_height = NetworkUpgrade::Blossom.activation_height(network).unwrap(); + let first_halving_height = network.height_for_first_halving(); + + assert_eq!( + 1, + halving_divisor((network.slow_start_interval() + 1).unwrap(), network).unwrap() + ); + assert_eq!( + 1, + halving_divisor((blossom_height - 1).unwrap(), network).unwrap() + ); + assert_eq!(1, halving_divisor(blossom_height, network).unwrap()); + assert_eq!( + 1, + halving_divisor((first_halving_height - 1).unwrap(), network).unwrap() + ); + + assert_eq!(2, halving_divisor(first_halving_height, network).unwrap()); + assert_eq!( + 2, + halving_divisor((first_halving_height + 1).unwrap(), network).unwrap() + ); + + assert_eq!( + 4, + halving_divisor( + (first_halving_height + POST_BLOSSOM_HALVING_INTERVAL).unwrap(), + network + ) + .unwrap() + ); + assert_eq!( + 8, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 2)).unwrap(), + network + ) + .unwrap() + 
); + + assert_eq!( + 1024, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 9)).unwrap(), + network + ) + .unwrap() + ); + assert_eq!( + 1024 * 1024, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 19)).unwrap(), + network + ) + .unwrap() + ); + assert_eq!( + 1024 * 1024 * 1024, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 29)).unwrap(), + network + ) + .unwrap() + ); + assert_eq!( + 1024 * 1024 * 1024 * 1024, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 39)).unwrap(), + network + ) + .unwrap() + ); + + // The largest possible integer divisor + assert_eq!( + (i64::MAX as u64 + 1), + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 62)).unwrap(), + network + ) + .unwrap(), + ); + + // Very large divisors which should also result in zero amounts + assert_eq!( + None, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 63)).unwrap(), + network, + ), + ); + + assert_eq!( + None, + halving_divisor( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 64)).unwrap(), + network, + ), + ); + + assert_eq!( + None, + halving_divisor(Height(Height::MAX_AS_U32 / 4), network), + ); + + assert_eq!( + None, + halving_divisor(Height(Height::MAX_AS_U32 / 2), network), + ); + + assert_eq!(None, halving_divisor(Height::MAX, network)); + + Ok(()) +} + +#[test] +fn block_subsidy_test() -> Result<(), Report> { + let _init_guard = zebra_test::init(); + + for network in Network::iter() { + block_subsidy_for_network(&network)?; + } + + Ok(()) +} + +fn block_subsidy_for_network(network: &Network) -> Result<(), Report> { + let blossom_height = NetworkUpgrade::Blossom.activation_height(network).unwrap(); + let first_halving_height = network.height_for_first_halving(); + + // After slow-start mining and before Blossom the block subsidy is 12.5 ZEC + // https://z.cash/support/faq/#what-is-slow-start-mining + 
assert_eq!( + Amount::::try_from(1_250_000_000)?, + block_subsidy((network.slow_start_interval() + 1).unwrap(), network)? + ); + assert_eq!( + Amount::::try_from(1_250_000_000)?, + block_subsidy((blossom_height - 1).unwrap(), network)? + ); + + // After Blossom the block subsidy is reduced to 6.25 ZEC without halving + // https://z.cash/upgrade/blossom/ + assert_eq!( + Amount::::try_from(625_000_000)?, + block_subsidy(blossom_height, network)? + ); + + // After the 1st halving, the block subsidy is reduced to 3.125 ZEC + // https://z.cash/upgrade/canopy/ + assert_eq!( + Amount::::try_from(312_500_000)?, + block_subsidy(first_halving_height, network)? + ); + + // After the 2nd halving, the block subsidy is reduced to 1.5625 ZEC + // See "7.8 Calculation of Block Subsidy and Founders' Reward" + assert_eq!( + Amount::::try_from(156_250_000)?, + block_subsidy( + (first_halving_height + POST_BLOSSOM_HALVING_INTERVAL).unwrap(), + network + )? + ); + + // After the 7th halving, the block subsidy is reduced to 0.04882812 ZEC + // Check that the block subsidy rounds down correctly, and there are no errors + assert_eq!( + Amount::::try_from(4_882_812)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 6)).unwrap(), + network + )? + ); + + // After the 29th halving, the block subsidy is 1 zatoshi + // Check that the block subsidy is calculated correctly at the limit + assert_eq!( + Amount::::try_from(1)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 28)).unwrap(), + network + )? + ); + + // After the 30th halving, there is no block subsidy + // Check that there are no errors + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 29)).unwrap(), + network + )? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 39)).unwrap(), + network + )? 
+ ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 49)).unwrap(), + network + )? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 59)).unwrap(), + network + )? + ); + + // The largest possible integer divisor + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 62)).unwrap(), + network + )? + ); + + // Other large divisors which should also result in zero + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 63)).unwrap(), + network + )? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy( + (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 64)).unwrap(), + network + )? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy(Height(Height::MAX_AS_U32 / 4), network)? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy(Height(Height::MAX_AS_U32 / 2), network)? + ); + + assert_eq!( + Amount::::try_from(0)?, + block_subsidy(Height::MAX, network)? 
+ ); + + Ok(()) +} + +#[test] +fn check_height_for_num_halvings() { + for network in Network::iter() { + for halving in 1..1000 { + let Some(height_for_halving) = height_for_halving(halving, &network) else { + panic!("could not find height for halving {halving}"); + }; + + let prev_height = height_for_halving + .previous() + .expect("there should be a previous height"); + + assert_eq!( + halving, + num_halvings(height_for_halving, &network), + "num_halvings should match the halving index" + ); + + assert_eq!( + halving - 1, + num_halvings(prev_height, &network), + "num_halvings for the prev height should be 1 less than the halving index" + ); + } + } +} diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index d053074a531..5a1fe510f02 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -25,7 +25,10 @@ use tracing::Instrument; use zebra_chain::{ amount::Amount, block, - parameters::{subsidy::FundingStreamReceiver, Network}, + parameters::{ + subsidy::{FundingStreamReceiver, SubsidyError}, + Network, + }, transaction, transparent, work::equihash, }; @@ -227,7 +230,8 @@ where .map_err(VerifyBlockError::Time)?; let coinbase_tx = check::coinbase_is_first(&block)?; - let expected_block_subsidy = subsidy::general::block_subsidy(height, &network)?; + let expected_block_subsidy = + zebra_chain::parameters::subsidy::block_subsidy(height, &network)?; check::subsidy_is_valid(&block, &network, expected_block_subsidy)?; @@ -304,7 +308,7 @@ where } // See [ZIP-1015](https://zips.z.cash/zip-1015). 
- let expected_deferred_amount = subsidy::funding_streams::funding_stream_values( + let expected_deferred_amount = zebra_chain::parameters::subsidy::funding_stream_values( height, &network, expected_block_subsidy, diff --git a/zebra-consensus/src/block/check.rs b/zebra-consensus/src/block/check.rs index 24ef2ba2ed1..ad53b1ea129 100644 --- a/zebra-consensus/src/block/check.rs +++ b/zebra-consensus/src/block/check.rs @@ -7,7 +7,10 @@ use chrono::{DateTime, Utc}; use zebra_chain::{ amount::{Amount, Error as AmountError, NonNegative}, block::{Block, Hash, Header, Height}, - parameters::{subsidy::FundingStreamReceiver, Network, NetworkUpgrade}, + parameters::{ + subsidy::{FundingStreamReceiver, SubsidyError}, + Network, NetworkUpgrade, + }, transaction::{self, Transaction}, work::{ difficulty::{ExpandedDifficulty, ParameterDifficulty as _}, @@ -153,7 +156,8 @@ pub fn subsidy_is_valid( let coinbase = block.transactions.first().ok_or(SubsidyError::NoCoinbase)?; // Validate funding streams - let Some(halving_div) = subsidy::general::halving_divisor(height, network) else { + let Some(halving_div) = zebra_chain::parameters::subsidy::halving_divisor(height, network) + else { // Far future halving, with no founders reward or funding streams return Ok(()); }; @@ -180,7 +184,7 @@ pub fn subsidy_is_valid( // Note: Canopy activation is at the first halving on mainnet, but not on testnet // ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet // funding stream amount values - let funding_streams = subsidy::funding_streams::funding_stream_values( + let funding_streams = zebra_chain::parameters::subsidy::funding_stream_values( height, network, expected_block_subsidy, @@ -240,7 +244,7 @@ pub fn miner_fees_are_valid( expected_deferred_amount: Amount, network: &Network, ) -> Result<(), BlockError> { - let transparent_value_balance = subsidy::general::output_amounts(coinbase_tx) + let transparent_value_balance = 
zebra_chain::parameters::subsidy::output_amounts(coinbase_tx) .iter() .sum::, AmountError>>() .map_err(|_| SubsidyError::SumOverflow)? diff --git a/zebra-consensus/src/block/subsidy.rs b/zebra-consensus/src/block/subsidy.rs index c9deeca6fe5..f55438b1ec2 100644 --- a/zebra-consensus/src/block/subsidy.rs +++ b/zebra-consensus/src/block/subsidy.rs @@ -4,5 +4,3 @@ /// Funding Streams functions apply for blocks at and after Canopy. pub mod funding_streams; -/// General subsidy functions apply for blocks after slow-start mining. -pub mod general; diff --git a/zebra-consensus/src/block/subsidy/funding_streams.rs b/zebra-consensus/src/block/subsidy/funding_streams.rs index f1551a224e2..68c94f19006 100644 --- a/zebra-consensus/src/block/subsidy/funding_streams.rs +++ b/zebra-consensus/src/block/subsidy/funding_streams.rs @@ -2,12 +2,9 @@ //! //! [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -use std::collections::HashMap; - use zebra_chain::{ - amount::{Amount, Error, NonNegative}, block::Height, - parameters::{subsidy::*, Network, NetworkUpgrade::*}, + parameters::{subsidy::*, Network}, transaction::Transaction, transparent::{self, Script}, }; @@ -15,38 +12,6 @@ use zebra_chain::{ #[cfg(test)] mod tests; -/// Returns the `fs.Value(height)` for each stream receiver -/// as described in [protocol specification §7.8][7.8] -/// -/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -pub fn funding_stream_values( - height: Height, - network: &Network, - expected_block_subsidy: Amount, -) -> Result>, Error> { - let canopy_height = Canopy.activation_height(network).unwrap(); - let mut results = HashMap::new(); - - if height >= canopy_height { - let funding_streams = network.funding_streams(height); - if funding_streams.height_range().contains(&height) { - for (&receiver, recipient) in funding_streams.recipients() { - // - Spec equation: `fs.value = floor(block_subsidy(height)*(fs.numerator/fs.denominator))`: - // 
https://zips.z.cash/protocol/protocol.pdf#subsidies - // - In Rust, "integer division rounds towards zero": - // https://doc.rust-lang.org/stable/reference/expressions/operator-expr.html#arithmetic-and-logical-binary-operators - // This is the same as `floor()`, because these numbers are all positive. - let amount_value = ((expected_block_subsidy * recipient.numerator())? - / FUNDING_STREAM_RECEIVER_DENOMINATOR)?; - - results.insert(receiver, amount_value); - } - } - } - - Ok(results) -} - /// Returns the position in the address slice for each funding stream /// as described in [protocol specification §7.10][7.10] /// diff --git a/zebra-consensus/src/block/subsidy/funding_streams/tests.rs b/zebra-consensus/src/block/subsidy/funding_streams/tests.rs index 91faa923c93..8eb0d36bca8 100644 --- a/zebra-consensus/src/block/subsidy/funding_streams/tests.rs +++ b/zebra-consensus/src/block/subsidy/funding_streams/tests.rs @@ -1,10 +1,12 @@ //! Tests for funding streams. +use std::collections::HashMap; + use color_eyre::Report; +use zebra_chain::amount::Amount; +use zebra_chain::parameters::NetworkUpgrade::*; use zebra_chain::parameters::{subsidy::FundingStreamReceiver, NetworkKind}; -use crate::block::subsidy::general::block_subsidy; - use super::*; /// Checks that the Mainnet funding stream values are correct. diff --git a/zebra-consensus/src/block/subsidy/general.rs b/zebra-consensus/src/block/subsidy/general.rs deleted file mode 100644 index 56de345dd7a..00000000000 --- a/zebra-consensus/src/block/subsidy/general.rs +++ /dev/null @@ -1,430 +0,0 @@ -//! Block and Miner subsidies, halvings and target spacing modifiers. - [§7.8][7.8] -//! -//! [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies - -// TODO: Move the contents of this mod to the parent mod and remove this mod. 
- -use std::collections::HashSet; - -use zebra_chain::{ - amount::{Amount, Error, NonNegative}, - block::{Height, HeightDiff}, - parameters::{subsidy::*, Network, NetworkUpgrade::*}, - transaction::Transaction, -}; - -use crate::{block::SubsidyError, funding_stream_values}; - -/// The divisor used for halvings. -/// -/// `1 << Halving(height)`, as described in [protocol specification §7.8][7.8] -/// -/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -/// -/// Returns `None` if the divisor would overflow a `u64`. -pub fn halving_divisor(height: Height, network: &Network) -> Option { - // Some far-future shifts can be more than 63 bits - 1u64.checked_shl(num_halvings(height, network)) -} - -/// The halving index for a block height and network. -/// -/// `Halving(height)`, as described in [protocol specification §7.8][7.8] -/// -/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -pub fn num_halvings(height: Height, network: &Network) -> u32 { - let slow_start_shift = network.slow_start_shift(); - let blossom_height = Blossom - .activation_height(network) - .expect("blossom activation height should be available"); - - let halving_index = if height < slow_start_shift { - 0 - } else if height < blossom_height { - let pre_blossom_height = height - slow_start_shift; - pre_blossom_height / network.pre_blossom_halving_interval() - } else { - let pre_blossom_height = blossom_height - slow_start_shift; - let scaled_pre_blossom_height = - pre_blossom_height * HeightDiff::from(BLOSSOM_POW_TARGET_SPACING_RATIO); - - let post_blossom_height = height - blossom_height; - - (scaled_pre_blossom_height + post_blossom_height) / network.post_blossom_halving_interval() - }; - - halving_index - .try_into() - .expect("already checked for negatives") -} - -/// `BlockSubsidy(height)` as described in [protocol specification §7.8][7.8] -/// -/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -pub fn block_subsidy( - height: Height, - network: &Network, -) -> 
Result, SubsidyError> { - let blossom_height = Blossom - .activation_height(network) - .expect("blossom activation height should be available"); - - // If the halving divisor is larger than u64::MAX, the block subsidy is zero, - // because amounts fit in an i64. - // - // Note: bitcoind incorrectly wraps here, which restarts large block rewards. - let Some(halving_div) = halving_divisor(height, network) else { - return Ok(Amount::zero()); - }; - - // Zebra doesn't need to calculate block subsidies for blocks with heights in the slow start - // interval because it handles those blocks through checkpointing. - if height < network.slow_start_interval() { - Err(SubsidyError::UnsupportedHeight) - } else if height < blossom_height { - // this calculation is exact, because the halving divisor is 1 here - Ok(Amount::try_from(MAX_BLOCK_SUBSIDY / halving_div)?) - } else { - let scaled_max_block_subsidy = - MAX_BLOCK_SUBSIDY / u64::from(BLOSSOM_POW_TARGET_SPACING_RATIO); - // in future halvings, this calculation might not be exact - // Amount division is implemented using integer division, - // which truncates (rounds down) the result, as specified - Ok(Amount::try_from(scaled_max_block_subsidy / halving_div)?) - } -} - -/// `MinerSubsidy(height)` as described in [protocol specification §7.8][7.8] -/// -/// [7.8]: https://zips.z.cash/protocol/protocol.pdf#subsidies -pub fn miner_subsidy( - height: Height, - network: &Network, - expected_block_subsidy: Amount, -) -> Result, Error> { - let total_funding_stream_amount: Result, _> = - funding_stream_values(height, network, expected_block_subsidy)? - .values() - .sum(); - - expected_block_subsidy - total_funding_stream_amount? -} - -/// Returns all output amounts in `Transaction`. 
-pub fn output_amounts(transaction: &Transaction) -> HashSet> { - transaction - .outputs() - .iter() - .map(|o| &o.value) - .cloned() - .collect() -} - -#[cfg(test)] -mod test { - use super::*; - use color_eyre::Report; - - #[test] - fn halving_test() -> Result<(), Report> { - let _init_guard = zebra_test::init(); - for network in Network::iter() { - halving_for_network(&network)?; - } - - Ok(()) - } - - fn halving_for_network(network: &Network) -> Result<(), Report> { - let blossom_height = Blossom.activation_height(network).unwrap(); - let first_halving_height = network.height_for_first_halving(); - - assert_eq!( - 1, - halving_divisor((network.slow_start_interval() + 1).unwrap(), network).unwrap() - ); - assert_eq!( - 1, - halving_divisor((blossom_height - 1).unwrap(), network).unwrap() - ); - assert_eq!(1, halving_divisor(blossom_height, network).unwrap()); - assert_eq!( - 1, - halving_divisor((first_halving_height - 1).unwrap(), network).unwrap() - ); - - assert_eq!(2, halving_divisor(first_halving_height, network).unwrap()); - assert_eq!( - 2, - halving_divisor((first_halving_height + 1).unwrap(), network).unwrap() - ); - - assert_eq!( - 4, - halving_divisor( - (first_halving_height + POST_BLOSSOM_HALVING_INTERVAL).unwrap(), - network - ) - .unwrap() - ); - assert_eq!( - 8, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 2)).unwrap(), - network - ) - .unwrap() - ); - - assert_eq!( - 1024, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 9)).unwrap(), - network - ) - .unwrap() - ); - assert_eq!( - 1024 * 1024, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 19)).unwrap(), - network - ) - .unwrap() - ); - assert_eq!( - 1024 * 1024 * 1024, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 29)).unwrap(), - network - ) - .unwrap() - ); - assert_eq!( - 1024 * 1024 * 1024 * 1024, - halving_divisor( - (first_halving_height + 
(POST_BLOSSOM_HALVING_INTERVAL * 39)).unwrap(), - network - ) - .unwrap() - ); - - // The largest possible integer divisor - assert_eq!( - (i64::MAX as u64 + 1), - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 62)).unwrap(), - network - ) - .unwrap(), - ); - - // Very large divisors which should also result in zero amounts - assert_eq!( - None, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 63)).unwrap(), - network, - ), - ); - - assert_eq!( - None, - halving_divisor( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 64)).unwrap(), - network, - ), - ); - - assert_eq!( - None, - halving_divisor(Height(Height::MAX_AS_U32 / 4), network), - ); - - assert_eq!( - None, - halving_divisor(Height(Height::MAX_AS_U32 / 2), network), - ); - - assert_eq!(None, halving_divisor(Height::MAX, network)); - - Ok(()) - } - - #[test] - fn block_subsidy_test() -> Result<(), Report> { - let _init_guard = zebra_test::init(); - - for network in Network::iter() { - block_subsidy_for_network(&network)?; - } - - Ok(()) - } - - fn block_subsidy_for_network(network: &Network) -> Result<(), Report> { - let blossom_height = Blossom.activation_height(network).unwrap(); - let first_halving_height = network.height_for_first_halving(); - - // After slow-start mining and before Blossom the block subsidy is 12.5 ZEC - // https://z.cash/support/faq/#what-is-slow-start-mining - assert_eq!( - Amount::::try_from(1_250_000_000)?, - block_subsidy((network.slow_start_interval() + 1).unwrap(), network)? - ); - assert_eq!( - Amount::::try_from(1_250_000_000)?, - block_subsidy((blossom_height - 1).unwrap(), network)? - ); - - // After Blossom the block subsidy is reduced to 6.25 ZEC without halving - // https://z.cash/upgrade/blossom/ - assert_eq!( - Amount::::try_from(625_000_000)?, - block_subsidy(blossom_height, network)? 
- ); - - // After the 1st halving, the block subsidy is reduced to 3.125 ZEC - // https://z.cash/upgrade/canopy/ - assert_eq!( - Amount::::try_from(312_500_000)?, - block_subsidy(first_halving_height, network)? - ); - - // After the 2nd halving, the block subsidy is reduced to 1.5625 ZEC - // See "7.8 Calculation of Block Subsidy and Founders' Reward" - assert_eq!( - Amount::::try_from(156_250_000)?, - block_subsidy( - (first_halving_height + POST_BLOSSOM_HALVING_INTERVAL).unwrap(), - network - )? - ); - - // After the 7th halving, the block subsidy is reduced to 0.04882812 ZEC - // Check that the block subsidy rounds down correctly, and there are no errors - assert_eq!( - Amount::::try_from(4_882_812)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 6)).unwrap(), - network - )? - ); - - // After the 29th halving, the block subsidy is 1 zatoshi - // Check that the block subsidy is calculated correctly at the limit - assert_eq!( - Amount::::try_from(1)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 28)).unwrap(), - network - )? - ); - - // After the 30th halving, there is no block subsidy - // Check that there are no errors - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 29)).unwrap(), - network - )? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 39)).unwrap(), - network - )? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 49)).unwrap(), - network - )? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 59)).unwrap(), - network - )? 
- ); - - // The largest possible integer divisor - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 62)).unwrap(), - network - )? - ); - - // Other large divisors which should also result in zero - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 63)).unwrap(), - network - )? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy( - (first_halving_height + (POST_BLOSSOM_HALVING_INTERVAL * 64)).unwrap(), - network - )? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy(Height(Height::MAX_AS_U32 / 4), network)? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy(Height(Height::MAX_AS_U32 / 2), network)? - ); - - assert_eq!( - Amount::::try_from(0)?, - block_subsidy(Height::MAX, network)? - ); - - Ok(()) - } - - #[test] - fn check_height_for_num_halvings() { - for network in Network::iter() { - for halving in 1..1000 { - let Some(height_for_halving) = - zebra_chain::parameters::subsidy::height_for_halving(halving, &network) - else { - panic!("could not find height for halving {halving}"); - }; - - let prev_height = height_for_halving - .previous() - .expect("there should be a previous height"); - - assert_eq!( - halving, - num_halvings(height_for_halving, &network), - "num_halvings should match the halving index" - ); - - assert_eq!( - halving - 1, - num_halvings(prev_height, &network), - "num_halvings for the prev height should be 1 less than the halving index" - ); - } - } - } -} diff --git a/zebra-consensus/src/block/tests.rs b/zebra-consensus/src/block/tests.rs index eea5f40015e..21a51f66bf0 100644 --- a/zebra-consensus/src/block/tests.rs +++ b/zebra-consensus/src/block/tests.rs @@ -12,7 +12,7 @@ use zebra_chain::{ }, Block, Height, }, - parameters::NetworkUpgrade, + parameters::{subsidy::block_subsidy, NetworkUpgrade}, serialization::{ZcashDeserialize, ZcashDeserializeInto}, 
transaction::{arbitrary::transaction_to_fake_v5, LockTime, Transaction}, work::difficulty::{ParameterDifficulty as _, INVALID_COMPACT_DIFFICULTY}, @@ -20,7 +20,7 @@ use zebra_chain::{ use zebra_script::CachedFfiTransaction; use zebra_test::transcript::{ExpectedTranscriptError, Transcript}; -use crate::{block_subsidy, transaction}; +use crate::transaction; use super::*; @@ -304,7 +304,8 @@ fn subsidy_is_valid_for_network(network: Network) -> Result<(), Report> { // TODO: first halving, second halving, third halving, and very large halvings if height >= canopy_activation_height { let expected_block_subsidy = - subsidy::general::block_subsidy(height, &network).expect("valid block subsidy"); + zebra_chain::parameters::subsidy::block_subsidy(height, &network) + .expect("valid block subsidy"); check::subsidy_is_valid(&block, &network, expected_block_subsidy) .expect("subsidies should pass for this block"); @@ -326,7 +327,7 @@ fn coinbase_validation_failure() -> Result<(), Report> { .expect("block should deserialize"); let mut block = Arc::try_unwrap(block).expect("block should unwrap"); - let expected_block_subsidy = subsidy::general::block_subsidy( + let expected_block_subsidy = zebra_chain::parameters::subsidy::block_subsidy( block .coinbase_height() .expect("block should have coinbase height"), @@ -352,7 +353,7 @@ fn coinbase_validation_failure() -> Result<(), Report> { .expect("block should deserialize"); let mut block = Arc::try_unwrap(block).expect("block should unwrap"); - let expected_block_subsidy = subsidy::general::block_subsidy( + let expected_block_subsidy = zebra_chain::parameters::subsidy::block_subsidy( block .coinbase_height() .expect("block should have coinbase height"), @@ -392,7 +393,7 @@ fn coinbase_validation_failure() -> Result<(), Report> { let expected = BlockError::Transaction(TransactionError::CoinbaseAfterFirst); assert_eq!(expected, result); - let expected_block_subsidy = subsidy::general::block_subsidy( + let expected_block_subsidy = 
zebra_chain::parameters::subsidy::block_subsidy( block .coinbase_height() .expect("block should have coinbase height"), @@ -429,7 +430,8 @@ fn funding_stream_validation_for_network(network: Network) -> Result<(), Report> if height >= canopy_activation_height { let block = Block::zcash_deserialize(&block[..]).expect("block should deserialize"); let expected_block_subsidy = - subsidy::general::block_subsidy(height, &network).expect("valid block subsidy"); + zebra_chain::parameters::subsidy::block_subsidy(height, &network) + .expect("valid block subsidy"); // Validate let result = check::subsidy_is_valid(&block, &network, expected_block_subsidy); @@ -476,7 +478,7 @@ fn funding_stream_validation_failure() -> Result<(), Report> { }; // Validate it - let expected_block_subsidy = subsidy::general::block_subsidy( + let expected_block_subsidy = zebra_chain::parameters::subsidy::block_subsidy( block .coinbase_height() .expect("block should have coinbase height"), @@ -516,7 +518,7 @@ fn miner_fees_validation_for_network(network: Network) -> Result<(), Report> { let expected_block_subsidy = block_subsidy(height, &network)?; // See [ZIP-1015](https://zips.z.cash/zip-1015). - let expected_deferred_amount = subsidy::funding_streams::funding_stream_values( + let expected_deferred_amount = zebra_chain::parameters::subsidy::funding_stream_values( height, &network, expected_block_subsidy, @@ -551,10 +553,14 @@ fn miner_fees_validation_failure() -> Result<(), Report> { let expected_block_subsidy = block_subsidy(height, &network)?; // See [ZIP-1015](https://zips.z.cash/zip-1015). 
let expected_deferred_amount: Amount = - subsidy::funding_streams::funding_stream_values(height, &network, expected_block_subsidy) - .expect("we always expect a funding stream hashmap response even if empty") - .remove(&FundingStreamReceiver::Deferred) - .unwrap_or_default(); + zebra_chain::parameters::subsidy::funding_stream_values( + height, + &network, + expected_block_subsidy, + ) + .expect("we always expect a funding stream hashmap response even if empty") + .remove(&FundingStreamReceiver::Deferred) + .unwrap_or_default(); assert_eq!( check::miner_fees_are_valid( diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 54f36bd460f..8051c31da41 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -30,20 +30,22 @@ use tracing::instrument; use zebra_chain::{ amount, block::{self, Block}, - parameters::{subsidy::FundingStreamReceiver, Network, GENESIS_PREVIOUS_BLOCK_HASH}, + parameters::{ + subsidy::{block_subsidy, funding_stream_values, FundingStreamReceiver, SubsidyError}, + Network, GENESIS_PREVIOUS_BLOCK_HASH, + }, work::equihash, }; use zebra_state::{self as zs, CheckpointVerifiedBlock}; use crate::{ block::VerifyBlockError, - block_subsidy, checkpoint::types::{ Progress::{self, *}, TargetHeight::{self, *}, }, - error::{BlockError, SubsidyError}, - funding_stream_values, BoxError, ParameterCheckpoint as _, + error::BlockError, + BoxError, ParameterCheckpoint as _, }; pub(crate) mod list; diff --git a/zebra-consensus/src/error.rs b/zebra-consensus/src/error.rs index 80c111d949f..ccb5c0d237d 100644 --- a/zebra-consensus/src/error.rs +++ b/zebra-consensus/src/error.rs @@ -9,7 +9,9 @@ use chrono::{DateTime, Utc}; use thiserror::Error; use zebra_chain::{ - amount, block, orchard, sapling, sprout, + amount, block, orchard, + parameters::subsidy::SubsidyError, + sapling, sprout, transparent::{self, MIN_TRANSPARENT_COINBASE_MATURITY}, }; use zebra_state::ValidateContextError; @@ -22,35 +24,6 @@ use 
proptest_derive::Arbitrary; /// Workaround for format string identifier rules. const MAX_EXPIRY_HEIGHT: block::Height = block::Height::MAX_EXPIRY_HEIGHT; -/// Block subsidy errors. -#[derive(Error, Clone, Debug, PartialEq, Eq)] -#[allow(missing_docs)] -pub enum SubsidyError { - #[error("no coinbase transaction in block")] - NoCoinbase, - - #[error("funding stream expected output not found")] - FundingStreamNotFound, - - #[error("miner fees are invalid")] - InvalidMinerFees, - - #[error("a sum of amounts overflowed")] - SumOverflow, - - #[error("unsupported height")] - UnsupportedHeight, - - #[error("invalid amount")] - InvalidAmount(amount::Error), -} - -impl From for SubsidyError { - fn from(amount: amount::Error) -> Self { - Self::InvalidAmount(amount) - } -} - /// Errors for semantic transaction validation. #[derive(Error, Clone, Debug, PartialEq, Eq)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index 95381fd9e07..893db389c76 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -48,10 +48,7 @@ pub mod transaction; pub use block::check::difficulty_is_valid; pub use block::{ - subsidy::{ - funding_streams::{funding_stream_address, funding_stream_values, new_coinbase_script}, - general::{block_subsidy, miner_subsidy}, - }, + subsidy::funding_streams::{funding_stream_address, new_coinbase_script}, Request, VerifyBlockError, MAX_BLOCK_SIGOPS, }; pub use checkpoint::{ diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index f09e3f6a518..21699963791 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -712,7 +712,7 @@ async fn mempool_request_with_unmined_output_spends_is_accepted() { ); } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn skips_verification_of_block_transactions_in_mempool() { let mut state: MockService<_, _, _, _> = 
MockService::build().for_prop_tests(); let mempool: MockService<_, _, _, _> = MockService::build().for_prop_tests(); @@ -797,6 +797,9 @@ async fn skips_verification_of_block_transactions_in_mempool() { .respond(mempool::Response::UnspentOutput(output)); }); + // Briefly yield and sleep so the spawned task can first expect an await output request. + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + let verifier_response = verifier .clone() .oneshot(Request::Mempool { @@ -826,7 +829,7 @@ async fn skips_verification_of_block_transactions_in_mempool() { let mut mempool_clone = mempool.clone(); tokio::spawn(async move { - for _ in 0..3 { + for _ in 0..2 { mempool_clone .expect_request(mempool::Request::TransactionWithDepsByMinedId(tx_hash)) .await @@ -847,6 +850,9 @@ async fn skips_verification_of_block_transactions_in_mempool() { time: Utc::now(), }; + // Briefly yield and sleep so the spawned task can first expect the requests. + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + let crate::transaction::Response::Block { .. } = verifier .clone() .oneshot(make_request.clone()(Arc::new([input_outpoint.hash].into()))) @@ -873,27 +879,13 @@ async fn skips_verification_of_block_transactions_in_mempool() { panic!("unexpected response variant from transaction verifier for Block request") }; - let verifier_response_err = *verifier - .clone() - .oneshot(make_request(Arc::new(HashSet::new()))) - .await - .expect_err("should return Err without calling state service") - .downcast::() - .expect("tx verifier error type should be TransactionError"); - - assert_eq!( - verifier_response_err, - TransactionError::TransparentInputNotFound, - "should be a transparent input not found error" - ); - tokio::time::sleep(POLL_MEMPOOL_DELAY * 2).await; // polled before AwaitOutput request, after a mempool transaction with transparent outputs, // is successfully verified, and twice more when checking if a transaction in a block is // already the mempool. 
assert_eq!( mempool.poll_count(), - 5, + 4, "the mempool service should have been polled 4 times" ); } diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 37359be0f84..c1449343f74 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -39,7 +39,10 @@ use zebra_chain::{ chain_sync_status::ChainSyncStatus, chain_tip::{ChainTip, NetworkChainTipHeightEstimator}, parameters::{ - subsidy::{FundingStreamReceiver, ParameterSubsidy}, + subsidy::{ + block_subsidy, funding_stream_values, miner_subsidy, FundingStreamReceiver, + ParameterSubsidy, + }, ConsensusBranchId, Network, NetworkUpgrade, POW_AVERAGING_WINDOW, }, primitives, @@ -52,10 +55,7 @@ use zebra_chain::{ equihash::Solution, }, }; -use zebra_consensus::{ - block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, - ParameterCheckpoint, RouterError, -}; +use zebra_consensus::{funding_stream_address, ParameterCheckpoint, RouterError}; use zebra_network::address_book_peers::AddressBookPeers; use zebra_node_services::mempool; use zebra_state::{ @@ -1026,7 +1026,7 @@ where let response = state.oneshot(request).await.map_misc_error()?; match response { - zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { + zebra_state::ReadResponse::AddressBalance { balance, .. } => Ok(AddressBalance { balance: u64::from(balance), }), _ => unreachable!("Unexpected response from state service: {response:?}"), diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 4611f3aea7f..e102a1ca7a8 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -647,7 +647,7 @@ proptest! 
{ let state_query = state .expect_request(zebra_state::ReadRequest::AddressBalance(addresses)) .map_ok(|responder| { - responder.respond(zebra_state::ReadResponse::AddressBalance(balance)) + responder.respond(zebra_state::ReadResponse::AddressBalance { balance, received: Default::default() }) }); // Await the RPC call and the state query diff --git a/zebra-rpc/src/methods/types/get_block_template.rs b/zebra-rpc/src/methods/types/get_block_template.rs index 3150cd4686a..e4ef9743ca6 100644 --- a/zebra-rpc/src/methods/types/get_block_template.rs +++ b/zebra-rpc/src/methods/types/get_block_template.rs @@ -22,7 +22,10 @@ use zebra_chain::{ }, chain_sync_status::ChainSyncStatus, chain_tip::ChainTip, - parameters::{subsidy::FundingStreamReceiver, Network, NetworkKind, NetworkUpgrade}, + parameters::{ + subsidy::{block_subsidy, funding_stream_values, miner_subsidy, FundingStreamReceiver}, + Network, NetworkKind, NetworkUpgrade, + }, serialization::{DateTime32, ZcashDeserializeInto}, transaction::{Transaction, UnminedTx, VerifiedUnminedTx}, transparent::{ @@ -30,9 +33,7 @@ use zebra_chain::{ }, work::difficulty::{CompactDifficulty, ExpandedDifficulty}, }; -use zebra_consensus::{ - block_subsidy, funding_stream_address, funding_stream_values, miner_subsidy, MAX_BLOCK_SIGOPS, -}; +use zebra_consensus::{funding_stream_address, MAX_BLOCK_SIGOPS}; use zebra_node_services::mempool::{self, TransactionDependencies}; use zebra_state::GetBlockTemplateChainInfo; diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index 7d175d4d614..9fcfc3f1797 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -15,7 +15,8 @@ use tracing::Span; use zebra_chain::{common::default_cache_dir, parameters::Network}; use crate::{ - constants::{DATABASE_FORMAT_VERSION_FILE_NAME, RESTORABLE_DB_VERSIONS, STATE_DATABASE_KIND}, + constants::{DATABASE_FORMAT_VERSION_FILE_NAME, STATE_DATABASE_KIND}, + service::finalized_state::restorable_db_versions, 
state_database_format_version_in_code, BoxError, }; @@ -314,7 +315,7 @@ fn check_and_delete_database( } // Don't delete databases that can be reused. - if RESTORABLE_DB_VERSIONS + if restorable_db_versions() .iter() .map(|v| v - 1) .any(|v| v == dir_major_version) @@ -398,7 +399,7 @@ pub fn state_database_format_version_on_disk( /// /// If there is no existing on-disk database, returns `Ok(None)`. /// -/// This is the format of the data on disk, the minor and patch versions +/// This is the format of the data on disk, the version /// implemented by the running Zebra code can be different. pub fn database_format_version_on_disk( config: &Config, @@ -431,7 +432,16 @@ pub(crate) fn database_format_version_at_path( // The database has a version file on disk if let Some(version) = disk_version_file { - return Ok(Some(format!("{major_version}.{version}").parse()?)); + return Ok(Some( + version + .parse() + // Try to parse the previous format of the disk version file if it cannot be parsed as a `Version` directly. + .or_else(|err| { + format!("{major_version}.{version}") + .parse() + .map_err(|err2| format!("failed to parse format version: {err}, {err2}")) + })?, + )); } // There's no version file on disk, so we need to guess the version @@ -472,13 +482,19 @@ pub(crate) mod hidden { changed_version: &Version, network: &Network, ) -> Result<(), BoxError> { - write_database_format_version_to_disk(config, STATE_DATABASE_KIND, changed_version, network) + write_database_format_version_to_disk( + config, + STATE_DATABASE_KIND, + state_database_format_version_in_code().major, + changed_version, + network, + ) } /// Writes `changed_version` to the on-disk database after the format is changed. /// (Or a new database is created.) /// - /// The database path is based on its kind, `changed_version.major`, and network. + /// The database path is based on its kind, `major_version_in_code`, and network. 
/// /// # Correctness /// @@ -495,20 +511,16 @@ pub(crate) mod hidden { pub fn write_database_format_version_to_disk( config: &Config, db_kind: impl AsRef, + major_version_in_code: u64, changed_version: &Version, network: &Network, ) -> Result<(), BoxError> { - let version_path = config.version_file_path(db_kind, changed_version.major, network); - - let mut version = format!("{}.{}", changed_version.minor, changed_version.patch); - - if !changed_version.build.is_empty() { - version.push_str(&format!("+{}", changed_version.build)); - } - // Write the version file atomically so the cache is not corrupted if Zebra shuts down or // crashes. - atomic_write(version_path, version.as_bytes())??; + atomic_write( + config.version_file_path(db_kind, major_version_in_code, network), + changed_version.to_string().as_bytes(), + )??; Ok(()) } diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index a6373637f59..9e46d79e185 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -46,7 +46,7 @@ pub const STATE_DATABASE_KIND: &str = "state"; /// /// Instead of using this constant directly, use [`constants::state_database_format_version_in_code()`] /// or [`config::database_format_version_on_disk()`] to get the full semantic format version. -const DATABASE_FORMAT_VERSION: u64 = 26; +const DATABASE_FORMAT_VERSION: u64 = 27; /// The database format minor version, incremented each time the on-disk database format has a /// significant data format change. @@ -64,7 +64,7 @@ const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; /// Returns the full semantic version of the currently running state database format code. /// /// This is the version implemented by the Zebra code that's currently running, -/// the minor and patch versions on disk can be different. +/// the version on disk can be different. 
pub fn state_database_format_version_in_code() -> Version { Version { major: DATABASE_FORMAT_VERSION, @@ -78,7 +78,9 @@ pub fn state_database_format_version_in_code() -> Version { } } -/// The name of the file containing the minor and patch database versions. +/// The name of the file containing the database version. +/// +/// Note: This file has historically omitted the major database version. /// /// Use [`Config::version_file_path()`] to get the path to this file. pub(crate) const DATABASE_FORMAT_VERSION_FILE_NAME: &str = "version"; @@ -107,9 +109,6 @@ pub const MAX_FIND_BLOCK_HASHES_RESULTS: u32 = 500; /// The maximum number of block headers allowed in `getheaders` responses in the Zcash network protocol. pub const MAX_FIND_BLOCK_HEADERS_RESULTS: u32 = 160; -/// These database versions can be recreated from their directly preceding versions. -pub const RESTORABLE_DB_VERSIONS: [u64; 1] = [26]; - /// The maximum number of invalidated block records. /// /// This limits the memory use to around: diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 19fb8321756..ea895093289 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -69,12 +69,10 @@ pub use service::finalized_state::{ // Allow use in the scanner and external tests #[cfg(any(test, feature = "proptest-impl", feature = "shielded-scan"))] -pub use service::finalized_state::{ - DiskWriteBatch, FromDisk, ReadDisk, TypedColumnFamily, WriteDisk, WriteTypedBatch, -}; +pub use service::finalized_state::{ReadDisk, TypedColumnFamily, WriteTypedBatch}; pub use service::{ - finalized_state::{IntoDisk, ZebraDb}, + finalized_state::{DiskWriteBatch, FromDisk, IntoDisk, WriteDisk, ZebraDb}, ReadStateService, }; diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 25f865dff69..cc439f1b344 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -918,9 +918,15 @@ pub enum ReadRequest { Tip, /// Returns [`ReadResponse::TipPoolValues(Option<(Height, 
block::Hash, ValueBalance)>)`](ReadResponse::TipPoolValues) - /// with the current best chain tip. + /// with the pool values of the current best chain tip. TipPoolValues, + /// Looks up the block info after a block by hash or height in the current best chain. + /// + /// * [`ReadResponse::BlockInfo(Some(pool_values))`](ReadResponse::BlockInfo) if the block is in the best chain; + /// * [`ReadResponse::BlockInfo(None)`](ReadResponse::BlockInfo) otherwise. + BlockInfo(HashOrHeight), + /// Computes the depth in the current best chain of the block identified by the given hash. /// /// Returns @@ -1189,6 +1195,7 @@ impl ReadRequest { ReadRequest::UsageInfo => "usage_info", ReadRequest::Tip => "tip", ReadRequest::TipPoolValues => "tip_pool_values", + ReadRequest::BlockInfo(_) => "block_info", ReadRequest::Depth(_) => "depth", ReadRequest::Block(_) => "block", ReadRequest::BlockAndSize(_) => "block_and_size", diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 48132dffa4f..fa4fcdcdf64 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -7,6 +7,7 @@ use chrono::{DateTime, Utc}; use zebra_chain::{ amount::{Amount, NonNegative}, block::{self, Block, ChainHistoryMmrRootHash}, + block_info::BlockInfo, orchard, sapling, serialization::DateTime32, subtree::{NoteCommitmentSubtreeData, NoteCommitmentSubtreeIndex}, @@ -164,6 +165,10 @@ pub enum ReadResponse { value_balance: ValueBalance, }, + /// Response to [`ReadRequest::BlockInfo`] with + /// the block info after the specified block. + BlockInfo(Option), + /// Response to [`ReadRequest::Depth`] with the depth of the specified block. Depth(Option), @@ -238,8 +243,14 @@ pub enum ReadResponse { BTreeMap>, ), - /// Response to [`ReadRequest::AddressBalance`] with the total balance of the addresses. - AddressBalance(Amount), + /// Response to [`ReadRequest::AddressBalance`] with the total balance of the addresses, + /// and the total received funds, including change. 
+ AddressBalance { + /// The total balance of the addresses. + balance: Amount, + /// The total received funds in zatoshis, including change. + received: u64, + }, /// Response to [`ReadRequest::TransactionIdsByAddresses`] /// with the obtained transaction ids, in the order they appear in blocks. @@ -354,12 +365,13 @@ impl TryFrom for Response { ReadResponse::UsageInfo(_) | ReadResponse::TipPoolValues { .. } + | ReadResponse::BlockInfo(_) | ReadResponse::TransactionIdsForBlock(_) | ReadResponse::SaplingTree(_) | ReadResponse::OrchardTree(_) | ReadResponse::SaplingSubtrees(_) | ReadResponse::OrchardSubtrees(_) - | ReadResponse::AddressBalance(_) + | ReadResponse::AddressBalance { .. } | ReadResponse::AddressesTransactionIds(_) | ReadResponse::AddressUtxos(_) | ReadResponse::ChainInfo(_) => { diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 5130b6aca9a..0fa40ba7953 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1243,6 +1243,32 @@ impl Service for ReadStateService { .wait_for_panics() } + // Used by getblock + ReadRequest::BlockInfo(hash_or_height) => { + let state = self.clone(); + + tokio::task::spawn_blocking(move || { + span.in_scope(move || { + let value_balance = state.non_finalized_state_receiver.with_watch_data( + |non_finalized_state| { + read::block_info( + non_finalized_state.best_chain(), + &state.db, + hash_or_height, + ) + }, + ); + + // The work is done in the future. + // TODO: Do this in the Drop impl with the variant name? + timer.finish(module_path!(), line!(), "ReadRequest::BlockInfo"); + + Ok(ReadResponse::BlockInfo(value_balance)) + }) + }) + .wait_for_panics() + } + // Used by the StateService. 
ReadRequest::Depth(hash) => { let state = self.clone(); @@ -1715,20 +1741,20 @@ impl Service for ReadStateService { tokio::task::spawn_blocking(move || { span.in_scope(move || { - let balance = state.non_finalized_state_receiver.with_watch_data( - |non_finalized_state| { + let (balance, received) = state + .non_finalized_state_receiver + .with_watch_data(|non_finalized_state| { read::transparent_balance( non_finalized_state.best_chain().cloned(), &state.db, addresses, ) - }, - )?; + })?; // The work is done in the future. timer.finish(module_path!(), line!(), "ReadRequest::AddressBalance"); - Ok(ReadResponse::AddressBalance(balance)) + Ok(ReadResponse::AddressBalance { balance, received }) }) }) .wait_for_panics() diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 57d22493cef..0a365f2faba 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -20,7 +20,10 @@ use std::{ }; use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; -use zebra_db::transparent::TX_LOC_BY_SPENT_OUT_LOC; +use zebra_db::{ + chain::BLOCK_INFO, + transparent::{BALANCE_BY_TRANSPARENT_ADDR, TX_LOC_BY_SPENT_OUT_LOC}, +}; use crate::{ constants::{state_database_format_version_in_code, STATE_DATABASE_KIND}, @@ -61,6 +64,8 @@ pub use disk_format::{ #[cfg(any(test, feature = "proptest-impl"))] pub use disk_format::KV; +pub use disk_format::upgrade::restorable_db_versions; + /// The column families supported by the running `zebra-state` database code. /// /// Existing column families that aren't listed here are preserved when the database is opened. 
@@ -74,7 +79,7 @@ pub const STATE_COLUMN_FAMILIES_IN_CODE: &[&str] = &[ "hash_by_tx_loc", "tx_loc_by_hash", // Transparent - "balance_by_transparent_addr", + BALANCE_BY_TRANSPARENT_ADDR, "tx_loc_by_transparent_addr_loc", "utxo_by_out_loc", "utxo_loc_by_transparent_addr_loc", @@ -96,6 +101,7 @@ pub const STATE_COLUMN_FAMILIES_IN_CODE: &[&str] = &[ // Chain "history_tree", "tip_chain_value_pool", + BLOCK_INFO, ]; /// The finalized part of the chain state, stored in the db. diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 4e9907f2681..0380afe86dc 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -27,11 +27,15 @@ use semver::Version; use zebra_chain::{parameters::Network, primitives::byte_array::increment_big_endian}; use crate::{ - constants::DATABASE_FORMAT_VERSION_FILE_NAME, + database_format_version_on_disk, service::finalized_state::disk_format::{FromDisk, IntoDisk}, - Config, + write_database_format_version_to_disk, Config, }; +use super::zebra_db::transparent::{ + fetch_add_balance_and_received, BALANCE_BY_TRANSPARENT_ADDR, + BALANCE_BY_TRANSPARENT_ADDR_MERGE_OP, +}; // Doc-only imports #[allow(unused_imports)] use super::{TypedColumnFamily, WriteTypedBatch}; @@ -146,6 +150,14 @@ pub trait WriteDisk { K: IntoDisk + Debug, V: IntoDisk; + /// Serialize and merge the given key and value into a rocksdb column family, + /// merging with any existing `value` for `key`. + fn zs_merge(&mut self, cf: &C, key: K, value: V) + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + Debug, + V: IntoDisk; + /// Remove the given key from a rocksdb column family, if it exists. 
fn zs_delete(&mut self, cf: &C, key: K) where @@ -181,6 +193,17 @@ impl WriteDisk for DiskWriteBatch { self.batch.put_cf(cf, key_bytes, value_bytes); } + fn zs_merge(&mut self, cf: &C, key: K, value: V) + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + Debug, + V: IntoDisk, + { + let key_bytes = key.as_bytes(); + let value_bytes = value.as_bytes(); + self.batch.merge_cf(cf, key_bytes, value_bytes); + } + fn zs_delete(&mut self, cf: &C, key: K) where C: rocksdb::AsColumnFamilyRef, @@ -218,6 +241,15 @@ where (*self).zs_insert(cf, key, value) } + fn zs_merge(&mut self, cf: &C, key: K, value: V) + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + Debug, + V: IntoDisk, + { + (*self).zs_merge(cf, key, value) + } + fn zs_delete(&mut self, cf: &C, key: K) where C: rocksdb::AsColumnFamilyRef, @@ -521,12 +553,12 @@ impl DiskDb { let mut total_size_in_mem = 0; let db: &Arc = &self.db; let db_options = DiskDb::options(); - let column_families = DiskDb::construct_column_families(&db_options, db.path(), &[]); + let column_families = DiskDb::construct_column_families(db_options, db.path(), []); let mut column_families_log_string = String::from(""); write!(column_families_log_string, "Column families and sizes: ").unwrap(); - for cf_descriptor in column_families.iter() { + for cf_descriptor in column_families { let cf_name = &cf_descriptor.name(); let cf_handle = db .cf_handle(cf_name) @@ -575,7 +607,7 @@ impl DiskDb { let db: &Arc = &self.db; let db_options = DiskDb::options(); let mut total_size_on_disk = 0; - for cf_descriptor in DiskDb::construct_column_families(&db_options, db.path(), &[]).iter() { + for cf_descriptor in DiskDb::construct_column_families(db_options, db.path(), []) { let cf_name = &cf_descriptor.name(); let cf_handle = db .cf_handle(cf_name) @@ -807,10 +839,10 @@ impl DiskDb { /// Build a vector of current column families on the disk and optionally any new column families. /// Returns an iterable collection of all column families. 
fn construct_column_families( - db_options: &Options, + db_options: Options, path: &Path, - column_families_in_code: &[String], - ) -> Vec { + column_families_in_code: impl IntoIterator, + ) -> impl Iterator { // When opening the database in read/write mode, all column families must be opened. // // To make Zebra forward-compatible with databases updated by later versions, @@ -818,16 +850,25 @@ impl DiskDb { // from the current implementation. // // - let column_families_on_disk = DB::list_cf(db_options, path).unwrap_or_default(); - let column_families = column_families_on_disk + let column_families_on_disk = DB::list_cf(&db_options, path).unwrap_or_default(); + let column_families_in_code = column_families_in_code.into_iter(); + + column_families_on_disk .into_iter() - .chain(column_families_in_code.iter().cloned()) + .chain(column_families_in_code) .unique() - .collect::>(); - column_families - .into_iter() - .map(|cf_name| ColumnFamilyDescriptor::new(cf_name, db_options.clone())) - .collect() + .map(move |cf_name: String| { + let mut cf_options = db_options.clone(); + + if cf_name == BALANCE_BY_TRANSPARENT_ADDR { + cf_options.set_merge_operator_associative( + BALANCE_BY_TRANSPARENT_ADDR_MERGE_OP, + fetch_add_balance_and_received, + ); + } + + rocksdb::ColumnFamilyDescriptor::new(cf_name, cf_options.clone()) + }) } /// Opens or creates the database at a path based on the kind, major version and network, @@ -856,21 +897,8 @@ impl DiskDb { let db_options = DiskDb::options(); - // When opening the database in read/write mode, all column families must be opened. - // - // To make Zebra forward-compatible with databases updated by later versions, - // we read any existing column families off the disk, then add any new column families - // from the current implementation. 
- // - // - let column_families_on_disk = DB::list_cf(&db_options, &path).unwrap_or_default(); - let column_families_in_code = column_families_in_code.into_iter(); - - let column_families = column_families_on_disk - .into_iter() - .chain(column_families_in_code) - .unique() - .map(|cf_name| rocksdb::ColumnFamilyDescriptor::new(cf_name, db_options.clone())); + let column_families = + DiskDb::construct_column_families(db_options.clone(), &path, column_families_in_code); let db_result = if read_only { // Use a tempfile for the secondary instance cache directory @@ -984,23 +1012,28 @@ impl DiskDb { /// db to a new path so it can be used again. It does so by merely trying to rename the path /// corresponding to the db version directly preceding the current version to the path that is /// used by the current db. If successful, it also deletes the db version file. + /// + /// Returns the old disk version if one existed and the db directory was renamed, or None otherwise. + // TODO: Update this function to rename older major db format version to the current version (#9565). + #[allow(clippy::unwrap_in_result)] pub(crate) fn try_reusing_previous_db_after_major_upgrade( restorable_db_versions: &[u64], format_version_in_code: &Version, config: &Config, db_kind: impl AsRef, network: &Network, - ) { + ) -> Option { if let Some(&major_db_ver) = restorable_db_versions .iter() .find(|v| **v == format_version_in_code.major) { let db_kind = db_kind.as_ref(); - let old_path = config.db_path(db_kind, major_db_ver - 1, network); + let old_major_db_ver = major_db_ver - 1; + let old_path = config.db_path(db_kind, old_major_db_ver, network); // Exit early if the path doesn't exist or there's an error checking it. 
if !fs::exists(&old_path).unwrap_or(false) { - return; + return None; } let new_path = config.db_path(db_kind, major_db_ver, network); @@ -1009,7 +1042,7 @@ impl DiskDb { Ok(canonicalized_old_path) => canonicalized_old_path, Err(e) => { warn!("could not canonicalize {old_path:?}: {e}"); - return; + return None; } }; @@ -1017,7 +1050,7 @@ impl DiskDb { Ok(canonicalized_cache_path) => canonicalized_cache_path, Err(e) => { warn!("could not canonicalize {:?}: {e}", config.cache_dir); - return; + return None; } }; @@ -1032,7 +1065,7 @@ impl DiskDb { // (TOCTOU attacks). Zebra should not be run with elevated privileges. if !old_path.starts_with(&cache_path) { info!("skipped reusing previous state cache: state is outside cache directory"); - return; + return None; } let opts = DiskDb::options(); @@ -1053,7 +1086,7 @@ impl DiskDb { warn!( "could not create new directory for state cache at {new_path:?}: {e}" ); - return; + return None; } }; @@ -1061,12 +1094,21 @@ impl DiskDb { Ok(()) => { info!("moved state cache from {old_path:?} to {new_path:?}"); - match fs::remove_file(new_path.join(DATABASE_FORMAT_VERSION_FILE_NAME)) { - Ok(()) => info!("removed version file at {new_path:?}"), - Err(e) => { - warn!("could not remove version file at {new_path:?}: {e}") - } - } + let mut disk_version = + database_format_version_on_disk(config, db_kind, major_db_ver, network) + .expect("unable to read database format version file") + .expect("unable to parse database format version"); + + disk_version.major = old_major_db_ver; + + write_database_format_version_to_disk( + config, + db_kind, + major_db_ver, + &disk_version, + network, + ) + .expect("unable to write database format version file to disk"); // Get the parent of the old path, e.g. `state/v25/` and delete it if it is // empty. 
@@ -1091,13 +1133,17 @@ impl DiskDb { } } } + + return Some(disk_version); } Err(e) => { - warn!("could not move state cache from {old_path:?} to {new_path:?}: {e}") + warn!("could not move state cache from {old_path:?} to {new_path:?}: {e}"); } - } + }; } - } + }; + + None } /// Returns the database options for the finalized state database. diff --git a/zebra-state/src/service/finalized_state/disk_format/chain.rs b/zebra-state/src/service/finalized_state/disk_format/chain.rs index b5a2db8de35..272aabfc7dc 100644 --- a/zebra-state/src/service/finalized_state/disk_format/chain.rs +++ b/zebra-state/src/service/finalized_state/disk_format/chain.rs @@ -12,6 +12,7 @@ use bincode::Options; use zebra_chain::{ amount::NonNegative, block::Height, + block_info::BlockInfo, history_tree::{HistoryTreeError, NonEmptyHistoryTree}, parameters::{Network, NetworkKind}, primitives::zcash_history, @@ -93,3 +94,33 @@ impl FromDisk for HistoryTreeParts { .expect("deserialization format should match the serialization format used by IntoDisk") } } + +impl IntoDisk for BlockInfo { + type Bytes = Vec; + + fn as_bytes(&self) -> Self::Bytes { + self.value_pools() + .as_bytes() + .iter() + .copied() + .chain(self.size().to_le_bytes().iter().copied()) + .collect() + } +} + +impl FromDisk for BlockInfo { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + // We want to be forward-compatible, so this must work even if the + // size of the buffer is larger than expected. + match bytes.as_ref().len() { + 44.. 
=> { + let value_pools = ValueBalance::::from_bytes(&bytes.as_ref()[0..40]) + .expect("must work for 40 bytes"); + let size = + u32::from_le_bytes(bytes.as_ref()[40..44].try_into().expect("must be 4 bytes")); + BlockInfo::new(value_pools, size) + } + _ => panic!("invalid format"), + } + } +} diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs index eb12cf41f1b..76a1f0cba99 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs @@ -11,7 +11,7 @@ //! //! If this test fails, run: //! ```sh -//! cargo insta test --review +//! cargo insta test --review --release -p zebra-state --lib -- test_raw_rocksdb_column_families //! ``` //! to update the test snapshots, then commit the `test_*.snap` files using git. //! diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_1.snap index cbb88f6fa6e..7c6e4de3e29 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_1.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "017d46a730d31f97b1930d3368a967c309bd4d136a", - v: "d4300000000000000000010000000001", + v: "d4300000000000000000010000000001d430000000000000", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_2.snap index 646afbe0750..0e1efa67d8a 100644 --- 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@mainnet_2.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "017d46a730d31f97b1930d3368a967c309bd4d136a", - v: "7c920000000000000000010000000001", + v: "7c9200000000000000000100000000017c92000000000000", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_1.snap index 3e6546dde3c..73676f5edf6 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_1.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "03ef775f1f997f122a062fff1a2d7443abd1f9c642", - v: "d4300000000000000000010000000001", + v: "d4300000000000000000010000000001d430000000000000", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_2.snap index b7cfb00febe..3b447336a60 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/balance_by_transparent_addr_raw_data@testnet_2.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "03ef775f1f997f122a062fff1a2d7443abd1f9c642", - v: "7c920000000000000000010000000001", + v: "7c9200000000000000000100000000017c92000000000000", ), ] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_1.snap new file mode 100644 index 00000000000..ebce729540c --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_1.snap @@ -0,0 +1,14 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_2.snap new file mode 100644 index 00000000000..309f0a05286 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@mainnet_2.snap @@ -0,0 +1,18 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), + KV( + k: "000002", + v: "6cdc020000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_1.snap new file mode 100644 index 00000000000..54deb67a044 --- /dev/null +++ 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_1.snap @@ -0,0 +1,14 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_2.snap new file mode 100644 index 00000000000..6f02a45e397 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_data_raw_data@testnet_2.snap @@ -0,0 +1,18 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), + KV( + k: "000002", + v: "6cdc020000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..e055cc02251 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: 
"000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_1.snap new file mode 100644 index 00000000000..ebce729540c --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_1.snap @@ -0,0 +1,14 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_2.snap new file mode 100644 index 00000000000..309f0a05286 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@mainnet_2.snap @@ -0,0 +1,18 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), + KV( + k: "000002", + v: "6cdc020000000000000000000000000000000000000000000000000000000000000000000000000051060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_0.snap new 
file mode 100644 index 00000000000..e055cc02251 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_1.snap new file mode 100644 index 00000000000..54deb67a044 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_1.snap @@ -0,0 +1,14 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_2.snap new file mode 100644 index 00000000000..6f02a45e397 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/block_info_raw_data@testnet_2.snap @@ -0,0 +1,18 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "000000", + v: "000000000000000000000000000000000000000000000000000000000000000000000000000000009c060000", + ), + KV( + k: "000001", + v: "24f4000000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), + KV( + k: "000002", + v: 
"6cdc020000000000000000000000000000000000000000000000000000000000000000000000000052060000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap index 3a1191beda9..d061f4dd38f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/column_family_names.snap @@ -5,6 +5,7 @@ expression: cf_names [ "balance_by_transparent_addr", "block_header_by_height", + "block_info", "default", "hash_by_height", "hash_by_tx_loc", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap index 5511807d28c..f04da90d0b9 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap @@ -10,7 +10,6 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", - "tip_chain_value_pool: no entries", "tx_loc_by_spent_out_loc: no entries", "tx_loc_by_transparent_addr_loc: no entries", "utxo_by_out_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap index e461b0d0f1e..e4c682a7270 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@no_blocks.snap @@ -5,6 +5,7 
@@ expression: empty_column_families [ "balance_by_transparent_addr: no entries", "block_header_by_height: no entries", + "block_info: no entries", "hash_by_height: no entries", "hash_by_tx_loc: no entries", "height_by_hash: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap index 5511807d28c..f04da90d0b9 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap @@ -10,7 +10,6 @@ expression: empty_column_families "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", "sprout_nullifiers: no entries", - "tip_chain_value_pool: no entries", "tx_loc_by_spent_out_loc: no entries", "tx_loc_by_transparent_addr_loc: no entries", "utxo_by_out_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..d679beb271e --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "", + v: "00000000000000000000000000000000000000000000000000000000000000000000000000000000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@testnet_0.snap new file mode 100644 index 
00000000000..d679beb271e --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/tip_chain_value_pool_raw_data@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "", + v: "00000000000000000000000000000000000000000000000000000000000000000000000000000000", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/transparent.rs b/zebra-state/src/service/finalized_state/disk_format/transparent.rs index b41264ebe00..c4dce0cedcf 100644 --- a/zebra-state/src/service/finalized_state/disk_format/transparent.rs +++ b/zebra-state/src/service/finalized_state/disk_format/transparent.rs @@ -10,7 +10,7 @@ use std::{cmp::max, fmt::Debug}; use serde::{Deserialize, Serialize}; use zebra_chain::{ - amount::{self, Amount, NonNegative}, + amount::{self, Amount, Constraint, NegativeAllowed, NonNegative}, block::Height, parameters::NetworkKind, serialization::{ZcashDeserializeInto, ZcashSerialize}, @@ -207,80 +207,199 @@ impl OutputLocation { /// derive IntoDisk and FromDisk? pub type AddressLocation = OutputLocation; -/// Data which Zebra indexes for each [`transparent::Address`]. -/// -/// Currently, Zebra tracks this data 1:1 for each address: -/// - the balance [`Amount`] for a transparent address, and -/// - the [`AddressLocation`] for the first [`transparent::Output`] sent to that address -/// (regardless of whether that output is spent or unspent). -/// -/// All other address data is tracked multiple times for each address -/// (UTXOs and transactions). +/// The inner type of [`AddressBalanceLocation`] and [`AddressBalanceLocationChange`]. 
#[derive(Copy, Clone, Debug, Eq, PartialEq)] #[cfg_attr( any(test, feature = "proptest-impl"), - derive(Arbitrary, Serialize, Deserialize) + derive(Arbitrary, Serialize, Deserialize), + serde(bound = "C: Constraint + Clone") )] -pub struct AddressBalanceLocation { +pub struct AddressBalanceLocationInner { /// The total balance of all UTXOs sent to an address. - balance: Amount, + balance: Amount, + + /// The total balance of all spent and unspent outputs sent to an address. + received: u64, /// The location of the first [`transparent::Output`] sent to an address. location: AddressLocation, } -impl AddressBalanceLocation { - /// Creates a new [`AddressBalanceLocation`] from the location of +impl AddressBalanceLocationInner { + /// Creates a new [`AddressBalanceLocationInner`] from the location of /// the first [`transparent::Output`] sent to an address. /// - /// The returned value has a zero initial balance. - pub fn new(first_output: OutputLocation) -> AddressBalanceLocation { - AddressBalanceLocation { + /// The returned value has a zero initial balance and received balance. + fn new(first_output: OutputLocation) -> Self { + Self { balance: Amount::zero(), + received: 0, location: first_output, } } /// Returns the current balance for the address. - pub fn balance(&self) -> Amount { + pub fn balance(&self) -> Amount { self.balance } + /// Returns the current received balance for the address. + pub fn received(&self) -> u64 { + self.received + } + /// Returns a mutable reference to the current balance for the address. - pub fn balance_mut(&mut self) -> &mut Amount { + pub fn balance_mut(&mut self) -> &mut Amount { &mut self.balance } + /// Returns a mutable reference to the current received balance for the address. + pub fn received_mut(&mut self) -> &mut u64 { + &mut self.received + } + + /// Returns the location of the first [`transparent::Output`] sent to an address. 
+ pub fn address_location(&self) -> AddressLocation { + self.location + } + + /// Allows tests to set the height of the address location. + #[cfg(any(test, feature = "proptest-impl"))] + #[allow(dead_code)] + pub fn height_mut(&mut self) -> &mut Height { + &mut self.location.transaction_location.height + } +} + +impl std::ops::Add for AddressBalanceLocationInner { + type Output = Result; + + fn add(self, rhs: Self) -> Self::Output { + Ok(AddressBalanceLocationInner { + balance: (self.balance + rhs.balance)?, + received: self.received.saturating_add(rhs.received), + location: self.location.min(rhs.location), + }) + } +} + +/// Represents a change in the [`AddressBalanceLocation`] of a transparent address +/// in the finalized state. +pub struct AddressBalanceLocationChange(AddressBalanceLocationInner); + +impl AddressBalanceLocationChange { + /// Creates a new [`AddressBalanceLocationChange`]. + /// + /// See [`AddressBalanceLocationInner::new`] for more details. + pub fn new(location: AddressLocation) -> Self { + Self(AddressBalanceLocationInner::new(location)) + } + /// Updates the current balance by adding the supplied output's value. + #[allow(clippy::unwrap_in_result)] pub fn receive_output( &mut self, unspent_output: &transparent::Output, ) -> Result<(), amount::Error> { - self.balance = (self.balance + unspent_output.value())?; - + self.balance = (self + .balance + .zatoshis() + .checked_add(unspent_output.value().zatoshis())) + .expect("adding two Amounts is always within an i64") + .try_into()?; + self.received = self.received.saturating_add(unspent_output.value().into()); Ok(()) } /// Updates the current balance by subtracting the supplied output's value. 
+ #[allow(clippy::unwrap_in_result)] pub fn spend_output( &mut self, spent_output: &transparent::Output, ) -> Result<(), amount::Error> { - self.balance = (self.balance - spent_output.value())?; + self.balance = (self + .balance + .zatoshis() + .checked_sub(spent_output.value().zatoshis())) + .expect("subtracting two Amounts is always within an i64") + .try_into()?; Ok(()) } +} - /// Returns the location of the first [`transparent::Output`] sent to an address. - pub fn address_location(&self) -> AddressLocation { - self.location +impl std::ops::Deref for AddressBalanceLocationChange { + type Target = AddressBalanceLocationInner; + + fn deref(&self) -> &Self::Target { + &self.0 } +} - /// Allows tests to set the height of the address location. - #[cfg(any(test, feature = "proptest-impl"))] - #[allow(dead_code)] - pub fn height_mut(&mut self) -> &mut Height { - &mut self.location.transaction_location.height +impl std::ops::DerefMut for AddressBalanceLocationChange { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl std::ops::Add for AddressBalanceLocationChange { + type Output = Result; + + fn add(self, rhs: Self) -> Self::Output { + (self.0 + rhs.0).map(Self) + } +} + +/// Data which Zebra indexes for each [`transparent::Address`]. +/// +/// Currently, Zebra tracks this data 1:1 for each address: +/// - the balance [`Amount`] for a transparent address, and +/// - the [`AddressLocation`] for the first [`transparent::Output`] sent to that address +/// (regardless of whether that output is spent or unspent). +/// +/// All other address data is tracked multiple times for each address +/// (UTXOs and transactions). +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[cfg_attr( + any(test, feature = "proptest-impl"), + derive(Arbitrary, Serialize, Deserialize) +)] +pub struct AddressBalanceLocation(AddressBalanceLocationInner); + +impl AddressBalanceLocation { + /// Creates a new [`AddressBalanceLocation`]. 
+ /// + /// See [`AddressBalanceLocationInner::new`] for more details. + pub fn new(first_output: OutputLocation) -> Self { + Self(AddressBalanceLocationInner::new(first_output)) + } + + /// Consumes self and returns a new [`AddressBalanceLocationChange`] with + /// a zero balance, zero received balance, and the `location` of `self`. + pub fn into_new_change(self) -> AddressBalanceLocationChange { + AddressBalanceLocationChange::new(self.location) + } +} + +impl std::ops::Deref for AddressBalanceLocation { + type Target = AddressBalanceLocationInner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for AddressBalanceLocation { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl std::ops::Add for AddressBalanceLocation { + type Output = Result; + + fn add(self, rhs: Self) -> Self::Output { + (self.0 + rhs.0).map(Self) } } @@ -549,7 +668,7 @@ impl FromDisk for transparent::Address { } } -impl IntoDisk for Amount { +impl IntoDisk for Amount { type Bytes = [u8; BALANCE_DISK_BYTES]; fn as_bytes(&self) -> Self::Bytes { @@ -647,35 +766,70 @@ impl FromDisk for OutputLocation { } } -impl IntoDisk for AddressBalanceLocation { - type Bytes = [u8; BALANCE_DISK_BYTES + OUTPUT_LOCATION_DISK_BYTES]; +impl IntoDisk for AddressBalanceLocationInner { + type Bytes = [u8; BALANCE_DISK_BYTES + OUTPUT_LOCATION_DISK_BYTES + size_of::()]; fn as_bytes(&self) -> Self::Bytes { let balance_bytes = self.balance().as_bytes().to_vec(); let address_location_bytes = self.address_location().as_bytes().to_vec(); + let received_bytes = self.received().to_le_bytes().to_vec(); - [balance_bytes, address_location_bytes] + [balance_bytes, address_location_bytes, received_bytes] .concat() .try_into() .unwrap() } } -impl FromDisk for AddressBalanceLocation { +impl IntoDisk for AddressBalanceLocation { + type Bytes = [u8; BALANCE_DISK_BYTES + OUTPUT_LOCATION_DISK_BYTES + size_of::()]; + + fn as_bytes(&self) -> Self::Bytes { + self.0.as_bytes() + } 
+} + +impl IntoDisk for AddressBalanceLocationChange { + type Bytes = [u8; BALANCE_DISK_BYTES + OUTPUT_LOCATION_DISK_BYTES + size_of::()]; + + fn as_bytes(&self) -> Self::Bytes { + self.0.as_bytes() + } +} + +impl FromDisk for AddressBalanceLocationInner { fn from_bytes(disk_bytes: impl AsRef<[u8]>) -> Self { - let (balance_bytes, address_location_bytes) = - disk_bytes.as_ref().split_at(BALANCE_DISK_BYTES); + let (balance_bytes, rest) = disk_bytes.as_ref().split_at(BALANCE_DISK_BYTES); + let (address_location_bytes, rest) = rest.split_at(BALANCE_DISK_BYTES); + let (received_bytes, _) = rest.split_at_checked(size_of::()).unwrap_or_default(); let balance = Amount::from_bytes(balance_bytes.try_into().unwrap()).unwrap(); let address_location = AddressLocation::from_bytes(address_location_bytes); + // # Backwards Compatibility + // + // If the value is missing a `received` field, default to 0. + let received = u64::from_le_bytes(received_bytes.try_into().unwrap_or_default()); - let mut address_balance_location = AddressBalanceLocation::new(address_location); + let mut address_balance_location = Self::new(address_location); *address_balance_location.balance_mut() = balance; + *address_balance_location.received_mut() = received; address_balance_location } } +impl FromDisk for AddressBalanceLocation { + fn from_bytes(disk_bytes: impl AsRef<[u8]>) -> Self { + Self(AddressBalanceLocationInner::from_bytes(disk_bytes)) + } +} + +impl FromDisk for AddressBalanceLocationChange { + fn from_bytes(disk_bytes: impl AsRef<[u8]>) -> Self { + Self(AddressBalanceLocationInner::from_bytes(disk_bytes)) + } +} + impl IntoDisk for transparent::Output { type Bytes = Vec; diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index 146c5bc1b7c..cbc9ca932cd 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -23,6 
+23,7 @@ use DbFormatChange::*; use crate::service::finalized_state::ZebraDb; pub(crate) mod add_subtrees; +pub(crate) mod block_info_and_address_received; pub(crate) mod cache_genesis_roots; pub(crate) mod fix_tree_key_type; pub(crate) mod no_migration; @@ -78,11 +79,17 @@ pub trait DiskFormatUpgrade { fn needs_migration(&self) -> bool { true } + + /// Returns true if the upgrade is a major upgrade that can reuse the cache in the previous major db format version. + fn is_reusable_major_upgrade(&self) -> bool { + let version = self.version(); + version.minor == 0 && version.patch == 0 + } } fn format_upgrades( min_version: Option, -) -> impl Iterator> { +) -> impl DoubleEndedIterator> { let min_version = move || min_version.clone().unwrap_or(Version::new(0, 0, 0)); // Note: Disk format upgrades must be run in order of database version. @@ -90,13 +97,28 @@ fn format_upgrades( Box::new(prune_trees::PruneTrees), Box::new(add_subtrees::AddSubtrees), Box::new(tree_keys_and_caches_upgrade::FixTreeKeyTypeAndCacheGenesisRoots), - // Value balance upgrade - Box::new(no_migration::NoMigration::new(26, 0, 0)), - ] as [Box; 4]) + Box::new(no_migration::NoMigration::new( + "add value balance upgrade", + Version::new(26, 0, 0), + )), + Box::new(block_info_and_address_received::Upgrade), + ] as [Box; 5]) .into_iter() .filter(move |upgrade| upgrade.version() > min_version()) } +/// Returns a list of all the major db format versions that can restored from the +/// previous major database format. +pub fn restorable_db_versions() -> Vec { + format_upgrades(None) + .filter_map(|upgrade| { + upgrade + .is_reusable_major_upgrade() + .then_some(upgrade.version().major) + }) + .collect() +} + /// The kind of database format change or validity check we're performing. 
#[derive(Clone, Debug, Eq, PartialEq)] pub enum DbFormatChange { diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs new file mode 100644 index 00000000000..e18c296fe6e --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs @@ -0,0 +1,300 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use crossbeam_channel::TryRecvError; +use itertools::Itertools; +use rayon::iter::{IntoParallelIterator, ParallelIterator as _}; +use zebra_chain::{ + amount::NonNegative, + block::{Block, Height}, + block_info::BlockInfo, + parameters::subsidy::{block_subsidy, funding_stream_values, FundingStreamReceiver}, + transparent::{self, OutPoint, Utxo}, + value_balance::ValueBalance, +}; + +use crate::{ + service::finalized_state::disk_format::transparent::{ + AddressBalanceLocationChange, AddressLocation, + }, + DiskWriteBatch, HashOrHeight, TransactionLocation, WriteDisk, +}; + +use super::{CancelFormatChange, DiskFormatUpgrade}; + +/// Implements [`DiskFormatUpgrade`] for adding additionl block info to the +/// database. +pub struct Upgrade; + +/// The result of loading data to create a [`BlockInfo`]. If the info was +/// already there we only need to ValueBalance to keep track of the totals. +/// Otherwise we need the block, size and utxos to compute the BlockInfo. 
+enum LoadResult { + HasInfo(ValueBalance), + LoadedInfo { + block: Arc, + size: usize, + utxos: HashMap, + address_balance_changes: HashMap, + }, +} + +impl DiskFormatUpgrade for Upgrade { + fn version(&self) -> semver::Version { + semver::Version::new(27, 0, 0) + } + + fn description(&self) -> &'static str { + "add block info and address received balances upgrade" + } + + #[allow(clippy::unwrap_in_result)] + fn run( + &self, + initial_tip_height: zebra_chain::block::Height, + db: &crate::ZebraDb, + cancel_receiver: &crossbeam_channel::Receiver, + ) -> Result<(), super::CancelFormatChange> { + let network = db.network(); + let balance_by_transparent_addr = db.address_balance_cf(); + let chunk_size = rayon::current_num_threads(); + tracing::info!(chunk_size = ?chunk_size, "adding block info data"); + + let chunks = (0..=initial_tip_height.0).chunks(chunk_size); + // Since transaction parsing is slow, we want to parallelize it. + // Get chunks of block heights and load them in parallel. + let seq_iter = chunks.into_iter().flat_map(|height_span| { + let height_vec = height_span.collect_vec(); + let result_vec = height_vec + .into_par_iter() + .map(|h| { + // Return early if the upgrade is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(super::CancelFormatChange); + } + + let height = Height(h); + + // The upgrade might have been interrupted and some heights might + // have already been filled. Return a value indicating that + // along with the loaded value pool. + if let Some(existing_block_info) = db.block_info_cf().zs_get(&height) { + let value_pool = *existing_block_info.value_pools(); + return Ok((h, LoadResult::HasInfo(value_pool))); + } + + // Load the block. This is slow since transaction + // parsing is slow. + let (block, size) = db + .block_and_size(HashOrHeight::Height(height)) + .expect("block info should be in the database"); + + // Load the utxos for all the transactions inputs in the block. 
+ // This is required to compute the value pool change. + // This is slow because transaction parsing is slow. + let mut utxos = HashMap::new(); + let mut address_balance_changes = HashMap::new(); + for tx in &block.transactions { + for input in tx.inputs() { + if let Some(outpoint) = input.outpoint() { + let (tx, h, _) = db + .transaction(outpoint.hash) + .expect("transaction should be in the database"); + let output = tx + .outputs() + .get(outpoint.index as usize) + .expect("output should exist"); + + let utxo = Utxo { + output: output.clone(), + height: h, + from_coinbase: tx.is_coinbase(), + }; + utxos.insert(outpoint, utxo); + } + } + + for output in tx.outputs() { + if let Some(address) = output.address(&network) { + *address_balance_changes + .entry(address) + .or_insert_with(AddressBalanceLocationChange::empty) + .received_mut() += u64::from(output.value()); + } + } + } + + Ok(( + h, + LoadResult::LoadedInfo { + block, + size, + utxos, + address_balance_changes, + }, + )) + }) + .collect::>(); + // The collected Vec is in-order as required as guaranteed by Rayon. + // Note that since we use flat_map() above, the result iterator will + // iterate through individual results as expected. + result_vec + }); + + // Keep track of the current value pool as we iterate the blocks. 
+ let mut value_pool = ValueBalance::::default(); + + for result in seq_iter { + let (h, load_result) = result?; + let height = Height(h); + if height.0 % 1000 == 0 { + tracing::info!(height = ?height, "adding block info for height"); + } + // Get the data loaded from the parallel iterator + let (block, size, utxos, address_balance_changes) = match load_result { + LoadResult::HasInfo(prev_value_pool) => { + // BlockInfo already stored; we just need the its value pool + // then skip the block + value_pool = prev_value_pool; + continue; + } + LoadResult::LoadedInfo { + block, + size, + utxos, + address_balance_changes, + } => (block, size, utxos, address_balance_changes), + }; + + // Get the deferred amount which is required to update the value pool. + let expected_deferred_amount = if height > network.slow_start_interval() { + // See [ZIP-1015](https://zips.z.cash/zip-1015). + funding_stream_values( + height, + &network, + block_subsidy(height, &network).unwrap_or_default(), + ) + .unwrap_or_default() + .remove(&FundingStreamReceiver::Deferred) + } else { + None + }; + + // Add this block's value pool changes to the total value pool. + value_pool = value_pool + .add_chain_value_pool_change( + block + .chain_value_pool_change(&utxos, expected_deferred_amount) + .unwrap_or_default(), + ) + .expect("value pool change should not overflow"); + + let mut batch = DiskWriteBatch::new(); + + // Create and store the BlockInfo for this block. + let block_info = BlockInfo::new(value_pool, size as u32); + let _ = db + .block_info_cf() + .with_batch_for_writing(&mut batch) + .zs_insert(&height, &block_info); + + // Update transparent addresses that received funds in this block. 
+ for (address, change) in address_balance_changes { + batch.zs_merge(balance_by_transparent_addr, address, change); + } + + db.write_batch(batch) + .expect("writing block info and address received changes should succeed"); + } + + Ok(()) + } + + #[allow(clippy::unwrap_in_result)] + fn validate( + &self, + db: &crate::ZebraDb, + cancel_receiver: &crossbeam_channel::Receiver, + ) -> Result, super::CancelFormatChange> { + let network = db.network(); + + // Return early before the next disk read if the upgrade was cancelled. + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(super::CancelFormatChange); + } + + // Read the finalized tip height or return early if the database is empty. + let Some(tip_height) = db.finalized_tip_height() else { + return Ok(Ok(())); + }; + + // Check any outputs in the last 1000 blocks. + let start_height = (tip_height - 1_000).unwrap_or(Height::MIN); + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + // Check that all blocks in the range have a BlockInfo. + + for height in start_height.0..=tip_height.0 { + if let Some(block_info) = db.block_info_cf().zs_get(&Height(height)) { + if block_info == Default::default() { + return Ok(Err(format!("zero block info for height: {}", height))); + } + } else { + return Ok(Err(format!("missing block info for height: {}", height))); + } + } + + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + // Check that all recipient addresses of transparent transfers in the range have a non-zero received balance. + + // Collect the set of addresses that received transparent funds in the last query range (last 1000 blocks). 
+ let tx_loc_range = TransactionLocation::min_for_height(start_height)..; + let addresses: HashSet<_> = db + .transactions_by_location_range(tx_loc_range) + .flat_map(|(_, tx)| tx.outputs().to_vec()) + .filter_map(|output| { + if output.value != 0 { + output.address(&network) + } else { + None + } + }) + .collect(); + + // Check that no address balances for that set of addresses have a received field of `0`. + for address in addresses { + if !matches!(cancel_receiver.try_recv(), Err(TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + let balance = db + .address_balance_location(&address) + .expect("should have address balances in finalized state"); + + if balance.received() == 0 { + return Ok(Err(format!( + "unexpected balance received for address {}: {}", + address, + balance.received(), + ))); + } + } + + Ok(Ok(())) + } +} + +impl AddressBalanceLocationChange { + /// Creates a new [`AddressBalanceLocationChange`] with all zero values and a dummy location. + fn empty() -> Self { + Self::new(AddressLocation::from_usize(Height(0), 0, 0)) + } +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs index a312f176a73..3fea72121f7 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/no_migration.rs @@ -12,14 +12,16 @@ use super::{CancelFormatChange, DiskFormatUpgrade}; /// Implements [`DiskFormatUpgrade`] for in-place upgrades that do not involve any migration /// of existing data into the new format. pub struct NoMigration { + description: &'static str, version: Version, } impl NoMigration { /// Creates a new instance of the [`NoMigration`] upgrade. 
- pub fn new(major: u64, minor: u64, patch: u64) -> Self { + pub fn new(description: &'static str, version: Version) -> Self { Self { - version: Version::new(major, minor, patch), + description, + version, } } } @@ -30,7 +32,7 @@ impl DiskFormatUpgrade for NoMigration { } fn description(&self) -> &'static str { - "no migration" + self.description } #[allow(clippy::unwrap_in_result)] diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index 2c73b059f6a..c3390522850 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -18,7 +18,6 @@ use zebra_chain::parameters::Network; use crate::{ config::database_format_version_on_disk, - constants::RESTORABLE_DB_VERSIONS, service::finalized_state::{ disk_db::DiskDb, disk_format::{ @@ -29,6 +28,8 @@ use crate::{ write_database_format_version_to_disk, BoxError, Config, }; +use super::disk_format::upgrade::restorable_db_versions; + pub mod block; pub mod chain; pub mod metrics; @@ -98,21 +99,17 @@ impl ZebraDb { column_families_in_code: impl IntoIterator, read_only: bool, ) -> ZebraDb { - let disk_version = database_format_version_on_disk( - config, - &db_kind, - format_version_in_code.major, - network, - ) - .expect("unable to read database format version file"); - - DiskDb::try_reusing_previous_db_after_major_upgrade( - &RESTORABLE_DB_VERSIONS, + let disk_version = DiskDb::try_reusing_previous_db_after_major_upgrade( + &restorable_db_versions(), format_version_in_code, config, &db_kind, network, - ); + ) + .or_else(|| { + database_format_version_on_disk(config, &db_kind, format_version_in_code.major, network) + .expect("unable to read database format version file") + }); // Log any format changes before opening the database, in case opening fails. 
let format_change = DbFormatChange::open_database(format_version_in_code, disk_version); @@ -216,6 +213,7 @@ impl ZebraDb { write_database_format_version_to_disk( self.config(), self.db_kind(), + self.major_version(), new_version, &self.network(), ) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index e8617cf96c2..182d35546ec 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -37,7 +37,7 @@ use crate::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ block::TransactionLocation, - transparent::{AddressBalanceLocation, OutputLocation}, + transparent::{AddressBalanceLocationChange, OutputLocation}, }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, FromDisk, RawBytes, @@ -338,7 +338,7 @@ impl ZebraDb { /// Returns an iterator of all raw [`Transaction`]s in the provided range /// of [`TransactionLocation`]s in finalized state. #[allow(clippy::unwrap_in_result)] - fn raw_transactions_by_location_range( + pub fn raw_transactions_by_location_range( &self, range: R, ) -> impl Iterator + '_ @@ -517,11 +517,16 @@ impl ZebraDb { .collect(); // Get the current address balances, before the transactions in this block - let address_balances: HashMap = + let address_balances: HashMap = changed_addresses .into_iter() .filter_map(|address| { - Some((address.clone(), self.address_balance_location(&address)?)) + // # Correctness + // + // Address balances are updated with the `fetch_add_balance_and_received` merge operator, so + // the values must represent the changes to the balance, not the final balance. 
+ let addr_loc = self.address_balance_location(&address)?.into_new_change(); + Some((address.clone(), addr_loc)) }) .collect(); @@ -597,7 +602,7 @@ impl DiskWriteBatch { transparent::OutPoint, OutputLocation, >, - address_balances: HashMap, + address_balances: HashMap, value_pool: ValueBalance, prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { @@ -638,15 +643,14 @@ impl DiskWriteBatch { &out_loc_by_outpoint, address_balances, )?; - - // Commit UTXOs and value pools - self.prepare_chain_value_pools_batch( - zebra_db, - finalized, - spent_utxos_by_outpoint, - value_pool, - )?; } + // Commit UTXOs and value pools + self.prepare_chain_value_pools_batch( + zebra_db, + finalized, + spent_utxos_by_outpoint, + value_pool, + )?; // The block has passed contextual validation, so update the metrics block_precommit_metrics(&finalized.block, finalized.hash, finalized.height); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index 567de6423ef..832649b9c15 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -26,7 +26,7 @@ //! //! If this test fails, run: //! ```sh -//! cargo insta test --review +//! cargo insta test --review --release -p zebra-state --lib //! ``` //! to update the test snapshots, then commit the `test_*.snap` files using git. @@ -50,6 +50,7 @@ use crate::{ disk_format::{ block::TransactionIndex, transparent::OutputLocation, FromDisk, TransactionLocation, }, + zebra_db::transparent::BALANCE_BY_TRANSPARENT_ADDR, FinalizedState, }, read::ADDRESS_HEIGHTS_FULL_RANGE, @@ -460,7 +461,7 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { /// Snapshot transparent address data, using `cargo insta` and RON serialization. 
fn snapshot_transparent_address_data(state: &FinalizedState, height: u32) { - let balance_by_transparent_addr = state.cf_handle("balance_by_transparent_addr").unwrap(); + let balance_by_transparent_addr = state.cf_handle(BALANCE_BY_TRANSPARENT_ADDR).unwrap(); let utxo_loc_by_transparent_addr_loc = state.cf_handle("utxo_loc_by_transparent_addr_loc").unwrap(); let tx_loc_by_transparent_addr_loc = state.cf_handle("tx_loc_by_transparent_addr_loc").unwrap(); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_1.snap index 00bf12a206e..3a0109f2528 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_1.snap @@ -3,8 +3,9 @@ source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_address_balances --- [ - ("t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd", AddressBalanceLocation( + ("t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd", AddressBalanceLocation(AddressBalanceLocationInner( balance: 12500, + received: 12500, location: OutputLocation( transaction_location: TransactionLocation( height: Height(1), @@ -12,5 +13,5 @@ expression: stored_address_balances ), output_index: OutputIndex(1), ), - )), + ))), ] diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_2.snap index e05c15d9a03..8e939e11216 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@mainnet_2.snap @@ -3,8 +3,9 @@ source: 
zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_address_balances --- [ - ("t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd", AddressBalanceLocation( + ("t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd", AddressBalanceLocation(AddressBalanceLocationInner( balance: 37500, + received: 37500, location: OutputLocation( transaction_location: TransactionLocation( height: Height(1), @@ -12,5 +13,5 @@ expression: stored_address_balances ), output_index: OutputIndex(1), ), - )), + ))), ] diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_1.snap index 4967e2765b4..a8dbd57b36f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_1.snap @@ -3,8 +3,9 @@ source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_address_balances --- [ - ("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi", AddressBalanceLocation( + ("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi", AddressBalanceLocation(AddressBalanceLocationInner( balance: 12500, + received: 12500, location: OutputLocation( transaction_location: TransactionLocation( height: Height(1), @@ -12,5 +13,5 @@ expression: stored_address_balances ), output_index: OutputIndex(1), ), - )), + ))), ] diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_2.snap index c3332729c05..8b8d94ca98d 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/address_balances@testnet_2.snap @@ -3,8 +3,9 @@ 
source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_address_balances --- [ - ("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi", AddressBalanceLocation( + ("t2UNzUUx8mWBCRYPRezvA363EYXyEpHokyi", AddressBalanceLocation(AddressBalanceLocationInner( balance: 37500, + received: 37500, location: OutputLocation( transaction_location: TransactionLocation( height: Height(1), @@ -12,5 +13,5 @@ expression: stored_address_balances ), output_index: OutputIndex(1), ), - )), + ))), ] diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index 5653af1c3f7..43fc2935a54 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -17,8 +17,8 @@ use std::{ }; use zebra_chain::{ - amount::NonNegative, block::Height, history_tree::HistoryTree, transparent, - value_balance::ValueBalance, + amount::NonNegative, block::Height, block_info::BlockInfo, history_tree::HistoryTree, + serialization::ZcashSerialize as _, transparent, value_balance::ValueBalance, }; use crate::{ @@ -29,7 +29,7 @@ use crate::{ zebra_db::ZebraDb, TypedColumnFamily, }, - BoxError, + BoxError, HashOrHeight, }; /// The name of the History Tree column family. @@ -51,7 +51,7 @@ pub type LegacyHistoryTreePartsCf<'cf> = TypedColumnFamily<'cf, Height, HistoryT /// This type should not be used in new code. pub type RawHistoryTreePartsCf<'cf> = TypedColumnFamily<'cf, RawBytes, HistoryTreeParts>; -/// The name of the chain value pools column family. +/// The name of the tip-only chain value pools column family. /// /// This constant should be used so the compiler can detect typos. pub const CHAIN_VALUE_POOLS: &str = "tip_chain_value_pool"; @@ -62,6 +62,17 @@ pub const CHAIN_VALUE_POOLS: &str = "tip_chain_value_pool"; /// column family. 
pub type ChainValuePoolsCf<'cf> = TypedColumnFamily<'cf, (), ValueBalance>; +/// The name of the block info column family. +/// +/// This constant should be used so the compiler can detect typos. +pub const BLOCK_INFO: &str = "block_info"; + +/// The type for reading value pools from the database. +/// +/// This constant should be used so the compiler can detect incorrectly typed accesses to the +/// column family. +pub type BlockInfoCf<'cf> = TypedColumnFamily<'cf, Height, BlockInfo>; + impl ZebraDb { // Column family convenience methods @@ -91,6 +102,12 @@ impl ZebraDb { .expect("column family was created when database was created") } + /// Returns a typed handle to the block data column family. + pub(crate) fn block_info_cf(&self) -> BlockInfoCf { + BlockInfoCf::new(&self.db, BLOCK_INFO) + .expect("column family was created when database was created") + } + // History tree methods /// Returns the ZIP-221 history tree of the finalized tip. @@ -162,6 +179,15 @@ impl ZebraDb { .zs_get(&()) .unwrap_or_else(ValueBalance::zero) } + + /// Returns the stored `BlockInfo` for the given block. + pub fn block_info(&self, hash_or_height: HashOrHeight) -> Option { + let height = hash_or_height.height_or_else(|hash| self.height(hash))?; + + let block_info_cf = self.block_info_cf(); + + block_info_cf.zs_get(&height) + } } impl DiskWriteBatch { @@ -227,18 +253,26 @@ impl DiskWriteBatch { utxos_spent_by_block: HashMap, value_pool: ValueBalance, ) -> Result<(), BoxError> { + let new_value_pool = value_pool.add_chain_value_pool_change( + finalized + .block + .chain_value_pool_change(&utxos_spent_by_block, finalized.deferred_balance)?, + )?; let _ = db .chain_value_pools_cf() .with_batch_for_writing(self) - .zs_insert( - &(), - &value_pool.add_chain_value_pool_change( - finalized.block.chain_value_pool_change( - &utxos_spent_by_block, - finalized.deferred_balance, - )?, - )?, - ); + .zs_insert(&(), &new_value_pool); + + // Get the block size to store with the BlockInfo. 
This is a bit wasteful + // since the block header and txs were serialized previously when writing + // them to the DB, and we could get the size if we modified the database + // code to return the size of data written; but serialization should be cheap. + let block_size = finalized.block.zcash_serialized_size(); + + let _ = db.block_info_cf().with_batch_for_writing(self).zs_insert( + &finalized.height, + &BlockInfo::new(new_value_pool, block_size as u32), + ); Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 06a09d803e6..39602bad85b 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -17,6 +17,7 @@ use std::{ ops::RangeInclusive, }; +use rocksdb::ColumnFamily; use zebra_chain::{ amount::{self, Amount, NonNegative}, block::Height, @@ -31,23 +32,45 @@ use crate::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ transparent::{ - AddressBalanceLocation, AddressLocation, AddressTransaction, AddressUnspentOutput, - OutputLocation, + AddressBalanceLocation, AddressBalanceLocationChange, AddressLocation, + AddressTransaction, AddressUnspentOutput, OutputLocation, }, TransactionLocation, }, zebra_db::ZebraDb, }, - BoxError, + BoxError, FromDisk, IntoDisk, }; use super::super::TypedColumnFamily; /// The name of the transaction hash by spent outpoints column family. -/// -/// This constant should be used so the compiler can detect typos. pub const TX_LOC_BY_SPENT_OUT_LOC: &str = "tx_loc_by_spent_out_loc"; +/// The name of the [balance](AddressBalanceLocation) by transparent address column family. 
+pub const BALANCE_BY_TRANSPARENT_ADDR: &str = "balance_by_transparent_addr"; + +/// The name of the [`BALANCE_BY_TRANSPARENT_ADDR`] column family's merge operator +pub const BALANCE_BY_TRANSPARENT_ADDR_MERGE_OP: &str = "fetch_add_balance_and_received"; + +/// A RocksDB merge operator for the [`BALANCE_BY_TRANSPARENT_ADDR`] column family. +pub fn fetch_add_balance_and_received( + _: &[u8], + existing_val: Option<&[u8]>, + operands: &rocksdb::MergeOperands, +) -> Option> { + // # Correctness + // + // Merge operands are ordered, but may be combined without an existing value in partial merges, so + // we may need to return a negative balance here. + existing_val + .into_iter() + .chain(operands) + .map(AddressBalanceLocationChange::from_bytes) + .reduce(|a, b| (a + b).expect("address balance/received should not overflow")) + .map(|address_balance_location| address_balance_location.as_bytes().to_vec()) +} + /// The type for reading value pools from the database. /// /// This constant should be used so the compiler can detect incorrectly typed accesses to the @@ -77,6 +100,11 @@ impl ZebraDb { self.tx_loc_by_spent_output_loc_cf().zs_get(output_location) } + /// Returns a handle to the `balance_by_transparent_addr` RocksDB column family. + pub fn address_balance_cf(&self) -> &ColumnFamily { + self.db.cf_handle(BALANCE_BY_TRANSPARENT_ADDR).unwrap() + } + /// Returns the [`AddressBalanceLocation`] for a [`transparent::Address`], /// if it is in the finalized state. #[allow(clippy::unwrap_in_result)] @@ -84,16 +112,19 @@ impl ZebraDb { &self, address: &transparent::Address, ) -> Option { - let balance_by_transparent_addr = self.db.cf_handle("balance_by_transparent_addr").unwrap(); + let balance_by_transparent_addr = self.address_balance_cf(); self.db.zs_get(&balance_by_transparent_addr, address) } - /// Returns the balance for a [`transparent::Address`], + /// Returns the balance and received balance for a [`transparent::Address`], /// if it is in the finalized state. 
- pub fn address_balance(&self, address: &transparent::Address) -> Option> { + pub fn address_balance( + &self, + address: &transparent::Address, + ) -> Option<(Amount, u64)> { self.address_balance_location(address) - .map(|abl| abl.balance()) + .map(|abl| (abl.balance(), abl.received())) } /// Returns the first output that sent funds to a [`transparent::Address`], @@ -291,24 +322,30 @@ impl ZebraDb { // Address index queries - /// Returns the total transparent balance for `addresses` in the finalized chain. + /// Returns the total transparent balance and received balance for `addresses` in the finalized chain. /// - /// If none of the addresses has a balance, returns zero. + /// If none of the addresses have a balance, returns zeroes. /// /// # Correctness /// - /// Callers should apply the non-finalized balance change for `addresses` to the returned balance. + /// Callers should apply the non-finalized balance change for `addresses` to the returned balances. /// - /// The total balance will only be correct if the non-finalized chain matches the finalized state. + /// The total balances will only be correct if the non-finalized chain matches the finalized state. /// Specifically, the root of the partial non-finalized chain must be a child block of the finalized tip. 
pub fn partial_finalized_transparent_balance( &self, addresses: &HashSet, - ) -> Amount { - let balance: amount::Result> = addresses + ) -> (Amount, u64) { + let balance: amount::Result<(Amount, u64)> = addresses .iter() .filter_map(|address| self.address_balance(address)) - .sum(); + .try_fold( + (Amount::zero(), 0), + |(a_balance, a_received): (Amount, u64), (b_balance, b_received)| { + let received = a_received.saturating_add(b_received); + Ok(((a_balance + b_balance)?, received)) + }, + ); balance.expect( "unexpected amount overflow: value balances are valid, so partial sum should be valid", @@ -394,7 +431,7 @@ impl DiskWriteBatch { transparent::OutPoint, OutputLocation, >, - mut address_balances: HashMap, + mut address_balances: HashMap, ) -> Result<(), BoxError> { let db = &zebra_db.db; let FinalizedBlock { block, height, .. } = finalized; @@ -452,7 +489,7 @@ impl DiskWriteBatch { db: &DiskDb, network: &Network, new_outputs_by_out_loc: &BTreeMap, - address_balances: &mut HashMap, + address_balances: &mut HashMap, ) -> Result<(), BoxError> { let utxo_by_out_loc = db.cf_handle("utxo_by_out_loc").unwrap(); let utxo_loc_by_transparent_addr_loc = @@ -476,7 +513,7 @@ impl DiskWriteBatch { // (the first location of the address in the chain). let address_balance_location = address_balances .entry(receiving_address) - .or_insert_with(|| AddressBalanceLocation::new(*new_output_location)); + .or_insert_with(|| AddressBalanceLocationChange::new(*new_output_location)); let receiving_address_location = address_balance_location.address_location(); // Update the balance for the address in memory. 
@@ -530,7 +567,7 @@ impl DiskWriteBatch { db: &DiskDb, network: &Network, spent_utxos_by_out_loc: &BTreeMap, - address_balances: &mut HashMap, + address_balances: &mut HashMap, ) -> Result<(), BoxError> { let utxo_by_out_loc = db.cf_handle("utxo_by_out_loc").unwrap(); let utxo_loc_by_transparent_addr_loc = @@ -592,7 +629,7 @@ impl DiskWriteBatch { transparent::OutPoint, OutputLocation, >, - address_balances: &HashMap, + address_balances: &HashMap, ) -> Result<(), BoxError> { let db = &zebra_db.db; let tx_loc_by_transparent_addr_loc = @@ -652,17 +689,17 @@ impl DiskWriteBatch { pub fn prepare_transparent_balances_batch( &mut self, db: &DiskDb, - address_balances: HashMap, + address_balances: HashMap, ) -> Result<(), BoxError> { - let balance_by_transparent_addr = db.cf_handle("balance_by_transparent_addr").unwrap(); + let balance_by_transparent_addr = db.cf_handle(BALANCE_BY_TRANSPARENT_ADDR).unwrap(); // Update all the changed address balances in the database. - for (address, address_balance_location) in address_balances.into_iter() { + for (address, address_balance_location_change) in address_balances.into_iter() { // Some of these balances are new, and some are updates - self.zs_insert( + self.zs_merge( &balance_by_transparent_addr, address, - address_balance_location, + address_balance_location_change, ); } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 8001c51e1a6..1b9a0b359d5 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -15,15 +15,20 @@ use tracing::instrument; use zebra_chain::{ amount::{Amount, NegativeAllowed, NonNegative}, block::{self, Height}, + block_info::BlockInfo, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees, parameters::Network, primitives::Groth16Proof, - sapling, sprout, + sapling, + serialization::ZcashSerialize as _, + sprout, subtree::{NoteCommitmentSubtree, 
NoteCommitmentSubtreeData, NoteCommitmentSubtreeIndex}, - transaction::Transaction::*, - transaction::{self, Transaction}, + transaction::{ + self, + Transaction::{self, *}, + }, transparent, value_balance::ValueBalance, work::difficulty::PartialCumulativeWork, @@ -223,6 +228,8 @@ pub struct ChainInner { /// When a new chain is created from the finalized tip, it is initialized with the finalized tip /// chain value pool balances. pub(crate) chain_value_pools: ValueBalance, + /// The block info after the given block height. + pub(crate) block_info_by_height: BTreeMap, } impl Chain { @@ -261,6 +268,7 @@ impl Chain { partial_cumulative_work: Default::default(), history_trees_by_height: Default::default(), chain_value_pools: finalized_tip_chain_value_pools, + block_info_by_height: Default::default(), }; let mut chain = Self { @@ -531,6 +539,15 @@ impl Chain { ) } + /// Returns the total pool balance after the block specified by + /// [`HashOrHeight`], if it exists in the non-finalized [`Chain`]. + pub fn block_info(&self, hash_or_height: HashOrHeight) -> Option { + let height = + hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; + + self.block_info_by_height.get(&height).cloned() + } + /// Returns the Sprout note commitment tree of the tip of this [`Chain`], /// including all finalized notes, and the non-finalized notes in this chain. /// @@ -1322,7 +1339,8 @@ impl Chain { .flat_map(|address| self.partial_transparent_transfers.get(address)) } - /// Returns the transparent balance change for `addresses` in this non-finalized chain. + /// Returns a tuple of the transparent balance change and the total received funds for + /// `addresses` in this non-finalized chain. /// /// If the balance doesn't change for any of the addresses, returns zero. 
/// @@ -1335,15 +1353,16 @@ impl Chain { pub fn partial_transparent_balance_change( &self, addresses: &HashSet, - ) -> Amount { - let balance_change: Result, _> = self - .partial_transparent_indexes(addresses) - .map(|transfers| transfers.balance()) - .sum(); + ) -> (Amount, u64) { + let (balance, received) = self.partial_transparent_indexes(addresses).fold( + (Ok(Amount::zero()), 0), + |(balance, received), transfers| { + let balance = balance + transfers.balance(); + (balance, received + transfers.received()) + }, + ); - balance_change.expect( - "unexpected amount overflow: value balances are valid, so partial sum should be valid", - ) + (balance.expect("unexpected amount overflow"), received) } /// Returns the transparent UTXO changes for `addresses` in this non-finalized chain. @@ -1616,7 +1635,8 @@ impl Chain { } // update the chain value pool balances - self.update_chain_tip_with(chain_value_pool_change)?; + let size = block.zcash_serialized_size(); + self.update_chain_tip_with(&(*chain_value_pool_change, height, size))?; Ok(()) } @@ -1808,7 +1828,8 @@ impl UpdateWith for Chain { self.remove_history_tree(position, height); // revert the chain value pool balances, if needed - self.revert_chain_with(chain_value_pool_change, position); + // note that size is 0 because it isn't need for reverting + self.revert_chain_with(&(*chain_value_pool_change, height, 0), position); } } @@ -2189,22 +2210,26 @@ impl UpdateWith<(&Option, &SpendingTransactionId)> for Ch } } -impl UpdateWith> for Chain { +impl UpdateWith<(ValueBalance, Height, usize)> for Chain { + #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, - block_value_pool_change: &ValueBalance, + (block_value_pool_change, height, size): &(ValueBalance, Height, usize), ) -> Result<(), ValidateContextError> { match self .chain_value_pools .add_chain_value_pool_change(*block_value_pool_change) { - Ok(chain_value_pools) => self.chain_value_pools = chain_value_pools, + Ok(chain_value_pools) => { + 
self.chain_value_pools = chain_value_pools; + self.block_info_by_height + .insert(*height, BlockInfo::new(chain_value_pools, *size as u32)); + } Err(value_balance_error) => Err(ValidateContextError::AddValuePool { value_balance_error, chain_value_pools: self.chain_value_pools, block_value_pool_change: *block_value_pool_change, - // assume that the current block is added to `blocks` after `update_chain_tip_with` - height: self.max_block_height().and_then(|height| height + 1), + height: Some(*height), })?, }; @@ -2226,7 +2251,7 @@ impl UpdateWith> for Chain { /// change. fn revert_chain_with( &mut self, - block_value_pool_change: &ValueBalance, + (block_value_pool_change, height, _size): &(ValueBalance, Height, usize), position: RevertPosition, ) { use std::ops::Neg; @@ -2237,6 +2262,7 @@ impl UpdateWith> for Chain { .add_chain_value_pool_change(block_value_pool_change.neg()) .expect("reverting the tip will leave the pools in a previously valid state"); } + self.block_info_by_height.remove(height); } } diff --git a/zebra-state/src/service/non_finalized_state/chain/index.rs b/zebra-state/src/service/non_finalized_state/chain/index.rs index 90e904fa840..d2ef179b3eb 100644 --- a/zebra-state/src/service/non_finalized_state/chain/index.rs +++ b/zebra-state/src/service/non_finalized_state/chain/index.rs @@ -230,6 +230,12 @@ impl TransparentTransfers { self.balance } + /// Returns the partial received balance for this address. + pub fn received(&self) -> u64 { + let received_utxos = self.created_utxos.values(); + received_utxos.map(|out| out.value()).map(u64::from).sum() + } + /// Returns the [`transaction::Hash`]es of the transactions that sent or /// received transparent transfers to this address, in this partial chain, /// filtered by `query_height_range`. 
diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index 2a1adf65c20..f95b518d18c 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -652,6 +652,9 @@ fn different_blocks_different_chains() -> Result<()> { // chain value pool chain1.chain_value_pools = chain2.chain_value_pools; + // block data + chain1.block_info_by_height = chain2.block_info_by_height.clone(); + // If this check fails, the `Chain` fields are out // of sync with `eq_internal_state` or this test. prop_assert!( diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index 8a015881b8f..c0f8c4f26b7 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -29,8 +29,8 @@ pub use address::{ utxo::{address_utxos, AddressUtxos}, }; pub use block::{ - any_utxo, block, block_and_size, block_header, mined_transaction, transaction_hashes_for_block, - unspent_utxo, + any_utxo, block, block_and_size, block_header, block_info, mined_transaction, + transaction_hashes_for_block, unspent_utxo, }; #[cfg(feature = "indexer")] diff --git a/zebra-state/src/service/read/address/balance.rs b/zebra-state/src/service/read/address/balance.rs index 14ec49fcff1..b8948b1997c 100644 --- a/zebra-state/src/service/read/address/balance.rs +++ b/zebra-state/src/service/read/address/balance.rs @@ -26,14 +26,14 @@ use crate::{ BoxError, }; -/// Returns the total transparent balance for the supplied [`transparent::Address`]es. +/// Returns the total transparent balance and received balance for the supplied [`transparent::Address`]es. /// /// If the addresses do not exist in the non-finalized `chain` or finalized `db`, returns zero. 
pub fn transparent_balance( chain: Option>, db: &ZebraDb, addresses: HashSet, -) -> Result, BoxError> { +) -> Result<(Amount, u64), BoxError> { let mut balance_result = finalized_transparent_balance(db, &addresses); // Retry the finalized balance query if it was interrupted by a finalizing block @@ -71,7 +71,7 @@ pub fn transparent_balance( fn finalized_transparent_balance( db: &ZebraDb, addresses: &HashSet, -) -> Result<(Amount, Option), BoxError> { +) -> Result<((Amount, u64), Option), BoxError> { // # Correctness // // The StateService can commit additional blocks while we are querying address balances. @@ -101,7 +101,7 @@ fn chain_transparent_balance_change( mut chain: Arc, addresses: &HashSet, finalized_tip: Option, -) -> Amount { +) -> (Amount, u64) { // # Correctness // // Find the balance adjustment that corrects for overlapping finalized and non-finalized blocks. @@ -123,7 +123,7 @@ fn chain_transparent_balance_change( // If we've already committed this entire chain, ignore its balance changes. // This is more likely if the non-finalized state is just getting started. if chain_tip < required_chain_root { - return Amount::zero(); + return (Amount::zero(), 0); } // Correctness: some balances might have duplicate creates or spends, @@ -139,10 +139,12 @@ fn chain_transparent_balance_change( /// Add the supplied finalized and non-finalized balances together, /// and return the result. fn apply_balance_change( - finalized_balance: Amount, - chain_balance_change: Amount, -) -> amount::Result> { + (finalized_balance, finalized_received): (Amount, u64), + (chain_balance_change, chain_received_change): (Amount, u64), +) -> amount::Result<(Amount, u64)> { let balance = finalized_balance.constrain()? + chain_balance_change; - - balance?.constrain() + // Addresses could receive more than the max money supply by sending to themselves, + // use u64::MAX if the addition overflows. 
+ let received = finalized_received.saturating_add(chain_received_change); + Ok((balance?.constrain()?, received)) } diff --git a/zebra-state/src/service/read/block.rs b/zebra-state/src/service/read/block.rs index d0d133e6bad..df0a9673c58 100644 --- a/zebra-state/src/service/read/block.rs +++ b/zebra-state/src/service/read/block.rs @@ -18,6 +18,7 @@ use chrono::{DateTime, Utc}; use zebra_chain::{ block::{self, Block, Height}, + block_info::BlockInfo, serialization::ZcashSerialize as _, transaction::{self, Transaction}, transparent::{self, Utxo}, @@ -256,3 +257,24 @@ pub fn any_utxo( .any_utxo(&outpoint) .or_else(|| db.utxo(&outpoint).map(|utxo| utxo.utxo)) } + +/// Returns the [`BlockInfo`] with [`block::Hash`] or +/// [`Height`], if it exists in the non-finalized `chain` or finalized `db`. +pub fn block_info( + chain: Option, + db: &ZebraDb, + hash_or_height: HashOrHeight, +) -> Option +where + C: AsRef, +{ + // # Correctness + // + // Since blocks are the same in the finalized and non-finalized state, we + // check the most efficient alternative first. (`chain` is always in memory, + // but `db` stores blocks on disk, with a memory cache.) 
+ chain + .as_ref() + .and_then(|chain| chain.as_ref().block_info(hash_or_height)) + .or_else(|| db.block_info(hash_or_height)) +} From f748d3781cb68d5a1d6b49e9eb42506467668125 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Tue, 3 Jun 2025 17:49:57 -0300 Subject: [PATCH 191/245] rpc: add deserialization tests (#9550) * rpc: add deserialization tests * move vectors to files; address misc comments --- zebra-chain/src/subtree.rs | 2 +- zebra-rpc/src/methods/trees.rs | 34 +- .../src/methods/types/get_block_template.rs | 10 +- .../types/get_block_template/parameters.rs | 4 + .../src/methods/types/get_mining_info.rs | 2 +- .../src/methods/types/get_raw_mempool.rs | 4 +- .../src/methods/types/unified_address.rs | 60 +- zebra-rpc/tests/serialization_tests.rs | 766 ++++++++++++++++ .../tests/vectors/getblock_response_1.json | 29 + .../tests/vectors/getblock_response_2.json | 815 ++++++++++++++++++ .../vectors/getblockchaininfo_response.json | 91 ++ .../getblocktemplate_response_template.json | 41 + .../getrawtransaction_response_true.json | 712 +++++++++++++++ zebra-rpc/tests/vectors/mod.rs | 11 + 14 files changed, 2524 insertions(+), 57 deletions(-) create mode 100644 zebra-rpc/tests/serialization_tests.rs create mode 100644 zebra-rpc/tests/vectors/getblock_response_1.json create mode 100644 zebra-rpc/tests/vectors/getblock_response_2.json create mode 100644 zebra-rpc/tests/vectors/getblockchaininfo_response.json create mode 100644 zebra-rpc/tests/vectors/getblocktemplate_response_template.json create mode 100644 zebra-rpc/tests/vectors/getrawtransaction_response_true.json create mode 100644 zebra-rpc/tests/vectors/mod.rs diff --git a/zebra-chain/src/subtree.rs b/zebra-chain/src/subtree.rs index 6402b0e077d..165c0c54957 100644 --- a/zebra-chain/src/subtree.rs +++ b/zebra-chain/src/subtree.rs @@ -87,7 +87,7 @@ impl NoteCommitmentSubtree { /// Subtree root of Sapling or Orchard note commitment tree, with block height, but without the subtree index. 
/// Used for database key-value serialization, where the subtree index is the key, and this struct is the value. -#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub struct NoteCommitmentSubtreeData { /// Merkle root of the 2^16-leaf subtree. diff --git a/zebra-rpc/src/methods/trees.rs b/zebra-rpc/src/methods/trees.rs index 2bf77992ecc..b6512b32a82 100644 --- a/zebra-rpc/src/methods/trees.rs +++ b/zebra-rpc/src/methods/trees.rs @@ -13,7 +13,7 @@ pub type SubtreeRpcData = NoteCommitmentSubtreeData; /// /// Contains the Sapling or Orchard pool label, the index of the first subtree in the list, /// and a list of subtree roots and end heights. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetSubtrees { /// The shielded pool to which the subtrees belong. // @@ -57,7 +57,7 @@ impl Default for GetSubtrees { /// whereas in `CommitmentTree`, the vector of ommers is sparse with [`None`] values in the gaps. /// /// The dense format might be used in future RPCs. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetTreestate { /// The block hash corresponding to the treestate, hex-encoded. #[serde(with = "hex")] @@ -73,10 +73,10 @@ pub struct GetTreestate { time: u32, /// A treestate containing a Sapling note commitment tree, hex-encoded. - sapling: Treestate>, + sapling: Treestate, /// A treestate containing an Orchard note commitment tree, hex-encoded. - orchard: Treestate>, + orchard: Treestate, } impl GetTreestate { @@ -135,26 +135,26 @@ impl Default for GetTreestate { /// A treestate that is included in the [`z_gettreestate`][1] RPC response. 
/// /// [1]: https://zcash.github.io/rpc/z_gettreestate.html -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub struct Treestate> { +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct Treestate { /// Contains an Orchard or Sapling serialized note commitment tree, /// hex-encoded. - commitments: Commitments, + commitments: Commitments, } -impl> Treestate { +impl Treestate { /// Returns a new instance of ['Treestate']. - pub fn new(commitments: Commitments) -> Self { + pub fn new(commitments: Commitments) -> Self { Treestate { commitments } } /// Returns a reference to the commitments. - pub fn inner(&self) -> &Commitments { + pub fn inner(&self) -> &Commitments { &self.commitments } } -impl Default for Treestate> { +impl Default for Treestate { fn default() -> Self { Self { commitments: Commitments { final_state: None }, @@ -169,23 +169,23 @@ impl Default for Treestate> { /// /// [1]: https://zcash.github.io/rpc/z_gettreestate.html #[serde_with::serde_as] -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] -pub struct Commitments> { +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct Commitments { /// Orchard or Sapling serialized note commitment tree, hex-encoded. #[serde_as(as = "Option")] #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "finalState")] - final_state: Option, + final_state: Option>, } -impl> Commitments { +impl Commitments { /// Returns a new instance of ['Commitments'] with optional `final_state`. - pub fn new(final_state: Option) -> Self { + pub fn new(final_state: Option>) -> Self { Commitments { final_state } } /// Returns a reference to the optional `final_state`. 
- pub fn inner(&self) -> &Option { + pub fn inner(&self) -> &Option> { &self.final_state } } diff --git a/zebra-rpc/src/methods/types/get_block_template.rs b/zebra-rpc/src/methods/types/get_block_template.rs index e4ef9743ca6..453e79b08f0 100644 --- a/zebra-rpc/src/methods/types/get_block_template.rs +++ b/zebra-rpc/src/methods/types/get_block_template.rs @@ -717,13 +717,15 @@ where Hint: check your network connection, clock, and time zone settings." ); - return Err(ErrorObject::borrowed( + return Err(ErrorObject::owned( NOT_SYNCED_ERROR_CODE.code(), - "Zebra has not synced to the chain tip, \ + format!( + "Zebra has not synced to the chain tip, \ estimated distance: {estimated_distance_to_chain_tip:?}, \ local tip: {local_tip_height:?}. \ - Hint: check your network connection, clock, and time zone settings.", - None, + Hint: check your network connection, clock, and time zone settings." + ), + None::<()>, )); } diff --git a/zebra-rpc/src/methods/types/get_block_template/parameters.rs b/zebra-rpc/src/methods/types/get_block_template/parameters.rs index d23a09ef761..3fe96b719a9 100644 --- a/zebra-rpc/src/methods/types/get_block_template/parameters.rs +++ b/zebra-rpc/src/methods/types/get_block_template/parameters.rs @@ -73,22 +73,26 @@ pub struct JsonParameters { /// /// Hex-encoded block data to be validated and checked against the server's usual acceptance rules /// (excluding the check for a valid proof-of-work). + #[serde(skip_serializing_if = "Option::is_none")] pub data: Option, /// A list of client-side supported capability features #[serde(default)] + #[serde(skip_serializing_if = "Vec::is_empty")] pub capabilities: Vec, /// An ID that delays the RPC response until the template changes. /// /// In Zebra, the ID represents the chain tip, max time, and mempool contents. #[serde(rename = "longpollid")] + #[serde(skip_serializing_if = "Option::is_none")] pub long_poll_id: Option, /// The workid for the block template. /// /// currently unused. 
#[serde(rename = "workid")] + #[serde(skip_serializing_if = "Option::is_none")] pub _work_id: Option, } diff --git a/zebra-rpc/src/methods/types/get_mining_info.rs b/zebra-rpc/src/methods/types/get_mining_info.rs index 1caa1593c27..ce38243c115 100644 --- a/zebra-rpc/src/methods/types/get_mining_info.rs +++ b/zebra-rpc/src/methods/types/get_mining_info.rs @@ -3,7 +3,7 @@ use zebra_chain::parameters::Network; /// Response to a `getmininginfo` RPC request. -#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct Response { /// The current tip height. #[serde(rename = "blocks")] diff --git a/zebra-rpc/src/methods/types/get_raw_mempool.rs b/zebra-rpc/src/methods/types/get_raw_mempool.rs index 487be865e3b..73d2b13e495 100644 --- a/zebra-rpc/src/methods/types/get_raw_mempool.rs +++ b/zebra-rpc/src/methods/types/get_raw_mempool.rs @@ -12,7 +12,7 @@ use super::zec::Zec; /// Response to a `getrawmempool` RPC request. /// /// See the notes for the [`Rpc::get_raw_mempool` method]. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum GetRawMempool { /// The transaction IDs, as hex strings (verbose=0) @@ -24,7 +24,7 @@ pub enum GetRawMempool { /// A mempool transaction details object as returned by `getrawmempool` in /// verbose mode. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct MempoolObject { /// Transaction size in bytes. 
pub(crate) size: u64, diff --git a/zebra-rpc/src/methods/types/unified_address.rs b/zebra-rpc/src/methods/types/unified_address.rs index 92806ddaded..c485dec6a6e 100644 --- a/zebra-rpc/src/methods/types/unified_address.rs +++ b/zebra-rpc/src/methods/types/unified_address.rs @@ -3,23 +3,23 @@ /// `z_listunifiedreceivers` response #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct Response { - #[serde(skip_serializing_if = "String::is_empty")] - orchard: String, - #[serde(skip_serializing_if = "String::is_empty")] - sapling: String, - #[serde(skip_serializing_if = "String::is_empty")] - p2pkh: String, - #[serde(skip_serializing_if = "String::is_empty")] - p2sh: String, + #[serde(skip_serializing_if = "Option::is_none")] + orchard: Option, + #[serde(skip_serializing_if = "Option::is_none")] + sapling: Option, + #[serde(skip_serializing_if = "Option::is_none")] + p2pkh: Option, + #[serde(skip_serializing_if = "Option::is_none")] + p2sh: Option, } impl Default for Response { fn default() -> Self { Self { - orchard: "orchard address if any".to_string(), - sapling: "sapling address if any".to_string(), - p2pkh: "p2pkh address if any".to_string(), - p2sh: "p2sh address if any".to_string(), + orchard: Some("orchard address if any".to_string()), + sapling: Some("sapling address if any".to_string()), + p2pkh: Some("p2pkh address if any".to_string()), + p2sh: Some("p2sh address if any".to_string()), } } } @@ -28,46 +28,42 @@ impl Response { /// Create a new response for z_listunifiedreceivers given individual addresses. 
pub fn new(orchard: String, sapling: String, p2pkh: String, p2sh: String) -> Response { Response { - orchard, - sapling, - p2pkh, - p2sh, + orchard: if orchard.is_empty() { + None + } else { + Some(orchard) + }, + sapling: if sapling.is_empty() { + None + } else { + Some(sapling) + }, + p2pkh: if p2pkh.is_empty() { None } else { Some(p2pkh) }, + p2sh: if p2sh.is_empty() { None } else { Some(p2sh) }, } } #[cfg(test)] /// Return the orchard payment address from a response, if any. pub fn orchard(&self) -> Option { - match self.orchard.is_empty() { - true => None, - false => Some(self.orchard.clone()), - } + self.orchard.clone() } #[cfg(test)] /// Return the sapling payment address from a response, if any. pub fn sapling(&self) -> Option { - match self.sapling.is_empty() { - true => None, - false => Some(self.sapling.clone()), - } + self.sapling.clone() } #[cfg(test)] /// Return the p2pkh payment address from a response, if any. pub fn p2pkh(&self) -> Option { - match self.p2pkh.is_empty() { - true => None, - false => Some(self.p2pkh.clone()), - } + self.p2pkh.clone() } #[cfg(test)] /// Return the p2sh payment address from a response, if any. pub fn p2sh(&self) -> Option { - match self.p2sh.is_empty() { - true => None, - false => Some(self.p2sh.clone()), - } + self.p2sh.clone() } } diff --git a/zebra-rpc/tests/serialization_tests.rs b/zebra-rpc/tests/serialization_tests.rs new file mode 100644 index 00000000000..322a6b5a75a --- /dev/null +++ b/zebra-rpc/tests/serialization_tests.rs @@ -0,0 +1,766 @@ +//! Test if the JSON-RPC requests can be serialized and responses can be +//! deserialized. +//! +//! We want to ensure that users can use this crate to build RPC clients, so +//! this is an integration test to ensure only the public API is accessed. 
+ +mod vectors; + +use vectors::{ + GET_BLOCKCHAIN_INFO_RESPONSE, GET_BLOCK_RESPONSE_1, GET_BLOCK_RESPONSE_2, + GET_BLOCK_TEMPLATE_RESPONSE_TEMPLATE, GET_RAW_TRANSACTION_RESPONSE_TRUE, +}; + +use zebra_chain::subtree::NoteCommitmentSubtreeIndex; +use zebra_rpc::methods::{ + trees::{GetSubtrees, GetTreestate, SubtreeRpcData}, + types::{ + get_block_template::{self, GetBlockTemplate}, + get_mining_info, + get_raw_mempool::GetRawMempool, + peer_info::PeerInfo, + submit_block, + subsidy::BlockSubsidy, + transaction::TransactionObject, + unified_address, validate_address, z_validate_address, + }, + AddressBalance, AddressStrings, GetAddressTxIdsRequest, GetAddressUtxos, GetBlock, + GetBlockChainInfo, GetBlockHash, GetBlockHeader, GetBlockHeaderObject, GetBlockHeightAndHash, + GetInfo, GetRawTransaction, SentTransactionHash, +}; + +#[test] +fn test_get_info() -> Result<(), Box> { + let json = r#" +{ + "version": 2030010, + "build": "v2.3.0+10.gc66d6ca.modified", + "subversion": "/Zebra:2.3.0/", + "protocolversion": 170120, + "blocks": 2930822, + "connections": 75, + "difficulty": 68556523.91969073, + "testnet": false, + "paytxfee": 0.0, + "relayfee": 1e-6, + "errors": "no errors", + "errorstimestamp": "2025-05-20 19:33:53.395307694 UTC" +}"#; + let obj: GetInfo = serde_json::from_str(json)?; + + let ( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + errors_timestamp, + ) = obj.clone().into_parts(); + + let new_obj = GetInfo::from_parts( + version, + build, + subversion, + protocol_version, + blocks, + connections, + proxy, + difficulty, + testnet, + pay_tx_fee, + relay_fee, + errors, + errors_timestamp, + ); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_blockchain_info() -> Result<(), Box> { + let json = GET_BLOCKCHAIN_INFO_RESPONSE; + let _obj: GetBlockChainInfo = serde_json::from_str(json)?; + + // TODO: add new() and getters for all fields + + 
Ok(()) +} + +#[test] +fn test_get_address_balance() -> Result<(), Box> { + // Test request + let json = r#"{"addresses":["t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak"]}"#; + let obj = AddressStrings::new_valid(vec![String::from("t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak")])?; + let new_json = serde_json::to_string(&obj)?; + assert_eq!(json, new_json); + + // Test response + let json = r#" +{ + "balance": 11290259389 +} +"#; + let obj: AddressBalance = serde_json::from_str(json)?; + let new_obj = AddressBalance { + balance: obj.balance, + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_send_raw_transaction() -> Result<(), Box> { + let json = r#""0000000001695b61dd5c82ae33a326126d6153d1641a3a1759d3f687ea377148""#; + let obj: SentTransactionHash = serde_json::from_str(json)?; + + let hash = obj.inner(); + + let new_obj = SentTransactionHash::new(hash); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_block_0() -> Result<(), Box> { + let json = r#""00000000007bacdb373ca240dc6f044f0a816a407bc1924f82a2d84ebfa6103f""#; + let _r: GetBlock = serde_json::from_str(json)?; + + // TODO: change GetBlock::Block to have a struct, add getters to it + + Ok(()) +} + +#[test] +fn test_get_block_1() -> Result<(), Box> { + let json = GET_BLOCK_RESPONSE_1; + let _r: GetBlock = serde_json::from_str(json)?; + + // TODO: change GetBlock::Block to have a struct, add getters to it + + Ok(()) +} + +#[test] +fn test_get_block_2() -> Result<(), Box> { + let json = GET_BLOCK_RESPONSE_2; + let _r: GetBlock = serde_json::from_str(json)?; + + // TODO: change GetBlock::Block to have a struct, add getters to it + + Ok(()) +} + +#[test] +fn test_get_block_header() -> Result<(), Box> { + let json = r#" +{ + "hash": "0000000001695b61dd5c82ae33a326126d6153d1641a3a1759d3f687ea377148", + "confirmations": 47, + "height": 2930583, + "version": 4, + "merkleroot": "4097b67ba0aa552538ed3fce670c756f22452f0273095f10cd693912551ebe3a", + "blockcommitments": 
"cdf618b251ca2353360d06dc3efd9f16fb45d95d2692e69b2adffa26bf2db884", + "finalsaplingroot": "35a0acf56d25f4e282d345e5a546331487b13a663f0b1f745088d57f878e9d6d", + "time": 1747751624, + "nonce": "7ddc00a80000000000000000000a00000000000000000000000000003e1e6cd7", + "solution": "0038e90b8de2fd3fc1b62218e6caeb60f20d38c0ad38d6dd05176996455c5a54fef2f99eee4fe5b887e808da827951cc9e5adb73542891d451e147f4746eb70bd34a4a2ec5ecfa8fce87ae10e8c55b8b3ffe76e40b56057d714637ac33e6434e849f3bf21aeb14bf3e1b4336eb39493110c5f0ac63d272733fa94f9e7da529fe0c8c436f9c0feb49031a20c8310a419ab670d732cce9fceda95911f8e646ef64fe6462bb449fe2fc053ca4358d8495ee254644a530b1e59dd025d9a2ce131ec187805c1cbbef9362bda8dcaed1ec8697ab570806e1e0ff0b3f1cf891a086664d0efca6127244db1b564dfa960a8527e08029cef05aa71ac10e9923620d6719702685d27938c2910f385d18368f54b588f3129c55e9f9d27e46d563a190deb39dbc877d771ad213559232280a55d4a0f9513e38ba4f6973096bd3811cd70ee63613bdb4dec033a1aeb9b5b6c1f3b96d080082c9c6e683e7f72be7c834fef1dec64c4b75b30730ff374b00968c51d7e093d3867c503e2dce7faf220249d037e49202b5a7de013474e956c61b5e7526ff35637cbfd86abef37406f3a50ec1168ddb8b5ad96c08503de5d75cae433ae4b504f6e995858640151454460e9b2ee669a44969779592682ca56e4e10d60aae11818b708b19db8593e59389d1ff50359d13f67a311d2565749d20724f239407beabf6790e54479cd5d2015e0903f94f0043ac7484c61936832d7fdf7b13de0579969a795149f77eb1a6961461b6c33b9bbcdfd203c706bf634dc1f7bb6841aebaae01e492ef69fca14996eacc9ef54947dfc268b25a74f52e46f2f504d9105d51e6619d224b0e7b47ca0dbeeece2e04552b123056be9d383cb9a1f5cc75ab8c5aa76dc2709cec58108e4df4e74a5ee2dc299192ddc4ecb4e19a7df843138157422d610c690c34a33ae6ccf16d493711827900d82c1366cdb1e147b5d4fc2b4d5fd32ef95eaa4406bd7d52dec5ee30e258311336c27b4e7069faedd608f86cc239cd62006c03923df66d362ca5203026e4780d277f13e73b2163a04858c3c413de5e9c5470c90e59e6d7b391cd85a59cc47a68f5e95ada981eba3d35878435e39c23599efb53a411b6397d062b4e4f9b0f423d2b8ad7a0e2fdbe8489374f23193882bd473a53ac542d81e81dc9eb2b661ca9d6816e242bffb83a00dc6f70a511b469a75271458ef43a66b1ab7b43163fd3
ddc0c1d24239d176db980fe5e316fc127adbd005253897ea0867306dc0811a3ea87cd049236e3b5f4ee58bb310ecf7039f33eabaf6e091ff682c9bb6740e0c3171bf7025cba3587827cc5008fb2d6a5cb83c1ba48d58718c4f42f506b4794ffe0721411738bd671d12d20c3a08c9e06c27258f0bd7d295b46fbfc53f48bdcdd7be62cb87a437b9865be5ca6fb6155e7e6801a73a8b335432d303fc22c5a7a27484f46936fe7124a1a363f90fd924a08e540968ecdc71c6f11ddc8a2aa9161c8b532984c911f4e780474785d296b02e4d2d12f9c4c46b735f79c3c9351ef5bebea2a65b48eb0747384a31d7e6c9d3a0c2507cef7df8971fd541570a3174b74ec91401acb5b45f105e8b25dd407c745d08da0cc4d5c88dd33bd3c2876c2af6a4f110c8867638e6dc6e72b3b0ddb37ef6aa4dedbb7dca039a0e08049502e526c8f72121a68ae5385bad3b5bd59efadc0b8882cccad2634937da612098e760c4f9510fcf311517d4ae2c4e0e8f081354194329b42d3a2c0c93924aa985a9b99598377a98489881e83b5eb3f155ca120a28d4bfd2d43d01a6dd368d52626905f26cb3ff9c0d5b98a9796172e54fd1f2b7dc7851fd3c9e191abd14e96c8781c6453f33a198797ee50f02682a7c2a7829420e0b40fe787dfc7f32ce05df3a3a86fc59700e", + "bits": "1c023081", + "difficulty": 61301397.633212306, + "previousblockhash": "0000000000d12367f80be78e624d263faa6e6fda718453cbb6f7dc71205af574", + "nextblockhash": "0000000001d8a2a9c19bc98ecb856c8406ba0b2d7d42654369014e2a14dd9c1d" +} +"#; + let r: GetBlockHeader = serde_json::from_str(json)?; + + let GetBlockHeader::Object(obj) = &r else { + panic!("Expected Object variant"); + }; + + let new_obj = GetBlockHeader::Object(Box::new(GetBlockHeaderObject { + hash: obj.hash, + confirmations: obj.confirmations, + height: obj.height, + version: obj.version, + merkle_root: obj.merkle_root, + block_commitments: obj.block_commitments, + final_sapling_root: obj.final_sapling_root, + sapling_tree_size: obj.sapling_tree_size, + time: obj.time, + nonce: obj.nonce, + solution: obj.solution, + bits: obj.bits, + difficulty: obj.difficulty, + previous_block_hash: obj.previous_block_hash, + next_block_hash: obj.next_block_hash, + })); + + assert_eq!(r, new_obj); + + Ok(()) +} + +#[test] +fn test_get_block_height_hash() -> Result<(), 
Box> { + let json = r#" +{ + "height": 2931705, + "hash": [35, 5, 244, 118, 21, 236, 8, 168, 3, 119, 95, 171, 238, 9, 233, 152, 250, 106, 153, 253, 6, 176, 155, 7, 155, 161, 146, 1, 0, 0, 0, 0] +} +"#; + let obj: GetBlockHeightAndHash = serde_json::from_str(json)?; + let new_obj = GetBlockHeightAndHash { + height: obj.height, + hash: obj.hash, + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_raw_mempool_false() -> Result<(), Box> { + let json = r#" +[ + "77ec13dde45185e99dba408d592c5b30438e8c71af5b6e2d9f4d29cb4da8ccbf" +] +"#; + let obj: GetRawMempool = serde_json::from_str(json)?; + + let GetRawMempool::TxIds(txids) = &obj else { + panic!("Expected TxIds variant"); + }; + + let new_obj = GetRawMempool::TxIds(txids.clone()); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_raw_mempool_true() -> Result<(), Box> { + let json = r#" +{ + "05cef70f5ed2467bb657664fe9837cdb0490b9cd16780f05ced384fd2c7dc2b2": { + "size": 9165, + "fee": 0.0001, + "modifiedfee": 0.0001, + "time": 1747836987, + "height": 2931716, + "descendantcount": 1, + "descendantsize": 9165, + "descendantfees": 10000, + "depends": [ + ] + }, + "d1e0c4f9c5f19c86aec3df7744aed7a88bc47edd5c95dd4e502b889ea198c701": { + "size": 1374, + "fee": 0.0002, + "modifiedfee": 0.0002, + "time": 1747836995, + "height": 2931716, + "descendantcount": 1, + "descendantsize": 1374, + "descendantfees": 20000, + "depends": [ + ] + } +} +"#; + let obj: GetRawMempool = serde_json::from_str(json)?; + + let GetRawMempool::Verbose(mempool_map) = &obj else { + panic!("Expected Verbose variant"); + }; + + // TODO: add new()/getters to MempoolObject and test them + + let new_obj = GetRawMempool::Verbose(mempool_map.clone()); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_z_get_treestate() -> Result<(), Box> { + let json = r#" +{ + "hash": "000000000154f210e2451c45a192c69d12c0db18a427be13be3913e0feecd6f6", + "height": 2931720, + "time": 1747837185, + "sapling": { + "commitments": 
{ + "finalState": "01f84e35f84dfd9e53effcd74f98e9271b4df9c15e1681b7dc4f9a971e5c98531e001f0105354e35c5daa8831b957f6f702affaa835bc3758e9bd323aafeead50ddfa561000001157a4438a622a0677ec9d1099bf963614a0a65b1e24ea451c9f55eef64c62b650001a5fc8bf61968a934693b7b9a4abd894c4e4a1bd265525538f4877687504fe50a000193d7f432e23c862bf2f831392861199ab4c70d358d82695b6bf8fa9eb36b6b63000184585eb0d4f116b07b9bd359c461a499716a985a001201c66d1016e489a5672f01aad38587c7f2d5ebd1c2eea08a0660e9a9fd1a104b540767c2884354a48f0a6d01ff10064c6bf9aba73d638878a63c31de662f25aea58dc0033a3ada3d0a695b54000001060af6a6c1415a6eaf780073ffa3d0ab35af7bb391bccc4e6ea65a1230dad83001ab58f1ebb2860e257c50350a3e1b54778b7729bdd11eacaa9213c4b5f4dbb44c00017d1ce2f0839bdbf1bad7ae37f845e7fe2116e0c1197536bfbad549f3876c3c590000013e2598f743726006b8de42476ed56a55a75629a7b82e430c4e7c101a69e9b02a011619f99023a69bb647eab2d2aa1a73c3673c74bb033c3c4930eacda19e6fd93b0000000160272b134ca494b602137d89e528c751c06d3ef4a87a45f33af343c15060cc1e0000000000" + } + }, + "orchard": { + "commitments": { + "finalState": 
"01a110b4b3e1932f4e32e972d34ba5b9128a21b5dec5540dbb50d6f6eabd462237001f01206c514069d4cb68fb0a4d5dfe6eb7a31bcf399bf38a3bd6751ebd4b68cec3130001a73e87cab56a4461a676c7ff01ccbf8d15bbb7d9881b8f991322d721d02ded0a0001bc5a28c4a9014698c66a496bd35aa19c1b5ffe7b511ce8ff26bdcbe6cf0caa0c01ad5ba4f75b9685f7b4e1f47878e83d5bcd888b24359e4a3f2309b738c0211c1e01f12bdfe8eebc656f4f4fefc61ebd8a0b581a10b5cb3c4d8681f26384f907d910000158c6fbe19bb748e830a55b80fc62b414a3763efd461bb1885c10bebf9cee86130101683a742a4b5b3d7e0e802239d70cd480cc56eeaefac844359aa2c32dc41d3700000001756e99d87177e232e3c96f03e412d8bf3547a0fea00434ba153c7dac9990322d016211c99d795da43b33a1397859ae9745bc3e74966fa68b725ce3c90dca2d11300000012d113bc8f6a4f41b3963cfa0717176c2d31ce7bfae4d250a1fff5e061dd9d3250160040850b766b126a2b4843fcdfdffa5d5cab3f53bc860a3bef68958b5f066170001cc2dcaa338b312112db04b435a706d63244dd435238f0aa1e9e1598d35470810012dcc4273c8a0ed2337ecf7879380a07e7d427c7f9d82e538002bd1442978402c01daf63debf5b40df902dae98dadc029f281474d190cddecef1b10653248a234150001e2bca6a8d987d668defba89dc082196a922634ed88e065c669e526bb8815ee1b000000000000" + } + } +} +"#; + let obj: GetTreestate = serde_json::from_str(json)?; + + let (hash, height, time, sapling_final_state, orchard_final_state) = obj.clone().into_parts(); + + let new_obj = + GetTreestate::from_parts(hash, height, time, sapling_final_state, orchard_final_state); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_z_get_subtrees_by_index() -> Result<(), Box> { + let json = r#" +{ + "pool": "orchard", + "start_index": 0, + "subtrees": [ + { + "root": "d4e323b3ae0cabfb6be4087fec8c66d9a9bbfc354bf1d9588b6620448182063b", + "end_height": 1707429 + } + ] +} + +"#; + let obj: GetSubtrees = serde_json::from_str(json)?; + + let pool = obj.pool.clone(); + let start_index = obj.start_index.0; + let subtree_root = obj.subtrees[0].root.clone(); + let subtree_end_height = obj.subtrees[0].end_height; + + let new_obj = GetSubtrees { + pool, + start_index: 
NoteCommitmentSubtreeIndex(start_index), + subtrees: vec![SubtreeRpcData { + root: subtree_root, + end_height: subtree_end_height, + }], + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_raw_transaction_true() -> Result<(), Box> { + let json = GET_RAW_TRANSACTION_RESPONSE_TRUE; + let obj: GetRawTransaction = serde_json::from_str(json)?; + + let GetRawTransaction::Object(tx) = &obj else { + panic!("Expected GetRawTransaction::Object"); + }; + + let hex = tx.hex.clone(); + let height = tx.height; + let confirmations = tx.confirmations; + let inputs = tx.inputs.clone(); + let outputs = tx.outputs.clone(); + let shielded_spends = tx.shielded_spends.clone(); + let shielded_outputs = tx.shielded_outputs.clone(); + let orchard = tx.orchard.clone(); + let value_balance = tx.value_balance; + let value_balance_zat = tx.value_balance_zat; + let size = tx.size; + let time = tx.time; + + // TODO: add test for ShieldedSpend, ShieldedOutput, Orchard + + let new_obj = GetRawTransaction::Object(Box::new(TransactionObject { + hex, + height, + confirmations, + inputs, + outputs, + shielded_spends, + shielded_outputs, + orchard, + value_balance, + value_balance_zat, + size, + time, + })); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_address_tx_ids() -> Result<(), Box> { + // Test request only (response is trivial) + let json = + r#"{"addresses":["t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak"],"start":2931856,"end":2932856}"#; + // TODO: allow not passing start or end + let obj = GetAddressTxIdsRequest::from_parts( + vec!["t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak".to_string()], + 2931856, + 2932856, + ); + let new_json = serde_json::to_string(&obj)?; + assert_eq!(json, new_json); + Ok(()) +} + +#[test] +fn test_get_address_utxos() -> Result<(), Box> { + let json = r#" +[ + { + "address": "t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak", + "txid": "6ee3e8a86dfeca629aeaf794aacb714db1cf1868bc9fe487de443e6197d8764a", + "outputIndex": 0, + "script": 
"76a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac", + "satoshis": 125000000, + "height": 2931856 + } +] +"#; + let obj: Vec = serde_json::from_str(json)?; + let (address, txid, output_index, script, satoshis, height) = obj[0].clone().into_parts(); + + let new_obj = vec![GetAddressUtxos::from_parts( + address, + txid, + output_index, + script, + satoshis, + height, + )]; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_block_hash() -> Result<(), Box> { + let json = r#""0000000001695b61dd5c82ae33a326126d6153d1641a3a1759d3f687ea377148""#; + let obj: GetBlockHash = serde_json::from_str(json)?; + + let hash = obj.0; + + let new_obj = GetBlockHash(hash); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_block_template_request() -> Result<(), Box> { + let json = r#"{"mode":"template"}"#; + + // TODO: add new() method + + let new_obj = get_block_template::parameters::JsonParameters { + mode: get_block_template::parameters::GetBlockTemplateRequestMode::Template, + data: None, + capabilities: vec![], + long_poll_id: None, + _work_id: None, + }; + let new_json = serde_json::to_string(&new_obj)?; + assert_eq!(json, new_json); + + Ok(()) +} + +#[test] +fn test_get_block_template_response() -> Result<(), Box> { + let json = GET_BLOCK_TEMPLATE_RESPONSE_TEMPLATE; + let obj: get_block_template::Response = serde_json::from_str(json)?; + + let get_block_template::Response::TemplateMode(template) = &obj else { + panic!("Expected get_block_template::Response::TemplateMode"); + }; + + let capabilities = template.capabilities.clone(); + let version = template.version; + let previous_block_hash = template.previous_block_hash.0 .0; + let block_commitments_hash: [u8; 32] = template.block_commitments_hash.into(); + let light_client_root_hash: [u8; 32] = template.light_client_root_hash.into(); + let final_sapling_root_hash: [u8; 32] = template.final_sapling_root_hash.into(); + let default_roots = template.default_roots.clone(); + // TODO: test all these 
types to ensure they can be read fully + let transactions = template.transactions.clone(); + let coinbase_txn = template.coinbase_txn.clone(); + let long_poll_id = template.long_poll_id; + let target = template.target; + let min_time = template.min_time; + let mutable = template.mutable.clone(); + let nonce_range = template.nonce_range.clone(); + let sigop_limit = template.sigop_limit; + let size_limit = template.size_limit; + let cur_time = template.cur_time; + let bits = template.bits; + let height = template.height; + let max_time = template.max_time; + let submit_old = template.submit_old; + + let new_obj = get_block_template::Response::TemplateMode(Box::new(GetBlockTemplate { + capabilities, + version, + previous_block_hash: GetBlockHash(zebra_chain::block::Hash(previous_block_hash)), + block_commitments_hash: block_commitments_hash.into(), + light_client_root_hash: light_client_root_hash.into(), + final_sapling_root_hash: final_sapling_root_hash.into(), + default_roots, + transactions, + coinbase_txn, + long_poll_id, + target, + min_time, + mutable, + nonce_range, + sigop_limit, + size_limit, + cur_time, + bits, + height, + max_time, + submit_old, + })); + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_submit_block() -> Result<(), Box> { + let json = r#""duplicate""#; + let obj: submit_block::Response = serde_json::from_str(json)?; + + assert_eq!( + obj, + submit_block::Response::ErrorResponse(submit_block::ErrorResponse::Duplicate) + ); + + Ok(()) +} + +#[test] +fn test_get_mining_info() -> Result<(), Box> { + let json = r#" +{ + "blocks": 2934350, + "currentblocksize": 1629, + "currentblocktx": 0, + "networksolps": 6644588130, + "networkhashps": 6644588130, + "chain": "main", + "testnet": false +} +"#; + let _obj: get_mining_info::Response = serde_json::from_str(json)?; + + // TODO: add getters + + Ok(()) +} + +#[test] +fn test_get_peer_info() -> Result<(), Box> { + let json = r#" +[ + { + "addr": "192.168.0.1:8233", + "inbound": false + }, + 
{ + "addr": "[2000:2000:2000:0000::]:8233", + "inbound": false + } +] +"#; + let obj: Vec = serde_json::from_str(json)?; + + let addr0 = obj[0].addr; + let inbound0 = obj[0].inbound; + let addr1 = obj[1].addr; + let inbound1 = obj[1].inbound; + + // TODO: allow getting IP from PeerInfo (change to SocketAddr?) + // TODO: add getters, new() + + let new_obj = vec![ + PeerInfo { + addr: addr0, + inbound: inbound0, + }, + PeerInfo { + addr: addr1, + inbound: inbound1, + }, + ]; + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_validate_address() -> Result<(), Box> { + let json = r#" +{ + "isvalid": true, + "address": "t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak", + "isscript": false +} +"#; + let obj: validate_address::Response = serde_json::from_str(json)?; + + let is_valid = obj.is_valid; + let address = obj.address.clone(); + let is_script = obj.is_script; + + let new_obj = validate_address::Response { + is_valid, + address, + is_script, + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_z_validate_address() -> Result<(), Box> { + let json = r#" +{ + "isvalid": true, + "address": "u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf", + "address_type": "unified", + "ismine": false +} +"#; + let obj: z_validate_address::Response = serde_json::from_str(json)?; + + let is_valid = obj.is_valid; + let address = obj.address.clone(); + let address_type = obj.address_type.clone(); + let is_mine = obj.is_mine; + + let new_obj = z_validate_address::Response { + is_valid, + address, + address_type, + is_mine, + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_get_block_subsidy() -> Result<(), Box> { + let json = r#" +{ + "fundingstreams": [ + { + "recipient": "Zcash Community Grants NU6", + "specification": "https://zips.z.cash/zip-1015", + "value": 0.125, + "valueZat": 12500000, + "address": "t3cFfPt1Bcvgez9ZbMBFWeZsskxTkPzGCow" + } + ], + 
"lockboxstreams": [ + { + "recipient": "Lockbox NU6", + "specification": "https://zips.z.cash/zip-1015", + "value": 0.1875, + "valueZat": 18750000 + } + ], + "miner": 1.25, + "founders": 0.0, + "fundingstreamstotal": 0.125, + "lockboxtotal": 0.1875, + "totalblocksubsidy": 1.5625 +} +"#; + let obj: BlockSubsidy = serde_json::from_str(json)?; + + let funding_streams = obj.funding_streams.clone(); + let lockbox_streams = obj.lockbox_streams.clone(); + // TODO: check if FundingStream can be read and recreated + let miner = obj.miner; + let founders = obj.founders; + let funding_streams_total = obj.funding_streams_total; + let lockbox_total = obj.lockbox_total; + let total_block_subsidy = obj.total_block_subsidy; + + // TODO: add getters, new() + + let new_obj = BlockSubsidy { + funding_streams, + lockbox_streams, + miner, + founders, + funding_streams_total, + lockbox_total, + total_block_subsidy, + }; + + assert_eq!(obj, new_obj); + + Ok(()) +} + +#[test] +fn test_z_list_unified_receivers() -> Result<(), Box> { + let json = r#" +{ + "sapling": "zs1mrhc9y7jdh5r9ece8u5khgvj9kg0zgkxzdduyv0whkg7lkcrkx5xqem3e48avjq9wn2rukydkwn", + "p2pkh": "t1V9mnyk5Z5cTNMCkLbaDwSskgJZucTLdgW" +} +"#; + // TODO: fix, allow deserializing with missing fields + let _obj: unified_address::Response = serde_json::from_str(json)?; + + // TODO: add getters + + Ok(()) +} + +#[test] +fn test_generate() -> Result<(), Box> { + let json = r#" +[ + "0000000001695b61dd5c82ae33a326126d6153d1641a3a1759d3f687ea377148", + "0000000001695b61dd5c82ae33a326126d6153d1641a3a1759d3f687ea377149" +] +"#; + // TODO: fix, allow deserializing with missing fields + let obj: Vec = serde_json::from_str(json)?; + let hash0 = obj[0].0; + let hash1 = obj[1].0; + let new_obj = vec![GetBlockHash(hash0), GetBlockHash(hash1)]; + assert_eq!(obj, new_obj); + + Ok(()) +} diff --git a/zebra-rpc/tests/vectors/getblock_response_1.json b/zebra-rpc/tests/vectors/getblock_response_1.json new file mode 100644 index 00000000000..7df2ffbb7fa 
--- /dev/null +++ b/zebra-rpc/tests/vectors/getblock_response_1.json @@ -0,0 +1,29 @@ +{ + "hash": "00000000007bacdb373ca240dc6f044f0a816a407bc1924f82a2d84ebfa6103f", + "confirmations": 709704, + "height": 2222000, + "version": 4, + "merkleroot": "8ec995e4b5a7541768dffa8c707b8faf859b5808800e2bd53a549a4663782ab5", + "blockcommitments": "fa1c98f49f74831f8a74f1b8908b6c8889a56ffebc20f62c114a7a007efc4dad", + "finalsaplingroot": "5fe3724bebe096cc42c951803dd01e9657ead338327439902d19417ae9bda7e2", + "finalorchardroot": "9cbf6c20965e2ae361322b9e7fd92c7df8b3c6d41040f544a205a4a2d75abf2a", + "tx": [ + "21f2c7d10cc8857e60fd225f0f089126d9274e892f8d4e135e9e915adb06b72d", + "603df6640eade811df35190e544560531b9e4fbe3e13e423a191fa5598b2a0ea" + ], + "time": 1694339033, + "nonce": "ffbc08000000000000000000000200000000000000000000000000008dc7bae7", + "solution": "009d9775a8520e71421201f0bea96e2edafdfded9b219b54caefd8f9216f88e2b96a366f5decaf316cab091b522ab2a9fe89e55ea284f9b5b1bd58593f12492fbdb490fe77d92de9f232fe243966c1b1b3d2a666094f71705553af39895ae2dcfbcae0656c7d5a1754216ee48257c9bb5d3518a6515f4b9442aa0db6a04857c456c22a7215d5eb7db85b36436d66241c9195925b06acac6599dface03a86d0c8598dfac42bb770180292afc02234e9b3afb0707b2fe21b585d6f14837d30511ff268a6a3738f088324a3c98e1d788d7233c32574d6ad964fa9311f16744883bbc2ea32bafb29d364b60fd8c037c80feb18366a9ddf4c2346da1d38360d52638aaac7af9124a2c2ca135adea5fdd3939eaa28fa7b0d855c40ace24d042e5b2bfdba96ddb97ebd1ead1f6e57b8c869c931646585f8914d1a66eab20c473d7aaa89164d1f01de26bc7977915a551bf84e5304bca75dc0102b856b9683b54438795a4d6277ba793c0f4359e39107ef7b2a9c590efb512f2558dd03d50eb8619dbd0b39ad6bd4a4f0f4d87e45e66df721c110de6bf38723d5f9fe4be26fd718e0856924b9f7f31421479af030fa03bf48f88512e7db2fb3133e0a0a1449fa35e8b4ce59d41571e4736c2c40f9be98393a22cc62f281dd1f32ee15e231d574dfd93db6b134e441741fe65a1a11cbddbb64206c60c5f5d96a5752b80a7d99c3d1760d09f562b663625d8d0e19073afe2d56cc14e965ded1c15167175ef0e315020fecdbe2451433fdc2dd90827edafa5447274ee83d4e352ea7e71a333285fa5
6766d5991d8d612ed376ac139d678861d41559133c87510f0a172587ac41096659174a5ab51c1e5c3ca6c340befe78589bacdcd6e90855d7a3d2661ff71eb0fae44b33414623275580fdc4d1f42f532229ec35e256869632537263ad27007c47b5f92b2026232e4a90da202a34b6183cc421195c68b786dc3088dcce0c050acb98713bc6f0279245388909d145a0b28761e93a747ef6947c249c6f713166f235df7232996ead1772e4d61d558433db6bd35f104967359251dcb2ba89e4a7604b4b975027132aee9fc7ebf16a6293cf3ea823b6cb9fa74f6e773eac58e050656f2ba3462aa9e54cef43327d37385b1ed9ea596fd7272054c21efd3a5e6679b45bc27915ae909c30b045c2cf30e55d9220966faa5a89b46b08539acd5e7a535524d1e463ab5949eef73b6482239f82d234e06209e9fcf20b612225f617d25a9dc8669a8747c3f6c317edb5624ea9f80724a66e70dffc2546caf8254fb6d9a9f03103f418d0ed8bfabc2fcb440cc46fa49aaccd111a2538b54a225241db76f08c7f338772d2218cf73355248f54eb2441887f4fc4225e2965824fdf351b10f8f57bb2f5753ddd5a1727d95be3c307d0d827d7a78930286e75f2dd451d77759667a92f19769b351937a811e668f48fa2e1d05ef7ce4cc87f335ad87727ce3b8038c88fbddd1d44b2c587515d9dcab6275a13620411ca5fb7e36360deff1eb74cbace9dd2579b179a49c08609e86d62cb41fdcbe634fd7fd6d813598cc21a03fcc6ec0c91ec4fbac580bcd1a6714135cc91bb5c82a543d6ff1176e43c76a46fd99f2e89a728d36f0983642b78976d33ecab9d7f81d0e4a7a661ccd372c1d03015c50f6c6680f215920d114bbc71b34187c932a0b28d8fbf32963f94bd4b0d99ae6d05a327b31b1f204171fc0b6edc2d1ee3b6208316e3679975e52d593086807b15487caa4523c87c69e57abfdf2295bbcb5086dad50fd6c100f63614348d2408368f78f6fa839148987b91216ac01ab48b6ddb7d9b7a9de00d09ece100924430d083ae7c25968812077d26e57285c0d1531abd9aaf7ec56d7bd9c16d87353db47c10fde1dee", + "bits": "1c01af61", + "difficulty": 79650795.00685483, + "trees": { + "sapling": { + "size": 72999499 + }, + "orchard": { + "size": 48658043 + } + }, + "previousblockhash": "00000000016a6b2beefb5967219ecaa3dce52c7ab05435a0cb5ded163bc09941", + "nextblockhash": "000000000073705e8066c09043bdb4d2e8eaf5e5ccc83b2d582b6eabdf61de25" +} \ No newline at end of file diff --git a/zebra-rpc/tests/vectors/getblock_response_2.json 
b/zebra-rpc/tests/vectors/getblock_response_2.json new file mode 100644 index 00000000000..9f76bdd584e --- /dev/null +++ b/zebra-rpc/tests/vectors/getblock_response_2.json @@ -0,0 +1,815 @@ +{ + "hash": "00000000007bacdb373ca240dc6f044f0a816a407bc1924f82a2d84ebfa6103f", + "confirmations": 709705, + "size": 5702, + "height": 2222000, + "version": 4, + "merkleroot": "8ec995e4b5a7541768dffa8c707b8faf859b5808800e2bd53a549a4663782ab5", + "blockcommitments": "fa1c98f49f74831f8a74f1b8908b6c8889a56ffebc20f62c114a7a007efc4dad", + "finalsaplingroot": "5fe3724bebe096cc42c951803dd01e9657ead338327439902d19417ae9bda7e2", + "finalorchardroot": "9cbf6c20965e2ae361322b9e7fd92c7df8b3c6d41040f544a205a4a2d75abf2a", + "tx": [ + { + "hex": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff0503b0e72100ffffffff04e8bbe60e000000001976a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac40787d010000000017a914931fec54c1fea86e574462cc32013f5400b891298738c94d010000000017a914c7a4285ed7aed78d8c0e28d7f1839ccb4046ab0c87286bee000000000017a914d45cb1adffb5215a42720532a076f02c7c778c908700000000b0e721000000000000000000000000", + "height": 2222000, + "confirmations": 709705, + "vin": [ + { + "coinbase": "03b0e72100", + "sequence": 4294967295 + } + ], + "vout": [ + { + "value": 2.50002408, + "valueZat": 250002408, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "76a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak" + ] + } + }, + { + "value": 0.25, + "valueZat": 25000000, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "a914931fec54c1fea86e574462cc32013f5400b8912987", + "reqSigs": 1, + "type": "", + "addresses": [ + "t3XyYW8yBFRuMnfvm5KLGFbEVz25kckZXym" + ] + } + }, + { + "value": 0.21875, + "valueZat": 21875000, + "n": 2, + "scriptPubKey": { + "asm": "", + "hex": "a914c7a4285ed7aed78d8c0e28d7f1839ccb4046ab0c87", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t3cmE3vsBc5xfDJKXXZdpydCPSdZqt6AcNi" + ] + } + }, + { + "value": 0.15625, + "valueZat": 15625000, + "n": 3, + "scriptPubKey": { + "asm": "", + "hex": "a914d45cb1adffb5215a42720532a076f02c7c778c9087", + "reqSigs": 1, + "type": "", + "addresses": [ + "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" + ] + } + } + ], + "vShieldedSpend": [], + "vShieldedOutput": [], + "valueBalance": 0.0, + "valueBalanceZat": 0, + "size": 205, + "time": 1694339033 + }, + { + "hex": "050000800a27a726b4d0d6c200000000320c2500003036000000000000001976a914751e76e8199196d454941c45d1b3a323f1433bd688ac36000000000000001976a91406afd46bcdfd22ef94ac122aa11f241244a37ecc88ac36000000000000001976a9147dd65592d0ab2fe0d0257d571abf032cd9db93dc88ac36000000000000001976a914c42e7ef92fdb603af844d064faad95db9bcdfd3d88ac36000000000000001976a9144747e8746cddb33b0f7f95a90f89f89fb387cbb688ac36000000000000001976a9147fda9cf020c16cacf529c87d8de89bfc70b8c9cb88ac36000000000000001976a9145dedfbf9ea599dd4e3ca6a80b333c472fd0b3f6988ac36000000000000001976a9149652d86bedf43ad264362e6e6eba6eb76450812788ac36000000000000001976a914b46abf4d9e1746e33bcc39cea3de876c29c4adf388ac36000000000000001976a914185140bb54704a9e735016faa7a8dbee4449bddc88ac36000000000000001976a914362995a6e6922a04e0b832a80bc56c33709a42d288ac36000000000000001976a914dd100be7d9aea5721158ebde6d6a1fd8fff93bb188ac36000000000000001976a91457526b1a1534d4bde788253281649fc2e91dc70b88ac36000000000000001976a914726d44b7af8228257c030bafe764d3c5839d5c0288ac36000000000000001976a9149fc5dbe5efdce10374a4dd4053c93af54021171888ac36000000000000001976a91460aa32549d990a09863b8fd4ce611ebd70bb310b88ac36000000000000001976a9144f99bbf75707e44bc2afa65337dece914e817aac88ac36000000000000001976a914f0f4189b8cf9f2db0ab8d3a3c009e1823a58842e88ac36000000000000001976a9146b3aaefc2a4c3f37ddc733d1868ddc6f0d2c0e9888ac36000000000000001976a914385defb0ed10fe95817943ed37b4984f8f4255d688ac36000000000000001976a9148f9dff39a81ee4abcbad2ad8bafff090415a2be888ac36000000000000001976a914aa2e99eb9e6a61dded3caa3d5d5e21db34c79c2188ac3600
0000000000001976a914500de0c9a7c7777e02ab8e0e86c9f55bda5df75688ac36000000000000001976a914ce6525a78d260330058aaf5a5f99c9c420935daa88ac36000000000000001976a9144d2b45716a15de661a26cf1b6b7865fd4f5eb42c88ac36000000000000001976a91411f8d2b7930e3a88a18ae1d9407efc01fc90e9a488ac36000000000000001976a91420d637c1a6404d2227f3561fdbaff5a680dba64888ac36000000000000001976a914de213dbfa5dea6f264528b4aace26d91d1cc3c5b88ac36000000000000001976a914d7fd5d1ba18e281d71b4ff69ffa065b75d8c548b88ac36000000000000001976a914896007cb039c6648498ba434b2d0ed00837c1a3588ac36000000000000001976a9142fbd32c8dd59ee7c17e66cb6ebea7e9846c3040f88ac36000000000000001976a914fcd35ddacad9f2d5be5e464639441c6065e6955d88ac36000000000000001976a914b933285aef52826902abd32c3d70824346095d7788ac36000000000000001976a914e142ca9bfc2d56cd0adb82f8dc870424767389f788ac36000000000000001976a9149cb04e0ed442380c217c0ffdaed032645369daaa88ac36000000000000001976a9142fdf6dcbef16144583a151230ecee67fa3c503d988ac36000000000000001976a914e7616ca66fd2937f140c57a40c6ce9b4b23fc82e88ac36000000000000001976a9149855a09366e06d7d30d5ddbe69b64a0adf3ed93488ac36000000000000001976a91437b891cec5a1c4e0fd96a4e7c023bb294a76e5ad88ac36000000000000001976a9147220c25031708a78712cbee318585135993fcf1a88ac36000000000000001976a914c8d8650cd42ded5d598040f328e6ee9ca552286688ac36000000000000001976a9149290649ba520a35912dab1733b6f098587e432ef88ac36000000000000001976a914cdba1c12fad7c1f0936be8dc0e7260ef6456a04d88ac36000000000000001976a914352f5afc63d272a43c568e1d65ce6403d48fe29e88ac36000000000000001976a914daf0dd5a501fcddb72a0ee47444be2f6f3c4c4c388ac36000000000000001976a91416fe2eecdbb4d50b2d635672c6c0bb6b5925bd7b88ac36000000000000001976a9141481094ba143f80b9c30eceb20a8d07d29f0e8fa88ac36000000000000001976a914d6791b1b29afe3997a5cdbc40f03d37baed379b988ac01fb329ecb39678d9771de60b4850f4248d2347e9ce196f8518c3182e87e6b348d4f4df216521021d0d5da32e09ace8bcd2070e5c174b52b51d7fcc9c9e201adaaccf260055638849e99ef8b99a50326f27678c9b6c4bfe53083b4d42363436322029d01a8f0d112d6f715b49d26ce601abe6cd40f4a
00201a3ba2e32f59bacdbab7fcdeb0f9cce13c81b6ddf4d0e99444c3c48871f4a3242045011d0dac296ed21d7702e5569d00133acefe4c5c6ae581903e75af496b205743bbc0566be39aa9dbe5198be82a0b1fa74d86dd16df15f9d67a369945dedf4b5956fc821776f47ebe38e8cda34bfd30021712824d3c005ba2308a2c4ac4717b857930c6cdd9efbc1ac6031d44af41227b2ce0b97545d53c4ca1b8a42e4d88d9949977d7fc95c9364fe12e87deb32120c48943188cfbbef2556c155585a969986d4f52a543bbc4bfc183fb26c77648945298674b6c8126bc9281031936e45a521e08a7c7a7849bd8c5701c537c3dae0f718d909a67eebf2f2282d1a35bad35830be364f4e959bb5c5494a9973ba12fa68f8376a0a6a88d9ee4d403e699c7487b152de82b58086d2784df899da52aa6cc1e5522224cd4e542f7e18c6fd4e9c380640a2be9500b5a9dd3165777e2c4aa372b6af287d67bd3a6cbdcf0dceb5149b37391b3f86648071280ff4525351a916c9f13c070a51f5b5cfa5d809ad46d73667c2dd1ae3246c9374e279361f9be61b5a44abf287fda9f93f8145ef2da95bf12a9bb771f2f98606aae1d3ef9892b8386af2daf5ddfd6f3f362ccb896ded97d74c2ad53bcafc108061711a4df49f55dbdd1af3d512b243fb763b0dae85982fb61f39dd4f7bab1248b4d36ca2aea500e0ebc71d211c60f995ce3e7a30a877d88cc884b862cec34641a6adf7fbfe474ea0e8f73cb8e13eae0d9d6d654b3e926bfe07f11acbceb364f64cb1b964232a245afe0f66daef6dc75f827849972e903a658e7cfb6d49af7e9cf8bce366c0c37a418aed82e324831de5d31a7b5d3d2c0c8fcd9ddaabe142c59aefd583285402f090443609699308c929f9ac7dd8dbebcb23669bdcbc55d4742eda20ae91ce793c0ebefe4cc42f40c062b88121e458d824f87dfaea90ad8804c0765c324c428e4be5d39761a25092428b9b5bf56d0b076053bfdea5f1979e46335051dee26b12a8961314acd75738768d94a1408d67f6b2c2d64c9d3e4e47e2263f71890cd2428a62f39e549e993a87ab5ef81f92f9dd12bdcfcbddf0923beed9999889c404a5c76dafee08cf44ab5bfcba588c82318f17cf21361a385dcdf6cbf0274400607e9f0d7e14e4b1cf1203cd7113898c6c79472789851775ddc342266d29196713a1e99456e539d8b25d6dcd1ae571c972fc45f8b1c62adb74fa58720edfa76f49c61d7f3fd6422901dc9d81ddb8bcdd206df9e081cbd90df4dd9bce00d194bdf1a04d532c910eab946087dca65f9a6d1e578dfd3989bfcdc65a74aeaa18f27f75385e4b215adaeeab039401d316ccfee9a35c672846eeaf3acde6f654ccaf2985e1652eb74b13baf0f5595bc78bf4f72c3cb5539b425c4492b
8e393de1203364de2e1a4379b3fc9dc148649595a8238d5f02781e4feaed0167598954359f90493eb9b72b9f453307c1e47b5427127dff5d2728a4cea441852ae11cb667c97f59b0a3eb747803095342fd1af0a80443afc8b9f37421e5682792eeb69f20e5a7385f0651b638509fc06e1d1c716ed5cde7f24650c678d57d8a4a35ac6bc9b33fa67684873ecd02d4dc5c04d49c639e83f9c840c51c99037b984a52ffb70f38b6c448cf8ee3731aaa38d3f02a7c543ded67a38df9d7117e73ea3a7697f3bdd86a8a2f5a504533e18c71286d943df691894f3686d842110e3c799e3bbce78c99b0c8aea529857127b348f6695ceaefda972d6b7a19ca3ad2e10e69dd7ade337f304ba49f49a71873e56021bf74024bc8bb895392f9499764566e09631d6800e6f2470a32e755bff0b3fab625fb643ef43ee580a06567ef3a80fa51607873389797b8137075d7fb240e4ada08c1a640f430ec0eeea22de67b854ca1f28924e9b24d32862aeed73a795107601c70070008343f108de05f3f3864e8613ec006320c12fd82c83c158f04472b859092895cb9ee2d4d52aebb45048b922d6d688e410aa7e60438923be5cc24b530dab5d4bc44a001cfad987e8b91222bda2b41f66dd129e4a8e946107a6673304694cf75d3bb0dcc4feed5aa3f8a6d832dd7ede2d17dba5fed68c4d68d8813000000000000532691acd12ec150f0b62d2c842beeb97c3bd33706e89971296085ac5fc1ba49929c7e46125023fec7d313d1e97e06a81ec622d674d6b16ac57390f87701db99dee253f99e458f220458b4782a78da4ca7ac4b897e4ca84f7b35de06d517bb0e92db1d2e17aa3a585c522612aea40208519d531ca156e82311520b93f9995a8511ff6406430410aaddf801a1b562d7be2b2b48fe7b43c0276f78fa6d2efcdf1b3c95811c6a87465edde9812ac6622c08862b5a6c186f43252f8055f7902159448d89b9121b412c4ad893c2036445a3d4e82441abdd9be7d6ae7525bf892c70a3e7dfe374a99536574e1a04bdb872599c9ce355cbf47cdc5cdd2162538198f8468458bb78735d0b062f8231851af72eba112eec27332eaa3de5a8021179293b03aca803159630d82fd3533c3f93ba8a21ca45b640b40058f3e6ba44971f1bfd3ff54196a76af5760190b90d3c1897067cb4c3584898547c44de6853183a8a239db75d592ed27fc36784cc34d5411dd70ada005790f7e5cb58fbcd34aaa0e59f250c3b4ad4851c351ee1b5afcdafa5f8eb4b6f2541dad8aceb6425b9ce7feeaf8f223b116cfd14487c30c3547d99f57a14926566c7f592542f7c6faedad4ef833d7750b9ad569d3ddd7a44710841902dbbdc8a354b8bd38c5e4c528ca81119aca696ac588d69e360f0a9b5f2e02f4948b12a029e6d
762d9a9bb93d9a8cceb8c4fe1c0d813436e7fee186057248a826c4c594d39dee0419b27a857862c7c1c1959a4802144f76647094bae84f55dd1f9611c86452671fb1df744ed720b3a5764902195302bd09a42379ef4dec9f6c6d850fd58865b1cbd40c7e7826cdbc1a88a6a35558a026185a781acdffef6757a77fcfa7bbaa72e2a713818223cef3b561159a42a41d6c49cc0fe16b5e1f1c3565dac12bf6a753d764f751790a8e6ebf066db87a9b3765e388e23d26fd6b00291d59a08736cc21f382a0a8f0fa85a7d9e99cce18dc1b723fe4ed1400c93a134f173299ce36110492e5e59fbc62e7b0fa45640a00", + "height": 2222000, + "confirmations": 709705, + "vin": [], + "vout": [ + { + "value": 5.4e-7, + "valueZat": 54, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "76a914751e76e8199196d454941c45d1b3a323f1433bd688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1UYsZVJkLPeMjxEtACvSxfWuNmddpWfxzs" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "76a91406afd46bcdfd22ef94ac122aa11f241244a37ecc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1JUxhMSGFmzKY5BTp1PsQwG4Ceq642SmnB" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 2, + "scriptPubKey": { + "asm": "", + "hex": "76a9147dd65592d0ab2fe0d0257d571abf032cd9db93dc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1VLyEX9gpXZdZeVXeuAvqPRPxj8u8qiVHL" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 3, + "scriptPubKey": { + "asm": "", + "hex": "76a914c42e7ef92fdb603af844d064faad95db9bcdfd3d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1bkv9Xq4zCmG7N4QpgAdwaxTt9Eas6Yz2V" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 4, + "scriptPubKey": { + "asm": "", + "hex": "76a9144747e8746cddb33b0f7f95a90f89f89fb387cbb688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1QNW8DJ9SL7YaQXDQVYRqTPCX5gTjEeRjg" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 5, + "scriptPubKey": { + "asm": "", + "hex": "76a9147fda9cf020c16cacf529c87d8de89bfc70b8c9cb88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1VXdiCTHV8VgcBReUhHHkcoFGyDurvzkL4" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 6, + "scriptPubKey": { + "asm": "", + "hex": "76a9145dedfbf9ea599dd4e3ca6a80b333c472fd0b3f6988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1SSFwcYTiLApC5RXEZvNWsLdTPeWQxduZU" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 7, + "scriptPubKey": { + "asm": "", + "hex": "76a9149652d86bedf43ad264362e6e6eba6eb76450812788ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1XaScJtVuFehnJP2dEMDyRVWTkjQXJ4PU7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 8, + "scriptPubKey": { + "asm": "", + "hex": "76a914b46abf4d9e1746e33bcc39cea3de876c29c4adf388ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1aKZWo7shGNnfGMVTiAwKV3LuSN5qrAXUK" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 9, + "scriptPubKey": { + "asm": "", + "hex": "76a914185140bb54704a9e735016faa7a8dbee4449bddc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1L6BZVCojd8Gb79h8D2A1SJdURr89KSyYr" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 10, + "scriptPubKey": { + "asm": "", + "hex": "76a914362995a6e6922a04e0b832a80bc56c33709a42d288ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NozK3N2fVXnV7jqzG4N71zhZn1ofViU3S" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 11, + "scriptPubKey": { + "asm": "", + "hex": "76a914dd100be7d9aea5721158ebde6d6a1fd8fff93bb188ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1e2UZfHarpDAZpcLNvT8BLTBgYPNaS8dd7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 12, + "scriptPubKey": { + "asm": "", + "hex": "76a91457526b1a1534d4bde788253281649fc2e91dc70b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1RqKXZ3gfubxuSDhztysSP7Rp7LZ19WEMi" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 13, + "scriptPubKey": { + "asm": "", + "hex": "76a914726d44b7af8228257c030bafe764d3c5839d5c0288ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1UJdwJoPSkzvFwNpyWB6HyaqheiuQoUbir" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 14, + "scriptPubKey": { + "asm": "", + "hex": "76a9149fc5dbe5efdce10374a4dd4053c93af54021171888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1YSQUKUcABTLRJA7Zh62USV766Z2sk3HLX" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 15, + "scriptPubKey": { + "asm": "", + "hex": "76a91460aa32549d990a09863b8fd4ce611ebd70bb310b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1SgimDdjbdaN5hbE8YoXAvXnoHBSLjCPqy" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 16, + "scriptPubKey": { + "asm": "", + "hex": "76a9144f99bbf75707e44bc2afa65337dece914e817aac88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1R8VZ8eSh9AgBqMgg7KknTLXD77toLnCsq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 17, + "scriptPubKey": { + "asm": "", + "hex": "76a914f0f4189b8cf9f2db0ab8d3a3c009e1823a58842e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1fqebUGGG2Y1VaX5apeSmH8w5qg6fYULdx" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 18, + "scriptPubKey": { + "asm": "", + "hex": "76a9146b3aaefc2a4c3f37ddc733d1868ddc6f0d2c0e9888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1TeacGKXCB1RUUvaPRej4ySBsACxy6RDaA" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 19, + "scriptPubKey": { + "asm": "", + "hex": "76a914385defb0ed10fe95817943ed37b4984f8f4255d688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1P1eNtMLMtKmDt4iV4JnzUJLMGLfS96sn7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 20, + "scriptPubKey": { + "asm": "", + "hex": "76a9148f9dff39a81ee4abcbad2ad8bafff090415a2be888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1Wxyub9LgLu6gdrRcZGYmRYM5nG51YqEqL" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 21, + "scriptPubKey": { + "asm": "", + "hex": "76a914aa2e99eb9e6a61dded3caa3d5d5e21db34c79c2188ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1ZPShrk9BawqCjQPjJyuuDjyWuyVv6GNdG" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 22, + "scriptPubKey": { + "asm": "", + "hex": "76a914500de0c9a7c7777e02ab8e0e86c9f55bda5df75688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1RAtgwvLmXoHCN9qm22QDHxxXymPcdrRPa" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 23, + "scriptPubKey": { + "asm": "", + "hex": "76a914ce6525a78d260330058aaf5a5f99c9c420935daa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1cgvNg8h36BhSGWqAH7JM9s6YsyHqkdLzs" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 24, + "scriptPubKey": { + "asm": "", + "hex": "76a9144d2b45716a15de661a26cf1b6b7865fd4f5eb42c88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1QudseT5VDVMhdBpU1Lakx3PdiifbvpaCg" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 25, + "scriptPubKey": { + "asm": "", + "hex": "76a91411f8d2b7930e3a88a18ae1d9407efc01fc90e9a488ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1KWda8UdDxpKequwdHiCD6EDomaeG2ycZP" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 26, + "scriptPubKey": { + "asm": "", + "hex": "76a91420d637c1a6404d2227f3561fdbaff5a680dba64888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1LsEFb3PkL8ZGFa94bucKgtFDNXqP3UHgd" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 27, + "scriptPubKey": { + "asm": "", + "hex": "76a914de213dbfa5dea6f264528b4aace26d91d1cc3c5b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1e87qZWkY6ZLWv4v9fKu1pzEhS49YrzfCD" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 28, + "scriptPubKey": { + "asm": "", + "hex": "76a914d7fd5d1ba18e281d71b4ff69ffa065b75d8c548b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1dZeobdJ5vjmiTTXFKVn9xMy5KHwpPfQuB" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 29, + "scriptPubKey": { + "asm": "", + "hex": "76a914896007cb039c6648498ba434b2d0ed00837c1a3588ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1WPycvoNZFiszvwaLHYptQN1HKLXPaRd71" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 30, + "scriptPubKey": { + "asm": "", + "hex": "76a9142fbd32c8dd59ee7c17e66cb6ebea7e9846c3040f88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NE2R5uKDUQT62hBAAFMd45oT7gR33rTTJ" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 31, + "scriptPubKey": { + "asm": "", + "hex": "76a914fcd35ddacad9f2d5be5e464639441c6065e6955d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1gvRVgoUteCyWGkjvYimkSD7JNpiEZtRFu" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 32, + "scriptPubKey": { + "asm": "", + "hex": "76a914b933285aef52826902abd32c3d70824346095d7788ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1akrJKTy2zhHuNAiWiz42PUC21nUbeq8qv" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 33, + "scriptPubKey": { + "asm": "", + "hex": "76a914e142ca9bfc2d56cd0adb82f8dc870424767389f788ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1eQg4FMFRNewXy99ehfUA9yeAYdqmiYFAd" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 34, + "scriptPubKey": { + "asm": "", + "hex": "76a9149cb04e0ed442380c217c0ffdaed032645369daaa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1YA6d99YXyMHXhgPCMyXriGmYHS1twZx7B" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 35, + "scriptPubKey": { + "asm": "", + "hex": "76a9142fdf6dcbef16144583a151230ecee67fa3c503d988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NEjRTpdeiJWnH9d8JrZ4sEwuU9epLbiGm" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 36, + "scriptPubKey": { + "asm": "", + "hex": "76a914e7616ca66fd2937f140c57a40c6ce9b4b23fc82e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1ey2opFGssatipNn3x6mziZDeLhSNkR42u" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 37, + "scriptPubKey": { + "asm": "", + "hex": "76a9149855a09366e06d7d30d5ddbe69b64a0adf3ed93488ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1Xm5HyksfRawHNFtuw7rp1zENK3Wj7xjf1" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 38, + "scriptPubKey": { + "asm": "", + "hex": "76a91437b891cec5a1c4e0fd96a4e7c023bb294a76e5ad88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NxEH1CN9kPjjhBhugQq7tNMgNnkC73rTp" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 39, + "scriptPubKey": { + "asm": "", + "hex": "76a9147220c25031708a78712cbee318585135993fcf1a88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1UH4HLngFrdzj7GAZM2Gxth54qeujMt3fa" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 40, + "scriptPubKey": { + "asm": "", + "hex": "76a914c8d8650cd42ded5d598040f328e6ee9ca552286688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1cBaP54hZbYsjgFvuorDdXb48Rpxvom7aW" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 41, + "scriptPubKey": { + "asm": "", + "hex": "76a9149290649ba520a35912dab1733b6f098587e432ef88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1XEZdwj1dgsLYXcKRrk822tnFzsvf3J4HR" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 42, + "scriptPubKey": { + "asm": "", + "hex": "76a914cdba1c12fad7c1f0936be8dc0e7260ef6456a04d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1cdPUnbka6ezHPKoEbBSnH11jAhBAnZKqq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 43, + "scriptPubKey": { + "asm": "", + "hex": "76a914352f5afc63d272a43c568e1d65ce6403d48fe29e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NipYpBwkz4mvkhZkRmT5mdyaiVfYiuzTZ" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 44, + "scriptPubKey": { + "asm": "", + "hex": "76a914daf0dd5a501fcddb72a0ee47444be2f6f3c4c4c388ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1dqFriyamiBkshmCBi7XXdhdSs4epq2zTq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 45, + "scriptPubKey": { + "asm": "", + "hex": "76a91416fe2eecdbb4d50b2d635672c6c0bb6b5925bd7b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1KyBNXBngtKe1U9383zmxaSNfp8Vdoxx72" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 46, + "scriptPubKey": { + "asm": "", + "hex": "76a9141481094ba143f80b9c30eceb20a8d07d29f0e8fa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1Kk26mjwUErXqk8i7EqzzCP2JYLzLPKsyP" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 47, + "scriptPubKey": { + "asm": "", + "hex": "76a914d6791b1b29afe3997a5cdbc40f03d37baed379b988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1dRdgyJcxpwF57HNToszQFPi8EPTNkhbfs" + ] + } + } + ], + "vShieldedSpend": [ + { + "cv": "8d346b7ee882318c51f896e19c7e34d248420f85b460de71978d6739cb9e32fb", + "anchor": "49bac15fac8560297199e80637d33b7cb9ee2b842c2db6f050c12ed1ac912653", + "nullifier": "aaad01e2c9c9fcd7512bb574c1e57020cd8bce9ae032dad5d021105216f24d4f", + "rk": "2263436323d4b48330e5bfc4b6c97876f22603a5998bef999e8438560560f2cc", + "proof": "929c7e46125023fec7d313d1e97e06a81ec622d674d6b16ac57390f87701db99dee253f99e458f220458b4782a78da4ca7ac4b897e4ca84f7b35de06d517bb0e92db1d2e17aa3a585c522612aea40208519d531ca156e82311520b93f9995a8511ff6406430410aaddf801a1b562d7be2b2b48fe7b43c0276f78fa6d2efcdf1b3c95811c6a87465edde9812ac6622c08862b5a6c186f43252f8055f7902159448d89b9121b412c4ad893c2036445a3d4e82441abdd9be7d6ae7525bf892c70a3", + "spendAuthSig": "e7dfe374a99536574e1a04bdb872599c9ce355cbf47cdc5cdd2162538198f8468458bb78735d0b062f8231851af72eba112eec27332eaa3de5a8021179293b03" + } + ], + "vShieldedOutput": [ + { + "cv": "b7bacdba592fe3a23b1a20004a0fd46cbe1a60ce269db415f7d612d1f0a8019d", + "cmu": "fcdeb0f9cce13c81b6ddf4d0e99444c3c48871f4a3242045011d0dac296ed21d", + "ephemeralKey": "dba99ae36b56c0bb4357206b49af753e9081e56a5c4cfece3a13009d56e50277", + "encCiphertext": 
"e5198be82a0b1fa74d86dd16df15f9d67a369945dedf4b5956fc821776f47ebe38e8cda34bfd30021712824d3c005ba2308a2c4ac4717b857930c6cdd9efbc1ac6031d44af41227b2ce0b97545d53c4ca1b8a42e4d88d9949977d7fc95c9364fe12e87deb32120c48943188cfbbef2556c155585a969986d4f52a543bbc4bfc183fb26c77648945298674b6c8126bc9281031936e45a521e08a7c7a7849bd8c5701c537c3dae0f718d909a67eebf2f2282d1a35bad35830be364f4e959bb5c5494a9973ba12fa68f8376a0a6a88d9ee4d403e699c7487b152de82b58086d2784df899da52aa6cc1e5522224cd4e542f7e18c6fd4e9c380640a2be9500b5a9dd3165777e2c4aa372b6af287d67bd3a6cbdcf0dceb5149b37391b3f86648071280ff4525351a916c9f13c070a51f5b5cfa5d809ad46d73667c2dd1ae3246c9374e279361f9be61b5a44abf287fda9f93f8145ef2da95bf12a9bb771f2f98606aae1d3ef9892b8386af2daf5ddfd6f3f362ccb896ded97d74c2ad53bcafc108061711a4df49f55dbdd1af3d512b243fb763b0dae85982fb61f39dd4f7bab1248b4d36ca2aea500e0ebc71d211c60f995ce3e7a30a877d88cc884b862cec34641a6adf7fbfe474ea0e8f73cb8e13eae0d9d6d654b3e926bfe07f11acbceb364f64cb1b964232a245afe0f66daef6dc75f827849972e903a658e7cfb6d49af7e9cf8bce366c0c37a418aed82e324831de5d31a7b5d3d2c0c8fcd9ddaabe142c59aefd583285402f090443609699308c929f9ac7dd8dbebcb23669bdcbc55d4742eda20ae91ce7", + "outCiphertext": "93c0ebefe4cc42f40c062b88121e458d824f87dfaea90ad8804c0765c324c428e4be5d39761a25092428b9b5bf56d0b076053bfdea5f1979e46335051dee26b12a8961314acd75738768d94a1408d67f", + "proof": "aca803159630d82fd3533c3f93ba8a21ca45b640b40058f3e6ba44971f1bfd3ff54196a76af5760190b90d3c1897067cb4c3584898547c44de6853183a8a239db75d592ed27fc36784cc34d5411dd70ada005790f7e5cb58fbcd34aaa0e59f250c3b4ad4851c351ee1b5afcdafa5f8eb4b6f2541dad8aceb6425b9ce7feeaf8f223b116cfd14487c30c3547d99f57a14926566c7f592542f7c6faedad4ef833d7750b9ad569d3ddd7a44710841902dbbdc8a354b8bd38c5e4c528ca81119aca6" + }, + { + "cv": "9d2ff981efb57aa893e949e5392fa62824cd9018f763227ee4e4d3c9642d2c6b", + "cmu": "d12bdcfcbddf0923beed9999889c404a5c76dafee08cf44ab5bfcba588c82318", + "ephemeralKey": "c7c6983811d73c20f11c4b4ee1d7f0e90706407402bf6cdfdc85a36113f27cf1", + 
"encCiphertext": "9472789851775ddc342266d29196713a1e99456e539d8b25d6dcd1ae571c972fc45f8b1c62adb74fa58720edfa76f49c61d7f3fd6422901dc9d81ddb8bcdd206df9e081cbd90df4dd9bce00d194bdf1a04d532c910eab946087dca65f9a6d1e578dfd3989bfcdc65a74aeaa18f27f75385e4b215adaeeab039401d316ccfee9a35c672846eeaf3acde6f654ccaf2985e1652eb74b13baf0f5595bc78bf4f72c3cb5539b425c4492b8e393de1203364de2e1a4379b3fc9dc148649595a8238d5f02781e4feaed0167598954359f90493eb9b72b9f453307c1e47b5427127dff5d2728a4cea441852ae11cb667c97f59b0a3eb747803095342fd1af0a80443afc8b9f37421e5682792eeb69f20e5a7385f0651b638509fc06e1d1c716ed5cde7f24650c678d57d8a4a35ac6bc9b33fa67684873ecd02d4dc5c04d49c639e83f9c840c51c99037b984a52ffb70f38b6c448cf8ee3731aaa38d3f02a7c543ded67a38df9d7117e73ea3a7697f3bdd86a8a2f5a504533e18c71286d943df691894f3686d842110e3c799e3bbce78c99b0c8aea529857127b348f6695ceaefda972d6b7a19ca3ad2e10e69dd7ade337f304ba49f49a71873e56021bf74024bc8bb895392f9499764566e09631d6800e6f2470a32e755bff0b3fab625fb643ef43ee580a06567ef3a80fa51607873389797b8137075d7fb240e4ada08c1a640f430ec0eeea22de67b854ca1f28924e9b24d32862aeed73a795107601c70070008343f108de05f3f3864e8613ec006320c12fd82c83c158f04472b859092895cb9ee2d4d52aebb45", + "outCiphertext": "048b922d6d688e410aa7e60438923be5cc24b530dab5d4bc44a001cfad987e8b91222bda2b41f66dd129e4a8e946107a6673304694cf75d3bb0dcc4feed5aa3f8a6d832dd7ede2d17dba5fed68c4d68d", + "proof": "96ac588d69e360f0a9b5f2e02f4948b12a029e6d762d9a9bb93d9a8cceb8c4fe1c0d813436e7fee186057248a826c4c594d39dee0419b27a857862c7c1c1959a4802144f76647094bae84f55dd1f9611c86452671fb1df744ed720b3a5764902195302bd09a42379ef4dec9f6c6d850fd58865b1cbd40c7e7826cdbc1a88a6a35558a026185a781acdffef6757a77fcfa7bbaa72e2a713818223cef3b561159a42a41d6c49cc0fe16b5e1f1c3565dac12bf6a753d764f751790a8e6ebf066db8" + } + ], + "valueBalance": 0.00005, + "valueBalanceZat": 5000, + "size": 4009, + "time": 1694339033 + } + ], + "time": 1694339033, + "nonce": "ffbc08000000000000000000000200000000000000000000000000008dc7bae7", + "solution": 
"009d9775a8520e71421201f0bea96e2edafdfded9b219b54caefd8f9216f88e2b96a366f5decaf316cab091b522ab2a9fe89e55ea284f9b5b1bd58593f12492fbdb490fe77d92de9f232fe243966c1b1b3d2a666094f71705553af39895ae2dcfbcae0656c7d5a1754216ee48257c9bb5d3518a6515f4b9442aa0db6a04857c456c22a7215d5eb7db85b36436d66241c9195925b06acac6599dface03a86d0c8598dfac42bb770180292afc02234e9b3afb0707b2fe21b585d6f14837d30511ff268a6a3738f088324a3c98e1d788d7233c32574d6ad964fa9311f16744883bbc2ea32bafb29d364b60fd8c037c80feb18366a9ddf4c2346da1d38360d52638aaac7af9124a2c2ca135adea5fdd3939eaa28fa7b0d855c40ace24d042e5b2bfdba96ddb97ebd1ead1f6e57b8c869c931646585f8914d1a66eab20c473d7aaa89164d1f01de26bc7977915a551bf84e5304bca75dc0102b856b9683b54438795a4d6277ba793c0f4359e39107ef7b2a9c590efb512f2558dd03d50eb8619dbd0b39ad6bd4a4f0f4d87e45e66df721c110de6bf38723d5f9fe4be26fd718e0856924b9f7f31421479af030fa03bf48f88512e7db2fb3133e0a0a1449fa35e8b4ce59d41571e4736c2c40f9be98393a22cc62f281dd1f32ee15e231d574dfd93db6b134e441741fe65a1a11cbddbb64206c60c5f5d96a5752b80a7d99c3d1760d09f562b663625d8d0e19073afe2d56cc14e965ded1c15167175ef0e315020fecdbe2451433fdc2dd90827edafa5447274ee83d4e352ea7e71a333285fa56766d5991d8d612ed376ac139d678861d41559133c87510f0a172587ac41096659174a5ab51c1e5c3ca6c340befe78589bacdcd6e90855d7a3d2661ff71eb0fae44b33414623275580fdc4d1f42f532229ec35e256869632537263ad27007c47b5f92b2026232e4a90da202a34b6183cc421195c68b786dc3088dcce0c050acb98713bc6f0279245388909d145a0b28761e93a747ef6947c249c6f713166f235df7232996ead1772e4d61d558433db6bd35f104967359251dcb2ba89e4a7604b4b975027132aee9fc7ebf16a6293cf3ea823b6cb9fa74f6e773eac58e050656f2ba3462aa9e54cef43327d37385b1ed9ea596fd7272054c21efd3a5e6679b45bc27915ae909c30b045c2cf30e55d9220966faa5a89b46b08539acd5e7a535524d1e463ab5949eef73b6482239f82d234e06209e9fcf20b612225f617d25a9dc8669a8747c3f6c317edb5624ea9f80724a66e70dffc2546caf8254fb6d9a9f03103f418d0ed8bfabc2fcb440cc46fa49aaccd111a2538b54a225241db76f08c7f338772d2218cf73355248f54eb2441887f4fc4225e2965824fdf351b10f8f57bb2f5753ddd5a1727d95be3
c307d0d827d7a78930286e75f2dd451d77759667a92f19769b351937a811e668f48fa2e1d05ef7ce4cc87f335ad87727ce3b8038c88fbddd1d44b2c587515d9dcab6275a13620411ca5fb7e36360deff1eb74cbace9dd2579b179a49c08609e86d62cb41fdcbe634fd7fd6d813598cc21a03fcc6ec0c91ec4fbac580bcd1a6714135cc91bb5c82a543d6ff1176e43c76a46fd99f2e89a728d36f0983642b78976d33ecab9d7f81d0e4a7a661ccd372c1d03015c50f6c6680f215920d114bbc71b34187c932a0b28d8fbf32963f94bd4b0d99ae6d05a327b31b1f204171fc0b6edc2d1ee3b6208316e3679975e52d593086807b15487caa4523c87c69e57abfdf2295bbcb5086dad50fd6c100f63614348d2408368f78f6fa839148987b91216ac01ab48b6ddb7d9b7a9de00d09ece100924430d083ae7c25968812077d26e57285c0d1531abd9aaf7ec56d7bd9c16d87353db47c10fde1dee", + "bits": "1c01af61", + "difficulty": 79650795.00685483, + "trees": { + "sapling": { + "size": 72999499 + }, + "orchard": { + "size": 48658043 + } + }, + "previousblockhash": "00000000016a6b2beefb5967219ecaa3dce52c7ab05435a0cb5ded163bc09941", + "nextblockhash": "000000000073705e8066c09043bdb4d2e8eaf5e5ccc83b2d582b6eabdf61de25" +} \ No newline at end of file diff --git a/zebra-rpc/tests/vectors/getblockchaininfo_response.json b/zebra-rpc/tests/vectors/getblockchaininfo_response.json new file mode 100644 index 00000000000..8530107dbe5 --- /dev/null +++ b/zebra-rpc/tests/vectors/getblockchaininfo_response.json @@ -0,0 +1,91 @@ +{ + "chain": "main", + "blocks": 2932117, + "headers": 2932117, + "difficulty": 81052645.47600047, + "verificationprogress": 0.9999996589496057, + "chainwork": 0, + "pruned": false, + "size_on_disk": 268389432417, + "commitments": 0, + "bestblockhash": "00000000005f47661fb9db53214c1206ddb784de28af47b79e6232ff850349e3", + "estimatedheight": 2932118, + "chainSupply": { + "chainValue": 16071065.7905448, + "chainValueZat": 1607106579054480, + "monitored": true + }, + "valuePools": [ + { + "id": "transparent", + "chainValue": 13019486.6863425, + "chainValueZat": 1301948668634250, + "monitored": true + }, + { + "id": "sprout", + "chainValue": 25789.7816951, + 
"chainValueZat": 2578978169510, + "monitored": true + }, + { + "id": "sapling", + "chainValue": 827769.01149315, + "chainValueZat": 82776901149315, + "monitored": true + }, + { + "id": "orchard", + "chainValue": 2159448.18601405, + "chainValueZat": 215944818601405, + "monitored": true + }, + { + "id": "deferred", + "chainValue": 38572.125, + "chainValueZat": 3857212500000, + "monitored": true + } + ], + "upgrades": { + "5ba81b19": { + "name": "Overwinter", + "activationheight": 347500, + "status": "active" + }, + "76b809bb": { + "name": "Sapling", + "activationheight": 419200, + "status": "active" + }, + "2bb40e60": { + "name": "Blossom", + "activationheight": 653600, + "status": "active" + }, + "f5b9230b": { + "name": "Heartwood", + "activationheight": 903000, + "status": "active" + }, + "e9ff75a6": { + "name": "Canopy", + "activationheight": 1046400, + "status": "active" + }, + "c2d6d0b4": { + "name": "NU5", + "activationheight": 1687104, + "status": "active" + }, + "c8e71055": { + "name": "NU6", + "activationheight": 2726400, + "status": "active" + } + }, + "consensus": { + "chaintip": "c8e71055", + "nextblock": "c8e71055" + } +} \ No newline at end of file diff --git a/zebra-rpc/tests/vectors/getblocktemplate_response_template.json b/zebra-rpc/tests/vectors/getblocktemplate_response_template.json new file mode 100644 index 00000000000..78568bf56c5 --- /dev/null +++ b/zebra-rpc/tests/vectors/getblocktemplate_response_template.json @@ -0,0 +1,41 @@ +{ + "capabilities": [ + "proposal" + ], + "version": 4, + "previousblockhash": "000000000091f6c52c116112e6ab784cd41550b5a0fbca1c84e6ab62f619eb2e", + "blockcommitmentshash": "c78325bc39115ce5bfdd75215e0104aacb674e2dc19d06a8fca4f963b52c462c", + "lightclientroothash": "c78325bc39115ce5bfdd75215e0104aacb674e2dc19d06a8fca4f963b52c462c", + "finalsaplingroothash": "c78325bc39115ce5bfdd75215e0104aacb674e2dc19d06a8fca4f963b52c462c", + "defaultroots": { + "merkleroot": 
"c9a6321293a3955b9970750b0fee21e89bc6f8938e51391410c3577ad9d03405", + "chainhistoryroot": "e1c60e427c655cf06400239bc0a42532a993995e7601ef8848c53fd6ea426055", + "authdataroot": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "blockcommitmentshash": "c78325bc39115ce5bfdd75215e0104aacb674e2dc19d06a8fca4f963b52c462c" + }, + "transactions": [], + "coinbasetxn": { + "data": "0400008085202f89010000000000000000000000000000000000000000000000000000000000000000ffffffff05039bbc2c00ffffffff0240597307000000001976a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac20bcbe000000000017a914c20cd5bdf7964ca61764db66bc2531b1792a084d87000000009bbc2c000000000000000000000000", + "hash": "c9a6321293a3955b9970750b0fee21e89bc6f8938e51391410c3577ad9d03405", + "authdigest": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "depends": [], + "fee": 0, + "sigops": 1, + "required": true + }, + "longpollid": "0002931866cc3a357e1747853232000000000000000000", + "target": "0000000001da6700000000000000000000000000000000000000000000000000", + "mintime": 1747847833, + "mutable": [ + "time", + "transactions", + "prevblock" + ], + "noncerange": "00000000ffffffff", + "sigoplimit": 20000, + "sizelimit": 2000000, + "curtime": 1747848159, + "bits": "1c01da67", + "height": 2931867, + "maxtime": 1747853232 +} \ No newline at end of file diff --git a/zebra-rpc/tests/vectors/getrawtransaction_response_true.json b/zebra-rpc/tests/vectors/getrawtransaction_response_true.json new file mode 100644 index 00000000000..050d4981e25 --- /dev/null +++ b/zebra-rpc/tests/vectors/getrawtransaction_response_true.json @@ -0,0 +1,712 @@ +{ + "hex": 
"050000800a27a726b4d0d6c200000000320c2500003036000000000000001976a914751e76e8199196d454941c45d1b3a323f1433bd688ac36000000000000001976a91406afd46bcdfd22ef94ac122aa11f241244a37ecc88ac36000000000000001976a9147dd65592d0ab2fe0d0257d571abf032cd9db93dc88ac36000000000000001976a914c42e7ef92fdb603af844d064faad95db9bcdfd3d88ac36000000000000001976a9144747e8746cddb33b0f7f95a90f89f89fb387cbb688ac36000000000000001976a9147fda9cf020c16cacf529c87d8de89bfc70b8c9cb88ac36000000000000001976a9145dedfbf9ea599dd4e3ca6a80b333c472fd0b3f6988ac36000000000000001976a9149652d86bedf43ad264362e6e6eba6eb76450812788ac36000000000000001976a914b46abf4d9e1746e33bcc39cea3de876c29c4adf388ac36000000000000001976a914185140bb54704a9e735016faa7a8dbee4449bddc88ac36000000000000001976a914362995a6e6922a04e0b832a80bc56c33709a42d288ac36000000000000001976a914dd100be7d9aea5721158ebde6d6a1fd8fff93bb188ac36000000000000001976a91457526b1a1534d4bde788253281649fc2e91dc70b88ac36000000000000001976a914726d44b7af8228257c030bafe764d3c5839d5c0288ac36000000000000001976a9149fc5dbe5efdce10374a4dd4053c93af54021171888ac36000000000000001976a91460aa32549d990a09863b8fd4ce611ebd70bb310b88ac36000000000000001976a9144f99bbf75707e44bc2afa65337dece914e817aac88ac36000000000000001976a914f0f4189b8cf9f2db0ab8d3a3c009e1823a58842e88ac36000000000000001976a9146b3aaefc2a4c3f37ddc733d1868ddc6f0d2c0e9888ac36000000000000001976a914385defb0ed10fe95817943ed37b4984f8f4255d688ac36000000000000001976a9148f9dff39a81ee4abcbad2ad8bafff090415a2be888ac36000000000000001976a914aa2e99eb9e6a61dded3caa3d5d5e21db34c79c2188ac36000000000000001976a914500de0c9a7c7777e02ab8e0e86c9f55bda5df75688ac36000000000000001976a914ce6525a78d260330058aaf5a5f99c9c420935daa88ac36000000000000001976a9144d2b45716a15de661a26cf1b6b7865fd4f5eb42c88ac36000000000000001976a91411f8d2b7930e3a88a18ae1d9407efc01fc90e9a488ac36000000000000001976a91420d637c1a6404d2227f3561fdbaff5a680dba64888ac36000000000000001976a914de213dbfa5dea6f264528b4aace26d91d1cc3c5b88ac36000000000000001976a914d7fd5d1ba18e281d71b4ff69ffa
065b75d8c548b88ac36000000000000001976a914896007cb039c6648498ba434b2d0ed00837c1a3588ac36000000000000001976a9142fbd32c8dd59ee7c17e66cb6ebea7e9846c3040f88ac36000000000000001976a914fcd35ddacad9f2d5be5e464639441c6065e6955d88ac36000000000000001976a914b933285aef52826902abd32c3d70824346095d7788ac36000000000000001976a914e142ca9bfc2d56cd0adb82f8dc870424767389f788ac36000000000000001976a9149cb04e0ed442380c217c0ffdaed032645369daaa88ac36000000000000001976a9142fdf6dcbef16144583a151230ecee67fa3c503d988ac36000000000000001976a914e7616ca66fd2937f140c57a40c6ce9b4b23fc82e88ac36000000000000001976a9149855a09366e06d7d30d5ddbe69b64a0adf3ed93488ac36000000000000001976a91437b891cec5a1c4e0fd96a4e7c023bb294a76e5ad88ac36000000000000001976a9147220c25031708a78712cbee318585135993fcf1a88ac36000000000000001976a914c8d8650cd42ded5d598040f328e6ee9ca552286688ac36000000000000001976a9149290649ba520a35912dab1733b6f098587e432ef88ac36000000000000001976a914cdba1c12fad7c1f0936be8dc0e7260ef6456a04d88ac36000000000000001976a914352f5afc63d272a43c568e1d65ce6403d48fe29e88ac36000000000000001976a914daf0dd5a501fcddb72a0ee47444be2f6f3c4c4c388ac36000000000000001976a91416fe2eecdbb4d50b2d635672c6c0bb6b5925bd7b88ac36000000000000001976a9141481094ba143f80b9c30eceb20a8d07d29f0e8fa88ac36000000000000001976a914d6791b1b29afe3997a5cdbc40f03d37baed379b988ac01fb329ecb39678d9771de60b4850f4248d2347e9ce196f8518c3182e87e6b348d4f4df216521021d0d5da32e09ace8bcd2070e5c174b52b51d7fcc9c9e201adaaccf260055638849e99ef8b99a50326f27678c9b6c4bfe53083b4d42363436322029d01a8f0d112d6f715b49d26ce601abe6cd40f4a00201a3ba2e32f59bacdbab7fcdeb0f9cce13c81b6ddf4d0e99444c3c48871f4a3242045011d0dac296ed21d7702e5569d00133acefe4c5c6ae581903e75af496b205743bbc0566be39aa9dbe5198be82a0b1fa74d86dd16df15f9d67a369945dedf4b5956fc821776f47ebe38e8cda34bfd30021712824d3c005ba2308a2c4ac4717b857930c6cdd9efbc1ac6031d44af41227b2ce0b97545d53c4ca1b8a42e4d88d9949977d7fc95c9364fe12e87deb32120c48943188cfbbef2556c155585a969986d4f52a543bbc4bfc183fb26c77648945298674b6c8126bc9281031936e45a521
e08a7c7a7849bd8c5701c537c3dae0f718d909a67eebf2f2282d1a35bad35830be364f4e959bb5c5494a9973ba12fa68f8376a0a6a88d9ee4d403e699c7487b152de82b58086d2784df899da52aa6cc1e5522224cd4e542f7e18c6fd4e9c380640a2be9500b5a9dd3165777e2c4aa372b6af287d67bd3a6cbdcf0dceb5149b37391b3f86648071280ff4525351a916c9f13c070a51f5b5cfa5d809ad46d73667c2dd1ae3246c9374e279361f9be61b5a44abf287fda9f93f8145ef2da95bf12a9bb771f2f98606aae1d3ef9892b8386af2daf5ddfd6f3f362ccb896ded97d74c2ad53bcafc108061711a4df49f55dbdd1af3d512b243fb763b0dae85982fb61f39dd4f7bab1248b4d36ca2aea500e0ebc71d211c60f995ce3e7a30a877d88cc884b862cec34641a6adf7fbfe474ea0e8f73cb8e13eae0d9d6d654b3e926bfe07f11acbceb364f64cb1b964232a245afe0f66daef6dc75f827849972e903a658e7cfb6d49af7e9cf8bce366c0c37a418aed82e324831de5d31a7b5d3d2c0c8fcd9ddaabe142c59aefd583285402f090443609699308c929f9ac7dd8dbebcb23669bdcbc55d4742eda20ae91ce793c0ebefe4cc42f40c062b88121e458d824f87dfaea90ad8804c0765c324c428e4be5d39761a25092428b9b5bf56d0b076053bfdea5f1979e46335051dee26b12a8961314acd75738768d94a1408d67f6b2c2d64c9d3e4e47e2263f71890cd2428a62f39e549e993a87ab5ef81f92f9dd12bdcfcbddf0923beed9999889c404a5c76dafee08cf44ab5bfcba588c82318f17cf21361a385dcdf6cbf0274400607e9f0d7e14e4b1cf1203cd7113898c6c79472789851775ddc342266d29196713a1e99456e539d8b25d6dcd1ae571c972fc45f8b1c62adb74fa58720edfa76f49c61d7f3fd6422901dc9d81ddb8bcdd206df9e081cbd90df4dd9bce00d194bdf1a04d532c910eab946087dca65f9a6d1e578dfd3989bfcdc65a74aeaa18f27f75385e4b215adaeeab039401d316ccfee9a35c672846eeaf3acde6f654ccaf2985e1652eb74b13baf0f5595bc78bf4f72c3cb5539b425c4492b8e393de1203364de2e1a4379b3fc9dc148649595a8238d5f02781e4feaed0167598954359f90493eb9b72b9f453307c1e47b5427127dff5d2728a4cea441852ae11cb667c97f59b0a3eb747803095342fd1af0a80443afc8b9f37421e5682792eeb69f20e5a7385f0651b638509fc06e1d1c716ed5cde7f24650c678d57d8a4a35ac6bc9b33fa67684873ecd02d4dc5c04d49c639e83f9c840c51c99037b984a52ffb70f38b6c448cf8ee3731aaa38d3f02a7c543ded67a38df9d7117e73ea3a7697f3bdd86a8a2f5a504533e18c71286d943df691894f3686d842110e3c799e3bbce78
c99b0c8aea529857127b348f6695ceaefda972d6b7a19ca3ad2e10e69dd7ade337f304ba49f49a71873e56021bf74024bc8bb895392f9499764566e09631d6800e6f2470a32e755bff0b3fab625fb643ef43ee580a06567ef3a80fa51607873389797b8137075d7fb240e4ada08c1a640f430ec0eeea22de67b854ca1f28924e9b24d32862aeed73a795107601c70070008343f108de05f3f3864e8613ec006320c12fd82c83c158f04472b859092895cb9ee2d4d52aebb45048b922d6d688e410aa7e60438923be5cc24b530dab5d4bc44a001cfad987e8b91222bda2b41f66dd129e4a8e946107a6673304694cf75d3bb0dcc4feed5aa3f8a6d832dd7ede2d17dba5fed68c4d68d8813000000000000532691acd12ec150f0b62d2c842beeb97c3bd33706e89971296085ac5fc1ba49929c7e46125023fec7d313d1e97e06a81ec622d674d6b16ac57390f87701db99dee253f99e458f220458b4782a78da4ca7ac4b897e4ca84f7b35de06d517bb0e92db1d2e17aa3a585c522612aea40208519d531ca156e82311520b93f9995a8511ff6406430410aaddf801a1b562d7be2b2b48fe7b43c0276f78fa6d2efcdf1b3c95811c6a87465edde9812ac6622c08862b5a6c186f43252f8055f7902159448d89b9121b412c4ad893c2036445a3d4e82441abdd9be7d6ae7525bf892c70a3e7dfe374a99536574e1a04bdb872599c9ce355cbf47cdc5cdd2162538198f8468458bb78735d0b062f8231851af72eba112eec27332eaa3de5a8021179293b03aca803159630d82fd3533c3f93ba8a21ca45b640b40058f3e6ba44971f1bfd3ff54196a76af5760190b90d3c1897067cb4c3584898547c44de6853183a8a239db75d592ed27fc36784cc34d5411dd70ada005790f7e5cb58fbcd34aaa0e59f250c3b4ad4851c351ee1b5afcdafa5f8eb4b6f2541dad8aceb6425b9ce7feeaf8f223b116cfd14487c30c3547d99f57a14926566c7f592542f7c6faedad4ef833d7750b9ad569d3ddd7a44710841902dbbdc8a354b8bd38c5e4c528ca81119aca696ac588d69e360f0a9b5f2e02f4948b12a029e6d762d9a9bb93d9a8cceb8c4fe1c0d813436e7fee186057248a826c4c594d39dee0419b27a857862c7c1c1959a4802144f76647094bae84f55dd1f9611c86452671fb1df744ed720b3a5764902195302bd09a42379ef4dec9f6c6d850fd58865b1cbd40c7e7826cdbc1a88a6a35558a026185a781acdffef6757a77fcfa7bbaa72e2a713818223cef3b561159a42a41d6c49cc0fe16b5e1f1c3565dac12bf6a753d764f751790a8e6ebf066db87a9b3765e388e23d26fd6b00291d59a08736cc21f382a0a8f0fa85a7d9e99cce18dc1b723fe4ed1400c93a134f173299ce36110492e5e59
fbc62e7b0fa45640a00", + "height": 2222000, + "confirmations": 709819, + "vin": [], + "vout": [ + { + "value": 5.4e-7, + "valueZat": 54, + "n": 0, + "scriptPubKey": { + "asm": "", + "hex": "76a914751e76e8199196d454941c45d1b3a323f1433bd688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1UYsZVJkLPeMjxEtACvSxfWuNmddpWfxzs" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 1, + "scriptPubKey": { + "asm": "", + "hex": "76a91406afd46bcdfd22ef94ac122aa11f241244a37ecc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1JUxhMSGFmzKY5BTp1PsQwG4Ceq642SmnB" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 2, + "scriptPubKey": { + "asm": "", + "hex": "76a9147dd65592d0ab2fe0d0257d571abf032cd9db93dc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1VLyEX9gpXZdZeVXeuAvqPRPxj8u8qiVHL" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 3, + "scriptPubKey": { + "asm": "", + "hex": "76a914c42e7ef92fdb603af844d064faad95db9bcdfd3d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1bkv9Xq4zCmG7N4QpgAdwaxTt9Eas6Yz2V" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 4, + "scriptPubKey": { + "asm": "", + "hex": "76a9144747e8746cddb33b0f7f95a90f89f89fb387cbb688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1QNW8DJ9SL7YaQXDQVYRqTPCX5gTjEeRjg" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 5, + "scriptPubKey": { + "asm": "", + "hex": "76a9147fda9cf020c16cacf529c87d8de89bfc70b8c9cb88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1VXdiCTHV8VgcBReUhHHkcoFGyDurvzkL4" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 6, + "scriptPubKey": { + "asm": "", + "hex": "76a9145dedfbf9ea599dd4e3ca6a80b333c472fd0b3f6988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1SSFwcYTiLApC5RXEZvNWsLdTPeWQxduZU" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 7, + "scriptPubKey": { + "asm": "", + "hex": "76a9149652d86bedf43ad264362e6e6eba6eb76450812788ac", + "reqSigs": 1, + "type": 
"", + "addresses": [ + "t1XaScJtVuFehnJP2dEMDyRVWTkjQXJ4PU7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 8, + "scriptPubKey": { + "asm": "", + "hex": "76a914b46abf4d9e1746e33bcc39cea3de876c29c4adf388ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1aKZWo7shGNnfGMVTiAwKV3LuSN5qrAXUK" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 9, + "scriptPubKey": { + "asm": "", + "hex": "76a914185140bb54704a9e735016faa7a8dbee4449bddc88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1L6BZVCojd8Gb79h8D2A1SJdURr89KSyYr" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 10, + "scriptPubKey": { + "asm": "", + "hex": "76a914362995a6e6922a04e0b832a80bc56c33709a42d288ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NozK3N2fVXnV7jqzG4N71zhZn1ofViU3S" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 11, + "scriptPubKey": { + "asm": "", + "hex": "76a914dd100be7d9aea5721158ebde6d6a1fd8fff93bb188ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1e2UZfHarpDAZpcLNvT8BLTBgYPNaS8dd7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 12, + "scriptPubKey": { + "asm": "", + "hex": "76a91457526b1a1534d4bde788253281649fc2e91dc70b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1RqKXZ3gfubxuSDhztysSP7Rp7LZ19WEMi" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 13, + "scriptPubKey": { + "asm": "", + "hex": "76a914726d44b7af8228257c030bafe764d3c5839d5c0288ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1UJdwJoPSkzvFwNpyWB6HyaqheiuQoUbir" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 14, + "scriptPubKey": { + "asm": "", + "hex": "76a9149fc5dbe5efdce10374a4dd4053c93af54021171888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1YSQUKUcABTLRJA7Zh62USV766Z2sk3HLX" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 15, + "scriptPubKey": { + "asm": "", + "hex": "76a91460aa32549d990a09863b8fd4ce611ebd70bb310b88ac", + "reqSigs": 1, + "type": "", + 
"addresses": [ + "t1SgimDdjbdaN5hbE8YoXAvXnoHBSLjCPqy" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 16, + "scriptPubKey": { + "asm": "", + "hex": "76a9144f99bbf75707e44bc2afa65337dece914e817aac88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1R8VZ8eSh9AgBqMgg7KknTLXD77toLnCsq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 17, + "scriptPubKey": { + "asm": "", + "hex": "76a914f0f4189b8cf9f2db0ab8d3a3c009e1823a58842e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1fqebUGGG2Y1VaX5apeSmH8w5qg6fYULdx" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 18, + "scriptPubKey": { + "asm": "", + "hex": "76a9146b3aaefc2a4c3f37ddc733d1868ddc6f0d2c0e9888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1TeacGKXCB1RUUvaPRej4ySBsACxy6RDaA" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 19, + "scriptPubKey": { + "asm": "", + "hex": "76a914385defb0ed10fe95817943ed37b4984f8f4255d688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1P1eNtMLMtKmDt4iV4JnzUJLMGLfS96sn7" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 20, + "scriptPubKey": { + "asm": "", + "hex": "76a9148f9dff39a81ee4abcbad2ad8bafff090415a2be888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1Wxyub9LgLu6gdrRcZGYmRYM5nG51YqEqL" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 21, + "scriptPubKey": { + "asm": "", + "hex": "76a914aa2e99eb9e6a61dded3caa3d5d5e21db34c79c2188ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1ZPShrk9BawqCjQPjJyuuDjyWuyVv6GNdG" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 22, + "scriptPubKey": { + "asm": "", + "hex": "76a914500de0c9a7c7777e02ab8e0e86c9f55bda5df75688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1RAtgwvLmXoHCN9qm22QDHxxXymPcdrRPa" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 23, + "scriptPubKey": { + "asm": "", + "hex": "76a914ce6525a78d260330058aaf5a5f99c9c420935daa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ 
+ "t1cgvNg8h36BhSGWqAH7JM9s6YsyHqkdLzs" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 24, + "scriptPubKey": { + "asm": "", + "hex": "76a9144d2b45716a15de661a26cf1b6b7865fd4f5eb42c88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1QudseT5VDVMhdBpU1Lakx3PdiifbvpaCg" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 25, + "scriptPubKey": { + "asm": "", + "hex": "76a91411f8d2b7930e3a88a18ae1d9407efc01fc90e9a488ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1KWda8UdDxpKequwdHiCD6EDomaeG2ycZP" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 26, + "scriptPubKey": { + "asm": "", + "hex": "76a91420d637c1a6404d2227f3561fdbaff5a680dba64888ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1LsEFb3PkL8ZGFa94bucKgtFDNXqP3UHgd" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 27, + "scriptPubKey": { + "asm": "", + "hex": "76a914de213dbfa5dea6f264528b4aace26d91d1cc3c5b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1e87qZWkY6ZLWv4v9fKu1pzEhS49YrzfCD" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 28, + "scriptPubKey": { + "asm": "", + "hex": "76a914d7fd5d1ba18e281d71b4ff69ffa065b75d8c548b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1dZeobdJ5vjmiTTXFKVn9xMy5KHwpPfQuB" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 29, + "scriptPubKey": { + "asm": "", + "hex": "76a914896007cb039c6648498ba434b2d0ed00837c1a3588ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1WPycvoNZFiszvwaLHYptQN1HKLXPaRd71" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 30, + "scriptPubKey": { + "asm": "", + "hex": "76a9142fbd32c8dd59ee7c17e66cb6ebea7e9846c3040f88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NE2R5uKDUQT62hBAAFMd45oT7gR33rTTJ" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 31, + "scriptPubKey": { + "asm": "", + "hex": "76a914fcd35ddacad9f2d5be5e464639441c6065e6955d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1gvRVgoUteCyWGkjvYimkSD7JNpiEZtRFu" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 32, + "scriptPubKey": { + "asm": "", + "hex": "76a914b933285aef52826902abd32c3d70824346095d7788ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1akrJKTy2zhHuNAiWiz42PUC21nUbeq8qv" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 33, + "scriptPubKey": { + "asm": "", + "hex": "76a914e142ca9bfc2d56cd0adb82f8dc870424767389f788ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1eQg4FMFRNewXy99ehfUA9yeAYdqmiYFAd" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 34, + "scriptPubKey": { + "asm": "", + "hex": "76a9149cb04e0ed442380c217c0ffdaed032645369daaa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1YA6d99YXyMHXhgPCMyXriGmYHS1twZx7B" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 35, + "scriptPubKey": { + "asm": "", + "hex": "76a9142fdf6dcbef16144583a151230ecee67fa3c503d988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NEjRTpdeiJWnH9d8JrZ4sEwuU9epLbiGm" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 36, + "scriptPubKey": { + "asm": "", + "hex": "76a914e7616ca66fd2937f140c57a40c6ce9b4b23fc82e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1ey2opFGssatipNn3x6mziZDeLhSNkR42u" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 37, + "scriptPubKey": { + "asm": "", + "hex": "76a9149855a09366e06d7d30d5ddbe69b64a0adf3ed93488ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1Xm5HyksfRawHNFtuw7rp1zENK3Wj7xjf1" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 38, + "scriptPubKey": { + "asm": "", + "hex": "76a91437b891cec5a1c4e0fd96a4e7c023bb294a76e5ad88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NxEH1CN9kPjjhBhugQq7tNMgNnkC73rTp" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 39, + "scriptPubKey": { + "asm": "", + "hex": "76a9147220c25031708a78712cbee318585135993fcf1a88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1UH4HLngFrdzj7GAZM2Gxth54qeujMt3fa" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 40, + "scriptPubKey": { + "asm": "", + "hex": "76a914c8d8650cd42ded5d598040f328e6ee9ca552286688ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1cBaP54hZbYsjgFvuorDdXb48Rpxvom7aW" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 41, + "scriptPubKey": { + "asm": "", + "hex": "76a9149290649ba520a35912dab1733b6f098587e432ef88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1XEZdwj1dgsLYXcKRrk822tnFzsvf3J4HR" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 42, + "scriptPubKey": { + "asm": "", + "hex": "76a914cdba1c12fad7c1f0936be8dc0e7260ef6456a04d88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1cdPUnbka6ezHPKoEbBSnH11jAhBAnZKqq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 43, + "scriptPubKey": { + "asm": "", + "hex": "76a914352f5afc63d272a43c568e1d65ce6403d48fe29e88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1NipYpBwkz4mvkhZkRmT5mdyaiVfYiuzTZ" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 44, + "scriptPubKey": { + "asm": "", + "hex": "76a914daf0dd5a501fcddb72a0ee47444be2f6f3c4c4c388ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1dqFriyamiBkshmCBi7XXdhdSs4epq2zTq" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 45, + "scriptPubKey": { + "asm": "", + "hex": "76a91416fe2eecdbb4d50b2d635672c6c0bb6b5925bd7b88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1KyBNXBngtKe1U9383zmxaSNfp8Vdoxx72" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 46, + "scriptPubKey": { + "asm": "", + "hex": "76a9141481094ba143f80b9c30eceb20a8d07d29f0e8fa88ac", + "reqSigs": 1, + "type": "", + "addresses": [ + "t1Kk26mjwUErXqk8i7EqzzCP2JYLzLPKsyP" + ] + } + }, + { + "value": 5.4e-7, + "valueZat": 54, + "n": 47, + "scriptPubKey": { + "asm": "", + "hex": "76a914d6791b1b29afe3997a5cdbc40f03d37baed379b988ac", + "reqSigs": 1, + "type": "", + "addresses": [ + 
"t1dRdgyJcxpwF57HNToszQFPi8EPTNkhbfs" + ] + } + } + ], + "vShieldedSpend": [ + { + "cv": "8d346b7ee882318c51f896e19c7e34d248420f85b460de71978d6739cb9e32fb", + "anchor": "49bac15fac8560297199e80637d33b7cb9ee2b842c2db6f050c12ed1ac912653", + "nullifier": "aaad01e2c9c9fcd7512bb574c1e57020cd8bce9ae032dad5d021105216f24d4f", + "rk": "2263436323d4b48330e5bfc4b6c97876f22603a5998bef999e8438560560f2cc", + "proof": "929c7e46125023fec7d313d1e97e06a81ec622d674d6b16ac57390f87701db99dee253f99e458f220458b4782a78da4ca7ac4b897e4ca84f7b35de06d517bb0e92db1d2e17aa3a585c522612aea40208519d531ca156e82311520b93f9995a8511ff6406430410aaddf801a1b562d7be2b2b48fe7b43c0276f78fa6d2efcdf1b3c95811c6a87465edde9812ac6622c08862b5a6c186f43252f8055f7902159448d89b9121b412c4ad893c2036445a3d4e82441abdd9be7d6ae7525bf892c70a3", + "spendAuthSig": "e7dfe374a99536574e1a04bdb872599c9ce355cbf47cdc5cdd2162538198f8468458bb78735d0b062f8231851af72eba112eec27332eaa3de5a8021179293b03" + } + ], + "vShieldedOutput": [ + { + "cv": "b7bacdba592fe3a23b1a20004a0fd46cbe1a60ce269db415f7d612d1f0a8019d", + "cmu": "fcdeb0f9cce13c81b6ddf4d0e99444c3c48871f4a3242045011d0dac296ed21d", + "ephemeralKey": "dba99ae36b56c0bb4357206b49af753e9081e56a5c4cfece3a13009d56e50277", + "encCiphertext": 
"e5198be82a0b1fa74d86dd16df15f9d67a369945dedf4b5956fc821776f47ebe38e8cda34bfd30021712824d3c005ba2308a2c4ac4717b857930c6cdd9efbc1ac6031d44af41227b2ce0b97545d53c4ca1b8a42e4d88d9949977d7fc95c9364fe12e87deb32120c48943188cfbbef2556c155585a969986d4f52a543bbc4bfc183fb26c77648945298674b6c8126bc9281031936e45a521e08a7c7a7849bd8c5701c537c3dae0f718d909a67eebf2f2282d1a35bad35830be364f4e959bb5c5494a9973ba12fa68f8376a0a6a88d9ee4d403e699c7487b152de82b58086d2784df899da52aa6cc1e5522224cd4e542f7e18c6fd4e9c380640a2be9500b5a9dd3165777e2c4aa372b6af287d67bd3a6cbdcf0dceb5149b37391b3f86648071280ff4525351a916c9f13c070a51f5b5cfa5d809ad46d73667c2dd1ae3246c9374e279361f9be61b5a44abf287fda9f93f8145ef2da95bf12a9bb771f2f98606aae1d3ef9892b8386af2daf5ddfd6f3f362ccb896ded97d74c2ad53bcafc108061711a4df49f55dbdd1af3d512b243fb763b0dae85982fb61f39dd4f7bab1248b4d36ca2aea500e0ebc71d211c60f995ce3e7a30a877d88cc884b862cec34641a6adf7fbfe474ea0e8f73cb8e13eae0d9d6d654b3e926bfe07f11acbceb364f64cb1b964232a245afe0f66daef6dc75f827849972e903a658e7cfb6d49af7e9cf8bce366c0c37a418aed82e324831de5d31a7b5d3d2c0c8fcd9ddaabe142c59aefd583285402f090443609699308c929f9ac7dd8dbebcb23669bdcbc55d4742eda20ae91ce7", + "outCiphertext": "93c0ebefe4cc42f40c062b88121e458d824f87dfaea90ad8804c0765c324c428e4be5d39761a25092428b9b5bf56d0b076053bfdea5f1979e46335051dee26b12a8961314acd75738768d94a1408d67f", + "proof": "aca803159630d82fd3533c3f93ba8a21ca45b640b40058f3e6ba44971f1bfd3ff54196a76af5760190b90d3c1897067cb4c3584898547c44de6853183a8a239db75d592ed27fc36784cc34d5411dd70ada005790f7e5cb58fbcd34aaa0e59f250c3b4ad4851c351ee1b5afcdafa5f8eb4b6f2541dad8aceb6425b9ce7feeaf8f223b116cfd14487c30c3547d99f57a14926566c7f592542f7c6faedad4ef833d7750b9ad569d3ddd7a44710841902dbbdc8a354b8bd38c5e4c528ca81119aca6" + }, + { + "cv": "9d2ff981efb57aa893e949e5392fa62824cd9018f763227ee4e4d3c9642d2c6b", + "cmu": "d12bdcfcbddf0923beed9999889c404a5c76dafee08cf44ab5bfcba588c82318", + "ephemeralKey": "c7c6983811d73c20f11c4b4ee1d7f0e90706407402bf6cdfdc85a36113f27cf1", + 
"encCiphertext": "9472789851775ddc342266d29196713a1e99456e539d8b25d6dcd1ae571c972fc45f8b1c62adb74fa58720edfa76f49c61d7f3fd6422901dc9d81ddb8bcdd206df9e081cbd90df4dd9bce00d194bdf1a04d532c910eab946087dca65f9a6d1e578dfd3989bfcdc65a74aeaa18f27f75385e4b215adaeeab039401d316ccfee9a35c672846eeaf3acde6f654ccaf2985e1652eb74b13baf0f5595bc78bf4f72c3cb5539b425c4492b8e393de1203364de2e1a4379b3fc9dc148649595a8238d5f02781e4feaed0167598954359f90493eb9b72b9f453307c1e47b5427127dff5d2728a4cea441852ae11cb667c97f59b0a3eb747803095342fd1af0a80443afc8b9f37421e5682792eeb69f20e5a7385f0651b638509fc06e1d1c716ed5cde7f24650c678d57d8a4a35ac6bc9b33fa67684873ecd02d4dc5c04d49c639e83f9c840c51c99037b984a52ffb70f38b6c448cf8ee3731aaa38d3f02a7c543ded67a38df9d7117e73ea3a7697f3bdd86a8a2f5a504533e18c71286d943df691894f3686d842110e3c799e3bbce78c99b0c8aea529857127b348f6695ceaefda972d6b7a19ca3ad2e10e69dd7ade337f304ba49f49a71873e56021bf74024bc8bb895392f9499764566e09631d6800e6f2470a32e755bff0b3fab625fb643ef43ee580a06567ef3a80fa51607873389797b8137075d7fb240e4ada08c1a640f430ec0eeea22de67b854ca1f28924e9b24d32862aeed73a795107601c70070008343f108de05f3f3864e8613ec006320c12fd82c83c158f04472b859092895cb9ee2d4d52aebb45", + "outCiphertext": "048b922d6d688e410aa7e60438923be5cc24b530dab5d4bc44a001cfad987e8b91222bda2b41f66dd129e4a8e946107a6673304694cf75d3bb0dcc4feed5aa3f8a6d832dd7ede2d17dba5fed68c4d68d", + "proof": "96ac588d69e360f0a9b5f2e02f4948b12a029e6d762d9a9bb93d9a8cceb8c4fe1c0d813436e7fee186057248a826c4c594d39dee0419b27a857862c7c1c1959a4802144f76647094bae84f55dd1f9611c86452671fb1df744ed720b3a5764902195302bd09a42379ef4dec9f6c6d850fd58865b1cbd40c7e7826cdbc1a88a6a35558a026185a781acdffef6757a77fcfa7bbaa72e2a713818223cef3b561159a42a41d6c49cc0fe16b5e1f1c3565dac12bf6a753d764f751790a8e6ebf066db8" + } + ], + "valueBalance": 0.00005, + "valueBalanceZat": 5000, + "size": 4009, + "time": 1694339033 +} \ No newline at end of file diff --git a/zebra-rpc/tests/vectors/mod.rs b/zebra-rpc/tests/vectors/mod.rs new file mode 100644 index 
00000000000..8778a2cfcb4 --- /dev/null +++ b/zebra-rpc/tests/vectors/mod.rs @@ -0,0 +1,11 @@ +pub const GET_BLOCKCHAIN_INFO_RESPONSE: &str = include_str!("getblockchaininfo_response.json"); + +pub const GET_BLOCK_RESPONSE_1: &str = include_str!("getblock_response_1.json"); + +pub const GET_BLOCK_RESPONSE_2: &str = include_str!("getblock_response_2.json"); + +pub const GET_RAW_TRANSACTION_RESPONSE_TRUE: &str = + include_str!("getrawtransaction_response_true.json"); + +pub const GET_BLOCK_TEMPLATE_RESPONSE_TEMPLATE: &str = + include_str!("getblocktemplate_response_template.json"); From 92958f6d9df45d8a6f1f9c4e68362727c7a235d7 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 4 Jun 2025 04:03:46 +0100 Subject: [PATCH 192/245] fix(ci): handle tests that save state without requiring a cached disk (#9582) - Pass empty disk_suffix when no input disk needed to skip disk search - Fix job dependency handling when get-disk-name runs but finds no disk - Use RUNNING_DB_VERSION for image naming instead of less reliable STATE_VERSION - Add validation for version extraction in image creation step Fixes sync-to-checkpoint test failing with "No branch disk found" error when get-disk-name runs for state_version but no input disk is needed. 
--- .../sub-deploy-integration-tests-gcp.yml | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 7323d9c5506..5a34734bade 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -116,7 +116,7 @@ jobs: with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} disk_prefix: ${{ inputs.needs_lwd_state && 'lwd-cache' || inputs.needs_zebra_state && 'zebrad-cache' }} - disk_suffix: ${{ inputs.disk_suffix }} + disk_suffix: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && inputs.disk_suffix || '' }} test_id: ${{ inputs.test_id }} # Show all the test logs, then follow the logs of the test we just launched, until it finishes. @@ -127,11 +127,11 @@ jobs: name: Run ${{ inputs.test_id }} test runs-on: zfnd-runners needs: [ get-disk-name ] - if: ${{ !cancelled() && !failure() }} + if: ${{ !cancelled() && !failure() && (needs.get-disk-name.result == 'success' || needs.get-disk-name.result == 'skipped') }} timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} outputs: - cached_disk_name: ${{ needs.get-disk-name.outputs.cached_disk_name }} - state_version: ${{ needs.get-disk-name.outputs.state_version }} + cached_disk_name: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }} + state_version: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.state_version || '' }} env: CACHED_DISK_NAME: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }} permissions: @@ -659,14 +659,30 @@ jobs: run: | MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT)) if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" 
]]; then + + # Use RUNNING_DB_VERSION for image naming (more reliable than STATE_VERSION) + # Extract just the major version number for the image name + IMAGE_VERSION_FOR_NAME=${RUNNING_DB_VERSION#v} # Remove v prefix + IMAGE_VERSION_FOR_NAME=${IMAGE_VERSION_FOR_NAME%%-*} # Keep only major version (before first dash) + + # Validate that we have a version number + if [[ -z $IMAGE_VERSION_FOR_NAME ]] || [[ ! $IMAGE_VERSION_FOR_NAME =~ ^[0-9]+$ ]]; then + echo "ERROR: Invalid version extracted for image naming: $IMAGE_VERSION_FOR_NAME" + echo "RUNNING_DB_VERSION was: $RUNNING_DB_VERSION" + echo "STATE_VERSION was: ${{ env.STATE_VERSION }}" + exit 1 + fi + + echo "Using version $IMAGE_VERSION_FOR_NAME for image naming (from RUNNING_DB_VERSION: $RUNNING_DB_VERSION)" + gcloud compute images create \ - "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ + "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${IMAGE_VERSION_FOR_NAME}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ vars.GCP_ZONE }} \ --storage-location=us \ --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ - --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},branch=${{ env.GITHUB_REF_SLUG_URL }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ 
inputs.app_name }}" + --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},branch=${{ env.GITHUB_REF_SLUG_URL }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${IMAGE_VERSION_FOR_NAME},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" else echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME" fi From bcb52f8548a3863bea5f9b9f39fa762fc28ffcbb Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 4 Jun 2025 05:25:00 +0100 Subject: [PATCH 193/245] refactor(ci): simplify GCP integration with `create-with-container` (#9479) * ci(tests): use create-with-container for integration test deployment Refactors the `sub-deploy-integration-tests-gcp.yml` workflow to launch the Zebra test container directly using `gcloud compute instances create-with-container`. This replaces the previous method of launching a `busybox` container and then using `docker run` via SSH. Key changes include: - Replaced `busybox` + `docker run` with `create-with-container` using the specific test image (`${{ vars.CI_IMAGE_NAME }}`). - Removed separate "Format volume" and "Launch test" steps. - Configured container environment variables and disk mounts directly via `gcloud` parameters. - Reverted log checking and retrieval steps back to the SSH + `docker logs`/`docker wait` approach after experimenting with Cloud Logging, adapting them to find the container ID dynamically. 
This aligns the instance creation part more closely with the `cd-deploy-nodes-gcp.yml` workflow and removes the indirection layer of the `busybox` container, while retaining the familiar SSH-based log monitoring. * chore: fix * fix(deploy): use trimmed image * fix(deploy): use specific dynamic name * fix: typo * fix(deploy): do not double wait for logs * fix: better deployment configurations * chore: exit status handling to avoid false-positives * Update .github/workflows/sub-deploy-integration-tests-gcp.yml Co-authored-by: Arya --------- Co-authored-by: Arya --- .../sub-ci-integration-tests-gcp.yml | 28 +- .../sub-deploy-integration-tests-gcp.yml | 248 ++++++++---------- 2 files changed, 123 insertions(+), 153 deletions(-) diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 0eaf0281fe4..996ebb059e0 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -79,7 +79,7 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_DISK_REBUILD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -108,7 +108,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_CHECKPOINT_SYNC=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ 
-138,7 +138,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=Mainnet,FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -178,7 +178,7 @@ jobs: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_UPDATE_SYNC=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true @@ -209,7 +209,7 @@ jobs: test_id: checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=Mainnet,GENERATE_CHECKPOINTS_MAINNET=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false @@ -241,7 +241,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. 
- test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=Testnet,FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. @@ -285,7 +285,7 @@ jobs: app_name: zebrad test_id: checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=Testnet,GENERATE_CHECKPOINTS_TESTNET=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed @@ -316,7 +316,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_LWD_FULL_SYNC=1,ZEBRA_TEST_LIGHTWALLETD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra,LWD_CACHE_DIR=/home/zebra/.cache/lwd" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -351,7 +351,7 @@ jobs: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK 
}},TEST_LWD_UPDATE_SYNC=1,ZEBRA_TEST_LIGHTWALLETD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra,LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: true @@ -379,7 +379,7 @@ jobs: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_LWD_RPC_CALL=1,ZEBRA_TEST_LIGHTWALLETD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true saves_to_disk: false secrets: inherit @@ -401,7 +401,7 @@ jobs: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_LWD_TRANSACTIONS=1,ZEBRA_TEST_LIGHTWALLETD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra,LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -424,7 +424,7 @@ jobs: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra -e LWD_CACHE_DIR=/home/zebra/.cache/lwd" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_LWD_GRPC=1,ZEBRA_TEST_LIGHTWALLETD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra,LWD_CACHE_DIR=/home/zebra/.cache/lwd" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -451,7 +451,7 @@ jobs: app_name: zebrad test_id: 
get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_GET_BLOCK_TEMPLATE=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -474,7 +474,7 @@ jobs: app_name: zebrad test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" + test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_SUBMIT_BLOCK=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 5a34734bade..8527a7be17f 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -178,9 +178,26 @@ jobs: - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.4 - # Create a Compute Engine virtual machine and attach a cached state disk using the - # $CACHED_DISK_NAME env as the source image to populate the disk cached state - # if the test needs it. + # Disk Mounting Logic Explanation: + # + # The following step creates a GCP instance using create-with-container. + # The $CONTAINER_MOUNT_DISKS variable, constructed within the run script, + # defines how the created persistent disk (specified in $DISK_PARAMS) + # is mounted into the test container using --container-mount-disk. 
+ # + # If the test needs Lightwalletd state (inputs.needs_lwd_state is true or test_id is lwd-full-sync), + # the same persistent disk is mounted to both the Zebra state path (inputs.zebra_state_dir) + # and the Lightwalletd state path (inputs.lwd_state_dir). + # + # Using a single disk simplifies the VM and container setup. + # Mounting the same disk to multiple paths doesn't cause conflicts because Zebra and + # lightwalletd create different subdirectories for their data within the mounted volume. + # (However, Zebra, lightwalletd, and the test harness must not delete the whole cache directory root.) + # + # The container mount paths (inputs.zebra_state_dir and inputs.lwd_state_dir) must match + # the paths expected by the tests in Rust (also configured in ci-unit-tests-docker.yml). + # The application inside the container will typically use environment variables (like those set in + # $CONTAINER_ENV) or these known paths to access the state data. - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance run: | @@ -189,103 +206,41 @@ jobs: if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" fi + + # Mount the disk(s) to the container + CONTAINER_MOUNT_DISKS="--container-mount-disk=mount-path=${{ inputs.zebra_state_dir }},name=${NAME},mode=rw" + # Mount the same disk to the lwd path if needed + if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then + CONTAINER_MOUNT_DISKS+=" --container-mount-disk=mount-path=${{ inputs.lwd_state_dir }},name=${NAME},mode=rw" + fi + + # Environment variables for the container + CONTAINER_ENV="${{ inputs.test_variables }},RUST_LOG=${{ env.RUST_LOG }},RUST_BACKTRACE=${{ env.RUST_BACKTRACE }},RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }},COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }},CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }}" + + # Trim whitespace from GAR_BASE as for some reason it's getting a trailing space + 
GAR_BASE_TRIMMED=$(echo "${{ vars.GAR_BASE }}" | xargs) + gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + --machine-type ${{ inputs.is_long_test && vars.GCP_LARGE_MACHINE || vars.GCP_SMALL_MACHINE }} \ --boot-disk-size=50GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ --create-disk="${DISK_PARAMS}" \ - --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ inputs.is_long_test && vars.GCP_LARGE_MACHINE || vars.GCP_SMALL_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + ${CONTAINER_MOUNT_DISKS} \ + --container-stdin \ + --container-tty \ + --container-image="${GAR_BASE_TRIMMED}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}" \ + --container-env="${CONTAINER_ENV}" \ + --subnet=${{ vars.GCP_SUBNETWORK }} \ --scopes cloud-platform \ - --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ + --service-account=${{ vars.GCP_DEPLOYMENTS_SA }} \ + --metadata=google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - # Format the mounted disk if the test doesn't use a cached state. 
- - name: Format ${{ inputs.test_id }} volume - if: ${{ !inputs.needs_zebra_state && !inputs.needs_lwd_state }} - shell: /usr/bin/bash -ex {0} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - set -ex; - # Extract the correct disk name based on the device-name - DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); - sudo mkfs.ext4 -v /dev/$DISK_NAME \ - ' - - # Launch the test with the previously created disk or cached state. - # - # This step uses a $MOUNT_FLAGS variable to mount the disk to the docker container. - # If the test needs Lightwalletd state, we add the Lightwalletd state mount to the $MOUNT_FLAGS variable. - # - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. - # - # In this step we're using the same disk for simplicity, as mounting multiple disks to the - # VM and to the container might require more steps in this workflow, and additional - # considerations. - # - # The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker - # container, and might have two different paths (if lightwalletd state is needed): - # - ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} - # - # Currently we do this by mounting the same disk at both paths. - # - # This doesn't cause any path conflicts, because Zebra and lightwalletd create different - # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not - # delete the whole cache directory.) 
- # - # These paths must match the variables used by the tests in Rust, which are also set in - # `ci-unit-tests-docker.yml` to be able to run this tests. - # - # Although we're mounting the disk root to both directories, Zebra and Lightwalletd, tests - # will only respect the values from $ZEBRA_CACHE_DIR and $LWD_CACHE_DIR, - # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} - # are only used to match those variables paths. - - name: Launch ${{ inputs.test_id }} test - id: launch-test - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - - # Extract the correct disk name based on the device-name - DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-) - - MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.zebra_state_dir }}" - - # Check if we need to mount for Lightwalletd state - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
- if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then - MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.lwd_state_dir }}" - fi - - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - -e RUST_LOG=${{ env.RUST_LOG }} \ - -e RUST_BACKTRACE=${{ env.RUST_BACKTRACE }} \ - -e RUST_LIB_BACKTRACE=${{ env.RUST_LIB_BACKTRACE }} \ - -e COLORBT_SHOW_HIDDEN=${{ env.COLORBT_SHOW_HIDDEN }} \ - -e CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }} \ - ${MOUNT_FLAGS} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - ' - # Show debug logs if previous job failed - name: Show debug logs if previous job failed if: ${{ failure() }} @@ -302,36 +257,6 @@ jobs: sudo journalctl -b \ ' - # Show all the logs since the container launched, - # following until we see zebrad startup messages. - # - # This check limits the number of log lines, so tests running on the wrong network don't - # run until the job timeout. If Zebra does a complete recompile, there are a few hundred log - # lines before the startup logs. So that's what we use here. - # - # The log pipeline ignores the exit status of `docker logs`. - # It also ignores the expected 'broken pipe' error from `tee`, - # which happens when `grep` finds a matching output and moves on to the next job. - # - # Errors in the tests are caught by the final test status job. 
- - name: Check startup logs for ${{ inputs.test_id }} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - sudo docker logs \ - --tail all \ - --follow \ - ${{ inputs.test_id }} | \ - head -700 | \ - tee --output-error=exit-nopipe /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - "Zcash network: ${{ inputs.network }}"; \ - ' - # Check that the container executed at least 1 Rust test harness test, and that all tests passed. # Then wait for the container to finish, and exit with the test's exit status. # Also shows all the test logs. @@ -350,26 +275,41 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ + echo "Waiting for container to start..."; \ + CONTAINER_ID=""; \ + CONTAINER_PREFIX="klt-${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}"; \ + for i in {1..30}; do CONTAINER_ID=$(sudo docker ps --filter name=${CONTAINER_PREFIX} -q --no-trunc); if [ -n "${CONTAINER_ID}" ]; then break; fi; echo "Waiting for container ID starting with ${CONTAINER_PREFIX}... 
($i/30)"; sleep 2; done; \ + if [ -z "${CONTAINER_ID}" ]; then echo "Container ID starting with ${CONTAINER_PREFIX} not found after waiting."; exit 1; fi; \ + echo "Found test container ID: ${CONTAINER_ID}"; \ + echo "Streaming logs and waiting for test success message..."; \ sudo docker logs \ --tail all \ --follow \ - ${{ inputs.test_id }} | \ - tee --output-error=exit-nopipe /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ + ${CONTAINER_ID} \ + | tee --output-error=exit-nopipe /dev/stderr \ + | grep --max-count=1 --extended-regexp --color=always \ "test result: .*ok.* [1-9][0-9]* passed.*finished in"; LOGS_EXIT_STATUS=$?; - - EXIT_STATUS=$(sudo docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); - echo "sudo docker exit status: $EXIT_STATUS"; - - # If grep found the pattern, exit with the Docker container"s exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; - fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; \ + echo "Waiting for container ${CONTAINER_ID} to exit..."; + EXIT_STATUS=$(sudo docker wait ${CONTAINER_ID} || echo "Error retrieving exit status"); + echo "Container exit status: $EXIT_STATUS"; + + if [ $LOGS_EXIT_STATUS -ne 0 ]; then + # Grep failed (pattern not found) + echo "Test failed: Success log pattern not found (grep exit status: $LOGS_EXIT_STATUS)."; + exit 1; + else + # Grep succeeded (pattern found), now check specific exit code + if [ "$EXIT_STATUS" -eq 1 ]; then + # Explicit failure code 1 + echo "Test failed: Success log pattern found BUT container exited with status 1."; + exit 1; + else + # Grep succeeded and exit status is not 1 + echo "Test successful: Success log pattern found. 
Container exit status $EXIT_STATUS ignored (as it is not 1)."; + exit 0; + fi + fi \ ' # create a state image from the instance's state disk, if requested by the caller @@ -480,15 +420,30 @@ jobs: RUNNING_DB_VERSION="" DB_VERSION_SUMMARY="" + # Get Instance Name + INSTANCE_NAME="${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" + + echo "Fetching first 1000 log entries via SSH for instance ${INSTANCE_NAME} to find DB versions..." DOCKER_LOGS=$( \ - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + gcloud compute ssh ${INSTANCE_NAME} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - sudo docker logs ${{ inputs.test_id }} | head -1000 \ - ') + CONTAINER_ID=$(sudo docker ps -q --no-trunc | head -n 1); \ + if [ -n "${CONTAINER_ID}" ]; then \ + sudo docker logs ${CONTAINER_ID} | head -1000; \ + else \ + echo "Error: No running container found."; exit 1; \ + fi; \ + ' \ + ) + + if [[ $? -ne 0 ]] || [[ -z "$DOCKER_LOGS" ]] || [[ "$DOCKER_LOGS" == *"Error: No running container found."* ]]; then + echo "Failed to retrieve logs via SSH or no container found." + exit 1 + fi # either a semantic version or "creating new database" INITIAL_DISK_DB_VERSION=$( \ @@ -568,15 +523,30 @@ jobs: run: | SYNC_HEIGHT="" + # Get Instance Name + INSTANCE_NAME="${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" + + echo "Fetching last 200 log entries via SSH for instance ${INSTANCE_NAME} to find sync height..." 
DOCKER_LOGS=$( \ - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + gcloud compute ssh ${INSTANCE_NAME} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - sudo docker logs ${{ inputs.test_id }} --tail 200 \ - ') + CONTAINER_ID=$(sudo docker ps -q --no-trunc | head -n 1); \ + if [ -n "${CONTAINER_ID}" ]; then \ + sudo docker logs --tail 200 ${CONTAINER_ID}; \ + else \ + echo "Error: No running container found."; exit 1; \ + fi; \ + ' \ + ) + + if [[ $? -ne 0 ]] || [[ -z "$DOCKER_LOGS" ]] || [[ "$DOCKER_LOGS" == *"Error: No running container found."* ]]; then + echo "Failed to retrieve logs via SSH or no container found." + exit 1 + fi SYNC_HEIGHT=$( \ echo "$DOCKER_LOGS" | \ From c15a8c33cd7f0d315b9daee68f9c9413e6c89322 Mon Sep 17 00:00:00 2001 From: Pili Guerra <1311133+mpguerra@users.noreply.github.com> Date: Fri, 6 Jun 2025 20:30:05 +0200 Subject: [PATCH 194/245] Update `CONTRIBUTING.md` (#9561) * Clarify the kinds of contributions which are welcome and where to contact us * Remove section on coverage reports as we haven't used them for a while --- CONTRIBUTING.md | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 94029dc9a75..8c9025f5fab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,8 +20,8 @@ Please [create an issue](https://github.com/ZcashFoundation/zebra/issues/new?ass [pull-requests]: #pull-requests PRs are welcome for small and large changes, but please don't make large PRs -without coordinating with us via the issue tracker or Discord. This helps -increase development coordination and makes PRs easier to merge. +without coordinating with us via the [issue tracker](https://github.com/ZcashFoundation/zebra/issues) or [Discord](https://discord.gg/yVNhQwQE68). 
This helps +increase development coordination and makes PRs easier to merge. Low-effort PRs, including but not limited to fixing typos and grammatical corrections, will generally be redone by us to dissuade metric farming. Check out the [help wanted][hw] or [good first issue][gfi] labels if you're looking for a place to get started! @@ -33,19 +33,4 @@ are conformant. [hw]: https://github.com/ZcashFoundation/zebra/labels/E-help-wanted [gfi]: https://github.com/ZcashFoundation/zebra/labels/good%20first%20issue -[conventional]: https://www.conventionalcommits.org/en/v1.0.0/#specification - -## Coverage Reports -[coverage-reports]: #coverage-reports - -Zebra's CI currently generates coverage reports for every PR with rust's new -source based coverage feature. The coverage reports are generated by the -`coverage.yml` file. - -These reports are then saved as html and zipped up into a github action's -artifact. These artifacts can be accessed on the `checks` tab of any PR, next -to the "re-run jobs" button on the `Coverage (+nightly)` CI job's tab -[example](https://github.com/ZcashFoundation/zebra/pull/1907/checks?check_run_id=2127676611). - -To access a report download and extract the zip artifact then open the top -level `index.html`. +[conventional]: https://www.conventionalcommits.org/en/v1.0.0/#specification \ No newline at end of file From 7693a450e12b6636030cf01fb32a28b9bfb6203e Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Jun 2025 23:07:05 +0100 Subject: [PATCH 195/245] fix(ci): mark `sync-to-checkpoint` test as long-running (#9595) * fix(ci): mark sync-to-checkpoint test as long-running The sync-to-checkpoint test has been timing out after database upgrade invalidated cached checkpoint state. This change increases the timeout from 3 hours to allow for longer sync times during state rebuild. 
* fix(ci): handle exited containers in GCP integration test workflows

- Add find-container step to capture container ID when running
- Update log extraction steps to use saved container ID instead of discovering it
- Remove fallback to exited containers in test result validation step
- Improve readability of container discovery logic with better structure and messaging

Fixes issue where workflows fail when containers exit due to --container-restart-policy=never

* chore: remove duplicate discovery logic

* chore: fix variable substitution

* chore: typo fix

* fix(ci): use correct dependencies for following jobs

---
 .../sub-ci-integration-tests-gcp.yml          |   2 +
 .../sub-deploy-integration-tests-gcp.yml      | 107 ++++++++++--------
 2 files changed, 60 insertions(+), 49 deletions(-)

diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml
index 996ebb059e0..9f11bc95323 100644
--- a/.github/workflows/sub-ci-integration-tests-gcp.yml
+++ b/.github/workflows/sub-ci-integration-tests-gcp.yml
@@ -80,6 +80,8 @@ jobs:
       test_id: sync-to-checkpoint
       test_description: Test sync up to mandatory checkpoint
       test_variables: "NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},TEST_DISK_REBUILD=1,ZEBRA_CACHE_DIR=/home/zebra/.cache/zebra"
+      # This test commonly took less than 3 hours as of October 2024, but it now takes longer
+      is_long_test: true
       needs_zebra_state: false
       saves_to_disk: true
       force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml
index 8527a7be17f..2414bd2df8e 100644
--- a/.github/workflows/sub-deploy-integration-tests-gcp.yml
+++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml
@@ -132,6 +132,7 @@ jobs:
     outputs:
       cached_disk_name: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }}
       state_version: ${{ (inputs.needs_zebra_state ||
inputs.needs_lwd_state) && needs.get-disk-name.outputs.state_version || '' }} + container_id: ${{ steps.find-container.outputs.CONTAINER_ID }} env: CACHED_DISK_NAME: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }} permissions: @@ -232,6 +233,7 @@ jobs: --container-tty \ --container-image="${GAR_BASE_TRIMMED}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}" \ --container-env="${CONTAINER_ENV}" \ + --container-restart-policy=never \ --subnet=${{ vars.GCP_SUBNETWORK }} \ --scopes cloud-platform \ --service-account=${{ vars.GCP_DEPLOYMENTS_SA }} \ @@ -257,6 +259,36 @@ jobs: sudo journalctl -b \ ' + # Find the container ID and save it for use in subsequent steps + - name: Find container ID + id: find-container + run: | + INSTANCE_NAME="${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" + CONTAINER_PREFIX="klt-${INSTANCE_NAME}" + + echo "Looking for container with prefix: ${CONTAINER_PREFIX}" + + # Wait up to 60 seconds for container to start + for attempt in {1..30}; do + echo "Attempt ${attempt}/30: Checking for running container..." + CONTAINER_ID=$(gcloud compute ssh ${INSTANCE_NAME} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command="sudo docker ps --filter name=${CONTAINER_PREFIX} -q --no-trunc" 2>/dev/null || echo "") + if [ -n "${CONTAINER_ID}" ]; then + echo "Found running container: ${CONTAINER_ID}" + echo "CONTAINER_ID=${CONTAINER_ID}" >> $GITHUB_OUTPUT + exit 0 + fi + echo "No running container found yet, waiting 2 seconds..." + sleep 2 + done + + echo "Container not found after 60 seconds" + exit 1 + # Check that the container executed at least 1 Rust test harness test, and that all tests passed. # Then wait for the container to finish, and exit with the test's exit status. # Also shows all the test logs. 
@@ -269,48 +301,37 @@ jobs: # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) - name: Result of ${{ inputs.test_id }} test run: | + CONTAINER_ID="${{ steps.find-container.outputs.CONTAINER_ID }}" + echo "Using pre-discovered container ID: ${CONTAINER_ID}" gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - echo "Waiting for container to start..."; \ - CONTAINER_ID=""; \ - CONTAINER_PREFIX="klt-${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}"; \ - for i in {1..30}; do CONTAINER_ID=$(sudo docker ps --filter name=${CONTAINER_PREFIX} -q --no-trunc); if [ -n "${CONTAINER_ID}" ]; then break; fi; echo "Waiting for container ID starting with ${CONTAINER_PREFIX}... ($i/30)"; sleep 2; done; \ - if [ -z "${CONTAINER_ID}" ]; then echo "Container ID starting with ${CONTAINER_PREFIX} not found after waiting."; exit 1; fi; \ - echo "Found test container ID: ${CONTAINER_ID}"; \ - echo "Streaming logs and waiting for test success message..."; \ - sudo docker logs \ - --tail all \ - --follow \ - ${CONTAINER_ID} \ + --command=" + echo 'Streaming logs and waiting for test success message...'; + sudo docker logs --tail all --follow ${CONTAINER_ID} \ | tee --output-error=exit-nopipe /dev/stderr \ | grep --max-count=1 --extended-regexp --color=always \ - "test result: .*ok.* [1-9][0-9]* passed.*finished in"; - LOGS_EXIT_STATUS=$?; - echo "Waiting for container ${CONTAINER_ID} to exit..."; - EXIT_STATUS=$(sudo docker wait ${CONTAINER_ID} || echo "Error retrieving exit status"); - echo "Container exit status: $EXIT_STATUS"; - - if [ $LOGS_EXIT_STATUS -ne 0 ]; then - # Grep failed (pattern not found) - echo "Test failed: Success log pattern not found (grep exit status: $LOGS_EXIT_STATUS)."; + 
'test result: .*ok.* [1-9][0-9]* passed.*finished in'; + LOGS_EXIT_STATUS=\$?; + echo 'Waiting for container ${CONTAINER_ID} to exit...'; + EXIT_STATUS=\$(sudo docker wait ${CONTAINER_ID} || echo 'Error retrieving exit status'); + echo 'Container exit status: '\$EXIT_STATUS; + + if [ \$LOGS_EXIT_STATUS -ne 0 ]; then + echo 'Test failed: Success log pattern not found (grep exit status: '\$LOGS_EXIT_STATUS').'; exit 1; else - # Grep succeeded (pattern found), now check specific exit code - if [ "$EXIT_STATUS" -eq 1 ]; then - # Explicit failure code 1 - echo "Test failed: Success log pattern found BUT container exited with status 1."; + if [ \"\$EXIT_STATUS\" -eq 1 ]; then + echo 'Test failed: Success log pattern found BUT container exited with status 1.'; exit 1; else - # Grep succeeded and exit status is not 1 - echo "Test successful: Success log pattern found. Container exit status $EXIT_STATUS ignored (as it is not 1)."; + echo 'Test successful: Success log pattern found. Container exit status '\$EXIT_STATUS' ignored (as it is not 1).'; exit 0; fi - fi \ - ' + fi + " # create a state image from the instance's state disk, if requested by the caller create-state-image: @@ -424,24 +445,18 @@ jobs: INSTANCE_NAME="${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" echo "Fetching first 1000 log entries via SSH for instance ${INSTANCE_NAME} to find DB versions..." + CONTAINER_ID="${{ needs.test-result.outputs.container_id }}" DOCKER_LOGS=$( \ gcloud compute ssh ${INSTANCE_NAME} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - CONTAINER_ID=$(sudo docker ps -q --no-trunc | head -n 1); \ - if [ -n "${CONTAINER_ID}" ]; then \ - sudo docker logs ${CONTAINER_ID} | head -1000; \ - else \ - echo "Error: No running container found."; exit 1; \ - fi; \ - ' \ + --command="sudo docker logs ${CONTAINER_ID} | head -1000" \ ) - if [[ $? 
-ne 0 ]] || [[ -z "$DOCKER_LOGS" ]] || [[ "$DOCKER_LOGS" == *"Error: No running container found."* ]]; then - echo "Failed to retrieve logs via SSH or no container found." + if [[ $? -ne 0 ]] || [[ -z "$DOCKER_LOGS" ]]; then + echo "Failed to retrieve logs via SSH." exit 1 fi @@ -527,24 +542,18 @@ jobs: INSTANCE_NAME="${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" echo "Fetching last 200 log entries via SSH for instance ${INSTANCE_NAME} to find sync height..." + CONTAINER_ID="${{ needs.test-result.outputs.container_id }}" DOCKER_LOGS=$( \ gcloud compute ssh ${INSTANCE_NAME} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - CONTAINER_ID=$(sudo docker ps -q --no-trunc | head -n 1); \ - if [ -n "${CONTAINER_ID}" ]; then \ - sudo docker logs --tail 200 ${CONTAINER_ID}; \ - else \ - echo "Error: No running container found."; exit 1; \ - fi; \ - ' \ + --command="sudo docker logs --tail 200 ${CONTAINER_ID}" \ ) - if [[ $? -ne 0 ]] || [[ -z "$DOCKER_LOGS" ]] || [[ "$DOCKER_LOGS" == *"Error: No running container found."* ]]; then - echo "Failed to retrieve logs via SSH or no container found." + if [[ $? -ne 0 ]] || [[ -z "$DOCKER_LOGS" ]]; then + echo "Failed to retrieve logs via SSH." 
exit 1 fi From d4ea5d76ac0634e413f22a9b938647fc2e841fe9 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 6 Jun 2025 19:51:48 -0400 Subject: [PATCH 196/245] fix(network): Allow local outbound connections on Regtest (#9580) * Allow local outbound connections on Regtest * simplifies docs --- book/src/user/custom-testnets.md | 4 ++-- zebra-network/src/config.rs | 23 ++++++++++++----------- zebra-network/src/meta_addr/peer_addr.rs | 8 +++++++- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/book/src/user/custom-testnets.md b/book/src/user/custom-testnets.md index 76406cdf704..18a19673ce9 100644 --- a/book/src/user/custom-testnets.md +++ b/book/src/user/custom-testnets.md @@ -156,7 +156,7 @@ The remaining consensus differences between Mainnet and Testnet could be made co ## Differences Between Custom Testnets and Regtest Zebra's Regtest network is a special case of a custom Testnet that: -- Won't make peer connections[^fn4], +- Won't make remote peer connections[^fn4], - Skips Proof-of-Work validation, - Uses a reserved network magic and network name, - Activates network upgrades up to and including Canopy at block height 1, @@ -183,4 +183,4 @@ Zebra nodes on custom Testnets will also reject peer connections with nodes that [^fn3]: Configuring any of the Testnet parameters that are currently configurable except the network name will result in an incompatible custom Testnet, these are: the network magic, network upgrade activation heights, slow start interval, genesis hash, disabled Proof-of-Work and target difficulty limit. -[^fn4]: Zebra won't make outbound peer connections on Regtest, but currently still listens for inbound peer connections, which will be rejected unless they use the Regtest network magic, and Zcash nodes using the Regtest network magic should not be making outbound peer connections. 
It may be updated to skip initialization of the peerset service altogether so that it won't listen for peer connections at all when support for isolated custom Testnets is added. +[^fn4]: Zebra won't make remote outbound peer connections on Regtest, but currently still listens for remote inbound peer connections, which will be rejected unless they use the Regtest network magic, and Zcash nodes using the Regtest network magic should not be making outbound peer connections. It may be updated to skip initialization of the peerset service altogether so that it won't listen for peer connections at all when support for isolated custom Testnets is added. diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 7e289928ee6..bb2094f49df 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -238,9 +238,7 @@ impl Config { pub fn initial_peer_hostnames(&self) -> IndexSet { match &self.network { Network::Mainnet => self.initial_mainnet_peers.clone(), - Network::Testnet(params) if !params.is_regtest() => self.initial_testnet_peers.clone(), - // TODO: Add a `disable_peers` field to `Network` to check instead of `is_regtest()` (#8361) - Network::Testnet(_params) => IndexSet::new(), + Network::Testnet(_params) => self.initial_testnet_peers.clone(), } } @@ -251,19 +249,22 @@ impl Config { /// /// If a configured address is an invalid [`SocketAddr`] or DNS name. pub async fn initial_peers(&self) -> HashSet { - // Return early if network is regtest in case there are somehow any entries in the peer cache - if self.network.is_regtest() { - return HashSet::new(); - } - // TODO: do DNS and disk in parallel if startup speed becomes important let dns_peers = Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await; - // Ignore disk errors because the cache is optional and the method already logs them. 
- let disk_peers = self.load_peer_cache().await.unwrap_or_default(); + if self.network.is_regtest() { + // Only return local peer addresses and skip loading the peer cache on Regtest. + dns_peers + .into_iter() + .filter(PeerSocketAddr::is_localhost) + .collect() + } else { + // Ignore disk errors because the cache is optional and the method already logs them. + let disk_peers = self.load_peer_cache().await.unwrap_or_default(); - dns_peers.into_iter().chain(disk_peers).collect() + dns_peers.into_iter().chain(disk_peers).collect() + } } /// Concurrently resolves `peers` into zero or more IP addresses, with a diff --git a/zebra-network/src/meta_addr/peer_addr.rs b/zebra-network/src/meta_addr/peer_addr.rs index 92a27defcca..464baf2c42b 100644 --- a/zebra-network/src/meta_addr/peer_addr.rs +++ b/zebra-network/src/meta_addr/peer_addr.rs @@ -3,7 +3,7 @@ use std::{ fmt, - net::{Ipv4Addr, SocketAddr}, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, ops::{Deref, DerefMut}, str::FromStr, }; @@ -76,4 +76,10 @@ impl PeerSocketAddr { pub fn remove_socket_addr_privacy(&self) -> SocketAddr { **self } + + /// Returns true if the inner [`SocketAddr`]'s IP is the localhost IP. + pub fn is_localhost(&self) -> bool { + let ip = self.0.ip(); + ip == Ipv4Addr::LOCALHOST || ip == Ipv6Addr::LOCALHOST + } } From cf3ae8fe16ca054335c5ec8b2c2609bde8b8bf28 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 23:51:51 +0000 Subject: [PATCH 197/245] build(deps): bump the ecc group across 1 directory with 3 updates (#9577) Bumps the ecc group with 3 updates in the / directory: [zcash_address](https://github.com/zcash/librustzcash), [zcash_keys](https://github.com/zcash/librustzcash) and [zcash_protocol](https://github.com/zcash/librustzcash). 
Updates `zcash_address` from 0.7.0 to 0.7.1 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_address-0.7.0...zcash_address-0.7.1) Updates `zcash_keys` from 0.8.0 to 0.8.1 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_keys-0.8.0...zcash_keys-0.8.1) Updates `zcash_protocol` from 0.5.1 to 0.5.2 - [Release notes](https://github.com/zcash/librustzcash/releases) - [Commits](https://github.com/zcash/librustzcash/compare/zcash_protocol-0.5.1...zcash_protocol-0.5.2) --- updated-dependencies: - dependency-name: zcash_address dependency-version: 0.7.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc - dependency-name: zcash_keys dependency-version: 0.8.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc - dependency-name: zcash_protocol dependency-version: 0.5.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: ecc ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a2ae592e95..ff5642e373e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6051,9 +6051,9 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a21f218c86b350d706c22489af999b098e19bf92ed6dd71770660ea29ee707d" +checksum = "71591bb4eb2fd7622e88eed42e7d7d8501cd1e920a0698c7fb08723a8c1d0b4f" dependencies = [ "bech32", "bs58", @@ -6127,9 +6127,9 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb50fbc9d2d5e5997eefa934297be78312552f393149aa042ab12ac42031070c" +checksum = "f19138db56626babaed67c9f8d8d6094b0413cc34f63b6e5a76071e44d395175" dependencies = [ "bech32", "blake2b_simd", @@ -6230,9 +6230,9 @@ dependencies = [ [[package]] name = "zcash_protocol" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0acf60e235c5ba42c83f1e7e3763cf90a436583e6de71557fed26bab2d65dc" +checksum = "8f116cd111d813b7afc814be013566b16e32a7b887597f906ac42b0f7f6a1681" dependencies = [ "core2", "document-features", From 3b5cbf845146ffe43be2d40b3ed5e2e6ffb7f12c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 15:45:17 +0000 Subject: [PATCH 198/245] build(deps): bump the devops group across 1 directory with 6 updates (#9476) Bumps the devops group with 6 updates in the / directory: | Package | From | To | | --- | --- | --- | | [google-github-actions/auth](https://github.com/google-github-actions/auth) | `2.1.8` | `2.1.10` | | [codecov/codecov-action](https://github.com/codecov/codecov-action) | 
`5.4.0` | `5.4.2` | | [docker/build-push-action](https://github.com/docker/build-push-action) | `6.15.0` | `6.16.0` | | [docker/scout-action](https://github.com/docker/scout-action) | `1.17.0` | `1.17.1` | | [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) | `5.4.1` | `6.0.1` | | [github/codeql-action](https://github.com/github/codeql-action) | `3.28.15` | `3.28.16` | Updates `google-github-actions/auth` from 2.1.8 to 2.1.10 - [Release notes](https://github.com/google-github-actions/auth/releases) - [Changelog](https://github.com/google-github-actions/auth/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/auth/compare/v2.1.8...v2.1.10) Updates `codecov/codecov-action` from 5.4.0 to 5.4.2 - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.4.0...v5.4.2) Updates `docker/build-push-action` from 6.15.0 to 6.16.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6.15.0...v6.16.0) Updates `docker/scout-action` from 1.17.0 to 1.17.1 - [Release notes](https://github.com/docker/scout-action/releases) - [Commits](https://github.com/docker/scout-action/compare/v1.17.0...v1.17.1) Updates `astral-sh/setup-uv` from 5.4.1 to 6.0.1 - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/0c5e2b8115b80b4c7c5ddf6ffdd634974642d182...6b9c6063abd6010835644d4c2e1bef4cf5cd0fca) Updates `github/codeql-action` from 3.28.15 to 3.28.16 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/45775bd8235c68ba998cffa5171334d58593da47...28deaeda66b76a05916b6923827895f2b14ab387) --- 
updated-dependencies: - dependency-name: google-github-actions/auth dependency-version: 2.1.10 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: codecov/codecov-action dependency-version: 5.4.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: docker/build-push-action dependency-version: 6.16.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: devops - dependency-name: docker/scout-action dependency-version: 1.17.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops - dependency-name: astral-sh/setup-uv dependency-version: 6.0.1 dependency-type: direct:production update-type: version-update:semver-major dependency-group: devops - dependency-name: github/codeql-action dependency-version: 3.28.16 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: devops ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cd-deploy-nodes-gcp.yml | 2 +- .github/workflows/chore-delete-gcp-resources.yml | 4 ++-- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/docs-deploy-firebase.yml | 4 ++-- .github/workflows/manual-zcashd-deploy.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 6 +++--- .github/workflows/sub-deploy-integration-tests-gcp.yml | 6 +++--- .github/workflows/sub-find-cached-disks.yml | 2 +- .github/workflows/zizmor.yml | 4 ++-- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 100e3f4f512..9532ec3d501 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -258,7 +258,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: "${{ vars.GCP_WIF }}" service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index c80c6c21f0e..00e254a9e53 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -51,7 +51,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' @@ -123,7 +123,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git 
a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 98378e5e72b..b5b0c6f0024 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -103,4 +103,4 @@ jobs: run: cargo llvm-cov --lcov --no-run --output-path lcov.info - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.4.0 + uses: codecov/codecov-action@v5.4.2 diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 7e2c3d6f025..6b8ce2c3554 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -107,7 +107,7 @@ jobs: - name: Authenticate to Google Cloud if: github.repository_owner == 'ZcashFoundation' id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' @@ -168,7 +168,7 @@ jobs: - name: Authenticate to Google Cloud if: github.repository_owner == 'ZcashFoundation' id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_FIREBASE_SA }}' diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index e8021ab57de..f96f6e482ca 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -52,7 +52,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 3cf09071597..f40c07cf559 100644 +++ 
b/.github/workflows/sub-build-docker-image.yml @@ -112,7 +112,7 @@ jobs: - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: "${{ vars.GCP_WIF }}" service_account: "${{ vars.GCP_ARTIFACTS_SA }}" @@ -147,7 +147,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.15.0 + uses: docker/build-push-action@v6.16.0 with: target: ${{ inputs.dockerfile_target }} context: . @@ -179,7 +179,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.17.0 + uses: docker/scout-action@v1.17.1 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. 
diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 2414bd2df8e..c38cb513308 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -171,7 +171,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' @@ -393,7 +393,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' @@ -693,7 +693,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 3973c4ee165..dc9add90fb5 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -68,7 +68,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.8 + uses: google-github-actions/auth@v2.1.10 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index d7258e20f23..ca5ab84e0ee 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -19,13 +19,13 @@ jobs: with: persist-credentials: false - name: Install the latest version of uv - uses: 
astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v4 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1 - name: Run zizmor 🌈 run: uvx zizmor --format sarif . > results.sarif env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 with: sarif_file: results.sarif category: zizmor From e09bf5d10682f08507fc24a8924265111af92ddc Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 9 Jun 2025 16:08:20 -0300 Subject: [PATCH 199/245] tests(wallet): Add initial framework to test zallet RPC methods (#9554) * build zallet in zebra under a feature * add dummy wallet test * use jsonrpc 2.0 in the proxy * use zallet in wallet test * minor doc fixes * fix build * fix profile to debug in build * match configs network upgrades * add issue numbers * use env var to build zallet instead of a feature * improve the build a bit * clippy fix * update QA docs for zallet * improve wallet port selection --- zebra-rpc/build.rs | 50 ++++++- zebra-rpc/qa/README.md | 7 +- zebra-rpc/qa/pull-tester/rpc-tests.py | 3 +- .../qa/rpc-tests/test_framework/proxy.py | 2 +- zebra-rpc/qa/rpc-tests/test_framework/util.py | 127 +++++++++++++++++- zebra-rpc/qa/rpc-tests/wallet.py | 63 +++++++++ zebra-rpc/qa/zallet-datadir/identity.txt | 3 + zebra-rpc/qa/zallet-datadir/zallet.toml | 29 ++++ 8 files changed, 278 insertions(+), 6 deletions(-) create mode 100755 zebra-rpc/qa/rpc-tests/wallet.py create mode 100644 zebra-rpc/qa/zallet-datadir/identity.txt create mode 100644 zebra-rpc/qa/zallet-datadir/zallet.toml diff --git a/zebra-rpc/build.rs b/zebra-rpc/build.rs index bbb84746f5f..ef9163db528 100644 --- a/zebra-rpc/build.rs +++ b/zebra-rpc/build.rs @@ -3,13 +3,59 @@ fn main() -> Result<(), Box> { #[cfg(feature = "indexer-rpcs")] { - use std::{env, path::PathBuf}; - let out_dir = 
env::var("OUT_DIR").map(PathBuf::from); + let out_dir = std::env::var("OUT_DIR").map(std::path::PathBuf::from); tonic_build::configure() .type_attribute(".", "#[derive(serde::Deserialize, serde::Serialize)]") .file_descriptor_set_path(out_dir.unwrap().join("indexer_descriptor.bin")) .compile_protos(&["proto/indexer.proto"], &[""])?; } + if std::env::var_os("ZALLET").is_some() { + use std::{env, fs, path::PathBuf, process::Command}; + + // The following code will clone the zallet repo and build the binary, + // then copy the binary to the project target directory. + // + // Code below is fragile and will just build the main branch of the wallet repository + // so we can have it available for `qa` regtests. + + let build_dir = env::var("OUT_DIR").map(PathBuf::from).unwrap_or_default(); + + let profile = "debug".to_string(); + + let target_dir = env::var("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|_| std::env::current_dir().expect("failed to get current dir")) + .join("../target") + .join(&profile); + + let _ = Command::new("git") + .args([ + "clone", + "https://github.com/zcash/wallet.git", + build_dir.to_str().unwrap(), + ]) + .status() + .expect("failed to clone external binary"); + + let _ = Command::new("cargo") + .args(["build"]) + .current_dir(&build_dir) + .status() + .expect("failed to build external binary"); + + fs::copy( + build_dir.join(format!("target/{}/zallet", profile)), + target_dir.join("zallet"), + ) + .unwrap_or_else(|_| { + panic!( + "failed to copy zallet binary from {} to {}", + build_dir.display(), + target_dir.display() + ) + }); + } + Ok(()) } diff --git a/zebra-rpc/qa/README.md b/zebra-rpc/qa/README.md index cc46d7b54ef..b8fa1acf202 100644 --- a/zebra-rpc/qa/README.md +++ b/zebra-rpc/qa/README.md @@ -34,6 +34,11 @@ Make sure `zebrad` binary exists in the `../target/debug/` folder or set the bin export CARGO_BIN_EXE_zebrad=/path/to/zebrad ``` +For wallet tests, make sure `zallet` binary exists in the `../target/debug/` 
folder. +You can build `zebrad` and `zallet` with the following command: + + ZALLET=1 cargo build + You can run any single test by calling ./qa/pull-tester/rpc-tests.py @@ -78,7 +83,7 @@ to recover with: ```bash rm -rf cache -killall zcashd +killall zebrad ``` Writing tests diff --git a/zebra-rpc/qa/pull-tester/rpc-tests.py b/zebra-rpc/qa/pull-tester/rpc-tests.py index e8f48ac9861..add14685ed6 100755 --- a/zebra-rpc/qa/pull-tester/rpc-tests.py +++ b/zebra-rpc/qa/pull-tester/rpc-tests.py @@ -40,7 +40,8 @@ # Longest test should go first, to favor running tests in parallel 'reindex.py', 'getmininginfo.py', - 'nuparams.py'] + 'nuparams.py', + 'wallet.py'] ZMQ_SCRIPTS = [ # ZMQ test can only be run if bitcoin was built with zmq-enabled. diff --git a/zebra-rpc/qa/rpc-tests/test_framework/proxy.py b/zebra-rpc/qa/rpc-tests/test_framework/proxy.py index d41c92d3c51..fcb8b00dc87 100644 --- a/zebra-rpc/qa/rpc-tests/test_framework/proxy.py +++ b/zebra-rpc/qa/rpc-tests/test_framework/proxy.py @@ -121,7 +121,7 @@ def __call__(self, *args): log.debug("-%s-> %s %s"%(ServiceProxy.__id_count, self._service_name, json.dumps(args, default=EncodeDecimal))) - postdata = json.dumps({'jsonrpc': '1.0', + postdata = json.dumps({'jsonrpc': '2.0', 'method': self._service_name, 'params': args, 'id': ServiceProxy.__id_count}, default=EncodeDecimal) diff --git a/zebra-rpc/qa/rpc-tests/test_framework/util.py b/zebra-rpc/qa/rpc-tests/test_framework/util.py index c50e730307c..bac061adeac 100644 --- a/zebra-rpc/qa/rpc-tests/test_framework/util.py +++ b/zebra-rpc/qa/rpc-tests/test_framework/util.py @@ -48,12 +48,15 @@ MAX_NODES = 8 # Don't assign rpc or p2p ports lower than this PORT_MIN = 11000 -# The number of ports to "reserve" for p2p and rpc, each +# The number of ports to "reserve" for p2p, rpc and wallet rpc each PORT_RANGE = 5000 def zcashd_binary(): return os.getenv("CARGO_BIN_EXE_zebrad", os.path.join("..", "target", "debug", "zebrad")) +def zallet_binary(): + return os.path.join("..", 
"target", "debug", "zallet") + def zebrad_config(datadir): base_location = os.path.join('qa', 'base_config.toml') new_location = os.path.join(datadir, "config.toml") @@ -103,6 +106,10 @@ def p2p_port(n): def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) +def wallet_rpc_port(n): + return PORT_MIN + (PORT_RANGE * 2) + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + + def check_json_precision(): """Make sure json library being used does not lose precision converting ZEC values""" n = Decimal("20000000.00000003") @@ -800,3 +807,121 @@ def tarfile_extractall(tarfile, path): tarfile.extractall(path=path, filter='data') else: tarfile.extractall(path=path) + + +# Wallet utilities + +zallet_processes = {} + +def start_wallets(num_wallets, dirname, extra_args=None, rpchost=None, binary=None): + """ + Start multiple wallets, return RPC connections to them + """ + if extra_args is None: extra_args = [ None for _ in range(num_wallets) ] + if binary is None: binary = [ None for _ in range(num_wallets) ] + rpcs = [] + try: + for i in range(num_wallets): + rpcs.append(start_wallet(i, dirname, extra_args[i], rpchost, binary=binary[i])) + except: # If one node failed to start, stop the others + stop_wallets(rpcs) + raise + return rpcs + +def start_wallet(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None): + """ + Start a Zallet wallet and return RPC connection to it + """ + + datadir = os.path.join(dirname, "wallet"+str(i)) + if binary is None: + binary = zallet_binary() + + validator_port = rpc_port(i) + zallet_port = wallet_rpc_port(i) + + config = update_zallet_conf(datadir, validator_port, zallet_port) + args = [ binary, "-c="+config, "start" ] + + if extra_args is not None: args.extend(extra_args) + zallet_processes[i] = subprocess.Popen(args, stderr=stderr) + if os.getenv("PYTHON_DEBUG", ""): + print("start_wallet: wallet started, waiting for RPC to come up") + url = 
rpc_url_wallet(i, rpchost) + wait_for_wallet_start(zallet_processes[i], url, i) + if os.getenv("PYTHON_DEBUG", ""): + print("start_wallet: RPC successfully started for wallet {} with pid {}".format(i, zallet_processes[i].pid)) + proxy = get_rpc_proxy(url, i, timeout=timewait) + if COVERAGE_DIR: + coverage.write_all_rpc_commands(COVERAGE_DIR, proxy) + + return proxy + +def update_zallet_conf(datadir, validator_port, zallet_port): + import toml + + config_path = zallet_config(datadir) + + with open(config_path, 'r') as f: + config_file = toml.load(f) + + config_file['rpc']['bind'][0] = '127.0.0.1:'+str(zallet_port) + config_file['indexer']['validator_address'] = '127.0.0.1:'+str(validator_port) + + config_file['wallet_db'] = os.path.join(datadir, 'datadir/data.sqlite') + config_file['indexer']['db_path'] = os.path.join(datadir, 'datadir/zaino') + config_file['keystore']['identity'] = os.path.join(datadir, 'datadir/identity.txt') + + with open(config_path, 'w') as f: + toml.dump(config_file, f) + + return config_path + +def stop_wallets(wallets): + for wallet in wallets: + try: + # TODO: Implement `stop` in zallet: https://github.com/zcash/wallet/issues/153 + wallet.stop() + except http.client.CannotSendRequest as e: + print("WARN: Unable to stop wallet: " + repr(e)) + del wallets[:] # Emptying array closes connections as a side effect + +def zallet_config(datadir): + base_location = os.path.join('qa', 'zallet-datadir') + new_location = os.path.join(datadir, "datadir") + shutil.copytree(base_location, new_location) + config = new_location + "/zallet.toml" + return config + +def wait_for_wallet_start(process, url, i): + ''' + Wait for the wallet to start. This means that RPC is accessible and fully initialized. + Raise an exception if zallet exits during initialization. 
+ ''' + time.sleep(1) # give the wallet a moment to start + while True: + if process.poll() is not None: + raise Exception('%s wallet %d exited with status %i during initialization' % (zallet_binary(), i, process.returncode)) + try: + rpc = get_rpc_proxy(url, i) + rpc.getwalletinfo() + break # break out of loop on success + except IOError as e: + if e.errno != errno.ECONNREFUSED: # Port not yet open? + raise # unknown IO error + except JSONRPCException as e: # Initialization phase + if e.error['code'] != -28: # RPC in warmup? + raise # unknown JSON RPC exception + time.sleep(0.25) + +def rpc_url_wallet(i, rpchost=None): + host = '127.0.0.1' + port = wallet_rpc_port(i) + if rpchost: + parts = rpchost.split(':') + if len(parts) == 2: + host, port = parts + else: + host = rpchost + # For zallet, we just use a non-authenticated endpoint. + return "http://%s:%d" % (host, int(port)) diff --git a/zebra-rpc/qa/rpc-tests/wallet.py b/zebra-rpc/qa/rpc-tests/wallet.py new file mode 100755 index 00000000000..b3789561240 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/wallet.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2016-2024 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
+ +from test_framework.test_framework import BitcoinTestFramework +from test_framework.authproxy import JSONRPCException +from test_framework.mininode import COIN +from test_framework.util import assert_equal, start_nodes, start_wallets, start_node, \ + connect_nodes_bi, sync_blocks, sync_mempools +from test_framework.zip317 import conventional_fee + +from decimal import Decimal + +class WalletTest (BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.cache_behavior = 'clean' + self.num_nodes = 4 + + def setup_network(self, split=False): + self.nodes = start_nodes(3, self.options.tmpdir) + + # TODO: Connect nodes between them, we need addnode RPC method: + # https://github.com/ZcashFoundation/zebra/issues/9555 + #connect_nodes_bi(self.nodes,0,1) + #connect_nodes_bi(self.nodes,1,2) + #connect_nodes_bi(self.nodes,0,2) + self.is_network_split=False + self.sync_all() + + # If nodes were connected, only one of them would generate a block + self.nodes[0].generate(1) + self.sync_all() + + # But as we can't connect nodes yet, we need to generate a block to each node manually + # TODO: Remove this when we have addnode RPC method: + # https://github.com/ZcashFoundation/zebra/issues/9555 + for i in range(1, len(self.nodes)): + self.nodes[i].generate(1) + + # TODO: Wallets can be started but we need to add miner address at least one of them: + # https://github.com/ZcashFoundation/zebra/issues/9557 + self.wallets = start_wallets(3, self.options.tmpdir) + + def run_test(self): + print("Mining blocks...") + + self.nodes[0].generate(4) + self.sync_all() + + walletinfo = self.wallets[0].getwalletinfo() + # TODO: getwalletinfo data is not implemented: + # https://github.com/zcash/wallet/issues/55 + # TODO: Miner address is not in the wallet: + # https://github.com/ZcashFoundation/zebra/issues/9557 + #assert_equal(Decimal(walletinfo['immature_balance']), Decimal('40')) + assert_equal(Decimal(walletinfo['balance']), Decimal('0')) + +if __name__ == '__main__': + 
WalletTest ().main () diff --git a/zebra-rpc/qa/zallet-datadir/identity.txt b/zebra-rpc/qa/zallet-datadir/identity.txt new file mode 100644 index 00000000000..558d28b5e13 --- /dev/null +++ b/zebra-rpc/qa/zallet-datadir/identity.txt @@ -0,0 +1,3 @@ +# created: 2025-04-22T10:45:29-03:00 +# public key: age1l5d76x6zzsqy90r05a29gysjny7uvh229p5jnrwyqp5dmy39ha2qks3h9f +AGE-SECRET-KEY-1U7FJQWFKZYP7LJFJG9EVM6CG47P5TWP0NU03JE7CZQ6035H2JWDQKCRG65 diff --git a/zebra-rpc/qa/zallet-datadir/zallet.toml b/zebra-rpc/qa/zallet-datadir/zallet.toml new file mode 100644 index 00000000000..05ba9861c91 --- /dev/null +++ b/zebra-rpc/qa/zallet-datadir/zallet.toml @@ -0,0 +1,29 @@ +network = "regtest" +wallet_db = "data.sqlite" + +regtest_nuparams = [ + "5ba81b19:1", + "76b809bb:1", + "2bb40e60:1", + "f5b9230b:1", + "e9ff75a6:1", + "c2d6d0b4:290", + "c8e71055:291" +] + +[builder] + +[indexer] +validator_address = "127.0.0.1:0" +validator_user = ".." +validator_password = ".." +db_path = "zaino" + +[keystore] +identity = "identity.txt" + +[limits] +orchard_actions = 250 + +[rpc] +bind = ["127.0.0.1:0"] From fb8e6726a94de33f080819107bcaf0c3822c1e60 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 9 Jun 2025 20:27:17 -0300 Subject: [PATCH 200/245] fix(consensus): reuse sighasher when validating txs (#9594) * reuse sighasher when validating txs * expose error when checking tx network upgrade * Apply suggestions from code review Co-authored-by: Marek * fix zebra-chain test * make zebra_chain::Error implement Clone by wrapping io::Error with Arc * reuse errors strings --------- Co-authored-by: Marek --- zebra-chain/src/error.rs | 38 +++- .../src/primitives/zcash_primitives.rs | 58 +++--- zebra-chain/src/serialization/error.rs | 14 +- zebra-chain/src/transaction.rs | 19 +- zebra-chain/src/transaction/sighash.rs | 22 +- zebra-chain/src/transaction/tests/vectors.rs | 194 ++++++++++++------ zebra-consensus/src/block/tests.rs | 24 +-- zebra-consensus/src/script.rs | 10 +- 
zebra-consensus/src/transaction.rs | 31 +-- zebra-consensus/src/transaction/tests.rs | 8 +- zebra-rpc/src/methods/types/transaction.rs | 9 +- zebra-script/src/lib.rs | 176 ++++++++++------ 12 files changed, 362 insertions(+), 241 deletions(-) diff --git a/zebra-chain/src/error.rs b/zebra-chain/src/error.rs index 755e508e402..9f84be249f5 100644 --- a/zebra-chain/src/error.rs +++ b/zebra-chain/src/error.rs @@ -1,6 +1,6 @@ //! Errors that can occur inside any `zebra-chain` submodule. -use std::io; +use std::{io, sync::Arc}; use thiserror::Error; // TODO: Move all these enums into a common enum at the bottom. @@ -57,17 +57,47 @@ pub enum AddressError { } /// `zebra-chain`'s errors -#[derive(Error, Debug)] +#[derive(Clone, Error, Debug)] pub enum Error { /// Invalid consensus branch ID. #[error("invalid consensus branch id")] InvalidConsensusBranchId, /// Zebra's type could not be converted to its librustzcash equivalent. - #[error("Zebra's type could not be converted to its librustzcash equivalent: ")] - Conversion(#[from] io::Error), + #[error("Zebra's type could not be converted to its librustzcash equivalent: {0}")] + Conversion(#[from] Arc), /// The transaction is missing a network upgrade. #[error("the transaction is missing a network upgrade")] MissingNetworkUpgrade, } + +/// Allow converting `io::Error` to `Error`; we need this since we +/// use `Arc` in `Error::Conversion`. +impl From for Error { + fn from(value: io::Error) -> Self { + Arc::new(value).into() + } +} + +// We need to implement this manually because io::Error does not implement +// PartialEq. 
+impl PartialEq for Error { + fn eq(&self, other: &Self) -> bool { + match self { + Error::InvalidConsensusBranchId => matches!(other, Error::InvalidConsensusBranchId), + Error::Conversion(e) => { + if let Error::Conversion(o) = other { + // Not perfect, but good enough for testing, which + // is the main purpose for our usage of PartialEq for errors + e.to_string() == o.to_string() + } else { + false + } + } + Error::MissingNetworkUpgrade => matches!(other, Error::MissingNetworkUpgrade), + } + } +} + +impl Eq for Error {} diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index 5ef8bd4c6b8..dc304d073e0 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -1,7 +1,7 @@ //! Contains code that interfaces with the zcash_primitives crate from //! librustzcash. -use std::{io, ops::Deref}; +use std::{io, ops::Deref, sync::Arc}; use zcash_primitives::transaction::{self as zp_tx, TxDigests}; use zcash_protocol::value::BalanceError; @@ -12,6 +12,7 @@ use crate::{ serialization::ZcashSerialize, transaction::{AuthDigest, HashType, SigHash, Transaction}, transparent::{self, Script}, + Error, }; // TODO: move copied and modified code to a separate module. @@ -19,17 +20,17 @@ use crate::{ // Used by boilerplate code below. #[derive(Clone, Debug)] -struct TransparentAuth<'a> { - all_prev_outputs: &'a [transparent::Output], +struct TransparentAuth { + all_prev_outputs: Arc>, } -impl zcash_transparent::bundle::Authorization for TransparentAuth<'_> { +impl zcash_transparent::bundle::Authorization for TransparentAuth { type ScriptSig = zcash_primitives::legacy::Script; } // In this block we convert our Output to a librustzcash to TxOut. 
// (We could do the serialize/deserialize route but it's simple enough to convert manually) -impl zcash_transparent::sighash::TransparentAuthorizingContext for TransparentAuth<'_> { +impl zcash_transparent::sighash::TransparentAuthorizingContext for TransparentAuth { fn input_amounts(&self) -> Vec { self.all_prev_outputs .iter() @@ -56,13 +57,12 @@ impl zcash_transparent::sighash::TransparentAuthorizingContext for TransparentAu // to compute sighash. // TODO: remove/change if they improve the API to not require this. -struct MapTransparent<'a> { - auth: TransparentAuth<'a>, +struct MapTransparent { + auth: TransparentAuth, } -impl<'a> - zcash_transparent::bundle::MapAuth> - for MapTransparent<'a> +impl zcash_transparent::bundle::MapAuth + for MapTransparent { fn map_script_sig( &self, @@ -71,7 +71,7 @@ impl<'a> s } - fn map_authorization(&self, _: zcash_transparent::bundle::Authorized) -> TransparentAuth<'a> { + fn map_authorization(&self, _: zcash_transparent::bundle::Authorized) -> TransparentAuth { // TODO: This map should consume self, so we can move self.auth self.auth.clone() } @@ -133,12 +133,10 @@ impl zp_tx::components::orchard::MapAuth { - _phantom: std::marker::PhantomData<&'a ()>, -} +struct PrecomputedAuth {} -impl<'a> zp_tx::Authorization for PrecomputedAuth<'a> { - type TransparentAuth = TransparentAuth<'a>; +impl zp_tx::Authorization for PrecomputedAuth { + type TransparentAuth = TransparentAuth; type SaplingAuth = sapling_crypto::bundle::Authorized; type OrchardAuth = orchard::bundle::Authorized; } @@ -197,13 +195,13 @@ impl From