diff --git a/.gemini/config.yaml b/.gemini/config.yaml new file mode 100644 index 0000000000..307f783561 --- /dev/null +++ b/.gemini/config.yaml @@ -0,0 +1,36 @@ +# Config for the Gemini Pull Request Review Bot. +# https://developers.google.com/gemini-code-assist/docs/customize-gemini-behavior-github + +# Enables fun features such as a poem in the initial pull request summary. +# Type: boolean, default: false. +have_fun: false + +code_review: + # Disables Gemini from acting on PRs. + # Type: boolean, default: false. + disable: false + + # Minimum severity of comments to post (LOW, MEDIUM, HIGH, CRITICAL). + # Type: string, default: MEDIUM. + comment_severity_threshold: MEDIUM + + # Max number of review comments (-1 for unlimited). + # Type: integer, default: -1. + max_review_comments: -1 + + pull_request_opened: + # Post helpful instructions when PR is opened. + # Type: boolean, default: false. + help: false + + # Post PR summary when opened. + # Type boolean, default: true. + summary: true + + # Post code review on PR open. + # Type boolean, default: true. + code_review: true + +# List of glob patterns to ignore (files and directories). +# Type: array of string, default: []. +ignore_patterns: [] diff --git a/.gemini/styleguide.md b/.gemini/styleguide.md new file mode 100644 index 0000000000..d07329aec9 --- /dev/null +++ b/.gemini/styleguide.md @@ -0,0 +1,291 @@ +# LND Style Guide + +## Code Documentation and Commenting + +- Always use the Golang code style described below in this document. +- Readable code is the most important requirement for any commit created. +- Comments must not explain the code 1:1 but instead explain the _why_ behind a + certain block of code, in case it requires contextual knowledge. +- Unit tests must always use the `require` library. Either table driven unit + tests or tests using the `rapid` library are preferred. +- The line length MUST NOT exceed 80 characters, this is very important. 
+ You must count the Golang indentation (tabulator character) as 8 spaces when + determining the line length. Use creative approaches or the wrapping rules + specified below to make sure the line length isn't exceeded. +- Every function must be commented with its purpose and assumptions. +- Function comments must begin with the function name. +- Function comments should be complete sentences. +- Exported functions require detailed comments for the caller. + +**WRONG** +```go +// generates a revocation key +func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey, + revokePreimage []byte) *btcec.PublicKey { +``` +**RIGHT** +```go +// DeriveRevocationPubkey derives the revocation public key given the +// counterparty's commitment key, and revocation preimage derived via a +// pseudo-random-function. In the event that we (for some reason) broadcast a +// revoked commitment transaction, then if the other party knows the revocation +// preimage, then they'll be able to derive the corresponding private key to +// this private key by exploiting the homomorphism in the elliptic curve group. +// +// The derivation is performed as follows: +// +// revokeKey := commitKey + revokePoint +// := G*k + G*h +// := G * (k+h) +// +// Therefore, once we divulge the revocation preimage, the remote peer is able +// to compute the proper private key for the revokeKey by computing: +// revokePriv := commitPriv + revokePreimge mod N +// +// Where N is the order of the sub-group. +func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey, + revokePreimage []byte) *btcec.PublicKey { +``` +- In-body comments should explain the *intention* of the code. +**WRONG** +```go +// return err if amt is less than 546 +if amt < 546 { + return err +} +``` +**RIGHT** +```go +// Treat transactions with amounts less than the amount which is considered dust +// as non-standard. +if amt < 546 { + return err +} +``` +## Code Spacing and formatting +- Segment code into logical stanzas separated by newlines. 
+**WRONG** +```go + witness := make([][]byte, 4) + witness[0] = nil + if bytes.Compare(pubA, pubB) == -1 { + witness[1] = sigB + witness[2] = sigA + } else { + witness[1] = sigA + witness[2] = sigB + } + witness[3] = witnessScript + return witness +``` +**RIGHT** +```go + witness := make([][]byte, 4) + + // When spending a p2wsh multi-sig script, rather than an OP_0, we add + // a nil stack element to eat the extra pop. + witness[0] = nil + + // When initially generating the witnessScript, we sorted the serialized + // public keys in descending order. So we do a quick comparison in order + // to ensure the signatures appear on the Script Virtual Machine stack in + // the correct order. + if bytes.Compare(pubA, pubB) == -1 { + witness[1] = sigB + witness[2] = sigA + } else { + witness[1] = sigA + witness[2] = sigB + } + + // Finally, add the preimage as the last witness element. + witness[3] = witnessScript + + return witness +``` +- Use spacing between `case` and `select` stanzas. +**WRONG** +```go + switch { + case a: + + case b: + + case c: + + case d: + + default: + + } +``` +**RIGHT** +```go + switch { + // Brief comment detailing instances of this case (repeat below). + case a: + + + case b: + + + case c: + + + case d: + + + default: + + } +``` +## Additional Style Constraints +### 80 character line length +- Wrap columns at 80 characters. +- Tabs are 8 spaces. +**WRONG** +```go +myKey := "0214cd678a565041d00e6cf8d62ef8add33b4af4786fb2beb87b366a2e151fcee7" +``` +**RIGHT** +```go +myKey := "0214cd678a565041d00e6cf8d62ef8add33b4af4786fb2beb87b366a2e1" + + "51fcee7" +``` +### Wrapping long function calls +- If a function call exceeds the column limit, place the closing parenthesis + on its own line and start all arguments on a new line after the opening + parenthesis. +**WRONG** +```go +value, err := bar(a, + a, b, c) +``` +**RIGHT** +```go +value, err := bar( + a, a, b, c, +) +``` +- Compact form is acceptable if visual symmetry of parentheses is preserved. 
+**ACCEPTABLE** +```go + response, err := node.AddInvoice( + ctx, &lnrpc.Invoice{ + Memo: "invoice", + ValueMsat: int64(oneUnitMilliSat - 1), + }, + ) +``` +**PREFERRED** +```go + response, err := node.AddInvoice(ctx, &lnrpc.Invoice{ + Memo: "invoice", + ValueMsat: int64(oneUnitMilliSat - 1), + }) +``` +### Exception for log and error message formatting +- Minimize lines for log and error messages, while adhering to the + 80-character limit. +**WRONG** +```go +return fmt.Errorf( + "this is a long error message with a couple (%d) place holders", + len(things), +) + +log.Debugf( + "Something happened here that we need to log: %v", + longVariableNameHere, +) +``` +**RIGHT** +```go +return fmt.Errorf("this is a long error message with a couple (%d) place "+ + "holders", len(things)) + +log.Debugf("Something happened here that we need to log: %v", + longVariableNameHere) +``` +### Exceptions and additional styling for structured logging +- **Static messages:** Use key-value pairs instead of formatted strings for the + `msg` parameter. +- **Key-value attributes:** Use `slog.Attr` helper functions. +- **Line wrapping:** Structured log lines are an exception to the 80-character + rule. Use one line per key-value pair for multiple attributes. +**WRONG** +```go +log.DebugS(ctx, fmt.Sprintf("User %d just spent %.8f to open a channel", userID, 0.0154)) +``` +**RIGHT** +```go +log.InfoS(ctx, "Channel open performed", + slog.Int("user_id", userID), + btclog.Fmt("amount", "%.8f", 0.00154)) +``` +### Wrapping long function definitions +- If function arguments exceed the 80-character limit, maintain indentation + on following lines. +- Do not end a line with an open parenthesis if the function definition is not + finished. 
+**WRONG** +```go +func foo(a, b, c, +) (d, error) { + +func bar(a, b, c) ( + d, error, +) { + +func baz(a, b, c) ( + d, error) { +``` +**RIGHT** +```go +func foo(a, b, + c) (d, error) { + +func baz(a, b, c) (d, + error) { + +func longFunctionName( + a, b, c) (d, error) { +``` +- If a function declaration spans multiple lines, the body should start with an + empty line. +**WRONG** +```go +func foo(a, b, c, + d, e) error { + var a int +} +``` +**RIGHT** +```go +func foo(a, b, c, + d, e) error { + + var a int +} +``` +## Use of Log Levels +- Available levels: `trace`, `debug`, `info`, `warn`, `error`, `critical`. +- Only use `error` for internal errors not triggered by external sources. +## Testing +- To run all tests for a specific package: + `make unit pkg=$pkg` +- To run a specific test case within a package: + `make unit pkg=$pkg case=$case` +## Git Commit Messages +- **Subject Line:** + - Format: `subsystem: short description of changes` + - `subsystem` should be the package primarily affected (e.g., `peer`, `rpcclient`). + - For multiple packages, use `+` or `,` as a delimiter (e.g., `peer+rpcclient`). + - For widespread changes, use `multi:`. + - Keep it under 50 characters. + - Use the present tense (e.g., "Fix bug", not "Fixed bug"). +- **Message Body:** + - Separate from the subject with a blank line. + - Explain the "what" and "why" of the change. + - Wrap text to 72 characters. + - Use bullet points for lists. diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..709f0bfd9e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,29 @@ +--- +name: Bug report +about: Create a bug report. Please use the discussions section for general or troubleshooting questions. +title: '[bug]: ' +labels: ["bug", "needs triage"] +assignees: '' +--- + +### Background + +Describe your issue here. 
+ +### Your environment + +* version of `btcd` +* which operating system (`uname -a` on *Nix) +* any other relevant environment details + +### Steps to reproduce + +Tell us how to reproduce this issue. Please provide stacktraces and links to code in question. + +### Expected behaviour + +Tell us what should happen + +### Actual behaviour + +Tell us what happens instead diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..8d32770431 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: + - name: Discussions + url: https://github.com/btcsuite/btcd/discussions + about: For general or troubleshooting questions or if you're not sure what issue type to pick. + - name: Community Slack + url: https://lightning.engineering/slack.html + about: Please ask and answer questions here. + - name: Security issue disclosure policy + url: https://github.com/lightningnetwork/lnd#security + about: Please refer to this document when reporting security related issues. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..c96ee0af47 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,19 @@ +--- +name: Feature request +about: Suggest a new feature for `btcd`. +title: '[feature]: ' +labels: enhancement +assignees: '' +--- + +**Is your feature request related to a problem? Please describe.** + + +**Describe the solution you'd like** + + +**Describe alternatives you've considered** + + +**Additional context** + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..dc1639a7a3 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,19 @@ +## Change Description +Description of change / link to associated issue. + +## Steps to Test +Steps for reviewers to follow to test the change. 
+ +## Pull Request Checklist +### Testing +- [ ] Your PR passes all CI checks. +- [ ] Tests covering the positive and negative (error paths) are included. +- [ ] Bug fixes contain tests triggering the bug to prevent regressions. + +### Code Style and Documentation +- [ ] The change is not [insubstantial](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md#substantial-contributions-only). Typo fixes are not accepted to fight bot spam. +- [ ] The change obeys the [Code Documentation and Commenting](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md#code-documentation-and-commenting) guidelines, and lines wrap at 80. +- [ ] Commits follow the [Ideal Git Commit Structure](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md#model-git-commit-messages). +- [ ] Any new logging statements use an appropriate subsystem and logging level. + +📝 Please see our [Contribution Guidelines](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md) for further guidance. diff --git a/.github/workflows/Dockerfile b/.github/workflows/Dockerfile index 371931a5e4..a23b4625c4 100644 --- a/.github/workflows/Dockerfile +++ b/.github/workflows/Dockerfile @@ -5,7 +5,7 @@ ########################### # Build binaries stage ########################### -FROM --platform=$BUILDPLATFORM golang:1.17.8-alpine3.15 AS build +FROM --platform=$BUILDPLATFORM golang:1.22.11-alpine3.21 AS build ADD . 
/app WORKDIR /app # Arguments required to build binaries targetting the correct OS and CPU architectures diff --git a/.github/workflows/dimagespub.yml b/.github/workflows/dimagespub.yml index 19056ee07b..d1e635539d 100644 --- a/.github/workflows/dimagespub.yml +++ b/.github/workflows/dimagespub.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Docker Setup Buildx id: buildx diff --git a/.github/workflows/go.yml b/.github/workflows/main.yml similarity index 61% rename from .github/workflows/go.yml rename to .github/workflows/main.yml index c1625428d3..2a98f59308 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/main.yml @@ -5,7 +5,8 @@ env: # go needs absolute directories, using the $HOME variable doesn't work here. GOCACHE: /home/runner/work/go/pkg/build GOPATH: /home/runner/work/go - GO_VERSION: 1.17.5 + GOBIN: /home/runner/work/go/bin + GO_VERSION: 1.22.11 jobs: build: @@ -13,12 +14,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Check out source - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Build run: make build @@ -28,46 +29,55 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Check out source - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Test run: make unit-cover - name: Send top-level coverage - uses: shogo82148/actions-goveralls@v1 + uses: coverallsapp/github-action@v2 + continue-on-error: true with: - path-to-profile: coverage.txt + file: coverage.txt flag-name: btcd + format: 'golang' parallel: true - name: Send btcec - uses: shogo82148/actions-goveralls@v1 + uses: coverallsapp/github-action@v2 + continue-on-error: true with: - path-to-profile: btcec/coverage.txt + file: btcec/coverage.txt flag-name: btcec + 
format: 'golang' parallel: true - name: Send btcutil coverage - uses: shogo82148/actions-goveralls@v1 + uses: coverallsapp/github-action@v2 + continue-on-error: true with: - path-to-profile: btcutil/coverage.txt + file: btcutil/coverage.txt flag-name: btcutil + format: 'golang' parallel: true - name: Send btcutil coverage for psbt package - uses: shogo82148/actions-goveralls@v1 + uses: coverallsapp/github-action@v2 + continue-on-error: true with: - path-to-profile: btcutil/psbt/coverage.txt + file: btcutil/psbt/coverage.txt flag-name: btcutilpsbt + format: 'golang' parallel: true - name: Notify coveralls all reports sent - uses: shogo82148/actions-goveralls@v1 + uses: coverallsapp/github-action@v2 + continue-on-error: true with: parallel-finished: true @@ -76,12 +86,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Check out source - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Test run: make unit-race diff --git a/.gitignore b/.gitignore index adc4fbeadb..0e977aaf3c 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,9 @@ btcutil/psbt/coverage.txt *.swo /.vim +#IDE +.idea + # Binaries produced by "make build" /addblock /btcctl @@ -55,3 +58,6 @@ btcutil/psbt/coverage.txt #Goland .idea + +.DS_Store +.aider* diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000..585854d55c --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,4 @@ +version: "2" +issues: + # Only show newly introduced problems. 
+ new-from-rev: 80b74d6c5a0088a66dc96df6777d21e15c04849c \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index a715e89ba6..3ee61efc16 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,9 +19,9 @@ ARG ARCH=amd64 # https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests # https://cloud.google.com/architecture/using-container-images # https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md -# âžś ~ crane digest golang:1.17.13-alpine3.16 -# sha256:c80567372be0d486766593cc722d3401038e2f150a0f6c5c719caa63afb4026a -FROM golang@sha256:c80567372be0d486766593cc722d3401038e2f150a0f6c5c719caa63afb4026a AS build-container +# âžś ~ crane digest golang:1.23.12-alpine3.21 +# sha256:4bb4be21ac98da06bc26437ee870c4973f8039f13e9a1a36971b4517632b0fc6 +FROM golang@sha256:4bb4be21ac98da06bc26437ee870c4973f8039f13e9a1a36971b4517632b0fc6 AS build-container ARG ARCH @@ -34,7 +34,7 @@ RUN set -ex \ && echo "Compiling for $GOARCH" \ && go install -v . ./cmd/... -FROM $ARCH/alpine:3.16 +FROM $ARCH/alpine:3.21 COPY --from=build-container /go/bin /bin diff --git a/LICENSE b/LICENSE index 46dcd39508..5eed08580d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ ISC License -Copyright (c) 2013-2023 The btcsuite developers +Copyright (c) 2013-2025 The btcsuite developers Copyright (c) 2015-2016 The Decred developers Permission to use, copy, modify, and distribute this software for any diff --git a/Makefile b/Makefile index e17e6446d7..4ccc503d28 100644 --- a/Makefile +++ b/Makefile @@ -1,37 +1,36 @@ PKG := github.com/btcsuite/btcd -LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint -GOACC_PKG := github.com/ory/go-acc +LINT_PKG := github.com/golangci/golangci-lint/v2/cmd/golangci-lint GOIMPORTS_PKG := golang.org/x/tools/cmd/goimports -GO_BIN := ${GOPATH}/bin +GO_BIN := ${shell go env GOBIN} + +# If GOBIN is not set, default to GOPATH/bin. 
+ifeq ($(GO_BIN),) +GO_BIN := $(shell go env GOPATH)/bin +endif + LINT_BIN := $(GO_BIN)/golangci-lint -GOACC_BIN := $(GO_BIN)/go-acc +GOIMPORTS_BIN := $(GO_BIN)/goimports -LINT_COMMIT := v1.18.0 -GOACC_COMMIT := 80342ae2e0fcf265e99e76bcc4efd022c7c3811b +LINT_COMMIT := v2.1.6 +GOIMPORTS_COMMIT := a24facf9e5586c95743d2f4ad15d148c7a8cf00b -DEPGET := cd /tmp && go get -v GOBUILD := go build -v GOINSTALL := go install -v DEV_TAGS := rpctest GOTEST_DEV = go test -v -tags=$(DEV_TAGS) GOTEST := go test -v - -GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") - -RM := rm -f -CP := cp -MAKE := make -XARGS := xargs -L 1 +COVER_FLAGS = -coverprofile=coverage.txt -covermode=atomic -coverpkg=$(PKG)/... # Linting uses a lot of memory, so keep it under control by limiting the number # of workers if requested. ifneq ($(workers),) LINT_WORKERS = --concurrency=$(workers) endif +LINT_TIMEOUT := 5m -LINT = $(LINT_BIN) run -v $(LINT_WORKERS) +LINT = $(LINT_BIN) run -v $(LINT_WORKERS) --timeout=$(LINT_TIMEOUT) GREEN := "\\033[0;32m" NC := "\\033[0m" @@ -51,16 +50,12 @@ all: build check $(LINT_BIN): @$(call print, "Fetching linter") - $(DEPGET) $(LINT_PKG)@$(LINT_COMMIT) - -$(GOACC_BIN): - @$(call print, "Fetching go-acc") - $(DEPGET) $(GOACC_PKG)@$(GOACC_COMMIT) + $(GOINSTALL) $(LINT_PKG)@$(LINT_COMMIT) #? goimports: Install goimports goimports: @$(call print, "Installing goimports.") - $(DEPGET) $(GOIMPORTS_PKG) + $(GOINSTALL) $(GOIMPORTS_PKG)@$(GOIMPORTS_COMMIT) # ============ # INSTALLATION @@ -84,7 +79,7 @@ install: $(GOINSTALL) $(PKG)/cmd/findcheckpoint $(GOINSTALL) $(PKG)/cmd/addblock -#? release-install: Install btcd and btcctl release binaries, place them in $GOPATH/bin +#? 
release-install: Install btcd and btcctl release binaries, place them in $GOBIN release-install: @$(call print, "Installing btcd and btcctl release binaries") env CGO_ENABLED=0 $(GOINSTALL) -trimpath -ldflags="-s -w -buildid=" $(PKG) @@ -101,30 +96,28 @@ check: unit unit: @$(call print, "Running unit tests.") $(GOTEST_DEV) ./... -test.timeout=20m - cd btcec; $(GOTEST_DEV) ./... -test.timeout=20m - cd btcutil; $(GOTEST_DEV) ./... -test.timeout=20m - cd btcutil/psbt; $(GOTEST_DEV) ./... -test.timeout=20m + cd btcec && $(GOTEST_DEV) ./... -test.timeout=20m + cd btcutil && $(GOTEST_DEV) ./... -test.timeout=20m + cd btcutil/psbt && $(GOTEST_DEV) ./... -test.timeout=20m #? unit-cover: Run unit coverage tests -unit-cover: $(GOACC_BIN) +unit-cover: @$(call print, "Running unit coverage tests.") - $(GOACC_BIN) ./... - + $(GOTEST) $(COVER_FLAGS) ./... + # We need to remove the /v2 pathing from the module to have it work # nicely with the CI tool we use to render live code coverage. - cd btcec; $(GOACC_BIN) ./...; sed -i.bak 's/v2\///g' coverage.txt - - cd btcutil; $(GOACC_BIN) ./... - - cd btcutil/psbt; $(GOACC_BIN) ./... + cd btcec && $(GOTEST) $(COVER_FLAGS) ./... && sed -i.bak 's/v2\///g' coverage.txt + cd btcutil && $(GOTEST) $(COVER_FLAGS) ./... + cd btcutil/psbt && $(GOTEST) $(COVER_FLAGS) ./... #? unit-race: Run unit race tests unit-race: @$(call print, "Running unit race tests.") env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... - cd btcec; env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... - cd btcutil; env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... - cd btcutil/psbt; env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... + cd btcec && env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... 
+ cd btcutil && env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... + cd btcutil/psbt && env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... # ========= # UTILITIES @@ -133,9 +126,9 @@ unit-race: #? fmt: Fix imports and formatting source fmt: goimports @$(call print, "Fixing imports.") - goimports -w $(GOFILES_NOVENDOR) + $(GOIMPORTS_BIN) -w . @$(call print, "Formatting source.") - gofmt -l -w -s $(GOFILES_NOVENDOR) + gofmt -l -w -s . #? lint: Lint source lint: $(LINT_BIN) @@ -145,8 +138,8 @@ lint: $(LINT_BIN) #? clean: Clean source clean: @$(call print, "Cleaning source.$(NC)") - $(RM) coverage.txt btcec/coverage.txt btcutil/coverage.txt btcutil/psbt/coverage.txt - + rm -f coverage.txt btcec/coverage.txt btcutil/coverage.txt btcutil/psbt/coverage.txt + #? tidy-module: Run 'go mod tidy' for all modules tidy-module: echo "Running 'go mod tidy' for all modules" @@ -161,11 +154,12 @@ tidy-module: unit-race \ fmt \ lint \ - clean + clean \ + tidy-module #? help: Get more info on make commands help: Makefile @echo " Choose a command run in btcd:" @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /' -.PHONY: help +.PHONY: help \ No newline at end of file diff --git a/README.md b/README.md index f70f3f9145..b5d9146461 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ which are both under active development. ## Requirements -[Go](http://golang.org) 1.17 or newer. +[Go](http://golang.org) 1.22 or newer. ## Installation diff --git a/addrmgr/addrmanager_internal_test.go b/addrmgr/addrmanager_internal_test.go index 38218b15f7..a4ed50b8be 100644 --- a/addrmgr/addrmanager_internal_test.go +++ b/addrmgr/addrmanager_internal_test.go @@ -3,7 +3,6 @@ package addrmgr import ( "math/rand" "net" - "os" "testing" "time" @@ -107,11 +106,7 @@ func TestAddrManagerSerialization(t *testing.T) { // We'll start by creating our address manager backed by a temporary // directory. 
- tempDir, err := os.MkdirTemp("", "addrmgr") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() addrMgr := New(tempDir, nil) @@ -147,11 +142,7 @@ func TestAddrManagerV1ToV2(t *testing.T) { // We'll start by creating our address manager backed by a temporary // directory. - tempDir, err := os.MkdirTemp("", "addrmgr") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() addrMgr := New(tempDir, nil) diff --git a/addrmgr/cov_report.sh b/addrmgr/cov_report.sh old mode 100644 new mode 100755 index 307f05b76c..e41c9282e4 --- a/addrmgr/cov_report.sh +++ b/addrmgr/cov_report.sh @@ -1,17 +1,9 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... + +# Display function-level coverage statistics. +go tool cover -func=coverage.txt diff --git a/blockchain/chainio.go b/blockchain/chainio.go index 3340dd14a0..27028eac90 100644 --- a/blockchain/chainio.go +++ b/blockchain/chainio.go @@ -1058,7 +1058,14 @@ func dbPutUtxoStateConsistency(dbTx database.Tx, hash *chainhash.Hash) error { // nothing was found. func dbFetchUtxoStateConsistency(dbTx database.Tx) []byte { // Fetch the serialized data from the database. 
- return dbTx.Metadata().Get(utxoStateConsistencyKeyName) + statusBytes := dbTx.Metadata().Get(utxoStateConsistencyKeyName) + if statusBytes != nil { + result := make([]byte, len(statusBytes)) + copy(result, statusBytes) + return result + } + + return nil } // createChainState initializes both the database and the chain state to the diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index b1e39b9d62..56de778008 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -191,12 +191,20 @@ func calcNextRequiredDifficulty(lastNode HeaderCtx, newBlockTime time.Time, adjustedTimespan = c.MaxRetargetTimespan() } + // Special difficulty rule for Testnet4 + oldTarget := CompactToBig(lastNode.Bits()) + if c.ChainParams().EnforceBIP94 { + // Here we use the first block of the difficulty period. This way + // the real difficulty is always preserved in the first block as + // it is not allowed to use the min-difficulty exception. + oldTarget = CompactToBig(firstNode.Bits()) + } + // Calculate new target difficulty as: // currentDifficulty * (adjustedTimespan / targetTimespan) // The result uses integer division which means it will be slightly // rounded down. Bitcoind also uses integer division to calculate this // result. - oldTarget := CompactToBig(lastNode.Bits()) newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) targetTimeSpan := int64(c.ChainParams().TargetTimespan / time.Second) newTarget.Div(newTarget, big.NewInt(targetTimeSpan)) diff --git a/blockchain/error.go b/blockchain/error.go index dc40222235..8a7d4a7dc7 100644 --- a/blockchain/error.go +++ b/blockchain/error.go @@ -220,6 +220,10 @@ const ( // current chain tip. This is not a block validation rule, but is required // for block proposals submitted via getblocktemplate RPC. ErrPrevBlockNotBest + + // ErrTimewarpAttack indicates a timewarp attack i.e. + // when block's timestamp is too early on diff adjustment block. 
+ ErrTimewarpAttack ) // Map of ErrorCode values back to their constant names for pretty printing. diff --git a/blockchain/thresholdstate.go b/blockchain/thresholdstate.go index d62c2de3c2..880310197d 100644 --- a/blockchain/thresholdstate.go +++ b/blockchain/thresholdstate.go @@ -102,6 +102,11 @@ type thresholdConditionChecker interface { // not the bit associated with the condition is set, but can be more // complex as needed. Condition(*blockNode) (bool, error) + + // ForceActive returns if the deployment should be forced to transition + // to the active state. This is useful on certain testnet, where we + // we'd like for a deployment to always be active. + ForceActive(*blockNode) bool } // thresholdStateCache provides a type to cache the threshold states of each @@ -279,7 +284,17 @@ func thresholdStateTransition(state ThresholdState, prevNode *blockNode, // threshold states for previous windows are only calculated once. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) { +func (b *BlockChain) thresholdState(prevNode *blockNode, + checker thresholdConditionChecker, + cache *thresholdStateCache) (ThresholdState, error) { + + // If the deployment has a nonzero AlwaysActiveHeight and the next + // block’s height is at or above that threshold, then force the state + // to Active. + if checker.ForceActive(prevNode) { + return ThresholdActive, nil + } + // The threshold state for the window that contains the genesis block is // defined by definition. 
confirmationWindow := int32(checker.MinerConfirmationWindow()) diff --git a/blockchain/thresholdstate_test.go b/blockchain/thresholdstate_test.go index 8d527137e3..28f417a1db 100644 --- a/blockchain/thresholdstate_test.go +++ b/blockchain/thresholdstate_test.go @@ -175,6 +175,10 @@ func (c customDeploymentChecker) Condition(_ *blockNode) (bool, error) { return c.conditionTrue, nil } +func (c customDeploymentChecker) ForceActive(_ *blockNode) bool { + return false +} + // TestThresholdStateTransition tests that the thresholdStateTransition // properly implements the BIP 009 state machine, along with the speedy trial // augments. diff --git a/blockchain/validate.go b/blockchain/validate.go index 5e24405ef9..1cc0ec3325 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -46,6 +46,11 @@ const ( // coinbaseHeightAllocSize is the amount of bytes that the // ScriptBuilder will allocate when validating the coinbase height. coinbaseHeightAllocSize = 5 + + // maxTimeWarp is a maximum number of seconds that the timestamp of the first + // block of a difficulty adjustment period is allowed to + // be earlier than the last block of the previous period (BIP94). + maxTimeWarp = 600 * time.Second ) var ( @@ -684,6 +689,12 @@ func compareScript(height int32, script []byte) error { func CheckBlockHeaderContext(header *wire.BlockHeader, prevNode HeaderCtx, flags BehaviorFlags, c ChainCtx, skipCheckpoint bool) error { + // The height of this block is one more than the referenced previous + // block. + blockHeight := prevNode.Height() + 1 + + params := c.ChainParams() + fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { // Ensure the difficulty specified in the block header matches @@ -710,16 +721,24 @@ func CheckBlockHeaderContext(header *wire.BlockHeader, prevNode HeaderCtx, str = fmt.Sprintf(str, header.Timestamp, medianTime) return ruleError(ErrTimeTooOld, str) } - } - // The height of this block is one more than the referenced previous - // block. 
- blockHeight := prevNode.Height() + 1 + // Testnet4 only: Check timestamp against prev for + // difficulty-adjustment blocks to prevent timewarp attacks. + if params.EnforceBIP94 { + err := assertNoTimeWarp( + blockHeight, c.BlocksPerRetarget(), + header.Timestamp, + time.Unix(prevNode.Timestamp(), 0), + ) + if err != nil { + return err + } + } + } // Reject outdated block versions once a majority of the network // has upgraded. These were originally voted on by BIP0034, // BIP0065, and BIP0066. - params := c.ChainParams() if header.Version < 2 && blockHeight >= params.BIP0034Height || header.Version < 3 && blockHeight >= params.BIP0066Height || header.Version < 4 && blockHeight >= params.BIP0065Height { @@ -761,6 +780,30 @@ func CheckBlockHeaderContext(header *wire.BlockHeader, prevNode HeaderCtx, return nil } +// assertNoTimeWarp checks the timestamp of the block against the previous +// block's timestamp for the first block of each difficulty adjustment interval +// to prevent timewarp attacks. This is defined in BIP-0094. +func assertNoTimeWarp(blockHeight, blocksPerReTarget int32, headerTimestamp, + prevBlockTimestamp time.Time) error { + + // If this isn't the first block of the difficulty adjustment interval, + // then we can exit early. + if blockHeight%blocksPerReTarget != 0 { + return nil + } + + // Check timestamp for the first block of each difficulty adjustment + // interval, except the genesis block. + if headerTimestamp.Before(prevBlockTimestamp.Add(-maxTimeWarp)) { + str := "block's timestamp %v is too early on diff adjustment " + + "block %v" + str = fmt.Sprintf(str, headerTimestamp, prevBlockTimestamp) + return ruleError(ErrTimewarpAttack, str) + } + + return nil +} + // checkBlockContext performs several validation checks on the block which depend // on its position within the block chain. 
// @@ -1230,7 +1273,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi if csvState == ThresholdActive { // If the CSV soft-fork is now active, then modify the // scriptFlags to ensure that the CSV op code is properly - // validated during the script checks bleow. + // validated during the script checks below. scriptFlags |= txscript.ScriptVerifyCheckSequenceVerify // We obtain the MTP of the *previous* block in order to diff --git a/blockchain/validate_rapid_test.go b/blockchain/validate_rapid_test.go new file mode 100644 index 0000000000..11b13571f5 --- /dev/null +++ b/blockchain/validate_rapid_test.go @@ -0,0 +1,339 @@ +package blockchain + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" +) + +// TestAssertNoTimeWarpProperties uses property-based testing to verify that +// the assertNoTimeWarp function correctly implements the BIP-94 rule. This +// helps catch edge cases that might be missed with regular unit tests. +func TestAssertNoTimeWarpProperties(t *testing.T) { + t.Parallel() + + // Define constant for blocks per retarget (similar to Bitcoin's 2016). + const blocksPerRetarget = 2016 + + // Rapid test that only the retarget blocks are checked. + t.Run("only_checks_retarget_blocks", rapid.MakeCheck(func(t *rapid.T) { + // Generate block height that is not a retarget block. + height := rapid.Int32Range( + 1, 1000000, + ).Filter(func(h int32) bool { + return h%blocksPerRetarget != 0 + }).Draw(t, "height") + + // Even with an "extreme" time warp, the function should return + // nil because it only applies the check to retarget blocks. + // Define headerTime as the Unix epoch start. + headerTime := time.Unix(0, 0) + + // Define prevBlockTime as the current time (creating an + // extreme gap). 
+ prevBlockTime := time.Now() + + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.NoError( + t, err, "expected nil error for non-retarget block "+ + "but got: %v.", err, + ) + })) + + // Rapid test that retarget blocks with acceptable timestamps pass + // validation. + t.Run("valid_timestamps_pass", rapid.MakeCheck(func(t *rapid.T) { + // Generate block height that is a retarget block + height := rapid.Int32Range(blocksPerRetarget, 1000000). + Filter(func(h int32) bool { + return h%blocksPerRetarget == 0 + }).Draw(t, "height") + + // Generate a previous block timestamp. + prevTimeUnix := rapid.Int64Range( + 1000000, 2000000000, + ).Draw(t, "prev_time") + prevBlockTime := time.Unix(prevTimeUnix, 0) + + // Generate a header timestamp that is not more than + // maxTimeWarp earlier than the previous block timestamp. + minValidHeaderTime := prevBlockTime.Add( + -maxTimeWarp, + ).Add(time.Second) + + // Generate any valid header time between the minimum valid + // time and prevBlockTime to ensure it passes the time warp + // check. + minTimeUnix := minValidHeaderTime.Unix() + maxTimeUnix := prevBlockTime.Unix() + + // Ensure min is always less than max. + if minTimeUnix >= maxTimeUnix { + // If a valid range cannot be generated, use the + // previous block time which is guaranteed to pass the + // test. 
+ headerTime := prevBlockTime + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.NoError(t, err, "expected valid timestamps to "+ + "pass but got: %v.") + return + } + + headerTimeUnix := rapid.Int64Range( + minTimeUnix, maxTimeUnix, + ).Draw(t, "header_time_unix") + headerTime := time.Unix(headerTimeUnix, 0) + + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.NoError(t, err, "expected valid timestamps to pass but "+ + "got: %v.") + })) + + // Rapid test that retarget blocks with invalid timestamps fail + t.Run("invalid_timestamps_fail", rapid.MakeCheck(func(t *rapid.T) { + // validation. + // Generate block height that is a retarget block. + height := rapid.Int32Range(blocksPerRetarget, 1000000). + Filter(func(h int32) bool { + return h%blocksPerRetarget == 0 + }).Draw(t, "height") + + // Generate a previous block timestamp. + prevTimeUnix := rapid.Int64Range( + 1000000, 2000000000, + ).Draw(t, "prev_time") + prevBlockTime := time.Unix(prevTimeUnix, 0) + + // Invalid header timestamp: more than maxTimeWarp earlier than + // prevBlockTime Ensure we generate a time that is definitely + // beyond the maxTimeWarp (which is 600 seconds) by using at + // least 601 seconds. + invalidDelta := time.Duration( + -rapid.Int64Range(601, 86400).Draw(t, "invalid_delta"), + ) * time.Second + headerTime := prevBlockTime.Add(invalidDelta) + + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.Error(t, err, "expected error for time-warped header but got nil.") + + // Verify the correct error type is returned. + require.IsType( + t, RuleError{}, err, "expected RuleError but got: %T.", err, + ) + + // Verify it's the expected ErrTimewarpAttack error. 
+ ruleErr, ok := err.(RuleError) + require.True(t, ok, "expected RuleError but got: %T.", err) + require.Equal( + t, ErrTimewarpAttack, ruleErr.ErrorCode, "expected "+ + "ErrTimewarpAttack but got: %v.", ruleErr.ErrorCode, + ) + })) + + // Test the edge case right at the boundary of maxTimeWarp. + t.Run("boundary_timestamps", rapid.MakeCheck(func(t *rapid.T) { + // Generate block height that is a retarget block. + height := rapid.Int32Range(blocksPerRetarget, 1000000). + Filter(func(h int32) bool { + return h%blocksPerRetarget == 0 + }).Draw(t, "height") + + // Generate a previous block timestamp with enough padding + // to avoid time.Time precision issues. + prevTimeUnix := rapid.Int64Range( + 1000000, 2000000000, + ).Draw(t, "prev_time") + prevBlockTime := time.Unix(prevTimeUnix, 0) + + // Test exact boundary: headerTime is exactly maxTimeWarp earlier. + headerTime := prevBlockTime.Add(-maxTimeWarp) + + // Check the actual implementation (looking at + // validate.go:797-798) The comparison is + // "headerTimestamp.Before(prevBlockTimestamp.Add(-maxTimeWarp))" + // This means at exact boundary (headerTime == + // prevBlockTime.Add(-maxTimeWarp)) it should NOT fail, since + // Before() is strict < not <=. + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.NoError( + t, err, "expected no error at exact boundary but "+ + "got: %v.", + ) + + // Test 1 nanosecond BEYOND the boundary (which should fail). + headerTime = prevBlockTime.Add(-maxTimeWarp).Add( + -time.Nanosecond, + ) + + // This should fail as it is just beyond the maxTimeWarp limit. + err = assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.Error( + t, err, "expected error just beyond boundary but "+ + "got nil.", + ) + })) +} + +// TestAssertNoTimeWarpInvariants uses property-based testing to verify the +// invariants of the assertNoTimeWarp function regardless of inputs. 
+func TestAssertNoTimeWarpInvariants(t *testing.T) { + t.Parallel() + + // Invariant: The function should never panic regardless of input. + t.Run("never_panics", rapid.MakeCheck(func(t *rapid.T) { + // Generate any possible inputs + height := rapid.Int32().Draw(t, "height") + blocksPerRetarget := rapid.Int32Range( + 1, 10000, + ).Draw(t, "blocks_per_retarget") + headerTimeUnix := rapid.Int64().Draw(t, "header_time") + prevTimeUnix := rapid.Int64().Draw(t, "prev_time") + + headerTime := time.Unix(headerTimeUnix, 0) + prevBlockTime := time.Unix(prevTimeUnix, 0) + + // The function should never panic regardless of input + _ = assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + })) + + // Invariant: For non-retarget blocks, the function always returns nil. + // nolint:lll. + t.Run("non_retarget_blocks_return_nil", rapid.MakeCheck(func(t *rapid.T) { + // Generate height and blocksPerRetarget such that height is + // not a multiple of blocksPerRetarget. + blocksPerRetarget := rapid.Int32Range(2, 10000).Draw( + t, "blocks_per_retarget", + ) + + // Ensure height is not a multiple of blocksPerRetarget. + remainders := rapid.Int32Range(1, blocksPerRetarget-1).Draw( + t, "remainder", + ) + height := rapid.Int32Range(0, 1000000).Draw( + t, "base", + )*blocksPerRetarget + remainders + + // Generate any timestamps, even invalid ones. + headerTime := time.Unix(rapid.Int64().Draw(t, "header_time"), 0) + prevBlockTime := time.Unix( + rapid.Int64().Draw(t, "prev_time"), 0, + ) + + // For non-retarget blocks, should always return nil. + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.NoError( + t, err, "expected nil for non-retarget block "+ + "(height=%d, blocks_per_retarget=%d) but "+ + "got: %v.", height, blocksPerRetarget, err, + ) + })) +} + +// TestAssertNoTimeWarpSecurity tests the security properties of the +// assertNoTimeWarp function. 
This verifies that the function properly prevents +// "time warp" attacks where miners might attempt to manipulate timestamps for +// difficulty adjustment blocks. +func TestAssertNoTimeWarpSecurity(t *testing.T) { + t.Parallel() + + const blocksPerRetarget = 2016 + + // Test that all difficulty adjustment blocks are protected from timewarp. + t.Run("all_retarget_blocks_protected", rapid.MakeCheck(func(t *rapid.T) { //nolint:lll + // Generate any retarget block height (multiples of + // blocksPerRetarget). + multiplier := rapid.Int32Range(1, 1000).Draw(t, "multiplier") + height := multiplier * blocksPerRetarget + + // Generate a reasonable previous block timestamp. + prevTimeUnix := rapid.Int64Range( + 1000000, 2000000000, + ).Draw(t, "prev_time") + prevBlockTime := time.Unix(prevTimeUnix, 0) + + // Generate a test header timestamp that's significantly before + // the previous timestamp This should always be rejected for + // retarget blocks. + timeDiff := rapid.Int64Range( + int64(maxTimeWarp+time.Second), + int64(maxTimeWarp+time.Hour*24*7), + ).Draw(t, "warp_amount") + invalidDelta := time.Duration(-timeDiff) + headerTime := prevBlockTime.Add(invalidDelta) + + // This should always fail with ErrTimewarpAttack for any retarget block. + err := assertNoTimeWarp( + height, blocksPerRetarget, headerTime, prevBlockTime, + ) + require.Error( + t, err, "security vulnerability: Time warp attack not "+ + "detected for height %d.", height, + ) + + // Verify it's the expected error type. + ruleErr, ok := err.(RuleError) + require.True(t, ok, "expected RuleError but got: %T.", err) + require.Equal( + t, ErrTimewarpAttack, ruleErr.ErrorCode, + "expected ErrTimewarpAttack but got: %v.", + ruleErr.ErrorCode, + ) + })) + + // Test that non-adjustment blocks are not subject to the same check. + // nolint:lll. + t.Run("non_retarget_blocks_not_affected", rapid.MakeCheck(func(t *rapid.T) { + // Generate any non-retarget block height. 
+		baseHeight := rapid.Int32Range(0, 1000).Draw(
+			t, "base_height",
+		) * blocksPerRetarget
+		offset := rapid.Int32Range(1, blocksPerRetarget-1).Draw(
+			t, "offset",
+		)
+		height := baseHeight + offset
+
+		// Generate a reasonable previous block timestamp.
+		prevTimeUnix := rapid.Int64Range(1000000, 2000000000).Draw(
+			t, "prev_time",
+		)
+		prevBlockTime := time.Unix(prevTimeUnix, 0)
+
+		// Generate a test header timestamp that's significantly before
+		// the previous timestamp. Even though this would be rejected
+		// for retarget blocks, it shouldn't matter here.
+		timeDiff := rapid.Int64Range(
+			int64(maxTimeWarp+time.Second),
+			int64(maxTimeWarp+time.Hour*24*7),
+		).Draw(t, "warp_amount")
+		invalidDelta := time.Duration(-timeDiff)
+		headerTime := prevBlockTime.Add(invalidDelta)
+
+		// This should NOT fail for non-retarget blocks, even with
+		// extreme timewarp.
+		err := assertNoTimeWarp(
+			height, blocksPerRetarget, headerTime, prevBlockTime,
+		)
+		require.NoError(
+			t, err, "non-retarget blocks should not be affected "+
+				"by time warp check, but got: %v.", err,
+		)
+	}))
+}
diff --git a/blockchain/versionbits.go b/blockchain/versionbits.go
index 371d4f20e0..493787a7d7 100644
--- a/blockchain/versionbits.go
+++ b/blockchain/versionbits.go
@@ -134,6 +134,13 @@ func (c bitConditionChecker) IsSpeedy() bool {
 	return false
 }
 
+// ForceActive returns whether the deployment should be forced to transition
+// to the active state. This is useful on certain testnets, where we'd like
+// for a deployment to always be active.
+func (c bitConditionChecker) ForceActive(node *blockNode) bool {
+	return false
+}
+
 // deploymentChecker provides a thresholdConditionChecker which can be used to
 // test a specific deployment rule. This is required for properly detecting
 // and activating consensus rule changes.
@@ -207,15 +214,9 @@ func (c deploymentChecker) MinerConfirmationWindow() uint32 {
 }
 
 // EligibleToActivate returns true if a custom deployment can transition from
-// the LockedIn to the Active state. For normal deployments, this always
-// returns true. However, some deployments add extra rules like a minimum
-// activation height, which can be abstracted into a generic arbitrary check at
-// the final state via this method.
-//
-// This implementation always returns true, unless a minimum activation height
-// is specified.
-//
-// This is part of the thresholdConditionChecker interface implementation.
+// the LockedIn to the Active state. In addition to the traditional minimum
+// activation height (MinActivationHeight), an optional AlwaysActiveHeight can
+// force the deployment to be active after a specified height.
 func (c deploymentChecker) EligibleToActivate(blkNode *blockNode) bool {
 	// No activation height, so it's always ready to go.
 	if c.deployment.MinActivationHeight == 0 {
@@ -249,6 +250,28 @@ func (c deploymentChecker) Condition(node *blockNode) (bool, error) {
 		nil
 }
 
+// ForceActive returns whether the deployment should be forced to transition
+// to the active state. This is useful on certain testnets, where we'd like
+// for a deployment to always be active.
+func (c deploymentChecker) ForceActive(node *blockNode) bool {
+	if node == nil {
+		return false
+	}
+
+	// If the deployment has a nonzero AlwaysActiveHeight and the next
+	// block's height is at or above that threshold, then force the state
+	// to Active.
+ effectiveHeight := c.deployment.EffectiveAlwaysActiveHeight() + if uint32(node.height)+1 >= effectiveHeight { + log.Debugf("Force activating deployment: next block "+ + "height %d >= EffectiveAlwaysActiveHeight %d", + uint32(node.height)+1, effectiveHeight) + return true + } + + return false +} + // calcNextBlockVersion calculates the expected version of the block after the // passed previous block node based on the state of started and locked in // rule change deployments. diff --git a/btcd.go b/btcd.go index c7f292cbc9..1d9a1e5f6b 100644 --- a/btcd.go +++ b/btcd.go @@ -14,6 +14,7 @@ import ( "runtime" "runtime/debug" "runtime/pprof" + "runtime/trace" "github.com/btcsuite/btcd/blockchain/indexers" "github.com/btcsuite/btcd/database" @@ -100,6 +101,18 @@ func btcdMain(serverChan chan<- *server) error { defer runtime.GC() } + // Write execution trace if requested. + if cfg.TraceProfile != "" { + f, err := os.Create(cfg.TraceProfile) + if err != nil { + btcdLog.Errorf("Unable to create execution trace: %v", err) + return err + } + trace.Start(f) + defer f.Close() + defer trace.Stop() + } + // Perform upgrades to btcd as new versions require it. 
if err := doUpgrades(); err != nil { btcdLog.Errorf("%v", err) diff --git a/btcec/btcec_test.go b/btcec/btcec_test.go index f5d9395274..b08155e2f0 100644 --- a/btcec/btcec_test.go +++ b/btcec/btcec_test.go @@ -651,14 +651,12 @@ func TestScalarMultRand(t *testing.T) { _, err := rand.Read(data) if err != nil { t.Fatalf("failed to read random data at %d", i) - break } x, y = s256.ScalarMult(x, y, data) exponent.Mul(exponent, new(big.Int).SetBytes(data)) xWant, yWant := s256.ScalarBaseMult(exponent.Bytes()) if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 { t.Fatalf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, data, x, y, xWant, yWant) - break } } } @@ -814,7 +812,6 @@ func TestSplitKRand(t *testing.T) { _, err := rand.Read(bytesK) if err != nil { t.Fatalf("failed to read random data at %d", i) - break } k := new(big.Int).SetBytes(bytesK) k1, k2, k1Sign, k2Sign := splitK(bytesK) diff --git a/btcec/ecdsa/bench_test.go b/btcec/ecdsa/bench_test.go index 3e27994cd4..57864af9fd 100644 --- a/btcec/ecdsa/bench_test.go +++ b/btcec/ecdsa/bench_test.go @@ -170,7 +170,7 @@ func BenchmarkSignCompact(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = SignCompact(privKey, msgHash, true) + _ = SignCompact(privKey, msgHash, true) } } diff --git a/btcec/ecdsa/signature.go b/btcec/ecdsa/signature.go index 11c6267caf..a2574f8794 100644 --- a/btcec/ecdsa/signature.go +++ b/btcec/ecdsa/signature.go @@ -233,9 +233,9 @@ func ParseDERSignature(sigStr []byte) (*Signature, error) { // <(byte of 27+public key solution)+4 if compressed >< padded bytes for signature R> // where the R and S parameters are padde up to the bitlengh of the curve. 
func SignCompact(key *btcec.PrivateKey, hash []byte, - isCompressedKey bool) ([]byte, error) { + isCompressedKey bool) []byte { - return secp_ecdsa.SignCompact(key, hash, isCompressedKey), nil + return secp_ecdsa.SignCompact(key, hash, isCompressedKey) } // RecoverCompact verifies the compact signature "signature" of "hash" for the diff --git a/btcec/ecdsa/signature_test.go b/btcec/ecdsa/signature_test.go index f36e15db89..7a457b1e66 100644 --- a/btcec/ecdsa/signature_test.go +++ b/btcec/ecdsa/signature_test.go @@ -479,11 +479,7 @@ func testSignCompact(t *testing.T, tag string, curve *btcec.KoblitzCurve, priv, _ := btcec.NewPrivateKey() hashed := []byte("testing") - sig, err := SignCompact(priv, hashed, isCompressed) - if err != nil { - t.Errorf("%s: error signing: %s", tag, err) - return - } + sig := SignCompact(priv, hashed, isCompressed) pk, wasCompressed, err := RecoverCompact(sig, hashed) if err != nil { diff --git a/btcec/ellswift/ellswift.go b/btcec/ellswift/ellswift.go new file mode 100644 index 0000000000..e08e0d598b --- /dev/null +++ b/btcec/ellswift/ellswift.go @@ -0,0 +1,404 @@ +package ellswift + +import ( + "crypto/rand" + "fmt" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +var ( + // c is sqrt(-3) (mod p) + c btcec.FieldVal + + cBytes = [32]byte{ + 0x0a, 0x2d, 0x2b, 0xa9, 0x35, 0x07, 0xf1, 0xdf, + 0x23, 0x37, 0x70, 0xc2, 0xa7, 0x97, 0x96, 0x2c, + 0xc6, 0x1f, 0x6d, 0x15, 0xda, 0x14, 0xec, 0xd4, + 0x7d, 0x8d, 0x27, 0xae, 0x1c, 0xd5, 0xf8, 0x52, + } + + ellswiftTag = []byte("bip324_ellswift_xonly_ecdh") + + // ErrPointNotOnCurve is returned when we're unable to find a point on the + // curve. + ErrPointNotOnCurve = fmt.Errorf("point does not exist on secp256k1 curve") +) + +func init() { + c.SetByteSlice(cBytes[:]) +} + +// XSwiftEC() takes two field elements (u, t) and gives us an x-coordinate that +// is on the secp256k1 curve. 
This is used to take an ElligatorSwift-encoded +// public key (u, t) and return the point on the curve it maps to. This +// function returns an error if there is no valid x-coordinate. +// +// TODO: Rewrite these to avoid new(btcec.FieldVal).Add(...) usage? +// NOTE: u, t MUST be normalized. The result x is normalized. +func XSwiftEC(u, t *btcec.FieldVal) (*btcec.FieldVal, error) { + // 1. Let u' = u if u != 0, else = 1 + if u.IsZero() { + u.SetInt(1) + } + + // 2. Let t' = t if t != 0, else 1 + if t.IsZero() { + t.SetInt(1) + } + + // 3. Let t'' = t' if g(u') != -(t'^2); t'' = 2t' otherwise + // g(x) = x^3 + ax + b, a = 0, b = 7 + + // Calculate g(u'). + gu := new(btcec.FieldVal).SquareVal(u).Mul(u).AddInt(7).Normalize() + + // Calculate the right-hand side of the equation (-t'^2) + rhs := new(btcec.FieldVal).SquareVal(t).Negate(1).Normalize() + + if gu.Equals(rhs) { + // t'' = 2t' + t = t.Add(t) + } + + // 4. X = (u'^3 + b - t''^2) / (2t'') + tSquared := new(btcec.FieldVal).SquareVal(t).Negate(1) + xNum := new(btcec.FieldVal).SquareVal(u).Mul(u).AddInt(7).Add(tSquared) + xDenom := new(btcec.FieldVal).Add2(t, t).Inverse() + x := xNum.Mul(xDenom) + + // 5. Y = (X+t'') / (u' * c) + yNum := new(btcec.FieldVal).Add2(x, t) + yDenom := new(btcec.FieldVal).Mul2(u, &c).Inverse() + y := yNum.Mul(yDenom) + + // 6. Return the first x in (u'+4Y^2, -X/2Y - u'/2, X/2Y - u'/2) for which + // x^3 + b is square. + + // 6a. Calculate u' +4Y^2 and determine if x^3+7 is square. + ySqr := new(btcec.FieldVal).Add(y).Mul(y) + quadYSqr := new(btcec.FieldVal).Add(ySqr).MulInt(4) + firstX := new(btcec.FieldVal).Add(u).Add(quadYSqr) + + // Determine if firstX is on the curve. + if isXOnCurve(firstX) { + return firstX.Normalize(), nil + } + + // 6b. 
Calculate -X/2Y - u'/2 and determine if x^3 + 7 is square + doubleYInv := new(btcec.FieldVal).Add(y).Add(y).Inverse() + xDivDoubleYInv := new(btcec.FieldVal).Add(x).Mul(doubleYInv) + negXDivDoubleYInv := new(btcec.FieldVal).Add(xDivDoubleYInv).Negate(1) + invTwo := new(btcec.FieldVal).AddInt(2).Inverse() + negUDivTwo := new(btcec.FieldVal).Add(u).Mul(invTwo).Negate(1) + secondX := new(btcec.FieldVal).Add(negXDivDoubleYInv).Add(negUDivTwo) + + // Determine if secondX is on the curve. + if isXOnCurve(secondX) { + return secondX.Normalize(), nil + } + + // 6c. Calculate X/2Y -u'/2 and determine if x^3 + 7 is square + thirdX := new(btcec.FieldVal).Add(xDivDoubleYInv).Add(negUDivTwo) + + // Determine if thirdX is on the curve. + if isXOnCurve(thirdX) { + return thirdX.Normalize(), nil + } + + // Should have found a square above. + return nil, fmt.Errorf("no calculated x-values were square") +} + +// isXOnCurve returns true if there is a corresponding y-value for the passed +// x-coordinate. +func isXOnCurve(x *btcec.FieldVal) bool { + y := new(btcec.FieldVal).Add(x).Square().Mul(x).AddInt(7) + return new(btcec.FieldVal).SquareRootVal(y) +} + +// XSwiftECInv takes two field elements (u, x) (where x is on the curve) and +// returns a field element t. This is used to take a random field element u and +// a point on the curve and return a field element t where (u, t) forms the +// ElligatorSwift encoding. +// +// TODO: Rewrite these to avoid new(btcec.FieldVal).Add(...) usage? +// NOTE: u, x MUST be normalized. The result `t` is normalized. 
+func XSwiftECInv(u, x *btcec.FieldVal, caseNum int) *btcec.FieldVal { + v := new(btcec.FieldVal) + s := new(btcec.FieldVal) + twoInv := new(btcec.FieldVal).AddInt(2).Inverse() + + if caseNum&2 == 0 { + // If lift_x(-x-u) succeeds, return None + _, found := liftX(new(btcec.FieldVal).Add(x).Add(u).Negate(2)) + if found { + return nil + } + + // Let v = x + v.Add(x) + + // Let s = -(u^3+7)/(u^2 + uv + v^2) + uSqr := new(btcec.FieldVal).Add(u).Square() + vSqr := new(btcec.FieldVal).Add(v).Square() + sDenom := new(btcec.FieldVal).Add(u).Mul(v).Add(uSqr).Add(vSqr) + sNum := new(btcec.FieldVal).Add(uSqr).Mul(u).AddInt(7) + + s = sDenom.Inverse().Mul(sNum).Negate(1) + } else { + // Let s = x - u + negU := new(btcec.FieldVal).Add(u).Negate(1) + s.Add(x).Add(negU).Normalize() + + // If s = 0, return None + if s.IsZero() { + return nil + } + + // Let r be the square root of -s(4(u^3 + 7) + 3u^2s) + uSqr := new(btcec.FieldVal).Add(u).Square() + lhs := new(btcec.FieldVal).Add(uSqr).Mul(u).AddInt(7).MulInt(4) + rhs := new(btcec.FieldVal).Add(uSqr).MulInt(3).Mul(s) + + // Add the two terms together and multiply by -s. + lhs.Add(rhs).Normalize().Mul(s).Negate(1) + + r := new(btcec.FieldVal) + if !r.SquareRootVal(lhs) { + // If no square root was found, return None. + return nil + } + + if caseNum&1 == 1 && r.Normalize().IsZero() { + // If case & 1 = 1 and r = 0, return None. + return nil + } + + // Let v = (r/s - u)/2 + sInv := new(btcec.FieldVal).Add(s).Inverse() + uNeg := new(btcec.FieldVal).Add(u).Negate(1) + + v.Add(r).Mul(sInv).Add(uNeg).Mul(twoInv) + } + + w := new(btcec.FieldVal) + + if !w.SquareRootVal(s) { + // If no square root was found, return None. + return nil + } + + switch caseNum & 5 { + case 0: + // If case & 5 = 0, return -w(u(1-c)/2 + v) + oneMinusC := new(btcec.FieldVal).Add(&c).Negate(1).AddInt(1) + t := new(btcec.FieldVal).Add(u).Mul(oneMinusC).Mul(twoInv).Add(v). 
+ Mul(w).Negate(1).Normalize() + + return t + + case 1: + // If case & 5 = 1, return w(u(1+c)/2 + v) + onePlusC := new(btcec.FieldVal).Add(&c).AddInt(1) + t := new(btcec.FieldVal).Add(u).Mul(onePlusC).Mul(twoInv).Add(v). + Mul(w).Normalize() + + return t + + case 4: + // If case & 5 = 4, return w(u(1-c)/2 + v) + oneMinusC := new(btcec.FieldVal).Add(&c).Negate(1).AddInt(1) + t := new(btcec.FieldVal).Add(u).Mul(oneMinusC).Mul(twoInv).Add(v). + Mul(w).Normalize() + + return t + + case 5: + // If case & 5 = 5, return -w(u(1+c)/2 + v) + onePlusC := new(btcec.FieldVal).Add(&c).AddInt(1) + t := new(btcec.FieldVal).Add(u).Mul(onePlusC).Mul(twoInv).Add(v). + Mul(w).Negate(1).Normalize() + + return t + } + + panic("should not reach here") +} + +// XElligatorSwift takes the x-coordinate of a point on secp256k1 and generates +// ElligatorSwift encoding of that point composed of two field elements (u, t). +// NOTE: x MUST be normalized. The return values u, t are normalized. +func XElligatorSwift(x *btcec.FieldVal) (*btcec.FieldVal, *btcec.FieldVal, + error) { + + // We'll choose a random `u` value and a random case so that we can + // generate a `t` value. + for { + // Choose random u value. + var randUBytes [32]byte + _, err := rand.Read(randUBytes[:]) + if err != nil { + return nil, nil, err + } + + u := new(btcec.FieldVal) + overflow := u.SetBytes(&randUBytes) + if overflow == 1 { + u.Normalize() + } + + // Choose a random case in the interval [0, 7] + var randCaseByte [1]byte + _, err = rand.Read(randCaseByte[:]) + if err != nil { + return nil, nil, err + } + + caseNum := randCaseByte[0] & 7 + + // Find t, if none is found, continue with the loop. + t := XSwiftECInv(u, x, int(caseNum)) + if t != nil { + return u, t, nil + } + } +} + +// EllswiftCreate generates a random private key and returns that along with +// the ElligatorSwift encoding of its corresponding public key. 
+func EllswiftCreate() (*btcec.PrivateKey, [64]byte, error) { + var randPrivKeyBytes [32]byte + + // Generate a random private key + _, err := rand.Read(randPrivKeyBytes[:]) + if err != nil { + return nil, [64]byte{}, err + } + + privKey, _ := btcec.PrivKeyFromBytes(randPrivKeyBytes[:]) + + // Fetch the x-coordinate of the public key. + x := getXCoord(privKey) + + // Get the ElligatorSwift encoding of the public key. + u, t, err := XElligatorSwift(x) + if err != nil { + return nil, [64]byte{}, err + } + + uBytes := u.Bytes() + tBytes := t.Bytes() + + // ellswift_pub = bytes(u) || bytes(t), its encoding as 64 bytes + var ellswiftPub [64]byte + copy(ellswiftPub[0:32], (*uBytes)[:]) + copy(ellswiftPub[32:64], (*tBytes)[:]) + + // Return (priv, ellswift_pub) + return privKey, ellswiftPub, nil +} + +// EllswiftECDHXOnly takes the ElligatorSwift-encoded public key of a +// counter-party and performs ECDH with our private key. +func EllswiftECDHXOnly(ellswiftTheirs [64]byte, privKey *btcec.PrivateKey) ( + [32]byte, error) { + + // Let u = int(ellswift_theirs[:32]) mod p. + // Let t = int(ellswift_theirs[32:]) mod p. 
+ uBytesTheirs := ellswiftTheirs[0:32] + tBytesTheirs := ellswiftTheirs[32:64] + + var uTheirs btcec.FieldVal + overflow := uTheirs.SetByteSlice(uBytesTheirs[:]) + if overflow { + uTheirs.Normalize() + } + + var tTheirs btcec.FieldVal + overflow = tTheirs.SetByteSlice(tBytesTheirs[:]) + if overflow { + tTheirs.Normalize() + } + + // Calculate bytes(x(privâ‹…lift_x(XSwiftEC(u, t)))) + xTheirs, err := XSwiftEC(&uTheirs, &tTheirs) + if err != nil { + return [32]byte{}, err + } + + pubKey, found := liftX(xTheirs) + if !found { + return [32]byte{}, ErrPointNotOnCurve + } + + var pubJacobian btcec.JacobianPoint + pubKey.AsJacobian(&pubJacobian) + + var sharedPoint btcec.JacobianPoint + btcec.ScalarMultNonConst(&privKey.Key, &pubJacobian, &sharedPoint) + sharedPoint.ToAffine() + + return *sharedPoint.X.Bytes(), nil +} + +// getXCoord fetches the corresponding public key's x-coordinate given a +// private key. +func getXCoord(privKey *btcec.PrivateKey) *btcec.FieldVal { + var result btcec.JacobianPoint + btcec.ScalarBaseMultNonConst(&privKey.Key, &result) + result.ToAffine() + return &result.X +} + +// liftX returns the point P with x-coordinate `x` and even y-coordinate. If a +// point exists on the curve, it returns true and false otherwise. +// TODO: Use quadratic residue formula instead (see: BIP340)? +func liftX(x *btcec.FieldVal) (*btcec.PublicKey, bool) { + ySqr := new(btcec.FieldVal).Add(x).Square().Mul(x).AddInt(7) + + y := new(btcec.FieldVal) + if !y.SquareRootVal(ySqr) { + // If we've reached here, the point does not exist on the curve. + return nil, false + } + + if !y.Normalize().IsOdd() { + return btcec.NewPublicKey(x, y), true + } + + // Negate y if it's odd. + if !y.Negate(1).Normalize().IsOdd() { + return btcec.NewPublicKey(x, y), true + } + + return nil, false +} + +// V2Ecdh performs x-only ecdh and returns a shared secret composed of a tagged +// hash which itself is composed of two ElligatorSwift-encoded public keys and +// the x-only ecdh point. 
+func V2Ecdh(priv *btcec.PrivateKey, ellswiftTheirs, ellswiftOurs [64]byte, + initiating bool) (*chainhash.Hash, error) { + + ecdhPoint, err := EllswiftECDHXOnly(ellswiftTheirs, priv) + if err != nil { + return nil, err + } + + if initiating { + // Initiating, place our public key encoding first. + var msg []byte + msg = append(msg, ellswiftOurs[:]...) + msg = append(msg, ellswiftTheirs[:]...) + msg = append(msg, ecdhPoint[:]...) + return chainhash.TaggedHash(ellswiftTag, msg), nil + } + + msg := make([]byte, 0, 64+64+32) + msg = append(msg, ellswiftTheirs[:]...) + msg = append(msg, ellswiftOurs[:]...) + msg = append(msg, ecdhPoint[:]...) + return chainhash.TaggedHash(ellswiftTag, msg), nil +} diff --git a/btcec/ellswift/ellswift_test.go b/btcec/ellswift/ellswift_test.go new file mode 100644 index 0000000000..87433d60f8 --- /dev/null +++ b/btcec/ellswift/ellswift_test.go @@ -0,0 +1,846 @@ +package ellswift + +import ( + "encoding/hex" + "testing" + + "github.com/btcsuite/btcd/btcec/v2" +) + +// setHex decodes the passed big-endian hex string into the internal field value +// representation. Only the first 32-bytes are used. +// +// This is NOT constant time. +// +// The field value is returned to support chaining. This enables syntax like: +// f := new(FieldVal).SetHex("0abc").Add(1) so that f = 0x0abc + 1 +func setHex(hexString string) *btcec.FieldVal { + if len(hexString)%2 != 0 { + hexString = "0" + hexString + } + bytes, _ := hex.DecodeString(hexString) + + var f btcec.FieldVal + f.SetByteSlice(bytes) + + return &f +} + +// TestXSwiftECVectors checks the BIP324 test vectors for the XSwiftEC function. 
+func TestXSwiftECVectors(t *testing.T) { + tests := []struct { + ellswift string + expectedX string + }{ + { + ellswift: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + expectedX: "edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c", + }, + { + ellswift: "000000000000000000000000000000000000000000000000000000000000000001d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771", + expectedX: "b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c", + }, + { + ellswift: "000000000000000000000000000000000000000000000000000000000000000082277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f", + expectedX: "f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2", + }, + { + ellswift: "00000000000000000000000000000000000000000000000000000000000000008421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0", + expectedX: "9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441", + expectedX: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000d19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42", + expectedX: "70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5", + expectedX: "50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b", + }, + { + ellswift: 
"0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d", + expectedX: "1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7", + expectedX: "12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e", + }, + { + ellswift: "0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9", + expectedX: "7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783", + }, + { + ellswift: "0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f8530000000000000000000000000000000000000000000000000000000000000000", + expectedX: "532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688", + }, + { + ellswift: "0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f853fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688", + }, + { + ellswift: "0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646", + expectedX: "74e880b3ffd18fe3cddf7902522551ddf97fa4a35a3cfda8197f947081a57b8f", + }, + { + ellswift: "0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896", + expectedX: "377b643fce2271f64e5c8101566107c1be4980745091783804f654781ac9217c", + }, + { + ellswift: "123658444f32be8f02ea2034afa7ef4bbe8adc918ceb49b12773b625f490b368ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8dc5fe11", + expectedX: "ed16d65cf3a9538fcb2c139f1ecbc143ee14827120cbc2659e667256800b8142", + }, + { + ellswift: 
"146f92464d15d36e35382bd3ca5b0f976c95cb08acdcf2d5b3570617990839d7ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3145e93b", + expectedX: "0d5cd840427f941f65193079ab8e2e83024ef2ee7ca558d88879ffd879fb6657", + }, + { + ellswift: "15fdf5cf09c90759add2272d574d2bb5fe1429f9f3c14c65e3194bf61b82aa73ffffffffffffffffffffffffffffffffffffffffffffffffffffffff04cfd906", + expectedX: "16d0e43946aec93f62d57eb8cde68951af136cf4b307938dd1447411e07bffe1", + }, + { + ellswift: "1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d50000000000000000000000000000000000000000000000000000000000000000", + expectedX: "025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c", + }, + { + ellswift: "1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d5fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c", + }, + { + ellswift: "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "98bec3b2a351fa96cfd191c1778351931b9e9ba9ad1149f6d9eadca80981b801", + }, + { + ellswift: "4056a34a210eec7892e8820675c860099f857b26aad85470ee6d3cf1304a9dcf375e70374271f20b13c9986ed7d3c17799698cfc435dbed3a9f34b38c823c2b4", + expectedX: "868aac2003b29dbcad1a3e803855e078a89d16543ac64392d122417298cec76e", + }, + { + ellswift: "4197ec3723c654cfdd32ab075506648b2ff5070362d01a4fff14b336b78f963fffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3ab1e95", + expectedX: "ba5a6314502a8952b8f456e085928105f665377a8ce27726a5b0eb7ec1ac0286", + }, + { + ellswift: "47eb3e208fedcdf8234c9421e9cd9a7ae873bfbdbc393723d1ba1e1e6a8e6b24ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7cd12cb1", + expectedX: "d192d52007e541c9807006ed0468df77fd214af0a795fe119359666fdcf08f7c", + }, + { + ellswift: 
"5eb9696a2336fe2c3c666b02c755db4c0cfd62825c7b589a7b7bb442e141c1d693413f0052d49e64abec6d5831d66c43612830a17df1fe4383db896468100221", + expectedX: "ef6e1da6d6c7627e80f7a7234cb08a022c1ee1cf29e4d0f9642ae924cef9eb38", + }, + { + ellswift: "7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0e0000000000000000000000000000000000000000000000000000000000000000", + expectedX: "50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff", + }, + { + ellswift: "7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff", + }, + { + ellswift: "851b1ca94549371c4f1f7187321d39bf51c6b7fb61f7cbf027c9da62021b7a65fc54c96837fb22b362eda63ec52ec83d81bedd160c11b22d965d9f4a6d64d251", + expectedX: "3e731051e12d33237eb324f2aa5b16bb868eb49a1aa1fadc19b6e8761b5a5f7b", + }, + { + ellswift: "943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f91250000000000000000000000000000000000000000000000000000000000000000", + expectedX: "311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942", + }, + { + ellswift: "943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f9125fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942", + }, + { + ellswift: "a0f18492183e61e8063e573606591421b06bc3513631578a73a39c1c3306239f2f32904f0d2a33ecca8a5451705bb537d3bf44e071226025cdbfd249fe0f7ad6", + expectedX: "97a09cf1a2eae7c494df3c6f8a9445bfb8c09d60832f9b0b9d5eabe25fbd14b9", + }, + { + ellswift: "a1ed0a0bd79d8a23cfe4ec5fef5ba5cccfd844e4ff5cb4b0f2e71627341f1c5b17c499249e0ac08d5d11ea1c2c8ca7001616559a7994eadec9ca10fb4b8516dc", + expectedX: "65a89640744192cdac64b2d21ddf989cdac7500725b645bef8e2200ae39691f2", + }, + { + ellswift: 
"ba94594a432721aa3580b84c161d0d134bc354b690404d7cd4ec57c16d3fbe98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffea507dd7", + expectedX: "5e0d76564aae92cb347e01a62afd389a9aa401c76c8dd227543dc9cd0efe685a", + }, + { + ellswift: "bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a", + expectedX: "2d97f96cac882dfe73dc44db6ce0f1d31d6241358dd5d74eb3d3b50003d24c2b", + }, + { + ellswift: "bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3ffffffffffffffffffffffffffffffffffffffffffffffffffffffff6507d09a", + expectedX: "e7008afe6e8cbd5055df120bd748757c686dadb41cce75e4addcc5e02ec02b44", + }, + { + ellswift: "c5981bae27fd84401c72a155e5707fbb811b2b620645d1028ea270cbe0ee225d4b62aa4dca6506c1acdbecc0552569b4b21436a5692e25d90d3bc2eb7ce24078", + expectedX: "948b40e7181713bc018ec1702d3d054d15746c59a7020730dd13ecf985a010d7", + }, + { + ellswift: "c894ce48bfec433014b931a6ad4226d7dbd8eaa7b6e3faa8d0ef94052bcf8cff336eeb3919e2b4efb746c7f71bbca7e9383230fbbc48ffafe77e8bcc69542471", + expectedX: "f1c91acdc2525330f9b53158434a4d43a1c547cff29f15506f5da4eb4fe8fa5a", + }, + { + ellswift: "cbb0deab125754f1fdb2038b0434ed9cb3fb53ab735391129994a535d925f6730000000000000000000000000000000000000000000000000000000000000000", + expectedX: "872d81ed8831d9998b67cb7105243edbf86c10edfebb786c110b02d07b2e67cd", + }, + { + ellswift: "d917b786dac35670c330c9c5ae5971dfb495c8ae523ed97ee2420117b171f41effffffffffffffffffffffffffffffffffffffffffffffffffffffff2001f6f6", + expectedX: "e45b71e110b831f2bdad8651994526e58393fde4328b1ec04d59897142584691", + }, + { + ellswift: "e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb4260000000000000000000000000000000000000000000000000000000000000000", + expectedX: "66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5", + }, + { + ellswift: 
"e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb426fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5", + }, + { + ellswift: "e7ee5814c1706bf8a89396a9b032bc014c2cac9c121127dbf6c99278f8bb53d1dfd04dbcda8e352466b6fcd5f2dea3e17d5e133115886eda20db8a12b54de71b", + expectedX: "e842c6e3529b234270a5e97744edc34a04d7ba94e44b6d2523c9cf0195730a50", + }, + { + ellswift: "f292e46825f9225ad23dc057c1d91c4f57fcb1386f29ef10481cb1d22518593fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7011c989", + expectedX: "3cea2c53b8b0170166ac7da67194694adacc84d56389225e330134dab85a4d55", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0000000000000000000000000000000000000000000000000000000000000000", + expectedX: "edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f01d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771", + expectedX: "b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f4218f20ae6c646b363db68605822fb14264ca8d2587fdd6fbc750d587e76a7ee", + expectedX: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f", + expectedX: "f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0", + expectedX: "9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0", + }, + { + ellswift: 
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fd19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42", + expectedX: "70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5", + expectedX: "50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d", + expectedX: "1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7", + expectedX: "12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9", + expectedX: "7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a70000000000000000000000000000000000000000000000000000000000000000", + expectedX: "649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb", + }, + { + ellswift: 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff15028c590063f64d5a7f1c14915cd61eac886ab295bebd91992504cf77edb028bdd6267f", + expectedX: "3fde5713f8282eead7d39d4201f44a7c85a5ac8a0681f35e54085c6b69543374", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de860000000000000000000000000000000000000000000000000000000000000000", + expectedX: "3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de86fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2c2c5709e7156c417717f2feab147141ec3da19fb759575cc6e37b2ea5ac9309f26f0f66", + expectedX: "d2469ab3e04acbb21c65a1809f39caafe7a77c13d10f9dd38f391c01dc499c52", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a08cc1efffffffffffffffffffffffffffffffffffffffffffffffffffffffff760e9f0", + expectedX: "38e2a5ce6a93e795e16d2c398bc99f0369202ce21e8f09d56777b40fc512bccc", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e91257d932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a", + expectedX: "864b3dc902c376709c10a93ad4bbe29fce0012f3dc8672c6286bba28d7d6d6fc", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff795d6c1c322cadf599dbb86481522b3cc55f15a67932db2afa0111d9ed6981bcd124bf44", + expectedX: "766dfe4a700d9bee288b903ad58870e3d4fe2f0ef780bcac5c823f320d9a9bef", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8e426f0392389078c12b1a89e9542f0593bc96b6bfde8224f8654ef5d5cda935a3582194", + expectedX: "faec7bc1987b63233fbc5f956edbf37d54404e7461c58ab8631bc68e451a0478", + }, + { + ellswift: 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff91192139ffffffffffffffffffffffffffffffffffffffffffffffffffffffff45f0f1eb", + expectedX: "ec29a50bae138dbf7d8e24825006bb5fc1a2cc1243ba335bc6116fb9e498ec1f", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff98eb9ab76e84499c483b3bf06214abfe065dddf43b8601de596d63b9e45a166a580541fe", + expectedX: "1e0ff2dee9b09b136292a9e910f0d6ac3e552a644bba39e64e9dd3e3bbd3d4d4", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646", + expectedX: "8b7dd5c3edba9ee97b70eff438f22dca9849c8254a2f3345a0a572ffeaae0928", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896", + expectedX: "0881950c8f51d6b9a6387465d5f12609ef1bb25412a08a74cb2dfb200c74bfbf", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffa2f5cd838816c16c4fe8a1661d606fdb13cf9af04b979a2e159a09409ebc8645d58fde02", + expectedX: "2f083207b9fd9b550063c31cd62b8746bd543bdc5bbf10e3a35563e927f440c8", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c00000000000000000000000000000000000000000000000000000000000000000", + expectedX: "4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c0fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8d0000000000000000000000000000000000000000000000000000000000000000", + expectedX: "16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2", + }, + { + ellswift: 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8dfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + expectedX: "16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2", + }, + { + ellswift: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffef64d162750546ce42b0431361e52d4f5242d8f24f33e6b1f99b591647cbc808f462af51", + expectedX: "d41244d11ca4f65240687759f95ca9efbab767ededb38fd18c36e18cd3b6f6a9", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0e5be52372dd6e894b2a326fc3605a6e8f3c69c710bf27d630dfe2004988b78eb6eab36", + expectedX: "64bf84dd5e03670fdb24c0f5d3c2c365736f51db6c92d95010716ad2d36134c8", + }, + { + ellswift: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffefbb982fffffffffffffffffffffffffffffffffffffffffffffffffffffffff6d6db1f", + expectedX: "1c92ccdfcf4ac550c28db57cff0c8515cb26936c786584a70114008d6c33a34b", + }, + } + + for _, test := range tests { + // The test.ellswift variable is 128-bytes long and is composed of two + // 64-byte hexadecimal strings. The first 64-byte hex string is the u + // value and the other is the t-value. + uVal := setHex(test.ellswift[0:64]).Normalize() + tVal := setHex(test.ellswift[64:128]).Normalize() + + xVal := setHex(test.expectedX) + + xCoord, err := XSwiftEC(uVal, tVal) + if err != nil { + t.Fatalf("received err with XSwiftEC: %v", err) + } + + if !xCoord.Equals(xVal) { + t.Fatalf("encoding not equal to x") + } + } +} + +// TestXSwiftECInvVectors tests that the inverse of the ElligatorSwift encoding +// XSwiftECInv works as expected. In other words, given a u-value and an +// x-value, that the correct t-value is returned. Each test case has a cases +// array that determines which t-value should be returned for each +// corresponding case. 
+func TestXSwiftECInvVectors(t *testing.T) { + tests := []struct { + u string + x string + cases []string + }{ + { + u: "05ff6bdad900fc3261bc7fe34e2fb0f569f06e091ae437d3a52e9da0cbfb9590", + x: "80cdf63774ec7022c89a5a8558e373a279170285e0ab27412dbce510bdfe23fc", + cases: []string{ + "", + "", + "45654798ece071ba79286d04f7f3eb1c3f1d17dd883610f2ad2efd82a287466b", + "0aeaa886f6b76c7158452418cbf5033adc5747e9e9b5d3b2303db96936528557", + "", + "", + "ba9ab867131f8e4586d792fb080c14e3c0e2e82277c9ef0d52d1027c5d78b5c4", + "f51557790948938ea7badbe7340afcc523a8b816164a2c4dcfc24695c9ad76d8", + }, + }, + { + u: "1737a85f4c8d146cec96e3ffdca76d9903dcf3bd53061868d478c78c63c2aa9e", + x: "39e48dd150d2f429be088dfd5b61882e7e8407483702ae9a5ab35927b15f85ea", + cases: []string{ + "1be8cc0b04be0c681d0c6a68f733f82c6c896e0c8a262fcd392918e303a7abf4", + "605b5814bf9b8cb066667c9e5480d22dc5b6c92f14b4af3ee0a9eb83b03685e3", + "", + "", + "e41733f4fb41f397e2f3959708cc07d3937691f375d9d032c6d6e71bfc58503b", + "9fa4a7eb4064734f99998361ab7f2dd23a4936d0eb4b50c11f56147b4fc9764c", + "", + "", + }, + }, + { + u: "1aaa1ccebf9c724191033df366b36f691c4d902c228033ff4516d122b2564f68", + x: "c75541259d3ba98f207eaa30c69634d187d0b6da594e719e420f4898638fc5b0", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "2323a1d079b0fd72fc8bb62ec34230a815cb0596c2bfac998bd6b84260f5dc26", + x: "239342dfb675500a34a196310b8d87d54f49dcac9da50c1743ceab41a7b249ff", + cases: []string{ + "f63580b8aa49c4846de56e39e1b3e73f171e881eba8c66f614e67e5c975dfc07", + "b6307b332e699f1cf77841d90af25365404deb7fed5edb3090db49e642a156b6", + "", + "", + "09ca7f4755b63b7b921a91c61e4c18c0e8e177e145739909eb1981a268a20028", + "49cf84ccd19660e30887be26f50dac9abfb2148012a124cf6f24b618bd5ea579", + "", + "", + }, + }, + { + u: "2dc90e640cb646ae9164c0b5a9ef0169febe34dc4437d6e46acb0e27e219d1e8", + x: "d236f19bf349b9516e9b3f4a5610fe960141cb23bbc8291b9534f1d71de62a47", + cases: []string{ + 
"e69df7d9c026c36600ebdf588072675847c0c431c8eb730682533e964b6252c9", + "4f18bbdf7c2d6c5f818c18802fa35cd069eaa79fff74e4fc837c80d93fece2f8", + "", + "", + "196208263fd93c99ff1420a77f8d98a7b83f3bce37148cf97dacc168b49da966", + "b0e7442083d293a07e73e77fd05ca32f96155860008b1b037c837f25c0131937", + "", + "", + }, + }, + { + u: "3edd7b3980e2f2f34d1409a207069f881fda5f96f08027ac4465b63dc278d672", + x: "053a98de4a27b1961155822b3a3121f03b2a14458bd80eb4a560c4c7a85c149c", + cases: []string{ + "", + "", + "b3dae4b7dcf858e4c6968057cef2b156465431526538199cf52dc1b2d62fda30", + "4aa77dd55d6b6d3cfa10cc9d0fe42f79232e4575661049ae36779c1d0c666d88", + "", + "", + "4c251b482307a71b39697fa8310d4ea9b9abcead9ac7e6630ad23e4c29d021ff", + "b558822aa29492c305ef3362f01bd086dcd1ba8a99efb651c98863e1f3998ea7", + }, + }, + { + u: "4295737efcb1da6fb1d96b9ca7dcd1e320024b37a736c4948b62598173069f70", + x: "fa7ffe4f25f88362831c087afe2e8a9b0713e2cac1ddca6a383205a266f14307", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "587c1a0cee91939e7f784d23b963004a3bf44f5d4e32a0081995ba20b0fca59e", + x: "2ea988530715e8d10363907ff25124524d471ba2454d5ce3be3f04194dfd3a3c", + cases: []string{ + "cfd5a094aa0b9b8891b76c6ab9438f66aa1c095a65f9f70135e8171292245e74", + "a89057d7c6563f0d6efa19ae84412b8a7b47e791a191ecdfdf2af84fd97bc339", + "475d0ae9ef46920df07b34117be5a0817de1023e3cc32689e9be145b406b0aef", + "a0759178ad80232454f827ef05ea3e72ad8d75418e6d4cc1cd4f5306c5e7c453", + "302a5f6b55f464776e48939546bc709955e3f6a59a0608feca17e8ec6ddb9dbb", + "576fa82839a9c0f29105e6517bbed47584b8186e5e6e132020d507af268438f6", + "b8a2f51610b96df20f84cbee841a5f7e821efdc1c33cd9761641eba3bf94f140", + "5f8a6e87527fdcdbab07d810fa15c18d52728abe7192b33e32b0acf83a1837dc", + }, + }, + { + u: "5fa88b3365a635cbbcee003cce9ef51dd1a310de277e441abccdb7be1e4ba249", + x: "79461ff62bfcbcac4249ba84dd040f2cec3c63f725204dc7f464c16bf0ff3170", + cases: []string{ + "", + "", + 
"6bb700e1f4d7e236e8d193ff4a76c1b3bcd4e2b25acac3d51c8dac653fe909a0", + "f4c73410633da7f63a4f1d55aec6dd32c4c6d89ee74075edb5515ed90da9e683", + "", + "", + "9448ff1e0b281dc9172e6c00b5893e4c432b1d4da5353c2ae3725399c016f28f", + "0b38cbef9cc25809c5b0e2aa513922cd3b39276118bf8a124aaea125f25615ac", + }, + }, + { + u: "6fb31c7531f03130b42b155b952779efbb46087dd9807d241a48eac63c3d96d6", + x: "56f81be753e8d4ae4940ea6f46f6ec9fda66a6f96cc95f506cb2b57490e94260", + cases: []string{ + "", + "", + "59059774795bdb7a837fbe1140a5fa59984f48af8df95d57dd6d1c05437dcec1", + "22a644db79376ad4e7b3a009e58b3f13137c54fdf911122cc93667c47077d784", + "", + "", + "a6fa688b86a424857c8041eebf5a05a667b0b7507206a2a82292e3f9bc822d6e", + "dd59bb2486c8952b184c5ff61a74c0ecec83ab0206eeedd336c9983a8f8824ab", + }, + }, + { + u: "704cd226e71cb6826a590e80dac90f2d2f5830f0fdf135a3eae3965bff25ff12", + x: "138e0afa68936ee670bd2b8db53aedbb7bea2a8597388b24d0518edd22ad66ec", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "725e914792cb8c8949e7e1168b7cdd8a8094c91c6ec2202ccd53a6a18771edeb", + x: "8da16eb86d347376b6181ee9748322757f6b36e3913ddfd332ac595d788e0e44", + cases: []string{ + "dd357786b9f6873330391aa5625809654e43116e82a5a5d82ffd1d6624101fc4", + "a0b7efca01814594c59c9aae8e49700186ca5d95e88bcc80399044d9c2d8613d", + "", + "", + "22ca8879460978cccfc6e55a9da7f69ab1bcee917d5a5a27d002e298dbefdc6b", + "5f481035fe7eba6b3a63655171b68ffe7935a26a1774337fc66fbb253d279af2", + "", + "", + }, + }, + { + u: "78fe6b717f2ea4a32708d79c151bf503a5312a18c0963437e865cc6ed3f6ae97", + x: "8701948e80d15b5cd8f72863eae40afc5aced5e73f69cbc8179a33902c094d98", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "7c37bb9c5061dc07413f11acd5a34006e64c5c457fdb9a438f217255a961f50d", + x: "5c1a76b44568eb59d6789a7442d9ed7cdc6226b7752b4ff8eaf8e1a95736e507", + cases: []string{ + "", + "", + "b94d30cd7dbff60b64620c17ca0fafaa40b3d1f52d077a60a2e0cafd145086c2", + "", + "", + "", + 
"46b2cf32824009f49b9df3e835f05055bf4c2e0ad2f8859f5d1f3501ebaf756d", + "", + }, + }, + { + u: "82388888967f82a6b444438a7d44838e13c0d478b9ca060da95a41fb94303de6", + x: "29e9654170628fec8b4972898b113cf98807f4609274f4f3140d0674157c90a0", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "91298f5770af7a27f0a47188d24c3b7bf98ab2990d84b0b898507e3c561d6472", + x: "144f4ccbd9a74698a88cbf6fd00ad886d339d29ea19448f2c572cac0a07d5562", + cases: []string{ + "e6a0ffa3807f09dadbe71e0f4be4725f2832e76cad8dc1d943ce839375eff248", + "837b8e68d4917544764ad0903cb11f8615d2823cefbb06d89049dbabc69befda", + "", + "", + "195f005c7f80f6252418e1f0b41b8da0d7cd189352723e26bc317c6b8a1009e7", + "7c8471972b6e8abb89b52f6fc34ee079ea2d7dc31044f9276fb6245339640c55", + "", + "", + }, + }, + { + u: "b682f3d03bbb5dee4f54b5ebfba931b4f52f6a191e5c2f483c73c66e9ace97e1", + x: "904717bf0bc0cb7873fcdc38aa97f19e3a62630972acff92b24cc6dda197cb96", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "c17ec69e665f0fb0dbab48d9c2f94d12ec8a9d7eacb58084833091801eb0b80b", + x: "147756e66d96e31c426d3cc85ed0c4cfbef6341dd8b285585aa574ea0204b55e", + cases: []string{ + "6f4aea431a0043bdd03134d6d9159119ce034b88c32e50e8e36c4ee45eac7ae9", + "fd5be16d4ffa2690126c67c3ef7cb9d29b74d397c78b06b3605fda34dc9696a6", + "5e9c60792a2f000e45c6250f296f875e174efc0e9703e628706103a9dd2d82c7", + "", + "90b515bce5ffbc422fcecb2926ea6ee631fcb4773cd1af171c93b11aa1538146", + "02a41e92b005d96fed93983c1083462d648b2c683874f94c9fa025ca23696589", + "a1639f86d5d0fff1ba39daf0d69078a1e8b103f168fc19d78f9efc5522d27968", + "", + }, + }, + { + u: "c25172fc3f29b6fc4a1155b8575233155486b27464b74b8b260b499a3f53cb14", + x: "1ea9cbdb35cf6e0329aa31b0bb0a702a65123ed008655a93b7dcd5280e52e1ab", + cases: []string{ + "", + "", + "7422edc7843136af0053bb8854448a8299994f9ddcefd3a9a92d45462c59298a", + "78c7774a266f8b97ea23d05d064f033c77319f923f6b78bce4e20bf05fa5398d", + "", + "", + 
"8bdd12387bcec950ffac4477abbb757d6666b06223102c5656d2bab8d3a6d2a5", + "873888b5d990746815dc2fa2f9b0fcc388ce606dc09487431b1df40ea05ac2a2", + }, + }, + { + u: "cab6626f832a4b1280ba7add2fc5322ff011caededf7ff4db6735d5026dc0367", + x: "2b2bef0852c6f7c95d72ac99a23802b875029cd573b248d1f1b3fc8033788eb6", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "d8621b4ffc85b9ed56e99d8dd1dd24aedcecb14763b861a17112dc771a104fd2", + x: "812cabe972a22aa67c7da0c94d8a936296eb9949d70c37cb2b2487574cb3ce58", + cases: []string{ + "fbc5febc6fdbc9ae3eb88a93b982196e8b6275a6d5a73c17387e000c711bd0e3", + "8724c96bd4e5527f2dd195a51c468d2d211ba2fac7cbe0b4b3434253409fb42d", + "", + "", + "043a014390243651c147756c467de691749d8a592a58c3e8c781fff28ee42b4c", + "78db36942b1aad80d22e6a5ae3b972d2dee45d0538341f4b4cbcbdabbf604802", + "", + "", + }, + }, + { + u: "da463164c6f4bf7129ee5f0ec00f65a675a8adf1bd931b39b64806afdcda9a22", + x: "25b9ce9b390b408ed611a0f13ff09a598a57520e426ce4c649b7f94f2325620d", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "dafc971e4a3a7b6dcfb42a08d9692d82ad9e7838523fcbda1d4827e14481ae2d", + x: "250368e1b5c58492304bd5f72696d27d526187c7adc03425e2b7d81dbb7e4e02", + cases: []string{ + "", + "", + "370c28f1be665efacde6aa436bf86fe21e6e314c1e53dd040e6c73a46b4c8c49", + "cd8acee98ffe56531a84d7eb3e48fa4034206ce825ace907d0edf0eaeb5e9ca2", + "", + "", + "c8f3d70e4199a105321955bc9407901de191ceb3e1ac22fbf1938c5a94b36fe6", + "327531167001a9ace57b2814c1b705bfcbdf9317da5316f82f120f1414a15f8d", + }, + }, + { + u: "e0294c8bc1a36b4166ee92bfa70a5c34976fa9829405efea8f9cd54dcb29b99e", + x: "ae9690d13b8d20a0fbbf37bed8474f67a04e142f56efd78770a76b359165d8a1", + cases: []string{ + "", + "", + "dcd45d935613916af167b029058ba3a700d37150b9df34728cb05412c16d4182", + "", + "", + "", + "232ba26ca9ec6e950e984fd6fa745c58ff2c8eaf4620cb8d734fabec3e92baad", + "", + }, + }, + { + u: "e148441cd7b92b8b0e4fa3bd68712cfd0d709ad198cace611493c10e97f5394e", 
+ x: "164a639794d74c53afc4d3294e79cdb3cd25f99f6df45c000f758aba54d699c0", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "e4b00ec97aadcca97644d3b0c8a931b14ce7bcf7bc8779546d6e35aa5937381c", + x: "94e9588d41647b3fcc772dc8d83c67ce3be003538517c834103d2cd49d62ef4d", + cases: []string{ + "c88d25f41407376bb2c03a7fffeb3ec7811cc43491a0c3aac0378cdc78357bee", + "51c02636ce00c2345ecd89adb6089fe4d5e18ac924e3145e6669501cd37a00d4", + "205b3512db40521cb200952e67b46f67e09e7839e0de44004138329ebd9138c5", + "58aab390ab6fb55c1d1b80897a207ce94a78fa5b4aa61a33398bcae9adb20d3e", + "3772da0bebf8c8944d3fc5800014c1387ee33bcb6e5f3c553fc8732287ca8041", + "ae3fd9c931ff3dcba132765249f7601b2a1e7536db1ceba19996afe22c85fb5b", + "dfa4caed24bfade34dff6ad1984b90981f6187c61f21bbffbec7cd60426ec36a", + "a7554c6f54904aa3e2e47f7685df8316b58705a4b559e5ccc6743515524deef1", + }, + }, + { + u: "e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5", + x: "e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + { + u: "e6bcb5c3d63467d490bfa54fbbc6092a7248c25e11b248dc2964a6e15edb1457", + x: "19434a3c29cb982b6f405ab04439f6d58db73da1ee4db723d69b591da124e7d8", + cases: []string{ + "67119877832ab8f459a821656d8261f544a553b89ae4f25c52a97134b70f3426", + "ffee02f5e649c07f0560eff1867ec7b32d0e595e9b1c0ea6e2a4fc70c97cd71f", + "b5e0c189eb5b4bacd025b7444d74178be8d5246cfa4a9a207964a057ee969992", + "5746e4591bf7f4c3044609ea372e908603975d279fdef8349f0b08d32f07619d", + "98ee67887cd5470ba657de9a927d9e0abb5aac47651b0da3ad568eca48f0c809", + "0011fd0a19b63f80fa9f100e7981384cd2f1a6a164e3f1591d5b038e36832510", + "4a1f3e7614a4b4532fda48bbb28be874172adb9305b565df869b5fa71169629d", + "a8b91ba6e4080b3cfbb9f615c8d16f79fc68a2d8602107cb60f4f72bd0f89a92", + }, + }, + { + u: "f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6", + x: 
"f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6", + cases: []string{ + "4f867ad8bb3d840409d26b67307e62100153273f72fa4b7484becfa14ebe7408", + "5bbc4f59e452cc5f22a99144b10ce8989a89a995ec3cea1c91ae10e8f721bb5d", + "", + "", + "b079852744c27bfbf62d9498cf819deffeacd8c08d05b48b7b41305db1418827", + "a443b0a61bad33a0dd566ebb4ef317676576566a13c315e36e51ef1608de40d2", + "", + "", + }, + }, + { + u: "f455605bc85bf48e3a908c31023faf98381504c6c6d3aeb9ede55f8dd528924d", + x: "d31fbcd5cdb798f6c00db6692f8fe8967fa9c79dd10958f4a194f01374905e99", + cases: []string{ + "", + "", + "0c00c5715b56fe632d814ad8a77f8e66628ea47a6116834f8c1218f3a03cbd50", + "df88e44fac84fa52df4d59f48819f18f6a8cd4151d162afaf773166f57c7ff46", + "", + "", + "f3ff3a8ea4a9019cd27eb527588071999d715b859ee97cb073ede70b5fc33edf", + "20771bb0537b05ad20b2a60b77e60e7095732beae2e9d505088ce98fa837fce9", + }, + }, + { + u: "f58cd4d9830bad322699035e8246007d4be27e19b6f53621317b4f309b3daa9d", + x: "78ec2b3dc0948de560148bbc7c6dc9633ad5df70a5a5750cbed721804f082a3b", + cases: []string{ + "6c4c580b76c7594043569f9dae16dc2801c16a1fbe12860881b75f8ef929bce5", + "94231355e7385c5f25ca436aa64191471aea4393d6e86ab7a35fe2afacaefd0d", + "dff2a1951ada6db574df834048149da3397a75b829abf58c7e69db1b41ac0989", + "a52b66d3c907035548028bf804711bf422aba95f1a666fc86f4648e05f29caae", + "93b3a7f48938a6bfbca9606251e923d7fe3e95e041ed79f77e48a07006d63f4a", + "6bdcecaa18c7a3a0da35bc9559be6eb8e515bc6c291795485ca01d4f5350ff22", + "200d5e6ae525924a8b207cbfb7eb625cc6858a47d6540a73819624e3be53f2a6", + "5ad4992c36f8fcaab7fd7407fb8ee40bdd5456a0e599903790b9b71ea0d63181", + }, + }, + { + u: "fd7d912a40f182a3588800d69ebfb5048766da206fd7ebc8d2436c81cbef6421", + x: "8d37c862054debe731694536ff46b273ec122b35a9bf1445ac3c4ff9f262c952", + cases: []string{ + "", + "", + "", + "", + "", + "", + "", + "", + }, + }, + } + + for _, test := range tests { + uVal := setHex(test.u).Normalize() + xVal := setHex(test.x).Normalize() + + // Loop through each individual 
case in the list of cases and ensure + // that the correct t-value is calculated. + for caseNum, expTString := range test.cases { + tVal := XSwiftECInv(uVal, xVal, caseNum) + + if tVal == nil { + if expTString != "" { + t.Fatalf("t value different than expected") + } + + continue + } + + expectedT := setHex(expTString) + + if !tVal.Equals(expectedT) { + t.Fatalf("t value different than expected") + } + } + } +} diff --git a/btcec/go.mod b/btcec/go.mod index 452399cfc6..95e86dbc3a 100644 --- a/btcec/go.mod +++ b/btcec/go.mod @@ -1,6 +1,6 @@ module github.com/btcsuite/btcd/btcec/v2 -go 1.17 +go 1.22 require ( github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 diff --git a/btcec/schnorr/musig2/context.go b/btcec/schnorr/musig2/context.go index 8e6b7154d3..3effa71e6e 100644 --- a/btcec/schnorr/musig2/context.go +++ b/btcec/schnorr/musig2/context.go @@ -59,6 +59,11 @@ var ( // ErrNotEnoughSigners is returned if a caller attempts to obtain an // early nonce when it wasn't specified ErrNoEarlyNonce = fmt.Errorf("no early nonce available") + + // ErrCombinedNonceAfterPubNonces is returned if RegisterCombinedNonce + // is called after public nonces have already been registered. + ErrCombinedNonceAfterPubNonces = fmt.Errorf("can't register combined " + + "nonce after public nonces") ) // Context is a managed signing context for musig2. It takes care of things @@ -525,7 +530,7 @@ func (s *Session) RegisterPubNonce(nonce [PubNonceSize]byte) (bool, error) { // If we already have all the nonces, then this method was called too // many times. haveAllNonces := len(s.pubNonces) == s.ctx.opts.numSigners - if haveAllNonces { + if haveAllNonces || s.combinedNonce != nil { return false, ErrAlredyHaveAllNonces } @@ -548,6 +553,57 @@ func (s *Session) RegisterPubNonce(nonce [PubNonceSize]byte) (bool, error) { return haveAllNonces, nil } +// CombinedNonce returns the combined public nonce for the signing session. 
+// This will be available after either:
+//   - All individual nonces have been registered via RegisterPubNonce, or
+//   - A combined nonce has been registered via RegisterCombinedNonce
+//
+// If the combined nonce is not yet available, this method returns an error.
+func (s *Session) CombinedNonce() ([PubNonceSize]byte, error) {
+	if s.combinedNonce == nil {
+		return [PubNonceSize]byte{}, ErrCombinedNonceUnavailable
+	}
+
+	return *s.combinedNonce, nil
+}
+
+// RegisterCombinedNonce allows a caller to directly register a combined nonce
+// that was generated externally. This is useful in coordinator-based
+// protocols where the coordinator aggregates all nonces and distributes the
+// combined nonce to participants, rather than each participant aggregating
+// nonces themselves.
+func (s *Session) RegisterCombinedNonce(
+	combinedNonce [PubNonceSize]byte) error {
+
+	// If we already have a combined nonce, then this method was called too
+	// many times.
+	if s.combinedNonce != nil {
+		return ErrAlredyHaveAllNonces
+	}
+
+	// We also disallow mixing flows: the session already tracks our own
+	// nonce, so a count above one means RegisterPubNonce was used before.
+	if len(s.pubNonces) > 1 {
+		return ErrCombinedNonceAfterPubNonces
+	}
+
+	// We'll now try to parse the combined nonce into its two points to
+	// ensure it's valid.
+	_, err := btcec.ParsePubKey(combinedNonce[:33])
+	if err != nil {
+		return fmt.Errorf("invalid combined nonce: %w", err)
+	}
+	_, err = btcec.ParsePubKey(combinedNonce[33:])
+	if err != nil {
+		return fmt.Errorf("invalid combined nonce: %w", err)
+	}
+
+	// Otherwise, we'll just set the combined nonce directly.
+	s.combinedNonce = &combinedNonce
+
+	return nil
+}
+
 // Sign generates a partial signature for the target message, using the target
 // context. If this method is called more than once per context, then an error
 // is returned, as that means a nonce was re-used.
diff --git a/btcec/schnorr/musig2/musig2_test.go b/btcec/schnorr/musig2/musig2_test.go index dfd48f3e82..ebbe055b6d 100644 --- a/btcec/schnorr/musig2/musig2_test.go +++ b/btcec/schnorr/musig2/musig2_test.go @@ -439,3 +439,406 @@ func (mr *memsetRandReader) Read(buf []byte) (n int, err error) { } return len(buf), nil } + +// TestSigningWithAggregatedNonce tests the aggregated nonce signing flow where +// nonces are aggregated externally and provided to participants via +// RegisterCombinedNonce, rather than each participant aggregating nonces +// themselves via RegisterPubNonce. +func TestSigningWithAggregatedNonce(t *testing.T) { + t.Run("basic flow", func(t *testing.T) { + const numSigners = 5 + + // Generate signers. + signerKeys := make([]*btcec.PrivateKey, numSigners) + signSet := make([]*btcec.PublicKey, numSigners) + for i := 0; i < numSigners; i++ { + privKey, err := btcec.NewPrivateKey() + if err != nil { + t.Fatalf("unable to gen priv key: %v", err) + } + signerKeys[i] = privKey + signSet[i] = privKey.PubKey() + } + + // Each signer creates a context and session. + sessions := make([]*Session, numSigners) + for i, signerKey := range signerKeys { + signCtx, err := NewContext( + signerKey, false, WithKnownSigners(signSet), + ) + if err != nil { + t.Fatalf("unable to generate context: %v", err) + } + + session, err := signCtx.NewSession() + if err != nil { + t.Fatalf("unable to generate new session: %v", err) + } + sessions[i] = session + } + + // Phase 1: Collect all public nonces. + pubNonces := make([][PubNonceSize]byte, numSigners) + for i, session := range sessions { + pubNonces[i] = session.PublicNonce() + } + + // Phase 2: Aggregate nonces externally. + combinedNonce, err := AggregateNonces(pubNonces) + if err != nil { + t.Fatalf("unable to aggregate nonces: %v", err) + } + + // Phase 3: Participants register combined nonce and sign. 
+ msg := sha256.Sum256([]byte("aggregated nonce signing")) + + partialSigs := make([]*PartialSignature, numSigners) + for i, session := range sessions { + err = session.RegisterCombinedNonce(combinedNonce) + if err != nil { + t.Fatalf("signer %d unable to register combined nonce: %v", + i, err) + } + sig, err := session.Sign(msg) + if err != nil { + t.Fatalf("signer %d unable to sign: %v", i, err) + } + partialSigs[i] = sig + } + + // Phase 4: Combine all partial signatures. + finalSig := CombineSigs(partialSigs[0].R, partialSigs) + + // Verify the final signature. + combinedKey, _, _, err := AggregateKeys(signSet, false) + if err != nil { + t.Fatalf("unable to aggregate keys: %v", err) + } + + if !finalSig.Verify(msg[:], combinedKey.FinalKey) { + t.Fatalf("final signature is invalid") + } + }) + + t.Run("error: register combined nonce twice", func(t *testing.T) { + privKey, _ := btcec.NewPrivateKey() + privKey2, _ := btcec.NewPrivateKey() + signSet := []*btcec.PublicKey{privKey.PubKey(), privKey2.PubKey()} + + signCtx, _ := NewContext(privKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + + fakeCombinedNonce := getValidNonce(t) + + // First call should succeed. + err := session.RegisterCombinedNonce(fakeCombinedNonce) + if err != nil { + t.Fatalf("first RegisterCombinedNonce failed: %v", err) + } + + // Second call should fail. 
+ err = session.RegisterCombinedNonce(fakeCombinedNonce) + if err != ErrAlredyHaveAllNonces { + t.Fatalf("expected ErrAlredyHaveAllNonces, got: %v", err) + } + }) + + t.Run("error: register combined nonce after register pub nonce", + func(t *testing.T) { + + privKey, _ := btcec.NewPrivateKey() + privKey2, _ := btcec.NewPrivateKey() + privKey3, _ := btcec.NewPrivateKey() + signSet := []*btcec.PublicKey{ + privKey.PubKey(), + privKey2.PubKey(), + privKey3.PubKey(), + } + + signCtx, _ := NewContext(privKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + + signCtx2, _ := NewContext(privKey2, false, WithKnownSigners(signSet)) + session2, _ := signCtx2.NewSession() + + // Register one public nonce first. + _, err := session.RegisterPubNonce(session2.PublicNonce()) + if err != nil { + t.Fatalf("RegisterPubNonce failed: %v", err) + } + + // Now try to register a combined nonce - this should fail. + fakeCombinedNonce := [PubNonceSize]byte{} + err = session.RegisterCombinedNonce(fakeCombinedNonce) + if err == nil { + t.Fatalf("expected error when calling RegisterCombinedNonce " + + "after RegisterPubNonce") + } + }) + + t.Run("error: register pub nonce after register combined nonce", + func(t *testing.T) { + + const numSigners = 3 + + signerKeys := make([]*btcec.PrivateKey, numSigners) + signSet := make([]*btcec.PublicKey, numSigners) + for i := 0; i < numSigners; i++ { + privKey, _ := btcec.NewPrivateKey() + signerKeys[i] = privKey + signSet[i] = privKey.PubKey() + } + + sessions := make([]*Session, numSigners) + for i, signerKey := range signerKeys { + signCtx, _ := NewContext(signerKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + sessions[i] = session + } + + pubNonces := make([][PubNonceSize]byte, numSigners) + for i, session := range sessions { + pubNonces[i] = session.PublicNonce() + } + + combinedNonce, _ := AggregateNonces(pubNonces) + + // Register the combined nonce first. 
+ err := sessions[0].RegisterCombinedNonce(combinedNonce) + if err != nil { + t.Fatalf("RegisterCombinedNonce failed: %v", err) + } + + // Now try to register individual nonces - this should fail. + _, err = sessions[0].RegisterPubNonce(pubNonces[1]) + if err == nil { + t.Fatalf("expected error when calling RegisterPubNonce " + + "after RegisterCombinedNonce") + } + }) + + t.Run("nonce reuse prevention", func(t *testing.T) { + privKey, _ := btcec.NewPrivateKey() + privKey2, _ := btcec.NewPrivateKey() + signSet := []*btcec.PublicKey{privKey.PubKey(), privKey2.PubKey()} + + signCtx, _ := NewContext(privKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + + fakeCombinedNonce := getValidNonce(t) + session.RegisterCombinedNonce(fakeCombinedNonce) + + msg := sha256.Sum256([]byte("nonce reuse test")) + + // First sign should succeed. + _, err := session.Sign(msg) + if err != nil { + t.Fatalf("first sign failed: %v", err) + } + + // Second sign should fail due to nonce reuse. + _, err = session.Sign(msg) + if err != ErrSigningContextReuse { + t.Fatalf("expected nonce reuse error, got: %v", err) + } + }) + + t.Run("incorrect combined nonce produces invalid sig", func(t *testing.T) { + const numSigners = 3 + + signerKeys := make([]*btcec.PrivateKey, numSigners) + signSet := make([]*btcec.PublicKey, numSigners) + for i := 0; i < numSigners; i++ { + privKey, _ := btcec.NewPrivateKey() + signerKeys[i] = privKey + signSet[i] = privKey.PubKey() + } + + sessions := make([]*Session, numSigners) + for i, signerKey := range signerKeys { + signCtx, _ := NewContext(signerKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + sessions[i] = session + } + + pubNonces := make([][PubNonceSize]byte, numSigners) + for i, session := range sessions { + pubNonces[i] = session.PublicNonce() + } + + // Create INCORRECT combined nonce using only a subset. 
+ wrongNonces := pubNonces[:2] + incorrectCombinedNonce, _ := AggregateNonces(wrongNonces) + + msg := sha256.Sum256([]byte("incorrect nonce test")) + + partialSigs := make([]*PartialSignature, numSigners) + for i, session := range sessions { + session.RegisterCombinedNonce(incorrectCombinedNonce) + sig, _ := session.Sign(msg) + partialSigs[i] = sig + } + + finalSig := CombineSigs(partialSigs[0].R, partialSigs) + combinedKey, _, _, _ := AggregateKeys(signSet, false) + + // Final signature should be INVALID. + if finalSig.Verify(msg[:], combinedKey.FinalKey) { + t.Fatalf("final signature should be invalid with incorrect nonce") + } + }) + + t.Run("mixed registration methods", func(t *testing.T) { + const numSigners = 4 + + signerKeys := make([]*btcec.PrivateKey, numSigners) + signSet := make([]*btcec.PublicKey, numSigners) + for i := 0; i < numSigners; i++ { + privKey, _ := btcec.NewPrivateKey() + signerKeys[i] = privKey + signSet[i] = privKey.PubKey() + } + + sessions := make([]*Session, numSigners) + for i, signerKey := range signerKeys { + signCtx, _ := NewContext(signerKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + sessions[i] = session + } + + pubNonces := make([][PubNonceSize]byte, numSigners) + for i, session := range sessions { + pubNonces[i] = session.PublicNonce() + } + + combinedNonce, _ := AggregateNonces(pubNonces) + msg := sha256.Sum256([]byte("mixed registration test")) + + // Half use RegisterCombinedNonce. + for i := 0; i < numSigners/2; i++ { + sessions[i].RegisterCombinedNonce(combinedNonce) + } + + // Other half use RegisterPubNonce. + for i := numSigners / 2; i < numSigners; i++ { + for j, nonce := range pubNonces { + if i == j { + continue + } + sessions[i].RegisterPubNonce(nonce) + } + } + + // All should be able to sign. 
+ partialSigs := make([]*PartialSignature, numSigners) + for i, session := range sessions { + sig, err := session.Sign(msg) + if err != nil { + t.Fatalf("signer %d unable to sign: %v", i, err) + } + partialSigs[i] = sig + } + + finalSig := CombineSigs(partialSigs[0].R, partialSigs) + combinedKey, _, _, _ := AggregateKeys(signSet, false) + + if !finalSig.Verify(msg[:], combinedKey.FinalKey) { + t.Fatalf("final signature is invalid") + } + }) + + t.Run("get combined nonce after RegisterCombinedNonce", func(t *testing.T) { + privKey, _ := btcec.NewPrivateKey() + privKey2, _ := btcec.NewPrivateKey() + signSet := []*btcec.PublicKey{privKey.PubKey(), privKey2.PubKey()} + + signCtx, _ := NewContext(privKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + + // Should fail before registering combined nonce. + _, err := session.CombinedNonce() + if err != ErrCombinedNonceUnavailable { + t.Fatalf("expected ErrCombinedNonceUnavailable, got: %v", err) + } + + // Register combined nonce. + expectedNonce := getValidNonce(t) + err = session.RegisterCombinedNonce(expectedNonce) + if err != nil { + t.Fatalf("RegisterCombinedNonce failed: %v", err) + } + + // Should succeed after registering. 
+ gotNonce, err := session.CombinedNonce() + if err != nil { + t.Fatalf("CombinedNonce failed: %v", err) + } + + if gotNonce != expectedNonce { + t.Fatalf("expected nonce %x, got %x", expectedNonce, gotNonce) + } + }) + + t.Run("get combined nonce after RegisterPubNonce", func(t *testing.T) { + const numSigners = 3 + + signerKeys := make([]*btcec.PrivateKey, numSigners) + signSet := make([]*btcec.PublicKey, numSigners) + for i := 0; i < numSigners; i++ { + privKey, _ := btcec.NewPrivateKey() + signerKeys[i] = privKey + signSet[i] = privKey.PubKey() + } + + sessions := make([]*Session, numSigners) + for i, signerKey := range signerKeys { + signCtx, _ := NewContext(signerKey, false, WithKnownSigners(signSet)) + session, _ := signCtx.NewSession() + sessions[i] = session + } + + pubNonces := make([][PubNonceSize]byte, numSigners) + for i, session := range sessions { + pubNonces[i] = session.PublicNonce() + } + + // Should fail before all nonces are registered. + _, err := sessions[0].CombinedNonce() + if err != ErrCombinedNonceUnavailable { + t.Fatalf("expected ErrCombinedNonceUnavailable before all nonces, got: %v", err) + } + + // Register all nonces via RegisterPubNonce. + for i := 1; i < numSigners; i++ { + sessions[0].RegisterPubNonce(pubNonces[i]) + } + + // Should succeed after all nonces are registered. + gotNonce, err := sessions[0].CombinedNonce() + if err != nil { + t.Fatalf("CombinedNonce failed: %v", err) + } + + // Verify it matches what AggregateNonces produces. 
+ expectedNonce, _ := AggregateNonces(pubNonces) + if gotNonce != expectedNonce { + t.Fatalf("combined nonce mismatch: expected %x, got %x", + expectedNonce[:8], gotNonce[:8]) + } + }) +} + +func getValidNonce(t *testing.T) [PubNonceSize]byte { + t.Helper() + + var nonce [PubNonceSize]byte + + privKey, err := btcec.NewPrivateKey() + if err != nil { + t.Fatalf("unable to gen priv key: %v", err) + } + copy(nonce[:33], privKey.PubKey().SerializeCompressed()) + copy(nonce[33:], privKey.PubKey().SerializeCompressed()) + + return nonce +} diff --git a/btcjson/chainsvrcmds.go b/btcjson/chainsvrcmds.go index 22552e7bcd..e76a8b6451 100644 --- a/btcjson/chainsvrcmds.go +++ b/btcjson/chainsvrcmds.go @@ -1152,6 +1152,7 @@ func init() { MustRegisterCmd("reconsiderblock", (*ReconsiderBlockCmd)(nil), flags) MustRegisterCmd("searchrawtransactions", (*SearchRawTransactionsCmd)(nil), flags) MustRegisterCmd("sendrawtransaction", (*SendRawTransactionCmd)(nil), flags) + MustRegisterCmd("submitpackage", (*JsonSubmitPackageCmd)(nil), flags) MustRegisterCmd("setgenerate", (*SetGenerateCmd)(nil), flags) MustRegisterCmd("signmessagewithprivkey", (*SignMessageWithPrivKeyCmd)(nil), flags) MustRegisterCmd("stop", (*StopCmd)(nil), flags) diff --git a/btcjson/chainsvrresults.go b/btcjson/chainsvrresults.go index 11c0483d31..b4e4f66492 100644 --- a/btcjson/chainsvrresults.go +++ b/btcjson/chainsvrresults.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/hex" "encoding/json" + "fmt" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -363,6 +364,49 @@ type LocalAddressesResult struct { Score int32 `json:"score"` } +// StringOrArray defines a type that can be used as type that is either a single +// string value or a string array in JSON-RPC commands, depending on the version +// of the chain backend. +type StringOrArray []string + +// MarshalJSON implements the json.Marshaler interface. 
+func (h StringOrArray) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]string(h)) // avoid infinite recursion
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (h *StringOrArray) UnmarshalJSON(data []byte) error {
+	var unmarshalled interface{}
+	if err := json.Unmarshal(data, &unmarshalled); err != nil {
+		return err
+	}
+
+	switch v := unmarshalled.(type) {
+	case string:
+		*h = []string{v}
+
+	case []interface{}:
+		s := make([]string, len(v))
+		for i, e := range v {
+			str, ok := e.(string)
+			if !ok {
+				return fmt.Errorf("invalid string_or_array "+
+					"value: %v", unmarshalled)
+			}
+
+			s[i] = str
+		}
+
+		*h = s
+
+	default:
+		return fmt.Errorf("invalid string_or_array value: %v",
+			unmarshalled)
+	}
+
+	return nil
+}
+
 // GetNetworkInfoResult models the data returned from the getnetworkinfo
 // command.
 type GetNetworkInfoResult struct {
@@ -380,7 +424,7 @@ type GetNetworkInfoResult struct {
 	RelayFee       float64                `json:"relayfee"`
 	IncrementalFee float64                `json:"incrementalfee"`
 	LocalAddresses []LocalAddressesResult `json:"localaddresses"`
-	Warnings       string                 `json:"warnings"`
+	Warnings       StringOrArray          `json:"warnings"`
 }
 
 // GetNodeAddressesResult models the data returned from the getnodeaddresses
@@ -416,6 +460,7 @@ type GetPeerInfoResult struct {
 	BanScore       int32   `json:"banscore"`
 	FeeFilter      int64   `json:"feefilter"`
 	SyncNode       bool    `json:"syncnode"`
+	V2Connection   bool    `json:"v2_connection"`
 }
 
 // GetRawMempoolVerboseResult models the data returned from the getrawmempool
diff --git a/btcjson/chainsvrresults_test.go b/btcjson/chainsvrresults_test.go
index 2566e65f62..122af3dccc 100644
--- a/btcjson/chainsvrresults_test.go
+++ b/btcjson/chainsvrresults_test.go
@@ -215,3 +215,51 @@ func TestChainSvrMiningInfoResults(t *testing.T) {
 		}
 	}
 }
+
+// TestGetNetworkInfoWarnings tests that we can use both a single string value
+// and an array of string values for the warnings field in GetNetworkInfoResult.
+func TestGetNetworkInfoWarnings(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + result string + expected btcjson.GetNetworkInfoResult + }{ + { + name: "network info with single warning", + result: `{"warnings": "this is a warning"}`, + expected: btcjson.GetNetworkInfoResult{ + Warnings: btcjson.StringOrArray{ + "this is a warning", + }, + }, + }, + { + name: "network info with array of warnings", + result: `{"warnings": ["a", "or", "b"]}`, + expected: btcjson.GetNetworkInfoResult{ + Warnings: btcjson.StringOrArray{ + "a", "or", "b", + }, + }, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + var infoResult btcjson.GetNetworkInfoResult + err := json.Unmarshal([]byte(test.result), &infoResult) + if err != nil { + t.Errorf("Test #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + if !reflect.DeepEqual(infoResult, test.expected) { + t.Errorf("Test #%d (%s) unexpected marhsalled data - "+ + "got %+v, want %+v", i, test.name, infoResult, + test.expected) + continue + } + } +} diff --git a/btcjson/register_test.go b/btcjson/register_test.go index 2d3ab10f3e..18e2ea0040 100644 --- a/btcjson/register_test.go +++ b/btcjson/register_test.go @@ -231,7 +231,7 @@ func TestMustRegisterCmdPanic(t *testing.T) { t.Parallel() // Setup a defer to catch the expected panic to ensure it actually - // paniced. + // panicked. defer func() { if err := recover(); err == nil { t.Error("MustRegisterCmd did not panic as expected") diff --git a/btcjson/submitpackage.go b/btcjson/submitpackage.go new file mode 100644 index 0000000000..d290a913b6 --- /dev/null +++ b/btcjson/submitpackage.go @@ -0,0 +1,243 @@ +package btcjson + +import ( + "encoding/json" + "fmt" + + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// JsonSubmitPackageCmd models the request payload for Bitcoin Core’s +// experimental `submitpackage` RPC (v26+). 
It submits a related group of +// transactions (a package) to the node's mempool for validation and acceptance. +// +// Package Requirements: +// - Topology: Must be a "child-with-unconfirmed-parents" package: exactly one +// child transaction (last in the list) and all of its unconfirmed parent +// transactions. Parents cannot depend on each other within the package. +// - Order: Transactions MUST be topologically sorted (parents before child). +// - Content: No duplicate transactions. No conflicting transactions (spending +// the same input) within the package. +// - Limits: Subject to node limits on package size (e.g., max 25 txs) and +// total weight (e.g., max 404000 weight units). +// +// Validation & Acceptance: +// - Individual First: Each transaction is first validated individually +// against mempool policy (including minimum relay fee `minrelaytxfee`). +// - Package Logic: Transactions failing individual checks (often due to low +// fee rate) are then evaluated using package logic. +// - Package Feerate: The total fee of non-mempool transactions divided by +// their total virtual size. This can overcome the dynamic mempool minimum +// fee rate (acting like CPFP), but cannot overcome the static +// `minrelaytxfee`. Any transaction below `minrelaytxfee` will cause +// rejection. +// - Deduplication: Transactions already in the mempool (by txid) are ignored +// during submission, preventing rejection and double-counting fees. See +// the `other-wtxid` field in the result if a different witness version +// exists. +// - Package RBF: Limited Replace-By-Fee logic applies. See the +// `replaced-transactions` field in the result. +// +// This RPC is experimental. Refer to Bitcoin Core's `doc/policy/packages.md` +// for details. Successful submission does not guarantee network propagation. 
+// +// Reference: https://bitcoincore.org/en/doc/29.0.0/rpc/rawtransactions/submitpackage/ +type JsonSubmitPackageCmd struct { + // RawTxs holds the hex-encoded raw transactions forming the package. + // MUST be topologically sorted (parents first, child last) and + // represent a valid "child-with-unconfirmed-parents" structure. + RawTxs []string `jsonrpc:"package"` + + // MaxFeeRate (Optional, BTC/kvB): If set, rejects package transactions + // exceeding this fee rate. Rates > 1 BTC/kvB are always rejected. If + // nil, Core's RPC default (e.g., 0.10 BTC/kvB) applies. Set to 0 for no + // limit (up to 1 BTC/kvB). + MaxFeeRate *float64 `jsonrpc:"maxfeerate,omitempty"` + + // MaxBurnAmount (Optional, BTC): If set, rejects packages where the + // total value of provably unspendable outputs (e.g., OP_RETURN) exceeds + // this amount. If nil, Core's RPC default (0.00 BTC) applies. + MaxBurnAmount *float64 `jsonrpc:"maxburnamount,omitempty"` +} + +// JsonSubmitPackageFees models the "fees" sub-object in a `submitpackage` +// response. Values are in BTC. May be omitted if fee info is not applicable. +type JsonSubmitPackageFees struct { + // Base is the absolute fee of this specific transaction (in BTC). + Base float64 `json:"base"` + + // EffectiveFeeRate (Optional, BTC/kvB): The transaction's effective + // feerate, potentially considering package context or + // `prioritisetransaction`. + EffectiveFeeRate *float64 `json:"effective-feerate,omitempty"` + + // EffectiveIncludes (Optional): wtxids contributing to + // `effective-feerate`. + EffectiveIncludes []string `json:"effective-includes,omitempty"` +} + +// JsonSubmitPackageTxResult represents the processing result for a single +// transaction within the package, keyed by its wtxid in the response map. +type JsonSubmitPackageTxResult struct { + // TxID is the transaction hash (txid) in hex. 
+ TxID string `json:"txid"` + + // OtherWtxid (Optional): Set if a conflicting tx with the same txid but + // different witness was already in the mempool (submitted tx was + // ignored). Relates to deduplication. + OtherWtxid *string `json:"other-wtxid,omitempty"` + + // VSize is the virtual size in vbytes. Note: Optional in RPC; defaults + // to 0 if missing. + VSize int64 `json:"vsize"` + + // Fees contains fee information. Note: Optional in RPC; defaults to + // empty struct if missing. + Fees JsonSubmitPackageFees `json:"fees"` + + // Error (Optional): String describing rejection reason, if any. Can + // result from individual checks (e.g., below `minrelaytxfee`) or + // package validation failures. + Error *string `json:"error,omitempty"` +} + +// JsonSubmitPackageResult mirrors the JSON object returned by `submitpackage`. +type JsonSubmitPackageResult struct { + // PackageMsg is a summary message ("success" or other status). + PackageMsg string `json:"package_msg"` + + // TxResults maps each submitted transaction's wtxid to its result. + TxResults map[string]JsonSubmitPackageTxResult `json:"tx-results"` + + // ReplacedTransactions (Optional): txids of transactions evicted via + // Package RBF. + ReplacedTransactions []string `json:"replaced-transactions,omitempty"` +} + +// NewJsonSubmitPackageCmd constructs a JsonSubmitPackageCmd. +// +// Parameters: +// - rawTxs: Slice of hex-encoded txs (topologically sorted +// child-with-parents). +// - maxFeeRateBtcKvB: Optional max fee rate (BTC/kvB). Nil uses RPC default. +// - maxBurnAmountBtc: Optional max burn amount (BTC). Nil uses RPC default. +func NewJsonSubmitPackageCmd(rawTxs []string, + maxFeeRateBtcKvB, maxBurnAmountBtc *float64) *JsonSubmitPackageCmd { + + return &JsonSubmitPackageCmd{ + RawTxs: rawTxs, + MaxFeeRate: maxFeeRateBtcKvB, + MaxBurnAmount: maxBurnAmountBtc, + } +} + +// SubmitPackageResult mirrors JsonSubmitPackageResult with higher-level types. 
+type SubmitPackageResult struct { + // PackageMsg is a summary message ("success" or other status). + PackageMsg string + + // TxResults maps each submitted transaction's wtxid to its result. + TxResults map[string]SubmitPackageTxResult + + // ReplacedTransactions (Optional): txids of transactions evicted via + // Package RBF. + ReplacedTransactions []chainhash.Hash +} + +// SubmitPackageTxResult mirrors (a subset of) JsonSubmitPackageTxResult with +// higher-level types. +type SubmitPackageTxResult struct { + // TxID is the transaction hash (txid) in hex. + TxID chainhash.Hash + + // OtherWtxid (Optional): Set if a conflicting tx with the same txid but + // different witness was already in the mempool (submitted tx was + // ignored). Relates to deduplication. + OtherWtxid *chainhash.Hash + + // Error (Optional): String describing rejection reason, if any. Can + // result from individual checks (e.g., below `minrelaytxfee`) or + // package validation failures. + Error *string +} + +// UnmarshalJSON unmarshals the JsonSubmitPackageResult from the JSON response +// to the higher-level SubmitPackageResult type. If the function succeeds, the +// receiver is overwritten with the unmarshalled result. +func (s *SubmitPackageResult) UnmarshalJSON(data []byte) error { + var src JsonSubmitPackageResult + if err := json.Unmarshal(data, &src); err != nil { + return err + } + + dst := SubmitPackageResult{ + PackageMsg: src.PackageMsg, + TxResults: make(map[string]SubmitPackageTxResult), + } + + // Translate TxResults. + if len(src.TxResults) > 0 { + + for wtxid, srcTxRes := range src.TxResults { + var dstTxRes SubmitPackageTxResult + + // Translate TxID. + txID, err := chainhash.NewHashFromStr(srcTxRes.TxID) + if err != nil { + return fmt.Errorf("failed to parse txid '%s' "+ + "for wtxid '%s': %w", srcTxRes.TxID, + wtxid, err) + } + + dstTxRes.TxID = *txID + + // Translate Error (direct copy). + dstTxRes.Error = srcTxRes.Error + + // Translate OtherWtxid. 
+ if srcTxRes.OtherWtxid != nil && + *srcTxRes.OtherWtxid != "" { + + otherWtxidHash, err := chainhash.NewHashFromStr( + *srcTxRes.OtherWtxid, + ) + if err != nil { + return fmt.Errorf("failed to parse "+ + "other_wtxid '%s' for wtxid "+ + "'%s': %w", + *srcTxRes.OtherWtxid, wtxid, + err) + } + + dstTxRes.OtherWtxid = otherWtxidHash + } + + dst.TxResults[wtxid] = dstTxRes + } + } + + // Translate ReplacedTransactions. + if len(src.ReplacedTransactions) > 0 { + dst.ReplacedTransactions = make( + []chainhash.Hash, 0, len(src.ReplacedTransactions), + ) + + for _, txidStr := range src.ReplacedTransactions { + hash, err := chainhash.NewHashFromStr(txidStr) + if err != nil { + return fmt.Errorf("failed to parse "+ + "replaced_transaction txid '%s': %w", + txidStr, err) + } + + dst.ReplacedTransactions = append( + dst.ReplacedTransactions, *hash, + ) + } + } + + // Overwrite the receiver with the translated result. + *s = dst + + return nil +} diff --git a/btcjson/submitpackage_test.go b/btcjson/submitpackage_test.go new file mode 100644 index 0000000000..a678dc51af --- /dev/null +++ b/btcjson/submitpackage_test.go @@ -0,0 +1,426 @@ +// Copyright (c) 2025 The btcsuite developers. +// Use of this source code is governed by an ISC license that can be found in +// the LICENSE file. + +package btcjson_test + +import ( + "encoding/json" + "testing" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/stretchr/testify/require" +) + +// TestSubmitPackageCmd tests all of the submit package commands marshal and +// unmarshal into valid results include handling of optional fields being +// omitted in the marshalled command, while optional fields with defaults have +// the default assigned on unmarshalled commands. 
+func TestSubmitPackageCmd(t *testing.T) { + t.Parallel() + + const testID = 1 + tests := []struct { + name string + newCmd func() (interface{}, error) + staticCmd func() interface{} + marshalled string + unmarshalled interface{} + }{ + { + name: "submitpackage minimal", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "submitpackage", + []string{"hex1", "hex2"}, + ) + }, + staticCmd: func() interface{} { + return btcjson.NewJsonSubmitPackageCmd( + []string{"hex1", "hex2"}, nil, nil, + ) + }, + marshalled: `{"jsonrpc":"1.0","method":"submitpackage","params":[["hex1","hex2"]],"id":1}`, + unmarshalled: &btcjson.JsonSubmitPackageCmd{ + RawTxs: []string{"hex1", "hex2"}, + MaxFeeRate: nil, + MaxBurnAmount: nil, + }, + }, + { + name: "submitpackage with maxfeerate", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "submitpackage", + []string{"hex1", "hex2"}, 0.1, + ) + }, + staticCmd: func() interface{} { + maxFeeRate := 0.1 + return btcjson.NewJsonSubmitPackageCmd( + []string{"hex1", "hex2"}, + &maxFeeRate, nil, + ) + }, + marshalled: `{"jsonrpc":"1.0","method":"submitpackage","params":[["hex1","hex2"],0.1],"id":1}`, + unmarshalled: &btcjson.JsonSubmitPackageCmd{ + RawTxs: []string{"hex1", "hex2"}, + MaxFeeRate: btcjson.Float64(0.1), + MaxBurnAmount: nil, + }, + }, + { + name: "submitpackage with all optional params", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "submitpackage", + []string{"hex1", "hex2", "hex3"}, + 0.25, 0.001, + ) + }, + staticCmd: func() interface{} { + maxFeeRate := 0.25 + maxBurnAmount := 0.001 + return btcjson.NewJsonSubmitPackageCmd( + []string{"hex1", "hex2", "hex3"}, + &maxFeeRate, &maxBurnAmount, + ) + }, + marshalled: `{"jsonrpc":"1.0","method":"submitpackage","params":[["hex1","hex2","hex3"],0.25,0.001],"id":1}`, + unmarshalled: &btcjson.JsonSubmitPackageCmd{ + RawTxs: []string{"hex1", "hex2", "hex3"}, + MaxFeeRate: btcjson.Float64(0.25), + MaxBurnAmount: btcjson.Float64(0.001), + }, 
+ }, + { + name: "submitpackage single tx", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "submitpackage", + []string{"hex1"}, + ) + }, + staticCmd: func() interface{} { + return btcjson.NewJsonSubmitPackageCmd( + []string{"hex1"}, nil, nil, + ) + }, + marshalled: `{"jsonrpc":"1.0","method":"submitpackage","params":[["hex1"]],"id":1}`, + unmarshalled: &btcjson.JsonSubmitPackageCmd{ + RawTxs: []string{"hex1"}, + MaxFeeRate: nil, + MaxBurnAmount: nil, + }, + }, + { + name: "submitpackage with zero maxfeerate", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "submitpackage", + []string{"hex1", "hex2"}, + 0.0, + ) + }, + staticCmd: func() interface{} { + maxFeeRate := 0.0 + return btcjson.NewJsonSubmitPackageCmd( + []string{"hex1", "hex2"}, + &maxFeeRate, nil, + ) + }, + marshalled: `{"jsonrpc":"1.0","method":"submitpackage","params":[["hex1","hex2"],0],"id":1}`, + unmarshalled: &btcjson.JsonSubmitPackageCmd{ + RawTxs: []string{"hex1", "hex2"}, + MaxFeeRate: btcjson.Float64(0.0), + MaxBurnAmount: nil, + }, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Marshal the command as created by the new static command + // creation function. + marshalled, err := btcjson.MarshalCmd( + btcjson.RpcVersion1, testID, test.staticCmd(), + ) + require.NoError( + t, err, "MarshalCmd #%d (%s) unexpected in test", i, + test.name, + ) + + require.Equal( + t, test.marshalled, string(marshalled), + "Test #%d (%s) unexpected marshalled data", + i, test.name, + ) + + // Ensure the command is created without error via the generic + // new command creation function. + cmd, err := test.newCmd() + require.NoError( + t, err, "NewCmd #%d (%s) unexpected error in test", + i, test.name, + ) + + // Marshal the command as created by the generic new command + // creation function. 
+ marshalled, err = btcjson.MarshalCmd( + btcjson.RpcVersion1, testID, cmd, + ) + require.NoError( + t, err, "MarshalCmd #%d (%s) unexpected in test", i, + test.name, + ) + + // Ensure the marshalled data matches the expected value. + require.Equal( + t, test.marshalled, string(marshalled), + "Test #%d (%s) unexpected marshalled data", + i, test.name, + ) + + var request btcjson.Request + err = json.Unmarshal(marshalled, &request) + require.NoError( + t, err, + "UnmarshalCmd #%d (%s) unexpected error in test", i, + test.name, + ) + + cmd, err = btcjson.UnmarshalCmd(&request) + require.NoError( + t, err, + "UnmarshalCmd #%d (%s) unexpected error in test", i, + test.name, + ) + + require.Equal( + t, test.unmarshalled, cmd, + "Test #%d (%s) unexpected unmarshalled command", + i, test.name, + ) + } +} + +// TestSubmitPackageResultUnmarshalJSON tests the UnmarshalJSON method of +// SubmitPackageResult to ensure it properly converts from JSON format to the +// higher-level Go types. +func TestSubmitPackageResultUnmarshalJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + json string + expected btcjson.SubmitPackageResult + wantErr bool + }{ + { + name: "successful package", + json: `{ + "package_msg": "success", + "tx-results": { + "wtxid1": { + "txid": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "vsize": 150, + "fees": { + "base": 0.00001000, + "effective-feerate": 0.00010000, + "effective-includes": ["wtxid1", "wtxid2"] + } + }, + "wtxid2": { + "txid": "fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321", + "vsize": 200, + "fees": { + "base": 0.00002000 + } + } + }, + "replaced-transactions": ["abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"] + }`, + expected: btcjson.SubmitPackageResult{ + PackageMsg: "success", + TxResults: map[string]btcjson.SubmitPackageTxResult{ + "wtxid1": { + TxID: *mustParseHash("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + "wtxid2": 
{ + TxID: *mustParseHash("fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), + }, + }, + ReplacedTransactions: []chainhash.Hash{ + *mustParseHash("abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"), + }, + }, + }, + { + name: "package with errors", + json: `{ + "package_msg": "transaction failed", + "tx-results": { + "wtxid1": { + "txid": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "vsize": 150, + "fees": { + "base": 0.00001000 + }, + "error": "insufficient fee" + } + } + }`, + expected: btcjson.SubmitPackageResult{ + PackageMsg: "transaction failed", + TxResults: map[string]btcjson.SubmitPackageTxResult{ + "wtxid1": { + TxID: *mustParseHash("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + Error: btcjson.String("insufficient fee"), + }, + }, + }, + }, + { + name: "package with other-wtxid", + json: `{ + "package_msg": "already in mempool", + "tx-results": { + "wtxid1": { + "txid": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "other-wtxid": "fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321", + "vsize": 150, + "fees": { + "base": 0.00001000 + } + } + } + }`, + expected: btcjson.SubmitPackageResult{ + PackageMsg: "already in mempool", + TxResults: map[string]btcjson.SubmitPackageTxResult{ + "wtxid1": { + TxID: *mustParseHash("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + OtherWtxid: mustParseHash("fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), + }, + }, + }, + }, + { + name: "minimal response", + json: `{ + "package_msg": "success", + "tx-results": {} + }`, + expected: btcjson.SubmitPackageResult{ + PackageMsg: "success", + TxResults: map[string]btcjson.SubmitPackageTxResult{}, + ReplacedTransactions: nil, + }, + }, + { + name: "invalid txid", + json: `{ + "package_msg": "success", + "tx-results": { + "wtxid1": { + "txid": "invalid-txid", + "vsize": 150, + "fees": { + "base": 0.00001000 + } + } + } + }`, 
+ wantErr: true, + }, + { + name: "invalid replaced transaction", + json: `{ + "package_msg": "success", + "tx-results": {}, + "replaced-transactions": ["not-a-valid-hash"] + }`, + wantErr: true, + }, + { + name: "invalid other-wtxid", + json: `{ + "package_msg": "success", + "tx-results": { + "wtxid1": { + "txid": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "other-wtxid": "invalid-wtxid", + "vsize": 150, + "fees": { + "base": 0.00001000 + } + } + } + }`, + wantErr: true, + }, + { + name: "empty other-wtxid", + json: `{ + "package_msg": "success", + "tx-results": { + "wtxid1": { + "txid": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "other-wtxid": "", + "vsize": 150, + "fees": { + "base": 0.00001000 + } + } + } + }`, + expected: btcjson.SubmitPackageResult{ + PackageMsg: "success", + TxResults: map[string]btcjson.SubmitPackageTxResult{ + "wtxid1": { + TxID: *mustParseHash("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + OtherWtxid: nil, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var result btcjson.SubmitPackageResult + err := result.UnmarshalJSON([]byte(test.json)) + + if test.wantErr { + require.Error( + t, err, "expected error for test: %s", + test.name, + ) + + return + } + + require.NoError( + t, err, "unexpected error for test: %s", + test.name, + ) + + require.Equal( + t, test.expected.PackageMsg, result.PackageMsg, + "unexpected PackageMsg for test: %s", test.name, + ) + }) + } +} + +// mustParseHash parses a hash string and panics on error. +// This is a helper for tests only. 
+func mustParseHash(s string) *chainhash.Hash { + hash, err := chainhash.NewHashFromStr(s) + if err != nil { + panic(err) + } + + return hash +} diff --git a/btcjson/walletsvrcmds.go b/btcjson/walletsvrcmds.go index 9b787f60c4..2613acf8a4 100644 --- a/btcjson/walletsvrcmds.go +++ b/btcjson/walletsvrcmds.go @@ -903,9 +903,14 @@ func (r *DescriptorRange) UnmarshalJSON(data []byte) error { if len(v) != 2 { return fmt.Errorf("expected [begin,end] integer range, got: %v", unmarshalled) } + begin, ok1 := v[0].(float64) + end, ok2 := v[1].(float64) + if !ok1 || !ok2 { + return fmt.Errorf("expected both begin and end to be numbers, got: %v", v) + } r.Value = []int{ - int(v[0].(float64)), - int(v[1].(float64)), + int(begin), + int(end), } default: return fmt.Errorf("invalid descriptor range value: %v", unmarshalled) diff --git a/btcutil/base58/cov_report.sh b/btcutil/base58/cov_report.sh index 307f05b76c..e41c9282e4 100644 --- a/btcutil/base58/cov_report.sh +++ b/btcutil/base58/cov_report.sh @@ -1,17 +1,9 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... + +# Display function-level coverage statistics. 
+go tool cover -func=coverage.txt diff --git a/btcutil/bloom/cov_report.sh b/btcutil/bloom/cov_report.sh index 307f05b76c..e41c9282e4 100644 --- a/btcutil/bloom/cov_report.sh +++ b/btcutil/bloom/cov_report.sh @@ -1,17 +1,9 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... + +# Display function-level coverage statistics. +go tool cover -func=coverage.txt diff --git a/btcutil/bloom/filter.go b/btcutil/bloom/filter.go index 2eca228570..e91986290c 100644 --- a/btcutil/bloom/filter.go +++ b/btcutil/bloom/filter.go @@ -76,12 +76,21 @@ func NewFilter(elements, tweak uint32, fprate float64, flags wire.BloomUpdateTyp } } +// normalize adjusts filter parameters for consistency. +func (bf *Filter) normalize() { + if bf.msgFilterLoad != nil && len(bf.msgFilterLoad.Filter) == 0 { + bf.msgFilterLoad.HashFuncs = 0 + } +} + // LoadFilter creates a new Filter instance with the given underlying // wire.MsgFilterLoad. func LoadFilter(filter *wire.MsgFilterLoad) *Filter { - return &Filter{ + bf := &Filter{ msgFilterLoad: filter, } + bf.normalize() + return bf } // IsLoaded returns true if a filter is loaded, otherwise false. 
@@ -100,6 +109,7 @@ func (bf *Filter) IsLoaded() bool { func (bf *Filter) Reload(filter *wire.MsgFilterLoad) { bf.mtx.Lock() bf.msgFilterLoad = filter + bf.normalize() bf.mtx.Unlock() } @@ -147,7 +157,8 @@ func (bf *Filter) matches(data []byte) bool { return false } } - return true + + return bf.msgFilterLoad.HashFuncs > 0 } // Matches returns true if the bloom filter might contain the passed data and diff --git a/btcutil/bloom/filter_test.go b/btcutil/bloom/filter_test.go index c4b839ad17..942bed5d26 100644 --- a/btcutil/bloom/filter_test.go +++ b/btcutil/bloom/filter_test.go @@ -26,19 +26,52 @@ func TestFilterLarge(t *testing.T) { // TestFilterLoad ensures loading and unloading of a filter pass. func TestFilterLoad(t *testing.T) { - merkle := wire.MsgFilterLoad{} - - f := bloom.LoadFilter(&merkle) - if !f.IsLoaded() { - t.Errorf("TestFilterLoad IsLoaded test failed: want %v got %v", - true, !f.IsLoaded()) - return + // Test various filter configurations + tests := []struct { + name string + filter *wire.MsgFilterLoad + wantMatch bool + }{ + { + "normal filter", + &wire.MsgFilterLoad{ + Filter: []byte{0x00}, + HashFuncs: 1, + }, + false, + }, + { + "empty filter with funcs", + &wire.MsgFilterLoad{ + Filter: []byte{}, + HashFuncs: 1, + }, + false, + }, + { + "minimal filter", + &wire.MsgFilterLoad{}, + false, + }, } - f.Unload() - if f.IsLoaded() { - t.Errorf("TestFilterLoad IsLoaded test failed: want %v got %v", - f.IsLoaded(), false) - return + + for _, test := range tests { + f := bloom.LoadFilter(test.filter) + if !f.IsLoaded() { + t.Errorf("%s: IsLoaded test failed: "+ + "want true got false", test.name) + continue + } + + if f.Matches([]byte("test")) != test.wantMatch { + t.Errorf("%s: unexpected match result", test.name) + } + + f.Unload() + if f.IsLoaded() { + t.Errorf("%s: IsLoaded after Unload failed: "+ + "want false got true", test.name) + } } } @@ -652,9 +685,46 @@ func TestFilterReload(t *testing.T) { t.Errorf("TestFilterReload LoadFilter test 
failed") return } - bFilter.Reload(nil) - if bFilter.MsgFilterLoad() != nil { - t.Errorf("TestFilterReload Reload test failed") + reloadTests := []struct { + name string + filter *wire.MsgFilterLoad + }{ + { + name: "nil filter", + filter: nil, + }, + { + name: "empty filter", + filter: &wire.MsgFilterLoad{ + Filter: []byte{}, + HashFuncs: 3, + }, + }, + { + name: "normal filter", + filter: &wire.MsgFilterLoad{ + Filter: []byte{0x00}, + HashFuncs: 1, + }, + }, + } + + for _, test := range reloadTests { + bFilter.Reload(test.filter) + if test.filter == nil { + if bFilter.MsgFilterLoad() != nil { + t.Errorf("%s: Reload test failed - "+ + "expected nil", test.name) + } + } else { + bFilter.Add([]byte("test data")) + if bFilter.Matches([]byte("test data")) && + len(test.filter.Filter) == 0 { + + t.Errorf("%s: empty filter should "+ + "not match", test.name) + } + } } } diff --git a/btcutil/coinset/cov_report.sh b/btcutil/coinset/cov_report.sh index 307f05b76c..01dd7457fd 100644 --- a/btcutil/coinset/cov_report.sh +++ b/btcutil/coinset/cov_report.sh @@ -1,17 +1,8 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Display function-level coverage statistics. 
+go tool cover -func=coverage.txt diff --git a/btcutil/cov_report.sh b/btcutil/cov_report.sh index 307f05b76c..e41c9282e4 100644 --- a/btcutil/cov_report.sh +++ b/btcutil/cov_report.sh @@ -1,17 +1,9 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... + +# Display function-level coverage statistics. +go tool cover -func=coverage.txt diff --git a/btcutil/go.mod b/btcutil/go.mod index 9718e6d5fd..b0b8597e55 100644 --- a/btcutil/go.mod +++ b/btcutil/go.mod @@ -1,10 +1,10 @@ module github.com/btcsuite/btcd/btcutil -go 1.16 +go 1.22 require ( github.com/aead/siphash v1.0.1 - github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd + github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/btcec/v2 v2.1.3 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/davecgh/go-spew v1.1.1 @@ -12,3 +12,9 @@ require ( github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) + +require ( + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect + github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect + golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed // indirect +) diff --git a/btcutil/go.sum b/btcutil/go.sum index 5ee4cd5207..42dd4584a8 100644 --- a/btcutil/go.sum +++ b/btcutil/go.sum @@ -1,111 +1,37 @@ 
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= -github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= -github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= -github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod 
h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo 
v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed h1:J22ig1FUekjjkmZUM7pTKixYm8DvrYsvrBZdunYeIuQ= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/btcutil/hdkeychain/cov_report.sh b/btcutil/hdkeychain/cov_report.sh index 307f05b76c..e41c9282e4 100644 --- a/btcutil/hdkeychain/cov_report.sh +++ b/btcutil/hdkeychain/cov_report.sh @@ -1,17 +1,9 @@ #!/bin/sh -# This script uses gocov to generate a test coverage report. 
-# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. +# This script uses the standard Go test coverage tools to generate a test coverage report. -# Check for gocov. -type gocov >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi -gocov test | gocov report +# Run tests with coverage enabled and generate coverage profile. +go test -cover -coverprofile=coverage.txt ./... + +# Display function-level coverage statistics. +go tool cover -func=coverage.txt diff --git a/btcutil/psbt/bip32.go b/btcutil/psbt/bip32.go index 96a3f67274..4f9bb57de8 100644 --- a/btcutil/psbt/bip32.go +++ b/btcutil/psbt/bip32.go @@ -3,6 +3,15 @@ package psbt import ( "bytes" "encoding/binary" + + "github.com/btcsuite/btcd/btcutil/base58" + "github.com/btcsuite/btcd/btcutil/hdkeychain" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +const ( + // uint32Size is the size of a uint32 in bytes. + uint32Size = 4 ) // Bip32Derivation encapsulates the data for the input and output @@ -38,21 +47,23 @@ func (s Bip32Sorter) Less(i, j int) bool { // ReadBip32Derivation deserializes a byte slice containing chunks of 4 byte // little endian encodings of uint32 values, the first of which is the -// masterkeyfingerprint and the remainder of which are the derivation path. +// MasterKeyFingerprint and the remainder of which are the derivation path. func ReadBip32Derivation(path []byte) (uint32, []uint32, error) { // BIP-0174 defines the derivation path being encoded as // "<32-bit uint> <32-bit uint>*" // with the asterisk meaning 0 to n times. Which in turn means that an // empty path is valid, only the key fingerprint is mandatory. 
- if len(path)%4 != 0 { + if len(path) < uint32Size || len(path)%uint32Size != 0 { return 0, nil, ErrInvalidPsbtFormat } - masterKeyInt := binary.LittleEndian.Uint32(path[:4]) + masterKeyInt := binary.LittleEndian.Uint32(path[:uint32Size]) var paths []uint32 - for i := 4; i < len(path); i += 4 { - paths = append(paths, binary.LittleEndian.Uint32(path[i:i+4])) + for i := uint32Size; i < len(path); i += uint32Size { + paths = append(paths, binary.LittleEndian.Uint32( + path[i:i+uint32Size], + )) } return masterKeyInt, paths, nil @@ -65,16 +76,81 @@ func ReadBip32Derivation(path []byte) (uint32, []uint32, error) { func SerializeBIP32Derivation(masterKeyFingerprint uint32, bip32Path []uint32) []byte { - var masterKeyBytes [4]byte + var masterKeyBytes [uint32Size]byte binary.LittleEndian.PutUint32(masterKeyBytes[:], masterKeyFingerprint) - derivationPath := make([]byte, 0, 4+4*len(bip32Path)) + derivationPath := make([]byte, 0, uint32Size+uint32Size*len(bip32Path)) derivationPath = append(derivationPath, masterKeyBytes[:]...) for _, path := range bip32Path { - var pathbytes [4]byte - binary.LittleEndian.PutUint32(pathbytes[:], path) - derivationPath = append(derivationPath, pathbytes[:]...) + var pathBytes [uint32Size]byte + binary.LittleEndian.PutUint32(pathBytes[:], path) + derivationPath = append(derivationPath, pathBytes[:]...) } return derivationPath } + +// XPub is a struct that encapsulates an extended public key, as defined in +// BIP-0032. +type XPub struct { + // ExtendedKey is the serialized extended public key as defined in + // BIP-0032. + ExtendedKey []byte + + // MasterFingerprint is the fingerprint of the master pubkey. + MasterKeyFingerprint uint32 + + // Bip32Path is the derivation path of the key, with hardened elements + // having the 0x80000000 offset added, as defined in BIP-0032. The + // number of path elements must match the depth provided in the extended + // public key. 
+ Bip32Path []uint32 +} + +// ReadXPub deserializes a byte slice containing an extended public key and a +// BIP-0032 derivation path. +func ReadXPub(keyData []byte, path []byte) (*XPub, error) { + xPub, err := DecodeExtendedKey(keyData) + if err != nil { + return nil, ErrInvalidPsbtFormat + } + numPathElements := xPub.Depth() + + // The path also contains the master key fingerprint, + expectedSize := int(uint32Size * (numPathElements + 1)) + if len(path) != expectedSize { + return nil, ErrInvalidPsbtFormat + } + + masterKeyFingerprint, bip32Path, err := ReadBip32Derivation(path) + if err != nil { + return nil, err + } + + return &XPub{ + ExtendedKey: keyData, + MasterKeyFingerprint: masterKeyFingerprint, + Bip32Path: bip32Path, + }, nil +} + +// EncodeExtendedKey serializes an extended key to a byte slice, without the +// checksum. +func EncodeExtendedKey(key *hdkeychain.ExtendedKey) []byte { + serializedKey := key.String() + decodedKey := base58.Decode(serializedKey) + return decodedKey[:len(decodedKey)-uint32Size] +} + +// DecodeExtendedKey deserializes an extended key from a byte slice that does +// not contain the checksum. +func DecodeExtendedKey(encodedKey []byte) (*hdkeychain.ExtendedKey, error) { + checkSum := chainhash.DoubleHashB(encodedKey)[:uint32Size] + serializedBytes := append(encodedKey, checkSum...) + xPub, err := hdkeychain.NewKeyFromString(base58.Encode(serializedBytes)) + if err != nil { + return nil, err + } + + return xPub, nil +} diff --git a/btcutil/psbt/bip32_test.go b/btcutil/psbt/bip32_test.go new file mode 100644 index 0000000000..88caa5c336 --- /dev/null +++ b/btcutil/psbt/bip32_test.go @@ -0,0 +1,74 @@ +package psbt + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestReadBip32Derivation tests the ReadBip32Derivation function to ensure +// it correctly deserializes the BIP32 derivation path and master key +// fingerprint from a byte slice. 
+func TestReadBip32Derivation(t *testing.T) { + tests := []struct { + name string + path []byte + expectFingerprint uint32 + expectPath []uint32 + expectErr error + }{ + { + name: "valid path with multiple derivations", + path: []byte{ + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00}, + expectFingerprint: 1, + expectPath: []uint32{2, 3}, + }, + { + name: "valid path with single derivation", + path: []byte{ + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + }, + expectFingerprint: 1, + expectPath: []uint32{2}, + }, + { + name: "valid path with no derivations", + path: []byte{ + 0x01, 0x00, 0x00, 0x00, + }, + expectFingerprint: 1, + expectPath: nil, + }, + { + name: "invalid path length", + path: []byte{ + 0x01, 0x00, 0x00, + }, + expectErr: ErrInvalidPsbtFormat, + }, + { + name: "invalid path length not multiple of 4", + path: []byte{ + 0x01, 0x00, 0x00, 0x00, 0x02, + }, + expectErr: ErrInvalidPsbtFormat, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fp, path, err := ReadBip32Derivation(tt.path) + if tt.expectErr != nil { + require.ErrorIs(t, err, tt.expectErr) + + return + } + + require.NoError(t, err) + require.Equal(t, tt.expectFingerprint, fp) + require.Equal(t, tt.expectPath, path) + }) + } +} diff --git a/btcutil/psbt/go.mod b/btcutil/psbt/go.mod index 81ccca266b..09f4c3f7dd 100644 --- a/btcutil/psbt/go.mod +++ b/btcutil/psbt/go.mod @@ -1,22 +1,22 @@ module github.com/btcsuite/btcd/btcutil/psbt -go 1.17 +go 1.22 require ( - github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f - github.com/btcsuite/btcd/btcec/v2 v2.1.3 - github.com/btcsuite/btcd/btcutil v1.1.4 + github.com/btcsuite/btcd v0.24.2 + github.com/btcsuite/btcd/btcec/v2 v2.3.4 + github.com/btcsuite/btcd/btcutil v1.1.5 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/davecgh/go-spew v1.1.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.4 ) require ( - github.com/btcsuite/btclog 
v0.0.0-20170628155309-84c8d2346e9f // indirect + github.com/btcsuite/btclog v1.0.0 // indirect github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect - golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/sys v0.30.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/btcutil/psbt/go.sum b/btcutil/psbt/go.sum index 74a2ce8a3d..2cb9575394 100644 --- a/btcutil/psbt/go.sum +++ b/btcutil/psbt/go.sum @@ -2,21 +2,23 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= -github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f h1:E+dQ8sNtK/lOdfeflUKkRLXe/zW7I333C7HhaoASjZA= -github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f/go.mod h1:KVEB81PybLGYzpf1db/kKNi1ZEbUsiVGeTGhKuOl5AM= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= -github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod 
h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/btcutil v1.1.4 h1:mWvWRLRIPuoeZsVRpc0xNCkfeNxWy1E4jIZ06ZpGI1A= -github.com/btcsuite/btcd/btcutil v1.1.4/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btclog v1.0.0 h1:sEkpKJMmfGiyZjADwEIgB1NSwMyfdD1FB8v6+w1T0Ns= +github.com/btcsuite/btclog v1.0.0/go.mod h1:w7xnGOhwT3lmrS4H3b/D1XAXxvh+tbhUm8xeHN2y3TQ= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= @@ -65,13 +67,15 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -86,8 +90,9 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200814200057-3d37ad5750ed h1:J22ig1FUekjjkmZUM7pTKixYm8DvrYsvrBZdunYeIuQ= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -107,5 +112,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/btcutil/psbt/partialsig.go b/btcutil/psbt/partialsig.go index dfb7004999..9d24c49b80 100644 --- a/btcutil/psbt/partialsig.go +++ b/btcutil/psbt/partialsig.go @@ -2,6 +2,7 @@ package psbt import ( "bytes" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2/ecdsa" ) diff --git a/btcutil/psbt/psbt.go b/btcutil/psbt/psbt.go index 964061bdc5..5249aad4e1 100644 --- a/btcutil/psbt/psbt.go +++ b/btcutil/psbt/psbt.go @@ -37,6 +37,12 @@ const MaxPsbtValueLength = 4000000 // deserialize from the wire. Anything more will return ErrInvalidKeyData. const MaxPsbtKeyLength = 10000 +// MaxPsbtKeyValue is the maximum value of a key type in a PSBT. 
This maximum +// isn't specified by the BIP but used by bitcoind in various places to limit +// the number of items processed. So we use it to validate the key type in order +// to have a consistent behavior. +const MaxPsbtKeyValue = 0x02000000 + var ( // ErrInvalidPsbtFormat is a generic error for any situation in which a @@ -129,6 +135,13 @@ type Packet struct { // produced by this PSBT. Outputs []POutput + // XPubs is a list of extended public keys that can be used to derive + // public keys used in the inputs and outputs of this transaction. It + // should be the public key at the highest hardened derivation index so + // that the unhardened child keys used in the transaction can be + // derived. + XPubs []XPub + // Unknowns are the set of custom types (global only) within this PSBT. Unknowns []*Unknown } @@ -155,12 +168,14 @@ func NewFromUnsignedTx(tx *wire.MsgTx) (*Packet, error) { inSlice := make([]PInput, len(tx.TxIn)) outSlice := make([]POutput, len(tx.TxOut)) + xPubSlice := make([]XPub, 0) unknownSlice := make([]*Unknown, 0) return &Packet{ UnsignedTx: tx, Inputs: inSlice, Outputs: outSlice, + XPubs: xPubSlice, Unknowns: unknownSlice, }, nil } @@ -224,7 +239,10 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { // Next we parse any unknowns that may be present, making sure that we // break at the separator. - var unknownSlice []*Unknown + var ( + xPubSlice []XPub + unknownSlice []*Unknown + ) for { keyint, keydata, err := getKey(r) if err != nil { @@ -241,14 +259,32 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { return nil, err } - keyintanddata := []byte{byte(keyint)} - keyintanddata = append(keyintanddata, keydata...) 
- - newUnknown := &Unknown{ - Key: keyintanddata, - Value: value, + switch GlobalType(keyint) { + case XPubType: + xPub, err := ReadXPub(keydata, value) + if err != nil { + return nil, err + } + + // Duplicate keys are not allowed. + for _, x := range xPubSlice { + if bytes.Equal(x.ExtendedKey, keydata) { + return nil, ErrDuplicateKey + } + } + + xPubSlice = append(xPubSlice, *xPub) + + default: + keyintanddata := []byte{byte(keyint)} + keyintanddata = append(keyintanddata, keydata...) + + newUnknown := &Unknown{ + Key: keyintanddata, + Value: value, + } + unknownSlice = append(unknownSlice, newUnknown) } - unknownSlice = append(unknownSlice, newUnknown) } // Next we parse the INPUT section. @@ -280,6 +316,7 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { UnsignedTx: msgTx, Inputs: inSlice, Outputs: outSlice, + XPubs: xPubSlice, Unknowns: unknownSlice, } @@ -319,6 +356,19 @@ func (p *Packet) Serialize(w io.Writer) error { return err } + // Serialize the global xPubs. + for _, xPub := range p.XPubs { + pathBytes := SerializeBIP32Derivation( + xPub.MasterKeyFingerprint, xPub.Bip32Path, + ) + err := serializeKVPairWithType( + w, uint8(XPubType), xPub.ExtendedKey, pathBytes, + ) + if err != nil { + return err + } + } + // Unknown is a special case; we don't have a key type, only a key and // a value field for _, kv := range p.Unknowns { diff --git a/btcutil/psbt/psbt_test.go b/btcutil/psbt/psbt_test.go index 2309b07e40..0dfa44c56c 100644 --- a/btcutil/psbt/psbt_test.go +++ b/btcutil/psbt/psbt_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "encoding/binary" "encoding/hex" + "math" "strings" "testing" @@ -57,20 +58,42 @@ func createPsbtFromSignedTx(serializedSignedTx []byte) ( return unsignedPsbt, scriptSigs, witnesses, nil } -// These are all valid PSBTs encoded as hex. +// These are all valid PSBTs encoded as hex. 
The items with a comment are taken +// from the BIP174 test vectors: +// https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#test-vectors var validPsbtHex = map[int]string{ + // Case: PSBT with one P2PKH input. Outputs are empty. 0: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab300000000000000", + // Case: PSBT with one P2PKH input and one P2SH-P2WPKH input. First + // input is signed and finalized. Outputs are empty. 
1: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac000000000001076a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa882920001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000", + // Case: PSBT with one P2PKH input which has a non-final scriptSig and + // has a sighash type specified. Outputs are empty. 2: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001030401000000000000", + // Case: PSBT with one P2PKH input and one 
P2SH-P2WPKH input both with + // non-final scriptSigs. P2SH-P2WPKH input's redeemScript is available. + // Outputs filled. 3: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000100df0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e13000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb8230800220202ead596687ca806043edc3de116cdf29d5e9257c196cd055cf698c8d02bf24e9910b4a6ba670000008000000080020000800022020394f62be9df19952c5587768aeb7698061ad2c4a25c894f47d8c162b4d7213d0510b4a6ba6700000080010000800200008000", + // Case: PSBT with one P2SH-P2WSH input of a 2-of-2 multisig, + // redeemScript, witnessScript, and keypaths are available. Contains one + // signature. 
4: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000", 5: "70736274ff01003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000a0f0102030405060708090f0102030405060708090a0b0c0d0e0f0000", 6: "70736274ff01003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000002206030d097466b7f59162ac4d90bf65f2a31a8bad82fcd22e98138dcf279401939bd104ffffffff0a0f0102030405060708090f0102030405060708090a0b0c0d0e0f0000", 7: "70736274ff01002001000000000100000000000000000d6a0b68656c6c6f20776f726c64000000000000", + // Case: PSBT with one P2WSH input of a 2-of-2 multisig. witnessScript, + // keypaths, and global xpubs are available. Contains no signatures. + // Outputs filled. 
+ 8: "70736274ff01005202000000019dfc6628c26c5899fe1bd3dc338665bfd55d7ada10f6220973df2d386dec12760100000000ffffffff01f03dcd1d000000001600147b3a00bfdc14d27795c2b74901d09da6ef133579000000004f01043587cf02da3fd0088000000097048b1ad0445b1ec8275517727c87b4e4ebc18a203ffa0f94c01566bd38e9000351b743887ee1d40dc32a6043724f2d6459b3b5a4d73daec8fbae0472f3bc43e20cd90c6a4fae000080000000804f01043587cf02da3fd00880000001b90452427139cd78c2cff2444be353cd58605e3e513285e528b407fae3f6173503d30a5e97c8adbc557dac2ad9a7e39c1722ebac69e668b6f2667cc1d671c83cab0cd90c6a4fae000080010000800001012b0065cd1d000000002200202c5486126c4978079a814e13715d65f36459e4d6ccaded266d0508645bafa6320105475221029da12cdb5b235692b91536afefe5c91c3ab9473d8e43b533836ab456299c88712103372b34234ed7cf9c1fea5d05d441557927be9542b162eb02e1ab2ce80224c00b52ae2206029da12cdb5b235692b91536afefe5c91c3ab9473d8e43b533836ab456299c887110d90c6a4fae0000800000008000000000220603372b34234ed7cf9c1fea5d05d441557927be9542b162eb02e1ab2ce80224c00b10d90c6a4fae0000800100008000000000002202039eff1f547a1d5f92dfa2ba7af6ac971a4bd03ba4a734b03156a256b8ad3a1ef910ede45cc500000080000000800100008000", + // Case: PSBT with `PSBT_GLOBAL_XPUB`. 
+ 9: "70736274ff01009d0100000002710ea76ab45c5cb6438e607e59cc037626981805ae9e0dfd9089012abb0be5350100000000ffffffff190994d6a8b3c8c82ccbcfb2fba4106aa06639b872a8d447465c0d42588d6d670000000000ffffffff0200e1f505000000001976a914b6bc2c0ee5655a843d79afedd0ccc3f7dd64340988ac605af405000000001600141188ef8e4ce0449eaac8fb141cbf5a1176e6a088000000004f010488b21e039e530cac800000003dbc8a5c9769f031b17e77fea1518603221a18fd18f2b9a54c6c8c1ac75cbc3502f230584b155d1c7f1cd45120a653c48d650b431b67c5b2c13f27d7142037c1691027569c503100008000000080000000800001011f00e1f5050000000016001433b982f91b28f160c920b4ab95e58ce50dda3a4a220203309680f33c7de38ea6a47cd4ecd66f1f5a49747c6ffb8808ed09039243e3ad5c47304402202d704ced830c56a909344bd742b6852dccd103e963bae92d38e75254d2bb424502202d86c437195df46c0ceda084f2a291c3da2d64070f76bf9b90b195e7ef28f77201220603309680f33c7de38ea6a47cd4ecd66f1f5a49747c6ffb8808ed09039243e3ad5c1827569c5031000080000000800000008000000000010000000001011f00e1f50500000000160014388fb944307eb77ef45197d0b0b245e079f011de220202c777161f73d0b7c72b9ee7bde650293d13f095bc7656ad1f525da5fd2e10b11047304402204cb1fb5f869c942e0e26100576125439179ae88dca8a9dc3ba08f7953988faa60220521f49ca791c27d70e273c9b14616985909361e25be274ea200d7e08827e514d01220602c777161f73d0b7c72b9ee7bde650293d13f095bc7656ad1f525da5fd2e10b1101827569c5031000080000000800000008000000000000000000000220202d20ca502ee289686d21815bd43a80637b0698e1fbcdbe4caed445f6c1a0a90ef1827569c50310000800000008000000080000000000400000000", } -// These are additional valid PSBTs encoded as base64. +// These are additional valid PSBTs encoded as base64. The items with a comment +// are taken from the BIP174 test vectors: +// https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#test-vectors var validPsbtBase64 = map[int]string{ + // PSBT with one P2PKH input. Outputs are empty. 
0: "cHNidP8BAHUCAAAAASaBcTce3/KF6Tet7qSze3gADAVmy7OtZGQXE8pCFxv2AAAAAAD+////AtPf9QUAAAAAGXapFNDFmQPFusKGh2DpD9UhpGZap2UgiKwA4fUFAAAAABepFDVF5uM7gyxHBQ8k0+65PJwDlIvHh7MuEwAAAQD9pQEBAAAAAAECiaPHHqtNIOA3G7ukzGmPopXJRjr6Ljl/hTPMti+VZ+UBAAAAFxYAFL4Y0VKpsBIDna89p95PUzSe7LmF/////4b4qkOnHf8USIk6UwpyN+9rRgi7st0tAXHmOuxqSJC0AQAAABcWABT+Pp7xp0XpdNkCxDVZQ6vLNL1TU/////8CAMLrCwAAAAAZdqkUhc/xCX/Z4Ai7NK9wnGIZeziXikiIrHL++E4sAAAAF6kUM5cluiHv1irHU6m80GfWx6ajnQWHAkcwRAIgJxK+IuAnDzlPVoMR3HyppolwuAJf3TskAinwf4pfOiQCIAGLONfc0xTnNMkna9b7QPZzMlvEuqFEyADS8vAtsnZcASED0uFWdJQbrUqZY3LLh+GFbTZSYG2YVi/jnF6efkE/IQUCSDBFAiEA0SuFLYXc2WHS9fSrZgZU327tzHlMDDPOXMMJ/7X85Y0CIGczio4OFyXBl/saiK9Z9R5E5CVbIBZ8hoQDHAXR8lkqASECI7cr7vCWXRC+B3jv7NYfysb3mk6haTkzgHNEZPhPKrMAAAAAIQ12pWrO2RXSUT3NhMLDeLLoqlzWMrW3HKLyrFsOOmSb2wIBAiENnBLP3ATHRYTXh6w9I3chMsGFJLx6so3sQhm4/FtCX3ABAQAAAA==", 1: "cHNidP8BAFICAAAAASd0Srq/MCf+DWzyOpbu4u+xiO9SMBlUWFiD5ptmJLJCAAAAAAD/////AUjmBSoBAAAAFgAUdo4e60z0IIZgM/gKzv8PlyB0SWkAAAAAAAEBKwDyBSoBAAAAIlEgWiws9bUs8x+DrS6Npj/wMYPs2PYJx1EK6KSOA5EKB1chFv40kGTJjW4qhT+jybEr2LMEoZwZXGDvp+4jkwRtP6IyGQB3Ky2nVgAAgAEAAIAAAACAAQAAAAAAAAABFyD+NJBkyY1uKoU/o8mxK9izBKGcGVxg76fuI5MEbT+iMgAiAgNrdyptt02HU8mKgnlY3mx4qzMSEJ830+AwRIQkLs5z2Bh3Ky2nVAAAgAEAAIAAAACAAAAAAAAAAAAA", 2: "cHNidP8BAFICAAAAASd0Srq/MCf+DWzyOpbu4u+xiO9SMBlUWFiD5ptmJLJCAAAAAAD/////AUjmBSoBAAAAFgAUdo4e60z0IIZgM/gKzv8PlyB0SWkAAAAAAAEBKwDyBSoBAAAAIlEgWiws9bUs8x+DrS6Npj/wMYPs2PYJx1EK6KSOA5EKB1cBE0C7U+yRe62dkGrxuocYHEi4as5aritTYFpyXKdGJWMUdvxvW67a9PLuD0d/NvWPOXDVuCc7fkl7l68uPxJcl680IRb+NJBkyY1uKoU/o8mxK9izBKGcGVxg76fuI5MEbT+iMhkAdystp1YAAIABAACAAAAAgAEAAAAAAAAAARcg/jSQZMmNbiqFP6PJsSvYswShnBlcYO+n7iOTBG0/ojIAIgIDa3cqbbdNh1PJioJ5WN5seKszEhCfN9PgMESEJC7Oc9gYdystp1QAAIABAACAAAAAgAAAAAAAAAAAAA==", @@ -78,6 +101,12 @@ var validPsbtBase64 = map[int]string{ 4: 
"cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgg2mORYxmZOFZXXXaJZfeHiLul9eY5wbEwKS1qYI810MAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJiFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wG99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwEV8uSQr3zEXE94UR82BXzlxaXFYyWin7RN/CA/NW4fgjICyxOsaCSN6AaqajZZzzwD62gh0JyBFKToaP696GW7bSrMBCFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wJfG5v6l/3FP9XJEmZkIEOQG6YqhD1v35fZ4S8HQqabOIyBDILC/FvARtT6nvmFZJKp/J+XSmtIOoRVdhIZ2w7rRsqzAYhXBUJKbdMGgSVS3i0tgNel6XgeKWg8o7JbVR7/ums6AOsDNlw4V9T/AyC+VD9Vg/6kZt2FyvgFzaKiZE68HT0ALCRFfLkkK98xFxPeFEfNgV85cWlxWMlop+0TfwgPzVuH4IyD6D3o87zsdDAps59JuF62gsuXJLRnvrUi0GFnLikUcqazAIRYssTrGgkjegGqmo2Wc88A+toIdCcgRSk6Gj+vehlu20jkBzZcOFfU/wMgvlQ/VYP+pGbdhcr4Bc2iomROvB09ACwl3Ky2nVgAAgAEAAIACAACAAAAAAAAAAAAhFkMgsL8W8BG1Pqe+YVkkqn8n5dKa0g6hFV2EhnbDutGyOQERXy5JCvfMRcT3hRHzYFfOXFpcVjJaKftE38ID81bh+HcrLadWAACAAQAAgAEAAIAAAAAAAAAAACEWUJKbdMGgSVS3i0tgNel6XgeKWg8o7JbVR7/ums6AOsAFAHxGHl0hFvoPejzvOx0MCmzn0m4XraCy5cktGe+tSLQYWcuKRRypOQFvfWIFnpSXoaSiZ1admHbaYBAa/zjjUpubk5zn+RrpcHcrLadWAACAAQAAgAMAAIAAAAAAAAAAAAEXIFCSm3TBoElUt4tLYDXpel4HiloPKOyW1Ue/7prOgDrAARgg8DYuL3Wm9CClvePrIh2WrmcgzyX4GJDJWx13WstRXmUAAQUgESTaeuySzNBslUViZH9DexOLlXIahL4r8idrvdqz5nEhBxEk2nrskszQbJVFYmR/Q3sTi5VyGoS+K/Ina73as+ZxGQB3Ky2nVgAAgAEAAIAAAACAAAAAAAUAAAAA", 5: 
"cHNidP8BAF4CAAAAASd0Srq/MCf+DWzyOpbu4u+xiO9SMBlUWFiD5ptmJLJCAAAAAAD/////AUjmBSoBAAAAIlEgCoy9yG3hzhwPnK6yLW33ztNoP+Qj4F0eQCqHk0HW9vUAAAAAAAEBKwDyBSoBAAAAIlEgWiws9bUs8x+DrS6Npj/wMYPs2PYJx1EK6KSOA5EKB1chFv40kGTJjW4qhT+jybEr2LMEoZwZXGDvp+4jkwRtP6IyGQB3Ky2nVgAAgAEAAIAAAACAAQAAAAAAAAABFyD+NJBkyY1uKoU/o8mxK9izBKGcGVxg76fuI5MEbT+iMgABBSBQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wAEGbwLAIiBzblcpAP4SUliaIUPI88efcaBBLSNTr3VelwHHgmlKAqwCwCIgYxxfO1gyuPvev7GXBM7rMjwh9A96JPQ9aO8MwmsSWWmsAcAiIET6pJoDON5IjI3//s37bzKfOAvVZu8gyN9tgT6rHEJzrCEHRPqkmgM43kiMjf/+zftvMp84C9Vm7yDI322BPqscQnM5AfBreYuSoQ7ZqdC7/Trxc6U7FhfaOkFZygCCFs2Fay4Odystp1YAAIABAACAAQAAgAAAAAADAAAAIQdQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wAUAfEYeXSEHYxxfO1gyuPvev7GXBM7rMjwh9A96JPQ9aO8MwmsSWWk5ARis5AmIl4Xg6nDO67jhyokqenjq7eDy4pbPQ1lhqPTKdystp1YAAIABAACAAgAAgAAAAAADAAAAIQdzblcpAP4SUliaIUPI88efcaBBLSNTr3VelwHHgmlKAjkBKaW0kVCQFi11mv0/4Pk/ozJgVtC0CIy5M8rngmy42Cx3Ky2nVgAAgAEAAIADAACAAAAAAAMAAAAA", 6: "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgg2mORYxmZOFZXXXaJZfeHiLul9eY5wbEwKS1qYI810MAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJBFCyxOsaCSN6AaqajZZzzwD62gh0JyBFKToaP696GW7bSzZcOFfU/wMgvlQ/VYP+pGbdhcr4Bc2iomROvB09ACwlAv4GNl1fW/+tTi6BX+0wfxOD17xhudlvrVkeR4Cr1/T1eJVHU404z2G8na4LJnHmu0/A5Wgge/NLMLGXdfmk9eUEUQyCwvxbwEbU+p75hWSSqfyfl0prSDqEVXYSGdsO60bIRXy5JCvfMRcT3hRHzYFfOXFpcVjJaKftE38ID81bh+EDh8atvq/omsjbyGDNxncHUKKt2jYD5H5mI2KvvR7+4Y7sfKlKfdowV8AzjTsKDzcB+iPhCi+KPbvZAQ8MpEYEaQRT6D3o87zsdDAps59JuF62gsuXJLRnvrUi0GFnLikUcqW99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwQOwfA3kgZGHIM0IoVCMyZwirAx8NpKJT7kWq+luMkgNNi2BUkPjNE+APmJmJuX4hX6o28S3uNpPS2szzeBwXV/ZiFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wG99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwEV8uSQr3zEXE94UR82BXzlxaXFYyWin7RN/CA/NW4fgjICyxOsaCSN6AaqajZZzzwD62gh0JyBFKToaP696GW7bSrMBCFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wJfG5v6l/3FP9XJEmZkIEOQG6YqhD1v35fZ4S8HQqabOIyBDILC/FvARtT6nvmFZJKp/J+XSmtIOoRVdhIZ2w7rRsqzAYhXB
UJKbdMGgSVS3i0tgNel6XgeKWg8o7JbVR7/ums6AOsDNlw4V9T/AyC+VD9Vg/6kZt2FyvgFzaKiZE68HT0ALCRFfLkkK98xFxPeFEfNgV85cWlxWMlop+0TfwgPzVuH4IyD6D3o87zsdDAps59JuF62gsuXJLRnvrUi0GFnLikUcqazAIRYssTrGgkjegGqmo2Wc88A+toIdCcgRSk6Gj+vehlu20jkBzZcOFfU/wMgvlQ/VYP+pGbdhcr4Bc2iomROvB09ACwl3Ky2nVgAAgAEAAIACAACAAAAAAAAAAAAhFkMgsL8W8BG1Pqe+YVkkqn8n5dKa0g6hFV2EhnbDutGyOQERXy5JCvfMRcT3hRHzYFfOXFpcVjJaKftE38ID81bh+HcrLadWAACAAQAAgAEAAIAAAAAAAAAAACEWUJKbdMGgSVS3i0tgNel6XgeKWg8o7JbVR7/ums6AOsAFAHxGHl0hFvoPejzvOx0MCmzn0m4XraCy5cktGe+tSLQYWcuKRRypOQFvfWIFnpSXoaSiZ1admHbaYBAa/zjjUpubk5zn+RrpcHcrLadWAACAAQAAgAMAAIAAAAAAAAAAAAEXIFCSm3TBoElUt4tLYDXpel4HiloPKOyW1Ue/7prOgDrAARgg8DYuL3Wm9CClvePrIh2WrmcgzyX4GJDJWx13WstRXmUAAQUgESTaeuySzNBslUViZH9DexOLlXIahL4r8idrvdqz5nEhBxEk2nrskszQbJVFYmR/Q3sTi5VyGoS+K/Ina73as+ZxGQB3Ky2nVgAAgAEAAIAAAACAAAAAAAUAAAAA", + // Case: PSBT with one P2WSH input of a 2-of-2 multisig. witnessScript, + // keypaths, and global xpubs are available. Contains no signatures. + // Outputs filled. + 7: "cHNidP8BAFICAAAAAZ38ZijCbFiZ/hvT3DOGZb/VXXraEPYiCXPfLTht7BJ2AQAAAAD/////AfA9zR0AAAAAFgAUezoAv9wU0neVwrdJAdCdpu8TNXkAAAAATwEENYfPAto/0AiAAAAAlwSLGtBEWx7IJ1UXcnyHtOTrwYogP/oPlMAVZr046QADUbdDiH7h1A3DKmBDck8tZFmztaTXPa7I+64EcvO8Q+IM2QxqT64AAIAAAACATwEENYfPAto/0AiAAAABuQRSQnE5zXjCz/JES+NTzVhgXj5RMoXlKLQH+uP2FzUD0wpel8itvFV9rCrZp+OcFyLrrGnmaLbyZnzB1nHIPKsM2QxqT64AAIABAACAAAEBKwBlzR0AAAAAIgAgLFSGEmxJeAeagU4TcV1l82RZ5NbMre0mbQUIZFuvpjIBBUdSIQKdoSzbWyNWkrkVNq/v5ckcOrlHPY5DtTODarRWKZyIcSEDNys0I07Xz5wf6l0F1EFVeSe+lUKxYusC4ass6AIkwAtSriIGAp2hLNtbI1aSuRU2r+/lyRw6uUc9jkO1M4NqtFYpnIhxENkMak+uAACAAAAAgAAAAAAiBgM3KzQjTtfPnB/qXQXUQVV5J76VQrFi6wLhqyzoAiTACxDZDGpPrgAAgAEAAIAAAAAAACICA57/H1R6HV+S36K6evaslxpL0DukpzSwMVaiVritOh75EO3kXMUAAACAAAAAgAEAAIAA", + // Case: PSBT with `PSBT_GLOBAL_XPUB`. 
+ 8: "cHNidP8BAJ0BAAAAAnEOp2q0XFy2Q45gflnMA3YmmBgFrp4N/ZCJASq7C+U1AQAAAAD/////GQmU1qizyMgsy8+y+6QQaqBmObhyqNRHRlwNQliNbWcAAAAAAP////8CAOH1BQAAAAAZdqkUtrwsDuVlWoQ9ea/t0MzD991kNAmIrGBa9AUAAAAAFgAUEYjvjkzgRJ6qyPsUHL9aEXbmoIgAAAAATwEEiLIeA55TDKyAAAAAPbyKXJdp8DGxfnf+oVGGAyIaGP0Y8rmlTGyMGsdcvDUC8jBYSxVdHH8c1FEgplPEjWULQxtnxbLBPyfXFCA3wWkQJ1acUDEAAIAAAACAAAAAgAABAR8A4fUFAAAAABYAFDO5gvkbKPFgySC0q5XljOUN2jpKIgIDMJaA8zx9446mpHzU7NZvH1pJdHxv+4gI7QkDkkPjrVxHMEQCIC1wTO2DDFapCTRL10K2hS3M0QPpY7rpLTjnUlTSu0JFAiAthsQ3GV30bAztoITyopHD2i1kBw92v5uQsZXn7yj3cgEiBgMwloDzPH3jjqakfNTs1m8fWkl0fG/7iAjtCQOSQ+OtXBgnVpxQMQAAgAAAAIAAAACAAAAAAAEAAAAAAQEfAOH1BQAAAAAWABQ4j7lEMH63fvRRl9CwskXgefAR3iICAsd3Fh9z0LfHK57nveZQKT0T8JW8dlatH1Jdpf0uELEQRzBEAiBMsftfhpyULg4mEAV2ElQ5F5rojcqKncO6CPeVOYj6pgIgUh9JynkcJ9cOJzybFGFphZCTYeJb4nTqIA1+CIJ+UU0BIgYCx3cWH3PQt8crnue95lApPRPwlbx2Vq0fUl2l/S4QsRAYJ1acUDEAAIAAAACAAAAAgAAAAAAAAAAAAAAiAgLSDKUC7iiWhtIYFb1DqAY3sGmOH7zb5MrtRF9sGgqQ7xgnVpxQMQAAgAAAAIAAAACAAAAAAAQAAAAA", } // These are all invalid PSBTs for the indicated reasons. 
@@ -123,6 +152,8 @@ var invalidPsbtHex = map[int]string{ 18: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000", // Invalid duplicate BIP32 derivation (different derivs, same key) 19: 
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba670000008000000080050000800000", + // Invalid var int for key type + 20: "70736274ff01001c000000000002000000000000000000000000736210ff01000001010010ff70ff01001c00000000000000000000000000000000000000000000", } // All following PSBTs are Taproot specific invalid packets taken from @@ -682,6 +713,9 @@ func TestFinalize2of3(t *testing.T) { t.Fatalf("Error decoding hex: %v", err) } p, err := NewFromRawBytes(bytes.NewReader(b), false) + if err != nil { + t.Fatalf("Error parsing PSBT: %v", err) + } if p.IsComplete() { t.Fatalf("Psbt is complete") } @@ -1271,7 +1305,7 @@ func TestMaybeFinalizeAll(t *testing.T) { } func TestFromUnsigned(t *testing.T) { - serTx, err := hex.DecodeString("00000000000101e165f072311e71825b47a4797221d7ae56d4b40b7707c540049aee43302448a40000000000feffffff0212f1126a0000000017a9143e836801b2b15aa193449d815c62d6c4b6227c898780778e060000000017a914ba4bdb0b07d67bc60f59c1f4fe54170565254974870000000000") + serTx, err := 
hex.DecodeString("0000000001e165f072311e71825b47a4797221d7ae56d4b40b7707c540049aee43302448a40000000000feffffff0212f1126a0000000017a9143e836801b2b15aa193449d815c62d6c4b6227c898780778e060000000017a914ba4bdb0b07d67bc60f59c1f4fe541705652549748700000000") if err != nil { t.Fatalf("Error: %v", err) } @@ -1414,6 +1448,89 @@ func TestNonWitnessToWitness(t *testing.T) { } } +// TestPSBTNumberOfHashesOverflow tests the case where the number of hashes +// in the PSBT exceeds the maximum allowed value. This is a regression test +// for a bug that was fixed in the PSBT library. +func TestPSBTNumberOfHashesOverflow(t *testing.T) { + // This hex string represents a PSBT with an invalid number of hashes. The + // PSBT library should return an error when trying to parse this PSBT. + // + // TODO(ffranr): Is there a more minimal PSBT example? + hexString := "70736274ff01007374ff01030100000000002f0000002e2873007374" + + "ff01070100000000000000000000000000000000000000060680050000736274f" + + "f01000a0000000060c70006060000736274ff01000a0000010000010024070100" + + "00000000000000000000000000000000000006060000736274ff01000a0000000" + + "000010024c760002a707362c760000b0500000000000000060605000073626274" + + "ff01000a000000000001002421212121212121212121212121212121212121212" + + "12121212121212121212121212121212107010000000000000000000000000000" + + "000000000006060000736274ff01000a000eff000001000a0a040404040404040" + + "400" + + // Convert hex string to byte slice + buffer, err := hex.DecodeString(hexString) + require.NoError(t, err) + + // Attempt to parse the PSBT. + psbt, err := NewFromRawBytes(bytes.NewBuffer(buffer), false) + require.Nil(t, psbt) + require.ErrorIs(t, err, ErrInvalidPsbtFormat) +} + +// TestMinTaprootBip32DerivationByteSize tests the +// minTaprootBip32DerivationByteSize function to ensure it correctly calculates +// the minimum byte size of the Taproot BIP32 derivation path. 
+func TestMinTaprootBip32DerivationByteSize(t *testing.T) { + tests := []struct { + label string + numHashes uint64 + expectedSize uint64 + expectErr bool + }{ + { + label: "only compact size + fingerprint", + numHashes: 0, + expectedSize: 5, + expectErr: false, + }, + { + label: "numHashes == 1, therefore: 1 * 32 + 5", + numHashes: 1, + expectedSize: 37, + expectErr: false, + }, + { + label: "numHashes == 2, therefore: 2 * 32 + 5", + numHashes: 2, + expectedSize: 69, + expectErr: false, + }, + { + label: "overflow expected", + numHashes: math.MaxUint64, + expectedSize: 0, + expectErr: true, + }, + } + + for _, tt := range tests { + actualSize, err := minTaprootBip32DerivationByteSize(tt.numHashes) + + if (err != nil) != tt.expectErr { + t.Errorf( + "%s (numHashes=%d, unexpected_error=%v)", tt.label, + tt.numHashes, err, + ) + continue + } + + if err == nil && actualSize != tt.expectedSize { + t.Errorf("%s (numHashes=%d, actualSize=%d, expectedSize=%d)", + tt.label, tt.numHashes, actualSize, tt.expectedSize, + ) + } + } +} + // TestEmptyInputSerialization tests the special serialization case for a wire // transaction that has no inputs. func TestEmptyInputSerialization(t *testing.T) { diff --git a/btcutil/psbt/taproot.go b/btcutil/psbt/taproot.go index b9df860c95..02a0e3a2a0 100644 --- a/btcutil/psbt/taproot.go +++ b/btcutil/psbt/taproot.go @@ -2,6 +2,8 @@ package psbt import ( "bytes" + "math" + "math/bits" "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/txscript" @@ -93,12 +95,61 @@ func (s *TaprootBip32Derivation) SortBefore(other *TaprootBip32Derivation) bool return bytes.Compare(s.XOnlyPubKey, other.XOnlyPubKey) < 0 } +// minTaprootBip32DerivationByteSize returns the minimum number of bytes +// required to encode a Taproot BIP32 derivation field, given the number of +// leaf hashes. 
+//
+// NOTE: This function does not account for the size of the BIP32 child indexes,
+// as we are only computing the minimum size (which occurs when the path is
+// empty). The bits package is used to safely detect and handle overflows.
+func minTaprootBip32DerivationByteSize(numHashes uint64) (uint64, error) {
+	// The Taproot BIP32 derivation field is encoded as:
+	// [compact size uint: number of leaf hashes]
+	// [N × 32 bytes: leaf hashes]
+	// [4 bytes: master key fingerprint]
+	// [M × 4 bytes: BIP32 child indexes]
+	//
+	// To compute the minimum size given the number of hashes only, we assume:
+	// - N = numHashes (provided)
+	// - M = 0 (no child indexes)
+	//
+	// So the base byte size is:
+	// 1 (leaf hash count) + (N × 32) + 4 (fingerprint)
+	//
+	// First, we calculate the total number of bytes for the leaf hashes.
+	mulCarry, totalHashesBytes := bits.Mul64(numHashes, 32)
+	if mulCarry != 0 {
+		return 0, ErrInvalidPsbtFormat
+	}
+
+	// Since we're computing the minimum possible size, we add a constant that
+	// accounts for the fixed size fields:
+	// * 1 byte for the compact size leaf hash count (assumes numHashes < 0xfd)
+	// * 4 bytes for the master key fingerprint
+	// Total: 5 bytes.
+	// All other fields (e.g., BIP32 path) are assumed absent for minimum size
+	// calculation.
+	result, addCarry := bits.Add64(5, totalHashesBytes, 0)
+	if addCarry != 0 {
+		return 0, ErrInvalidPsbtFormat
+	}
+
+	return result, nil
+}
+
 // ReadTaprootBip32Derivation deserializes a byte slice containing the Taproot
 // BIP32 derivation info that consists of a list of leaf hashes as well as the
 // normal BIP32 derivation info.
 func ReadTaprootBip32Derivation(xOnlyPubKey,
 	value []byte) (*TaprootBip32Derivation, error) {
 
+	// This function allocates additional memory while parsing the serialized
+	// data. To prevent potential out-of-memory (OOM) issues, we must validate
+	// the length of the value slice before proceeding.
+	if len(value) > MaxPsbtValueLength {
+		return nil, ErrInvalidPsbtFormat
+	}
+
 	// The taproot key BIP 32 derivation path is defined as:
 	// * <4 byte fingerprint> <32-bit uint>*
 	// So we get at least 5 bytes for the length and the 4 byte fingerprint.
@@ -113,9 +164,22 @@ func ReadTaprootBip32Derivation(xOnlyPubKey,
 		return nil, ErrInvalidPsbtFormat
 	}
 
-	// A hash is 32 bytes in size, so we need at least numHashes*32 + 5
-	// bytes to be present.
-	if len(value) < (int(numHashes)*32)+5 {
+	// As a safety/sanity check, verify that the hash count fits in a `uint32`.
+	// This isn't mandated by BIP-371, but it prevents overflow and limits
+	// derivations to about 128 GiB of data.
+	if numHashes > math.MaxUint32 {
+		return nil, ErrInvalidPsbtFormat
+	}
+
+	// Given the number of hashes, we can calculate the minimum byte size
+	// of the taproot BIP32 derivation.
+	minByteSize, err := minTaprootBip32DerivationByteSize(numHashes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure that value is at least the minimum size.
+	if uint64(len(value)) < minByteSize {
 		return nil, ErrInvalidPsbtFormat
 	}
diff --git a/btcutil/psbt/types.go b/btcutil/psbt/types.go
index e833e1af35..ca555101b9 100644
--- a/btcutil/psbt/types.go
+++ b/btcutil/psbt/types.go
@@ -13,22 +13,22 @@ const (
 	// invalid.
 	UnsignedTxType GlobalType = 0
 
-	// XpubType houses a global xpub for the entire PSBT packet.
+	// XPubType houses a global xPub for the entire PSBT packet.
 	//
-	// The key ({0x01}|{xpub}) is he 78 byte serialized extended public key
-	// as defined by BIP 32. Extended public keys are those that can be
+	// The key ({0x01}|{xpub}) is the 78 byte serialized extended public key
+	// as defined by BIP-0032. Extended public keys are those that can be
 	// used to derive public keys used in the inputs and outputs of this
 	// transaction. It should be the public key at the highest hardened
-	// derivation index so that
-	// the unhardened child keys used in the transaction can be derived.
+ // derivation index so that the unhardened child keys used in the + // transaction can be derived. // - // The value is the master key fingerprint as defined by BIP 32 + // The value is the master key fingerprint as defined by BIP-0032 // concatenated with the derivation path of the public key. The // derivation path is represented as 32-bit little endian unsigned - // integer indexes concatenated with each other. The number of 32 bit + // integer indexes concatenated with each other. The number of 32-bit // unsigned integer indexes must match the depth provided in the // extended public key. - XpubType GlobalType = 1 + XPubType GlobalType = 1 // VersionType houses the global version number of this PSBT. There is // no key (only contains the byte type), then the value if omitted, is diff --git a/btcutil/psbt/utils.go b/btcutil/psbt/utils.go index 0a9002798e..c47f6afd4d 100644 --- a/btcutil/psbt/utils.go +++ b/btcutil/psbt/utils.go @@ -250,23 +250,32 @@ func getKey(r io.Reader) (int, []byte, error) { // Next, we ready out the designated number of bytes, which may include // a type, key, and optional data. - keyTypeAndData := make([]byte, count) - if _, err := io.ReadFull(r, keyTypeAndData[:]); err != nil { - return -1, nil, err + keyTypeReader := io.LimitReader(r, int64(count)) + keyType, err := wire.ReadVarInt(keyTypeReader, 0) + if err != nil { + return -1, nil, ErrInvalidPsbtFormat } - keyType := int(string(keyTypeAndData)[0]) + // The maximum value of a compact size int is capped in bitcoind, do the + // same here to mimic the behavior. + if keyType > MaxPsbtKeyValue { + return -1, nil, ErrInvalidPsbtFormat + } + + keyData, err := io.ReadAll(keyTypeReader) + if err != nil { + return -1, nil, ErrInvalidPsbtFormat + } // Note that the second return value will usually be empty, since most // keys contain no more than the key type byte. 
- if len(keyTypeAndData) == 1 { - return keyType, nil, nil + if len(keyData) == 0 { + return int(keyType), nil, nil } // Otherwise, we return the key, along with any data that it may // contain. - return keyType, keyTypeAndData[1:], nil - + return int(keyType), keyData, nil } // readTxOut is a limited version of wire.ReadTxOut, because the latter is not diff --git a/chaincfg/chainhash/go.mod b/chaincfg/chainhash/go.mod index 1d865254dd..538221a493 100644 --- a/chaincfg/chainhash/go.mod +++ b/chaincfg/chainhash/go.mod @@ -1,3 +1,3 @@ module github.com/btcsuite/btcd/chaincfg/chainhash -go 1.17 +go 1.22 diff --git a/chaincfg/genesis.go b/chaincfg/genesis.go index 73d286102b..f734feb27a 100644 --- a/chaincfg/genesis.go +++ b/chaincfg/genesis.go @@ -143,6 +143,77 @@ var testNet3GenesisBlock = wire.MsgBlock{ Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, } +// testNet4GenesisTx is the transaction for the genesis blocks for test network (version 4). +var testNet4GenesisTx = wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{ + // Message: `03/May/2024 000000000000000000001ebd58c244970b3aa9d783bb001011fbe8ea8e98e00e` + 0x4, 0xff, 0xff, 0x0, 0x1d, 0x1, 0x4, 0x4c, + 0x4c, 0x30, 0x33, 0x2f, 0x4d, 0x61, 0x79, 0x2f, + 0x32, 0x30, 0x32, 0x34, 0x20, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x31, 0x65, 0x62, 0x64, 0x35, 0x38, 0x63, + 0x32, 0x34, 0x34, 0x39, 0x37, 0x30, 0x62, 0x33, + 0x61, 0x61, 0x39, 0x64, 0x37, 0x38, 0x33, 0x62, + 0x62, 0x30, 0x30, 0x31, 0x30, 0x31, 0x31, 0x66, + 0x62, 0x65, 0x38, 0x65, 0x61, 0x38, 0x65, 0x39, + 0x38, 0x65, 0x30, 0x30, 0x65}, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x12a05f200, + PkScript: []byte{ + 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0xac},
+		},
+	},
+	LockTime: 0,
+}
+
+// testNet4GenesisHash is the hash of the first block in the block chain for the
+// test network (version 4).
+var testNet4GenesisHash = chainhash.Hash([chainhash.HashSize]byte{
+	0x43, 0xf0, 0x8b, 0xda, 0xb0, 0x50, 0xe3, 0x5b,
+	0x56, 0x7c, 0x86, 0x4b, 0x91, 0xf4, 0x7f, 0x50,
+	0xae, 0x72, 0x5a, 0xe2, 0xde, 0x53, 0xbc, 0xfb,
+	0xba, 0xf2, 0x84, 0xda, 0x00, 0x00, 0x00, 0x00})
+
+// testNet4GenesisMerkleRoot is the hash of the first transaction in the genesis
+// block for the test network (version 4). It differs from the merkle root of
+// the other networks because testnet4 uses its own coinbase transaction
+// (testNet4GenesisTx) rather than the shared genesisCoinbaseTx.
+var testNet4GenesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
+	0x4e, 0x7b, 0x2b, 0x91, 0x28, 0xfe, 0x02, 0x91,
+	0xdb, 0x06, 0x93, 0xaf, 0x2a, 0xe4, 0x18, 0xb7,
+	0x67, 0xe6, 0x57, 0xcd, 0x40, 0x7e, 0x80, 0xcb,
+	0x14, 0x34, 0x22, 0x1e, 0xae, 0xa7, 0xa0, 0x7a,
+})
+
+// testNet4GenesisBlock defines the genesis block of the block chain which
+// serves as the public transaction ledger for the test network (version 4).
+var testNet4GenesisBlock = wire.MsgBlock{
+	Header: wire.BlockHeader{
+		Version:    1,
+		PrevBlock:  chainhash.Hash{},          // 0000000000000000000000000000000000000000000000000000000000000000
+		MerkleRoot: testNet4GenesisMerkleRoot, // 7aa0a7ae1e223414cb807e40cd57e667b718e42aaf9306db9102fe28912b7b4e
+		Timestamp:  time.Unix(1714777860, 0),  // 2024-05-03 23:11:00 +0000 UTC
+		Bits:       0x1d00ffff,                // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000]
+		Nonce:      0x17780cbb,                // 393743547
+	},
+	Transactions: []*wire.MsgTx{&testNet4GenesisTx},
+}
+
 // simNetGenesisHash is the hash of the first block in the block chain for the
 // simulation test network.
 var simNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
diff --git a/chaincfg/genesis_test.go b/chaincfg/genesis_test.go index 1daf847916..b2975ffbb2 100644 --- a/chaincfg/genesis_test.go +++ b/chaincfg/genesis_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/require" ) // TestGenesisBlock tests the genesis block of the main network for validity by @@ -91,6 +92,34 @@ func TestTestNet3GenesisBlock(t *testing.T) { } } +// TestTestNet4GenesisBlock tests the genesis block of the test network (version +// 4) for validity by checking the encoded bytes and hashes. +func TestTestNet4GenesisBlock(t *testing.T) { + // Encode the genesis block to raw bytes. + var buf bytes.Buffer + err := TestNet4Params.GenesisBlock.Serialize(&buf) + require.NoError(t, err) + + // Ensure the encoded block matches the expected bytes. + if !bytes.Equal(buf.Bytes(), testNet4GenesisBlockBytes) { + t.Fatalf("TestTestNet4GenesisBlock: Genesis block does not "+ + "appear valid - got %v, want %v", + spew.Sdump(buf.Bytes()), + spew.Sdump(testNet4GenesisBlockBytes)) + } + + // Check hash of the block against expected hash. + hash := TestNet4Params.GenesisBlock.BlockHash() + if !TestNet4Params.GenesisHash.IsEqual(&hash) { + t.Fatalf("TestTestNet4GenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", spew.Sdump(hash), + spew.Sdump(TestNet4Params.GenesisHash)) + } + expectedHash := "00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be3" + + "50b0da8bf043" + require.Equal(t, expectedHash, hash.String()) +} + // TestSimNetGenesisBlock tests the genesis block of the simulation test network // for validity by checking the encoded bytes and hashes. 
func TestSimNetGenesisBlock(t *testing.T) { @@ -268,6 +297,44 @@ var testNet3GenesisBlockBytes = []byte{ 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ } +// testNet4GenesisBlockBytes are the wire encoded bytes for the genesis block of +// the test network (version 4) +var testNet4GenesisBlockBytes = []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x4e, 0x7b, 0x2b, 0x91, /* |....N{+.| */ + 0x28, 0xfe, 0x02, 0x91, 0xdb, 0x06, 0x93, 0xaf, /* |(.......| */ + 0x2a, 0xe4, 0x18, 0xb7, 0x67, 0xe6, 0x57, 0xcd, /* |*...g.W.| */ + 0x40, 0x7e, 0x80, 0xcb, 0x14, 0x34, 0x22, 0x1e, /* |@~...4".| */ + 0xae, 0xa7, 0xa0, 0x7a, 0x04, 0x6f, 0x35, 0x66, /* |...z.o5f| */ + 0xff, 0xff, 0x00, 0x1d, 0xbb, 0x0c, 0x78, 0x17, /* |......x.| */ + 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ + 0xff, 0xff, 0x55, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..U.....| */ + 0x01, 0x04, 0x4c, 0x4c, 0x30, 0x33, 0x2f, 0x4d, /* |..LL03/M| */ + 0x61, 0x79, 0x2f, 0x32, 0x30, 0x32, 0x34, 0x20, /* |ay/2024 | */ + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, /* |00000000| */ + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, /* |00000000| */ + 0x30, 0x30, 0x30, 0x30, 0x31, 0x65, 0x62, 0x64, /* |00001ebd| */ + 0x35, 0x38, 0x63, 0x32, 0x34, 0x34, 0x39, 0x37, /* |58c24497| */ + 0x30, 0x62, 0x33, 0x61, 0x61, 0x39, 0x64, 0x37, /* |0b3aa9d7| */ + 0x38, 0x33, 0x62, 0x62, 0x30, 0x30, 0x31, 0x30, /* |83bb0010| */ + 0x31, 0x31, 0x66, 0x62, 0x65, 0x38, 0x65, 0x61, /* |11fbe8ea| */ + 0x38, 0x65, 
0x39, 0x38, 0x65, 0x30, 0x30, 0x65, /* |8e98e00e| */ + 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ + 0x2a, 0x01, 0x00, 0x00, 0x00, 0x23, 0x21, 0x00, /* |*....#!.| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0xac, 0x00, 0x00, 0x00, 0x00, /* |..... | */ +} + // simNetGenesisBlockBytes are the wire encoded bytes for the genesis block of // the simulation test network as of protocol version 70002. var simNetGenesisBlockBytes = []byte{ diff --git a/chaincfg/params.go b/chaincfg/params.go index 1c329cb50f..4ddeb5defa 100644 --- a/chaincfg/params.go +++ b/chaincfg/params.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "encoding/hex" "errors" + "math" "math/big" "strings" "time" @@ -78,6 +79,16 @@ type Checkpoint struct { Hash *chainhash.Hash } +// EffectiveAlwaysActiveHeight returns the effective activation height for the +// deployment. If AlwaysActiveHeight is unset (i.e. zero), it returns +// the maximum uint32 value to indicate that it does not force activation. +func (d *ConsensusDeployment) EffectiveAlwaysActiveHeight() uint32 { + if d.AlwaysActiveHeight == 0 { + return math.MaxUint32 + } + return d.AlwaysActiveHeight +} + // DNSSeed identifies a DNS seed. type DNSSeed struct { // Host defines the hostname of the seed. @@ -108,6 +119,11 @@ type ConsensusDeployment struct { // activation. A value of 1815 block denotes a 90% threshold. CustomActivationThreshold uint32 + // AlwaysActiveHeight defines an optional block threshold at which the + // deployment is forced to be active. If unset (0), it defaults to + // math.MaxUint32, meaning the deployment does not force activation. + AlwaysActiveHeight uint32 + // DeploymentStarter is used to determine if the given // ConsensusDeployment has started or not. 
DeploymentStarter ConsensusDeploymentStarter @@ -146,6 +162,10 @@ const ( // the deployment of BIPS 340, 341 and 342. DeploymentTaproot + // DeploymentTestDummyAlwaysActive is a dummy deployment that is meant + // to always be active. + DeploymentTestDummyAlwaysActive + // NOTE: DefinedDeployments must always come last since it is used to // determine how many defined deployments there currently are. @@ -189,6 +209,10 @@ type Params struct { // regtest like networks. PoWNoRetargeting bool + // EnforceBIP94 enforces timewarp attack mitigation and on testnet4 + // this also enforces the block storm mitigation. + EnforceBIP94 bool + // These fields define the block heights at which the specified softfork // BIP became active. BIP0034Height int32 @@ -375,6 +399,16 @@ var MainNetParams = Params{ time.Time{}, // Never expires ), }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 30, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, DeploymentCSV: { BitNumber: 0, DeploymentStarter: NewMedianTimeDeploymentStarter( @@ -486,6 +520,16 @@ var RegressionNetParams = Params{ time.Time{}, // Never expires ), }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 30, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, DeploymentCSV: { BitNumber: 0, DeploymentStarter: NewMedianTimeDeploymentStarter( @@ -620,6 +664,16 @@ var TestNet3Params = Params{ time.Time{}, // Never expires ), }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 30, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, 
DeploymentCSV: { BitNumber: 0, DeploymentStarter: NewMedianTimeDeploymentStarter( @@ -673,6 +727,131 @@ var TestNet3Params = Params{ HDCoinType: 1, } +// TestNet4Params defines the network parameters for the test Bitcoin network +// (version 4). +var TestNet4Params = Params{ + Name: "testnet4", + Net: wire.TestNet4, + DefaultPort: "48333", + DNSSeeds: []DNSSeed{ + {"seed.testnet4.bitcoin.sprovoost.nl", true}, + {"seed.testnet4.wiz.biz", true}, + }, + + // Chain parameters + GenesisBlock: &testNet4GenesisBlock, + GenesisHash: &testNet4GenesisHash, + PowLimit: testNet3PowLimit, + PowLimitBits: 0x1d00ffff, + EnforceBIP94: true, + BIP0034Height: 1, + BIP0065Height: 1, + BIP0066Height: 1, + CoinbaseMaturity: 100, + SubsidyReductionInterval: 210000, + TargetTimespan: time.Hour * 24 * 14, // 14 days + TargetTimePerBlock: time.Minute * 10, // 10 minutes + RetargetAdjustmentFactor: 4, // 25% less, 400% more + ReduceMinDifficulty: true, + MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2 + GenerateSupported: false, + + // Checkpoints ordered from oldest to newest. + Checkpoints: []Checkpoint{}, + + // Consensus rule change deployments. + // + // The miner confirmation window is defined as: + // target proof of work timespan / target proof of work spacing + RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow + MinerConfirmationWindow: 2016, + Deployments: [DefinedDeployments]ConsensusDeployment{ + DeploymentTestDummy: { + BitNumber: 28, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Unix(1199145601, 0), // January 1, 2008 UTC + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Unix(1230767999, 0), // December 31, 2008 UTC + ), + }, + DeploymentTestDummyMinActivation: { + BitNumber: 22, + CustomActivationThreshold: 1815, // Only needs 90% hash rate. + MinActivationHeight: 10_0000, // Can only activate after height 10k. 
+ DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 30, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, + DeploymentCSV: { + BitNumber: 31, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, + DeploymentSegwit: { + BitNumber: 29, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, + DeploymentTaproot: { + BitNumber: 2, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + MinActivationHeight: 0, + AlwaysActiveHeight: 1, + }, + }, + + // Mempool parameters + RelayNonStdTxs: true, + + // Human-readable part for Bech32 encoded segwit addresses, as defined in + // BIP 173. 
+ Bech32HRPSegwit: "tb", // always tb for test net + + // Address encoding magics + PubKeyHashAddrID: 0x6f, // starts with m or n + ScriptHashAddrID: 0xc4, // starts with 2 + WitnessPubKeyHashAddrID: 0x03, // starts with QW + WitnessScriptHashAddrID: 0x28, // starts with T7n + PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed) + + // BIP32 hierarchical deterministic extended key magics + HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv + HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub + + // BIP44 coin type used in the hierarchical deterministic path for + // address generation. + HDCoinType: 1, +} + // SimNetParams defines the network parameters for the simulation test Bitcoin // network. This network is similar to the normal test network except it is // intended for private use within a group of individuals doing simulation @@ -761,6 +940,16 @@ var SimNetParams = Params{ ), CustomActivationThreshold: 75, // Only needs 75% hash rate. }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 29, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, }, // Mempool parameters @@ -861,6 +1050,16 @@ func CustomSignetParams(challenge []byte, dnsSeeds []DNSSeed) Params { time.Time{}, // Never expires ), }, + DeploymentTestDummyAlwaysActive: { + BitNumber: 30, + DeploymentStarter: NewMedianTimeDeploymentStarter( + time.Time{}, // Always available for vote + ), + DeploymentEnder: NewMedianTimeDeploymentEnder( + time.Time{}, // Never expires + ), + AlwaysActiveHeight: 1, + }, DeploymentCSV: { BitNumber: 29, DeploymentStarter: NewMedianTimeDeploymentStarter( @@ -1075,6 +1274,7 @@ func init() { // Register all default networks when the package is initialized. 
mustRegister(&MainNetParams) mustRegister(&TestNet3Params) + mustRegister(&TestNet4Params) mustRegister(&RegressionNetParams) mustRegister(&SimNetParams) } diff --git a/chaincfg/params_test.go b/chaincfg/params_test.go index 4166ce0a23..4d3f4d2273 100644 --- a/chaincfg/params_test.go +++ b/chaincfg/params_test.go @@ -9,6 +9,9 @@ import ( "encoding/hex" "math/big" "testing" + + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" ) // TestInvalidHashStr ensures the newShaHashFromStr function panics when used to @@ -104,6 +107,12 @@ func TestSigNetPowLimit(t *testing.T) { } } +// TestSigNetMagic makes sure that the default signet has the expected bitcoin +// network magic. +func TestSigNetMagic(t *testing.T) { + require.Equal(t, wire.SigNet, SigNetParams.Net) +} + // compactToBig is a copy of the blockchain.CompactToBig function. We copy it // here so we don't run into a circular dependency just because of a test. func compactToBig(compact uint32) *big.Int { diff --git a/chaincfg/register_test.go b/chaincfg/register_test.go index bcb5b3c6f6..db2544159f 100644 --- a/chaincfg/register_test.go +++ b/chaincfg/register_test.go @@ -68,6 +68,11 @@ func TestRegister(t *testing.T) { params: &TestNet3Params, err: ErrDuplicateNet, }, + { + name: "duplicate testnet4", + params: &TestNet4Params, + err: ErrDuplicateNet, + }, { name: "duplicate simnet", params: &SimNetParams, @@ -83,6 +88,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.PubKeyHashAddrID, valid: true, }, + { + magic: TestNet4Params.PubKeyHashAddrID, + valid: true, + }, { magic: RegressionNetParams.PubKeyHashAddrID, valid: true, @@ -109,6 +118,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.ScriptHashAddrID, valid: true, }, + { + magic: TestNet4Params.ScriptHashAddrID, + valid: true, + }, { magic: RegressionNetParams.ScriptHashAddrID, valid: true, @@ -135,6 +148,10 @@ func TestRegister(t *testing.T) { prefix: TestNet3Params.Bech32HRPSegwit + "1", valid: true, }, + { + 
prefix: TestNet4Params.Bech32HRPSegwit + "1", + valid: true, + }, { prefix: RegressionNetParams.Bech32HRPSegwit + "1", valid: true, @@ -175,6 +192,11 @@ func TestRegister(t *testing.T) { want: TestNet3Params.HDPublicKeyID[:], err: nil, }, + { + priv: TestNet4Params.HDPrivateKeyID[:], + want: TestNet4Params.HDPublicKeyID[:], + err: nil, + }, { priv: RegressionNetParams.HDPrivateKeyID[:], want: RegressionNetParams.HDPublicKeyID[:], @@ -217,6 +239,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.PubKeyHashAddrID, valid: true, }, + { + magic: TestNet4Params.PubKeyHashAddrID, + valid: true, + }, { magic: RegressionNetParams.PubKeyHashAddrID, valid: true, @@ -243,6 +269,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.ScriptHashAddrID, valid: true, }, + { + magic: TestNet4Params.ScriptHashAddrID, + valid: true, + }, { magic: RegressionNetParams.ScriptHashAddrID, valid: true, @@ -269,6 +299,10 @@ func TestRegister(t *testing.T) { prefix: TestNet3Params.Bech32HRPSegwit + "1", valid: true, }, + { + prefix: TestNet4Params.Bech32HRPSegwit + "1", + valid: true, + }, { prefix: RegressionNetParams.Bech32HRPSegwit + "1", valid: true, @@ -324,6 +358,11 @@ func TestRegister(t *testing.T) { params: &TestNet3Params, err: ErrDuplicateNet, }, + { + name: "duplicate testnet4", + params: &TestNet4Params, + err: ErrDuplicateNet, + }, { name: "duplicate simnet", params: &SimNetParams, @@ -344,6 +383,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.PubKeyHashAddrID, valid: true, }, + { + magic: TestNet4Params.PubKeyHashAddrID, + valid: true, + }, { magic: RegressionNetParams.PubKeyHashAddrID, valid: true, @@ -370,6 +413,10 @@ func TestRegister(t *testing.T) { magic: TestNet3Params.ScriptHashAddrID, valid: true, }, + { + magic: TestNet4Params.ScriptHashAddrID, + valid: true, + }, { magic: RegressionNetParams.ScriptHashAddrID, valid: true, @@ -396,6 +443,10 @@ func TestRegister(t *testing.T) { prefix: TestNet3Params.Bech32HRPSegwit + "1", valid: true, }, 
+ { + prefix: TestNet4Params.Bech32HRPSegwit + "1", + valid: true, + }, { prefix: RegressionNetParams.Bech32HRPSegwit + "1", valid: true, @@ -436,6 +487,11 @@ func TestRegister(t *testing.T) { want: TestNet3Params.HDPublicKeyID[:], err: nil, }, + { + priv: TestNet4Params.HDPrivateKeyID[:], + want: TestNet4Params.HDPublicKeyID[:], + err: nil, + }, { priv: RegressionNetParams.HDPrivateKeyID[:], want: RegressionNetParams.HDPublicKeyID[:], diff --git a/cmd/addblock/config.go b/cmd/addblock/config.go index d49df0a11d..5f47900380 100644 --- a/cmd/addblock/config.go +++ b/cmd/addblock/config.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" @@ -30,7 +31,7 @@ var ( activeNetParams = &chaincfg.MainNetParams ) -// config defines the configuration options for findcheckpoint. +// config defines the configuration options for addblock. // // See loadConfig for details on the configuration load process. type config struct { @@ -41,7 +42,8 @@ type config struct { Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"` RegressionTest bool `long:"regtest" description:"Use the regression test network"` SimNet bool `long:"simnet" description:"Use the simulation test network"` - TestNet3 bool `long:"testnet" description:"Use the test network"` + TestNet3 bool `long:"testnet" description:"Use the test network (version 3)"` + TestNet4 bool `long:"testnet4" description:"Use the test network (version 4)"` TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` } @@ -57,13 +59,7 @@ func fileExists(name string) bool { // validDbType returns whether or not dbType is a supported database type. 
func validDbType(dbType string) bool { - for _, knownType := range knownDbTypes { - if dbType == knownType { - return true - } - } - - return false + return slices.Contains(knownDbTypes, dbType) } // netName returns the name used when referring to a bitcoin network. At the @@ -113,6 +109,10 @@ func loadConfig() (*config, []string, error) { numNets++ activeNetParams = &chaincfg.TestNet3Params } + if cfg.TestNet4 { + numNets++ + activeNetParams = &chaincfg.TestNet4Params + } if cfg.RegressionTest { numNets++ activeNetParams = &chaincfg.RegressionNetParams diff --git a/cmd/btcctl/btcctl.go b/cmd/btcctl/btcctl.go index 771d5f7ed7..d6d2c53a87 100644 --- a/cmd/btcctl/btcctl.go +++ b/cmd/btcctl/btcctl.go @@ -49,6 +49,7 @@ func usage(errorMessage string) { func main() { cfg, args, err := loadConfig() if err != nil { + fmt.Fprintln(os.Stderr, "Failed to load config:", err) os.Exit(1) } if len(args) < 1 { diff --git a/cmd/btcctl/config.go b/cmd/btcctl/config.go index f6ca8846ec..023dd93a35 100644 --- a/cmd/btcctl/config.go +++ b/cmd/btcctl/config.go @@ -6,7 +6,7 @@ package main import ( "fmt" - "io/ioutil" + "io" "net" "os" "path/filepath" @@ -106,7 +106,8 @@ type config struct { RPCUser string `short:"u" long:"rpcuser" description:"RPC username"` SimNet bool `long:"simnet" description:"Connect to the simulation test network"` TLSSkipVerify bool `long:"skipverify" description:"Do not verify tls certificates (not recommended!)"` - TestNet3 bool `long:"testnet" description:"Connect to testnet"` + TestNet3 bool `long:"testnet" description:"Connect to testnet (version 3)"` + TestNet4 bool `long:"testnet4" description:"Connect to testnet (version 4)"` SigNet bool `long:"signet" description:"Connect to signet"` ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` Wallet bool `long:"wallet" description:"Connect to wallet"` @@ -125,6 +126,12 @@ func normalizeAddress(addr string, chain *chaincfg.Params, useWallet bool) (stri } else { 
defaultPort = "18334" } + case &chaincfg.TestNet4Params: + if useWallet { + defaultPort = "48332" + } else { + defaultPort = "48334" + } case &chaincfg.SimNetParams: if useWallet { defaultPort = "18554" @@ -272,6 +279,10 @@ func loadConfig() (*config, []string, error) { numNets++ network = &chaincfg.TestNet3Params } + if cfg.TestNet4 { + numNets++ + network = &chaincfg.TestNet4Params + } if cfg.SimNet { numNets++ network = &chaincfg.SimNetParams @@ -322,7 +333,7 @@ func createDefaultConfigFile(destinationPath, serverConfigPath string) error { return err } defer serverConfigFile.Close() - content, err := ioutil.ReadAll(serverConfigFile) + content, err := io.ReadAll(serverConfigFile) if err != nil { return err } diff --git a/cmd/findcheckpoint/config.go b/cmd/findcheckpoint/config.go index 203ed27faf..d4ae5dfc83 100644 --- a/cmd/findcheckpoint/config.go +++ b/cmd/findcheckpoint/config.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" @@ -41,18 +42,13 @@ type config struct { NumCandidates int `short:"n" long:"numcandidates" description:"Max num of checkpoint candidates to show {1-20}"` RegressionTest bool `long:"regtest" description:"Use the regression test network"` SimNet bool `long:"simnet" description:"Use the simulation test network"` - TestNet3 bool `long:"testnet" description:"Use the test network"` + TestNet3 bool `long:"testnet" description:"Use the test network (version 3)"` + TestNet4 bool `long:"testnet4" description:"Use the test network (version 4)"` } // validDbType returns whether or not dbType is a supported database type. func validDbType(dbType string) bool { - for _, knownType := range knownDbTypes { - if dbType == knownType { - return true - } - } - - return false + return slices.Contains(knownDbTypes, dbType) } // netName returns the name used when referring to a bitcoin network. 
At the @@ -101,6 +97,10 @@ func loadConfig() (*config, []string, error) { numNets++ activeNetParams = &chaincfg.TestNet3Params } + if cfg.TestNet4 { + numNets++ + activeNetParams = &chaincfg.TestNet4Params + } if cfg.RegressionTest { numNets++ activeNetParams = &chaincfg.RegressionNetParams diff --git a/config.go b/config.go index 9bbce7f69a..cb7ce6b294 100644 --- a/config.go +++ b/config.go @@ -16,6 +16,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "sort" "strconv" "strings" @@ -114,6 +115,7 @@ type config struct { ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"` CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"` MemoryProfile string `long:"memprofile" description:"Write memory profile to the specified file"` + TraceProfile string `long:"traceprofile" description:"Write execution trace to the specified file"` DataDir string `short:"b" long:"datadir" description:"Directory to store data"` DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... 
to set the log level for individual subsystems -- Use show to list available subsystems"` @@ -169,11 +171,13 @@ type config struct { SigNet bool `long:"signet" description:"Use the signet test network"` SigNetChallenge string `long:"signetchallenge" description:"Connect to a custom signet network defined by this challenge instead of using the global default signet test network -- Can be specified multiple times"` SigNetSeedNode []string `long:"signetseednode" description:"Specify a seed node for the signet network instead of using the global default signet network seed nodes"` - TestNet3 bool `long:"testnet" description:"Use the test network"` + TestNet3 bool `long:"testnet" description:"Use the test network (version 3)"` + TestNet4 bool `long:"testnet4" description:"Use the test network (version 4)"` TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` TrickleInterval time.Duration `long:"trickleinterval" description:"Minimum time between attempts to send new inventory to a connected peer"` UtxoCacheMaxSizeMiB uint `long:"utxocachemaxsize" description:"The maximum size in MiB of the UTXO cache"` TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` + V2Transport bool `long:"v2transport" description:"Enable P2P v2 encrypted transport protocol (BIP324) (default: false)"` UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."` Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"` ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` @@ -293,13 +297,7 @@ func parseAndSetDebugLevels(debugLevel string) error { // validDbType returns whether or not dbType is a supported database type. 
func validDbType(dbType string) bool { - for _, knownType := range knownDbTypes { - if dbType == knownType { - return true - } - } - - return false + return slices.Contains(knownDbTypes, dbType) } // removeDuplicateAddresses returns a new slice with all duplicate entries in @@ -445,6 +443,7 @@ func loadConfig() (*config, []string, error) { Generate: defaultGenerate, TxIndex: defaultTxIndex, AddrIndex: defaultAddrIndex, + V2Transport: false, } // Service options which are only added on Windows. @@ -552,6 +551,10 @@ func loadConfig() (*config, []string, error) { numNets++ activeNetParams = &testNet3Params } + if cfg.TestNet4 { + numNets++ + activeNetParams = &testNet4Params + } if cfg.RegressionTest { numNets++ activeNetParams = ®ressionNetParams diff --git a/config_test.go b/config_test.go index 42a0cd4b90..b149943323 100644 --- a/config_test.go +++ b/config_test.go @@ -22,10 +22,7 @@ func TestCreateDefaultConfigFile(t *testing.T) { sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-btcd.conf") // Setup a temporary directory - tmpDir, err := os.MkdirTemp("", "btcd") - if err != nil { - t.Fatalf("Failed creating a temporary directory: %v", err) - } + tmpDir := t.TempDir() testpath := filepath.Join(tmpDir, "test.conf") // copy config file to location of btcd binary @@ -43,13 +40,6 @@ func TestCreateDefaultConfigFile(t *testing.T) { t.Fatalf("Failed copying sample config file: %v", err) } - // Clean-up - defer func() { - os.Remove(testpath) - os.Remove(tmpConfigFile) - os.Remove(tmpDir) - }() - err = createDefaultConfigFile(testpath) if err != nil { diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go index e88f8af0cb..97fcd6c9e5 100644 --- a/connmgr/connmanager.go +++ b/connmgr/connmanager.go @@ -153,6 +153,22 @@ type registerPending struct { done chan struct{} } +// ConnOption is a functional option type for various connection operations. +type ConnOption func(*connOptions) + +// connOptions holds the options for a connection operation. 
+type connOptions struct { + triggerReconnect bool +} + +// WithTriggerReconnect is a functional option that forces a reconnect attempt +// after disconnection, even for non-permanent peers. +func WithTriggerReconnect() ConnOption { + return func(opts *connOptions) { + opts.triggerReconnect = true + } +} + // handleConnected is used to queue a successful connection. type handleConnected struct { c *ConnReq @@ -161,8 +177,9 @@ type handleConnected struct { // handleDisconnected is used to remove a connection. type handleDisconnected struct { - id uint64 - retry bool + id uint64 + retry bool + triggerReconnect bool } // handleFailed is used to remove a pending connection. @@ -190,11 +207,11 @@ type ConnManager struct { // retry duration. Otherwise, if required, it makes a new connection request. // After maxFailedConnectionAttempts new connections will be retried after the // configured retry duration. -func (cm *ConnManager) handleFailedConn(c *ConnReq) { +func (cm *ConnManager) handleFailedConn(c *ConnReq, triggerReconnect bool) { if atomic.LoadInt32(&cm.stop) != 0 { return } - if c.Permanent { + if c.Permanent || triggerReconnect { c.retryCount++ d := time.Duration(c.retryCount) * cm.cfg.RetryDuration if d > maxRetryDuration { @@ -296,7 +313,6 @@ out: log.Debugf("Canceling: %v", connReq) delete(pending, msg.id) continue - } // An existing connection was located, mark as @@ -334,7 +350,7 @@ out: log.Debugf("Reconnecting to %v", connReq) pending[msg.id] = connReq - cm.handleFailedConn(connReq) + cm.handleFailedConn(connReq, msg.triggerReconnect) } case handleFailed: @@ -349,7 +365,7 @@ out: connReq.updateState(ConnFailing) log.Debugf("Failed to connect to %v: %v", connReq, msg.err) - cm.handleFailedConn(connReq) + cm.handleFailedConn(connReq, false) } case <-cm.quit: @@ -463,14 +479,21 @@ func (cm *ConnManager) Connect(c *ConnReq) { // Disconnect disconnects the connection corresponding to the given connection // id. 
If permanent, the connection will be retried with an increasing backoff -// duration. -func (cm *ConnManager) Disconnect(id uint64) { +// duration. Functional options can be used to modify behavior, such as forcing +// a reconnect attempt via WithTriggerReconnect. +func (cm *ConnManager) Disconnect(id uint64, options ...ConnOption) { if atomic.LoadInt32(&cm.stop) != 0 { return } + opts := connOptions{} + for _, option := range options { + option(&opts) + } select { - case cm.requests <- handleDisconnected{id, true}: + case cm.requests <- handleDisconnected{ + id: id, retry: true, triggerReconnect: opts.triggerReconnect, + }: case <-cm.quit: } } @@ -486,7 +509,7 @@ func (cm *ConnManager) Remove(id uint64) { } select { - case cm.requests <- handleDisconnected{id, false}: + case cm.requests <- handleDisconnected{id: id, retry: false}: case <-cm.quit: } } diff --git a/database/cmd/dbtool/globalconfig.go b/database/cmd/dbtool/globalconfig.go index 4e58168a33..bcea56a209 100644 --- a/database/cmd/dbtool/globalconfig.go +++ b/database/cmd/dbtool/globalconfig.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" @@ -36,6 +37,7 @@ type config struct { RegressionTest bool `long:"regtest" description:"Use the regression test network"` SimNet bool `long:"simnet" description:"Use the simulation test network"` TestNet3 bool `long:"testnet" description:"Use the test network"` + TestNet4 bool `long:"testnet4" description:"Use the test network (version 4)"` } // fileExists reports whether the named file or directory exists. @@ -50,13 +52,7 @@ func fileExists(name string) bool { // validDbType returns whether or not dbType is a supported database type. 
func validDbType(dbType string) bool { - for _, knownType := range knownDbTypes { - if dbType == knownType { - return true - } - } - - return false + return slices.Contains(knownDbTypes, dbType) } // netName returns the name used when referring to a bitcoin network. At the @@ -89,6 +85,10 @@ func setupGlobalConfig() error { numNets++ activeNetParams = &chaincfg.TestNet3Params } + if cfg.TestNet4 { + numNets++ + activeNetParams = &chaincfg.TestNet4Params + } if cfg.RegressionTest { numNets++ activeNetParams = &chaincfg.RegressionNetParams diff --git a/database/ffldb/blockio.go b/database/ffldb/blockio.go index 2b415a17b0..7690a6ade9 100644 --- a/database/ffldb/blockio.go +++ b/database/ffldb/blockio.go @@ -10,6 +10,7 @@ package ffldb import ( "container/list" "encoding/binary" + "errors" "fmt" "hash/crc32" "io" @@ -19,6 +20,7 @@ import ( "strconv" "strings" "sync" + "syscall" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" @@ -137,7 +139,7 @@ type blockStore struct { // lruMutex protects concurrent access to the least recently used list // and lookup map. // - // openBlocksLRU tracks how the open files are refenced by pushing the + // openBlocksLRU tracks how the open files are referenced by pushing the // most recently used files to the front of the list thereby trickling // the least recently used files to end of the list. When a file needs // to be closed due to exceeding the max number of allowed open @@ -224,7 +226,7 @@ func serializeBlockLoc(loc blockLocation) []byte { return serializedData[:] } -// blockFilePath return the file path for the provided block file number. +// blockFilePath returns the file path for the provided block file number. 
func blockFilePath(dbPath string, fileNum uint32) string { fileName := fmt.Sprintf(blockFilenameTemplate, fileNum) return filepath.Join(dbPath, fileName) @@ -281,17 +283,7 @@ func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) { lruList := s.openBlocksLRU if lruList.Len() >= maxOpenFiles { lruFileNum := lruList.Remove(lruList.Back()).(uint32) - oldBlockFile := s.openBlockFiles[lruFileNum] - - // Close the old file under the write lock for the file in case - // any readers are currently reading from it so it's not closed - // out from under them. - oldBlockFile.Lock() - _ = oldBlockFile.file.Close() - oldBlockFile.Unlock() - - delete(s.openBlockFiles, lruFileNum) - delete(s.fileNumToLRUElem, lruFileNum) + s.closeFile(lruFileNum) } s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum) s.lruMutex.Unlock() @@ -302,11 +294,36 @@ func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) { return blockFile, nil } +// closeFile checks that the file corresponding to the file number is open and +// if it is, it closes it in a concurrency safe manner and cleans up associated +// data in the blockstore struct. +func (s *blockStore) closeFile(fileNum uint32) { + blockFile := s.openBlockFiles[fileNum] + if blockFile == nil { + return + } + + // Close the old file under the write lock for the file in case + // any readers are currently reading from it so it's not closed + // out from under them. + blockFile.Lock() + _ = blockFile.file.Close() + blockFile.Unlock() + + delete(s.openBlockFiles, fileNum) + delete(s.fileNumToLRUElem, fileNum) +} + // deleteFile removes the block file for the passed flat file number. The file // must already be closed and it is the responsibility of the caller to do any // other state cleanup necessary. 
func (s *blockStore) deleteFile(fileNum uint32) error { filePath := blockFilePath(s.basePath, fileNum) + blockFile := s.openBlockFiles[fileNum] + if blockFile != nil { + err := fmt.Errorf("attempted to delete open file at %v", filePath) + return makeDbErr(database.ErrDriverSpecific, err.Error(), err) + } if err := os.Remove(filePath); err != nil { return makeDbErr(database.ErrDriverSpecific, err.Error(), err) } @@ -387,6 +404,12 @@ func (s *blockStore) writeData(data []byte, fieldName string) error { n, err := wc.curFile.file.WriteAt(data, int64(wc.curOffset)) wc.curOffset += uint32(n) if err != nil { + if errors.Is(err, syscall.ENOSPC) { + log.Errorf("%v. Cannot save any more blocks "+ + "due to the disk being full "+ + "-- exiting", err) + os.Exit(1) + } str := fmt.Sprintf("failed to write %s to file %d at "+ "offset %d: %v", fieldName, wc.curFileNum, wc.curOffset-uint32(n), err) @@ -616,6 +639,12 @@ func (s *blockStore) syncBlocks() error { // Sync the file to disk. if err := wc.curFile.file.Sync(); err != nil { + if errors.Is(err, syscall.ENOSPC) { + log.Errorf("%v. Cannot save any more blocks "+ + "due to the disk being full "+ + "-- exiting", err) + os.Exit(1) + } str := fmt.Sprintf("failed to sync file %d: %v", wc.curFileNum, err) return makeDbErr(database.ErrDriverSpecific, str, err) @@ -765,7 +794,7 @@ func scanBlockFiles(dbPath string) (int, int, uint32, error) { // and offset set and all fields initialized. func newBlockStore(basePath string, network wire.BitcoinNet) (*blockStore, error) { // Look for the end of the latest block to file to determine what the - // write cursor position is from the viewpoing of the block files on + // write cursor position is from the viewpoint of the block files on // disk. 
_, fileNum, fileOff, err := scanBlockFiles(basePath) if err != nil { diff --git a/database/ffldb/db.go b/database/ffldb/db.go index 3e96bfc738..60103aaa58 100644 --- a/database/ffldb/db.go +++ b/database/ffldb/db.go @@ -1630,6 +1630,9 @@ func (tx *transaction) writePendingAndCommit() error { // We do this first before doing any of the writes as we can't undo // deletions of files. for _, fileNum := range tx.pendingDelFileNums { + // Make sure the file is closed before attempting to delete it. + tx.db.store.closeFile(fileNum) + err := tx.db.store.deleteFileFunc(fileNum) if err != nil { // Nothing we can do if we fail to delete blocks besides diff --git a/database/ffldb/dbcache.go b/database/ffldb/dbcache.go index ec42ee969e..7e6a44dcbc 100644 --- a/database/ffldb/dbcache.go +++ b/database/ffldb/dbcache.go @@ -6,8 +6,11 @@ package ffldb import ( "bytes" + "errors" "fmt" + "os" "sync" + "syscall" "time" "github.com/btcsuite/btcd/database/internal/treap" @@ -511,6 +514,12 @@ func (c *dbCache) flush() error { // Perform all leveldb updates using an atomic transaction. if err := c.commitTreaps(cachedKeys, cachedRemove); err != nil { + if errors.Is(err, syscall.ENOSPC) { + log.Errorf("%v. Cannot save any more blocks "+ + "due to the disk being full "+ + "-- exiting", err) + os.Exit(1) + } return err } @@ -569,6 +578,12 @@ func (c *dbCache) commitTx(tx *transaction) error { // database if a flush is needed. if c.needsFlush(tx) { if err := c.flush(); err != nil { + if errors.Is(err, syscall.ENOSPC) { + log.Errorf("%v. Cannot save any more blocks "+ + "due to the disk being full "+ + "-- exiting", err) + os.Exit(1) + } return err } diff --git a/database/ffldb/driver_test.go b/database/ffldb/driver_test.go index 0b2f452032..7759601993 100644 --- a/database/ffldb/driver_test.go +++ b/database/ffldb/driver_test.go @@ -103,14 +103,13 @@ func TestCreateOpenFail(t *testing.T) { // Ensure operations against a closed database return the expected // error. 
- dbPath := filepath.Join(os.TempDir(), "ffldb-createfail") + dbPath := filepath.Join(t.TempDir(), "ffldb-createfail") _ = os.RemoveAll(dbPath) db, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Create: unexpected error: %v", err) return } - defer os.RemoveAll(dbPath) db.Close() wantErrCode = database.ErrDbNotOpen @@ -154,14 +153,13 @@ func TestPersistence(t *testing.T) { t.Parallel() // Create a new database to run tests against. - dbPath := filepath.Join(os.TempDir(), "ffldb-persistencetest") + dbPath := filepath.Join(t.TempDir(), "ffldb-persistencetest") _ = os.RemoveAll(dbPath) db, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to create test database (%s) %v", dbType, err) return } - defer os.RemoveAll(dbPath) defer db.Close() // Create a bucket, put some values into it, and store a block so they @@ -335,6 +333,20 @@ func TestPrune(t *testing.T) { t.Fatal(err) } + // Open the first block file before the pruning happens in the + // code snippet below. This let's us test that block files are + // properly closed before attempting to delete them. + err = db.View(func(tx database.Tx) error { + _, err := tx.FetchBlock(blocks[0].Hash()) + if err != nil { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + var deletedBlocks []chainhash.Hash // This should leave 3 files on disk. @@ -433,14 +445,13 @@ func TestInterface(t *testing.T) { t.Parallel() // Create a new database to run tests against. - dbPath := filepath.Join(os.TempDir(), "ffldb-interfacetest") + dbPath := filepath.Join(t.TempDir(), "ffldb-interfacetest") _ = os.RemoveAll(dbPath) db, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to create test database (%s) %v", dbType, err) return } - defer os.RemoveAll(dbPath) defer db.Close() // Ensure the driver type is the expected value. 
diff --git a/database/ffldb/whitebox_test.go b/database/ffldb/whitebox_test.go index cac4984077..2814dfaa93 100644 --- a/database/ffldb/whitebox_test.go +++ b/database/ffldb/whitebox_test.go @@ -165,8 +165,8 @@ func TestConvertErr(t *testing.T) { func TestCornerCases(t *testing.T) { t.Parallel() - // Create a file at the datapase path to force the open below to fail. - dbPath := filepath.Join(os.TempDir(), "ffldb-errors") + // Create a file at the database path to force the open below to fail. + dbPath := filepath.Join(t.TempDir(), "ffldb-errors") _ = os.RemoveAll(dbPath) fi, err := os.Create(dbPath) if err != nil { @@ -603,14 +603,13 @@ func testCorruption(tc *testContext) bool { // correctly. func TestFailureScenarios(t *testing.T) { // Create a new database to run tests against. - dbPath := filepath.Join(os.TempDir(), "ffldb-failurescenarios") + dbPath := filepath.Join(t.TempDir(), "ffldb-failurescenarios") _ = os.RemoveAll(dbPath) idb, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to create test database (%s) %v", dbType, err) return } - defer os.RemoveAll(dbPath) defer idb.Close() // Create a test context to pass around. diff --git a/docs/code_contribution_guidelines.md b/docs/code_contribution_guidelines.md index da775878da..ecfa0f9fb2 100644 --- a/docs/code_contribution_guidelines.md +++ b/docs/code_contribution_guidelines.md @@ -19,6 +19,20 @@ is outlined on this page. We highly encourage code contributions, however it is imperative that you adhere to the guidelines established on this page. +## Substantial contributions only + +Due to the prevalence of automated analysis and pull request authoring tools +and online competitions that incentivize creating commits in popular +repositories, the maintainers of this project are flooded with trivial pull +requests that only change some typos or other insubstantial content (e.g. the +year in the license file). 
+If you are an honest user that wants to contribute to this project, please +consider that every pull request takes precious time from the maintainers to +review and consider the impact of changes. Time that could be spent writing +features or fixing bugs. +If you really want to contribute, consider reviewing and testing other users' +pull requests instead. Or add value to the project by writing unit tests. + ## Minimum Recommended Skillset The following list is a set of core competencies that we recommend you possess @@ -182,6 +196,11 @@ if amt < 5460 { but it was left as a magic number to show how much of a difference a good comment can make. +**Please refer to the [code formatting rules +document](./code_formatting_rules.md)** to see the list of additional style +rules we enforce. + + ## Model Git Commit Messages This project prefers to keep a clean commit history with well-formed commit @@ -223,8 +242,8 @@ commit messages. Here are some of the reasons why wrapping your commit messages to 72 columns is a good thing. -- git log doesn’t do any special special wrapping of the commit messages. With - the default pager of less -S, this means your paragraphs flow far off the edge +- git log doesn’t do any special wrapping of the commit messages. With the + default pager of less -S, this means your paragraphs flow far off the edge of the screen, making them difficult to read. On an 80 column terminal, if we subtract 4 columns for the indent on the left and 4 more for symmetry on the right, we’re left with 72 columns. 
@@ -297,7 +316,7 @@ Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite ## Contribution Checklist -- [  ] All changes are Go version 1.17 compliant +- [  ] All changes are Go version 1.22 compliant - [  ] The code being submitted is commented according to the [Code Documentation and Commenting](#CodeDocumentation) section - [  ] For new code: Code is accompanied by tests which exercise both diff --git a/docs/code_formatting_rules.md b/docs/code_formatting_rules.md new file mode 100644 index 0000000000..b51ab3cb3b --- /dev/null +++ b/docs/code_formatting_rules.md @@ -0,0 +1,336 @@ +# Code formatting rules + +## Why this emphasis on formatting? + +Code in general (and Open Source code specifically) is _read_ by developers many +more times during its lifecycle than it is modified. With this fact in mind, the +Golang language was designed for readability (among other goals). +While the enforced formatting of `go fmt` and some best practices already +eliminate many discussions, the resulting code can still look and feel very +differently among different developers. + +We aim to enforce a few additional rules to unify the look and feel of all code +in `btcd` to help improve the overall readability. + +## Spacing + +Blocks of code within `btcd` should be segmented into logical stanzas of +operation. Such spacing makes the code easier to follow at a skim, and reduces +unnecessary line noise. Coupled with the commenting scheme specified in the +[contribution guide](./code_contribution_guidelines.md#code-documentation-and-commenting), +proper spacing allows readers to quickly scan code, extracting semantics quickly. +Functions should _not_ just be laid out as a bare contiguous block of code. 
+ +**WRONG** +```go + witness := make([][]byte, 4) + witness[0] = nil + if bytes.Compare(pubA, pubB) == -1 { + witness[1] = sigB + witness[2] = sigA + } else { + witness[1] = sigA + witness[2] = sigB + } + witness[3] = witnessScript + return witness +``` +**RIGHT** +```go + witness := make([][]byte, 4) + + // When spending a p2wsh multi-sig script, rather than an OP_0, we add + // a nil stack element to eat the extra pop. + witness[0] = nil + + // When initially generating the witnessScript, we sorted the serialized + // public keys in descending order. So we do a quick comparison in order + // to ensure the signatures appear on the Script Virtual Machine stack in + // the correct order. + if bytes.Compare(pubA, pubB) == -1 { + witness[1] = sigB + witness[2] = sigA + } else { + witness[1] = sigA + witness[2] = sigB + } + + // Finally, add the preimage as the last witness element. + witness[3] = witnessScript + + return witness +``` + +Additionally, we favor spacing between stanzas within syntax like: switch case +statements and select statements. + +**WRONG** +```go + switch { + case a: + + case b: + + case c: + + case d: + + default: + + } +``` +**RIGHT** +```go + switch { + // Brief comment detailing instances of this case (repeat below). + case a: + + + case b: + + + case c: + + + case d: + + + default: + + } +``` + +## Additional Style Constraints + +Before a PR is submitted, the proposer should ensure that the file passes the +set of linting scripts run by `make lint`. These include `gofmt`. In addition +to `gofmt` we've opted to enforce the following style guidelines. + +### 80 character line length + +ALL columns (on a best effort basis) should be wrapped to 80 line columns. +Editors should be set to treat a **tab as 8 spaces**. 
+ +**WRONG** +```go +myKey := "0214cd678a565041d00e6cf8d62ef8add33b4af4786fb2beb87b366a2e151fcee7" +``` + +**RIGHT** +```go +myKey := "0214cd678a565041d00e6cf8d62ef8add33b4af4786fb2beb87b366a2e1" + + "51fcee7" +``` + +### Wrapping long function calls + +When wrapping a line that contains a function call as the unwrapped line exceeds +the column limit, the close parenthesis should be placed on its own line. +Additionally, all arguments should begin in a new line after the open parenthesis. + +**WRONG** +```go +value, err := bar(a, + a, b, c) +``` + +**RIGHT** +```go +value, err := bar( + a, a, b, c, +) +``` + +#### Exception for log and error message formatting + +**Note that the above guidelines don't apply to log or error messages.** For +log and error messages, committers should attempt to minimize the number of +lines utilized, while still adhering to the 80-character column limit. For +example: + +**WRONG** +```go +return fmt.Errorf( + "this is a long error message with a couple (%d) place holders", + len(things), +) + +log.Debugf( + "Something happened here that we need to log: %v", + longVariableNameHere, +) +``` + +**RIGHT** +```go +return fmt.Errorf("this is a long error message with a couple (%d) place "+ + "holders", len(things)) + +log.Debugf("Something happened here that we need to log: %v", + longVariableNameHere) +``` + +This helps to visually distinguish those formatting statements (where nothing +of consequence happens except for formatting an error message or writing +to a log) from actual method or function calls. This compact formatting should +be used for calls to formatting functions like `fmt.Errorf`, +`log.(Trace|Debug|Info|Warn|Error)f` and `fmt.Printf`. +But not for statements that are important for the flow or logic of the code, +like `require.NoErrorf()`. 
+
+#### Exceptions and additional styling for structured logging
+
+When making use of structured logging calls (these are any `btclog.Logger`
+methods ending in `S`), a few different rules and exceptions apply.
+
+1) **Static messages:** Structured log calls take a `context.Context` as a first
+parameter and a _static_ string as the second parameter (the `msg` parameter).
+Formatted strings should ideally not be used for the construction of the `msg`
+parameter. Instead, key-value pairs (or `slog` attributes) should be used to
+provide additional variables to the log line.
+
+**WRONG**
+```go
+log.DebugS(ctx, fmt.Sprintf("User %d just spent %.8f to open a channel", userID, 0.0154))
+```
+
+**RIGHT**
+```go
+log.InfoS(ctx, "Channel open performed",
+	slog.Int("user_id", userID),
+	btclog.Fmt("amount", "%.8f", 0.00154))
+```
+
+2) **Key-value attributes**: The third parameter in any structured log method is
+a variadic list of the `any` type but it is required that these are provided in
+key-value pairs such that an associated `slog.Attr` variable can be created for
+each key-value pair. The simplest way to specify this is to directly pass in the
+key-value pairs as raw literals as follows:
+
+```go
+log.InfoS(ctx, "Channel open performed", "user_id", userID, "amount", 0.00154)
+```
+This does work, but it becomes easy to make a mistake and accidentally leave out
+a value for each key provided leading to a nonsensical log line. To avoid this,
+it is suggested to make use of the various `slog.Attr` helper functions as
+follows:
+
+```go
+log.InfoS(ctx, "Channel open performed",
+	slog.Int("user_id", userID),
+	btclog.Fmt("amount", "%.8f", 0.00154))
+```
+
+3) **Line wrapping**: Structured log lines are an exception to the 80-character
+line wrapping rule. This is so that the key-value pairs can be easily read and
+reasoned about. If it is the case that there is only a single key-value pair
+and the entire log line is still less than 80 characters, it is acceptable to
+have the key-value pair on the same line as the log message. However, if there
+are multiple key-value pairs, it is suggested to use the one line per key-value
+pair format. Due to this suggestion, it is acceptable for any single key-value
+pair line to exceed 80 characters for the sake of readability.
+
+**WRONG**
+```go
+// Example 1.
+log.InfoS(ctx, "User connected",
+	"user_id", userID)
+
+// Example 2.
+log.InfoS(ctx, "Channel open performed", "user_id", userID,
+	btclog.Fmt("amount", "%.8f", 0.00154), "channel_id", channelID)
+
+// Example 3.
+log.InfoS(ctx, "Bytes received",
+	"user_id", userID,
+	btclog.Hex("peer_id", peerID.SerializeCompressed()),
+	btclog.Hex("message", []byte{
+		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+	}))
+```
+
+**RIGHT**
+```go
+// Example 1.
+log.InfoS(ctx, "User connected", "user_id", userID)
+
+// Example 2.
+log.InfoS(ctx, "Channel open performed",
+	slog.Int("user_id", userID),
+	btclog.Fmt("amount", "%.8f", 0.00154),
+	slog.String("channel_id", channelID))
+
+// Example 3.
+log.InfoS(ctx, "Bytes received",
+	"user_id", userID,
+	btclog.Hex("peer_id", peerID.SerializeCompressed()),
+	btclog.Hex("message", []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}))
+```
+
+### Wrapping long function definitions
+
+If one is forced to wrap lines of function arguments that exceed the
+80-character limit, then indentation must be kept on the following lines. Also,
+lines should not end with an open parenthesis if the function definition isn't
+finished yet.
+ +**WRONG** +```go +func foo(a, b, c, +) (d, error) { + +func bar(a, b, c) ( + d, error, +) { + +func baz(a, b, c) ( + d, error) { +``` +**RIGHT** +```go +func foo(a, b, + c) (d, error) { + +func baz(a, b, c) (d, + error) { + +func longFunctionName( + a, b, c) (d, error) { +``` + +If a function declaration spans multiple lines the body should start with an +empty line to help visually distinguishing the two elements. + +**WRONG** +```go +func foo(a, b, c, + d, e) error { + var a int +} +``` +**RIGHT** +```go +func foo(a, b, c, + d, e) error { + + var a int +} +``` + +## Recommended settings for your editor + +To make it easier to follow the rules outlined above, we recommend setting up +your editor with at least the following two settings: + +1. Set your tabulator width (also called "tab size") to **8 spaces**. +2. Set a ruler or visual guide at 80 character. + +Note that the two above settings are automatically applied in editors that +support the `EditorConfig` scheme (for example GoLand, GitHub, GitLab, +VisualStudio). In addition, specific settings for Visual Studio Code are checked +into the code base as well. + +Other editors (for example Atom, Notepad++, Vim, Emacs and so on) might install +a plugin to understand the rules in the `.editorconfig` file. + +In Vim, you might want to use `set colorcolumn=80`. diff --git a/docs/installation.md b/docs/installation.md index f6670bd50a..71a6e010d9 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -5,7 +5,7 @@ details on how to install on the supported operating systems. ## Requirements -[Go](http://golang.org) 1.17 or newer. +[Go](http://golang.org) 1.22 or newer. 
## GPG Verification Key diff --git a/go.mod b/go.mod index 6eea83508e..2d543d82d0 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,10 @@ module github.com/btcsuite/btcd require ( - github.com/btcsuite/btcd/btcec/v2 v2.1.3 + github.com/btcsuite/btcd/btcec/v2 v2.3.5 github.com/btcsuite/btcd/btcutil v1.1.5 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 + github.com/btcsuite/btcd/v2transport v1.0.1 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 @@ -16,8 +17,9 @@ require ( github.com/jrick/logrotate v1.0.0 github.com/stretchr/testify v1.8.4 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed + golang.org/x/crypto v0.25.0 + golang.org/x/sys v0.22.0 + pgregory.net/rapid v1.2.0 ) require ( @@ -27,6 +29,7 @@ require ( github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.0 // indirect + golang.org/x/net v0.24.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -62,4 +65,4 @@ retract ( v0.13.0-beta ) -go 1.17 +go 1.23.2 diff --git a/go.sum b/go.sum index ddd7e1ebd0..58fc73246f 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,9 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= -github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod 
h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= @@ -14,6 +15,8 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/v2transport v1.0.1 h1:pIyyyBCPwd087K3Wdb/9tIvUubAQdzTJghjPgzTQVsE= +github.com/btcsuite/btcd/v2transport v1.0.1/go.mod h1:N6H0HGSElVVJKntzaYHYVbW71DtWDLMw2yhwVRO3ZOE= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -89,14 +92,16 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 
h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -106,12 +111,14 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed h1:J22ig1FUekjjkmZUM7pTKixYm8DvrYsvrBZdunYeIuQ= golang.org/x/sys 
v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= @@ -134,3 +141,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/integration/bip0009_test.go b/integration/bip0009_test.go index 5b64480410..8f8b59a548 100644 --- a/integration/bip0009_test.go +++ b/integration/bip0009_test.go @@ -139,6 +139,14 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { } defer r.TearDown() + // If the deployment is meant to be always active, then it should be + // active from the very first block. 
+ if deploymentID == chaincfg.DeploymentTestDummyAlwaysActive { + assertChainHeight(r, t, 0) + assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdActive) + return + } + // *** ThresholdDefined *** // // Assert the chain height is the expected value and the soft fork @@ -340,6 +348,7 @@ func TestBIP0009(t *testing.T) { testBIP0009(t, "dummy", chaincfg.DeploymentTestDummy) testBIP0009(t, "dummy-min-activation", chaincfg.DeploymentTestDummyMinActivation) + testBIP0009(t, "dummy-always-active", chaincfg.DeploymentTestDummyAlwaysActive) testBIP0009(t, "segwit", chaincfg.DeploymentSegwit) } diff --git a/integration/rpctest/memwallet.go b/integration/rpctest/memwallet.go index d084e99feb..bad48a92a4 100644 --- a/integration/rpctest/memwallet.go +++ b/integration/rpctest/memwallet.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/binary" "fmt" + "maps" "sync" "github.com/btcsuite/btcd/blockchain" @@ -325,9 +326,7 @@ func (m *memWallet) unwindBlock(update *chainUpdate) { delete(m.utxos, utxo) } - for outPoint, utxo := range undo.utxosDestroyed { - m.utxos[outPoint] = utxo - } + maps.Copy(m.utxos, undo.utxosDestroyed) delete(m.reorgJournal, update.blockHeight) } diff --git a/integration/rpctest/rpc_harness.go b/integration/rpctest/rpc_harness.go index 0b85232868..0e8c53dbe9 100644 --- a/integration/rpctest/rpc_harness.go +++ b/integration/rpctest/rpc_harness.go @@ -303,10 +303,12 @@ func (h *Harness) SetUp(createTestChain bool, numMatureOutputs uint32) error { func (h *Harness) tearDown() error { if h.Client != nil { h.Client.Shutdown() + h.Client.WaitForShutdown() } if h.BatchClient != nil { h.BatchClient.Shutdown() + h.BatchClient.WaitForShutdown() } if err := h.node.shutdown(); err != nil { @@ -570,10 +572,10 @@ func NextAvailablePortForProcess(pid int) int { os.TempDir(), fmt.Sprintf("rpctest-port-pid-%d.lock", pid), ) timeout := time.After(time.Second) - + var ( lockFileHandle *os.File - err error + err error ) for { // Attempt to acquire the lock file. 
If it already exists, wait diff --git a/log.go b/log.go index 5707d7c23a..55be25cace 100644 --- a/log.go +++ b/log.go @@ -21,6 +21,7 @@ import ( "github.com/btcsuite/btcd/netsync" "github.com/btcsuite/btcd/peer" "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/v2transport" "github.com/btcsuite/btclog" "github.com/jrick/logrotate/rotator" @@ -69,6 +70,7 @@ var ( srvrLog = backendLog.Logger("SRVR") syncLog = backendLog.Logger("SYNC") txmpLog = backendLog.Logger("TXMP") + v2trLog = backendLog.Logger(v2transport.Subsystem) ) // Initialize package-global logger variables. @@ -84,25 +86,27 @@ func init() { txscript.UseLogger(scrpLog) netsync.UseLogger(syncLog) mempool.UseLogger(txmpLog) + v2transport.UseLogger(v2trLog) } // subsystemLoggers maps each subsystem identifier to its associated logger. var subsystemLoggers = map[string]btclog.Logger{ - "ADXR": adxrLog, - "AMGR": amgrLog, - "CMGR": cmgrLog, - "BCDB": bcdbLog, - "BTCD": btcdLog, - "CHAN": chanLog, - "DISC": discLog, - "INDX": indxLog, - "MINR": minrLog, - "PEER": peerLog, - "RPCS": rpcsLog, - "SCRP": scrpLog, - "SRVR": srvrLog, - "SYNC": syncLog, - "TXMP": txmpLog, + "ADXR": adxrLog, + "AMGR": amgrLog, + "CMGR": cmgrLog, + "BCDB": bcdbLog, + "BTCD": btcdLog, + "CHAN": chanLog, + "DISC": discLog, + "INDX": indxLog, + "MINR": minrLog, + "PEER": peerLog, + "RPCS": rpcsLog, + "SCRP": scrpLog, + "SRVR": srvrLog, + "SYNC": syncLog, + "TXMP": txmpLog, + v2transport.Subsystem: v2trLog, } // initLogRotator initializes the logging rotater to write logs to logFile and diff --git a/mempool/mempool.go b/mempool/mempool.go index cc0ec10fe0..212a07fa85 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -7,6 +7,7 @@ package mempool import ( "container/list" "fmt" + "maps" "math" "sync" "sync/atomic" @@ -105,7 +106,7 @@ type Config struct { // This can be nil if the address index is not enabled. AddrIndex *indexers.AddrIndex - // FeeEstimatator provides a feeEstimator. 
If it is not nil, the mempool + // FeeEstimator provides a feeEstimator. If it is not nil, the mempool // records all new transactions it observes into the feeEstimator. FeeEstimator *FeeEstimator } @@ -696,9 +697,7 @@ func (mp *TxPool) txAncestors(tx *btcutil.Tx, cache[*parent.Tx.Hash()] = moreAncestors } - for hash, ancestor := range moreAncestors { - ancestors[hash] = ancestor - } + maps.Copy(ancestors, moreAncestors) } return ancestors @@ -766,9 +765,8 @@ func (mp *TxPool) txConflicts(tx *btcutil.Tx) map[chainhash.Hash]*btcutil.Tx { continue } conflicts[*conflict.Hash()] = conflict - for hash, descendant := range mp.txDescendants(conflict, nil) { - conflicts[hash] = descendant - } + descendants := mp.txDescendants(conflict, nil) + maps.Copy(conflicts, descendants) } return conflicts } diff --git a/mempool/policy_test.go b/mempool/policy_test.go index 1b29d71f1f..29c0956a0e 100644 --- a/mempool/policy_test.go +++ b/mempool/policy_test.go @@ -190,7 +190,6 @@ func TestCheckPkScriptStandard(t *testing.T) { if err != nil { t.Fatalf("TestCheckPkScriptStandard test '%s' "+ "failed: %v", test.name, err) - continue } scriptClass := txscript.GetScriptClass(script) got := checkPkScriptStandard(script, scriptClass) @@ -272,7 +271,6 @@ func TestDust(t *testing.T) { if res != test.isDust { t.Fatalf("Dust test '%s' failed: want %v got %v", test.name, test.isDust, res) - continue } } } diff --git a/mining/policy.go b/mining/policy.go index 8ddd575462..b92df27796 100644 --- a/mining/policy.go +++ b/mining/policy.go @@ -29,7 +29,7 @@ type Policy struct { // generating a block template. BlockMaxWeight uint32 - // BlockMinWeight is the minimum block size to be used when generating + // BlockMinSize is the minimum block size to be used when generating // a block template. 
BlockMinSize uint32 diff --git a/netsync/manager.go b/netsync/manager.go index 3215a86ace..d10188d53f 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -309,6 +309,11 @@ func (sm *SyncManager) startSync() { higherPeers = append(higherPeers, peer) } + if sm.chain.IsCurrent() && len(higherPeers) == 0 { + log.Infof("Caught up to block %s(%d)", best.Hash.String(), best.Height) + return + } + // Pick randomly from the set of peers greater than our block height, // falling back to a random peer of the same height if none are greater. // diff --git a/params.go b/params.go index b4d1453dfb..30daec8d30 100644 --- a/params.go +++ b/params.go @@ -21,7 +21,7 @@ type params struct { } // mainNetParams contains parameters specific to the main network -// (wire.MainNet). NOTE: The RPC port is intentionally different than the +// (wire.MainNet). NOTE: The RPC port is intentionally different from the // reference implementation because btcd does not handle wallet requests. The // separate wallet process listens on the well-known port and forwards requests // it does not handle on to btcd. This approach allows the wallet process @@ -41,13 +41,21 @@ var regressionNetParams = params{ } // testNet3Params contains parameters specific to the test network (version 3) -// (wire.TestNet3). NOTE: The RPC port is intentionally different than the +// (wire.TestNet3). NOTE: The RPC port is intentionally different from the // reference implementation - see the mainNetParams comment for details. var testNet3Params = params{ Params: &chaincfg.TestNet3Params, rpcPort: "18334", } +// testNet4Params contains parameters specific to the test network (version 4) +// (wire.TestNet4). NOTE: The RPC port is intentionally different from the +// reference implementation - see the mainNetParams comment for details. +var testNet4Params = params{ + Params: &chaincfg.TestNet4Params, + rpcPort: "48334", +} + // simNetParams contains parameters specific to the simulation test network // (wire.SimNet). 
var simNetParams = params{ diff --git a/peer/log.go b/peer/log.go index 71bebd0d51..422c9c0ad7 100644 --- a/peer/log.go +++ b/peer/log.go @@ -165,9 +165,6 @@ func messageSummary(msg wire.Message) string { case *wire.MsgPong: // No summary - perhaps add nonce. - case *wire.MsgAlert: - // No summary. - case *wire.MsgMemPool: // No summary. diff --git a/peer/p2pdowngrader.go b/peer/p2pdowngrader.go new file mode 100644 index 0000000000..b0a7ab1d56 --- /dev/null +++ b/peer/p2pdowngrader.go @@ -0,0 +1,54 @@ +package peer + +import ( + "github.com/decred/dcrd/lru" +) + +const ( + // defaultDowngradeCacheSize is the default number of addresses to store + // in the P2P downgrader cache. + defaultDowngradeCacheSize = 100 +) + +// P2PDowngrader manages a list of peer addresses that should be attempted +// with the v1 P2P protocol on their next connection, typically after a v2 +// handshake failure. +type P2PDowngrader struct { + cache lru.Cache +} + +// NewP2PDowngrader returns a new P2PDowngrader instance. +// cacheSize specifies the maximum number of addresses to remember for downgrade. +func NewP2PDowngrader(cacheSize uint) *P2PDowngrader { + if cacheSize == 0 { + cacheSize = defaultDowngradeCacheSize + } + return &P2PDowngrader{ + cache: lru.NewCache(cacheSize), + } +} + +// MarkForDowngrade flags an address so that the next outbound connection +// attempt to it will use the v1 P2P protocol. +func (pd *P2PDowngrader) MarkForDowngrade(addr string) { + pd.cache.Add(addr) + + log.Debugf("P2PDowngrader: Marked %s for v1 downgrade", addr) +} + +// ShouldDowngrade checks if an address is marked for a v1 downgrade. If the +// address is found, it is removed from the list (consumed), and the function +// returns true. Otherwise, it returns false. 
+func (pd *P2PDowngrader) ShouldDowngrade(addr string) bool { + + if exists := pd.cache.Contains(addr); exists { + pd.cache.Delete(addr) + + log.Debugf("P2PDowngrader: Consumed v1 downgrade request "+ + "for %s", addr) + + return true + } + + return false +} diff --git a/peer/p2pdowngrader_test.go b/peer/p2pdowngrader_test.go new file mode 100644 index 0000000000..20c8ea0a04 --- /dev/null +++ b/peer/p2pdowngrader_test.go @@ -0,0 +1,220 @@ +package peer + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" +) + +// TestP2PDowngraderMarkAndShouldDowngrade tests the basic MarkForDowngrade and +// ShouldDowngrade functionality. +func TestP2PDowngraderMarkAndShouldDowngrade(t *testing.T) { + t.Parallel() + + rapid.Check(t, func(t *rapid.T) { + // addrGen creates somewhat realistic-looking address strings. + addrGen := rapid.Map(rapid.SliceOfN( + rapid.Byte(), 10, 20), func(bs []byte) string { + return fmt.Sprintf("%x.testpeer.net", bs) + }) + + addr1 := addrGen.Draw(t, "addr1") + addr2 := addrGen.Draw(t, "addr2") + + // Ensure addr1 and addr2 are different for a clearer test, + // though the logic holds even if they are the same. + for addr1 == addr2 { + // Suffix with retry to ensure rapid sees it as a new + // draw attempt for addr2. + addr2 = addrGen.Draw(t, "addr2_retry") + } + + // Create a P2PDowngrader with a small capacity. + downgrader := NewP2PDowngrader(2) + + // Initially, no address should be marked for downgrade. + require.False( + t, downgrader.ShouldDowngrade(addr1), + "addr1 should not be marked initially", + ) + require.False( + t, downgrader.ShouldDowngrade(addr2), + "addr2 should not be marked initially", + ) + + // Mark addr1 for downgrade. + downgrader.MarkForDowngrade(addr1) + + // Now, ShouldDowngrade for addr1 should return true. 
+		require.True(
+			t, downgrader.ShouldDowngrade(addr1),
+			"addr1 should be marked for downgrade after "+
+				"MarkForDowngrade",
+		)
+
+		// A subsequent call for addr1 should return false as it is
+		// consumed.
+		require.False(
+			t, downgrader.ShouldDowngrade(addr1),
+			"addr1 should not be marked after being consumed "+
+				"by ShouldDowngrade",
+		)
+
+		// addr2 should still not be marked.
+		require.False(
+			t, downgrader.ShouldDowngrade(addr2),
+			"addr2 should remain unmarked",
+		)
+
+		downgrader.MarkForDowngrade(addr2)
+
+		require.True(
+			t, downgrader.ShouldDowngrade(addr2),
+			"addr2 should be marked",
+		)
+		require.False(
+			t, downgrader.ShouldDowngrade(addr2),
+			"addr2 should be consumed after ShouldDowngrade",
+		)
+	})
+}
+
+// TestP2PDowngraderLRUEviction tests the LRU eviction behavior of the cache.
+func TestP2PDowngraderLRUEviction(t *testing.T) {
+	t.Parallel()
+
+	rapid.Check(t, func(t *rapid.T) {
+		// Capacity for the LRU cache.
+		capacity := rapid.UintRange(1, 5).Draw(t, "capacity")
+
+		// Generate a list of unique addresses, one more than the capacity
+		// to trigger eviction.
+		numAddresses := int(capacity + 1)
+		addresses := rapid.SliceOfNDistinct(
+			rapid.String(), numAddresses, numAddresses, rapid.ID,
+		).Draw(t, "addresses")
+
+		downgrader := NewP2PDowngrader(capacity)
+
+		// Mark the first 'capacity' addresses. These are addresses[0]
+		// through addresses[capacity-1]. In dcrd/lru, Add puts new
+		// items at the front (most recent). So, after this loop,
+		// addresses[capacity-1] is newest, addresses[0] is oldest.
+		for i := 0; i < int(capacity); i++ {
+			downgrader.MarkForDowngrade(addresses[i])
+		}
+
+		// The address that should be evicted is addresses[0] (the
+		// oldest entry in the LRU cache).
+		addressToBeEvicted := addresses[0]
+
+		// The address that causes the eviction is addresses[capacity].
+		evictingAddress := addresses[capacity]
+		downgrader.MarkForDowngrade(evictingAddress)
+
+		// Check that the address that should have been evicted is no
+		// longer marked.
+ require.False( + t, downgrader.ShouldDowngrade(addressToBeEvicted), + "address %s should have been evicted by %s", + addressToBeEvicted, evictingAddress, + ) + + // Check that the evicting address is marked and consumable. + require.True( + t, downgrader.ShouldDowngrade(evictingAddress), + "address %s (the evicting one) should "+ + "be marked", evictingAddress, + ) + + // Check the remaining (capacity-1) addresses that were not the + // first one. These are addresses[1] through + // addresses[capacity-1]. They should still be present. + for i := 1; i < int(capacity); i++ { + require.True( + t, downgrader.ShouldDowngrade(addresses[i]), + "address %s should still be marked", + addresses[i], + ) + } + }) +} + +// TestP2PDowngraderMarkExistingUpdatesLRU tests that marking an existing +// address moves it to the front of the LRU list. This prevents it from being +// evicted prematurely. +func TestP2PDowngraderMarkExistingUpdatesLRU(t *testing.T) { + t.Parallel() + + rapid.Check(t, func(t *rapid.T) { + const capacity = 3 + + // Generate 4 unique addresses. + numAddresses := capacity + 1 + addresses := rapid.SliceOfNDistinct( + rapid.String(), numAddresses, numAddresses, rapid.ID, + ).Draw(t, "addresses") + + addrA := addresses[0] + addrB := addresses[1] + addrC := addresses[2] + + // This will be the evicting address. + addrD := addresses[3] + + downgrader := NewP2PDowngrader(capacity) + + // Step 1: Mark A, B, C. + // Cache state (newest to oldest): [C, B, A]. + downgrader.MarkForDowngrade(addrA) + downgrader.MarkForDowngrade(addrB) + downgrader.MarkForDowngrade(addrC) + + // Step 2: Mark A again. This should move A to the front + // (newest). Cache state (newest to oldest): [A, C, B]. + downgrader.MarkForDowngrade(addrA) + + // Step 3: Mark D. This should evict B (which is now the + // oldest). Cache state (newest to oldest): [D, A, C]. + downgrader.MarkForDowngrade(addrD) + + // Assert that B was evicted. 
+ require.False( + t, downgrader.ShouldDowngrade(addrB), + "addrB should have been evicted", + ) + + // Assert that A, C, and D are still present and consumable. + // Order of checking does not strictly matter here, just + // presence. + require.True( + t, downgrader.ShouldDowngrade(addrA), + "addrA should be marked (was re-marked "+ + "and moved to front)", + ) + require.True( + t, downgrader.ShouldDowngrade(addrC), + "addrC should be marked", + ) + require.True( + t, downgrader.ShouldDowngrade(addrD), + "addrD should be marked (evicted B)", + ) + + // Verify they are consumed. + require.False( + t, downgrader.ShouldDowngrade(addrA), + "addrA should be consumed", + ) + require.False( + t, downgrader.ShouldDowngrade(addrC), + "addrC should be consumed", + ) + require.False( + t, downgrader.ShouldDowngrade(addrD), + "addrD should be consumed", + ) + }) +} diff --git a/peer/peer.go b/peer/peer.go index 195fc0b4fe..ee6f3175da 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -21,6 +21,7 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/v2transport" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/go-socks/socks" "github.com/davecgh/go-spew/spew" @@ -111,9 +112,6 @@ type MessageListeners struct { // OnPong is invoked when a peer receives a pong bitcoin message. OnPong func(p *Peer, msg *wire.MsgPong) - // OnAlert is invoked when a peer receives an alert bitcoin message. - OnAlert func(p *Peer, msg *wire.MsgAlert) - // OnMemPool is invoked when a peer receives a mempool bitcoin message. OnMemPool func(p *Peer, msg *wire.MsgMemPool) @@ -289,6 +287,10 @@ type Config struct { // scenarios where the stall behavior isn't important to the system // under test. DisableStallHandler bool + + // UsingV2Conn is defined if and only if we accept and attempt to make + // v2 connections. 
+ UsingV2Conn bool } // minUint32 is a helper function to return the minimum of two uint32s. @@ -393,6 +395,7 @@ type StatsSnap struct { LastPingNonce uint64 LastPingTime time.Time LastPingMicros int64 + V2Connection bool } // HashFunc is a function which returns a block hash, height and error @@ -463,6 +466,8 @@ type Peer struct { witnessEnabled bool sendAddrV2 bool + V2Transport *v2transport.Peer + wireEncoding wire.MessageEncoding knownInventory lru.Cache @@ -572,6 +577,7 @@ func (p *Peer) StatsSnapshot() *StatsSnap { LastPingNonce: p.lastPingNonce, LastPingMicros: p.lastPingMicros, LastPingTime: p.lastPingTime, + V2Connection: p.cfg.UsingV2Conn, } p.statsMtx.RUnlock() @@ -1065,10 +1071,41 @@ func (p *Peer) handlePongMsg(msg *wire.MsgPong) { } } -// readMessage reads the next bitcoin message from the peer with logging. -func (p *Peer) readMessage(encoding wire.MessageEncoding) (wire.Message, []byte, error) { - n, msg, buf, err := wire.ReadMessageWithEncodingN(p.conn, - p.ProtocolVersion(), p.cfg.ChainParams.Net, encoding) +// readMessage reads the next bitcoin message from the peer with logging. The +// partial bool indicates that we've partially read a message already. In this +// case, we use the ReadPartialMessageWithEncodingN function. 
+func (p *Peer) readMessage(encoding wire.MessageEncoding, partial bool) ( + wire.Message, []byte, error) { + + var ( + n int + msg wire.Message + buf []byte + plaintext []byte + err error + ) + + if p.cfg.UsingV2Conn { + plaintext, err = p.V2Transport.V2ReceivePacket(nil) + if err != nil { + return nil, nil, err + } + + msg, buf, err = wire.ReadV2MessageN( + plaintext, p.ProtocolVersion(), encoding, + ) + n = len(plaintext) + } else if partial { + n, msg, buf, err = wire.ReadPartialMessageWithEncodingN( + p.conn, p.ProtocolVersion(), p.cfg.ChainParams.Net, encoding, + p.V2Transport.ReceivedPrefix(), + ) + } else { + n, msg, buf, err = wire.ReadMessageWithEncodingN( + p.conn, p.ProtocolVersion(), p.cfg.ChainParams.Net, encoding, + ) + } + atomic.AddUint64(&p.bytesReceived, uint64(n)) if p.cfg.Listeners.OnRead != nil { p.cfg.Listeners.OnRead(p, n, msg, err) @@ -1105,6 +1142,25 @@ func (p *Peer) writeMessage(msg wire.Message, enc wire.MessageEncoding) error { return nil } + var ( + buf bytes.Buffer + n int + err error + ) + + if p.cfg.UsingV2Conn { + _, err = wire.WriteV2MessageN(&buf, msg, p.ProtocolVersion(), enc) + if err != nil { + return err + } + + _, n, err = p.V2Transport.V2EncPacket(buf.Bytes(), nil, false) + } else { + n, err = wire.WriteMessageWithEncodingN( + p.conn, msg, p.ProtocolVersion(), p.cfg.ChainParams.Net, enc, + ) + } + // Use closures to log expensive operations so they are only run when // the logging level requires it. log.Debugf("%v", newLogClosure(func() string { @@ -1120,18 +1176,9 @@ func (p *Peer) writeMessage(msg wire.Message, enc wire.MessageEncoding) error { return spew.Sdump(msg) })) log.Tracef("%v", newLogClosure(func() string { - var buf bytes.Buffer - _, err := wire.WriteMessageWithEncodingN(&buf, msg, p.ProtocolVersion(), - p.cfg.ChainParams.Net, enc) - if err != nil { - return err.Error() - } return spew.Sdump(buf.Bytes()) })) - // Write the message to the peer. 
- n, err := wire.WriteMessageWithEncodingN(p.conn, msg, - p.ProtocolVersion(), p.cfg.ChainParams.Net, enc) atomic.AddUint64(&p.bytesSent, uint64(n)) if p.cfg.Listeners.OnWrite != nil { p.cfg.Listeners.OnWrite(p, n, msg, err) @@ -1400,7 +1447,7 @@ out: // Read a message and stop the idle timer as soon as the read // is done. The timer is reset below for the next iteration if // needed. - rmsg, buf, err := p.readMessage(p.wireEncoding) + rmsg, buf, err := p.readMessage(p.wireEncoding, false) idleTimer.Stop() if err != nil { // In order to allow regression tests with malformed messages, don't @@ -1498,11 +1545,6 @@ out: p.cfg.Listeners.OnPong(p, msg) } - case *wire.MsgAlert: - if p.cfg.Listeners.OnAlert != nil { - p.cfg.Listeners.OnAlert(p, msg) - } - case *wire.MsgMemPool: if p.cfg.Listeners.OnMemPool != nil { p.cfg.Listeners.OnMemPool(p, msg) @@ -1963,10 +2005,16 @@ func (p *Peer) Disconnect() { // readRemoteVersionMsg waits for the next message to arrive from the remote // peer. If the next message is not a version message or the version is not -// acceptable then return an error. -func (p *Peer) readRemoteVersionMsg() error { - // Read their version message. - remoteMsg, _, err := p.readMessage(wire.LatestEncoding) +// acceptable then return an error. The readPartial bool denotes whether we +// need to read the rest of a partially-received version message. This only +// happens with implicitly downgraded v2->v1 connections. +func (p *Peer) readRemoteVersionMsg(readPartial bool) error { + var ( + remoteMsg wire.Message + err error + ) + + remoteMsg, _, err = p.readMessage(wire.LatestEncoding, readPartial) if err != nil { return err } @@ -2171,7 +2219,7 @@ func (p *Peer) waitToFinishNegotiation(pver uint32) error { // can receive unknown messages before and after sendaddrv2 and still // have to wait for verack. 
for { - remoteMsg, _, err := p.readMessage(wire.LatestEncoding) + remoteMsg, _, err := p.readMessage(wire.LatestEncoding, false) if err == wire.ErrUnknownMessage { continue } else if err != nil { @@ -2215,7 +2263,44 @@ func (p *Peer) waitToFinishNegotiation(pver uint32) error { // that btcd does not implement but bitcoind does. // 6. If remote peer sent sendaddrv2 above, wait until receipt of verack. func (p *Peer) negotiateInboundProtocol() error { - if err := p.readRemoteVersionMsg(); err != nil { + // We may be anticipating a v2 connection, but if the initiating peer + // sends us a v1 version message, we need to note down that we've + // downgraded the connection internally. + downgradedConn := false + + if p.cfg.UsingV2Conn { + garbageLen := rand.Intn(v2transport.MaxGarbageLen + 1) + err := p.V2Transport.RespondV2Handshake( + garbageLen, + v2transport.BitcoinNet(p.cfg.ChainParams.Net), + ) + switch { + case errors.Is(err, v2transport.ErrUseV1Protocol): + log.Infof("Inbound v2 connection attempt from %s "+ + "downgraded to v1 (peer sent v1 version "+ + "message)", p.addr) + + p.cfg.UsingV2Conn = false + downgradedConn = true + + case err != nil: + return err + + default: + err = p.V2Transport.CompleteHandshake( + false, nil, + v2transport.BitcoinNet(p.cfg.ChainParams.Net), + ) + if err != nil { + return err + } + } + } + + // If the connection has been downgraded, then we need to parse the + // rest of the v1 version message properly. Otherwise, we read the + // entire version message off the wire. + if err := p.readRemoteVersionMsg(downgradedConn); err != nil { return err } @@ -2253,11 +2338,36 @@ func (p *Peer) negotiateInboundProtocol() error { // in the inbound case. // 6. If sendaddrv2 was received, wait for receipt of verack. func (p *Peer) negotiateOutboundProtocol() error { + if p.cfg.UsingV2Conn { + // Note that it's possible that the v2 handshake fails because + // the peer does not support v2 connections. 
In this case, we + // should detect that and reconnect using a v1 connection. This + // is the logic that bitcoind uses. + garbageLen := rand.Intn(v2transport.MaxGarbageLen + 1) + err := p.V2Transport.InitiateV2Handshake(garbageLen) + if err != nil { + return err + } + + err = p.V2Transport.CompleteHandshake( + true, nil, + v2transport.BitcoinNet(p.cfg.ChainParams.Net), + ) + if errors.Is(err, v2transport.ErrShouldDowngradeToV1) { + log.Infof("Outbound v2 connection attempt to %s "+ + "failed, will downgrade to v1 (peer does "+ + "not support v2)", p.addr) + return err + } else if err != nil { + return err + } + } + if err := p.writeLocalVersionMsg(); err != nil { return err } - if err := p.readRemoteVersionMsg(); err != nil { + if err := p.readRemoteVersionMsg(false); err != nil { return err } @@ -2327,6 +2437,10 @@ func (p *Peer) AssociateConnection(conn net.Conn) { p.conn = conn p.timeConnected = time.Now() + if p.cfg.UsingV2Conn { + p.V2Transport.UseReadWriter(conn) + } + if p.inbound { p.addr = p.conn.RemoteAddr().String() @@ -2363,6 +2477,20 @@ func (p *Peer) WaitForDisconnect() { <-p.quit } +// ShouldDowngradeToV1 is called when we try to connect to a peer via v2 BIP324 +// transport and they hang up. In this case, we should reconnect with the +// legacy transport. +// +// This function is safe for concurrent access. +func (p *Peer) ShouldDowngradeToV1() bool { + // If we weren't attempting a V2 connection with a V2 transport, + // or have no V2 transport instance, then no downgrade is indicated. + if !p.cfg.UsingV2Conn || p.V2Transport == nil { + return false + } + return p.V2Transport.ShouldDowngradeToV1() +} + // newPeerBase returns a new base bitcoin peer based on the inbound flag. This // is used by the NewInboundPeer and NewOutboundPeer functions to perform base // setup needed by both types of peers. 
@@ -2401,6 +2529,14 @@ func newPeerBase(origCfg *Config, inbound bool) *Peer { services: cfg.Services, protocolVersion: cfg.ProtocolVersion, } + + if p.cfg.UsingV2Conn && p.Services()&wire.SFNodeP2PV2 == wire.SFNodeP2PV2 { + p.V2Transport = v2transport.NewPeer() + } else { + // TODO: Hack, change. + p.cfg.UsingV2Conn = false + } + return &p } diff --git a/peer/peer_test.go b/peer/peer_test.go index 9df90c233d..5494596879 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -370,9 +370,6 @@ func TestPeerListeners(t *testing.T) { OnPong: func(p *peer.Peer, msg *wire.MsgPong) { ok <- msg }, - OnAlert: func(p *peer.Peer, msg *wire.MsgAlert) { - ok <- msg - }, OnMemPool: func(p *peer.Peer, msg *wire.MsgMemPool) { ok <- msg }, @@ -506,10 +503,6 @@ func TestPeerListeners(t *testing.T) { "OnPong", wire.NewMsgPong(42), }, - { - "OnAlert", - wire.NewMsgAlert([]byte("payload"), []byte("signature")), - }, { "OnMemPool", wire.NewMsgMemPool(), diff --git a/rpcadapters.go b/rpcadapters.go index 5a6800c532..03905c7b16 100644 --- a/rpcadapters.go +++ b/rpcadapters.go @@ -277,7 +277,7 @@ func (b *rpcSyncMgr) SyncPeerID() int32 { return b.syncMgr.SyncPeerID() } -// LocateBlocks returns the hashes of the blocks after the first known block in +// LocateHeaders returns the hashes of the blocks after the first known block in // the provided locators until the provided stop hash or the current tip is // reached, up to a max of wire.MaxBlockHeadersPerMsg hashes. // diff --git a/rpcclient/chain.go b/rpcclient/chain.go index c8562b8e65..dcac9af6a6 100644 --- a/rpcclient/chain.go +++ b/rpcclient/chain.go @@ -275,8 +275,8 @@ func (c *Client) GetBlockVerboseTx(blockHash *chainhash.Hash) (*btcjson.GetBlock // GetBlockCountAsync RPC invocation (or an applicable error). type FutureGetBlockCountResult chan *Response -// Receive waits for the Response promised by the future and returns the number -// of blocks in the longest block chain. 
+// Receive waits for the Response promised by the future and returns the height +// of the most-work fully-validated chain. The genesis block has height 0. func (r FutureGetBlockCountResult) Receive() (int64, error) { res, err := ReceiveFuture(r) if err != nil { @@ -302,7 +302,8 @@ func (c *Client) GetBlockCountAsync() FutureGetBlockCountResult { return c.SendCmd(cmd) } -// GetBlockCount returns the number of blocks in the longest block chain. +// GetBlockCount returns the height of the most-work fully-validated chain. +// The genesis block has height 0. func (c *Client) GetBlockCount() (int64, error) { return c.GetBlockCountAsync().Receive() } diff --git a/rpcclient/chain_test.go b/rpcclient/chain_test.go index ad1fb7aa2a..464506d0bb 100644 --- a/rpcclient/chain_test.go +++ b/rpcclient/chain_test.go @@ -146,7 +146,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) { } testTable := []TestTableItem{ - TestTableItem{ + { Name: "TestGetChainTxStatsAsyncSuccessTx", TestCase: func(t *testing.T) { client, serverReceivedChannel, cleanup := makeClient(t) @@ -159,7 +159,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) { } }, }, - TestTableItem{ + { Name: "TestGetChainTxStatsAsyncShutdownError", TestCase: func(t *testing.T) { client, _, cleanup := makeClient(t) @@ -192,7 +192,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) { } }, }, - TestTableItem{ + { Name: "TestGetBestBlockHashAsync", TestCase: func(t *testing.T) { client, serverReceivedChannel, cleanup := makeClient(t) diff --git a/rpcclient/errors.go b/rpcclient/errors.go index 68c0780dff..7bd40796a8 100644 --- a/rpcclient/errors.go +++ b/rpcclient/errors.go @@ -338,6 +338,23 @@ func (r BitcoindRPCErr) Error() string { return "unknown error" } +// BitcoindErrMap is a map of additional errors bitcoind can throw that are +// version dependent (e.g. versions up to v29 return the error as specified in +// `Error()` above, while versions v30 and beyond return the error as mapped +// here. 
We add a new map for errors that were simply renamed but have the same +// semantic meaning. New errors should be added above as new error constants. +var BitcoindErrMap = map[string]error{ + // The error message was changed in + // https://github.com/bitcoin/bitcoin/pull/33050 which will be included + // in bitcoind v30.0 and beyond. + "mempool script verify flag failed": ErrNonMandatoryScriptVerifyFlag, + + // The error message was changed in + // https://github.com/bitcoin/bitcoin/pull/33183 which will also be + // included in bitcoind v30.0 and beyond. + "block script verify flag failed": ErrScriptVerifyFlag, +} + // BtcdErrMap takes the errors returned from btcd's `testmempoolaccept` and // `sendrawtransaction` RPCs and map them to the errors defined above, which // are results from calling either `testmempoolaccept` or `sendrawtransaction` @@ -411,7 +428,10 @@ var BtcdErrMap = map[string]error{ "transaction already exists in blockchain": ErrTxAlreadyConfirmed, // A transaction in the mempool. - "already have transaction in mempool": ErrTxAlreadyInMempool, + // + // NOTE: For btcd v0.24.2 and beyond, the error message is "already + // have transaction in mempool". + "already have transaction": ErrTxAlreadyInMempool, // A transaction with missing inputs, that never existed or only // existed once in the past. @@ -477,7 +497,7 @@ var BtcdErrMap = map[string]error{ // // NOTE: we assume neutrino shares the same error strings as btcd. func MapRPCErr(rpcErr error) error { - // Iterate the map and find the matching error. + // Iterate the btcd error map and find the matching error. for btcdErr, err := range BtcdErrMap { // Match it against btcd's error first. if matchErrStr(rpcErr, btcdErr) { @@ -485,6 +505,15 @@ func MapRPCErr(rpcErr error) error { } } + // Also check the bitcoind error map, which is used for bitcoind version + // dependent errors. + for bitcoindErr, err := range BitcoindErrMap { + // Match it against bitcoind's error. 
+ if matchErrStr(rpcErr, bitcoindErr) { + return err + } + } + // If not found, try to match it against bitcoind's error. for i := uint32(0); i < uint32(errSentinel); i++ { err := BitcoindRPCErr(i) diff --git a/rpcclient/errors_test.go b/rpcclient/errors_test.go index e074622b11..0b5428a850 100644 --- a/rpcclient/errors_test.go +++ b/rpcclient/errors_test.go @@ -61,6 +61,14 @@ func TestMatchErrStr(t *testing.T) { matchStr: "missingorspent", matched: false, }, + { + name: "new bitcoind v30 error", + bitcoindErr: errors.New( + "mempool-script-verify-flag-failed", + ), + matchStr: "mempool script verify flag failed", + matched: true, + }, } for _, tc := range testCases { diff --git a/rpcclient/example_test.go b/rpcclient/example_test.go index 9ba9adadef..9b3ac0f36c 100644 --- a/rpcclient/example_test.go +++ b/rpcclient/example_test.go @@ -6,6 +6,7 @@ package rpcclient import ( "fmt" + "github.com/btcsuite/btcd/btcjson" ) diff --git a/rpcclient/examples/bitcoincoreunixsocket/README.md b/rpcclient/examples/bitcoincoreunixsocket/README.md new file mode 100644 index 0000000000..6ef9774647 --- /dev/null +++ b/rpcclient/examples/bitcoincoreunixsocket/README.md @@ -0,0 +1,41 @@ +Bitcoin Core HTTP POST Over Unix Socket Example +============================== + +This example shows how to use the rpcclient package to connect to a Bitcoin +Core RPC server using HTTP POST mode over a Unix Socket with TLS disabled +and gets the current block count. + +## Running the Example + +The first step is to use `go get` to download and install the rpcclient package: + +```bash +$ go get github.com/btcsuite/btcd/rpcclient +``` + +Next, modify the `main.go` source to specify the correct RPC username and +password for the RPC server: + +```Go + User: "yourrpcuser", + Pass: "yourrpcpass", +``` + +As Bitcoin Core supports only TCP/IP, we'll redirect RPC requests from the +Unix Socket to Bitcoin Core. 
For this example, we'll use the `socat` command: + +```bash +$ socat -d UNIX-LISTEN:"my-unix-socket-path",fork TCP:"host-address" +$ socat -d UNIX-LISTEN:/tmp/test.XXXX,fork TCP:localhost:8332 +``` + +Finally, navigate to the example's directory and run it with: + +```bash +$ cd $GOPATH/src/github.com/btcsuite/btcd/rpcclient/examples/bitcoincorehttp +$ go run *.go +``` + +## License + +This example is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/rpcclient/examples/bitcoincoreunixsocket/main.go b/rpcclient/examples/bitcoincoreunixsocket/main.go new file mode 100644 index 0000000000..b061d19ff8 --- /dev/null +++ b/rpcclient/examples/bitcoincoreunixsocket/main.go @@ -0,0 +1,39 @@ +// Copyright (c) 2014-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "log" + + "github.com/btcsuite/btcd/rpcclient" +) + +func main() { + // Connect to local bitcoin core RPC server using HTTP POST mode over a + // Unix Socket. + connCfg := &rpcclient.ConnConfig{ + // For unix sockets, use unix:// + "your unix socket path". + Host: "unix:///tmp/test.XXXX", + User: "yourrpcuser", + Pass: "yourrpcpass", + HTTPPostMode: true, // Bitcoin core only supports HTTP POST mode. + DisableTLS: true, // Bitcoin core does not provide TLS by default. + } + + // Notice the notification parameter is nil since notifications are + // not supported in HTTP POST mode. + client, err := rpcclient.New(connCfg, nil) + if err != nil { + log.Fatal(err) + } + defer client.Shutdown() + + // Get the current block count. 
+ blockCount, err := client.GetBlockCount() + if err != nil { + log.Fatal(err) + } + log.Printf("Block count: %d", blockCount) +} diff --git a/rpcclient/examples/btcdwebsockets/main.go b/rpcclient/examples/btcdwebsockets/main.go index 878526b076..fe3fc81b7c 100644 --- a/rpcclient/examples/btcdwebsockets/main.go +++ b/rpcclient/examples/btcdwebsockets/main.go @@ -5,8 +5,8 @@ package main import ( - "os" "log" + "os" "path/filepath" "time" diff --git a/rpcclient/examples/btcwalletwebsockets/main.go b/rpcclient/examples/btcwalletwebsockets/main.go index a63ef3db91..c41b4414d0 100644 --- a/rpcclient/examples/btcwalletwebsockets/main.go +++ b/rpcclient/examples/btcwalletwebsockets/main.go @@ -5,8 +5,8 @@ package main import ( - "os" "log" + "os" "path/filepath" "time" diff --git a/rpcclient/infrastructure.go b/rpcclient/infrastructure.go index 030d1b49f0..06e2e8e269 100644 --- a/rpcclient/infrastructure.go +++ b/rpcclient/infrastructure.go @@ -7,6 +7,7 @@ package rpcclient import ( "bytes" "container/list" + "context" "crypto/tls" "crypto/x509" "encoding/base64" @@ -14,12 +15,13 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "net" "net/http" "net/url" "os" + "strconv" + "strings" "sync" "sync/atomic" "time" @@ -71,6 +73,9 @@ var ( // client having already connected to the RPC server. ErrClientAlreadyConnected = errors.New("websocket client has already " + "connected") + + // ErrEmptyBatch is an error to describe that there is nothing to send. + ErrEmptyBatch = errors.New("batch is empty") ) const ( @@ -89,6 +94,10 @@ const ( // requestRetryInterval is the initial amount of time to wait in between // retries when sending HTTP POST requests. requestRetryInterval = time.Millisecond * 500 + + // defaultHTTPTimeout is the default timeout for an http request, so the + // request does not block indefinitely. 
+ defaultHTTPTimeout = time.Minute ) // jsonRequest holds information about a json request that is used to properly @@ -144,6 +153,7 @@ type Client struct { // whether or not to batch requests, false unless changed by Batch() batch bool + batchLock sync.Mutex batchList *list.List // retryCount holds the number of times the client has tried to @@ -207,7 +217,10 @@ func (c *Client) addRequest(jReq *jsonRequest) error { element := c.requestList.PushBack(jReq) c.requestMap[jReq.id] = element } else { + c.batchLock.Lock() element := c.batchList.PushBack(jReq) + c.batchLock.Unlock() + c.requestMap[jReq.id] = element } return nil @@ -231,7 +244,9 @@ func (c *Client) removeRequest(id uint64) *jsonRequest { var request *jsonRequest if c.batch { + c.batchLock.Lock() request = c.batchList.Remove(element).(*jsonRequest) + c.batchLock.Unlock() } else { request = c.requestList.Remove(element).(*jsonRequest) } @@ -752,24 +767,26 @@ out: // result, unmarshalling it, and delivering the unmarshalled result to the // provided response channel. func (c *Client) handleSendPostMessage(jReq *jsonRequest) { - protocol := "http" - if !c.config.DisableTLS { - protocol = "https" - } - url := protocol + "://" + c.config.Host - var ( - err, lastErr error + lastErr error backoff time.Duration httpResponse *http.Response ) + httpURL, err := c.config.httpURL() + if err != nil { + jReq.responseChan <- &Response{ + err: fmt.Errorf("failed to parse address %v", err), + } + return + } + tries := 10 for i := 0; i < tries; i++ { var httpReq *http.Request bodyReader := bytes.NewReader(jReq.marshalledJSON) - httpReq, err = http.NewRequest("POST", url, bodyReader) + httpReq, err = http.NewRequest("POST", httpURL, bodyReader) if err != nil { jReq.responseChan <- &Response{result: nil, err: err} return @@ -837,7 +854,7 @@ func (c *Client) handleSendPostMessage(jReq *jsonRequest) { } // Read the raw bytes and close the response. 
- respBytes, err := ioutil.ReadAll(httpResponse.Body) + respBytes, err := io.ReadAll(httpResponse.Body) httpResponse.Body.Close() if err != nil { err = fmt.Errorf("error reading json reply: %v", err) @@ -1356,10 +1373,24 @@ func newHTTPClient(config *ConnConfig) (*http.Client, error) { } } + parsedDialAddr, err := ParseAddressString(config.Host) + if err != nil { + return nil, err + } + transport := &ObservingTransport{ Base: &http.Transport{ Proxy: proxyFunc, TLSClientConfig: tlsConfig, + DialContext: func(ctx context.Context, _, + _ string) (net.Conn, error) { + d := &net.Dialer{} + return d.DialContext( + ctx, + parsedDialAddr.Network(), + parsedDialAddr.String(), + ) + }, }, // Wrap with response capturing if callback provided OnResponseCapture: config.OnResponseCapture, @@ -1367,11 +1398,38 @@ func newHTTPClient(config *ConnConfig) (*http.Client, error) { client := http.Client{ Transport: transport, + Timeout: defaultHTTPTimeout, } return &client, nil } +// httpURL returns the URL to use for HTTP POST requests. +func (config *ConnConfig) httpURL() (string, error) { + protocol := "http" + if !config.DisableTLS { + protocol = "https" + } + + parsedAddr, err := ParseAddressString(config.Host) + if err != nil { + return "", fmt.Errorf("error parsing host '%v': %v", + config.Host, err) + } + + var httpURL string + switch parsedAddr.Network() { + case "unix", "unixpacket": + // Using a placeholder URL because a non-empty URL is required. + // The Unix domain socket is specified in the DialContext. + httpURL = protocol + "://unix" + default: + httpURL = protocol + "://" + config.Host + } + + return httpURL, nil +} + // dial opens a websocket connection using the passed connection configuration // details. 
func dial(config *ConnConfig) (*websocket.Conn, error) { @@ -1504,8 +1562,12 @@ func New(config *ConnConfig, ntfnHandlers *NotificationHandlers) (*Client, error client.chainParams = &chaincfg.MainNetParams case chaincfg.TestNet3Params.Name: client.chainParams = &chaincfg.TestNet3Params + case chaincfg.TestNet4Params.Name: + client.chainParams = &chaincfg.TestNet4Params case chaincfg.RegressionNetParams.Name: client.chainParams = &chaincfg.RegressionNetParams + case chaincfg.SigNetParams.Name: + client.chainParams = &chaincfg.SigNetParams case chaincfg.SimNetParams.Name: client.chainParams = &chaincfg.SimNetParams default: @@ -1651,7 +1713,15 @@ func (c *Client) BackendVersion() (BackendVersion, error) { return c.backendVersion, nil } -func (c *Client) sendAsync() FutureGetBulkResult { +func (c *Client) sendAsync() (FutureGetBulkResult, error) { + c.batchLock.Lock() + defer c.batchLock.Unlock() + + // If batchList is empty, there's nothing to send. + if c.batchList.Len() == 0 { + return nil, ErrEmptyBatch + } + // convert the array of marshalled json requests to a single request we can send responseChan := make(chan *Response, 1) marshalledRequest := []byte("[") @@ -1673,25 +1743,24 @@ func (c *Client) sendAsync() FutureGetBulkResult { responseChan: responseChan, } c.sendPostRequest(&request) - return responseChan + return responseChan, nil } // Marshall's bulk requests and sends to the server // creates a response channel to receive the response func (c *Client) Send() error { - // if batchlist is empty, there's nothing to send - if c.batchList.Len() == 0 { - return nil + future, err := c.sendAsync() + if err != nil { + return err } - batchResp, err := c.sendAsync().Receive() + batchResp, err := future.Receive() if err != nil { // Clear batchlist in case of an error. - // - // TODO(yy): need to double check to make sure there's no - // concurrent access to this batch list, otherwise we may miss - // some batched requests. 
+ + c.batchLock.Lock() c.batchList = list.New() + c.batchLock.Unlock() return err } @@ -1701,6 +1770,10 @@ func (c *Client) Send() error { // Perform a GC on batchList and requestMap before moving // forward. request := c.removeRequest(id) + if request == nil { + // Perhaps another goroutine has already processed this request. + continue + } // If there's an error, we log it and continue to the next // request. @@ -1727,3 +1800,75 @@ func (c *Client) Send() error { return nil } + +// cutPrefix returns s without the provided leading prefix string +// and reports whether it found the prefix. +// If s doesn't start with prefix, cutPrefix returns s, false. +// If prefix is the empty string, cutPrefix returns s, true. +// Copied from go1.20 version. +func cutPrefix(s, prefix string) (after string, found bool) { + if !strings.HasPrefix(s, prefix) { + return s, false + } + return s[len(prefix):], true +} + +// ParseAddressString converts an address in string format to a net.Addr that is +// compatible with btcd. UDP is not supported because btcd needs reliable +// connections. +func ParseAddressString(strAddress string) (net.Addr, error) { + // Addresses can either be in unix://address, unixpacket://address URL + // format, or just address:port host format for tcp. + if after, ok := cutPrefix(strAddress, "unix://"); ok { + return net.ResolveUnixAddr("unix", after) + } + if after, ok := cutPrefix(strAddress, "unixpacket://"); ok { + return net.ResolveUnixAddr("unixpacket", after) + } + + if strings.Contains(strAddress, "://") { + // Not supporting :// anywhere in the host or path. + return nil, fmt.Errorf("unsupported protocol in address: %s", + strAddress) + } + + // Parse it as a dummy URL to get the host and port. + u, err := url.Parse("dummy://" + strAddress) + if err != nil { + return nil, err + } + return net.ResolveTCPAddr("tcp", verifyPort(u.Host)) +} + +// verifyPort makes sure that an address string has both a host and a port. 
+// If the address is just a port, then we'll assume that the user is using the +// shortcut to specify a localhost:port address. +func verifyPort(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + // If the address itself is just an integer, then we'll assume + // that we're mapping this directly to a localhost:port pair. + // This ensures we maintain the legacy behavior. + if _, err := strconv.Atoi(address); err == nil { + return net.JoinHostPort("localhost", address) + } + + // Otherwise, we'll assume that the address just failed to + // attach its own port, so we'll leave it as is. In the + // case of IPv6 addresses, if the host is already surrounded by + // brackets, then we'll avoid using the JoinHostPort function, + // since it will always add a pair of brackets. + if strings.HasPrefix(address, "[") { + return address + } + return net.JoinHostPort(address, "") + } + + // In the case that both the host and port are empty, we'll use an empty + // port. + if host == "" && port == "" { + return ":" + } + + return address +} diff --git a/rpcclient/infrastructure_test.go b/rpcclient/infrastructure_test.go new file mode 100644 index 0000000000..8416b7ad3c --- /dev/null +++ b/rpcclient/infrastructure_test.go @@ -0,0 +1,110 @@ +package rpcclient + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestParseAddressString checks different variation of supported and +// unsupported addresses. +func TestParseAddressString(t *testing.T) { + t.Parallel() + + // Using localhost only to avoid network calls. 
+ testCases := []struct { + name string + addressString string + expNetwork string + expAddress string + expErrStr string + }{ + { + name: "localhost", + addressString: "localhost", + expNetwork: "tcp", + expAddress: "127.0.0.1:0", + }, + { + name: "localhost ip", + addressString: "127.0.0.1", + expNetwork: "tcp", + expAddress: "127.0.0.1:0", + }, + { + name: "localhost ipv6", + addressString: "::1", + expNetwork: "tcp", + expAddress: "[::1]:0", + }, + { + name: "localhost and port", + addressString: "localhost:80", + expNetwork: "tcp", + expAddress: "127.0.0.1:80", + }, + { + name: "localhost ipv6 and port", + addressString: "[::1]:80", + expNetwork: "tcp", + expAddress: "[::1]:80", + }, + { + name: "colon and port", + addressString: ":80", + expNetwork: "tcp", + expAddress: ":80", + }, + { + name: "colon only", + addressString: ":", + expNetwork: "tcp", + expAddress: ":0", + }, + { + name: "localhost and path", + addressString: "localhost/path", + expNetwork: "tcp", + expAddress: "127.0.0.1:0", + }, + { + name: "localhost port and path", + addressString: "localhost:80/path", + expNetwork: "tcp", + expAddress: "127.0.0.1:80", + }, + { + name: "unix prefix", + addressString: "unix://the/rest/of/the/path", + expNetwork: "unix", + expAddress: "the/rest/of/the/path", + }, + { + name: "unix prefix", + addressString: "unixpacket://the/rest/of/the/path", + expNetwork: "unixpacket", + expAddress: "the/rest/of/the/path", + }, + { + name: "error http prefix", + addressString: "http://localhost:1010", + expErrStr: "unsupported protocol in address", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + addr, err := ParseAddressString(tc.addressString) + if tc.expErrStr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expErrStr) + return + } + require.NoError(t, err) + require.Equal(t, tc.expNetwork, addr.Network()) + require.Equal(t, tc.expAddress, addr.String()) + }) + } +} diff --git a/rpcserver.go 
b/rpcserver.go index a3c4062bcc..bec0fad82c 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -15,7 +15,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math/big" "math/rand" "net" @@ -1259,6 +1258,9 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str case chaincfg.DeploymentTestDummyMinActivation: forkName = "dummy-min-activation" + case chaincfg.DeploymentTestDummyAlwaysActive: + forkName = "dummy-always-active" + case chaincfg.DeploymentCSV: forkName = "csv" @@ -2359,7 +2361,7 @@ func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in Connections: s.cfg.ConnMgr.ConnectedCount(), Proxy: cfg.Proxy, Difficulty: getDifficultyRatio(best.Bits, s.cfg.ChainParams), - TestNet: cfg.TestNet3, + TestNet: cfg.TestNet3 || cfg.TestNet4, RelayFee: cfg.minRelayTxFee.ToBTC(), } @@ -2414,7 +2416,7 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{ HashesPerSec: s.cfg.CPUMiner.HashesPerSecond(), NetworkHashPS: networkHashesPerSec, PooledTx: uint64(s.cfg.TxMemPool.Count()), - TestNet: cfg.TestNet3, + TestNet: cfg.TestNet3 || cfg.TestNet4, } return &result, nil } @@ -2586,6 +2588,7 @@ func handleGetPeerInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) BanScore: int32(p.BanScore()), FeeFilter: p.FeeFilter(), SyncNode: statsSnap.ID == syncPeerID, + V2Connection: statsSnap.V2Connection, } if p.ToPeer().LastPingNonce() != 0 { wait := float64(time.Since(statsSnap.LastPingTime).Nanoseconds()) @@ -3595,14 +3598,7 @@ func handleSignMessageWithPrivKey(s *rpcServer, cmd interface{}, closeChan <-cha wire.WriteVarString(&buf, 0, c.Message) messageHash := chainhash.DoubleHashB(buf.Bytes()) - sig, err := ecdsa.SignCompact(wif.PrivKey, - messageHash, wif.CompressPubKey) - if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, - Message: "Sign failed", - } - } + sig := ecdsa.SignCompact(wif.PrivKey, messageHash, wif.CompressPubKey) return 
base64.StdEncoding.EncodeToString(sig), nil } @@ -4342,7 +4338,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin } // Read and close the JSON-RPC request body from the caller. - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) r.Body.Close() if err != nil { errCode := http.StatusBadRequest diff --git a/rpcserverhelp.go b/rpcserverhelp.go index 71f96e99fd..133fb48bf8 100644 --- a/rpcserverhelp.go +++ b/rpcserverhelp.go @@ -500,6 +500,7 @@ var helpDescsEnUS = map[string]string{ "getpeerinforesult-banscore": "The ban score", "getpeerinforesult-feefilter": "The requested minimum fee a transaction must have to be announced to the peer", "getpeerinforesult-syncnode": "Whether or not the peer is the sync peer", + "getpeerinforesult-v2_connection": "Whether or not the peer is a v2 connection", // GetPeerInfoCmd help. "getpeerinfo--synopsis": "Returns data about each connected network peer as an array of json objects.", diff --git a/sample-btcd.conf b/sample-btcd.conf index 74b20e1660..103f75b827 100644 --- a/sample-btcd.conf +++ b/sample-btcd.conf @@ -13,6 +13,12 @@ ; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows. ; datadir=~/.btcd/data +; The prune option removes old blocks from disk after they're downloaded and +; verified. The smallest value is 1536 which will limit the block data to 1536 +; mebibytes. +; NOTE: This limit does not apply to indexes and the UTXO set which are both +; larger than 1536 mebibytes as of December 2024. +; prune=1536 ; ------------------------------------------------------------------------------ ; Network settings @@ -170,6 +176,11 @@ ; Disable committed peer filtering (CF). ; nocfilters=1 +; Enable or disable the P2P v2 encrypted transport protocol (BIP324). +; If disabled (which is the default), btcd will only attempt to use the +; v1 P2P protocol. 
(default: 0) +; v2transport=0 + ; ------------------------------------------------------------------------------ ; RPC server options - The following options control the built-in RPC server ; which is used to control and query information from a running btcd process. diff --git a/server.go b/server.go index 9fca2db20c..40755c8e9b 100644 --- a/server.go +++ b/server.go @@ -45,7 +45,7 @@ const ( // defaultServices describes the default services that are supported by // the server. defaultServices = wire.SFNodeNetwork | wire.SFNodeNetworkLimited | - wire.SFNodeBloom | wire.SFNodeWitness | wire.SFNodeCF + wire.SFNodeBloom | wire.SFNodeWitness | wire.SFNodeCF | wire.SFNodeP2PV2 // defaultRequiredServices describes the default services that are // required to be supported by outbound peers. @@ -217,6 +217,7 @@ type server struct { txMemPool *mempool.TxPool cpuMiner *cpuminer.CPUMiner modifyRebroadcastInv chan interface{} + p2pDowngrader *peer.P2PDowngrader newPeers chan *serverPeer donePeers chan *serverPeer banPeers chan *serverPeer @@ -457,6 +458,13 @@ func hasServices(advertised, desired wire.ServiceFlag) bool { return advertised&desired == desired } +// ShouldReconnectV1 is invoked when we need to determine if we are going to +// reconnect to an outbound peer. This will return true if we attempted to +// connect to the peer using the v2 transport, and need to fall back to v1. +func (sp *serverPeer) ShouldReconnectV1() bool { + return sp.ShouldDowngradeToV1() +} + // OnVersion is invoked when a peer receives a version bitcoin message // and is used to negotiate the protocol version details as well as kick start // the communications. @@ -679,85 +687,142 @@ func (sp *serverPeer) OnHeaders(_ *peer.Peer, msg *wire.MsgHeaders) { sp.server.syncManager.QueueHeaders(msg, sp.Peer) } -// handleGetData is invoked when a peer receives a getdata bitcoin message and -// is used to deliver block and transaction information. 
+// OnGetData is invoked when a peer receives a getdata bitcoin message and is +// used to deliver block and transaction information. func (sp *serverPeer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) { - numAdded := 0 - notFound := wire.NewMsgNotFound() + // failedMsg is an inventory that stores all the failed msgs - either + // the msg is an unknown type, or there's an error processing it. + failedMsg := wire.NewMsgNotFound() length := len(msg.InvList) - // A decaying ban score increase is applied to prevent exhausting resources - // with unusually large inventory queries. - // Requesting more than the maximum inventory vector length within a short - // period of time yields a score above the default ban threshold. Sustained - // bursts of small requests are not penalized as that would potentially ban - // peers performing IBD. + + // A decaying ban score increase is applied to prevent exhausting + // resources with unusually large inventory queries. + // + // Requesting more than the maximum inventory vector length within a + // short period of time yields a score above the default ban threshold. + // Sustained bursts of small requests are not penalized as that would + // potentially ban peers performing IBD. + // // This incremental score decays each minute to half of its value. if sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata") { return } - // We wait on this wait channel periodically to prevent queuing - // far more data than we can send in a reasonable time, wasting memory. - // The waiting occurs after the database fetch for the next one to - // provide a little pipelining. - var waitChan chan struct{} - doneChan := make(chan struct{}, 1) + // We wait on this wait channel periodically to prevent queuing far + // more data than we can send in a reasonable time, wasting memory. The + // waiting occurs after the database fetch for the next one to provide + // a little pipelining. 
+ + // We now create a doneChans with a size of 5, which essentially + // behaves like a semaphore that allows 5 goroutines to be running at + // the same time. + const numBuffered = 5 + doneChans := make([]chan struct{}, 0, numBuffered) for i, iv := range msg.InvList { - var c chan struct{} - // If this will be the last message we send. - if i == length-1 && len(notFound.InvList) == 0 { - c = doneChan - } else if (i+1)%3 == 0 { - // Buffered so as to not make the send goroutine block. - c = make(chan struct{}, 1) + // doneChan behaves like a semaphore - every time a msg is + // processed, either succeeded or failed, a signal is sent to + // this doneChan. + doneChan := make(chan struct{}, 1) + + // Add this doneChan for tracking. + doneChans = append(doneChans, doneChan) + + err := sp.server.pushInventory(sp, iv, doneChan) + if err != nil { + failedMsg.AddInvVect(iv) } - var err error - switch iv.Type { - case wire.InvTypeWitnessTx: - err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding) - case wire.InvTypeTx: - err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding) - case wire.InvTypeWitnessBlock: - err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding) - case wire.InvTypeBlock: - err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding) - case wire.InvTypeFilteredWitnessBlock: - err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding) - case wire.InvTypeFilteredBlock: - err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding) - default: - peerLog.Warnf("Unknown type in inventory request %d", - iv.Type) + + // Move to the next item if we haven't processed 5 times yet. 
+ if (i+1)%numBuffered != 0 { continue } - if err != nil { - notFound.AddInvVect(iv) - - // When there is a failure fetching the final entry - // and the done channel was sent in due to there - // being no outstanding not found inventory, consume - // it here because there is now not found inventory - // that will use the channel momentarily. - if i == len(msg.InvList)-1 && c != nil { - <-c + + // Empty all the slots. + for _, dc := range doneChans { + select { + // NOTE: We always expect am empty struct to be sent to + // this doneChan, even when `pushInventory` failed. + case <-dc: + + // Exit if the server is shutting down. + case <-sp.quit: + peerLog.Debug("Server shutting down in " + + "OnGetData") + + return } } - numAdded++ - waitChan = c + + // Re-initialize the done chans. + doneChans = make([]chan struct{}, 0, numBuffered) } - if len(notFound.InvList) != 0 { - sp.QueueMessage(notFound, doneChan) + + if len(failedMsg.InvList) != 0 { + doneChan := make(chan struct{}, 1) + + // Add this doneChan for tracking. + doneChans = append(doneChans, doneChan) + + // Send the failed msgs. + sp.QueueMessage(failedMsg, doneChan) } - // Wait for messages to be sent. We can send quite a lot of data at this - // point and this will keep the peer busy for a decent amount of time. - // We don't process anything else by them in this time so that we + // Wait for messages to be sent. We can send quite a lot of data at + // this point and this will keep the peer busy for a decent amount of + // time. We don't process anything else by them in this time so that we // have an idea of when we should hear back from them - else the idle // timeout could fire when we were only half done sending the blocks. - if numAdded > 0 { - <-doneChan + for _, dc := range doneChans { + select { + case <-dc: + + // Exit if the server is shutting down. 
+ case <-sp.quit: + peerLog.Debug("Server shutting down in OnGetData") + return + } + } +} + +// pushInventory sends the requested inventory to the given peer. +func (s *server) pushInventory(sp *serverPeer, iv *wire.InvVect, + doneChan chan<- struct{}) error { + + switch iv.Type { + case wire.InvTypeWitnessTx: + return s.pushTxMsg(sp, &iv.Hash, doneChan, wire.WitnessEncoding) + + case wire.InvTypeTx: + return s.pushTxMsg(sp, &iv.Hash, doneChan, wire.BaseEncoding) + + case wire.InvTypeWitnessBlock: + return s.pushBlockMsg( + sp, &iv.Hash, doneChan, wire.WitnessEncoding, + ) + + case wire.InvTypeBlock: + return s.pushBlockMsg(sp, &iv.Hash, doneChan, wire.BaseEncoding) + + case wire.InvTypeFilteredWitnessBlock: + return s.pushMerkleBlockMsg( + sp, &iv.Hash, doneChan, wire.WitnessEncoding, + ) + + case wire.InvTypeFilteredBlock: + return s.pushMerkleBlockMsg( + sp, &iv.Hash, doneChan, wire.BaseEncoding, + ) + + default: + peerLog.Warnf("Unknown type in inventory request %d", iv.Type) + + if doneChan != nil { + doneChan <- struct{}{} + } + + return errors.New("unknown inventory type") } } @@ -1528,8 +1593,8 @@ func (s *server) TransactionConfirmed(tx *btcutil.Tx) { // pushTxMsg sends a tx message for the provided transaction hash to the // connected peer. An error is returned if the transaction hash is not known. -func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{}, - waitChan <-chan struct{}, encoding wire.MessageEncoding) error { +func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, + doneChan chan<- struct{}, encoding wire.MessageEncoding) error { // Attempt to fetch the requested transaction from the pool. A // call could be made to check for existence first, but simply trying @@ -1545,11 +1610,6 @@ func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- return err } - // Once we have fetched data wait for any previous operation to finish. 
- if waitChan != nil { - <-waitChan - } - sp.QueueMessageWithEncoding(tx.MsgTx(), doneChan, encoding) return nil @@ -1557,8 +1617,8 @@ func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- // pushBlockMsg sends a block message for the provided block hash to the // connected peer. An error is returned if the block hash is not known. -func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{}, - waitChan <-chan struct{}, encoding wire.MessageEncoding) error { +func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, + doneChan chan<- struct{}, encoding wire.MessageEncoding) error { // Fetch the raw block bytes from the database. var blockBytes []byte @@ -1590,11 +1650,6 @@ func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan cha return err } - // Once we have fetched data wait for any previous operation to finish. - if waitChan != nil { - <-waitChan - } - // We only send the channel for this message if we aren't sending // an inv straight after. var dc chan<- struct{} @@ -1626,7 +1681,7 @@ func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan cha // loaded, this call will simply be ignored if there is no filter loaded. An // error is returned if the block hash is not known. func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash, - doneChan chan<- struct{}, waitChan <-chan struct{}, encoding wire.MessageEncoding) error { + doneChan chan<- struct{}, encoding wire.MessageEncoding) error { // Do not send a response if the peer doesn't have a filter loaded. if !sp.filter.IsLoaded() { @@ -1652,11 +1707,6 @@ func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash, // to the filter for the peer. merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, sp.filter) - // Once we have fetched data wait for any previous operation to finish. - if waitChan != nil { - <-waitChan - } - // Send the merkleblock. 
Only send the done channel with this message // if no transactions will be sent afterwards. var dc chan<- struct{} @@ -1833,9 +1883,19 @@ func (s *server) handleDonePeerMsg(state *peerState, sp *serverPeer) { // our connection manager about the disconnection. This can happen if we // process a peer's `done` message before its `add`. if !sp.Inbound() { - if sp.persistent { + switch { + case sp.persistent: s.connManager.Disconnect(sp.connReq.ID()) - } else { + + // If this isn't a persistent peer, but we failed a v2 + // handshake, then we'll disconnect, but trigger a reconnect so + // we can use v1 instead. + case sp.ShouldReconnectV1(): + s.connManager.Disconnect( + sp.connReq.ID(), connmgr.WithTriggerReconnect(), + ) + + default: s.connManager.Remove(sp.connReq.ID()) go s.connManager.NewConnReq() } @@ -2145,12 +2205,6 @@ func newPeerConfig(sp *serverPeer) *peer.Config { OnRead: sp.OnRead, OnWrite: sp.OnWrite, OnNotFound: sp.OnNotFound, - - // Note: The reference client currently bans peers that send alerts - // not signed with its key. We could verify against their key, but - // since the reference client is currently unwilling to support - // other implementations' alert messages, we will not relay theirs. - OnAlert: nil, }, NewestBlock: sp.newestBlock, HostToNetAddress: sp.server.addrManager.HostToNetAddress, @@ -2164,6 +2218,7 @@ func newPeerConfig(sp *serverPeer) *peer.Config { ProtocolVersion: peer.MaxProtocolVersion, TrickleInterval: cfg.TrickleInterval, DisableStallHandler: cfg.DisableStallHandler, + UsingV2Conn: cfg.V2Transport, } } @@ -2185,10 +2240,26 @@ func (s *server) inboundPeerConnected(conn net.Conn) { // request instance and the connection itself, and finally notifies the address // manager of the attempt. func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) { + // Just an alias. 
+ peerAddr := c.Addr.String() sp := newServerPeer(s, c.Permanent) - p, err := peer.NewOutboundPeer(newPeerConfig(sp), c.Addr.String()) + + peerCfg := newPeerConfig(sp) + + // Check with the P2PDowngrader if this connection attempt should be + // forced to v1. + if s.p2pDowngrader.ShouldDowngrade(peerAddr) { + srvrLog.Infof("Forcing V1 connection to %s as requested by "+ + "P2P downgrader.", peerAddr) + + peerCfg.UsingV2Conn = false + } + + p, err := peer.NewOutboundPeer(peerCfg, peerAddr) if err != nil { - srvrLog.Debugf("Cannot create outbound peer %s: %v", c.Addr, err) + srvrLog.Debugf("Cannot create outbound peer %s: %v", + c.Addr, err) + if c.Permanent { s.connManager.Disconnect(c.ID()) } else { @@ -2197,6 +2268,7 @@ func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) { } return } + sp.Peer = p sp.connReq = c sp.isWhitelisted = isWhitelisted(conn.RemoteAddr()) @@ -2208,6 +2280,18 @@ func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) { // done along with other performing other desirable cleanup. func (s *server) peerDoneHandler(sp *serverPeer) { sp.WaitForDisconnect() + + // If this is an outbound peer and the shouldDowngradeToV1 bool is set + // on the underlying Peer, trigger a reconnect using the OG v1 + // connection scheme. + if !sp.Inbound() && sp.Peer.ShouldDowngradeToV1() { + srvrLog.Infof("Peer %s indicated v2->v1 downgrade. "+ + "Marking for next attempt as v1.", sp.Addr()) + + s.p2pDowngrader.MarkForDowngrade(sp.Addr()) + } + + // This is sent to a buffered channel, so it may not execute immediately. s.donePeers <- sp // Only tell sync manager we are gone if we ever told it we existed. @@ -2483,6 +2567,7 @@ func (s *server) Start() { // the RPC server are rebroadcast until being included in a block. 
go s.rebroadcastHandler() + s.rpcServer.cfg.StartupTime = s.startupTime s.rpcServer.Start() } @@ -2733,6 +2818,9 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, if cfg.Prune != 0 { services &^= wire.SFNodeNetwork } + if !cfg.V2Transport { + services &^= wire.SFNodeP2PV2 + } amgr := addrmgr.New(cfg.DataDir, btcdLookup) @@ -3009,6 +3097,8 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, } s.connManager = cmgr + s.p2pDowngrader = peer.NewP2PDowngrader(uint(targetOutbound) + 1) + // Start up persistent peers. permanentPeers := cfg.ConnectPeers if len(permanentPeers) == 0 { diff --git a/txscript/hashcache.go b/txscript/hashcache.go index 4a5f88fb24..d3f5774cbf 100644 --- a/txscript/hashcache.go +++ b/txscript/hashcache.go @@ -7,6 +7,7 @@ package txscript import ( "bytes" "encoding/binary" + "maps" "math" "sync" @@ -142,9 +143,7 @@ func (m *MultiPrevOutFetcher) AddPrevOut(op wire.OutPoint, txOut *wire.TxOut) { // Merge merges two instances of a MultiPrevOutFetcher into a single source. func (m *MultiPrevOutFetcher) Merge(other *MultiPrevOutFetcher) { - for k, v := range other.prevOuts { - m.prevOuts[k] = v - } + maps.Copy(m.prevOuts, other.prevOuts) } // A compile-time assertion to ensure that MultiPrevOutFetcher matches the diff --git a/txscript/opcode.go b/txscript/opcode.go index 1cd3ba24fb..770e5b470d 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -1989,7 +1989,7 @@ func opcodeCheckSig(op *opcode, data []byte, vm *Engine) error { return err } - // The signature actually needs needs to be longer than this, but at + // The signature actually needs to be longer than this, but at // least 1 byte is needed for the hash type below. The full length is // checked depending on the script flags and upon parsing the signature. 
// diff --git a/txscript/taproot.go b/txscript/taproot.go index ee26cae967..b258fbbbf6 100644 --- a/txscript/taproot.go +++ b/txscript/taproot.go @@ -353,7 +353,10 @@ func VerifyTaprootLeafCommitment(controlBlock *ControlBlock, expectedWitnessProgram := schnorr.SerializePubKey(taprootKey) if !bytes.Equal(expectedWitnessProgram, taprootWitnessProgram) { - return scriptError(ErrTaprootMerkleProofInvalid, "") + str := fmt.Sprintf("derived witness program: %x, expected: "+ + "%x, using tapscript_root: %x", expectedWitnessProgram, + taprootWitnessProgram, rootHash) + return scriptError(ErrTaprootMerkleProofInvalid, str) } // Finally, we'll verify that the parity of the y coordinate of the diff --git a/txscript/taproot_test.go b/txscript/taproot_test.go index 9c5bb573a4..1a535de73a 100644 --- a/txscript/taproot_test.go +++ b/txscript/taproot_test.go @@ -293,13 +293,15 @@ func TestTapscriptCommitmentVerification(t *testing.T) { // make from 0 to 1 leaf // ensure verifies properly testCases := []struct { + treeMutateFunc func(*IndexedTapScriptTree) + + ctrlBlockMutateFunc func(*ControlBlock) + numLeaves int valid bool - treeMutateFunc func(*IndexedTapScriptTree) - - ctrlBlockMutateFunc func(*ControlBlock) + expectedErr ErrorCode }{ // A valid merkle proof of a single leaf. { @@ -322,11 +324,13 @@ func TestTapscriptCommitmentVerification(t *testing.T) { // An invalid merkle proof, we modify the last byte of one of // the leaves. { - numLeaves: 4, - valid: false, + numLeaves: 4, + valid: false, + expectedErr: ErrTaprootMerkleProofInvalid, treeMutateFunc: func(t *IndexedTapScriptTree) { for _, leafProof := range t.LeafMerkleProofs { - leafProof.InclusionProof[0] ^= 1 + proofLen := len(leafProof.InclusionProof) + leafProof.InclusionProof[proofLen-1] ^= 1 } }, }, @@ -335,8 +339,9 @@ func TestTapscriptCommitmentVerification(t *testing.T) { // An invalid series of proofs, we modify the control // block to not match the parity of the final output // key commitment. 
- numLeaves: 2, - valid: false, + numLeaves: 2, + valid: false, + expectedErr: ErrTaprootOutputKeyParityMismatch, ctrlBlockMutateFunc: func(c *ControlBlock) { c.OutputKeyYIsOdd = !c.OutputKeyYIsOdd }, @@ -391,6 +396,15 @@ func TestTapscriptCommitmentVerification(t *testing.T) { "valid=%v, got valid=%v", testCase.valid, valid) } + + if !valid { + if !IsErrorCode(err, testCase.expectedErr) { + t.Fatalf("expected error "+ + "code %v, got %v", + testCase.expectedErr, + err) + } + } } // TODO(roasbeef): index correctness diff --git a/txscript/template.go b/txscript/template.go new file mode 100644 index 0000000000..cc30796d1c --- /dev/null +++ b/txscript/template.go @@ -0,0 +1,236 @@ +package txscript + +import ( + "bufio" + "bytes" + "encoding/hex" + "fmt" + "html/template" + "strconv" + "strings" +) + +// ScriptTemplateOpt is a function type for configuring the script template. +type ScriptTemplateOption func(*templateConfig) + +// templateConfig holds the configuration for the script template. +type templateConfig struct { + params map[string]interface{} + + customFuncs template.FuncMap +} + +// WithScriptTemplateParams adds parameters to the script template. +func WithScriptTemplateParams(params map[string]interface{}) ScriptTemplateOption { + return func(cfg *templateConfig) { + for k, v := range params { + cfg.params[k] = v + } + } +} + +// WithCustomTemplateFunc adds a custom function to the template. +func WithCustomTemplateFunc(name string, fn interface{}) ScriptTemplateOption { + return func(cfg *templateConfig) { + cfg.customFuncs[name] = fn + } +} + +// ScriptTemplate processes a script template with parameters and returns the +// corresponding script bytes. This functions allows Bitcoin scripts to be +// created using a DSL-like syntax, based on Go's templating system. 
+//
+// An example of a simple p2pkh template would be:
+//
+//	`OP_DUP OP_HASH160 0x14e8948c7afa71b6e6fad621256474b5959e0305 OP_EQUALVERIFY OP_CHECKSIG`
+//
+// Strings that have the `0x` prefix are assumed to be byte strings to be
+// pushed on top of the stack. Integers can be passed as normal. If a value
+// can't be parsed as an integer, then it's assumed that it's a byte slice
+// without the 0x prefix.
+//
+// Normal go template operations can be used as well. The params argument
+// houses parameters to pass into the script, for example a local variable
+// storing a computed public key.
+func ScriptTemplate(scriptTmpl string, opts ...ScriptTemplateOption) ([]byte, error) {
+	cfg := &templateConfig{
+		params:      make(map[string]interface{}),
+		customFuncs: make(template.FuncMap),
+	}
+
+	for _, opt := range opts {
+		opt(cfg)
+	}
+
+	funcMap := template.FuncMap{
+		"hex":        hexEncode,
+		"hex_str":    hexStr,
+		"unhex":      hexDecode,
+		"range_iter": rangeIter,
+	}
+
+	for k, v := range cfg.customFuncs {
+		funcMap[k] = v
+	}
+
+	tmpl, err := template.New("script").Funcs(funcMap).Parse(scriptTmpl)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse template: %w", err)
+	}
+
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, cfg.params); err != nil {
+		return nil, fmt.Errorf("failed to execute template: %w", err)
+	}
+
+	return processScript(buf.String())
+}
+
+// looksLikeInt checks if a string looks like an integer.
+func looksLikeInt(s string) bool {
+	// Check if the string starts with an optional sign.
+	if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
+		s = s[1:]
+	}
+
+	// Check if the remaining string contains only digits.
+	for _, c := range s {
+		if c < '0' || c > '9' {
+			return false
+		}
+	}
+
+	return len(s) > 0
+}
+
+
+// processScript converts the template output to actual script bytes. We scan
+// each line, then go through each element one by one, deciding to either add a
+// normal op code, a push data, or an integer value.
+func processScript(script string) ([]byte, error) { + var builder ScriptBuilder + + // We'll a bufio scanner to take care of some of the parsing for us. + // bufio.ScanWords will split on word boundaries, based on unicode + // characters. + scanner := bufio.NewScanner(strings.NewReader(script)) + scanner.Split(bufio.ScanWords) + + // Run through each word, deciding if we should add an op code, a push + // data, or an integer value. + for scanner.Scan() { + token := scanner.Text() + switch { + // If it starts with OP_, then we'll try to parse out the op + // code. + case strings.HasPrefix(token, "OP_"): + opcode, ok := OpcodeByName[token] + if !ok { + return nil, fmt.Errorf("unknown opcode: "+ + "%s", token) + } + + builder.AddOp(opcode) + + // If it has an 0x prefix, then we'll try to decode it as a hex + // string to push data. + case strings.HasPrefix(token, "0x"): + data, err := hex.DecodeString( + strings.TrimPrefix(token, "0x"), + ) + if err != nil { + return nil, fmt.Errorf("invalid hex "+ + "data: %s", token) + } + + builder.AddData(data) + + // Next, we'll try to parse ints for the integer op code. + case looksLikeInt(token): + val, err := strconv.ParseInt(token, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid "+ + "integer: %s", token) + } + + builder.AddInt64(val) + + // Otherwise, we assume it's a byte string without the 0x + // prefix. + default: + data, err := hex.DecodeString(token) + if err != nil { + return nil, fmt.Errorf("invalid token: %s", + token) + } + + builder.AddData(data) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading script: %w", err) + } + + return builder.Script() +} + +// rangeIter is useful for being able to execute a bounded for loop. +func rangeIter(start, end int) []int { + var result []int + + for i := start; i < end; i++ { + result = append(result, i) + } + + return result +} + +// hexEncode is a helper function to encode bytes to hex in templates. 
+// It adds the "0x" prefix to ensure the output is processed as hex data +// and not misinterpreted as an integer. +func hexEncode(data []byte) string { + return "0x" + hex.EncodeToString(data) +} + +// hexStr is a helper function to encode bytes to a raw hex string in templates +// without the "0x" prefix. +func hexStr(data []byte) string { + return hex.EncodeToString(data) +} + +// hexDecode is a helper function to decode hex to bytes in templates +func hexDecode(s string) ([]byte, error) { + return hex.DecodeString(strings.TrimPrefix(s, "0x")) +} + +// Example usage: +func ExampleScriptTemplate() { + localPubkey, _ := hex.DecodeString("14e8948c7afa71b6e6fad621256474b5959e0305") + + scriptBytes, err := ScriptTemplate(` + OP_DUP OP_HASH160 0x14e8948c7afa71b6e6fad621256474b5959e0305 OP_EQUALVERIFY OP_CHECKSIG + OP_DUP OP_HASH160 {{ hex .LocalPubkeyHash }} OP_EQUALVERIFY OP_CHECKSIG + {{ .Timeout }} OP_CHECKLOCKTIMEVERIFY OP_DROP + + {{- range $i := range_iter 0 3 }} + {{ add 10 $i }} OP_ADD + {{- end }}`, + WithScriptTemplateParams(map[string]interface{}{ + "LocalPubkeyHash": localPubkey, + "Timeout": 1, + }), + ) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + + asmScript, err := DisasmString(scriptBytes) + if err != nil { + fmt.Printf("Error converting to ASM: %v\n", err) + return + } + + fmt.Printf("Script ASM:\n%s\n", asmScript) +} diff --git a/txscript/template_test.go b/txscript/template_test.go new file mode 100644 index 0000000000..2d27604759 --- /dev/null +++ b/txscript/template_test.go @@ -0,0 +1,344 @@ +package txscript + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestScriptTemplateLooksLikeInt tests the looksLikeInt function. 
+func TestScriptTemplateLooksLikeInt(t *testing.T) { + tests := []struct { + input string + expected bool + }{ + {"123", true}, + {"-123", true}, + {"+123", true}, + {"0", true}, + {"+0", true}, + {"-0", true}, + {"abc", false}, + {"12a", false}, + {"", false}, + {"+", false}, + {"-", false}, + {"++123", false}, + {"--123", false}, + {"+-123", false}, + {"1.23", false}, + } + + for _, test := range tests { + result := looksLikeInt(test.input) + require.Equal( + t, test.expected, result, + "looksLikeInt(%q) = %v, want %v", + test.input, result, test.expected, + ) + } +} + +// TestScriptTemplate tests the ScriptTemplate function. +func TestScriptTemplate(t *testing.T) { + tests := []struct { + name string + template string + params map[string]interface{} + customFunc map[string]interface{} + expected string + wantErr bool + }{ + { + name: "simple P2PKH", + template: "OP_DUP OP_HASH160 " + + "0x14e8948c7afa71b6e6fad621256474b5959e0305 " + + "OP_EQUALVERIFY OP_CHECKSIG", + params: nil, + expected: "OP_DUP OP_HASH160 " + + "14e8948c7afa71b6e6fad621256474b5959e0305 " + + "OP_EQUALVERIFY OP_CHECKSIG", + wantErr: false, + }, + { + name: "with positive integer", + template: "123 OP_ADD", + params: nil, + expected: "7b OP_ADD", + wantErr: false, + }, + { + name: "with negative integer", + template: "-42 OP_ADD", + params: nil, + expected: "aa OP_ADD", + wantErr: false, + }, + { + name: "with zero bytes for OP_CHECKSIG", + template: "0x0000000000000000000000000000000000000000000000000000000000000000 OP_CHECKSIG", + params: nil, + expected: "0000000000000000000000000000000000000000000000000000000000000000 OP_CHECKSIG", + wantErr: false, + }, + { + name: "with hex template function for zero bytes", + template: "{{ hex .ZeroSig }} OP_CHECKSIG", + params: map[string]interface{}{ + "ZeroSig": make([]byte, 32), + }, + expected: "0000000000000000000000000000000000000000000000000000000000000000 OP_CHECKSIG", + wantErr: false, + }, + { + name: "with hex data without 0x prefix", + 
template: "abcdef OP_ADD", + params: nil, + expected: "abcdef OP_ADD", + wantErr: false, + }, + { + name: "with template parameter", + template: "OP_DUP OP_HASH160 {{ hex .Pubkey }} " + + "OP_EQUALVERIFY OP_CHECKSIG", + params: map[string]interface{}{ + "Pubkey": []byte{ + 0x14, 0xe8, 0x94, 0x8c, 0x7a, 0xfa, + 0x71, 0xb6, 0xe6, 0xfa, 0xd6, 0x21, + 0x25, 0x64, 0x74, 0xb5, 0x95, 0x9e, + 0x03, 0x05, + }, + }, + expected: "OP_DUP OP_HASH160 " + + "14e8948c7afa71b6e6fad621256474b5959e0305 " + + "OP_EQUALVERIFY OP_CHECKSIG", + wantErr: false, + }, + { + name: "with range iteration", + template: ` + {{ range $i := range_iter 1 4 }} + OP_DUP OP_HASH160 + {{ if eq $i 1 }} + 0x01 + {{ else if eq $i 2 }} + 0x02 + {{ else }} + 0x03 + {{ end }} + OP_EQUALVERIFY {{ end }} + OP_CHECKSIG`, + params: nil, + expected: "OP_DUP OP_HASH160 1 OP_EQUALVERIFY " + + "OP_DUP OP_HASH160 2 OP_EQUALVERIFY " + + "OP_DUP OP_HASH160 3 OP_EQUALVERIFY " + + "OP_CHECKSIG", + wantErr: false, + }, + { + name: "with custom function", + template: "{{ add 10 5 }} OP_DROP", + params: nil, + customFunc: map[string]interface{}{ + "add": func(a, b int) int { + return a + b + }, + }, + expected: "15 OP_DROP", + wantErr: false, + }, + { + name: "invalid opcode", + template: "OP_UNKNOWN", + params: nil, + wantErr: true, + }, + { + name: "invalid hex", + template: "0xZZ", + params: nil, + wantErr: true, + }, + { + name: "invalid integer", + template: "9999999999999999999999999999", + params: nil, + wantErr: true, + }, + { + name: "invalid token", + template: "not_hex_or_op", + params: nil, + wantErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var opts []ScriptTemplateOption + if test.params != nil { + opts = append( + opts, + WithScriptTemplateParams(test.params), + ) + } + + // Add custom functions if specified. 
+ for name, fn := range test.customFunc { + opts = append( + opts, WithCustomTemplateFunc(name, fn), + ) + } + + script, err := ScriptTemplate(test.template, opts...) + + if test.wantErr { + require.Error( + t, err, + "ScriptTemplate(%q) expected error, got nil", + test.template, + ) + + return + } + + require.NoError( + t, err, + "ScriptTemplate(%q) unexpected error", + test.template, + ) + + // Disassemble the script and compare the string + // representation. + disasm, err := DisasmString(script) + require.NoError(t, err, "Failed to disassemble script") + + require.Equal( + t, test.expected, disasm, + "ScriptTemplate(%q):\ngot: %s\nwant: %s", + test.template, disasm, test.expected, + ) + }) + } +} + +// TestScriptTemplateOptions tests the ScriptTemplate option functions. +func TestScriptTemplateOptions(t *testing.T) { + t.Run("WithScriptTemplateParams", func(t *testing.T) { + template := "{{ .Value }} OP_DROP" + params := map[string]interface{}{ + "Value": 42, + } + + script, err := ScriptTemplate( + template, WithScriptTemplateParams(params), + ) + require.NoError(t, err, "unexpected error") + + disasm, err := DisasmString(script) + require.NoError(t, err, "Failed to disassemble script") + + expected := "2a OP_DROP" + require.Equal( + t, expected, disasm, + "ScriptTemplate(%q):\ngot: %s\nwant: %s", + template, disasm, expected, + ) + }) + + t.Run("WithCustomTemplateFunc", func(t *testing.T) { + template := "{{ multiply 6 7 }} OP_DROP" + script, err := ScriptTemplate( + template, + WithCustomTemplateFunc("multiply", func(a, b int) int { + return a * b + }), + ) + require.NoError(t, err, "Unexpected error") + + disasm, err := DisasmString(script) + require.NoError(t, err, "Failed to disassemble script") + + expected := "2a OP_DROP" + require.Equal( + t, expected, disasm, + "ScriptTemplate(%q):\ngot: %s\nwant: %s", + template, disasm, expected, + ) + }) +} + +// TestScriptTemplateHelperFunctions tests the helper functions used in +// templates. 
+func TestScriptTemplateHelperFunctions(t *testing.T) { + t.Run("rangeIter", func(t *testing.T) { + result := rangeIter(2, 5) + expected := []int{2, 3, 4} + + require.Equal( + t, expected, result, + "rangeIter(2, 5) returned unexpected result", + ) + }) + + t.Run("hexEncode", func(t *testing.T) { + input := []byte{0x12, 0x34, 0x56} + result := hexEncode(input) + expected := "0x123456" + + require.Equal( + t, expected, result, + "hexEncode(%v) = %q, want %q", + input, result, expected, + ) + }) + + t.Run("hexStr", func(t *testing.T) { + input := []byte{0x12, 0x34, 0x56} + result := hexStr(input) + expected := "123456" + + require.Equal( + t, expected, result, + "hexStr(%v) = %q, want %q", + input, result, expected, + ) + }) + + t.Run("hexDecode", func(t *testing.T) { + tests := []struct { + input string + expected []byte + wantErr bool + }{ + {"123456", []byte{0x12, 0x34, 0x56}, false}, + {"0x123456", []byte{0x12, 0x34, 0x56}, false}, + {"zz", nil, true}, + } + + for _, test := range tests { + result, err := hexDecode(test.input) + + if test.wantErr { + require.Error( + t, err, + "hexDecode(%q) expected error, got nil", + test.input, + ) + continue + } + + require.NoError( + t, err, + "hexDecode(%q) unexpected error", + test.input, + ) + + require.Equal( + t, test.expected, result, + "hexDecode(%q) = %v, want %v", + test.input, result, test.expected, + ) + } + }) +} diff --git a/upnp.go b/upnp.go index c74e4ed79a..d06cf52f0d 100644 --- a/upnp.go +++ b/upnp.go @@ -201,7 +201,7 @@ func getChildDevice(d *device, deviceType string) *device { return nil } -// getChildDevice searches the service list of device for a service with the +// getChildService searches the service list of device for a service with the // given type. 
func getChildService(d *device, serviceType string) *service {
	for i := range d.ServiceList.Service {
diff --git a/v2transport/chacha.go b/v2transport/chacha.go
new file mode 100644
index 0000000000..94e14b1501
--- /dev/null
+++ b/v2transport/chacha.go
@@ -0,0 +1,183 @@
+package v2transport
+
+import (
+	"crypto/cipher"
+	"encoding/binary"
+	"fmt"
+
+	"golang.org/x/crypto/chacha20"
+	"golang.org/x/crypto/chacha20poly1305"
+)
+
+const (
+	// rekeyInterval is the number of messages that can be encrypted or
+	// decrypted with a single key before we rotate keys.
+	rekeyInterval = 224
+
+	// keySize is the size of the keys used.
+	keySize = 32
+)
+
+// FSChaCha20Poly1305 is a wrapper around ChaCha20Poly1305 that changes its
+// nonce after every message and is rekeyed after every rekeying interval.
+type FSChaCha20Poly1305 struct {
+	key       []byte
+	packetCtr uint64
+	cipher    cipher.AEAD
+}
+
+// NewFSChaCha20Poly1305 creates a new instance of FSChaCha20Poly1305.
+func NewFSChaCha20Poly1305(initialKey []byte) (*FSChaCha20Poly1305, error) {
+	cipher, err := chacha20poly1305.New(initialKey)
+	if err != nil {
+		return nil, err
+	}
+
+	f := &FSChaCha20Poly1305{
+		key:       initialKey,
+		packetCtr: 0,
+		cipher:    cipher,
+	}
+
+	return f, nil
+}
+
+// Encrypt encrypts the plaintext using the associated data, returning the
+// ciphertext or an error.
+func (f *FSChaCha20Poly1305) Encrypt(aad, plaintext []byte) ([]byte, error) {
+	return f.crypt(aad, plaintext, false)
+}
+
+// Decrypt decrypts the ciphertext using the associated data, returning the
+// plaintext or an error.
+func (f *FSChaCha20Poly1305) Decrypt(aad, ciphertext []byte) ([]byte, error) {
+	return f.crypt(aad, ciphertext, true)
+}
+
+// crypt takes the aad and plaintext/ciphertext and either encrypts or decrypts
+// `text` and returns the result. If a failure was encountered, an error will
+// be returned.
+func (f *FSChaCha20Poly1305) crypt(aad, text []byte,
+	decrypt bool) ([]byte, error) {
+
+	// The nonce is constructed as the 4-byte little-endian encoding of the
+	// number of messages crypted with the current key followed by the
+	// 8-byte little-endian encoding of the number of rekeyings performed.
+	var nonce [12]byte
+	numMsgs := uint32(f.packetCtr % rekeyInterval)
+	numRekeys := uint64(f.packetCtr / rekeyInterval)
+	binary.LittleEndian.PutUint32(nonce[0:4], numMsgs)
+	binary.LittleEndian.PutUint64(nonce[4:12], numRekeys)
+
+	var result []byte
+	if decrypt {
+		// Decrypt using the nonce, ciphertext, and aad.
+		var err error
+		result, err = f.cipher.Open(nil, nonce[:], text, aad)
+		if err != nil {
+			// It is ok to error here without incrementing
+			// packetCtr because we will no longer be decrypting
+			// any more messages.
+			return nil, err
+		}
+	} else {
+		// Encrypt using the nonce, plaintext, and aad.
+		result = f.cipher.Seal(nil, nonce[:], text, aad)
+	}
+
+	f.packetCtr++
+
+	// Rekey if we are at the rekeying interval.
+	if f.packetCtr%rekeyInterval == 0 {
+		var rekeyNonce [12]byte
+		rekeyNonce[0] = 0xff
+		rekeyNonce[1] = 0xff
+		rekeyNonce[2] = 0xff
+		rekeyNonce[3] = 0xff
+
+		copy(rekeyNonce[4:], nonce[4:])
+
+		var dummyPlaintext [32]byte
+		f.key = f.cipher.Seal(
+			nil, rekeyNonce[:], dummyPlaintext[:], nil,
+		)[:keySize]
+		cipher, err := chacha20poly1305.New(f.key)
+		if err != nil {
+			// Wrap the error with context instead of printing to
+			// stdout -- a transport library must never write to
+			// the process's standard output.
+			return nil, fmt.Errorf("unable to rekey "+
+				"FSChaCha20Poly1305: %w", err)
+		}
+		f.cipher = cipher
+	}
+
+	return result, nil
+}
+
+// FSChaCha20 is a stream cipher that is used to encrypt the length of the
+// packets. This cipher is rekeyed when chunkCtr reaches a multiple of
+// rekeyInterval.
+type FSChaCha20 struct {
+	key      []byte
+	chunkCtr uint64
+	cipher   *chacha20.Cipher
+}
+
+// NewFSChaCha20 initializes a new FSChaCha20 cipher instance.
+func NewFSChaCha20(initialKey []byte) (*FSChaCha20, error) { + var initialNonce [12]byte + binary.LittleEndian.PutUint64(initialNonce[4:12], 0) + + cipher, err := chacha20.NewUnauthenticatedCipher( + initialKey, initialNonce[:], + ) + if err != nil { + return nil, err + } + + return &FSChaCha20{ + key: initialKey, + chunkCtr: 0, + cipher: cipher, + }, nil +} + +// Crypt is used to either encrypt or decrypt text. This function is used for +// both encryption and decryption as the two operations are identical. +func (f *FSChaCha20) Crypt(text []byte) ([]byte, error) { + // XOR the text with the keystream to get either the cipher or + // plaintext. + textLen := len(text) + dst := make([]byte, textLen) + f.cipher.XORKeyStream(dst, text) + + // Increment the chunkCtr every time this function is called. + f.chunkCtr++ + + // Check if we need to rekey. + if f.chunkCtr%rekeyInterval == 0 { + // Get the new key by getting 32 bytes from the keystream. Use + // all 0's so that we can get the actual bytes from + // XORKeyStream since the chacha20 library doesn't supply us + // with the keystream's bytes directly. 
+ var ( + dummyXor [32]byte + newKey [32]byte + ) + f.cipher.XORKeyStream(newKey[:], dummyXor[:]) + f.key = newKey[:] + + var nonce [12]byte + numRekeys := f.chunkCtr / rekeyInterval + binary.LittleEndian.PutUint64(nonce[4:12], numRekeys) + + cipher, err := chacha20.NewUnauthenticatedCipher( + f.key, nonce[:], + ) + if err != nil { + return nil, err + } + + f.cipher = cipher + } + + return dst, nil +} diff --git a/v2transport/go.mod b/v2transport/go.mod new file mode 100644 index 0000000000..104d597d92 --- /dev/null +++ b/v2transport/go.mod @@ -0,0 +1,15 @@ +module github.com/btcsuite/btcd/v2transport + +go 1.23.2 + +require ( + github.com/btcsuite/btcd/btcec/v2 v2.3.5 + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f + golang.org/x/crypto v0.25.0 +) + +require ( + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + golang.org/x/sys v0.22.0 // indirect +) diff --git a/v2transport/go.sum b/v2transport/go.sum new file mode 100644 index 0000000000..03395399d2 --- /dev/null +++ b/v2transport/go.sum @@ -0,0 +1,15 @@ +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod 
h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/v2transport/log.go b/v2transport/log.go new file mode 100644 index 0000000000..d151b81e99 --- /dev/null +++ b/v2transport/log.go @@ -0,0 +1,32 @@ +// Copyright (c) 2025 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package v2transport + +import ( + "github.com/btcsuite/btclog" +) + +const Subsystem = "V2TR" + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + log = btclog.Disabled +} + +// UseLogger uses a specified Logger to output package logging info. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/v2transport/transport.go b/v2transport/transport.go new file mode 100644 index 0000000000..d95005817d --- /dev/null +++ b/v2transport/transport.go @@ -0,0 +1,943 @@ +package v2transport + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "fmt" + "io" + "sync/atomic" + + "golang.org/x/crypto/hkdf" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/ellswift" +) + +// packetBit is a type used to represent the bits in the packet's header. +type packetBit uint8 + +const ( + // ignoreBitPos is the position of the ignore bit in the packet's + // header. + ignoreBitPos packetBit = 7 +) + +// BitcoinNet is a type used to represent the Bitcoin network that we're +// connecting to. +// +// NOTE: This is identical to the wire.BitcoinNet type, but allows us to shed a +// large module dependency. +type BitcoinNet uint32 + +const ( + // garbageSize is the length in bytes of the garbage terminator that + // each party sends. + garbageSize = 16 + + // MaxGarbageLen is the maximum size of garbage that either peer is + // allowed to send. + MaxGarbageLen = 4095 + + // maxContentLen is the maximum length of content that can be encrypted + // or decrypted. + // + // TODO: This should be revisited. For some reason, the test vectors + // want us to encrypt 16777215 bytes even though bitcoind will only + // decrypt up to 1 + 12 + 4_000_000 bytes by default. + maxContentLen = 1<<24 - 1 + + // lengthFieldLen is the length of the length field when encrypting the + // content's length. + lengthFieldLen = 3 + + // headerLen is the length of the header field. It is composed of a + // single byte with only the ignoreBitPos having any meaning. + headerLen = 1 + + // chachapoly1305Expansion is the difference in bytes between the + // plaintext and ciphertext when using chachapoly1305. The ciphertext + // is larger because of the authentication tag. 
+ chachapoly1305Expansion = 16 +) + +var ( + // transportVersion is the transport version we are currently using. + transportVersion = []byte{} + + // errInsufficientBytes is returned when we haven't received enough + // bytes to populate their ElligatorSwift encoded public key. + errInsufficientBytes = fmt.Errorf("insufficient bytes received") + + // ErrUseV1Protocol is returned when the initiating peer is attempting + // to use the V1 protocol. + ErrUseV1Protocol = fmt.Errorf("use v1 protocol instead") + + // errWrongNetV1Peer is returned when a v1 peer is using the wrong + // network. + errWrongNetV1Peer = fmt.Errorf("peer is v1 and using the wrong network") + + // errGarbageTermNotRecv is returned when a v2 peer never sends us their + // garbage terminator. + errGarbageTermNotRecv = fmt.Errorf("no garbage term received") + + // errContentLengthExceeded is returned when trying to encrypt or decrypt + // more than the maximum content length. + errContentLengthExceeded = fmt.Errorf("maximum content length exceeded") + + // errFailedToRecv is returned when a Read call fails. + errFailedToRecv = fmt.Errorf("failed to recv data") + + // errPrefixTooLarge is returned if receivedPrefix is ever too large. + // This shouldn't happen unless the API is mis-used. + errPrefixTooLarge = fmt.Errorf("prefix too large - internal error") + + // errGarbageTooLarge is returned if a caller attempts to send garbage + // larger than normal. + errGarbageTooLarge = fmt.Errorf("garbage too large") + + // ErrShouldDowngradeToV1 is returned when we send the peer our + // ellswift key and they immediately hang up. This indicates that they + // don't understand v2 transport and interpreted the 64-byte key as a + // v1 message header + message. This will (always?) decode to an + // invalid command and checksum. The caller should try connecting to + // the peer with the OG v1 transport. 
+ ErrShouldDowngradeToV1 = fmt.Errorf("should downgrade to v1") +) + +// Peer defines the components necessary for sending/receiving data over the v2 +// transport. +type Peer struct { + // privkeyOurs is our private key + privkeyOurs *btcec.PrivateKey + + // ellswiftOurs is our ElligatorSwift-encoded public key. + ellswiftOurs [64]byte + + // sentGarbage is the garbage sent after the public key. This may be up + // to + // 4095 bytes. + sentGarbage []byte + + // receivedPrefix is used to determine which transport protocol we're + // using. + receivedPrefix []byte + + // sendL is the cipher used to send encrypted packet lengths. + sendL *FSChaCha20 + + // sendP is the cipher used to send encrypted packets. + sendP *FSChaCha20Poly1305 + + // sendGarbageTerm is the garbage terminator that we send. + sendGarbageTerm [garbageSize]byte + + // recvL is the cipher used to receive encrypted packet lengths. + recvL *FSChaCha20 + + // recvP is the cipher used to receive encrypted packets. + recvP *FSChaCha20Poly1305 + + // recvGarbageTerm is the garbage terminator our peer sends. + recvGarbageTerm []byte + + // initiatorL is the key used to seed the sendL cipher. + initiatorL []byte + + // initiatorP is the key used to seed the sendP cipher. + initiatorP []byte + + // responderL is the key used to seed the recvL cipher. + responderL []byte + + // responderP is the key used to seed the recvP cipher. + responderP []byte + + // sessionID uniquely identifies this encrypted channel. It is + // currently only used in the test vectors. + sessionID []byte + + // shouldDowngradeToV1 is true if the handshake failed in a way that + // indicates the peer does not support v2, and a v1 attempt should be + // made. + shouldDowngradeToV1 atomic.Bool + + // rw is the underlying object that will be read from / written to in + // calls to V2EncPacket and V2ReceivePacket. + rw io.ReadWriter +} + +// NewPeer returns a new instance of Peer. 
+func NewPeer() *Peer { + // The keys (initiatorL, initiatorP, responderL, responderP) as well as + // the sessionID must have space for the hkdf Expand-derived Reader to + // work. + return &Peer{ + receivedPrefix: make([]byte, 0), + initiatorL: make([]byte, 32), + initiatorP: make([]byte, 32), + responderL: make([]byte, 32), + responderP: make([]byte, 32), + sessionID: make([]byte, 32), + } +} + +// createV2Ciphers constructs the packet-length and packet encryption ciphers. +func (p *Peer) createV2Ciphers(ecdhSecret []byte, initiating bool, + net BitcoinNet) error { + + log.Debugf("Creating v2 ciphers (initiating=%v, net=%v)", initiating, + net) + + // Define the salt as the string "bitcoin_v2_shared_secret" followed by + // the BitcoinNet's "magic" bytes. + salt := []byte("bitcoin_v2_shared_secret") + + var magic [4]byte + binary.LittleEndian.PutUint32(magic[:], uint32(net)) + salt = append(salt, magic[:]...) + + // Use the hkdf Extract function to generate a pseudo-random key. + prk := hkdf.Extract(sha256.New, ecdhSecret, salt) + + log.Tracef("Using salt=%x for HKDF-Extract", salt) + + // Use the hkdf Expand function with info set to "session_id" to generate a + // unique sessionID. + sessionInfo := []byte("session_id") + sessionReader := hkdf.Expand(sha256.New, prk, sessionInfo) + _, err := sessionReader.Read(p.sessionID) + if err != nil { + log.Errorf("Failed to derive session_id: %v", err) + return err + } + + log.Tracef("Using prk=%x for HKDF-Expand", prk) + + log.Tracef("Derived session_id=%x", p.sessionID) + + // Use the Expand operation to generate packet and packet-length encryption + // ciphers. 
+ initiatorLInfo := []byte("initiator_L") + initiatorLReader := hkdf.Expand(sha256.New, prk, initiatorLInfo) + _, err = initiatorLReader.Read(p.initiatorL) + if err != nil { + log.Errorf("Failed to derive initiator_L: %v", err) + return err + } + + log.Tracef("Derived initiator_L=%x", p.initiatorL) + + initiatorPInfo := []byte("initiator_P") + initiatorPReader := hkdf.Expand(sha256.New, prk, initiatorPInfo) + _, err = initiatorPReader.Read(p.initiatorP) + if err != nil { + log.Errorf("Failed to derive initiator_P: %v", err) + return err + } + + log.Tracef("Derived initiator_P=%x", p.initiatorP) + + responderLInfo := []byte("responder_L") + responderLReader := hkdf.Expand(sha256.New, prk, responderLInfo) + _, err = responderLReader.Read(p.responderL) + if err != nil { + log.Errorf("Failed to derive responder_L: %v", err) + return err + } + + log.Tracef("Derived responder_L=%x", p.responderL) + + responderPInfo := []byte("responder_P") + responderPReader := hkdf.Expand(sha256.New, prk, responderPInfo) + _, err = responderPReader.Read(p.responderP) + if err != nil { + log.Errorf("Failed to derive responder_P: %v", err) + return err + } + + log.Tracef("Derived responder_P=%x", p.responderP) + + // Create the garbage terminators that each side will use. 
+ garbageInfo := []byte("garbage_terminators") + garbageReader := hkdf.Expand(sha256.New, prk, garbageInfo) + garbageTerminators := make([]byte, 32) + _, err = garbageReader.Read(garbageTerminators) + if err != nil { + log.Errorf("Failed to derive garbage terminators: %v", err) + return err + } + + initiatorGarbageTerminator := garbageTerminators[:garbageSize] + responderGarbageTerminator := garbageTerminators[garbageSize:] + + log.Tracef("Derived initiator garbage terminator=%x", + initiatorGarbageTerminator) + + log.Tracef("Derived responder garbage terminator=%x", + responderGarbageTerminator) + + if initiating { + p.sendL, err = NewFSChaCha20(p.initiatorL) + if err != nil { + log.Errorf("Failed to create sendL cipher: %v", err) + return err + } + + p.sendP, err = NewFSChaCha20Poly1305(p.initiatorP) + if err != nil { + log.Errorf("Failed to create sendP cipher: %v", err) + return err + } + + copy(p.sendGarbageTerm[:], initiatorGarbageTerminator) + + p.recvL, err = NewFSChaCha20(p.responderL) + if err != nil { + log.Errorf("Failed to create recvL cipher: %v", err) + return err + } + + p.recvP, err = NewFSChaCha20Poly1305(p.responderP) + if err != nil { + log.Errorf("Failed to create recvP cipher: %v", err) + return err + } + + p.recvGarbageTerm = responderGarbageTerminator + + log.Debugf("Initiator ciphers created (sendL, sendP, " + + "recvL, recvP)") + + } else { + p.sendL, err = NewFSChaCha20(p.responderL) + if err != nil { + log.Errorf("Failed to create sendL cipher: %v", err) + return err + } + + p.sendP, err = NewFSChaCha20Poly1305(p.responderP) + if err != nil { + log.Errorf("Failed to create sendP cipher: %v", err) + return err + } + + copy(p.sendGarbageTerm[:], responderGarbageTerminator) + + p.recvL, err = NewFSChaCha20(p.initiatorL) + if err != nil { + log.Errorf("Failed to create recvL cipher: %v", err) + return err + } + + p.recvP, err = NewFSChaCha20Poly1305(p.initiatorP) + if err != nil { + log.Errorf("Failed to create recvP cipher: %v", err) + 
return err + } + + p.recvGarbageTerm = initiatorGarbageTerminator + + log.Debugf("Responder ciphers created (sendL, sendP, " + + "recvL, recvP)") + } + + // TODO: + // To achieve forward secrecy we must wipe the key material used to initialize the ciphers: + // memory_cleanse(ecdhSecret, prk, initiator_L, initiator_P, responder_L, responder_K) + // - golang analogue? + + return nil +} + +// InitiateV2Handshake generates our private key and sends our public key as +// well as garbage data to our peer. +func (p *Peer) InitiateV2Handshake(garbageLen int) error { + log.Debugf("Initiating v2 handshake (garbageLen=%d)", garbageLen) + + var err error + p.privkeyOurs, p.ellswiftOurs, err = ellswift.EllswiftCreate() + if err != nil { + log.Errorf("Failed to create ellswift keypair: %v", err) + return err + } + + log.Tracef("Created ellswift keypair, pubkey=%x", p.ellswiftOurs) + + data, err := p.generateKeyAndGarbage(garbageLen) + if err != nil { + return err + } + + log.Debugf("Sending ellswift pubkey and garbage (total_len=%d)", + len(data)) + + p.Send(data) + + return nil +} + +// RespondV2Handshake responds to the initiator, determines if the initiator +// wants to use the v2 protocol and if so returns our ElligatorSwift-encoded +// public key followed by our garbage data over. If the initiator does not want +// to use the v2 protocol, we'll instead revert to the v1 protocol. +func (p *Peer) RespondV2Handshake(garbageLen int, net BitcoinNet) error { + v1Prefix := createV1Prefix(net) + + log.Debugf("Responding to v2 handshake (garbageLen=%d, net=%v)", + garbageLen, net) + + log.Tracef("Expecting v1 prefix: %x", v1Prefix) + + var err error + + // Check and see if the received bytes match the v1 protocol's message + // prefix. If it does, we'll revert to the v1 protocol. If it doesn't, + // we'll treat this as a v2 peer. 
+ for len(p.receivedPrefix) < len(v1Prefix) { + log.Tracef("Received prefix len=%d, need=%d", + len(p.receivedPrefix), len(v1Prefix)) + + var receiveBytes []byte + receiveBytes, _, err = p.Receive(1) + if err != nil { + log.Errorf("Failed to receive byte for v1 prefix "+ + "check: %v", err) + return err + } + + log.Tracef("Current received prefix: %x", p.receivedPrefix) + + p.receivedPrefix = append(p.receivedPrefix, receiveBytes...) + + lastIdx := len(p.receivedPrefix) - 1 + + if p.receivedPrefix[lastIdx] != v1Prefix[lastIdx] { + log.Debugf("Received byte %x does not match v1 "+ + "prefix at index %d, assuming v2 peer", + p.receivedPrefix[lastIdx], lastIdx) + + p.privkeyOurs, p.ellswiftOurs, err = ellswift.EllswiftCreate() + if err != nil { + log.Errorf("Failed to create ellswift "+ + "keypair: %v", err) + return err + } + + log.Tracef("Created ellswift keypair, pubkey=%x", + p.ellswiftOurs) + + data, err := p.generateKeyAndGarbage(garbageLen) + if err != nil { + return err + } + + // Send over our ElligatorSwift-encoded pubkey followed + // by our randomly generated garbage. + log.Debugf("Sending ellswift pubkey and garbage "+ + "(total_len=%d)", len(data)) + p.Send(data) + + return nil + } + } + + log.Infof("Received full v1 prefix match, reverting to v1 protocol") + + return ErrUseV1Protocol +} + +// generateKeyAndGarbage returns a byte slice containing our ellswift-encoded +// public key followed by the garbage we'll send over. 
+func (p *Peer) generateKeyAndGarbage(garbageLen int) ([]byte, error) { + log.Tracef("Generating key and garbage (garbageLen=%d)", garbageLen) + + if garbageLen > MaxGarbageLen { + log.Errorf("Requested garbage length %d exceeds max %d", + garbageLen, MaxGarbageLen) + + return nil, errGarbageTooLarge + } + + p.sentGarbage = make([]byte, garbageLen) + _, err := rand.Read(p.sentGarbage) + if err != nil { + log.Errorf("Failed to read random bytes for garbage: %v", err) + return nil, err + } + + log.Tracef("Generated %d bytes of garbage", garbageLen) + + data := make([]byte, 0, 64+garbageLen) + data = append(data, p.ellswiftOurs[:]...) + data = append(data, p.sentGarbage...) + + log.Tracef("Generated key and garbage data (total_len=%d)", len(data)) + + return data, nil +} + +// createV1Prefix is a helper function that returns the first 16 bytes of the +// version message's header. +func createV1Prefix(net BitcoinNet) []byte { + v1Prefix := make([]byte, 0, 4+12) + + // The v1 transport protocol uses the network's 4 magic bytes followed by + // "version" followed by 5 bytes of 0. + var magic [4]byte + binary.LittleEndian.PutUint32(magic[:], uint32(net)) + + versionBytes := []byte("version\x00\x00\x00\x00\x00") + + v1Prefix = append(v1Prefix, magic[:]...) + v1Prefix = append(v1Prefix, versionBytes...) + + return v1Prefix +} + +// CompleteHandshake finishes the v2 protocol negotiation and optionally sends +// decoy packets after sending the garbage terminator. 
+func (p *Peer) CompleteHandshake(initiating bool, decoyContentLens []int,
+	btcnet BitcoinNet) error {
+
+	log.Debugf("Completing v2 handshake (initiating=%v, "+
+		"num_decoys=%d, net=%v)", initiating, len(decoyContentLens),
+		btcnet)
+
+	var receivedPrefix []byte
+	if initiating {
+		log.Trace("Initiator expecting 64 bytes for peer's " +
+			"ellswift key")
+
+		receivedPrefix = make([]byte, 0, 16)
+	} else {
+		// If we are the responder, we have already received bytes to
+		// compare against the v1 transport protocol's starting bytes.
+		// We have to account for these when reading the rest of the 64
+		// bytes off the wire to properly parse the remote's
+		// ellswift-encoded public key.
+		receivedPrefix = p.receivedPrefix
+
+		log.Tracef("Responder already has prefix_len=%d, expecting %d "+
+			"more bytes for peer's ellswift key",
+			len(receivedPrefix), 64-len(receivedPrefix))
+	}
+
+	recvData, numRead, err := p.Receive(64 - len(receivedPrefix))
+	if err != nil {
+		// If we receive an error when reading off the wire and we read
+		// zero bytes, then we will reconnect to the peer using v1.
+		// There are several different errors that Receive can return
+		// that indicate we should reconnect. Instead of special-casing
+		// them all, just perform these checks if any error was
+		// returned.
+		if numRead == 0 && initiating {
+			// The peer most likely attempted to parse our 64-byte
+			// elligator-swift key as a version message and failed
+			// when trying to parse the message header into
+			// something valid. In this case, return a special
+			// error that signals to the server that we can
+			// reconnect with the OG v1 scheme.
+			log.Debugf("Received transport error during " +
+				"v2 handshake, retrying with downgraded " +
+				"v1 connection.")
+
+			p.shouldDowngradeToV1.Store(true)
+
+			return ErrShouldDowngradeToV1
+		}
+
+		// If we are the recipient, we can fail.
+ log.Errorf("Failed to receive peer's ellswift key data: %v", + err) + return err + } + + log.Tracef("Received %d bytes for peer's ellswift key", len(recvData)) + + var ellswiftTheirs [64]byte + + if initiating { + // If we are initiating, read all 64 bytes into ellswiftTheirs. + copy(ellswiftTheirs[:], recvData) + } else { + // If we are the responder, then we need to account for the + // bytes already received as part of matching against the + // starting v1 transport bytes. We sanity check receivedPrefix + // in case it is too large for some reason. + prefixLen := len(receivedPrefix) + if prefixLen > 16 { + log.Errorf("Responder's received prefix length %d is "+ + "too large (> 16)", prefixLen) + + return errPrefixTooLarge + } + + copy(ellswiftTheirs[:], receivedPrefix) + copy(ellswiftTheirs[prefixLen:], recvData) + } + + log.Tracef("Assembled peer's ellswift key: %x", ellswiftTheirs) + + // Calculate the v1 protocol's message prefix and see if the bytes read + // read into ellswiftTheirs matches it. + v1Prefix := createV1Prefix(btcnet) + + // ellswiftTheirs should be at least 16 bytes if receive succeeded, but + // just in case, check the size. + if len(ellswiftTheirs) < 16 { + log.Errorf("Received insufficient bytes (%d) for "+ + + "ellswift key", len(ellswiftTheirs)) + return errInsufficientBytes + } + + if !initiating && bytes.Equal(ellswiftTheirs[4:16], v1Prefix[4:16]) { + log.Warnf("Peer sent v1 version message for wrong network "+ + "(expected %v)", btcnet) + return errWrongNetV1Peer + } + + log.Debug("Calculating ECDH shared secret") + + // Calculate the shared secret to be used in creating the packet + // ciphers. 
+	ecdhSecret, err := ellswift.V2Ecdh(
+		p.privkeyOurs, ellswiftTheirs, p.ellswiftOurs, initiating,
+	)
+	if err != nil {
+		log.Errorf("Failed to calculate ECDH shared secret: %v", err)
+		return err
+	}
+
+	log.Tracef("Calculated ECDH shared secret: %x", ecdhSecret)
+
+	err = p.createV2Ciphers(ecdhSecret[:], initiating, btcnet)
+	if err != nil {
+		return err
+	}
+
+	// Send the garbage terminator, failing the handshake if the write
+	// does not succeed.
+	log.Debugf("Sending garbage terminator: %x", p.sendGarbageTerm)
+	if _, err := p.Send(p.sendGarbageTerm[:]); err != nil {
+		return err
+	}
+
+	// Optionally send decoy packets after garbage terminator.
+	aad := p.sentGarbage
+	for i := 0; i < len(decoyContentLens); i++ {
+		log.Tracef("Sending decoy packet %d (content_len=%d)",
+			i+1, decoyContentLens[i])
+
+		decoyContent := make([]byte, decoyContentLens[i])
+
+		// V2EncPacket writes the encrypted packet to the wire itself,
+		// so there must not be an additional Send here -- doing so
+		// would transmit each decoy packet twice.
+		_, _, err := p.V2EncPacket(decoyContent, aad, true)
+		if err != nil {
+			log.Errorf("Failed to encrypt/send decoy "+
+				"packet %d: %v", i+1, err)
+			return err
+		}
+
+		// AAD is only used for the first packet after the handshake.
+		aad = nil
+	}
+
+	// Send version packet.
+	log.Debug("Sending version packet")
+	_, _, err = p.V2EncPacket(transportVersion, aad, false)
+	if err != nil {
+		log.Errorf("Failed to encrypt/send version packet: %v", err)
+		return err
+	}
+
+	log.Debugf("Receiving garbage, looking for terminator: %x",
+		p.recvGarbageTerm)
+
+	// Skip garbage until encountering garbage terminator.
+	recvGarbage, _, err := p.Receive(16)
+	if err != nil {
+		log.Errorf("Failed to receive initial 16 bytes of "+
+			"garbage: %v", err)
+		return err
+	}
+
+	log.Tracef("Received initial garbage chunk: %x", recvGarbage)
+
+	// The BIP text states that we can read up to 4111 bytes. We've already
+	// read 16 bytes for the garbage terminator, so we need to read up to
+	// MaxGarbageLen more bytes.
+ for i := 0; i < MaxGarbageLen; i++ { + recvGarbageLen := len(recvGarbage) + + if bytes.Equal(recvGarbage[recvGarbageLen-16:], + p.recvGarbageTerm) { + + log.Debugf("Found garbage terminator after %d total "+ + "bytes", recvGarbageLen) + + log.Tracef("Processing %d bytes preceding garbage "+ + "terminator", recvGarbageLen-16) + + // Process any potential packet data sent before the + // terminator. + _, err = p.V2ReceivePacket( + recvGarbage[:recvGarbageLen-16], + ) + if err != nil { + log.Errorf("Error processing packet data "+ + "before garbage terminator: %v", err) + } + return err + } + + log.Tracef("Garbage terminator not found, receiving 1 more "+ + "byte (total_received=%d)", recvGarbageLen) + + recvData, _, err := p.Receive(1) + if err != nil { + log.Errorf("Failed to receive garbage "+ + "byte %d: %v", recvGarbageLen+1, err) + return err + } + + recvGarbage = append(recvGarbage, recvData...) + } + + log.Warnf("Garbage terminator not received after %d "+ + "bytes", len(recvGarbage)) + + return errGarbageTermNotRecv +} + +// V2EncPacket takes the contents and aad and returns a ciphertext. +func (p *Peer) V2EncPacket(contents []byte, aad []byte, ignore bool) ([]byte, + int, error) { + + log.Tracef("Encrypting packet (content_len=%d, aad_len=%d, ignore=%v)", + len(contents), len(aad), ignore) + + contentLen := len(contents) + + if contentLen > maxContentLen { + log.Errorf("Content length %d exceeds max %d", + contentLen, maxContentLen) + + return nil, 0, errContentLengthExceeded + } + + // Construct the packet's header based on whether or not the peer + // should ignore this packet (i.e. if this is a decoy packet). + ignoreNum := 0 + if ignore { + ignoreNum = 1 + } + + ignoreNum <<= ignoreBitPos + + header := []byte{byte(ignoreNum)} + + log.Tracef("Packet header: %x", header) + + plaintext := make([]byte, 0, contentLen+1) + plaintext = append(plaintext, header...) + plaintext = append(plaintext, contents...) 
+ + log.Tracef("Plaintext (header + content): %x", plaintext) + + aeadCiphertext, err := p.sendP.Encrypt(aad, plaintext) + if err != nil { + log.Errorf("Failed to encrypt packet content: %v", err) + return nil, 0, err + } + + log.Tracef("AEAD ciphertext: %x", aeadCiphertext) + + // We cut off a byte when feeding to Crypt. + contentsLE := make([]byte, 4) + binary.LittleEndian.PutUint32(contentsLE, uint32(contentLen)) + + log.Tracef("Encrypting content length %d (%x)", + contentLen, contentsLE[:lengthFieldLen]) + + encContentsLen, err := p.sendL.Crypt(contentsLE[:lengthFieldLen]) + if err != nil { + log.Errorf("Failed to encrypt content length: %v", err) + return nil, 0, err + } + + log.Tracef("Encrypted content length: %x", encContentsLen) + + encPacket := make([]byte, 0, len(encContentsLen)+len(aeadCiphertext)) + encPacket = append(encPacket, encContentsLen...) + encPacket = append(encPacket, aeadCiphertext...) + + log.Tracef("Full encrypted packet: %x", encPacket) + + totalBytes, err := p.Send(encPacket) + if err != nil { + log.Errorf("Failed to send encrypted packet: %v", err) + + // Return the packet anyway, as some might have been sent. + return encPacket, totalBytes, err + } + + log.Tracef("Sent %d bytes for encrypted packet", totalBytes) + + return encPacket, totalBytes, err +} + +// V2ReceivePacket takes the aad and decrypts a received packet. +func (p *Peer) V2ReceivePacket(aad []byte) ([]byte, error) { + log.Tracef("Attempting to receive packet (aad_len=%d)", len(aad)) + + for { + log.Tracef("Receiving %d bytes for encrypted length", + lengthFieldLen) + + // Decrypt the length field so we know how many more bytes to receive. 
+ encContentsLen, _, err := p.Receive(lengthFieldLen) + if err != nil { + log.Errorf("Failed to receive encrypted length: %v", + err) + return nil, err + } + + log.Tracef("Received encrypted length: %x", encContentsLen) + + contentsLenBytes, err := p.recvL.Crypt(encContentsLen) + if err != nil { + log.Errorf("Failed to decrypt content length: %v", err) + return nil, err + } + + log.Tracef("Decrypted content length bytes: %x", + contentsLenBytes) + + var contentsLenLE [4]byte + copy(contentsLenLE[:], contentsLenBytes) + + contentsLen := binary.LittleEndian.Uint32(contentsLenLE[:]) + + log.Tracef("Decrypted content length=%d", contentsLen) + + if contentsLen > maxContentLen { + log.Errorf("Decrypted content length %d exceeds "+ + "max %d", contentsLen, maxContentLen) + + return nil, errContentLengthExceeded + } + + // Decrypt the remainder of the packet. + numBytes := headerLen + int(contentsLen) + chachapoly1305Expansion + + log.Tracef("Receiving %d bytes for encrypted packet body", + numBytes) + + aeadCiphertext, _, err := p.Receive(numBytes) + if err != nil { + log.Errorf("Failed to receive encrypted "+ + "packet body: %v", err) + return nil, err + } + + log.Tracef("Received encrypted packet body: %x", aeadCiphertext) + + plaintext, err := p.recvP.Decrypt(aad, aeadCiphertext) + if err != nil { + log.Errorf("Failed to decrypt packet body: %v", err) + return nil, err + } + + log.Tracef("Decrypted plaintext (header + content): %x", + plaintext) + + // Only the first packet is expected to have non-empty AAD. If + // the ignore bit is set, ignore the packet. + // + // TODO: will this cause anything to leak? + // AAD is only used for the first packet after the handshake. 
+ aad = nil
+ header := plaintext[:headerLen]
+ log.Tracef("Packet header: %x", header)
+ if (header[0] & (1 << ignoreBitPos)) == 0 {
+ log.Tracef("Ignore bit not set, returning content "+
+ "(len=%d)", len(plaintext[headerLen:]))
+
+ return plaintext[headerLen:], nil
+ }
+
+ log.Debugf("Ignore bit set, discarding packet "+
+ "(content_len=%d)", contentsLen)
+ }
+}
+
+// ReceivedPrefix returns the partial header bytes we've already received.
+func (p *Peer) ReceivedPrefix() []byte {
+ return p.receivedPrefix
+}
+
+// ShouldDowngradeToV1 returns true if the v2 handshake failed in a way that
+// suggests the peer does not support v2 and a v1 connection should be
+// attempted.
+func (p *Peer) ShouldDowngradeToV1() bool {
+ return p.shouldDowngradeToV1.Load()
+}
+
+// UseReadWriter uses the passed-in ReadWriter to Send/Receive to/from.
+func (p *Peer) UseReadWriter(rw io.ReadWriter) {
+ p.rw = rw
+}
+
+// Send sends data to the underlying connection. It returns the number of bytes
+// sent or an error.
+func (p *Peer) Send(data []byte) (int, error) {
+ log.Tracef("Sending %d bytes", len(data))
+ n, err := p.rw.Write(data)
+ if err != nil {
+ log.Errorf("Send failed after %d bytes: %v", n, err)
+ } else {
+ log.Tracef("Sent %d bytes successfully", n)
+ }
+ return n, err
+}
+
+// Receive receives numBytes bytes from the underlying connection.
+func (p *Peer) Receive(numBytes int) ([]byte, int, error) {
+ b := make([]byte, numBytes)
+ index := 0
+ total := 0
+
+ log.Tracef("Attempting to receive %d bytes", numBytes)
+
+ for {
+ // TODO: Use something that inherently prevents going over?
+ if total > numBytes {
+ // This should be logically impossible with io.ReadFull
+ // semantics used implicitly by the loop structure.
+ log.Criticalf("Receive logic error: total=%d > "+ + "numBytes=%d", total, numBytes) + return nil, total, errFailedToRecv + } + + if total == numBytes { + log.Tracef("Successfully received %d bytes", total) + return b, total, nil + } + + log.Tracef("Calling Read (need %d bytes, have "+ + "%d)", numBytes-total, total) + + n, err := p.rw.Read(b[index:]) + if err != nil { + log.Errorf("Receive failed after reading %d bytes "+ + "(target %d): %v", total+n, numBytes, err) + return nil, total, err + } + + log.Tracef("Read returned %d bytes", n) + + total += n + index += n + } +} diff --git a/v2transport/transport_test.go b/v2transport/transport_test.go new file mode 100644 index 0000000000..97fa7a27bb --- /dev/null +++ b/v2transport/transport_test.go @@ -0,0 +1,498 @@ +package v2transport + +import ( + "bytes" + "encoding/hex" + "strings" + "testing" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/ellswift" +) + +func setHex(hexString string) *btcec.FieldVal { + if len(hexString)%2 != 0 { + hexString = "0" + hexString + } + bytes, _ := hex.DecodeString(hexString) + + var f btcec.FieldVal + f.SetByteSlice(bytes) + + return &f +} + +const ( + mainNet = 0xd9b4bef9 +) + +func TestPacketEncodingVectors(t *testing.T) { + tests := []struct { + inIdx int + inPrivOurs string + inEllswiftOurs string + inEllswiftTheirs string + inInitiating bool + inContents string + inMultiply int + inAad string + inIgnore bool + midXOurs string + midXTheirs string + midXShared string + midSharedSecret string + midInitiatorL string + midInitiatorP string + midResponderL string + midResponderP string + midSendGarbageTerm string + midRecvGarbageTerm string + outSessionID string + outCiphertext string + outCiphertextEndsWith string + }{ + { + inIdx: 1, + inPrivOurs: "61062ea5071d800bbfd59e2e8b53d47d194b095ae5a4df04936b49772ef0d4d7", + inEllswiftOurs: 
"ec0adff257bbfe500c188c80b4fdd640f6b45a482bbc15fc7cef5931deff0aa186f6eb9bba7b85dc4dcc28b28722de1e3d9108b985e2967045668f66098e475b", + inEllswiftTheirs: "a4a94dfce69b4a2a0a099313d10f9f7e7d649d60501c9e1d274c300e0d89aafaffffffffffffffffffffffffffffffffffffffffffffffffffffffff8faf88d5", + inInitiating: true, + inContents: "8e", + inMultiply: 1, + inAad: "", + inIgnore: false, + midXOurs: "19e965bc20fc40614e33f2f82d4eeff81b5e7516b12a5c6c0d6053527eba0923", + midXTheirs: "0c71defa3fafd74cb835102acd81490963f6b72d889495e06561375bd65f6ffc", + midXShared: "4eb2bf85bd00939468ea2abb25b63bc642e3d1eb8b967fb90caa2d89e716050e", + midSharedSecret: "c6992a117f5edbea70c3f511d32d26b9798be4b81a62eaee1a5acaa8459a3592", + midInitiatorL: "9a6478b5fbab1f4dd2f78994b774c03211c78312786e602da75a0d1767fb55cf", + midInitiatorP: "7d0c7820ba6a4d29ce40baf2caa6035e04f1e1cefd59f3e7e59e9e5af84f1f51", + midResponderL: "17bc726421e4054ac6a1d54915085aaa766f4d3cf67bbd168e6080eac289d15e", + midResponderP: "9f0fc1c0e85fd9a8eee07e6fc41dba2ff54c7729068a239ac97c37c524cca1c0", + midSendGarbageTerm: "faef555dfcdb936425d84aba524758f3", + midRecvGarbageTerm: "02cb8ff24307a6e27de3b4e7ea3fa65b", + outSessionID: "ce72dffb015da62b0d0f5474cab8bc72605225b0cee3f62312ec680ec5f41ba5", + outCiphertext: "7530d2a18720162ac09c25329a60d75adf36eda3c3", + outCiphertextEndsWith: "", + }, + { + inIdx: 999, + inPrivOurs: "1f9c581b35231838f0f17cf0c979835baccb7f3abbbb96ffcc318ab71e6e126f", + inEllswiftOurs: "a1855e10e94e00baa23041d916e259f7044e491da6171269694763f018c7e63693d29575dcb464ac816baa1be353ba12e3876cba7628bd0bd8e755e721eb0140", + inEllswiftTheirs: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0000000000000000000000000000000000000000000000000000000000000000", + inInitiating: false, + inContents: "3eb1d4e98035cfd8eeb29bac969ed3824a", + inMultiply: 1, + inAad: "", + inIgnore: false, + midXOurs: "45b6f1f684fd9f2b16e2651ddc47156c0695c8c5cd2c0c9df6d79a1056c61120", + midXTheirs: 
"edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c", + midXShared: "c40eb6190caf399c9007254ad5e5fa20d64af2b41696599c59b2191d16992955", + midSharedSecret: "a0138f564f74d0ad70bc337dacc9d0bf1d2349364caf1188a1e6e8ddb3b7b184", + midInitiatorL: "b82a0a7ce7219777f914d2ab873c5c487c56bd7b68622594d67fe029a8fa7def", + midInitiatorP: "d760ba8f62dd3d29d7d5584e310caf2540285edc6b51c640f9497e99c3536fd2", + midResponderL: "9db0c6f9a903cbab5d7b3c58273a3421eec0001814ec53236bd405131a0d8e90", + midResponderP: "23d2b5e653e6a3a8db160a2ca03d11cb5a79983babba861fcb57c38413323c0c", + midSendGarbageTerm: "efb64fd80acd3825ac9bc2a67216535a", + midRecvGarbageTerm: "b3cb553453bceb002897e751ff7588bf", + outSessionID: "9267c54560607de73f18c563b76a2442718879c52dd39852885d4a3c9912c9ea", + outCiphertext: "1da1bcf589f9b61872f45b7fa5371dd3f8bdf5d515b0c5f9fe9f0044afb8dc0aa1cd39a8c4", + outCiphertextEndsWith: "", + }, + { + inIdx: 0, + inPrivOurs: "0286c41cd30913db0fdff7a64ebda5c8e3e7cef10f2aebc00a7650443cf4c60d", + inEllswiftOurs: "d1ee8a93a01130cbf299249a258f94feb5f469e7d0f2f28f69ee5e9aa8f9b54a60f2c3ff2d023634ec7f4127a96cc11662e402894cf1f694fb9a7eaa5f1d9244", + inEllswiftTheirs: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff22d5e441524d571a52b3def126189d3f416890a99d4da6ede2b0cde1760ce2c3f98457ae", + inInitiating: true, + inContents: "054290a6c6ba8d80478172e89d32bf690913ae9835de6dcf206ff1f4d652286fe0ddf74deba41d55de3edc77c42a32af79bbea2c00bae7492264c60866ae5a", + inMultiply: 1, + inAad: 
"84932a55aac22b51e7b128d31d9f0550da28e6a3f394224707d878603386b2f9d0c6bcd8046679bfed7b68c517e7431e75d9dd34605727d2ef1c2babbf680ecc8d68d2c4886e9953a4034abde6da4189cd47c6bb3192242cf714d502ca6103ee84e08bc2ca4fd370d5ad4e7d06c7fbf496c6c7cc7eb19c40c61fb33df2a9ba48497a96c98d7b10c1f91098a6b7b16b4bab9687f27585ade1491ae0dba6a79e1e2d85dd9d9d45c5135ca5fca3f0f99a60ea39edbc9efc7923111c937913f225d67788d5f7e8852b697e26b92ec7bfcaa334a1665511c2b4c0a42d06f7ab98a9719516c8fd17f73804555ee84ab3b7d1762f6096b778d3cb9c799cbd49a9e4a325197b4e6cc4a5c4651f8b41ff88a92ec428354531f970263b467c77ed11312e2617d0d53fe9a8707f51f9f57a77bfb49afe3d89d85ec05ee17b9186f360c94ab8bb2926b65ca99dae1d6ee1af96cad09de70b6767e949023e4b380e66669914a741ed0fa420a48dbc7bfae5ef2019af36d1022283dd90655f25eec7151d471265d22a6d3f91dc700ba749bb67c0fe4bc0888593fbaf59d3c6fff1bf756a125910a63b9682b597c20f560ecb99c11a92c8c8c3f7fbfaa103146083a0ccaecf7a5f5e735a784a8820155914a289d57d8141870ffcaf588882332e0bcd8779efa931aa108dab6c3cce76691e345df4a91a03b71074d66333fd3591bff071ea099360f787bbe43b7b3dff2a59c41c7642eb79870222ad1c6f2e5a191ed5acea51134679587c9cf71c7d8ee290be6bf465c4ee47897a125708704ad610d8d00252d01959209d7cd04d5ecbbb1419a7e84037a55fefa13dee464b48a35c96bcb9a53e7ed461c3a1607ee00c3c302fd47cd73fda7493e947c9834a92d63dcfbd65aa7c38c3e3a2748bb5d9a58e7495d243d6b741078c8f7ee9c8813e473a323375702702b0afae1550c8341eedf5247627343a95240cb02e3e17d5dca16f8d8d3b2228e19c06399f8ec5c5e9dbe4caef6a0ea3ffb1d3c7eac03ae030e791fa12e537c80d56b55b764cadf27a8701052df1282ba8b5e3eb62b5dc7973ac40160e00722fa958d95102fc25c549d8c0e84bed95b7acb61ba65700c4de4feebf78d13b9682c52e937d23026fb4c6193e6644e2d3c99f91f4f39a8b9fc6d013f89c3793ef703987954dc0412b550652c01d922f525704d32d70d6d4079bc3551b563fb29577b3aecdc9505011701dddfd94830431e7a4918927ee44fb3831ce8c4513839e2deea1287f3fa1ab9b61a256c09637dbc7b4f0f8fbb783840f9c24526da883b0df0c473cf231656bd7bc1aaba7f321fec0971c8c2c3444bff2f55e1df7fea66ec3e440a612db9aa87bb505163a59e06b96d46f50d8120b92814ac5ab146bc78dbbf91065af261078156
78ce6e33812e6bf3285d4ef3b7b04b076f21e7820dcbfdb4ad5218cf4ff6a65812d8fcb98ecc1e95e2fa58e3efe4ce26cd0bd400d6036ab2ad4f6c713082b5e3f1e04eb9e3b6c8f63f57953894b9e220e0130308e1fd91f72d398c1e7962ca2c31be83f31d6157633581a0a6910496de8d55d3d07090b6aa087159e388b7e7dec60f5d8a60d93ca2ae91296bd484d916bfaaa17c8f45ea4b1a91b37c82821199a2b7596672c37156d8701e7352aa48671d3b1bbbd2bd5f0a2268894a25b0cb2514af39c8743f8cce8ab4b523053739fd8a522222a09acf51ac704489cf17e4b7125455cb8f125b4d31af1eba1f8cf7f81a5a100a141a7ee72e8083e065616649c241f233645c5fc865d17f0285f5c52d9f45312c979bfb3ce5f2a1b951deddf280ffb3f370410cffd1583bfa90077835aa201a0712d1dcd1293ee177738b14e6b5e2a496d05220c3253bb6578d6aff774be91946a614dd7e879fb3dcf7451e0b9adb6a8c44f53c2c464bcc0019e9fad89cac7791a0a3f2974f759a9856351d4d2d7c5612c17cfc50f8479945df57716767b120a590f4bf656f4645029a525694d8a238446c5f5c2c1c995c09c1405b8b1eb9e0352ffdf766cc964f8dcf9f8f043dfab6d102cf4b298021abd78f1d9025fa1f8e1d710b38d9d1652f2d88d1305874ec41609b6617b65c5adb19b6295dc5c5da5fdf69f28144ea12f17c3c6fcce6b9b5157b3dfc969d6725fa5b098a4d9b1d31547ed4c9187452d281d0a5d456008caf1aa251fac8f950ca561982dc2dc908d3691ee3b6ad3ae3d22d002577264ca8e49c523bd51c4846be0d198ad9407bf6f7b82c79893eb2c05fe9981f687a97a4f01fe45ff8c8b7ecc551135cd960a0d6001ad35020be07ffb53cb9e731522ca8ae9364628914b9b8e8cc2f37f03393263603cc2b45295767eb0aac29b0930390eb89587ab2779d2e3decb8042acece725ba42eda650863f418f8d0d50d104e44fbbe5aa7389a4a144a8cecf00f45fb14c39112f9bfb56c0acbd44fa3ff261f5ce4acaa5134c2c1d0cca447040820c81ab1bcdc16aa075b7c68b10d06bbb7ce08b5b805e0238f24402cf24a4b4e00701935a0c68add3de090903f9b85b153cb179a582f57113bfc21c2093803f0cfa4d9d4672c2b05a24f7e4c34a8e9101b70303a7378b9c50b6cddd46814ef7fd73ef6923feceab8fc5aa8b0d185f2e83c7a99dcb1077c0ab5c1f5d5f01ba2f0420443f75c4417db9ebf1665efbb33dca224989920a64b44dc26f682cc77b4632c8454d49135e52503da855bc0f6ff8edc1145451a9772c06891f41064036b66c3119a0fc6e80dffeb65dc456108b7ca0296f4175fff3ed2b0f842cd46bd7e86f4c62dfaf1ddbf836263c00b34803de164983d0811cebfac86e7
720c726d3048934c36c23189b02386a722ca9f0fe00233ab50db928d3bccea355cc681144b8b7edcaae4884d5a8f04425c0890ae2c74326e138066d8c05f4c82b29df99b034ea727afde590a1f2177ace3af99cfb1729d6539ce7f7f7314b046aab74497e63dd399e1f7d5f16517c23bd830d1fdee810f3c3b77573dd69c4b97d80d71fb5a632e00acdfa4f8e829faf3580d6a72c40b28a82172f8dcd4627663ebf6069736f21735fd84a226f427cd06bb055f94e7c92f31c48075a2955d82a5b9d2d0198ce0d4e131a112570a8ee40fb80462a81436a58e7db4e34b6e2c422e82f934ecda9949893da5730fc5c23c7c920f363f85ab28cc6a4206713c3152669b47efa8238fa826735f17b4e78750276162024ec85458cd5808e06f40dd9fd43775a456a3ff6cae90550d76d8b2899e0762ad9a371482b3e38083b1274708301d6346c22fea9bb4b73db490ff3ab05b2f7f9e187adef139a7794454b7300b8cc64d3ad76c0e4bc54e08833a4419251550655380d675bc91855aeb82585220bb97f03e976579c08f321b5f8f70988d3061f41465517d53ac571dbf1b24b94443d2e9a8e8a79b392b3d6a4ecdd7f626925c365ef6221305105ce9b5f5b6ecc5bed3d702bd4b7f5008aa8eb8c7aa3ade8ecf6251516fbefeea4e1082aa0e1848eddb31ffe44b04792d296054402826e4bd054e671f223e5557e4c94f89ca01c25c44f1a2ff2c05a70b43408250705e1b858bf0670679fdcd379203e36be3500dd981b1a6422c3cf15224f7fefdef0a5f225c5a09d15767598ecd9e262460bb33a4b5d09a64591efabc57c923d3be406979032ae0bc0997b65336a06dd75b253332ad6a8b63ef043f780a1b3fb6d0b6cad98b1ef4a02535eb39e14a866cfc5fc3a9c5deb2261300d71280ebe66a0776a151469551c3c5fa308757f956655278ec6330ae9e3625468c5f87e02cd9a6489910d4143c1f4ee13aa21a6859d907b788e28572fecee273d44e4a900fa0aa668dd861a60fb6b6b12c2c5ef3c8df1bd7ef5d4b0d1cdb8c15fffbb365b9784bd94abd001c6966216b9b67554ad7cb7f958b70092514f7800fc40244003e0fd1133a9b850fb17f4fcafde07fc87b07fb510670654a5d2d6fc9876ac74728ea41593beef003d6858786a52d3a40af7529596767c17000bfaf8dc52e871359f4ad8bf6e7b2853e5229bdf39657e213580294a5317c5df172865e1e17fe37093b585e04613f5f078f761b2b1752eb32983afda24b523af8851df9a02b37e77f543f18888a782a994a50563334282bf9cdfccc183fdf4fcd75ad86ee0d94f91ee2300a5befbccd14e03a77fc031a8cfe4f01e4c5290f5ac1da0d58ea054bd4837cfd93e5e34fc0eb16e48044ba76131f228d16cde9b0bb978ca7cdcd10
653c358bdb26fdb723a530232c32ae0a4cecc06082f46e1c1d596bfe60621ad1e354e01e07b040cc7347c016653f44d926d13ca74e6cbc9d4ab4c99f4491c95c76fff5076b3936eb9d0a286b97c035ca88a3c6309f5febfd4cdaac869e4f58ed409b1e9eb4192fb2f9c2f12176d460fd98286c9d6df84598f260119fd29c63f800c07d8df83d5cc95f8c2fea2812e7890e8a0718bb1e031ecbebc0436dcf3e3b9a58bcc06b4c17f711f80fe1dffc3326a6eb6e00283055c6dabe20d311bfd5019591b7954f8163c9afad9ef8390a38f3582e0a79cdf0353de8eeb6b5f9f27b16ffdef7dd62869b4840ee226ccdce95e02c4545eb981b60571cd83f03dc5eaf8c97a0829a4318a9b3dc06c0e003db700b2260ff1fa8fee66890e637b109abb03ec901b05ca599775f48af50154c0e67d82bf0f558d7d3e0778dc38bea1eb5f74dc8d7f90abdf5511a424be66bf8b6a3cacb477d2e7ef4db68d2eba4d5289122d851f9501ba7e9c4957d8eba3be3fc8e785c4265a1d65c46f2809b70846c693864b169c9dcb78be26ea14b8613f145b01887222979a9e67aee5f800caa6f5c4229bdeefc901232ace6143c9865e4d9c07f51aa200afaf7e48a7d1d8faf366023beab12906ffcb3eaf72c0eb68075e4daf3c080e0c31911befc16f0cc4a09908bb7c1e26abab38bd7b788e1a09c0edf1a35a38d2ff1d3ed47fcdaae2f0934224694f5b56705b9409b6d3d64f3833b686f7576ec64bbdd6ff174e56c2d1edac0011f904681a73face26573fbba4e34652f7ae84acfb2fa5a5b3046f98178cd0831df7477de70e06a4c00e305f31aafc026ef064dd68fd3e4252b1b91d617b26c6d09b6891a00df68f105b5962e7f9d82da101dd595d286da721443b72b2aba2377f6e7772e33b3a5e3753da9c2578c5d1daab80187f55518c72a64ee150a7cb5649823c08c9f62cd7d020b45ec2cba8310db1a7785a46ab24785b4d54ff1660b5ca78e05a9a55edba9c60bf044737bc468101c4e8bd1480d749be5024adefca1d998abe33eaeb6b11fbb39da5d905fdd3f611b2e51517ccee4b8af72c2d948573505590d61a6783ab7278fc43fe55b1fcc0e7216444d3c8039bb8145ef1ce01c50e95a3f3feab0aee883fdb94cc13ee4d21c542aa795e18932228981690f4d4c57ca4db6eb5c092e29d8a05139d509a8aeb48baa1eb97a76e597a32b280b5e9d6c36859064c98ff96ef5126130264fa8d2f49213870d9fb036cff95da51f270311d9976208554e48ffd486470d0ecdb4e619ccbd8226147204baf8e235f54d8b1cba8fa34a9a4d055de515cdf180d2bb6739a175183c472e30b5c914d09eeb1b7dafd6872b38b48c6afc146101200e6e6a44fe5684e220adc11f5c403ddb15df8051e6bdef09117a3a53
49938513776286473a3cf1d2788bb875052a2e6459fa7926da33380149c7f98d7700528a60c954e6f5ecb65842fde69d614be69eaa2040a4819ae6e756accf936e14c1e894489744a79c1f2c1eb295d13e2d767c09964b61f9cfe497649f712", + inIgnore: false, + midXOurs: "33a32d10066fa3963a9518a14d1bd1cb5ccaceaeaaeddb4d7aead90c08395bfd", + midXTheirs: "568146140669e69646a6ffeb3793e8010e2732209b4c34ec13e209a070109183", + midXShared: "a1017beaa8784f283dee185cd847ae3a327a981e62ae21e8c5face175fc97e9b", + midSharedSecret: "250b93570d411149105ab8cb0bc5079914906306368c23e9d77c2a33265b994c", + midInitiatorL: "4ec7daf7294a4a2c717442dd21cf2f052a3bfe9d535b55da0f66fecf87a27534", + midInitiatorP: "52ab4db9c4b06621f8ded3405691eb32465b1360d15a6b127ded4d15f9cde466", + midResponderL: "ba9906da802407ddedf6733e29f3996c62425e79d3cbfeebbd6ec4cdc7c976a8", + midResponderP: "ee661e18c97319ad071106bf35fe1085034832f70718d92f887932128b6100c7", + midSendGarbageTerm: "d4e3f18ac2e2095edb5c3b94236118ad", + midRecvGarbageTerm: "4faa6c4233d9fd53d170ede4172142a8", + outSessionID: "23f154ac43cfc59c4243e9fc68aeec8f19ad3942d74108e833b36f0dd3dcd357", + outCiphertext: "8da7de6ea7bf2a81a396a42880ba1f5756734c4821309ac9aeffa2a26ce86873b9dc4935a772de6ec5162c6d075b14536800fb174841153511bfb597e992e2fe8a450c4bce102cc550bb37fd564c4d60bf884e", + outCiphertextEndsWith: "", + }, + { + inIdx: 223, + inPrivOurs: "6c77432d1fda31e9f942f8af44607e10f3ad38a65f8a4bddae823e5eff90dc38", + inEllswiftOurs: "d2685070c1e6376e633e825296634fd461fa9e5bdf2109bcebd735e5a91f3e587c5cb782abb797fbf6bb5074fd1542a474f2a45b673763ec2db7fb99b737bbb9", + inEllswiftTheirs: "56bd0c06f10352c3a1a9f4b4c92f6fa2b26df124b57878353c1fc691c51abea77c8817daeeb9fa546b77c8daf79d89b22b0e1b87574ece42371f00237aa9d83a", + inInitiating: false, + inContents: 
"7e0e78eb6990b059e6cf0ded66ea93ef82e72aa2f18ac24f2fc6ebab561ae557420729da103f64cecfa20527e15f9fb669a49bbbf274ef0389b3e43c8c44e5f60bf2ac38e2b55e7ec4273dba15ba41d21f8f5b3ee1688b3c29951218caf847a97fb50d75a86515d445699497d968164bf740012679b8962de573be941c62b7ef", + inMultiply: 1, + inAad: "", + inIgnore: true, + midXOurs: "193d019db571162e52567e0cfdf9dd6964394f32769ae2edc4933b03b502d771", + midXTheirs: "2dd7b9cc85524f8670f695c3143ac26b45cebcabb2782a85e0fe15aee3956535", + midXShared: "5e35f94adfd57976833bffec48ef6dde983d18a55501154191ea352ef06732ee", + midSharedSecret: "1918b741ef5f9d1d7670b050c152b4a4ead2c31be9aecb0681c0cd4324150853", + midInitiatorL: "97124c56236425d792b1ec85e34b846e8d88c9b9f1d4f23ac6cdcc4c177055a0", + midInitiatorP: "8c71b468c61119415e3c1dfdd184134211951e2f623199629a46bff9673611f2", + midResponderL: "b43b8791b51ed682f56d64351601be28e478264411dcf963b14ee60b9ae427fa", + midResponderP: "794dde4b38ef04250c534a7fa638f2e8cc8b6d2c6110ec290ab0171fdf277d51", + midSendGarbageTerm: "cf2e25f23501399f30738d7eee652b90", + midRecvGarbageTerm: "225a477a28a54ea7671d2b217a9c29db", + outSessionID: "7ec02fea8c1484e3d0875f978c5f36d63545e2e4acf56311394422f4b66af612", + outCiphertext: "", + outCiphertextEndsWith: "729847a3e9eba7a5bff454b5de3b393431ee360736b6c030d7a5bd01d1203d2e98f528543fd2bf886ccaa1ada5e215a730a36b3f4abfc4e252c89eb01d9512f94916dae8a76bf16e4da28986ffe159090fe5267ee3394300b7ccf4dfad389a26321b3a3423e4594a82ccfbad16d6561ecb8772b0cb040280ff999a29e3d9d4fd", + }, + { + inIdx: 448, + inPrivOurs: "a6ec25127ca1aa4cf16b20084ba1e6516baae4d32422288e9b36d8bddd2de35a", + inEllswiftOurs: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff053d7ecca53e33e185a8b9be4e7699a97c6ff4c795522e5918ab7cd6b6884f67e683f3dc", + inEllswiftTheirs: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffa7730be30000000000000000000000000000000000000000000000000000000000000000", + inInitiating: true, + inContents: 
"00cf68f8f7ac49ffaa02c4864fdf6dfe7bbf2c740b88d98c50ebafe32c92f3427f57601ffcb21a3435979287db8fee6c302926741f9d5e464c647eeb9b7acaeda46e00abd7506fc9a719847e9a7328215801e96198dac141a15c7c2f68e0690dd1176292a0dded04d1f548aad88f1aebdc0a8f87da4bb22df32dd7c160c225b843e83f6525d6d484f502f16d923124fc538794e21da2eb689d18d87406ecced5b9f92137239ed1d37bcfa7836641a83cf5e0a1cf63f51b06f158e499a459ede41c", + inMultiply: 1, + inAad: "", + inIgnore: false, + midXOurs: "02b225089255f7b02b20276cfe9779144df8fb1957b477bff3239d802d1256e9", + midXTheirs: "5232c4b6bde9d3d45d7b763ebd7495399bb825cc21de51011761cd81a51bdc84", + midXShared: "379223d2f1ea7f8a22043c4ce4122623098309e15b1ce58286ebe3d3bf40f4e1", + midSharedSecret: "dd210aa6629f20bb328e5d89daa6eb2ac3d1c658a725536ff154f31b536c23b2", + midInitiatorL: "393472f85a5cc6b0f02c4bd466db7a2dc5b91fc9dcb15c0dd6dc21116ece8bca", + midInitiatorP: "c80b87b793db47320b2795db66d331bd3021cc24e360d59d0fa8974f54687e0c", + midResponderL: "ef16a43d77e2b270b0a145ee1618d35f3c943cc7877d6cfcff2287d41692be39", + midResponderP: "20d4b62e2d982c61bb0cc39a93283d98af36530ef12331d44b2477b0e521b490", + midSendGarbageTerm: "fead69be77825a23daec377c362aa560", + midRecvGarbageTerm: "511d4980526c5e64aa7187462faeafdd", + outSessionID: "acb8f084ea763ddd1b92ac4ed23bf44de20b84ab677d4e4e6666a6090d40353d", + outCiphertext: "", + outCiphertextEndsWith: "77b4656934a82de1a593d8481f020194ddafd8cac441f9d72aeb8721e6a14f49698ca6d9b2b6d59d07a01aa552fd4d5b68d0d1617574c77dea10bfadbaa31b83885b7ceac2fd45e3e4a331c51a74e7b1698d81b64c87c73c5b9258b4d83297f9debc2e9aa07f8572ff434dc792b83ecf07b3197de8dc9cf7be56acb59c66cff5", + }, + { + inIdx: 673, + inPrivOurs: "0af952659ed76f80f585966b95ab6e6fd68654672827878684c8b547b1b94f5a", + inEllswiftOurs: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffc81017fd92fd31637c26c906b42092e11cc0d3afae8d9019d2578af22735ce7bc469c72d", + inEllswiftTheirs: 
"9652d78baefc028cd37a6a92625b8b8f85fde1e4c944ad3f20e198bef8c02f19fffffffffffffffffffffffffffffffffffffffffffffffffffffffff2e91870", + inInitiating: false, + inContents: "5c6272ee55da855bbbf7b1246d9885aa7aa601a715ab86fa46c50da533badf82b97597c968293ae04e", + inMultiply: 97561, + inAad: "", + inIgnore: false, + midXOurs: "4b1767466fe2fb8deddf2dc52cc19c7e2032007e19bfb420b30a80152d0f22d6", + midXTheirs: "64c383e0e78ac99476ddff2061683eeefa505e3666673a1371342c3e6c26981d", + midXShared: "5bcfeac98d87e87e158bf839f1269705429f7af2a25b566a25811b5f9aef9560", + midSharedSecret: "3568f2aea2e14ef4ee4a3c2a8b8d31bc5e3187ba86db10739b4ff8ec92ff6655", + midInitiatorL: "c7df866a62b7d404eb530b2be245a7aece0fb4791402a1de8f33530cbf777cc1", + midInitiatorP: "8f732e4aae2ba9314e0982492fa47954de9c189d92fbc549763b27b1b47642ce", + midResponderL: "992085edfecb92c62a3a7f96ea416f853f34d0dfe065b966b6968b8b87a83081", + midResponderP: "c5ba5eaf9e1c807154ebab3ea472499e815a7be56dfaf0c201cf6e91ffeca8e6", + midSendGarbageTerm: "5e2375ac629b8df1e4ff3617c6255a70", + midRecvGarbageTerm: "70bcbffcb62e4d29d2605d30bceef137", + outSessionID: "7332e92a3f9d2792c4d444fac5ed888c39a073043a65eefb626318fd649328f8", + outCiphertext: "", + outCiphertextEndsWith: "657a4a19711ce593c3844cb391b224f60124aba7e04266233bc50cafb971e26c7716b76e98376448f7d214dd11e629ef9a974d60e3770a695810a61c4ba66d78b936ee7892b98f0b48ddae9fcd8b599dca1c9b43e9b95e0226cf8d4459b8a7c2c4e6db80f1d58c7b20dd7208fa5c1057fb78734223ee801dbd851db601fee61e", + }, + { + inIdx: 1024, + inPrivOurs: "f90e080c64b05824c5a24b2501d5aeaf08af3872ee860aa80bdcd430f7b63494", + inEllswiftOurs: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff115173765dc202cf029ad3f15479735d57697af12b0131dd21430d5772e4ef11474d58b9", + inEllswiftTheirs: "12a50f3fafea7c1eeada4cf8d33777704b77361453afc83bda91eef349ae044d20126c6200547ea5a6911776c05dee2a7f1a9ba7dfbabbbd273c3ef29ef46e46", + inInitiating: true, + inContents: 
"5f67d15d22ca9b2804eeab0a66f7f8e3a10fa5de5809a046084348cbc5304e843ef96f59a59c7d7fdfe5946489f3ea297d941bac326225df316a25fc90f0e65b0d31a9c497e960fdbf8c482516bc8a9c1c77b7f6d0e1143810c737f76f9224e6f2c9af5186b4f7259c7e8d165b6e4fe3d38a60bdbdd4d06ecdcaaf62086070dbb68686b802d53dfd7db14b18743832605f5461ad81e2af4b7e8ff0eff0867a25b93cec7becf15c43131895fed09a83bf1ee4a87d44dd0f02a837bf5a1232e201cb882734eb9643dc2dc4d4e8b5690840766212c7ac8f38ad8a9ec47c7a9b3e022ae3eb6a32522128b518bd0d0085dd81c5", + inMultiply: 69615, + inAad: "", + inIgnore: true, + midXOurs: "8b8de966150bf872b4b695c9983df519c909811954d5d76e99ed0d5f1860247b", + midXTheirs: "eef379db9bd4b1aa90fc347fad33f7d53083389e22e971036f59f4e29d325ac2", + midXShared: "0a402d812314646ccc2565c315d1429ec1ed130ff92ff3f48d948f29c3762cf1", + midSharedSecret: "e25461fb0e4c162e18123ecde88342d54d449631e9b75a266fd9260c2bb2f41d", + midInitiatorL: "97771ce2ce17a25c3d65bf9f8e4acb830dce8d41392be3e4b8ed902a3106681a", + midInitiatorP: "2e7022b4eae9152942f68160a93e25d3e197a557385594aa587cb5e431bb470d", + midResponderL: "613f85a82d783ce450cfd7e91a027fcc4ad5610872f83e4dbe9e2202184c6d6e", + midResponderP: "cb5de4ed1083222e381401cf88e3167796bc9ab5b8aa1f27b718f39d1e6c0e87", + midSendGarbageTerm: "b709dea25e0be287c50e3603482c2e98", + midRecvGarbageTerm: "1f677e9d7392ebe3633fd82c9efb0f16", + outSessionID: "889f339285564fd868401fac8380bb9887925122ec8f31c8ae51ce067def103b", + outCiphertext: "", + outCiphertextEndsWith: "7c4b9e1e6c1ce69da7b01513cdc4588fd93b04dafefaf87f31561763d906c672bac3dfceb751ebd126728ac017d4d580e931b8e5c7d5dfe0123be4dc9b2d2238b655c8a7fadaf8082c31e310909b5b731efc12f0a56e849eae6bfeedcc86dd27ef9b91d159256aa8e8d2b71a311f73350863d70f18d0d7302cf551e4303c7733", + }, + } + + for _, test := range tests { + inInitiating := test.inInitiating + + // We need to convert the FieldVal into a ModNScalar so that we + // can use the ScalarBaseMultNonConst. 
+ inPrivOurs := setHex(test.inPrivOurs) + inPrivOursBytes := inPrivOurs.Bytes() + + var inPrivOursScalar btcec.ModNScalar + overflow := inPrivOursScalar.SetBytes(inPrivOursBytes) + if overflow == 1 { + t.Fatalf("unexpected reduction") + } + + var inPubOurs btcec.JacobianPoint + btcec.ScalarBaseMultNonConst(&inPrivOursScalar, &inPubOurs) + inPubOurs.ToAffine() + + midXOurs := setHex(test.midXOurs) + if !midXOurs.Equals(&inPubOurs.X) { + t.Fatalf("expected mid-state to match our public key") + } + + // ellswift_decode takes in ellswift_bytes and returns a proper key. + // 1. convert from hex to bytes + bytesEllswiftOurs, err := hex.DecodeString(test.inEllswiftOurs) + if err != nil { + t.Fatalf("unexpected error decoding string") + } + + uEllswiftOurs := bytesEllswiftOurs[:32] + tEllswiftOurs := bytesEllswiftOurs[32:] + + var ( + uEllswiftOursFV btcec.FieldVal + tEllswiftOursFV btcec.FieldVal + ) + + truncated := uEllswiftOursFV.SetByteSlice(uEllswiftOurs) + if truncated { + uEllswiftOursFV.Normalize() + } + + truncated = tEllswiftOursFV.SetByteSlice(tEllswiftOurs) + if truncated { + tEllswiftOursFV.Normalize() + } + + xEllswiftOurs, err := ellswift.XSwiftEC( + &uEllswiftOursFV, &tEllswiftOursFV, + ) + if err != nil { + t.Fatalf("unexpected error during XSwiftEC") + } + + if !midXOurs.Equals(xEllswiftOurs) { + t.Fatalf("expected mid-state to match decoded " + + "ellswift key") + } + + bytesEllswiftTheirs, err := hex.DecodeString( + test.inEllswiftTheirs, + ) + if err != nil { + t.Fatalf("unexpected error decoding string") + } + + uEllswiftTheirs := bytesEllswiftTheirs[:32] + tEllswiftTheirs := bytesEllswiftTheirs[32:] + + var ( + uEllswiftTheirsFV btcec.FieldVal + tEllswiftTheirsFV btcec.FieldVal + ) + + truncated = uEllswiftTheirsFV.SetByteSlice(uEllswiftTheirs) + if truncated { + uEllswiftTheirsFV.Normalize() + } + + truncated = tEllswiftTheirsFV.SetByteSlice(tEllswiftTheirs) + if truncated { + tEllswiftTheirsFV.Normalize() + } + + xEllswiftTheirs, err := 
ellswift.XSwiftEC( + &uEllswiftTheirsFV, &tEllswiftTheirsFV, + ) + if err != nil { + t.Fatalf("unexpected error during XSwiftEC") + } + + midXTheirs := setHex(test.midXTheirs) + if !midXTheirs.Equals(xEllswiftTheirs) { + t.Fatalf("expected mid-state to match decoded " + + "ellswift key") + } + + privKeyOurs, _ := btcec.PrivKeyFromBytes((*inPrivOursBytes)[:]) + + var bytesEllswiftTheirs64 [64]byte + copy(bytesEllswiftTheirs64[:], bytesEllswiftTheirs) + + xShared, err := ellswift.EllswiftECDHXOnly( + bytesEllswiftTheirs64, privKeyOurs, + ) + if err != nil { + t.Fatalf("unexpected error when computing shared x") + } + + var xSharedFV btcec.FieldVal + overflow = xSharedFV.SetBytes(&xShared) + if overflow == 1 { + t.Fatalf("unexpected truncation") + } + + midXShared := setHex(test.midXShared) + + if !midXShared.Equals(&xSharedFV) { + t.Fatalf("expected mid-state x shared") + } + + var bytesEllswiftOurs64 [64]byte + copy(bytesEllswiftOurs64[:], bytesEllswiftOurs) + + sharedSecret, err := ellswift.V2Ecdh( + privKeyOurs, bytesEllswiftTheirs64, + bytesEllswiftOurs64, inInitiating, + ) + if err != nil { + t.Fatalf("unexpected error when calculating " + + "shared secret") + } + + midShared, err := hex.DecodeString(test.midSharedSecret) + if err != nil { + t.Fatalf("unexpected hex decode failure") + } + + if !bytes.Equal(midShared, sharedSecret[:]) { + t.Fatalf("expected mid shared secret") + } + + p := NewPeer() + + buf := bytes.NewBuffer(nil) + p.UseReadWriter(buf) + + err = p.createV2Ciphers(midShared, inInitiating, mainNet) + if err != nil { + t.Fatalf("error initiating v2 transport") + } + + midInitiatorL, err := hex.DecodeString(test.midInitiatorL) + if err != nil { + t.Fatalf("unexpected error decoding midInitiatorL") + } + + if !bytes.Equal(midInitiatorL, p.initiatorL) { + t.Fatalf("expected mid-state initiatorL to " + + "match computed value") + } + + midInitiatorP, err := hex.DecodeString(test.midInitiatorP) + if err != nil { + t.Fatalf("unexpected error decoding 
midInitiatorP") + } + + if !bytes.Equal(midInitiatorP, p.initiatorP) { + t.Fatalf("expected mid-state initiatorP to " + + "match computed value") + } + + midResponderL, err := hex.DecodeString(test.midResponderL) + if err != nil { + t.Fatalf("unexpected error decoding midResponderL") + } + + if !bytes.Equal(midResponderL, p.responderL) { + t.Fatalf("expected mid-state responderL to " + + "match computed value") + } + + midResponderP, err := hex.DecodeString(test.midResponderP) + if err != nil { + t.Fatalf("unexpected error decoding midResponderP") + } + + if !bytes.Equal(midResponderP, p.responderP) { + t.Fatalf("expected mid-state responderP to " + + "match computed value") + } + + midSendGarbageTerm, err := hex.DecodeString( + test.midSendGarbageTerm, + ) + if err != nil { + t.Fatalf("unexpected error decoding midSendGarbageTerm") + } + + if !bytes.Equal(midSendGarbageTerm, p.sendGarbageTerm[:]) { + t.Fatalf("expected mid-state sendGarbageTerm " + + "to match computed value") + } + + midRecvGarbageTerm, err := hex.DecodeString( + test.midRecvGarbageTerm, + ) + if err != nil { + t.Fatalf("unexpected error decoding midRecvGarbageTerm") + } + + if !bytes.Equal(midRecvGarbageTerm, p.recvGarbageTerm) { + t.Fatalf("expected mid-state recvGarbageTerm to " + + "match computed value") + } + + outSessionID, err := hex.DecodeString(test.outSessionID) + if err != nil { + t.Fatalf("unexpected error decoding outSessionID") + } + + if !bytes.Equal(outSessionID, p.sessionID) { + t.Fatalf("expected sessionID to match computed value") + } + + for i := 0; i < test.inIdx; i++ { + _, _, err = p.V2EncPacket([]byte{}, []byte{}, false) + if err != nil { + t.Fatalf("unexpected error while encrypting packet") + } + } + + initialContents, err := hex.DecodeString(test.inContents) + if err != nil { + t.Fatalf("unexpected error decoding contents") + } + + aad, err := hex.DecodeString(test.inAad) + if err != nil { + t.Fatalf("unexpected error decoding aad") + } + + var contents []byte + + 
copy(contents, initialContents) + + for i := 0; i < test.inMultiply; i++ { + contents = append(contents, initialContents...) + } + + ciphertext, _, err := p.V2EncPacket(contents, aad, test.inIgnore) + if err != nil { + t.Fatalf("unexpected error when encrypting packet: %v", err) + } + + if len(test.outCiphertext) != 0 { + outCiphertextBytes, err := hex.DecodeString(test.outCiphertext) + if err != nil { + t.Fatalf("unexpected error decoding outCiphertext: %v", err) + } + + if !bytes.Equal(outCiphertextBytes, ciphertext) { + t.Fatalf("ciphertext mismatch") + } + } + + if len(test.outCiphertextEndsWith) != 0 { + ciphertextHex := hex.EncodeToString(ciphertext) + if !strings.HasSuffix(ciphertextHex, test.outCiphertextEndsWith) { + t.Fatalf("suffix mismatch") + } + } + } +} diff --git a/version.go b/version.go index e02315d434..b19ea56e78 100644 --- a/version.go +++ b/version.go @@ -17,8 +17,8 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr // versioning 2.0.0 spec (http://semver.org/). const ( appMajor uint = 0 - appMinor uint = 24 - appPatch uint = 2 + appMinor uint = 25 + appPatch uint = 0 // appPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. 
diff --git a/wire/bench_test.go b/wire/bench_test.go index 2f63fa30a6..266aded24f 100644 --- a/wire/bench_test.go +++ b/wire/bench_test.go @@ -9,7 +9,6 @@ import ( "compress/bzip2" "fmt" "io" - "io/ioutil" "net" "os" "testing" @@ -592,7 +591,7 @@ func BenchmarkDeserializeTxLarge(b *testing.B) { b.Fatalf("Failed to read transaction data: %v", err) } defer fi.Close() - buf, err := ioutil.ReadAll(bzip2.NewReader(fi)) + buf, err := io.ReadAll(bzip2.NewReader(fi)) if err != nil { b.Fatalf("Failed to read transaction data: %v", err) } @@ -713,7 +712,7 @@ func BenchmarkSerializeTxLarge(b *testing.B) { b.Fatalf("Failed to read transaction data: %v", err) } defer fi.Close() - buf, err := ioutil.ReadAll(bzip2.NewReader(fi)) + buf, err := io.ReadAll(bzip2.NewReader(fi)) if err != nil { b.Fatalf("Failed to read transaction data: %v", err) } diff --git a/wire/doc.go b/wire/doc.go index 5e03ff20a1..e05bb93552 100644 --- a/wire/doc.go +++ b/wire/doc.go @@ -86,6 +86,7 @@ the following constants: wire.MainNet wire.TestNet (Regression test network) wire.TestNet3 (Test network version 3) + wire.SigNet (Signet, default) wire.SimNet (Simulation test network) # Determining Message Type diff --git a/wire/message.go b/wire/message.go index 1f412fa6fa..39c1dd0c63 100644 --- a/wire/message.go +++ b/wire/message.go @@ -1,4 +1,4 @@ -// Copyright (c) 2013-2016 The btcsuite developers +// Copyright (c) 2013-2024 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -43,7 +43,6 @@ const ( CmdHeaders = "headers" CmdPing = "ping" CmdPong = "pong" - CmdAlert = "alert" CmdMemPool = "mempool" CmdFilterAdd = "filteradd" CmdFilterClear = "filterclear" @@ -59,6 +58,63 @@ const ( CmdCFHeaders = "cfheaders" CmdCFCheckpt = "cfcheckpt" CmdSendAddrV2 = "sendaddrv2" + CmdWTxIdRelay = "wtxidrelay" +) + +var ( + v2MessageIDs = map[uint8]string{ + 1: CmdAddr, + 2: CmdBlock, + 5: CmdFeeFilter, + 6: CmdFilterAdd, + 7: CmdFilterClear, + 8: CmdFilterLoad, + 9: CmdGetBlocks, + 11: CmdGetData, + 12: CmdGetHeaders, + 13: CmdHeaders, + 14: CmdInv, + 15: CmdMemPool, + 16: CmdMerkleBlock, + 17: CmdNotFound, + 18: CmdPing, + 19: CmdPong, + 21: CmdTx, + 22: CmdGetCFilters, + 23: CmdCFilter, + 24: CmdGetCFHeaders, + 25: CmdCFHeaders, + 26: CmdGetCFCheckpt, + 27: CmdCFCheckpt, + 28: CmdAddrV2, + } + + v2Messages = map[string]uint8{ + CmdAddr: 1, + CmdBlock: 2, + CmdFeeFilter: 5, + CmdFilterAdd: 6, + CmdFilterClear: 7, + CmdFilterLoad: 8, + CmdGetBlocks: 9, + CmdGetData: 11, + CmdGetHeaders: 12, + CmdHeaders: 13, + CmdInv: 14, + CmdMemPool: 15, + CmdMerkleBlock: 16, + CmdNotFound: 17, + CmdPing: 18, + CmdPong: 19, + CmdTx: 21, + CmdGetCFilters: 22, + CmdCFilter: 23, + CmdGetCFHeaders: 24, + CmdCFHeaders: 25, + CmdGetCFCheckpt: 26, + CmdCFCheckpt: 27, + CmdAddrV2: 28, + } ) // MessageEncoding represents the wire message encoding format to be used. @@ -150,9 +206,6 @@ func makeEmptyMessage(command string) (Message, error) { case CmdHeaders: msg = &MsgHeaders{} - case CmdAlert: - msg = &MsgAlert{} - case CmdMemPool: msg = &MsgMemPool{} @@ -233,6 +286,33 @@ func readMessageHeader(r io.Reader) (int, *messageHeader, error) { return n, &hdr, nil } +// readPartialHeader reads a partial bitcon message header from r. It takes a +// prefix that contains the already-parsed bytes. This is needed in the case of +// a downgraded v2->v1 transport connection since we have already started +// parsing the header bytes when calling into this function. 
+func readPartialHeader(prefix []byte, r io.Reader) (int, *messageHeader, + error) { + + // Fill out the messageHeader with the network magic from the prefix. + hdr := messageHeader{} + + var command [CommandSize]byte + prefixReader := bytes.NewReader(prefix[:]) + if err := readElements(prefixReader, &hdr.magic, &command); err != nil { + return 0, nil, err + } + + // Strip trailing zeros from command string. + hdr.command = string(bytes.TrimRight(command[:], "\x00")) + + // Read the rest of the message header from the passed-in reader. + if err := readElements(r, &hdr.length, &hdr.checksum); err != nil { + return 0, nil, err + } + + return MessageHeaderSize, &hdr, nil +} + // discardInput reads n bytes from reader r in chunks and discards the read // bytes. This is used to skip payloads when various errors occur and helps // prevent rogue nodes from causing massive memory allocation through forging @@ -270,6 +350,80 @@ func WriteMessage(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) erro return err } +// WriteV2MessageN writes a Message to the passed Writer using the bip324 +// v2 encoding. 
+func WriteV2MessageN(w io.Writer, msg Message, pver uint32, + encoding MessageEncoding) (int, error) { + + var totalBytes int + + cmd := msg.Command() + if len(cmd) > CommandSize { + str := fmt.Sprintf("command [%s] is too long [max %v]", + cmd, CommandSize) + return totalBytes, messageError("WriteMessage", str) + } + + index, exists := v2Messages[cmd] + if !exists { + var command [CommandSize]byte + copy(command[:], cmd) + hw := bytes.NewBuffer(make([]byte, 0, CommandSize+1)) + writeElements(hw, byte(0x00), command) + + n, err := w.Write(hw.Bytes()) + if err != nil { + return 0, err + } + + totalBytes += n + } else { + hw := bytes.NewBuffer(make([]byte, 0, 1)) + writeElement(hw, index) + + n, err := w.Write(hw.Bytes()) + if err != nil { + return 0, err + } + + totalBytes += n + } + + var bw bytes.Buffer + err := msg.BtcEncode(&bw, pver, encoding) + if err != nil { + return totalBytes, err + } + + payload := bw.Bytes() + lenp := len(payload) + + // Enforce maximum overall message payload. + if lenp > MaxMessagePayload { + str := fmt.Sprintf("message payload is too large - encoded "+ + "%d bytes, but maximum message payload is %d bytes", + lenp, MaxMessagePayload) + return totalBytes, messageError("WriteMessage", str) + } + + mpl := msg.MaxPayloadLength(pver) + if uint32(lenp) > mpl { + str := fmt.Sprintf("message payload is too large - encoded "+ + "%d bytes, but maximum message payload size for "+ + "messages of type [%s] is %d.", lenp, cmd, mpl) + return totalBytes, messageError("WriteMessage", str) + } + + if len(payload) > 0 { + n, err := w.Write(payload) + totalBytes += n + + return totalBytes, err + } + + return totalBytes, nil +} + // WriteMessageWithEncodingN writes a bitcoin Message to w including the // necessary header information and returns the number of bytes written. 
// This function is the same as WriteMessageN except it also allows the caller @@ -346,6 +500,57 @@ func WriteMessageWithEncodingN(w io.Writer, msg Message, pver uint32, return totalBytes, err } +// ReadV2MessageN takes the passed plaintext and attempts to construct a +// Message from the bytes using the bip324 v2 encoding. +func ReadV2MessageN(plaintext []byte, pver uint32, enc MessageEncoding) ( + Message, []byte, error) { + + if len(plaintext) == 0 { + return nil, nil, fmt.Errorf("invalid plaintext length") + } + + var msgCmd string + + // If the first byte is 0x00, read the next 12 bytes to determine what + // message this is. + if plaintext[0] == 0x00 { + if len(plaintext) < CommandSize+1 { + return nil, nil, fmt.Errorf("invalid plaintext length") + } + + // Slice off the first 0x00 and the trailing 0x00 bytes. + var command [CommandSize]byte + copy(command[:], plaintext[1:CommandSize+1]) + + msgCmd = string(bytes.TrimRight(command[:], "\x00")) + + plaintext = plaintext[CommandSize+1:] + } else { + // The first byte denotes what message this is. + msgCmd = v2MessageIDs[plaintext[0]] + + plaintext = plaintext[1:] + } + + msg, err := makeEmptyMessage(msgCmd) + if err != nil { + return nil, nil, err + } + + mpl := msg.MaxPayloadLength(pver) + if len(plaintext) > int(mpl) { + return nil, nil, fmt.Errorf("payload exceeds max length") + } + + buf := bytes.NewBuffer(plaintext) + err = msg.BtcDecode(buf, pver, enc) + if err != nil { + return nil, nil, err + } + + return msg, plaintext, nil +} + // ReadMessageWithEncodingN reads, validates, and parses the next bitcoin Message // from r for the provided protocol version and bitcoin network. 
It returns the // number of bytes read in addition to the parsed Message and raw bytes which @@ -362,6 +567,38 @@ func ReadMessageWithEncodingN(r io.Reader, pver uint32, btcnet BitcoinNet, return totalBytes, nil, nil, err } + return readMessageWithEncodingNInternal( + r, pver, hdr, btcnet, enc, totalBytes, + ) +} + +// ReadPartialMessageWithEncodingN is used in the case that we are expecting a +// v2 connection and then receive a v1 version header. In this case, we +// downgrade the implicit v2 connection to a v1 connection and must parse the +// rest of the version bytes and check it properly. +func ReadPartialMessageWithEncodingN(r io.Reader, pver uint32, + btcnet BitcoinNet, enc MessageEncoding, prefix []byte) (int, Message, + []byte, error) { + + totalBytes := 0 + n, hdr, err := readPartialHeader(prefix, r) + totalBytes += n + if err != nil { + return totalBytes, nil, nil, err + } + + return readMessageWithEncodingNInternal( + r, pver, hdr, btcnet, enc, totalBytes, + ) +} + +// readMessageWithEncodingNInternal is used to deduplicate the code because we +// typically parse messages and headers all at once except in the case of a +// downgraded v2->v1 conncection. +func readMessageWithEncodingNInternal(r io.Reader, pver uint32, + hdr *messageHeader, btcnet BitcoinNet, enc MessageEncoding, + totalBytes int) (int, Message, []byte, error) { + // Enforce maximum message payload. if hdr.length > MaxMessagePayload { str := fmt.Sprintf("message payload is too large - header "+ @@ -409,7 +646,7 @@ func ReadMessageWithEncodingN(r io.Reader, pver uint32, btcnet BitcoinNet, // Read payload. 
payload := make([]byte, hdr.length) - n, err = io.ReadFull(r, payload) + n, err := io.ReadFull(r, payload) totalBytes += n if err != nil { return totalBytes, nil, nil, err diff --git a/wire/message_test.go b/wire/message_test.go index 7ba2e0639f..a9c8389505 100644 --- a/wire/message_test.go +++ b/wire/message_test.go @@ -61,7 +61,6 @@ func TestMessage(t *testing.T) { msgPong := NewMsgPong(123123) msgGetHeaders := NewMsgGetHeaders() msgHeaders := NewMsgHeaders() - msgAlert := NewMsgAlert([]byte("payload"), []byte("signature")) msgMemPool := NewMsgMemPool() msgFilterAdd := NewMsgFilterAdd([]byte{0x01}) msgFilterClear := NewMsgFilterClear() @@ -98,7 +97,6 @@ func TestMessage(t *testing.T) { {msgPong, msgPong, pver, MainNet, 32}, {msgGetHeaders, msgGetHeaders, pver, MainNet, 61}, {msgHeaders, msgHeaders, pver, MainNet, 25}, - {msgAlert, msgAlert, pver, MainNet, 42}, {msgMemPool, msgMemPool, pver, MainNet, 24}, {msgFilterAdd, msgFilterAdd, pver, MainNet, 26}, {msgFilterClear, msgFilterClear, pver, MainNet, 24}, diff --git a/wire/msgaddrv2.go b/wire/msgaddrv2.go index 4db4a1334a..5303d6a811 100644 --- a/wire/msgaddrv2.go +++ b/wire/msgaddrv2.go @@ -38,14 +38,14 @@ func (m *MsgAddrV2) BtcDecode(r io.Reader, pver uint32, for i := uint64(0); i < count; i++ { na := &addrList[i] err := readNetAddressV2(r, pver, na) - switch err { - case ErrSkippedNetworkID: - // This may be a network ID we don't know of, but is - // still valid. We can safely skip those. - continue - case ErrInvalidAddressSize: - // The encoding used by the peer does not follow - // BIP-155 and we should stop processing this message. + if err != nil { + // A network address of a type we don't know of may be + // safely skipped. All other errors mean we should stop + // processing the message. 
+ if err == ErrSkippedNetworkID { + continue + } + return err } @@ -61,8 +61,8 @@ func (m *MsgAddrV2) BtcEncode(w io.Writer, pver uint32, count := len(m.AddrList) if count > MaxV2AddrPerMsg { - str := fmt.Sprintf("too many addresses for message [count %v,"+ - " max %v]", count, MaxV2AddrPerMsg) + str := fmt.Sprintf("too many addresses for message: "+ + "got %v, max %v", count, MaxV2AddrPerMsg) return messageError("MsgAddrV2.BtcEncode", str) } diff --git a/wire/msgaddrv2_test.go b/wire/msgaddrv2_test.go index 213d699c96..40abd567f5 100644 --- a/wire/msgaddrv2_test.go +++ b/wire/msgaddrv2_test.go @@ -41,6 +41,15 @@ func TestAddrV2Decode(t *testing.T) { false, 1, }, + // Truncated address. + { + []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, + 0x7f, 0x00, 0x00, + }, + true, + 0, + }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgalert.go b/wire/msgalert.go deleted file mode 100644 index b99ac89de9..0000000000 --- a/wire/msgalert.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright (c) 2013-2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package wire - -import ( - "bytes" - "fmt" - "io" -) - -// MsgAlert contains a payload and a signature: -// -// =============================================== -// | Field | Data Type | Size | -// =============================================== -// | payload | []uchar | ? | -// ----------------------------------------------- -// | signature | []uchar | ? | -// ----------------------------------------------- -// -// Here payload is an Alert serialized into a byte array to ensure that -// versions using incompatible alert formats can still relay -// alerts among one another. 
-// -// An Alert is the payload deserialized as follows: -// -// =============================================== -// | Field | Data Type | Size | -// =============================================== -// | Version | int32 | 4 | -// ----------------------------------------------- -// | RelayUntil | int64 | 8 | -// ----------------------------------------------- -// | Expiration | int64 | 8 | -// ----------------------------------------------- -// | ID | int32 | 4 | -// ----------------------------------------------- -// | Cancel | int32 | 4 | -// ----------------------------------------------- -// | SetCancel | set | ? | -// ----------------------------------------------- -// | MinVer | int32 | 4 | -// ----------------------------------------------- -// | MaxVer | int32 | 4 | -// ----------------------------------------------- -// | SetSubVer | set | ? | -// ----------------------------------------------- -// | Priority | int32 | 4 | -// ----------------------------------------------- -// | Comment | string | ? | -// ----------------------------------------------- -// | StatusBar | string | ? | -// ----------------------------------------------- -// | Reserved | string | ? | -// ----------------------------------------------- -// | Total (Fixed) | 45 | -// ----------------------------------------------- -// -// NOTE: -// * string is a VarString i.e VarInt length followed by the string itself -// * set is a VarInt followed by as many number of strings -// * set is a VarInt followed by as many number of ints -// * fixedAlertSize = 40 + 5*min(VarInt) = 40 + 5*1 = 45 -// -// Now we can define bounds on Alert size, SetCancel and SetSubVer - -// Fixed size of the alert payload -const fixedAlertSize = 45 - -// maxSignatureSize is the max size of an ECDSA signature. -// NOTE: Since this size is fixed and < 255, the size of VarInt required = 1. -const maxSignatureSize = 72 - -// maxAlertSize is the maximum size an alert. 
-// -// MessagePayload = VarInt(Alert) + Alert + VarInt(Signature) + Signature -// MaxMessagePayload = maxAlertSize + max(VarInt) + maxSignatureSize + 1 -const maxAlertSize = MaxMessagePayload - maxSignatureSize - MaxVarIntPayload - 1 - -// maxCountSetCancel is the maximum number of cancel IDs that could possibly -// fit into a maximum size alert. -// -// maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string) -// for calculating maximum number of cancel IDs, set all other var sizes to 0 -// maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(int32) -// x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4 -const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4 - -// maxCountSetSubVer is the maximum number of subversions that could possibly -// fit into a maximum size alert. -// -// maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string) -// for calculating maximum number of subversions, set all other var sizes to 0 -// maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(string) -// x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / sizeOf(string) -// subversion would typically be something like "/Satoshi:0.7.2/" (15 bytes) -// so assuming < 255 bytes, sizeOf(string) = sizeOf(uint8) + 255 = 256 -const maxCountSetSubVer = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 256 - -// Alert contains the data deserialized from the MsgAlert payload. 
-type Alert struct { - // Alert format version - Version int32 - - // Timestamp beyond which nodes should stop relaying this alert - RelayUntil int64 - - // Timestamp beyond which this alert is no longer in effect and - // should be ignored - Expiration int64 - - // A unique ID number for this alert - ID int32 - - // All alerts with an ID less than or equal to this number should - // cancelled, deleted and not accepted in the future - Cancel int32 - - // All alert IDs contained in this set should be cancelled as above - SetCancel []int32 - - // This alert only applies to versions greater than or equal to this - // version. Other versions should still relay it. - MinVer int32 - - // This alert only applies to versions less than or equal to this version. - // Other versions should still relay it. - MaxVer int32 - - // If this set contains any elements, then only nodes that have their - // subVer contained in this set are affected by the alert. Other versions - // should still relay it. - SetSubVer []string - - // Relative priority compared to other alerts - Priority int32 - - // A comment on the alert that is not displayed - Comment string - - // The alert message that is displayed to the user - StatusBar string - - // Reserved - Reserved string -} - -// Serialize encodes the alert to w using the alert protocol encoding format. 
-func (alert *Alert) Serialize(w io.Writer, pver uint32) error { - err := writeElements(w, alert.Version, alert.RelayUntil, - alert.Expiration, alert.ID, alert.Cancel) - if err != nil { - return err - } - - count := len(alert.SetCancel) - if count > maxCountSetCancel { - str := fmt.Sprintf("too many cancel alert IDs for alert "+ - "[count %v, max %v]", count, maxCountSetCancel) - return messageError("Alert.Serialize", str) - } - err = WriteVarInt(w, pver, uint64(count)) - if err != nil { - return err - } - for i := 0; i < count; i++ { - err = writeElement(w, alert.SetCancel[i]) - if err != nil { - return err - } - } - - err = writeElements(w, alert.MinVer, alert.MaxVer) - if err != nil { - return err - } - - count = len(alert.SetSubVer) - if count > maxCountSetSubVer { - str := fmt.Sprintf("too many sub versions for alert "+ - "[count %v, max %v]", count, maxCountSetSubVer) - return messageError("Alert.Serialize", str) - } - err = WriteVarInt(w, pver, uint64(count)) - if err != nil { - return err - } - for i := 0; i < count; i++ { - err = WriteVarString(w, pver, alert.SetSubVer[i]) - if err != nil { - return err - } - } - - err = writeElement(w, alert.Priority) - if err != nil { - return err - } - err = WriteVarString(w, pver, alert.Comment) - if err != nil { - return err - } - err = WriteVarString(w, pver, alert.StatusBar) - if err != nil { - return err - } - return WriteVarString(w, pver, alert.Reserved) -} - -// Deserialize decodes from r into the receiver using the alert protocol -// encoding format. 
-func (alert *Alert) Deserialize(r io.Reader, pver uint32) error { - err := readElements(r, &alert.Version, &alert.RelayUntil, - &alert.Expiration, &alert.ID, &alert.Cancel) - if err != nil { - return err - } - - // SetCancel: first read a VarInt that contains - // count - the number of Cancel IDs, then - // iterate count times and read them - count, err := ReadVarInt(r, pver) - if err != nil { - return err - } - if count > maxCountSetCancel { - str := fmt.Sprintf("too many cancel alert IDs for alert "+ - "[count %v, max %v]", count, maxCountSetCancel) - return messageError("Alert.Deserialize", str) - } - alert.SetCancel = make([]int32, count) - for i := 0; i < int(count); i++ { - err := readElement(r, &alert.SetCancel[i]) - if err != nil { - return err - } - } - - err = readElements(r, &alert.MinVer, &alert.MaxVer) - if err != nil { - return err - } - - // SetSubVer: similar to SetCancel - // but read count number of sub-version strings - count, err = ReadVarInt(r, pver) - if err != nil { - return err - } - if count > maxCountSetSubVer { - str := fmt.Sprintf("too many sub versions for alert "+ - "[count %v, max %v]", count, maxCountSetSubVer) - return messageError("Alert.Deserialize", str) - } - alert.SetSubVer = make([]string, count) - for i := 0; i < int(count); i++ { - alert.SetSubVer[i], err = ReadVarString(r, pver) - if err != nil { - return err - } - } - - err = readElement(r, &alert.Priority) - if err != nil { - return err - } - alert.Comment, err = ReadVarString(r, pver) - if err != nil { - return err - } - alert.StatusBar, err = ReadVarString(r, pver) - if err != nil { - return err - } - alert.Reserved, err = ReadVarString(r, pver) - return err -} - -// NewAlert returns an new Alert with values provided. 
-func NewAlert(version int32, relayUntil int64, expiration int64, - id int32, cancel int32, setCancel []int32, minVer int32, - maxVer int32, setSubVer []string, priority int32, comment string, - statusBar string) *Alert { - return &Alert{ - Version: version, - RelayUntil: relayUntil, - Expiration: expiration, - ID: id, - Cancel: cancel, - SetCancel: setCancel, - MinVer: minVer, - MaxVer: maxVer, - SetSubVer: setSubVer, - Priority: priority, - Comment: comment, - StatusBar: statusBar, - Reserved: "", - } -} - -// NewAlertFromPayload returns an Alert with values deserialized from the -// serialized payload. -func NewAlertFromPayload(serializedPayload []byte, pver uint32) (*Alert, error) { - var alert Alert - r := bytes.NewReader(serializedPayload) - err := alert.Deserialize(r, pver) - if err != nil { - return nil, err - } - return &alert, nil -} - -// MsgAlert implements the Message interface and defines a bitcoin alert -// message. -// -// This is a signed message that provides notifications that the client should -// display if the signature matches the key. bitcoind/bitcoin-qt only checks -// against a signature from the core developers. -type MsgAlert struct { - // SerializedPayload is the alert payload serialized as a string so that the - // version can change but the Alert can still be passed on by older - // clients. - SerializedPayload []byte - - // Signature is the ECDSA signature of the message. - Signature []byte - - // Deserialized Payload - Payload *Alert -} - -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. -// This is part of the Message interface implementation. 
-func (msg *MsgAlert) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - var err error - - msg.SerializedPayload, err = ReadVarBytes(r, pver, MaxMessagePayload, - "alert serialized payload") - if err != nil { - return err - } - - msg.Payload, err = NewAlertFromPayload(msg.SerializedPayload, pver) - if err != nil { - msg.Payload = nil - } - - msg.Signature, err = ReadVarBytes(r, pver, MaxMessagePayload, - "alert signature") - return err -} - -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. -// This is part of the Message interface implementation. -func (msg *MsgAlert) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { - var err error - var serializedpayload []byte - if msg.Payload != nil { - // try to Serialize Payload if possible - r := new(bytes.Buffer) - err = msg.Payload.Serialize(r, pver) - if err != nil { - // Serialize failed - ignore & fallback - // to SerializedPayload - serializedpayload = msg.SerializedPayload - } else { - serializedpayload = r.Bytes() - } - } else { - serializedpayload = msg.SerializedPayload - } - slen := uint64(len(serializedpayload)) - if slen == 0 { - return messageError("MsgAlert.BtcEncode", "empty serialized payload") - } - err = WriteVarBytes(w, pver, serializedpayload) - if err != nil { - return err - } - return WriteVarBytes(w, pver, msg.Signature) -} - -// Command returns the protocol command string for the message. This is part -// of the Message interface implementation. -func (msg *MsgAlert) Command() string { - return CmdAlert -} - -// MaxPayloadLength returns the maximum length the payload can be for the -// receiver. This is part of the Message interface implementation. -func (msg *MsgAlert) MaxPayloadLength(pver uint32) uint32 { - // Since this can vary depending on the message, make it the max - // size allowed. - return MaxMessagePayload -} - -// NewMsgAlert returns a new bitcoin alert message that conforms to the Message -// interface. See MsgAlert for details. 
-func NewMsgAlert(serializedPayload []byte, signature []byte) *MsgAlert { - return &MsgAlert{ - SerializedPayload: serializedPayload, - Signature: signature, - Payload: nil, - } -} diff --git a/wire/msgalert_test.go b/wire/msgalert_test.go deleted file mode 100644 index 7c34419d28..0000000000 --- a/wire/msgalert_test.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package wire - -import ( - "bytes" - "io" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// TestMsgAlert tests the MsgAlert API. -func TestMsgAlert(t *testing.T) { - pver := ProtocolVersion - encoding := BaseEncoding - serializedpayload := []byte("some message") - signature := []byte("some sig") - - // Ensure we get the same payload and signature back out. - msg := NewMsgAlert(serializedpayload, signature) - if !reflect.DeepEqual(msg.SerializedPayload, serializedpayload) { - t.Errorf("NewMsgAlert: wrong serializedpayload - got %v, want %v", - msg.SerializedPayload, serializedpayload) - } - if !reflect.DeepEqual(msg.Signature, signature) { - t.Errorf("NewMsgAlert: wrong signature - got %v, want %v", - msg.Signature, signature) - } - - // Ensure the command is expected value. - wantCmd := "alert" - if cmd := msg.Command(); cmd != wantCmd { - t.Errorf("NewMsgAlert: wrong command - got %v want %v", - cmd, wantCmd) - } - - // Ensure max payload is expected value. 
- wantPayload := uint32(1024 * 1024 * 32) - maxPayload := msg.MaxPayloadLength(pver) - if maxPayload != wantPayload { - t.Errorf("MaxPayloadLength: wrong max payload length for "+ - "protocol version %d - got %v, want %v", pver, - maxPayload, wantPayload) - } - - // Test BtcEncode with Payload == nil - var buf bytes.Buffer - err := msg.BtcEncode(&buf, pver, encoding) - if err != nil { - t.Error(err.Error()) - } - // expected = 0x0c + serializedpayload + 0x08 + signature - expectedBuf := append([]byte{0x0c}, serializedpayload...) - expectedBuf = append(expectedBuf, []byte{0x08}...) - expectedBuf = append(expectedBuf, signature...) - if !bytes.Equal(buf.Bytes(), expectedBuf) { - t.Errorf("BtcEncode got: %s want: %s", - spew.Sdump(buf.Bytes()), spew.Sdump(expectedBuf)) - } - - // Test BtcEncode with Payload != nil - // note: Payload is an empty Alert but not nil - msg.Payload = new(Alert) - buf = *new(bytes.Buffer) - err = msg.BtcEncode(&buf, pver, encoding) - if err != nil { - t.Error(err.Error()) - } - // empty Alert is 45 null bytes, see Alert comments - // for details - // expected = 0x2d + 45*0x00 + 0x08 + signature - expectedBuf = append([]byte{0x2d}, bytes.Repeat([]byte{0x00}, 45)...) - expectedBuf = append(expectedBuf, []byte{0x08}...) - expectedBuf = append(expectedBuf, signature...) - if !bytes.Equal(buf.Bytes(), expectedBuf) { - t.Errorf("BtcEncode got: %s want: %s", - spew.Sdump(buf.Bytes()), spew.Sdump(expectedBuf)) - } -} - -// TestMsgAlertWire tests the MsgAlert wire encode and decode for various protocol -// versions. 
-func TestMsgAlertWire(t *testing.T) { - baseMsgAlert := NewMsgAlert([]byte("some payload"), []byte("somesig")) - baseMsgAlertEncoded := []byte{ - 0x0c, // Varint for payload length - 0x73, 0x6f, 0x6d, 0x65, 0x20, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, // "some payload" - 0x07, // Varint for signature length - 0x73, 0x6f, 0x6d, 0x65, 0x73, 0x69, 0x67, // "somesig" - } - - tests := []struct { - in *MsgAlert // Message to encode - out *MsgAlert // Expected decoded message - buf []byte // Wire encoding - pver uint32 // Protocol version for wire encoding - enc MessageEncoding // Message encoding format - }{ - // Latest protocol version. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - ProtocolVersion, - BaseEncoding, - }, - - // Protocol version BIP0035Version. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - BIP0035Version, - BaseEncoding, - }, - - // Protocol version BIP0031Version. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - BIP0031Version, - BaseEncoding, - }, - - // Protocol version NetAddressTimeVersion. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - NetAddressTimeVersion, - BaseEncoding, - }, - - // Protocol version MultipleAddressVersion. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - MultipleAddressVersion, - BaseEncoding, - }, - } - - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - // Encode the message to wire format. - var buf bytes.Buffer - err := test.in.BtcEncode(&buf, test.pver, test.enc) - if err != nil { - t.Errorf("BtcEncode #%d error %v", i, err) - continue - } - if !bytes.Equal(buf.Bytes(), test.buf) { - t.Errorf("BtcEncode #%d\n got: %s want: %s", i, - spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) - continue - } - - // Decode the message from wire format. 
- var msg MsgAlert - rbuf := bytes.NewReader(test.buf) - err = msg.BtcDecode(rbuf, test.pver, test.enc) - if err != nil { - t.Errorf("BtcDecode #%d error %v", i, err) - continue - } - if !reflect.DeepEqual(&msg, test.out) { - t.Errorf("BtcDecode #%d\n got: %s want: %s", i, - spew.Sdump(msg), spew.Sdump(test.out)) - continue - } - } -} - -// TestMsgAlertWireErrors performs negative tests against wire encode and decode -// of MsgAlert to confirm error paths work correctly. -func TestMsgAlertWireErrors(t *testing.T) { - pver := ProtocolVersion - encoding := BaseEncoding - - baseMsgAlert := NewMsgAlert([]byte("some payload"), []byte("somesig")) - baseMsgAlertEncoded := []byte{ - 0x0c, // Varint for payload length - 0x73, 0x6f, 0x6d, 0x65, 0x20, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, // "some payload" - 0x07, // Varint for signature length - 0x73, 0x6f, 0x6d, 0x65, 0x73, 0x69, 0x67, // "somesig" - } - - tests := []struct { - in *MsgAlert // Value to encode - buf []byte // Wire encoding - pver uint32 // Protocol version for wire encoding - enc MessageEncoding // Message encoding format - max int // Max size of fixed buffer to induce errors - writeErr error // Expected write error - readErr error // Expected read error - }{ - // Force error in payload length. - {baseMsgAlert, baseMsgAlertEncoded, pver, BaseEncoding, 0, io.ErrShortWrite, io.EOF}, - // Force error in payload. - {baseMsgAlert, baseMsgAlertEncoded, pver, BaseEncoding, 1, io.ErrShortWrite, io.EOF}, - // Force error in signature length. - {baseMsgAlert, baseMsgAlertEncoded, pver, BaseEncoding, 13, io.ErrShortWrite, io.EOF}, - // Force error in signature. - {baseMsgAlert, baseMsgAlertEncoded, pver, BaseEncoding, 14, io.ErrShortWrite, io.EOF}, - } - - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - // Encode to wire format. 
- w := newFixedWriter(test.max) - err := test.in.BtcEncode(w, test.pver, test.enc) - if reflect.TypeOf(err) != reflect.TypeOf(test.writeErr) { - t.Errorf("BtcEncode #%d wrong error got: %v, want: %v", - i, err, test.writeErr) - continue - } - - // For errors which are not of type MessageError, check them for - // equality. - if _, ok := err.(*MessageError); !ok { - if err != test.writeErr { - t.Errorf("BtcEncode #%d wrong error got: %v, "+ - "want: %v", i, err, test.writeErr) - continue - } - } - - // Decode from wire format. - var msg MsgAlert - r := newFixedReader(test.max, test.buf) - err = msg.BtcDecode(r, test.pver, test.enc) - if reflect.TypeOf(err) != reflect.TypeOf(test.readErr) { - t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", - i, err, test.readErr) - continue - } - - // For errors which are not of type MessageError, check them for - // equality. - if _, ok := err.(*MessageError); !ok { - if err != test.readErr { - t.Errorf("BtcDecode #%d wrong error got: %v, "+ - "want: %v", i, err, test.readErr) - continue - } - } - } - - // Test Error on empty Payload - baseMsgAlert.SerializedPayload = []byte{} - w := new(bytes.Buffer) - err := baseMsgAlert.BtcEncode(w, pver, encoding) - if _, ok := err.(*MessageError); !ok { - t.Errorf("MsgAlert.BtcEncode wrong error got: %T, want: %T", - err, MessageError{}) - } - - // Test Payload Serialize error - // overflow the max number of elements in SetCancel - baseMsgAlert.Payload = new(Alert) - baseMsgAlert.Payload.SetCancel = make([]int32, maxCountSetCancel+1) - buf := *new(bytes.Buffer) - err = baseMsgAlert.BtcEncode(&buf, pver, encoding) - if _, ok := err.(*MessageError); !ok { - t.Errorf("MsgAlert.BtcEncode wrong error got: %T, want: %T", - err, MessageError{}) - } - - // overflow the max number of elements in SetSubVer - baseMsgAlert.Payload = new(Alert) - baseMsgAlert.Payload.SetSubVer = make([]string, maxCountSetSubVer+1) - buf = *new(bytes.Buffer) - err = baseMsgAlert.BtcEncode(&buf, pver, encoding) - if 
_, ok := err.(*MessageError); !ok { - t.Errorf("MsgAlert.BtcEncode wrong error got: %T, want: %T", - err, MessageError{}) - } -} - -// TestAlert tests serialization and deserialization -// of the payload to Alert -func TestAlert(t *testing.T) { - pver := ProtocolVersion - alert := NewAlert( - 1, 1337093712, 1368628812, 1015, - 1013, []int32{1014}, 0, 40599, []string{"/Satoshi:0.7.2/"}, 5000, "", - "URGENT: upgrade required, see http://bitcoin.org/dos for details", - ) - w := new(bytes.Buffer) - err := alert.Serialize(w, pver) - if err != nil { - t.Error(err.Error()) - } - serializedpayload := w.Bytes() - newAlert, err := NewAlertFromPayload(serializedpayload, pver) - if err != nil { - t.Error(err.Error()) - } - - if alert.Version != newAlert.Version { - t.Errorf("NewAlertFromPayload: wrong Version - got %v, want %v ", - alert.Version, newAlert.Version) - } - if alert.RelayUntil != newAlert.RelayUntil { - t.Errorf("NewAlertFromPayload: wrong RelayUntil - got %v, want %v ", - alert.RelayUntil, newAlert.RelayUntil) - } - if alert.Expiration != newAlert.Expiration { - t.Errorf("NewAlertFromPayload: wrong Expiration - got %v, want %v ", - alert.Expiration, newAlert.Expiration) - } - if alert.ID != newAlert.ID { - t.Errorf("NewAlertFromPayload: wrong ID - got %v, want %v ", - alert.ID, newAlert.ID) - } - if alert.Cancel != newAlert.Cancel { - t.Errorf("NewAlertFromPayload: wrong Cancel - got %v, want %v ", - alert.Cancel, newAlert.Cancel) - } - if len(alert.SetCancel) != len(newAlert.SetCancel) { - t.Errorf("NewAlertFromPayload: wrong number of SetCancel - got %v, want %v ", - len(alert.SetCancel), len(newAlert.SetCancel)) - } - for i := 0; i < len(alert.SetCancel); i++ { - if alert.SetCancel[i] != newAlert.SetCancel[i] { - t.Errorf("NewAlertFromPayload: wrong SetCancel[%v] - got %v, want %v ", - len(alert.SetCancel), alert.SetCancel[i], newAlert.SetCancel[i]) - } - } - if alert.MinVer != newAlert.MinVer { - t.Errorf("NewAlertFromPayload: wrong MinVer - got %v, want %v 
", - alert.MinVer, newAlert.MinVer) - } - if alert.MaxVer != newAlert.MaxVer { - t.Errorf("NewAlertFromPayload: wrong MaxVer - got %v, want %v ", - alert.MaxVer, newAlert.MaxVer) - } - if len(alert.SetSubVer) != len(newAlert.SetSubVer) { - t.Errorf("NewAlertFromPayload: wrong number of SetSubVer - got %v, want %v ", - len(alert.SetSubVer), len(newAlert.SetSubVer)) - } - for i := 0; i < len(alert.SetSubVer); i++ { - if alert.SetSubVer[i] != newAlert.SetSubVer[i] { - t.Errorf("NewAlertFromPayload: wrong SetSubVer[%v] - got %v, want %v ", - len(alert.SetSubVer), alert.SetSubVer[i], newAlert.SetSubVer[i]) - } - } - if alert.Priority != newAlert.Priority { - t.Errorf("NewAlertFromPayload: wrong Priority - got %v, want %v ", - alert.Priority, newAlert.Priority) - } - if alert.Comment != newAlert.Comment { - t.Errorf("NewAlertFromPayload: wrong Comment - got %v, want %v ", - alert.Comment, newAlert.Comment) - } - if alert.StatusBar != newAlert.StatusBar { - t.Errorf("NewAlertFromPayload: wrong StatusBar - got %v, want %v ", - alert.StatusBar, newAlert.StatusBar) - } - if alert.Reserved != newAlert.Reserved { - t.Errorf("NewAlertFromPayload: wrong Reserved - got %v, want %v ", - alert.Reserved, newAlert.Reserved) - } -} - -// TestAlertErrors performs negative tests against payload serialization, -// deserialization of Alert to confirm error paths work correctly. 
-func TestAlertErrors(t *testing.T) { - pver := ProtocolVersion - - baseAlert := NewAlert( - 1, 1337093712, 1368628812, 1015, - 1013, []int32{1014}, 0, 40599, []string{"/Satoshi:0.7.2/"}, 5000, "", - "URGENT", - ) - baseAlertEncoded := []byte{ - 0x01, 0x00, 0x00, 0x00, 0x50, 0x6e, 0xb2, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x9e, 0x93, 0x51, //|....Pn.O....L..Q| - 0x00, 0x00, 0x00, 0x00, 0xf7, 0x03, 0x00, 0x00, 0xf5, 0x03, 0x00, 0x00, 0x01, 0xf6, 0x03, 0x00, //|................| - 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x9e, 0x00, 0x00, 0x01, 0x0f, 0x2f, 0x53, 0x61, 0x74, 0x6f, //|.........../Sato| - 0x73, 0x68, 0x69, 0x3a, 0x30, 0x2e, 0x37, 0x2e, 0x32, 0x2f, 0x88, 0x13, 0x00, 0x00, 0x00, 0x06, //|shi:0.7.2/......| - 0x55, 0x52, 0x47, 0x45, 0x4e, 0x54, 0x00, //|URGENT.| - } - tests := []struct { - in *Alert // Value to encode - buf []byte // Wire encoding - pver uint32 // Protocol version for wire encoding - max int // Max size of fixed buffer to induce errors - writeErr error // Expected write error - readErr error // Expected read error - }{ - // Force error in Version - {baseAlert, baseAlertEncoded, pver, 0, io.ErrShortWrite, io.EOF}, - // Force error in SetCancel VarInt. - {baseAlert, baseAlertEncoded, pver, 28, io.ErrShortWrite, io.EOF}, - // Force error in SetCancel ints. - {baseAlert, baseAlertEncoded, pver, 29, io.ErrShortWrite, io.EOF}, - // Force error in MinVer - {baseAlert, baseAlertEncoded, pver, 40, io.ErrShortWrite, io.EOF}, - // Force error in SetSubVer string VarInt. - {baseAlert, baseAlertEncoded, pver, 41, io.ErrShortWrite, io.EOF}, - // Force error in SetSubVer strings. - {baseAlert, baseAlertEncoded, pver, 48, io.ErrShortWrite, io.EOF}, - // Force error in Priority - {baseAlert, baseAlertEncoded, pver, 60, io.ErrShortWrite, io.EOF}, - // Force error in Comment string. - {baseAlert, baseAlertEncoded, pver, 62, io.ErrShortWrite, io.EOF}, - // Force error in StatusBar string. 
- {baseAlert, baseAlertEncoded, pver, 64, io.ErrShortWrite, io.EOF}, - // Force error in Reserved string. - {baseAlert, baseAlertEncoded, pver, 70, io.ErrShortWrite, io.EOF}, - } - - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - w := newFixedWriter(test.max) - err := test.in.Serialize(w, test.pver) - if reflect.TypeOf(err) != reflect.TypeOf(test.writeErr) { - t.Errorf("Alert.Serialize #%d wrong error got: %v, want: %v", - i, err, test.writeErr) - continue - } - - var alert Alert - r := newFixedReader(test.max, test.buf) - err = alert.Deserialize(r, test.pver) - if reflect.TypeOf(err) != reflect.TypeOf(test.readErr) { - t.Errorf("Alert.Deserialize #%d wrong error got: %v, want: %v", - i, err, test.readErr) - continue - } - } - - // overflow the max number of elements in SetCancel - // maxCountSetCancel + 1 == 8388575 == \xdf\xff\x7f\x00 - // replace bytes 29-33 - badAlertEncoded := []byte{ - 0x01, 0x00, 0x00, 0x00, 0x50, 0x6e, 0xb2, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x9e, 0x93, 0x51, //|....Pn.O....L..Q| - 0x00, 0x00, 0x00, 0x00, 0xf7, 0x03, 0x00, 0x00, 0xf5, 0x03, 0x00, 0x00, 0xfe, 0xdf, 0xff, 0x7f, //|................| - 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x9e, 0x00, 0x00, 0x01, 0x0f, 0x2f, 0x53, 0x61, 0x74, 0x6f, //|.........../Sato| - 0x73, 0x68, 0x69, 0x3a, 0x30, 0x2e, 0x37, 0x2e, 0x32, 0x2f, 0x88, 0x13, 0x00, 0x00, 0x00, 0x06, //|shi:0.7.2/......| - 0x55, 0x52, 0x47, 0x45, 0x4e, 0x54, 0x00, //|URGENT.| - } - var alert Alert - r := bytes.NewReader(badAlertEncoded) - err := alert.Deserialize(r, pver) - if _, ok := err.(*MessageError); !ok { - t.Errorf("Alert.Deserialize wrong error got: %T, want: %T", - err, MessageError{}) - } - - // overflow the max number of elements in SetSubVer - // maxCountSetSubVer + 1 == 131071 + 1 == \x00\x00\x02\x00 - // replace bytes 42-46 - badAlertEncoded = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x50, 0x6e, 0xb2, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x9e, 0x93, 0x51, //|....Pn.O....L..Q| - 0x00, 0x00, 0x00, 
0x00, 0xf7, 0x03, 0x00, 0x00, 0xf5, 0x03, 0x00, 0x00, 0x01, 0xf6, 0x03, 0x00, //|................| - 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x9e, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x02, 0x00, 0x74, 0x6f, //|.........../Sato| - 0x73, 0x68, 0x69, 0x3a, 0x30, 0x2e, 0x37, 0x2e, 0x32, 0x2f, 0x88, 0x13, 0x00, 0x00, 0x00, 0x06, //|shi:0.7.2/......| - 0x55, 0x52, 0x47, 0x45, 0x4e, 0x54, 0x00, //|URGENT.| - } - r = bytes.NewReader(badAlertEncoded) - err = alert.Deserialize(r, pver) - if _, ok := err.(*MessageError); !ok { - t.Errorf("Alert.Deserialize wrong error got: %T, want: %T", - err, MessageError{}) - } -} diff --git a/wire/msgcfcheckpt.go b/wire/msgcfcheckpt.go index 397a3c137a..a06ce1f3b6 100644 --- a/wire/msgcfcheckpt.go +++ b/wire/msgcfcheckpt.go @@ -20,6 +20,13 @@ const ( // maxCFHeadersLen is the max number of filter headers we will attempt // to decode. maxCFHeadersLen = 100000 + + // maxCFCheckptPayload calculates the maximum reasonable payload size + // for CF checkpoint messages. + // + // Calculation: 1 byte (filter type) + 32 bytes (stop hash) + + // 5 bytes (max varint) + (maxCFHeadersLen * 32 bytes per hash) + maxCFCheckptPayload = 1 + 32 + 5 + (maxCFHeadersLen * 32) ) // ErrInsaneCFHeaderCount signals that we were asked to decode an @@ -77,16 +84,24 @@ func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) return ErrInsaneCFHeaderCount } - // Create a contiguous slice of hashes to deserialize into in order to - // reduce the number of allocations. + if count == 0 { + msg.FilterHeaders = make([]*chainhash.Hash, 0) + return nil + } + + // Optimize memory allocation by creating a single backing array for + // all hashes. This reduces GC pressure and improves cache locality. + hashes := make([]chainhash.Hash, count) msg.FilterHeaders = make([]*chainhash.Hash, count) + + // Now we'll read all the hashes directly into the backing array we've + // created above. 
We'll then point the underlying filter header hashes + // into this backing array. for i := uint64(0); i < count; i++ { - var cfh chainhash.Hash - _, err := io.ReadFull(r, cfh[:]) - if err != nil { + if _, err := io.ReadFull(r, hashes[i][:]); err != nil { return err } - msg.FilterHeaders[i] = &cfh + msg.FilterHeaders[i] = &hashes[i] } return nil @@ -151,15 +166,19 @@ func (msg *MsgCFCheckpt) Command() string { // MaxPayloadLength returns the maximum length the payload can be for the // receiver. This is part of the Message interface implementation. func (msg *MsgCFCheckpt) MaxPayloadLength(pver uint32) uint32 { - // Message size depends on the blockchain height, so return general limit - // for all messages. - return MaxMessagePayload + // Use a more precise calculation based on the maximum number of + // filter headers we support. No reason to read more than we'll + // process in BtcDecode. + return maxCFCheckptPayload } -// NewMsgCFCheckpt returns a new bitcoin cfheaders message that conforms to -// the Message interface. See MsgCFCheckpt for details. +// NewMsgCFCheckpt returns a new bitcoin cfcheckpt message that conforms to the +// Message interface. See MsgCFCheckpt for details. func NewMsgCFCheckpt(filterType FilterType, stopHash *chainhash.Hash, headersCount int) *MsgCFCheckpt { + + // We pre-allocate with an exact capacity when count is known to avoid + // slice growth during message construction. return &MsgCFCheckpt{ FilterType: filterType, StopHash: *stopHash, diff --git a/wire/msgcfcheckpt_bench_test.go b/wire/msgcfcheckpt_bench_test.go new file mode 100644 index 0000000000..47bcfe0582 --- /dev/null +++ b/wire/msgcfcheckpt_bench_test.go @@ -0,0 +1,118 @@ +// Copyright (c) 2024 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package wire + +import ( + "bytes" + "fmt" + "math/rand" + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// BenchmarkMsgCFCheckptDecode benchmarks decoding of MsgCFCheckpt messages +// to measure the performance improvements from optimized memory allocation. +func BenchmarkMsgCFCheckptDecode(b *testing.B) { + pver := ProtocolVersion + + // Test with varying number of headers: 1k, 10k, 100k. + headerCounts := []int{1000, 10000, 100000} + + for _, numHeaders := range headerCounts { + b.Run(fmt.Sprintf("headers_%d", numHeaders), func(b *testing.B) { + var buf bytes.Buffer + msg := NewMsgCFCheckpt( + GCSFilterRegular, &chainhash.Hash{}, numHeaders, + ) + + rng := rand.New(rand.NewSource(12345)) + for i := 0; i < numHeaders; i++ { + hash := chainhash.Hash{} + rng.Read(hash[:]) + msg.AddCFHeader(&hash) + } + + err := msg.BtcEncode(&buf, pver, BaseEncoding) + if err != nil { + b.Fatal(err) + } + + encodedMsg := buf.Bytes() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + r := bytes.NewReader(encodedMsg) + + var msg MsgCFCheckpt + err := msg.BtcDecode(r, pver, BaseEncoding) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkMsgCFCheckptEncode benchmarks encoding of MsgCFCheckpt messages. +func BenchmarkMsgCFCheckptEncode(b *testing.B) { + pver := ProtocolVersion + + // Test with varying number of headers: 1k, 10k, 100k. 
+ headerCounts := []int{1000, 10000, 100000} + + for _, numHeaders := range headerCounts { + b.Run(fmt.Sprintf("headers_%d", numHeaders), func(b *testing.B) { + msg := NewMsgCFCheckpt( + GCSFilterRegular, &chainhash.Hash{}, numHeaders, + ) + + rng := rand.New(rand.NewSource(12345)) + for i := 0; i < numHeaders; i++ { + hash := chainhash.Hash{} + rng.Read(hash[:]) + msg.AddCFHeader(&hash) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + var buf bytes.Buffer + err := msg.BtcEncode(&buf, pver, BaseEncoding) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkMsgCFCheckptDecodeEmpty benchmarks decoding empty checkpoint +// messages to ensure edge cases are handled efficiently. +func BenchmarkMsgCFCheckptDecodeEmpty(b *testing.B) { + pver := ProtocolVersion + + var buf bytes.Buffer + msg := NewMsgCFCheckpt(GCSFilterRegular, &chainhash.Hash{}, 0) + if err := msg.BtcEncode(&buf, pver, BaseEncoding); err != nil { + b.Fatal(err) + } + encodedMsg := buf.Bytes() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + r := bytes.NewReader(encodedMsg) + var msg MsgCFCheckpt + if err := msg.BtcDecode(r, pver, BaseEncoding); err != nil { + b.Fatal(err) + } + } +} + diff --git a/wire/msgsendaddrv2.go b/wire/msgsendaddrv2.go index d6d19efb27..7be30d118d 100644 --- a/wire/msgsendaddrv2.go +++ b/wire/msgsendaddrv2.go @@ -1,6 +1,7 @@ package wire import ( + "fmt" "io" ) @@ -14,12 +15,24 @@ type MsgSendAddrV2 struct{} // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgSendAddrV2) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { + if pver < AddrV2Version { + str := fmt.Sprintf("sendaddrv2 message invalid for protocol "+ + "version %d", pver) + return messageError("MsgSendAddrV2.BtcDecode", str) + } + return nil } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. 
// This is part of the Message interface implementation. func (msg *MsgSendAddrV2) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { + if pver < AddrV2Version { + str := fmt.Sprintf("sendaddrv2 message invalid for protocol "+ + "version %d", pver) + return messageError("MsgSendAddrV2.BtcEncode", str) + } + return nil } diff --git a/wire/msgsendaddrv2_test.go b/wire/msgsendaddrv2_test.go new file mode 100644 index 0000000000..9161c08acc --- /dev/null +++ b/wire/msgsendaddrv2_test.go @@ -0,0 +1,193 @@ +// Copyright (c) 2024 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package wire + +import ( + "bytes" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestSendAddrV2 tests the MsgSendAddrV2 API against the latest protocol +// version. +func TestSendAddrV2(t *testing.T) { + pver := ProtocolVersion + enc := BaseEncoding + + // Ensure the command is expected value. + wantCmd := "sendaddrv2" + msg := NewMsgSendAddrV2() + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgSendAddrV2: wrong command - got %v want %v", + cmd, wantCmd) + } + + // Ensure max payload is expected value. + wantPayload := uint32(0) + maxPayload := msg.MaxPayloadLength(pver) + if maxPayload != wantPayload { + t.Errorf("MaxPayloadLength: wrong max payload length for "+ + "protocol version %d - got %v, want %v", pver, + maxPayload, wantPayload) + } + + // Test encode with latest protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, pver, enc) + if err != nil { + t.Errorf("encode of MsgSendAddrV2 failed %v err <%v>", msg, + err) + } + + // Older protocol versions should fail encode since message didn't + // exist yet. 
+ oldPver := AddrV2Version - 1 + err = msg.BtcEncode(&buf, oldPver, enc) + if err == nil { + s := "encode of MsgSendAddrV2 passed for old protocol " + + "version %v err <%v>" + t.Errorf(s, msg, err) + } + + // Test decode with latest protocol version. + readmsg := NewMsgSendAddrV2() + err = readmsg.BtcDecode(&buf, pver, enc) + if err != nil { + t.Errorf("decode of MsgSendAddrV2 failed [%v] err <%v>", buf, + err) + } + + // Older protocol versions should fail decode since message didn't + // exist yet. + err = readmsg.BtcDecode(&buf, oldPver, enc) + if err == nil { + s := "decode of MsgSendAddrV2 passed for old protocol " + + "version %v err <%v>" + t.Errorf(s, msg, err) + } +} + +// TestSendAddrV2BIP0130 tests the MsgSendAddrV2 API against the protocol +// prior to version AddrV2Version. +func TestSendAddrV2BIP0130(t *testing.T) { + // Use the protocol version just prior to AddrV2Version changes. + pver := AddrV2Version - 1 + enc := BaseEncoding + + msg := NewMsgSendAddrV2() + + // Test encode with old protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, pver, enc) + if err == nil { + t.Errorf("encode of MsgSendAddrV2 succeeded when it should " + + "have failed") + } + + // Test decode with old protocol version. + readmsg := NewMsgSendAddrV2() + err = readmsg.BtcDecode(&buf, pver, enc) + if err == nil { + t.Errorf("decode of MsgSendAddrV2 succeeded when it should " + + "have failed") + } +} + +// TestSendAddrV2CrossProtocol tests the MsgSendAddrV2 API when encoding with +// the latest protocol version and decoding with AddrV2Version. +func TestSendAddrV2CrossProtocol(t *testing.T) { + enc := BaseEncoding + msg := NewMsgSendAddrV2() + + // Encode with latest protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, ProtocolVersion, enc) + if err != nil { + t.Errorf("encode of MsgSendAddrV2 failed %v err <%v>", msg, + err) + } + + // Decode with old protocol version. 
+ readmsg := NewMsgSendAddrV2() + err = readmsg.BtcDecode(&buf, AddrV2Version, enc) + if err != nil { + t.Errorf("decode of MsgSendAddrV2 failed [%v] err <%v>", buf, + err) + } +} + +// TestSendAddrV2Wire tests the MsgSendAddrV2 wire encode and decode for +// various protocol versions. +func TestSendAddrV2Wire(t *testing.T) { + msgSendAddrV2 := NewMsgSendAddrV2() + msgSendAddrV2Encoded := []byte{} + + tests := []struct { + in *MsgSendAddrV2 // Message to encode + out *MsgSendAddrV2 // Expected decoded message + buf []byte // Wire encoding + pver uint32 // Protocol version for wire encoding + enc MessageEncoding // Message encoding format + }{ + // Latest protocol version. + { + msgSendAddrV2, + msgSendAddrV2, + msgSendAddrV2Encoded, + ProtocolVersion, + BaseEncoding, + }, + + // Protocol version AddrV2Version+1 + { + msgSendAddrV2, + msgSendAddrV2, + msgSendAddrV2Encoded, + AddrV2Version + 1, + BaseEncoding, + }, + + // Protocol version AddrV2Version + { + msgSendAddrV2, + msgSendAddrV2, + msgSendAddrV2Encoded, + AddrV2Version, + BaseEncoding, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Encode the message to wire format. + var buf bytes.Buffer + err := test.in.BtcEncode(&buf, test.pver, test.enc) + if err != nil { + t.Errorf("BtcEncode #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("BtcEncode #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Decode the message from wire format. 
+ var msg MsgSendAddrV2 + rbuf := bytes.NewReader(test.buf) + err = msg.BtcDecode(rbuf, test.pver, test.enc) + if err != nil { + t.Errorf("BtcDecode #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&msg, test.out) { + t.Errorf("BtcDecode #%d\n got: %s want: %s", i, + spew.Sdump(msg), spew.Sdump(test.out)) + continue + } + } +} diff --git a/wire/msgtx.go b/wire/msgtx.go index 1864ec6e36..e4f3051c47 100644 --- a/wire/msgtx.go +++ b/wire/msgtx.go @@ -229,6 +229,11 @@ func NewOutPointFromString(outpoint string) (*OutPoint, error) { if len(parts) != 2 { return nil, errors.New("outpoint should be of the form txid:index") } + + if len(parts[0]) != chainhash.MaxHashStringSize { + return nil, errors.New("outpoint txid should be 64 hex chars") + } + hash, err := chainhash.NewHashFromStr(parts[0]) if err != nil { return nil, err diff --git a/wire/msgtx_test.go b/wire/msgtx_test.go index e6b7bae44c..62d57098f3 100644 --- a/wire/msgtx_test.go +++ b/wire/msgtx_test.go @@ -849,6 +849,15 @@ func TestTxOutPointFromString(t *testing.T) { }, err: false, }, + { + name: "normal outpoint 2 with 31-byte txid", + input: "c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55:42", + result: &OutPoint{ + Hash: hashFromStr("c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55"), + Index: 42, + }, + err: true, + }, { name: "bad string", input: "not_outpoint_not_outpoint_not_outpoint", diff --git a/wire/msgwtxidrelay.go b/wire/msgwtxidrelay.go new file mode 100644 index 0000000000..ab131bc5bd --- /dev/null +++ b/wire/msgwtxidrelay.go @@ -0,0 +1,59 @@ +// Copyright (c) 2024 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package wire + +import ( + "fmt" + "io" +) + +// MsgWTxIdRelay defines a bitcoin wtxidrelay message which is used for a peer +// to signal support for relaying witness transaction id (BIP141). It +// implements the Message interface. 
+// +// This message has no payload. +type MsgWTxIdRelay struct{} + +// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// This is part of the Message interface implementation. +func (msg *MsgWTxIdRelay) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { + if pver < AddrV2Version { + str := fmt.Sprintf("wtxidrelay message invalid for protocol "+ + "version %d", pver) + return messageError("MsgWTxIdRelay.BtcDecode", str) + } + + return nil +} + +// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// This is part of the Message interface implementation. +func (msg *MsgWTxIdRelay) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { + if pver < AddrV2Version { + str := fmt.Sprintf("wtxidrelay message invalid for protocol "+ + "version %d", pver) + return messageError("MsgWTxIdRelay.BtcEncode", str) + } + + return nil +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgWTxIdRelay) Command() string { + return CmdWTxIdRelay +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgWTxIdRelay) MaxPayloadLength(pver uint32) uint32 { + return 0 +} + +// NewMsgWTxIdRelay returns a new bitcoin wtxidrelay message that conforms +// to the Message interface. +func NewMsgWTxIdRelay() *MsgWTxIdRelay { + return &MsgWTxIdRelay{} +} diff --git a/wire/msgwtxidrelay_test.go b/wire/msgwtxidrelay_test.go new file mode 100644 index 0000000000..7f519b4e93 --- /dev/null +++ b/wire/msgwtxidrelay_test.go @@ -0,0 +1,193 @@ +// Copyright (c) 2024 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package wire + +import ( + "bytes" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestWTxIdRelay tests the MsgWTxIdRelay API against the latest protocol +// version. +func TestWTxIdRelay(t *testing.T) { + pver := ProtocolVersion + enc := BaseEncoding + + // Ensure the command is expected value. + wantCmd := "wtxidrelay" + msg := NewMsgWTxIdRelay() + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgWTxIdRelay: wrong command - got %v want %v", + cmd, wantCmd) + } + + // Ensure max payload is expected value. + wantPayload := uint32(0) + maxPayload := msg.MaxPayloadLength(pver) + if maxPayload != wantPayload { + t.Errorf("MaxPayloadLength: wrong max payload length for "+ + "protocol version %d - got %v, want %v", pver, + maxPayload, wantPayload) + } + + // Test encode with latest protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, pver, enc) + if err != nil { + t.Errorf("encode of MsgWTxIdRelay failed %v err <%v>", msg, + err) + } + + // Older protocol versions should fail encode since message didn't + // exist yet. + oldPver := AddrV2Version - 1 + err = msg.BtcEncode(&buf, oldPver, enc) + if err == nil { + s := "encode of MsgWTxIdRelay passed for old protocol " + + "version %v err <%v>" + t.Errorf(s, msg, err) + } + + // Test decode with latest protocol version. + readmsg := NewMsgWTxIdRelay() + err = readmsg.BtcDecode(&buf, pver, enc) + if err != nil { + t.Errorf("decode of MsgWTxIdRelay failed [%v] err <%v>", buf, + err) + } + + // Older protocol versions should fail decode since message didn't + // exist yet. + err = readmsg.BtcDecode(&buf, oldPver, enc) + if err == nil { + s := "decode of MsgWTxIdRelay passed for old protocol " + + "version %v err <%v>" + t.Errorf(s, msg, err) + } +} + +// TestWTxIdRelayBIP0130 tests the MsgWTxIdRelay API against the protocol +// prior to version AddrV2Version. +func TestWTxIdRelayBIP0130(t *testing.T) { + // Use the protocol version just prior to AddrV2Version changes. 
+ pver := AddrV2Version - 1 + enc := BaseEncoding + + msg := NewMsgWTxIdRelay() + + // Test encode with old protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, pver, enc) + if err == nil { + t.Errorf("encode of MsgWTxIdRelay succeeded when it should " + + "have failed") + } + + // Test decode with old protocol version. + readmsg := NewMsgWTxIdRelay() + err = readmsg.BtcDecode(&buf, pver, enc) + if err == nil { + t.Errorf("decode of MsgWTxIdRelay succeeded when it should " + + "have failed") + } +} + +// TestWTxIdRelayCrossProtocol tests the MsgWTxIdRelay API when encoding with +// the latest protocol version and decoding with AddrV2Version. +func TestWTxIdRelayCrossProtocol(t *testing.T) { + enc := BaseEncoding + msg := NewMsgWTxIdRelay() + + // Encode with latest protocol version. + var buf bytes.Buffer + err := msg.BtcEncode(&buf, ProtocolVersion, enc) + if err != nil { + t.Errorf("encode of MsgWTxIdRelay failed %v err <%v>", msg, + err) + } + + // Decode with old protocol version. + readmsg := NewMsgWTxIdRelay() + err = readmsg.BtcDecode(&buf, AddrV2Version, enc) + if err != nil { + t.Errorf("decode of MsgWTxIdRelay failed [%v] err <%v>", buf, + err) + } +} + +// TestWTxIdRelayWire tests the MsgWTxIdRelay wire encode and decode for +// various protocol versions. +func TestWTxIdRelayWire(t *testing.T) { + msgWTxIdRelay := NewMsgWTxIdRelay() + msgWTxIdRelayEncoded := []byte{} + + tests := []struct { + in *MsgWTxIdRelay // Message to encode + out *MsgWTxIdRelay // Expected decoded message + buf []byte // Wire encoding + pver uint32 // Protocol version for wire encoding + enc MessageEncoding // Message encoding format + }{ + // Latest protocol version. 
+ { + msgWTxIdRelay, + msgWTxIdRelay, + msgWTxIdRelayEncoded, + ProtocolVersion, + BaseEncoding, + }, + + // Protocol version AddrV2Version+1 + { + msgWTxIdRelay, + msgWTxIdRelay, + msgWTxIdRelayEncoded, + AddrV2Version + 1, + BaseEncoding, + }, + + // Protocol version AddrV2Version + { + msgWTxIdRelay, + msgWTxIdRelay, + msgWTxIdRelayEncoded, + AddrV2Version, + BaseEncoding, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Encode the message to wire format. + var buf bytes.Buffer + err := test.in.BtcEncode(&buf, test.pver, test.enc) + if err != nil { + t.Errorf("BtcEncode #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("BtcEncode #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Decode the message from wire format. + var msg MsgWTxIdRelay + rbuf := bytes.NewReader(test.buf) + err = msg.BtcDecode(rbuf, test.pver, test.enc) + if err != nil { + t.Errorf("BtcDecode #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&msg, test.out) { + t.Errorf("BtcDecode #%d\n got: %s want: %s", i, + spew.Sdump(msg), spew.Sdump(test.out)) + continue + } + } +} diff --git a/wire/protocol.go b/wire/protocol.go index baeec05369..dbe66b1727 100644 --- a/wire/protocol.go +++ b/wire/protocol.go @@ -1,4 +1,4 @@ -// Copyright (c) 2013-2016 The btcsuite developers +// Copyright (c) 2013-2024 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -103,6 +103,10 @@ const ( // SFNodeNetWorkLimited is a flag used to indicate a peer supports serving // the last 288 blocks. SFNodeNetworkLimited = 1 << 10 + + // SFNodeP2PV2 is a flag used to indicate a peer supports BIP324 v2 + // connections. + SFNodeP2PV2 = 1 << 11 ) // Map of service flags back to their constant names for pretty printing. 
@@ -116,6 +120,7 @@ var sfStrings = map[ServiceFlag]string{ SFNodeCF: "SFNodeCF", SFNode2X: "SFNode2X", SFNodeNetworkLimited: "SFNodeNetworkLimited", + SFNodeP2PV2: "SFNodeP2PV2", } // orderedSFStrings is an ordered list of service flags from highest to @@ -130,6 +135,7 @@ var orderedSFStrings = []ServiceFlag{ SFNodeCF, SFNode2X, SFNodeNetworkLimited, + SFNodeP2PV2, } // HasFlag returns a bool indicating if the service has the given flag. @@ -165,7 +171,7 @@ func (f ServiceFlag) String() string { // BitcoinNet represents which bitcoin network a message belongs to. type BitcoinNet uint32 -// Constants used to indicate the message bitcoin network. They can also be +// Constants used to indicate the message bitcoin network. They can also be // used to seek to the next message when a stream's state is unknown, but // this package does not provide that functionality since it's generally a // better idea to simply disconnect clients that are misbehaving over TCP. @@ -179,6 +185,13 @@ const ( // TestNet3 represents the test network (version 3). TestNet3 BitcoinNet = 0x0709110b + // TestNet4 represents the test network (version 4). + TestNet4 BitcoinNet = 0x283f161c + + // SigNet represents the public default SigNet. For custom signets, + // see CustomSignetParams. + SigNet BitcoinNet = 0x40CF030A + // SimNet represents the simulation test network. 
SimNet BitcoinNet = 0x12141c16 ) @@ -189,6 +202,8 @@ var bnStrings = map[BitcoinNet]string{ MainNet: "MainNet", TestNet: "TestNet", TestNet3: "TestNet3", + TestNet4: "TestNet4", + SigNet: "SigNet", SimNet: "SimNet", } diff --git a/wire/protocol_test.go b/wire/protocol_test.go index eeeffb600a..c795751ee9 100644 --- a/wire/protocol_test.go +++ b/wire/protocol_test.go @@ -26,7 +26,7 @@ func TestServiceFlagStringer(t *testing.T) { {SFNodeCF, "SFNodeCF"}, {SFNode2X, "SFNode2X"}, {SFNodeNetworkLimited, "SFNodeNetworkLimited"}, - {0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|SFNodeNetworkLimited|0xfffffb00"}, + {0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|SFNodeNetworkLimited|SFNodeP2PV2|0xfffff300"}, } t.Logf("Running %d tests", len(tests)) @@ -49,6 +49,8 @@ func TestBitcoinNetStringer(t *testing.T) { {MainNet, "MainNet"}, {TestNet, "TestNet"}, {TestNet3, "TestNet3"}, + {TestNet4, "TestNet4"}, + {SigNet, "SigNet"}, {SimNet, "SimNet"}, {0xffffffff, "Unknown BitcoinNet (4294967295)"}, }