Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions amp/derivation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
// sharerTest describes a single test case exercised by the sharer
// derivation tests.
type sharerTest struct {
	// name is the human-readable name of the test case.
	name string

	// numShares is the number of child shares to derive.
	numShares int

	// merge indicates whether half of the derived children should be
	// merged back into the sharer before reconstruction is asserted.
	merge bool
}

var sharerTests = []sharerTest{
Expand All @@ -25,6 +26,16 @@ var sharerTests = []sharerTest{
name: "many shares",
numShares: 10,
},
{
name: "merge 4 shares",
numShares: 4,
merge: true,
},
{
name: "merge many shares",
numShares: 20,
merge: true,
},
}

// TestSharer executes the end-to-end derivation between sender and receiver,
Expand Down Expand Up @@ -71,10 +82,27 @@ func testSharer(t *testing.T, test sharerTest) {

// Compute the final share and finalize the sharing.
child := sharer.Child(0)
sharer = sharer.Zero()

assertChildShare(t, child, 0)
children = append(children, child)

// If we are testing merging, merge half of the created children back
// into the sharer.
if test.merge {
for i := len(children) / 2; i < len(children); i++ {
sharer = sharer.Merge(children[i])
}
children = children[:len(children)/2]

// We must create a new last child from what we just merged
// back.
child := sharer.Child(0)

assertChildShare(t, child, 0)
children = append(children, child)
}

assertReconstruction(t, children...)
}

Expand Down
165 changes: 165 additions & 0 deletions amp/shard_tracker.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
package amp

import (
"crypto/rand"
"encoding/binary"
"fmt"
"sync"

"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/routing/shards"
)

// Shard is an implementation of the shards.PaymentShards interface specific
// to AMP payments.
type Shard struct {
	// child is the derived child, carrying the preimage/hash pair used
	// for this shard's HTLC.
	child *Child

	// mpp is the MPP record to set for the final hop of this shard's
	// route.
	mpp *record.MPP

	// amp is the AMP record to set for the final hop of this shard's
	// route.
	amp *record.AMP
}

// A compile time check to ensure Shard implements the shards.PaymentShard
// interface.
var _ shards.PaymentShard = (*Shard)(nil)

// Hash returns the hash used for the HTLC representing this AMP shard. The
// hash is taken directly from the shard's derived child.
func (s *Shard) Hash() lntypes.Hash {
	return s.child.Hash
}

// MPP returns any extra MPP records that should be set for the final hop on
// the route used by this shard. The record carries the total amount and
// payment address the tracker was created with.
func (s *Shard) MPP() *record.MPP {
	return s.mpp
}

// AMP returns any extra AMP records that should be set for the final hop on
// the route used by this shard. The record carries the child's share, the
// tracker's set ID and the child index.
func (s *Shard) AMP() *record.AMP {
	return s.amp
}

// ShardTracker is an implementation of the shards.ShardTracker interface
// that is able to generate payment shards according to the AMP splitting
// algorithm. It can be used to generate new hashes to use for HTLCs, and also
// cancel shares used for failed payment shards.
type ShardTracker struct {
	// setID is the AMP set ID included with every shard's AMP record.
	setID [32]byte

	// paymentAddr is the payment address set on every shard's MPP record.
	paymentAddr [32]byte

	// totalAmt is the total payment amount set on every shard's MPP
	// record.
	totalAmt lnwire.MilliSatoshi

	// sharer holds the current share from which new children are derived.
	sharer Sharer

	// shards maps attempt IDs (pids) to the children derived for them.
	shards map[uint64]*Child

	// Mutex guards the fields above across concurrent method calls.
	sync.Mutex
}

// A compile time check to ensure ShardTracker implements the
// shards.ShardTracker interface.
var _ shards.ShardTracker = (*ShardTracker)(nil)

// NewShardTracker creates a new shard tracker to use for AMP payments. The
// root shard, setID, payment address and total amount must be correctly set in
// order for the TLV options to include with each shard to be created
// correctly.
func NewShardTracker(root, setID, payAddr [32]byte,
	totalAmt lnwire.MilliSatoshi) *ShardTracker {

	// Wrap the raw root bytes in a Share and seed a sharer from it.
	share := Share(root)
	sharer := SeedSharerFromRoot(&share)

	tracker := &ShardTracker{
		setID:       setID,
		paymentAddr: payAddr,
		totalAmt:    totalAmt,
		sharer:      sharer,
		shards:      make(map[uint64]*Child),
	}

	return tracker
}

// NewShard registers a new attempt with the ShardTracker and returns a
// new shard representing this attempt. This attempt's shard should be canceled
// if it ends up not being used by the overall payment, i.e. if the attempt
// fails.
func (s *ShardTracker) NewShard(pid uint64, last bool) (shards.PaymentShard,
error) {

s.Lock()
defer s.Unlock()

// Use a random child index.
var childIndex [4]byte
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

With a 32-bit random value, isn't the risk of a collision too high? Maybe the htlc id or the pid can be used here or a set-specific incrementing id.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess it depends on how it is being used. Currently a child gets created, and if it fails we xor the share back into the parent. This means that it will be split again most likely (which adds 32 byte randomness) before a new shard is sent.

In the scenario where it's the last shard that is being created, we can end up deriving multiple children from it. However, you must send a lot of attempts before a collision is likely. Maybe that potential privacy leak is not worth optimizing out? (since I think using the same index only would be a privacy concern)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, just wanted to bring it up. I think it is safe to assume that overall the vast majority of the payments will be single shot and in a post-hyperbitcoinization world with money streaming from every netflix subscriber - maybe collisions happen.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And what about adding a counter field to the last share struct? That doesn't leak info about other payments to the receiver.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

at 32-bits, we expect a payment hash collision after 2^16 retries of the same HTLC. even at 1 sec per attempt, that's continually resending the same AMP HTLC for 18 hours. AMP HTLCs with different shares shouldn't ever collide in practice because they have plenty of entropy.

if we are really concerned about collisions, we could use a CTR mode (random offset + increment) and then we'd only get collisions on wrap around. in the prior example—after 136 years of sending the same HTLC. in the end, the worst that happens is payment hash reuse. if a new use case comes along that might require this sort of sustained retry behavior then we can easily switch to CTR without any protocol changes

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

But what is wrong with my suggestion to use a counter to not have this problem at all, without sacrificing privacy?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

More, real work, for only a theoretical gain? :p

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

in a post-hyperbitcoinization world with money streaming from every netflix subscriber - maybe collisions happen.

In case it isn't clear, this index is just within the context of a given setID. So to ensure that all the world's streaming netflix payments don't collide, we just need to ensure that the setID is unique, which is easy given the negligible probability of colliding 256-bit integers.

Ultimately, with the route Johan has taken here with the implementation, since we just XOR out the failed shares rather than increment the childIndex (which would result in a distinct payment hash for a same share), we don't really need to worry about this colliding in practice. The alternative would've been to get another random child index, for the same share, then try again (uses a diff payment hash, but same share). This alternative route would result in more collisions in practice.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The context was clear. I just meant to say that if massive amounts of payments happen, the overall chance of a collision somewhere on the earth increases. It was totally clear that independent payments cannot collide. Let's say in some time frame, 2^32 independent payments happen that need a second attempt. There will then be on average one user that experiences a collision.

The out-xor'ing does not apply to the final share and I'd say that almost all AMP payments will be single-shot and have only a final share that may be retried with a different child index.

The alternative would've been to get another random child index, for the same share, then try again (uses a diff payment hash, but same share). This alternative route would result in more collisions in practice.

So what I am proposing is to just keep a counter for each (split) share that is generated. So there will be a counter within the context of a final share. Then keep incrementing that with each attempt. Then there is no risk of collision, so in my opinion a better option.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this like something you had in mind? #5255

However there is still the case that after 2^32 "last shards" the counter will wrap around and collisions will start happening.

if _, err := rand.Read(childIndex[:]); err != nil {
return nil, err
}
idx := binary.BigEndian.Uint32(childIndex[:])

// Depending on whether we are requesting the last shard or not, either
// split the current share into two, or get a Child directly from the
// current sharer.
var child *Child
if last {
child = s.sharer.Child(idx)

// If this was the last shard, set the current share to the
// zero share to indicate we cannot split it further.
s.sharer = s.sharer.Zero()
} else {
left, sharer, err := s.sharer.Split()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Minor comment: The naming left and right led me to think that the xor tree is more than it really is. Somehow I expected left and right to be different. That there is some kind of condition that determines what's left and what's right. Could be that I am the only one that has this.

Perhaps something like share and remainder could work too.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It can be hard to grok at first as there's actually less enforced structure than one might think. For example, there's no requirement that we split using our current right-skewed binary tree. Instead we could opt to make it more balanced. With the way Split() works, left is a random value while right is the parent xor'd with that random value. Rather than incrementing the child index for left (which will generate a new payment hash but keep the same share), this PR instead "destroys" left by XOR'ing that value out of right; right can then be split again to get new randomness and retry the payment.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I found it easier to just visualize it as a set of pebbles that we can split and merge at will.

A tree is perhaps not the best description, as we can also merge leafs from different sub-branches IIUC? @cfromknecht

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Indeed, something like a cord that you can cut and glue together again matches much better with how I assume it works.

if err != nil {
return nil, err
}

s.sharer = sharer
child = left.Child(idx)
}

// Track the new child and return the shard.
s.shards[pid] = child

mpp := record.NewMPP(s.totalAmt, s.paymentAddr)
amp := record.NewAMP(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Question about record.NewAMP: Why is the first parameter named rootShare? Shouldn't it be just share?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

at the time i think I used that as short hand for shareOfRoot, but yes just using share would be better

child.ChildDesc.Share, s.setID, child.ChildDesc.Index,
)

return &Shard{
child: child,
mpp: mpp,
amp: amp,
}, nil
}

// CancelShard cancels the shard corresponding to the given attempt ID,
// merging its share back into the tracker so the value can be re-derived for
// future attempts.
func (s *ShardTracker) CancelShard(pid uint64) error {
	s.Lock()
	defer s.Unlock()

	c, ok := s.shards[pid]
	if !ok {
		// Include the pid for easier debugging, consistent with the
		// error returned from GetHash.
		return fmt.Errorf("AMP shard for attempt %v not found", pid)
	}
	delete(s.shards, pid)

	// Now that we are canceling this shard, we XOR the share back into our
	// current share.
	s.sharer = s.sharer.Merge(c)

	return nil
}

// GetHash retrieves the hash used by the shard of the given attempt ID. This
// will return an error if the attempt ID is unknown.
func (s *ShardTracker) GetHash(pid uint64) (lntypes.Hash, error) {
	s.Lock()
	defer s.Unlock()

	// Look up the child tracked for this attempt and return its hash if
	// it is known.
	if child, ok := s.shards[pid]; ok {
		return child.Hash, nil
	}

	return lntypes.Hash{}, fmt.Errorf("AMP shard for attempt %v "+
		"not found", pid)
}
95 changes: 95 additions & 0 deletions amp/shard_tracker_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
package amp_test

import (
"crypto/rand"
"testing"

"github.com/lightningnetwork/lnd/amp"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/shards"
"github.com/stretchr/testify/require"
)

// TestAMPShardTracker tests that we can derive and cancel shards at will using
// the AMP shard tracker.
func TestAMPShardTracker(t *testing.T) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍

var root, setID, payAddr [32]byte
_, err := rand.Read(root[:])
require.NoError(t, err)

_, err = rand.Read(setID[:])
require.NoError(t, err)

_, err = rand.Read(payAddr[:])
require.NoError(t, err)

var totalAmt lnwire.MilliSatoshi = 1000

// Create an AMP shard tracker using the random data we just generated.
tracker := amp.NewShardTracker(root, setID, payAddr, totalAmt)

// Trying to retrieve a hash for id 0 should result in an error.
_, err = tracker.GetHash(0)
require.Error(t, err)

// We start by creating 20 shards.
const numShards = 20

var shards []shards.PaymentShard
for i := uint64(0); i < numShards; i++ {
s, err := tracker.NewShard(i, i == numShards-1)
require.NoError(t, err)

// Check that the shards have their payloads set as expected.
require.Equal(t, setID, s.AMP().SetID())
require.Equal(t, totalAmt, s.MPP().TotalMsat())
require.Equal(t, payAddr, s.MPP().PaymentAddr())

shards = append(shards, s)
}

// Make sure we can retrieve the hash for all of them.
for i := uint64(0); i < numShards; i++ {
hash, err := tracker.GetHash(i)
require.NoError(t, err)
require.Equal(t, shards[i].Hash(), hash)
}

// Now cancel half of the shards.
j := 0
for i := uint64(0); i < numShards; i++ {
if i%2 == 0 {
err := tracker.CancelShard(i)
require.NoError(t, err)
continue
}

// Keep shard.
shards[j] = shards[i]
j++
}
shards = shards[:j]

// Get a new last shard.
s, err := tracker.NewShard(numShards, true)
require.NoError(t, err)
shards = append(shards, s)

// Finally make sure these shards together can be used to reconstruct
// the children.
childDescs := make([]amp.ChildDesc, len(shards))
for i, s := range shards {
childDescs[i] = amp.ChildDesc{
Share: s.AMP().RootShare(),
Index: s.AMP().ChildIndex(),
}
}

// Using the child descriptors, reconstruct the children.
children := amp.ReconstructChildren(childDescs...)

// Validate that the derived child preimages match the hash of each shard.
for i, child := range children {
require.Equal(t, shards[i].Hash(), child.Hash)
}
}
36 changes: 36 additions & 0 deletions amp/sharer.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,12 @@ package amp

import (
"crypto/rand"
"fmt"
)

// zeroShare is the all-zero 32-byte share.
var zeroShare = Share{}

// Sharer facilitates dynamic splitting of a root share value and derivation of
// child preimage and hashes for individual HTLCs in an AMP payment. A sharer
// represents a specific node in an abstract binary tree that can generate up to
Expand Down Expand Up @@ -34,6 +38,16 @@ type Sharer interface {
// that the shares of all nodes descending from the parent will XOR to
// the parent's share.
Split() (Sharer, Sharer, error)

// Merge takes the given Child and "merges" it into the Sharer by
// XOR-ing its share with the Sharer's current share.
Merge(*Child) Sharer

// Zero returns a new "zero Sharer" that has its current share set to
// zero, while keeping the root share. Merging a Child from the
// original Sharer into this zero-Sharer gives back the original
// Sharer.
Zero() Sharer
}

// SeedSharer orchestrates the sharing of the root AMP seed along multiple
Expand Down Expand Up @@ -81,6 +95,11 @@ func (s *SeedSharer) Root() Share {
// parent share should no longer be used, and the caller should use the Child
// method on each to derive preimage/hash pairs for the HTLCs.
func (s *SeedSharer) Split() (Sharer, Sharer, error) {
// We cannot split the zero-Sharer.
if s.curr == zeroShare {
return nil, nil, fmt.Errorf("cannot split zero-Sharer")
}

shareLeft, shareRight, err := split(&s.curr)
if err != nil {
return nil, nil, err
Expand All @@ -92,6 +111,23 @@ func (s *SeedSharer) Split() (Sharer, Sharer, error) {
return left, right, nil
}

// Merge takes the given Child and "merges" it into the Sharer by XOR-ing its
// share with the Sharer's current share.
func (s *SeedSharer) Merge(child *Child) Sharer {
	// XOR the child's share back into our current share, keeping the
	// original root untouched.
	var merged Share
	merged.Xor(&s.curr, &child.Share)

	return initSeedSharer(&s.root, &merged)
}

// Zero returns a new "zero Sharer" that has its current share set to zero,
// while keeping the root share. Merging a Child from the original Sharer into
// this zero-Sharer gives back the original Sharer.
func (s *SeedSharer) Zero() Sharer {
	return initSeedSharer(&s.root, &zeroShare)
}

// Child derives a preimage/hash pair to be used for an AMP HTLC.
// All children of s will use the same underlying share, but have unique
// preimage and hash. This can be used to rerandomize the preimage/hash pair for
Expand Down
2 changes: 1 addition & 1 deletion channeldb/duplicate_payments.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ func deserializeDuplicatePaymentCreationInfo(r io.Reader) (

c := &PaymentCreationInfo{}

if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil {
if _, err := io.ReadFull(r, c.PaymentIdentifier[:]); err != nil {
return nil, err
}

Expand Down
Loading