
Commit

Added tests and changed function to NthNext.
Frozen committed Nov 19, 2024
1 parent 0349844 commit b7f8a1f
Showing 4 changed files with 87 additions and 68 deletions.
11 changes: 5 additions & 6 deletions consensus/quorum/quorum.go
@@ -78,6 +78,7 @@ type ParticipantTracker interface {
     // NthNextValidator returns key for next validator. It assumes external validators and leader rotation.
     NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
     NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
+    NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error)
     FirstParticipant() *bls.PublicKeyWrapper
     UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper)
 }
@@ -201,20 +202,18 @@ func (s *cIdentities) IndexOf(pubKey bls.SerializedPublicKey) int {
 }
 
 // NthNext return the Nth next pubkey, next can be negative number
-func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
-    found := false
-
+func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error) {
     idx := s.IndexOf(pubKey.Bytes)
-    if idx != -1 {
-        found = true
+    if idx == -1 {
+        return nil, errors.Errorf("pubKey not found %x", pubKey.Bytes)
     }
     numNodes := int(s.ParticipantsCount())
     // sanity check to avoid out of bound access
     if numNodes <= 0 || numNodes > len(s.publicKeys) {
         numNodes = len(s.publicKeys)
     }
     idx = (idx + next) % numNodes
-    return found, &s.publicKeys[idx]
+    return &s.publicKeys[idx], nil
 }
 
 // NthNextValidator return the Nth next pubkey nodes, but from another validator.
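The new contract for NthNext is: return the participant that sits next positions after pubKey, or an error if pubKey is not a participant at all, instead of the old boolean found-flag. The standalone sketch below isolates the wrap-around arithmetic the method relies on; nthNextIndex is a hypothetical helper, not repo code, and it adds a normalization step the repo version does not perform, since Go's % keeps the sign of the dividend and a negative next is only safe while idx+next stays non-negative.

package main

import "fmt"

// nthNextIndex is a hypothetical stand-in for the index arithmetic inside
// cIdentities.NthNext: wrap (idx + next) around numNodes. Unlike the repo
// version it also normalizes negative results, because Go's % keeps the sign
// of the dividend.
func nthNextIndex(idx, next, numNodes int) int {
    i := (idx + next) % numNodes
    if i < 0 {
        i += numNodes
    }
    return i
}

func main() {
    const numNodes = 5
    fmt.Println(nthNextIndex(3, 1, numNodes))  // 4
    fmt.Println(nthNextIndex(4, 2, numNodes))  // 1: wraps around
    fmt.Println(nthNextIndex(0, -1, numNodes)) // 4: negative next, normalized
}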
6 changes: 6 additions & 0 deletions consensus/quorum/thread_safe_decider.go
@@ -56,6 +56,12 @@ func (a threadSafeDeciderImpl) NthNextValidator(slotList shard.SlotList, pubKey
     return a.decider.NthNextValidator(slotList, pubKey, next)
 }
 
+func (a threadSafeDeciderImpl) NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error) {
+    a.mu.Lock()
+    defer a.mu.Unlock()
+    return a.decider.NthNext(pubKey, next)
+}
+
 func (a threadSafeDeciderImpl) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
     a.mu.Lock()
     defer a.mu.Unlock()
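The wrapper added here follows the same shape as the neighbouring threadSafeDeciderImpl methods: take the shared mutex, then delegate to the wrapped decider. A minimal standalone sketch of that lock-then-delegate pattern, using hypothetical counter types rather than repo code:

package main

import (
    "fmt"
    "sync"
)

// counter and threadSafeCounter are hypothetical types used only to show the
// wrapper pattern: acquire the shared mutex, then delegate to the inner value.
type counter struct{ n int }

func (c *counter) Inc() { c.n++ }

type threadSafeCounter struct {
    mu    *sync.Mutex
    inner *counter
}

func (t threadSafeCounter) Inc() {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.inner.Inc()
}

func main() {
    ts := threadSafeCounter{mu: &sync.Mutex{}, inner: &counter{}}
    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            ts.Inc()
        }()
    }
    wg.Wait()
    fmt.Println(ts.inner.n) // always 100: no lost updates
}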
36 changes: 18 additions & 18 deletions consensus/view_change.go
@@ -169,8 +169,9 @@ func (consensus *Consensus) getNextLeaderKeySkipSameAddress(viewID uint64, commi
     }
     // use pubkey as default key as well
     leaderPubKey := consensus.getLeaderPubKey()
-    rs, ok := viewChangeNextValidator(consensus.decider, gap, committee.Slots, leaderPubKey)
-    if !ok {
+    rs, err := viewChangeNextValidator(consensus.decider, gap, committee.Slots, leaderPubKey)
+    if err != nil {
+        consensus.getLogger().Error().Err(err).Msg("[getNextLeaderKeySkipSameAddress] viewChangeNextValidator failed")
         return leaderPubKey
     }
     return rs
@@ -258,16 +259,17 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
     return next
 }
 
-func viewChangeNextValidator(decider quorum.Decider, gap int, slots shard.SlotList, lastLeaderPubKey *bls.PublicKeyWrapper) (*bls.PublicKeyWrapper, bool) {
-    var wasFound bool
-    var next *bls.PublicKeyWrapper
+type nthNext interface {
+    NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error)
+}
+
+func viewChangeNextValidator(decider nthNext, gap int, slots shard.SlotList, lastLeaderPubKey *bls.PublicKeyWrapper) (*bls.PublicKeyWrapper, error) {
     if gap > 1 {
-        wasFoundCurrent, current := decider.NthNextValidator(
-            slots,
+        current, err := decider.NthNext(
             lastLeaderPubKey,
             gap-1)
-        if !wasFoundCurrent {
-            return nil, false
+        if err != nil {
+            return nil, errors.WithMessagef(err, "NthNext failed, gap %d", gap)
         }
 
         publicToAddress := make(map[bls.SerializedPublicKey]common.Address)
@@ -277,26 +279,24 @@ func viewChangeNextValidator(decider quorum.Decider, gap int, slots shard.SlotLi
 
         for i := 0; i < len(slots); i++ {
             gap = gap + i
-            wasFound, next = decider.NthNextValidator(
-                slots,
+            next, err := decider.NthNext(
                 lastLeaderPubKey,
                 gap)
-            if !wasFound {
-                return nil, false
+            if err != nil {
+                return nil, errors.New("current leader not found")
             }
 
             if publicToAddress[current.Bytes] != publicToAddress[next.Bytes] {
-                return next, true
+                return next, nil
             }
         }
     } else {
-        wasFound, next = decider.NthNextValidator(
-            slots,
+        next, err := decider.NthNext(
             lastLeaderPubKey,
             gap)
-        return next, wasFound
+        return next, errors.WithMessagef(err, "NthNext failed, gap %d", gap)
     }
-    return nil, false
+    return nil, errors.New("current leader not found")
 }
 
 func createTimeout() map[TimeoutType]*utils.Timeout {
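A side effect of introducing the small nthNext interface is that viewChangeNextValidator no longer needs a full quorum.Decider and can be driven by a lightweight fake. The sketch below is a hypothetical fakeDecider with keys simplified to strings instead of *bls.PublicKeyWrapper; it mirrors the lookup-then-wrap behaviour of cIdentities.NthNext shown above, but is not part of the repository.

package main

import "fmt"

// fakeDecider is a hypothetical, minimal stand-in for the new nthNext
// interface; keys are plain strings here rather than *bls.PublicKeyWrapper.
type fakeDecider struct {
    keys []string
}

func (f fakeDecider) NthNext(pubKey string, next int) (string, error) {
    idx := -1
    for i, k := range f.keys {
        if k == pubKey {
            idx = i
            break
        }
    }
    if idx == -1 {
        return "", fmt.Errorf("pubKey not found %s", pubKey)
    }
    // mirrors the lookup-then-wrap behaviour of cIdentities.NthNext
    return f.keys[(idx+next)%len(f.keys)], nil
}

func main() {
    d := fakeDecider{keys: []string{"k0", "k1", "k2"}}

    next, err := d.NthNext("k1", 2)
    if err != nil {
        panic(err)
    }
    fmt.Println(next) // k0: wraps past the end of the key list

    if _, err := d.NthNext("missing", 1); err != nil {
        fmt.Println("error as expected:", err)
    }
}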
102 changes: 58 additions & 44 deletions consensus/view_change_test.go
@@ -138,8 +138,12 @@ func TestViewChangeNextValidator(t *testing.T) {
     decider.UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{})
     assert.EqualValues(t, keyCount, decider.ParticipantsCount())
 
-    t.Run("check_different_address_for_validators_with_gap_0", func(t *testing.T) {
-        slots := []shard.Slot{}
+    t.Run("check_different_address_for_validators", func(t *testing.T) {
+        var (
+            rs    *bls.PublicKeyWrapper
+            err   error
+            slots []shard.Slot
+        )
         for i := 0; i < keyCount; i++ {
             slot := shard.Slot{
                 EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
@@ -148,43 +152,32 @@ func TestViewChangeNextValidator(t *testing.T) {
             slots = append(slots, slot)
         }
 
-        rs, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-        require.True(t, ok)
-        require.Equal(t, &wrappedBLSKeys[1], rs)
-    })
-    t.Run("check_different_address_for_validators_with_gap_1", func(t *testing.T) {
-        slots := []shard.Slot{}
-        for i := 0; i < keyCount; i++ {
-            slot := shard.Slot{
-                EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
-                BLSPublicKey: wrappedBLSKeys[i].Bytes,
-            }
-            slots = append(slots, slot)
-        }
+        rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[0], rs)
 
-        rs, ok := viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
-        require.True(t, ok)
+        rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
         require.Equal(t, &wrappedBLSKeys[1], rs)
-    })
-    t.Run("check_different_address_for_validators_with_gap_2", func(t *testing.T) {
-        slots := []shard.Slot{}
-        for i := 0; i < keyCount; i++ {
-            slot := shard.Slot{
-                EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
-                BLSPublicKey: wrappedBLSKeys[i].Bytes,
-            }
-            slots = append(slots, slot)
-        }
 
-        rs, ok := viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
-        require.True(t, ok)
+        rs, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
         require.Equal(t, &wrappedBLSKeys[2], rs)
+
+        // and no panic or error for future 1k gaps
+        for i := 0; i < 1000; i++ {
+            _, err = viewChangeNextValidator(decider, i, slots, &wrappedBLSKeys[0])
+            require.NoError(t, err)
+        }
     })
 
     // we can't find next validator, because all validators have the same address
-    t.Run("check_same_address_for_validators", func(t *testing.T) {
-        // Slot represents node id (BLS address)
-        slots := []shard.Slot{}
+    t.Run("same_address_for_all_validators", func(t *testing.T) {
+        var (
+            rs    *bls.PublicKeyWrapper
+            err   error
+            slots []shard.Slot
+        )
         for i := 0; i < keyCount; i++ {
             slot := shard.Slot{
                 EcdsaAddress: common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z3")),
@@ -193,8 +186,23 @@ func TestViewChangeNextValidator(t *testing.T) {
             slots = append(slots, slot)
         }
 
-        _, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-        require.False(t, ok)
+        rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[0], rs)
+
+        rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[1], rs)
+
+        // error because all validators belong same address
+        _, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+        require.Error(t, err)
+
+        // all of them return error, no way to recover
+        for i := 2; i < 1000; i++ {
+            _, err = viewChangeNextValidator(decider, i, slots, &wrappedBLSKeys[0])
+            require.Errorf(t, err, "error because all validators belong same address %d", i)
+        }
     })
 
     // we can't find next validator, because all validators have the same address
@@ -203,6 +211,8 @@ func TestViewChangeNextValidator(t *testing.T) {
         var (
             addr1 = common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z3"))
             addr2 = common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z4"))
+            rs    *bls.PublicKeyWrapper
+            err   error
         )
         slots := []shard.Slot{
             {
@@ -226,17 +236,21 @@ func TestViewChangeNextValidator(t *testing.T) {
                 BLSPublicKey: wrappedBLSKeys[4].Bytes,
             },
         }
-        rs, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-        require.True(t, ok)
-        require.Equal(t, &wrappedBLSKeys[2], rs)
 
-        rs, ok = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
-        require.True(t, ok)
-        require.Equal(t, &wrappedBLSKeys[3], rs)
+        rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[0], rs)
+
+        rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[1], rs)
+
+        rs, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[2], rs)
 
-        // TODO
-        //rs, ok = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
-        //require.True(t, ok)
-        //require.Equal(t, &wrappedBLSKeys[0], rs)
+        rs, err = viewChangeNextValidator(decider, 3, slots, &wrappedBLSKeys[0])
+        require.NoError(t, err)
+        require.Equal(t, &wrappedBLSKeys[1], rs)
     })
 }
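The subtests above pin down the address-skipping rule behind getNextLeaderKeySkipSameAddress: for gaps larger than one, a candidate is only accepted when its ECDSA address differs from the current leader's, which is why a committee whose slots all share one address errors out from gap 2 onward. Below is a standalone sketch of that rule with simplified types; slot and nextDifferentAddress are hypothetical names, and strings stand in for BLS keys and addresses.

package main

import (
    "errors"
    "fmt"
)

// slot pairs a (possibly shared) account address with a distinct BLS-style key.
type slot struct {
    address string
    key     string
}

// nextDifferentAddress walks forward from startIdx, wrapping around, and
// returns the first key whose address differs from the address at startIdx.
func nextDifferentAddress(slots []slot, startIdx int) (string, error) {
    cur := slots[startIdx].address
    for i := 1; i < len(slots); i++ {
        cand := slots[(startIdx+i)%len(slots)]
        if cand.address != cur {
            return cand.key, nil
        }
    }
    return "", errors.New("all validators share the current leader's address")
}

func main() {
    mixed := []slot{{"addr1", "k0"}, {"addr1", "k1"}, {"addr2", "k2"}, {"addr2", "k3"}}
    if key, err := nextDifferentAddress(mixed, 0); err == nil {
        fmt.Println("next leader key:", key) // next leader key: k2
    }

    same := []slot{{"addr1", "k0"}, {"addr1", "k1"}, {"addr1", "k2"}}
    if _, err := nextDifferentAddress(same, 0); err != nil {
        fmt.Println("error:", err) // error: all validators share the current leader's address
    }
}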
