From b7f8a1fdc9989ca815f8d8aa573a981ccab512fa Mon Sep 17 00:00:00 2001
From: frozen <355847+Frozen@users.noreply.github.com>
Date: Tue, 19 Nov 2024 18:20:38 -0400
Subject: [PATCH] Added tests and changed function to NthNext.

---
 consensus/quorum/quorum.go              |  11 ++-
 consensus/quorum/thread_safe_decider.go |   6 ++
 consensus/view_change.go                |  36 ++++-----
 consensus/view_change_test.go           | 102 ++++++++++++++----------
 4 files changed, 87 insertions(+), 68 deletions(-)

diff --git a/consensus/quorum/quorum.go b/consensus/quorum/quorum.go
index 07da6b3c77..251268ca68 100644
--- a/consensus/quorum/quorum.go
+++ b/consensus/quorum/quorum.go
@@ -78,6 +78,7 @@ type ParticipantTracker interface {
 	// NthNextValidator returns key for next validator. It assumes external validators and leader rotation.
 	NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
 	NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
+	NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error)
 	FirstParticipant() *bls.PublicKeyWrapper
 	UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper)
 }
@@ -201,12 +202,10 @@ func (s *cIdentities) IndexOf(pubKey bls.SerializedPublicKey) int {
 }
 
 // NthNext return the Nth next pubkey, next can be negative number
-func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
-	found := false
-
+func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error) {
 	idx := s.IndexOf(pubKey.Bytes)
-	if idx != -1 {
-		found = true
+	if idx == -1 {
+		return nil, errors.Errorf("pubKey not found %x", pubKey.Bytes)
 	}
 	numNodes := int(s.ParticipantsCount())
 	// sanity check to avoid out of bound access
@@ -214,7 +213,7 @@ func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (bool, *bl
 		numNodes = len(s.publicKeys)
 	}
 	idx = (idx + next) % numNodes
-	return found, &s.publicKeys[idx]
+	return &s.publicKeys[idx], nil
 }
 
 // NthNextValidator return the Nth next pubkey nodes, but from another validator.
diff --git a/consensus/quorum/thread_safe_decider.go b/consensus/quorum/thread_safe_decider.go
index 8d0a3c09b5..8fda2489e9 100644
--- a/consensus/quorum/thread_safe_decider.go
+++ b/consensus/quorum/thread_safe_decider.go
@@ -56,6 +56,12 @@ func (a threadSafeDeciderImpl) NthNextValidator(slotList shard.SlotList, pubKey
 	return a.decider.NthNextValidator(slotList, pubKey, next)
 }
 
+func (a threadSafeDeciderImpl) NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.NthNext(pubKey, next)
+}
+
 func (a threadSafeDeciderImpl) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
diff --git a/consensus/view_change.go b/consensus/view_change.go
index 0c6d057876..2b6df8bde6 100644
--- a/consensus/view_change.go
+++ b/consensus/view_change.go
@@ -169,8 +169,9 @@ func (consensus *Consensus) getNextLeaderKeySkipSameAddress(viewID uint64, commi
 	}
 	// use pubkey as default key as well
 	leaderPubKey := consensus.getLeaderPubKey()
-	rs, ok := viewChangeNextValidator(consensus.decider, gap, committee.Slots, leaderPubKey)
-	if !ok {
+	rs, err := viewChangeNextValidator(consensus.decider, gap, committee.Slots, leaderPubKey)
+	if err != nil {
+		consensus.getLogger().Error().Err(err).Msg("[getNextLeaderKeySkipSameAddress] viewChangeNextValidator failed")
 		return leaderPubKey
 	}
 	return rs
@@ -258,16 +259,17 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
 	return next
 }
 
-func viewChangeNextValidator(decider quorum.Decider, gap int, slots shard.SlotList, lastLeaderPubKey *bls.PublicKeyWrapper) (*bls.PublicKeyWrapper, bool) {
-	var wasFound bool
-	var next *bls.PublicKeyWrapper
+type nthNext interface {
+	NthNext(pubKey *bls.PublicKeyWrapper, next int) (*bls.PublicKeyWrapper, error)
+}
+
+func viewChangeNextValidator(decider nthNext, gap int, slots shard.SlotList, lastLeaderPubKey *bls.PublicKeyWrapper) (*bls.PublicKeyWrapper, error) {
 	if gap > 1 {
-		wasFoundCurrent, current := decider.NthNextValidator(
-			slots,
+		current, err := decider.NthNext(
 			lastLeaderPubKey,
 			gap-1)
-		if !wasFoundCurrent {
-			return nil, false
+		if err != nil {
+			return nil, errors.WithMessagef(err, "NthNext failed, gap %d", gap)
 		}
 
 		publicToAddress := make(map[bls.SerializedPublicKey]common.Address)
@@ -277,26 +279,24 @@ func viewChangeNextValidator(decider quorum.Decider, gap int, slots shard.SlotLi
 
 		for i := 0; i < len(slots); i++ {
 			gap = gap + i
-			wasFound, next = decider.NthNextValidator(
-				slots,
+			next, err := decider.NthNext(
 				lastLeaderPubKey,
 				gap)
-			if !wasFound {
-				return nil, false
+			if err != nil {
+				return nil, errors.WithMessagef(err, "NthNext failed, gap %d", gap)
 			}
 			if publicToAddress[current.Bytes] != publicToAddress[next.Bytes] {
-				return next, true
+				return next, nil
 			}
 		}
 	} else {
-		wasFound, next = decider.NthNextValidator(
-			slots,
+		next, err := decider.NthNext(
 			lastLeaderPubKey,
 			gap)
-		return next, wasFound
+		return next, errors.WithMessagef(err, "NthNext failed, gap %d", gap)
 	}
-	return nil, false
+	return nil, errors.New("no next leader with a different address found")
 }
 
 func createTimeout() map[TimeoutType]*utils.Timeout {
diff --git a/consensus/view_change_test.go b/consensus/view_change_test.go
index c943e5938c..ee599ca3da 100644
--- a/consensus/view_change_test.go
+++ b/consensus/view_change_test.go
@@ -138,8 +138,12 @@ func TestViewChangeNextValidator(t *testing.T) {
 	decider.UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{})
 	assert.EqualValues(t, keyCount, decider.ParticipantsCount())
 
-	t.Run("check_different_address_for_validators_with_gap_0", func(t *testing.T) {
-		slots := []shard.Slot{}
+	t.Run("check_different_address_for_validators", func(t *testing.T) {
+		var (
+			rs    *bls.PublicKeyWrapper
+			err   error
+			slots []shard.Slot
+		)
 		for i := 0; i < keyCount; i++ {
 			slot := shard.Slot{
 				EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
@@ -148,43 +152,32 @@ func TestViewChangeNextValidator(t *testing.T) {
 			slots = append(slots, slot)
 		}
 
-		rs, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-		require.True(t, ok)
-		require.Equal(t, &wrappedBLSKeys[1], rs)
-	})
-	t.Run("check_different_address_for_validators_with_gap_1", func(t *testing.T) {
-		slots := []shard.Slot{}
-		for i := 0; i < keyCount; i++ {
-			slot := shard.Slot{
-				EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
-				BLSPublicKey: wrappedBLSKeys[i].Bytes,
-			}
-			slots = append(slots, slot)
-		}
+		rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[0], rs)
 
-		rs, ok := viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
-		require.True(t, ok)
+		rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
 		require.Equal(t, &wrappedBLSKeys[1], rs)
-	})
-	t.Run("check_different_address_for_validators_with_gap_2", func(t *testing.T) {
-		slots := []shard.Slot{}
-		for i := 0; i < keyCount; i++ {
-			slot := shard.Slot{
-				EcdsaAddress: common.BigToAddress(big.NewInt(int64(i))),
-				BLSPublicKey: wrappedBLSKeys[i].Bytes,
-			}
-			slots = append(slots, slot)
-		}
 
-		rs, ok := viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
-		require.True(t, ok)
+		rs, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
 		require.Equal(t, &wrappedBLSKeys[2], rs)
+
+		// no panic or error for any gap up to 1000
+		for i := 0; i < 1000; i++ {
+			_, err = viewChangeNextValidator(decider, i, slots, &wrappedBLSKeys[0])
+			require.NoError(t, err)
+		}
 	})
 
 	// we can't find next validator, because all validators have the same address
-	t.Run("check_same_address_for_validators", func(t *testing.T) {
-		// Slot represents node id (BLS address)
-		slots := []shard.Slot{}
+	t.Run("same_address_for_all_validators", func(t *testing.T) {
+		var (
+			rs    *bls.PublicKeyWrapper
+			err   error
+			slots []shard.Slot
+		)
 		for i := 0; i < keyCount; i++ {
 			slot := shard.Slot{
 				EcdsaAddress: common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z3")),
@@ -193,8 +186,23 @@ func TestViewChangeNextValidator(t *testing.T) {
 			slots = append(slots, slot)
 		}
 
-		_, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-		require.False(t, ok)
+		rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[0], rs)
+
+		rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[1], rs)
+
+		// error because all validators belong to the same address
+		_, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+		require.Error(t, err)
+
+		// all of them return an error; there is no way to recover
+		for i := 2; i < 1000; i++ {
+			_, err = viewChangeNextValidator(decider, i, slots, &wrappedBLSKeys[0])
+			require.Errorf(t, err, "expected an error because all validators belong to the same address, gap %d", i)
+		}
 	})
 
 	// we can't find next validator, because all validators have the same address
@@ -203,6 +211,8 @@ func TestViewChangeNextValidator(t *testing.T) {
 		var (
 			addr1 = common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z3"))
 			addr2 = common.BytesToAddress([]byte("one1ay37rp2pc3kjarg7a322vu3sa8j9puahg679z4"))
+			rs    *bls.PublicKeyWrapper
+			err   error
 		)
 		slots := []shard.Slot{
 			{
@@ -226,17 +236,21 @@ func TestViewChangeNextValidator(t *testing.T) {
 				BLSPublicKey: wrappedBLSKeys[4].Bytes,
 			},
 		}
 
-		rs, ok := viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
-		require.True(t, ok)
-		require.Equal(t, &wrappedBLSKeys[2], rs)
-		rs, ok = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
-		require.True(t, ok)
-		require.Equal(t, &wrappedBLSKeys[3], rs)
+		rs, err = viewChangeNextValidator(decider, 0, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[0], rs)
+
+		rs, err = viewChangeNextValidator(decider, 1, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[1], rs)
+
+		rs, err = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[2], rs)
 
-		// TODO
-		//rs, ok = viewChangeNextValidator(decider, 2, slots, &wrappedBLSKeys[0])
-		//require.True(t, ok)
-		//require.Equal(t, &wrappedBLSKeys[0], rs)
+		rs, err = viewChangeNextValidator(decider, 3, slots, &wrappedBLSKeys[0])
+		require.NoError(t, err)
+		require.Equal(t, &wrappedBLSKeys[1], rs)
 	})
 }
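
Note, not part of the patch: a minimal, self-contained sketch of the calling convention this change moves to, where NthNext reports a missing key through an error instead of a (bool, key) pair. The fakeDecider type, its plain string keys, and the printed messages are hypothetical illustrations standing in for *bls.PublicKeyWrapper and the real quorum decider; they are not code from the repository.

package main

import "fmt"

// fakeDecider is a hypothetical stand-in for the quorum decider; it has the
// same NthNext shape as the nthNext interface introduced in view_change.go,
// but uses plain string keys so the sketch stays self-contained.
type fakeDecider struct {
	keys []string
}

// NthNext mirrors the new contract: an error when the key is unknown,
// otherwise the key `next` positions ahead, wrapping around the list.
func (d fakeDecider) NthNext(key string, next int) (string, error) {
	idx := -1
	for i, k := range d.keys {
		if k == key {
			idx = i
			break
		}
	}
	if idx == -1 {
		return "", fmt.Errorf("pubKey not found %q", key)
	}
	return d.keys[(idx+next)%len(d.keys)], nil
}

func main() {
	d := fakeDecider{keys: []string{"key-0", "key-1", "key-2"}}

	// Known key: wraps around the participant list (prints "key-1").
	if next, err := d.NthNext("key-2", 2); err == nil {
		fmt.Println("next leader:", next)
	}

	// Unknown key: callers now branch on the error and keep the current
	// leader, as getNextLeaderKeySkipSameAddress does in the patch.
	if _, err := d.NthNext("missing", 1); err != nil {
		fmt.Println("fallback to current leader:", err)
	}
}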