diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index e1cdd3378f..1dfc65b441 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -712,7 +712,11 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int, defaultKey *bls.PublicK return defaultKey } const blocksCountAliveness = 4 - utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch)) + utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v rotation v2: %v", + epoch.Uint64(), + bc.Config().IsLeaderRotationInternalValidators(epoch), + bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch), + bc.Config().IsLeaderRotationV2Epoch(epoch)) ss, err := bc.ReadShardState(epoch) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to read shard state") @@ -758,7 +762,9 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int, defaultKey *bls.PublicK ) for i := 0; i < len(committee.Slots); i++ { - if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) { + if bc.Config().IsLeaderRotationV2Epoch(epoch) { + wasFound, next = consensus.decider.NthNextValidatorV2(committee.Slots, leader, offset) + } else if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) { wasFound, next = consensus.decider.NthNextValidator(committee.Slots, leader, offset) } else { wasFound, next = consensus.decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset) diff --git a/consensus/quorum/quorom_test.go b/consensus/quorum/quorom_test.go index 67f9496b17..7622f5aac3 100644 --- a/consensus/quorum/quorom_test.go +++ b/consensus/quorum/quorom_test.go @@ -1,9 +1,11 @@ package quorum import ( + "fmt" "math/big" "strings" "testing" + "time" bls_core "github.com/harmony-one/bls/ffi/go/bls" harmony_bls "github.com/harmony-one/harmony/crypto/bls" @@ -549,32 +551,233 @@ func 
TestInvalidAggregateSig(test *testing.T) { } } -func TestCIdentities_NthNextValidatorHmy(t *testing.T) { - address := []common.Address{ - common.HexToAddress("0x1"), - common.HexToAddress("0x2"), - common.HexToAddress("0x3"), +func createTestCIdentities(numAddresses int, keysPerAddress int) (*cIdentities, shard.SlotList, []harmony_bls.PublicKeyWrapper) { + testAddresses := make([]common.Address, 0, numAddresses*numAddresses) + for i := int(0); i < numAddresses; i++ { + h := fmt.Sprintf("0x%040x", i) + addr := common.HexToAddress(h) + testAddresses = append(testAddresses, addr) } slots := shard.SlotList{} list := []harmony_bls.PublicKeyWrapper{} - for i := 0; i < 3; i++ { - for j := 0; j < 3; j++ { + // generate slots and public keys + for i := 0; i < numAddresses; i++ { + for j := 0; j < keysPerAddress; j++ { // keys per address blsKey := harmony_bls.RandPrivateKey() wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} wrapper.Bytes.FromLibBLSPublicKey(wrapper.Object) + slots = append(slots, shard.Slot{ - EcdsaAddress: address[i%3], + EcdsaAddress: testAddresses[i], BLSPublicKey: wrapper.Bytes, EffectiveStake: nil, }) list = append(list, wrapper) } } + // initialize and update cIdentities + c := newCIdentities() + c.UpdateParticipants(list, []bls.PublicKeyWrapper{}) + return c, slots, list +} + +func TestCIdentities_NthNextValidatorHmy(t *testing.T) { + c, slots, list := createTestCIdentities(3, 3) + + found, key := c.NthNextValidator(slots, &list[0], 1) + require.Equal(t, true, found) + // because we skip 3 keys of current validator + require.Equal(t, 3, c.IndexOf(key.Bytes)) +} + +func TestCIdentities_NthNextValidatorFailedEdgeCase1(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Recovered from panic as expected: %v", r) + } else { + t.Errorf("Expected a panic when next is 0, but no panic occurred") + } + }() + + // create test identities and slots + c, slots, _ := createTestCIdentities(3, 3) + + // create a public key 
wrapper that doesn't exist in the identities + t.Log("creating a random public key wrapper not present in test identities") + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + + // Edge Case: Trigger NthNextValidator with next=0, which should cause a panic + t.Log("Calling NthNextValidator with next=0 to test panic handling") + c.NthNextValidator(slots, &wrapper, 0) +} + +func TestCIdentities_NthNextValidatorFailedEdgeCase2(t *testing.T) { + // create test identities and slots + c, slots, list := createTestCIdentities(1, 3) + + done := make(chan bool) + + go func() { + // possible infinite loop, it will time out + c.NthNextValidator(slots, &list[1], 1) + + done <- true + }() + + select { + case <-done: + t.Error("Expected a timeout, but successfully calculated next leader") + + case <-time.After(5 * time.Second): + t.Log("Test timed out, possible infinite loop") + } +} +func TestCIdentities_NthNextValidatorFailedEdgeCase3(t *testing.T) { + // create 3 test addresses + testAddresses := make([]common.Address, 0, 3) + for i := int(0); i < 3; i++ { + h := fmt.Sprintf("0x%040x", i) + addr := common.HexToAddress(h) + testAddresses = append(testAddresses, addr) + } + slots := shard.SlotList{} + list := []harmony_bls.PublicKeyWrapper{} + + // First add 4 keys for first address + for i := 0; i < 4; i++ { + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + wrapper.Bytes.FromLibBLSPublicKey(wrapper.Object) + + slots = append(slots, shard.Slot{ + EcdsaAddress: testAddresses[0], + BLSPublicKey: wrapper.Bytes, + EffectiveStake: nil, + }) + list = append(list, wrapper) + } + + // Then add 1 key for next two addresses + for i := 1; i < 3; i++ { + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + wrapper.Bytes.FromLibBLSPublicKey(wrapper.Object) + + slots = append(slots, shard.Slot{ + 
EcdsaAddress: testAddresses[i], + BLSPublicKey: wrapper.Bytes, + EffectiveStake: nil, + }) + list = append(list, wrapper) + } + + // initialize and update cIdentities c := newCIdentities() c.UpdateParticipants(list, []bls.PublicKeyWrapper{}) + + // current key is the first one. found, key := c.NthNextValidator(slots, &list[0], 1) require.Equal(t, true, found) + // because we skip 4 keys of first validator, the next validator key index is 4 (starts from 0) + // but it returns 5 and skips second validator (key index: 4) + require.Equal(t, 5, c.IndexOf(key.Bytes)) + t.Log("second validator were skipped") +} + +func TestCIdentities_NthNextValidatorV2Hmy(t *testing.T) { + c, slots, list := createTestCIdentities(3, 3) + + found, key := c.NthNextValidatorV2(slots, &list[0], 1) + require.Equal(t, true, found) // because we skip 3 keys of current validator require.Equal(t, 3, c.IndexOf(key.Bytes)) } + +func TestCIdentities_NthNextValidatorV2EdgeCase1(t *testing.T) { + // create test identities and slots + c, slots, _ := createTestCIdentities(3, 3) + + // create a public key wrapper that doesn't exist in the identities + t.Log("creating a random public key wrapper not present in test identities") + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + + // Edge Case: Trigger NthNextValidator with next=0, which should cause a panic + t.Log("Calling NthNextValidatorV2 with next=0 to test panic handling") + found, key := c.NthNextValidatorV2(slots, &wrapper, 0) + + require.Equal(t, true, found) + require.Equal(t, 0, c.IndexOf(key.Bytes)) +} + +func TestCIdentities_NthNextValidatorV2EdgeCase2(t *testing.T) { + // create test identities and slots + c, slots, list := createTestCIdentities(1, 3) + + done := make(chan bool) + + go func() { + c.NthNextValidatorV2(slots, &list[1], 1) + + done <- true + }() + + select { + case <-done: + t.Log("Test completed successfully ") + case <-time.After(5 * time.Second): + 
t.Error("timeout, possible infinite loop") + } +} + +func TestCIdentities_NthNextValidatorV2EdgeCase3(t *testing.T) { + // create 3 test addresses + testAddresses := make([]common.Address, 0, 3) + for i := int(0); i < 3; i++ { + h := fmt.Sprintf("0x%040x", i) + addr := common.HexToAddress(h) + testAddresses = append(testAddresses, addr) + } + slots := shard.SlotList{} + list := []harmony_bls.PublicKeyWrapper{} + + // First add 4 keys for first address + for i := 0; i < 4; i++ { + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + wrapper.Bytes.FromLibBLSPublicKey(wrapper.Object) + + slots = append(slots, shard.Slot{ + EcdsaAddress: testAddresses[0], + BLSPublicKey: wrapper.Bytes, + EffectiveStake: nil, + }) + list = append(list, wrapper) + } + + // Then add 1 key for next two addresses + for i := 1; i < 3; i++ { + blsKey := harmony_bls.RandPrivateKey() + wrapper := harmony_bls.PublicKeyWrapper{Object: blsKey.GetPublicKey()} + wrapper.Bytes.FromLibBLSPublicKey(wrapper.Object) + + slots = append(slots, shard.Slot{ + EcdsaAddress: testAddresses[i], + BLSPublicKey: wrapper.Bytes, + EffectiveStake: nil, + }) + list = append(list, wrapper) + } + + // initialize and update cIdentities + c := newCIdentities() + c.UpdateParticipants(list, []bls.PublicKeyWrapper{}) + + // current key is the first one. + found, key := c.NthNextValidatorV2(slots, &list[0], 1) + require.Equal(t, true, found) + // because we skip 4 keys of first validator, the next validator key index is 4 (starts from 0) + require.Equal(t, 4, c.IndexOf(key.Bytes)) +} diff --git a/consensus/quorum/quorum.go b/consensus/quorum/quorum.go index f2cfe82bee..888b2c3511 100644 --- a/consensus/quorum/quorum.go +++ b/consensus/quorum/quorum.go @@ -77,6 +77,7 @@ type ParticipantTracker interface { ParticipantsCount() int64 // NthNextValidator returns key for next validator. It assumes external validators and leader rotation. 
NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) + NthNextValidatorV2(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) FirstParticipant(shardingconfig.Instance) *bls.PublicKeyWrapper UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper) @@ -217,7 +218,46 @@ func (s *cIdentities) NthNext(pubKey *bls.PublicKeyWrapper, next int) (bool, *bl return found, &s.publicKeys[idx] } -// NthNextValidator return the Nth next pubkey nodes, but from another validator. +// NthNextValidatorV2 returns the Nth next pubkey nodes, but from another validator. +func (s *cIdentities) NthNextValidatorV2(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) { + if len(s.publicKeys) == 0 || next < 0 { + return false, pubKey + } + + publicToAddress := make(map[bls.SerializedPublicKey]common.Address, len(slotList)) + for _, slot := range slotList { + publicToAddress[slot.BLSPublicKey] = slot.EcdsaAddress + } + + pubKeyIndex := s.IndexOf(pubKey.Bytes) + if pubKeyIndex == -1 { + utils.Logger().Error(). + Str("key", pubKey.Bytes.Hex()). + Msg("[NthNextValidator] pubKey not found") + } + + if pubKeyIndex == -1 && next == 0 { + return true, &s.publicKeys[0] + } + + numKeys := len(s.publicKeys) + attempts := 0 + + for { + idx := (pubKeyIndex + attempts + next) % numKeys + if attempts > numKeys { + utils.Logger().Warn(). + Str("key", pubKey.Bytes.Hex()). 
+				Msg("[NthNextValidatorV2] Could not find a different validator within limit")
+			return false, pubKey
+		}
+		if publicToAddress[s.publicKeys[idx].Bytes] != publicToAddress[pubKey.Bytes] {
+			return true, &s.publicKeys[idx]
+		}
+		attempts++
+	}
+}
+
 func (s *cIdentities) NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
 	found := false
diff --git a/consensus/quorum/thread_safe_decider.go b/consensus/quorum/thread_safe_decider.go
index ac5ae75dec..2cc877448a 100644
--- a/consensus/quorum/thread_safe_decider.go
+++ b/consensus/quorum/thread_safe_decider.go
@@ -56,6 +56,12 @@ func (a threadSafeDeciderImpl) NthNextValidator(slotList shard.SlotList, pubKey
 	return a.decider.NthNextValidator(slotList, pubKey, next)
 }
 
+func (a threadSafeDeciderImpl) NthNextValidatorV2(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.NthNextValidatorV2(slotList, pubKey, next)
+}
+
 func (a threadSafeDeciderImpl) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
diff --git a/consensus/view_change.go b/consensus/view_change.go
index 42dadba50c..6fb6def705 100644
--- a/consensus/view_change.go
+++ b/consensus/view_change.go
@@ -198,7 +198,12 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
 	var wasFound bool
 	var next *bls.PublicKeyWrapper
 	if blockchain != nil && blockchain.Config().IsLeaderRotationInternalValidators(epoch) {
-		if blockchain.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
+		if blockchain.Config().IsLeaderRotationV2Epoch(epoch) {
+			wasFound, next = consensus.decider.NthNextValidatorV2(
+				committee.Slots,
lastLeaderPubKey, diff --git a/internal/params/config.go b/internal/params/config.go index 77a4c37f34..3e774c113f 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -71,6 +71,7 @@ var ( AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: big.NewInt(2152), // 2024-10-31 13:02 UTC LeaderRotationExternalValidatorsEpoch: big.NewInt(2152), // 2024-10-31 13:02 UTC + LeaderRotationV2Epoch: EpochTBD, FeeCollectEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00 ValidatorCodeFixEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00 HIP30Epoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00 @@ -119,6 +120,7 @@ var ( AllowlistEpoch: big.NewInt(2), LeaderRotationInternalValidatorsEpoch: big.NewInt(3044), LeaderRotationExternalValidatorsEpoch: big.NewInt(3044), + LeaderRotationV2Epoch: EpochTBD, FeeCollectEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00 ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00 HIP30Epoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00 @@ -166,6 +168,7 @@ var ( AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: EpochTBD, LeaderRotationExternalValidatorsEpoch: EpochTBD, + LeaderRotationV2Epoch: EpochTBD, FeeCollectEpoch: EpochTBD, ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, @@ -214,6 +217,7 @@ var ( AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: big.NewInt(144), LeaderRotationExternalValidatorsEpoch: big.NewInt(144), + LeaderRotationV2Epoch: EpochTBD, FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), @@ -263,6 +267,7 @@ var ( FeeCollectEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: EpochTBD, LeaderRotationExternalValidatorsEpoch: EpochTBD, + LeaderRotationV2Epoch: EpochTBD, ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), @@ -309,6 +314,7 @@ var ( AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: big.NewInt(5), 
LeaderRotationExternalValidatorsEpoch: big.NewInt(6), + LeaderRotationV2Epoch: EpochTBD, FeeCollectEpoch: big.NewInt(2), ValidatorCodeFixEpoch: big.NewInt(2), HIP30Epoch: EpochTBD, @@ -358,6 +364,7 @@ var ( big.NewInt(0), // AllowlistEpoch big.NewInt(1), // LeaderRotationExternalNonBeaconLeaders big.NewInt(1), // LeaderRotationExternalBeaconLeaders + big.NewInt(0), // LeaderRotationV2Epoch big.NewInt(0), // FeeCollectEpoch big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // BlockGas30M @@ -408,6 +415,7 @@ var ( big.NewInt(0), // AllowlistEpoch big.NewInt(1), // LeaderRotationExternalNonBeaconLeaders big.NewInt(1), // LeaderRotationExternalBeaconLeaders + big.NewInt(0), // LeaderRotationV2Epoch big.NewInt(0), // FeeCollectEpoch big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // HIP30Epoch @@ -560,6 +568,8 @@ type ChainConfig struct { LeaderRotationExternalValidatorsEpoch *big.Int `json:"leader-rotation-external-validators,omitempty"` + LeaderRotationV2Epoch *big.Int `json:"leader-rotation-v2-epoch,omitempty"` + // FeeCollectEpoch is the first epoch that enables txn fees to be collected into the community-managed account. // It should >= StakingEpoch. // Before StakingEpoch, txn fees are paid to miner/leader. @@ -842,6 +852,10 @@ func (c *ChainConfig) IsLeaderRotationExternalValidatorsAllowed(epoch *big.Int) return isForked(c.LeaderRotationExternalValidatorsEpoch, epoch) } +func (c *ChainConfig) IsLeaderRotationV2Epoch(epoch *big.Int) bool { + return isForked(c.LeaderRotationV2Epoch, epoch) +} + // IsFeeCollectEpoch determines whether Txn Fees will be collected into the community-managed account. func (c *ChainConfig) IsFeeCollectEpoch(epoch *big.Int) bool { return isForked(c.FeeCollectEpoch, epoch)